[Pkg-xen-changes] r87 - branches/20060307-fern/patches

Ralph Passgang tha-guest at costa.debian.org
Tue Mar 7 11:26:55 UTC 2006


Author: tha-guest
Date: 2006-03-07 11:26:46 +0000 (Tue, 07 Mar 2006)
New Revision: 87

Added:
   branches/20060307-fern/patches/linux-2.6.12-xen.hg8746.patch
Removed:
   branches/20060307-fern/patches/linux-2.6.12-xen.patch
Log:
- removed the static patch file and uploaded a newer version.
- also renamed the patch to include the exact xen hg revision
  (this should help keep the file up to date without guessing
  which hg revision the patch was created against)
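
A minimal sketch (not part of this commit) of how a patch named after the
exact hg revision might be regenerated, assuming a pristine 2.6.12 tree and
a xen-ified tree already prepared from Xen hg revision 8746 sit next to the
hg checkout; the checkout path and revision handling below are illustrative
assumptions, only the tree names and "8746" come from this changeset:

    # assumed layout: xen hg checkout plus the two kernel trees side by side
    cd xen-unstable.hg && hg update -r 8746 && cd ..          # pin the Xen revision
    REV=$(cd xen-unstable.hg && hg identify -n | tr -d '+')   # -> 8746
    diff -Nurp pristine-linux-2.6.12 linux-2.6.12-xen \
        > linux-2.6.12-xen.hg${REV}.patch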


Added: branches/20060307-fern/patches/linux-2.6.12-xen.hg8746.patch
===================================================================
--- branches/20060307-fern/patches/linux-2.6.12-xen.hg8746.patch	2006-03-07 11:23:14 UTC (rev 86)
+++ branches/20060307-fern/patches/linux-2.6.12-xen.hg8746.patch	2006-03-07 11:26:46 UTC (rev 87)
@@ -0,0 +1,110928 @@
+diff -Nurp pristine-linux-2.6.12/arch/i386/Kconfig linux-2.6.12-xen/arch/i386/Kconfig
+--- pristine-linux-2.6.12/arch/i386/Kconfig	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/Kconfig	2006-03-05 23:54:37.188023911 +0100
+@@ -487,6 +487,19 @@ config SMP
+ 
+ 	  If you don't know what to do here, say N.
+ 
++config SMP_ALTERNATIVES
++	bool "SMP alternatives support (EXPERIMENTAL)"
++	depends on SMP && EXPERIMENTAL
++	help
++	  Try to reduce the overhead of running an SMP kernel on a uniprocessor
++	  host slightly by replacing certain key instruction sequences
++	  according to whether we currently have more than one CPU available.
++	  This should provide a noticeable boost to performance when
++	  running SMP kernels on UP machines, and have negligible impact
++	  when running on an true SMP host.
++
++          If unsure, say N.
++	  
+ config NR_CPUS
+ 	int "Maximum number of CPUs (2-255)"
+ 	range 2 255
+@@ -1226,6 +1239,15 @@ config SCx200
+ 	  This support is also available as a module.  If compiled as a
+ 	  module, it will be called scx200.
+ 
++config HOTPLUG_CPU
++	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
++	depends on SMP && HOTPLUG && EXPERIMENTAL
++	---help---
++	  Say Y here to experiment with turning CPUs off and on.  CPUs
++	  can be controlled through /sys/devices/system/cpu.
++
++	  Say N.
++
+ source "drivers/pcmcia/Kconfig"
+ 
+ source "drivers/pci/hotplug/Kconfig"
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/apic.c linux-2.6.12-xen/arch/i386/kernel/apic.c
+--- pristine-linux-2.6.12/arch/i386/kernel/apic.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/apic.c	2006-03-05 23:54:37.018048959 +0100
+@@ -26,6 +26,7 @@
+ #include <linux/mc146818rtc.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/sysdev.h>
++#include <linux/cpu.h>
+ 
+ #include <asm/atomic.h>
+ #include <asm/smp.h>
+@@ -1048,7 +1049,7 @@ void __init setup_secondary_APIC_clock(v
+ 	setup_APIC_timer(calibration_result);
+ }
+ 
+-void __init disable_APIC_timer(void)
++void __devinit disable_APIC_timer(void)
+ {
+ 	if (using_apic_timer) {
+ 		unsigned long v;
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/cpu/cpufreq/powernow-k8.c linux-2.6.12-xen/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+--- pristine-linux-2.6.12/arch/i386/kernel/cpu/cpufreq/powernow-k8.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/cpu/cpufreq/powernow-k8.c	2006-03-05 23:54:34.363440119 +0100
+@@ -44,7 +44,7 @@
+ 
+ #define PFX "powernow-k8: "
+ #define BFX PFX "BIOS error: "
+-#define VERSION "version 1.40.2"
++#define VERSION "version 1.40.4"
+ #include "powernow-k8.h"
+ 
+ /* serialize freq changes  */
+@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
+ {
+ 	struct powernow_k8_data *data;
+ 	cpumask_t oldmask = CPU_MASK_ALL;
+-	int rc;
++	int rc, i;
+ 
+ 	if (!check_supported_cpu(pol->cpu))
+ 		return -ENODEV;
+@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
+ 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
+ 	       data->currfid, data->currvid);
+ 
+-	powernow_data[pol->cpu] = data;
++	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
++		powernow_data[i] = data;
++	}
+ 
+ 	return 0;
+ 
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/io_apic.c linux-2.6.12-xen/arch/i386/kernel/io_apic.c
+--- pristine-linux-2.6.12/arch/i386/kernel/io_apic.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/io_apic.c	2006-03-05 23:54:37.047044686 +0100
+@@ -576,9 +576,11 @@ static int balanced_irq(void *unused)
+ 		try_to_freeze(PF_FREEZE);
+ 		if (time_after(jiffies,
+ 				prev_balance_time+balanced_irq_interval)) {
++			preempt_disable();
+ 			do_irq_balance();
+ 			prev_balance_time = jiffies;
+ 			time_remaining = balanced_irq_interval;
++			preempt_enable();
+ 		}
+ 	}
+ 	return 0;
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/irq.c linux-2.6.12-xen/arch/i386/kernel/irq.c
+--- pristine-linux-2.6.12/arch/i386/kernel/irq.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/irq.c	2006-03-05 23:54:37.048044539 +0100
+@@ -15,6 +15,9 @@
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel_stat.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/delay.h>
+ 
+ DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp;
+ EXPORT_PER_CPU_SYMBOL(irq_stat);
+@@ -210,9 +213,8 @@ int show_interrupts(struct seq_file *p, 
+ 
+ 	if (i == 0) {
+ 		seq_printf(p, "           ");
+-		for (j=0; j<NR_CPUS; j++)
+-			if (cpu_online(j))
+-				seq_printf(p, "CPU%d       ",j);
++		for_each_cpu(j)
++			seq_printf(p, "CPU%d       ",j);
+ 		seq_putc(p, '\n');
+ 	}
+ 
+@@ -225,9 +227,8 @@ int show_interrupts(struct seq_file *p, 
+ #ifndef CONFIG_SMP
+ 		seq_printf(p, "%10u ", kstat_irqs(i));
+ #else
+-		for (j = 0; j < NR_CPUS; j++)
+-			if (cpu_online(j))
+-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++		for_each_cpu(j)
++			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ #endif
+ 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ 		seq_printf(p, "  %s", action->name);
+@@ -240,16 +241,13 @@ skip:
+ 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ 	} else if (i == NR_IRQS) {
+ 		seq_printf(p, "NMI: ");
+-		for (j = 0; j < NR_CPUS; j++)
+-			if (cpu_online(j))
+-				seq_printf(p, "%10u ", nmi_count(j));
++		for_each_cpu(j)
++ 			seq_printf(p, "%10u ", nmi_count(j));
+ 		seq_putc(p, '\n');
+ #ifdef CONFIG_X86_LOCAL_APIC
+ 		seq_printf(p, "LOC: ");
+-		for (j = 0; j < NR_CPUS; j++)
+-			if (cpu_online(j))
+-				seq_printf(p, "%10u ",
+-					per_cpu(irq_stat,j).apic_timer_irqs);
++		for_each_cpu(j)
++			seq_printf(p, "%10u ", per_cpu(irq_stat,j).apic_timer_irqs);
+ 		seq_putc(p, '\n');
+ #endif
+ 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+@@ -259,3 +257,45 @@ skip:
+ 	}
+ 	return 0;
+ }
++
++#ifdef CONFIG_HOTPLUG_CPU
++#include <mach_apic.h>
++
++void fixup_irqs(cpumask_t map)
++{
++	unsigned int irq;
++	static int warned;
++
++	for (irq = 0; irq < NR_IRQS; irq++) {
++		cpumask_t mask;
++		if (irq == 2)
++			continue;
++
++		cpus_and(mask, irq_affinity[irq], map);
++		if (any_online_cpu(mask) == NR_CPUS) {
++			printk("Breaking affinity for irq %i\n", irq);
++			mask = map;
++		}
++		if (irq_desc[irq].handler->set_affinity)
++			irq_desc[irq].handler->set_affinity(irq, mask);
++		else if (irq_desc[irq].action && !(warned++))
++			printk("Cannot set affinity for irq %i\n", irq);
++	}
++
++#if 0
++	barrier();
++	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
++	   [note the nop - the interrupt-enable boundary on x86 is two
++	   instructions from sti] - to flush out pending hardirqs and
++	   IPIs. After this point nothing is supposed to reach this CPU." */
++	__asm__ __volatile__("sti; nop; cli");
++	barrier();
++#else
++	/* That doesn't seem sufficient.  Give it 1ms. */
++	local_irq_enable();
++	mdelay(1);
++	local_irq_disable();
++#endif
++}
++#endif
++
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/Makefile linux-2.6.12-xen/arch/i386/kernel/Makefile
+--- pristine-linux-2.6.12/arch/i386/kernel/Makefile	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/Makefile	2006-03-05 23:54:37.188023911 +0100
+@@ -33,6 +33,7 @@ obj-$(CONFIG_ACPI_SRAT) 	+= srat.o
+ obj-$(CONFIG_HPET_TIMER) 	+= time_hpet.o
+ obj-$(CONFIG_EFI) 		+= efi.o efi_stub.o
+ obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
++obj-$(CONFIG_SMP_ALTERNATIVES)  += smpalts.o
+ 
+ EXTRA_AFLAGS   := -traditional
+ 
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/msr.c linux-2.6.12-xen/arch/i386/kernel/msr.c
+--- pristine-linux-2.6.12/arch/i386/kernel/msr.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/msr.c	2006-03-05 23:54:37.049044392 +0100
+@@ -260,7 +260,7 @@ static struct file_operations msr_fops =
+ 	.open = msr_open,
+ };
+ 
+-static int msr_class_simple_device_add(int i)
++static int __devinit msr_class_simple_device_add(int i)
+ {
+ 	int err = 0;
+ 	struct class_device *class_err;
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/process.c linux-2.6.12-xen/arch/i386/kernel/process.c
+--- pristine-linux-2.6.12/arch/i386/kernel/process.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/process.c	2006-03-05 23:54:37.049044392 +0100
+@@ -13,6 +13,7 @@
+ 
+ #include <stdarg.h>
+ 
++#include <linux/cpu.h>
+ #include <linux/errno.h>
+ #include <linux/sched.h>
+ #include <linux/fs.h>
+@@ -54,6 +55,9 @@
+ #include <linux/irq.h>
+ #include <linux/err.h>
+ 
++#include <asm/tlbflush.h>
++#include <asm/cpu.h>
++
+ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+ 
+ static int hlt_counter;
+@@ -138,6 +142,34 @@ static void poll_idle (void)
+ 	}
+ }
+ 
++#ifdef CONFIG_HOTPLUG_CPU
++#include <asm/nmi.h>
++/* We don't actually take CPU down, just spin without interrupts. */
++static inline void play_dead(void)
++{
++	/* Ack it */
++	__get_cpu_var(cpu_state) = CPU_DEAD;
++
++	/* We shouldn't have to disable interrupts while dead, but
++	 * some interrupts just don't seem to go away, and this makes
++	 * it "work" for testing purposes. */
++	/* Death loop */
++	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
++		cpu_relax();
++
++	local_irq_disable();
++	__flush_tlb_all();
++	cpu_set(smp_processor_id(), cpu_online_map);
++	enable_APIC_timer();
++	local_irq_enable();
++}
++#else
++static inline void play_dead(void)
++{
++	BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
+ /*
+  * The idle thread. There's no useful work to be
+  * done, so just try to conserve power and have a
+@@ -160,6 +192,9 @@ void cpu_idle (void)
+ 			if (!idle)
+ 				idle = default_idle;
+ 
++			if (cpu_is_offline(cpu))
++				play_dead();
++
+ 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
+ 			idle();
+ 		}
+@@ -827,6 +862,8 @@ asmlinkage int sys_get_thread_area(struc
+ 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ 		return -EINVAL;
+ 
++	memset(&info, 0, sizeof(info));
++
+ 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+ 
+ 	info.entry_number = idx;
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/smpalts.c linux-2.6.12-xen/arch/i386/kernel/smpalts.c
+--- pristine-linux-2.6.12/arch/i386/kernel/smpalts.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/i386/kernel/smpalts.c	2006-03-05 23:54:37.189023764 +0100
+@@ -0,0 +1,85 @@
++#include <linux/kernel.h>
++#include <asm/system.h>
++#include <asm/smp_alt.h>
++#include <asm/processor.h>
++#include <asm/string.h>
++
++struct smp_replacement_record {
++	unsigned char targ_size;
++	unsigned char smp1_size;
++	unsigned char smp2_size;
++	unsigned char up_size;
++	unsigned char feature;
++	unsigned char data[0];
++};
++
++struct smp_alternative_record {
++	void *targ_start;
++	struct smp_replacement_record *repl;
++};
++
++extern struct smp_alternative_record __start_smp_alternatives_table,
++  __stop_smp_alternatives_table;
++extern unsigned long __init_begin, __init_end;
++
++void prepare_for_smp(void)
++{
++	struct smp_alternative_record *r;
++	printk(KERN_INFO "Enabling SMP...\n");
++	for (r = &__start_smp_alternatives_table;
++	     r != &__stop_smp_alternatives_table;
++	     r++) {
++		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
++		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
++		BUG_ON(r->repl->targ_size < r->repl->up_size);
++               if (system_state == SYSTEM_RUNNING &&
++                   r->targ_start >= (void *)&__init_begin &&
++                   r->targ_start < (void *)&__init_end)
++                       continue;
++		if (r->repl->feature != (unsigned char)-1 &&
++		    boot_cpu_has(r->repl->feature)) {
++			memcpy(r->targ_start,
++			       r->repl->data + r->repl->smp1_size,
++			       r->repl->smp2_size);
++			memset(r->targ_start + r->repl->smp2_size,
++			       0x90,
++			       r->repl->targ_size - r->repl->smp2_size);
++		} else {
++			memcpy(r->targ_start,
++			       r->repl->data,
++			       r->repl->smp1_size);
++			memset(r->targ_start + r->repl->smp1_size,
++			       0x90,
++			       r->repl->targ_size - r->repl->smp1_size);
++		}
++	}
++	/* Paranoia */
++	asm volatile ("jmp 1f\n1:");
++	mb();
++}
++
++void unprepare_for_smp(void)
++{
++	struct smp_alternative_record *r;
++	printk(KERN_INFO "Disabling SMP...\n");
++	for (r = &__start_smp_alternatives_table;
++	     r != &__stop_smp_alternatives_table;
++	     r++) {
++		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
++		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
++		BUG_ON(r->repl->targ_size < r->repl->up_size);
++               if (system_state == SYSTEM_RUNNING &&
++                   r->targ_start >= (void *)&__init_begin &&
++                   r->targ_start < (void *)&__init_end)
++                       continue;
++		memcpy(r->targ_start,
++		       r->repl->data + r->repl->smp1_size + r->repl->smp2_size,
++		       r->repl->up_size);
++		memset(r->targ_start + r->repl->up_size,
++		       0x90,
++		       r->repl->targ_size - r->repl->up_size);
++	}
++	/* Paranoia */
++	asm volatile ("jmp 1f\n1:");
++	mb();
++}
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/smpboot.c linux-2.6.12-xen/arch/i386/kernel/smpboot.c
+--- pristine-linux-2.6.12/arch/i386/kernel/smpboot.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/smpboot.c	2006-03-05 23:54:37.190023616 +0100
+@@ -44,6 +44,9 @@
+ #include <linux/smp_lock.h>
+ #include <linux/irq.h>
+ #include <linux/bootmem.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/percpu.h>
+ 
+ #include <linux/delay.h>
+ #include <linux/mc146818rtc.h>
+@@ -90,6 +93,9 @@ static int trampoline_exec;
+ 
+ static void map_cpu_to_logical_apicid(void);
+ 
++/* State of each CPU. */
++DEFINE_PER_CPU(int, cpu_state) = { 0 };
++
+ /*
+  * Currently trivial. Write the real->protected mode
+  * bootstrap into the page concerned. The caller
+@@ -1001,6 +1007,11 @@ static void __init smp_boot_cpus(unsigne
+ 		if (max_cpus <= cpucount+1)
+ 			continue;
+ 
++#ifdef CONFIG_SMP_ALTERNATIVES
++		if (kicked == 1)
++			prepare_for_smp();
++#endif
++
+ 		if (do_boot_cpu(apicid))
+ 			printk("CPU #%d not responding - cannot use it.\n",
+ 								apicid);
+@@ -1107,6 +1118,9 @@ static void __init smp_boot_cpus(unsigne
+    who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
+ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
++	smp_commenced_mask = cpumask_of_cpu(0);
++	cpu_callin_map = cpumask_of_cpu(0);
++	mb();
+ 	smp_boot_cpus(max_cpus);
+ }
+ 
+@@ -1116,20 +1130,104 @@ void __devinit smp_prepare_boot_cpu(void
+ 	cpu_set(smp_processor_id(), cpu_callout_map);
+ }
+ 
+-int __devinit __cpu_up(unsigned int cpu)
++#ifdef CONFIG_HOTPLUG_CPU
++
++/* must be called with the cpucontrol mutex held */
++static int __devinit cpu_enable(unsigned int cpu)
+ {
+-	/* This only works at boot for x86.  See "rewrite" above. */
+-	if (cpu_isset(cpu, smp_commenced_mask)) {
+-		local_irq_enable();
+-		return -ENOSYS;
++	/* get the target out of its holding state */
++	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
++	wmb();
++
++	/* wait for the processor to ack it. timeout? */
++	while (!cpu_online(cpu))
++		cpu_relax();
++
++	fixup_irqs(cpu_online_map);
++	/* counter the disable in fixup_irqs() */
++	local_irq_enable();
++	return 0;
++}
++
++int __cpu_disable(void)
++{
++	cpumask_t map = cpu_online_map;
++	int cpu = smp_processor_id();
++
++	/*
++	 * Perhaps use cpufreq to drop frequency, but that could go
++	 * into generic code.
++ 	 *
++	 * We won't take down the boot processor on i386 due to some
++	 * interrupts only being able to be serviced by the BSP.
++	 * Especially so if we're not using an IOAPIC	-zwane
++	 */
++	if (cpu == 0)
++		return -EBUSY;
++
++	/* We enable the timer again on the exit path of the death loop */
++	disable_APIC_timer();
++	/* Allow any queued timer interrupts to get serviced */
++	local_irq_enable();
++	mdelay(1);
++	local_irq_disable();
++
++	cpu_clear(cpu, map);
++	fixup_irqs(map);
++	/* It's now safe to remove this processor from the online map */
++	cpu_clear(cpu, cpu_online_map);
++	return 0;
++}
++
++void __cpu_die(unsigned int cpu)
++{
++	/* We don't do anything here: idle task is faking death itself. */
++	unsigned int i;
++
++	for (i = 0; i < 10; i++) {
++		/* They ack this in play_dead by setting CPU_DEAD */
++		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
++			return;
++		current->state = TASK_UNINTERRUPTIBLE;
++		schedule_timeout(HZ/10);
+ 	}
++ 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
++}
++#else /* ... !CONFIG_HOTPLUG_CPU */
++int __cpu_disable(void)
++{
++	return -ENOSYS;
++}
+ 
++void __cpu_die(unsigned int cpu)
++{
++	/* We said "no" in __cpu_disable */
++	BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __devinit __cpu_up(unsigned int cpu)
++{
+ 	/* In case one didn't come up */
+ 	if (!cpu_isset(cpu, cpu_callin_map)) {
++		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
+ 		local_irq_enable();
+ 		return -EIO;
+ 	}
+ 
++#ifdef CONFIG_HOTPLUG_CPU
++	/* Already up, and in cpu_quiescent now? */
++	if (cpu_isset(cpu, smp_commenced_mask)) {
++		cpu_enable(cpu);
++		return 0;
++	}
++#endif
++
++#ifdef CONFIG_SMP_ALTERNATIVES
++	if (num_online_cpus() == 1)
++		prepare_for_smp();
++#endif
++
+ 	local_irq_enable();
+ 	/* Unleash the CPU! */
+ 	cpu_set(cpu, smp_commenced_mask);
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/smpboot.c.orig linux-2.6.12-xen/arch/i386/kernel/smpboot.c.orig
+--- pristine-linux-2.6.12/arch/i386/kernel/smpboot.c.orig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/i386/kernel/smpboot.c.orig	2006-03-05 23:54:37.058043066 +0100
+@@ -0,0 +1,1260 @@
++/*
++ *	x86 SMP booting functions
++ *
++ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
++ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
++ *
++ *	Much of the core SMP work is based on previous work by Thomas Radke, to
++ *	whom a great many thanks are extended.
++ *
++ *	Thanks to Intel for making available several different Pentium,
++ *	Pentium Pro and Pentium-II/Xeon MP machines.
++ *	Original development of Linux SMP code supported by Caldera.
++ *
++ *	This code is released under the GNU General Public License version 2 or
++ *	later.
++ *
++ *	Fixes
++ *		Felix Koop	:	NR_CPUS used properly
++ *		Jose Renau	:	Handle single CPU case.
++ *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
++ *		Greg Wright	:	Fix for kernel stacks panic.
++ *		Erich Boleyn	:	MP v1.4 and additional changes.
++ *	Matthias Sattler	:	Changes for 2.1 kernel map.
++ *	Michel Lespinasse	:	Changes for 2.1 kernel map.
++ *	Michael Chastain	:	Change trampoline.S to gnu as.
++ *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
++ *		Ingo Molnar	:	Added APIC timers, based on code
++ *					from Jose Renau
++ *		Ingo Molnar	:	various cleanups and rewrites
++ *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
++ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
++ *		Martin J. Bligh	: 	Added support for multi-quad systems
++ *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
++*		Rusty Russell	:	Hacked into shape for new "hotplug" boot process. */
++
++#include <linux/module.h>
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/smp_lock.h>
++#include <linux/irq.h>
++#include <linux/bootmem.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/percpu.h>
++
++#include <linux/delay.h>
++#include <linux/mc146818rtc.h>
++#include <asm/tlbflush.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++
++#include <mach_apic.h>
++#include <mach_wakecpu.h>
++#include <smpboot_hooks.h>
++
++/* Set if we find a B stepping CPU */
++static int __initdata smp_b_stepping;
++
++/* Number of siblings per CPU package */
++int smp_num_siblings = 1;
++int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
++EXPORT_SYMBOL(phys_proc_id);
++int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
++EXPORT_SYMBOL(cpu_core_id);
++
++/* bitmap of online cpus */
++cpumask_t cpu_online_map;
++
++cpumask_t cpu_callin_map;
++cpumask_t cpu_callout_map;
++static cpumask_t smp_commenced_mask;
++
++/* Per CPU bogomips and other parameters */
++struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
++
++u8 x86_cpu_to_apicid[NR_CPUS] =
++			{ [0 ... NR_CPUS-1] = 0xff };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++
++/*
++ * Trampoline 80x86 program as an array.
++ */
++
++extern unsigned char trampoline_data [];
++extern unsigned char trampoline_end  [];
++static unsigned char *trampoline_base;
++static int trampoline_exec;
++
++static void map_cpu_to_logical_apicid(void);
++
++/* State of each CPU. */
++DEFINE_PER_CPU(int, cpu_state) = { 0 };
++
++/*
++ * Currently trivial. Write the real->protected mode
++ * bootstrap into the page concerned. The caller
++ * has made sure it's suitably aligned.
++ */
++
++static unsigned long __init setup_trampoline(void)
++{
++	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
++	return virt_to_phys(trampoline_base);
++}
++
++/*
++ * We are called very early to get the low memory for the
++ * SMP bootup trampoline page.
++ */
++void __init smp_alloc_memory(void)
++{
++	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
++	/*
++	 * Has to be in very low memory so we can execute
++	 * real-mode AP code.
++	 */
++	if (__pa(trampoline_base) >= 0x9F000)
++		BUG();
++	/*
++	 * Make the SMP trampoline executable:
++	 */
++	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
++}
++
++/*
++ * The bootstrap kernel entry code has set these up. Save them for
++ * a given CPU
++ */
++
++static void __init smp_store_cpu_info(int id)
++{
++	struct cpuinfo_x86 *c = cpu_data + id;
++
++	*c = boot_cpu_data;
++	if (id!=0)
++		identify_cpu(c);
++	/*
++	 * Mask B, Pentium, but not Pentium MMX
++	 */
++	if (c->x86_vendor == X86_VENDOR_INTEL &&
++	    c->x86 == 5 &&
++	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
++	    c->x86_model <= 3)
++		/*
++		 * Remember we have B step Pentia with bugs
++		 */
++		smp_b_stepping = 1;
++
++	/*
++	 * Certain Athlons might work (for various values of 'work') in SMP
++	 * but they are not certified as MP capable.
++	 */
++	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
++
++		/* Athlon 660/661 is valid. */	
++		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
++			goto valid_k7;
++
++		/* Duron 670 is valid */
++		if ((c->x86_model==7) && (c->x86_mask==0))
++			goto valid_k7;
++
++		/*
++		 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
++		 * It's worth noting that the A5 stepping (662) of some Athlon XP's
++		 * have the MP bit set.
++		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
++		 */
++		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
++		    ((c->x86_model==7) && (c->x86_mask>=1)) ||
++		     (c->x86_model> 7))
++			if (cpu_has_mp)
++				goto valid_k7;
++
++		/* If we get here, it's not a certified SMP capable AMD system. */
++		tainted |= TAINT_UNSAFE_SMP;
++	}
++
++valid_k7:
++	;
++}
++
++/*
++ * TSC synchronization.
++ *
++ * We first check whether all CPUs have their TSC's synchronized,
++ * then we print a warning if not, and always resync.
++ */
++
++static atomic_t tsc_start_flag = ATOMIC_INIT(0);
++static atomic_t tsc_count_start = ATOMIC_INIT(0);
++static atomic_t tsc_count_stop = ATOMIC_INIT(0);
++static unsigned long long tsc_values[NR_CPUS];
++
++#define NR_LOOPS 5
++
++static void __init synchronize_tsc_bp (void)
++{
++	int i;
++	unsigned long long t0;
++	unsigned long long sum, avg;
++	long long delta;
++	unsigned long one_usec;
++	int buggy = 0;
++
++	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
++
++	/* convert from kcyc/sec to cyc/usec */
++	one_usec = cpu_khz / 1000;
++
++	atomic_set(&tsc_start_flag, 1);
++	wmb();
++
++	/*
++	 * We loop a few times to get a primed instruction cache,
++	 * then the last pass is more or less synchronized and
++	 * the BP and APs set their cycle counters to zero all at
++	 * once. This reduces the chance of having random offsets
++	 * between the processors, and guarantees that the maximum
++	 * delay between the cycle counters is never bigger than
++	 * the latency of information-passing (cachelines) between
++	 * two CPUs.
++	 */
++	for (i = 0; i < NR_LOOPS; i++) {
++		/*
++		 * all APs synchronize but they loop on '== num_cpus'
++		 */
++		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
++			mb();
++		atomic_set(&tsc_count_stop, 0);
++		wmb();
++		/*
++		 * this lets the APs save their current TSC:
++		 */
++		atomic_inc(&tsc_count_start);
++
++		rdtscll(tsc_values[smp_processor_id()]);
++		/*
++		 * We clear the TSC in the last loop:
++		 */
++		if (i == NR_LOOPS-1)
++			write_tsc(0, 0);
++
++		/*
++		 * Wait for all APs to leave the synchronization point:
++		 */
++		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
++			mb();
++		atomic_set(&tsc_count_start, 0);
++		wmb();
++		atomic_inc(&tsc_count_stop);
++	}
++
++	sum = 0;
++	for (i = 0; i < NR_CPUS; i++) {
++		if (cpu_isset(i, cpu_callout_map)) {
++			t0 = tsc_values[i];
++			sum += t0;
++		}
++	}
++	avg = sum;
++	do_div(avg, num_booting_cpus());
++
++	sum = 0;
++	for (i = 0; i < NR_CPUS; i++) {
++		if (!cpu_isset(i, cpu_callout_map))
++			continue;
++		delta = tsc_values[i] - avg;
++		if (delta < 0)
++			delta = -delta;
++		/*
++		 * We report bigger than 2 microseconds clock differences.
++		 */
++		if (delta > 2*one_usec) {
++			long realdelta;
++			if (!buggy) {
++				buggy = 1;
++				printk("\n");
++			}
++			realdelta = delta;
++			do_div(realdelta, one_usec);
++			if (tsc_values[i] < avg)
++				realdelta = -realdelta;
++
++			printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
++		}
++
++		sum += delta;
++	}
++	if (!buggy)
++		printk("passed.\n");
++}
++
++static void __init synchronize_tsc_ap (void)
++{
++	int i;
++
++	/*
++	 * Not every cpu is online at the time
++	 * this gets called, so we first wait for the BP to
++	 * finish SMP initialization:
++	 */
++	while (!atomic_read(&tsc_start_flag)) mb();
++
++	for (i = 0; i < NR_LOOPS; i++) {
++		atomic_inc(&tsc_count_start);
++		while (atomic_read(&tsc_count_start) != num_booting_cpus())
++			mb();
++
++		rdtscll(tsc_values[smp_processor_id()]);
++		if (i == NR_LOOPS-1)
++			write_tsc(0, 0);
++
++		atomic_inc(&tsc_count_stop);
++		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
++	}
++}
++#undef NR_LOOPS
++
++extern void calibrate_delay(void);
++
++static atomic_t init_deasserted;
++
++static void __init smp_callin(void)
++{
++	int cpuid, phys_id;
++	unsigned long timeout;
++
++	/*
++	 * If waken up by an INIT in an 82489DX configuration
++	 * we may get here before an INIT-deassert IPI reaches
++	 * our local APIC.  We have to wait for the IPI or we'll
++	 * lock up on an APIC access.
++	 */
++	wait_for_init_deassert(&init_deasserted);
++
++	/*
++	 * (This works even if the APIC is not enabled.)
++	 */
++	phys_id = GET_APIC_ID(apic_read(APIC_ID));
++	cpuid = smp_processor_id();
++	if (cpu_isset(cpuid, cpu_callin_map)) {
++		printk("huh, phys CPU#%d, CPU#%d already present??\n",
++					phys_id, cpuid);
++		BUG();
++	}
++	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
++
++	/*
++	 * STARTUP IPIs are fragile beasts as they might sometimes
++	 * trigger some glue motherboard logic. Complete APIC bus
++	 * silence for 1 second, this overestimates the time the
++	 * boot CPU is spending to send the up to 2 STARTUP IPIs
++	 * by a factor of two. This should be enough.
++	 */
++
++	/*
++	 * Waiting 2s total for startup (udelay is not yet working)
++	 */
++	timeout = jiffies + 2*HZ;
++	while (time_before(jiffies, timeout)) {
++		/*
++		 * Has the boot CPU finished it's STARTUP sequence?
++		 */
++		if (cpu_isset(cpuid, cpu_callout_map))
++			break;
++		rep_nop();
++	}
++
++	if (!time_before(jiffies, timeout)) {
++		printk("BUG: CPU%d started up but did not get a callout!\n",
++			cpuid);
++		BUG();
++	}
++
++	/*
++	 * the boot CPU has finished the init stage and is spinning
++	 * on callin_map until we finish. We are free to set up this
++	 * CPU, first the APIC. (this is probably redundant on most
++	 * boards)
++	 */
++
++	Dprintk("CALLIN, before setup_local_APIC().\n");
++	smp_callin_clear_local_apic();
++	setup_local_APIC();
++	map_cpu_to_logical_apicid();
++
++	/*
++	 * Get our bogomips.
++	 */
++	calibrate_delay();
++	Dprintk("Stack at about %p\n",&cpuid);
++
++	/*
++	 * Save our processor parameters
++	 */
++ 	smp_store_cpu_info(cpuid);
++
++	disable_APIC_timer();
++
++	/*
++	 * Allow the master to continue.
++	 */
++	cpu_set(cpuid, cpu_callin_map);
++
++	/*
++	 *      Synchronize the TSC with the BP
++	 */
++	if (cpu_has_tsc && cpu_khz)
++		synchronize_tsc_ap();
++}
++
++static int cpucount;
++
++/*
++ * Activate a secondary processor.
++ */
++static void __init start_secondary(void *unused)
++{
++	/*
++	 * Dont put anything before smp_callin(), SMP
++	 * booting is too fragile that we want to limit the
++	 * things done here to the most necessary things.
++	 */
++	cpu_init();
++	smp_callin();
++	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
++		rep_nop();
++	setup_secondary_APIC_clock();
++	if (nmi_watchdog == NMI_IO_APIC) {
++		disable_8259A_irq(0);
++		enable_NMI_through_LVT0(NULL);
++		enable_8259A_irq(0);
++	}
++	enable_APIC_timer();
++	/*
++	 * low-memory mappings have been cleared, flush them from
++	 * the local TLBs too.
++	 */
++	local_flush_tlb();
++	cpu_set(smp_processor_id(), cpu_online_map);
++
++	/* We can take interrupts now: we're officially "up". */
++	local_irq_enable();
++
++	wmb();
++	cpu_idle();
++}
++
++/*
++ * Everything has been set up for the secondary
++ * CPUs - they just need to reload everything
++ * from the task structure
++ * This function must not return.
++ */
++void __init initialize_secondary(void)
++{
++	/*
++	 * We don't actually need to load the full TSS,
++	 * basically just the stack pointer and the eip.
++	 */
++
++	asm volatile(
++		"movl %0,%%esp\n\t"
++		"jmp *%1"
++		:
++		:"r" (current->thread.esp),"r" (current->thread.eip));
++}
++
++extern struct {
++	void * esp;
++	unsigned short ss;
++} stack_start;
++
++#ifdef CONFIG_NUMA
++
++/* which logical CPUs are on which nodes */
++cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
++				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
++/* which node each logical CPU is on */
++int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
++EXPORT_SYMBOL(cpu_2_node);
++
++/* set up a mapping between cpu and node. */
++static inline void map_cpu_to_node(int cpu, int node)
++{
++	printk("Mapping cpu %d to node %d\n", cpu, node);
++	cpu_set(cpu, node_2_cpu_mask[node]);
++	cpu_2_node[cpu] = node;
++}
++
++/* undo a mapping between cpu and node. */
++static inline void unmap_cpu_to_node(int cpu)
++{
++	int node;
++
++	printk("Unmapping cpu %d from all nodes\n", cpu);
++	for (node = 0; node < MAX_NUMNODES; node ++)
++		cpu_clear(cpu, node_2_cpu_mask[node]);
++	cpu_2_node[cpu] = 0;
++}
++#else /* !CONFIG_NUMA */
++
++#define map_cpu_to_node(cpu, node)	({})
++#define unmap_cpu_to_node(cpu)	({})
++
++#endif /* CONFIG_NUMA */
++
++u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++static void map_cpu_to_logical_apicid(void)
++{
++	int cpu = smp_processor_id();
++	int apicid = logical_smp_processor_id();
++
++	cpu_2_logical_apicid[cpu] = apicid;
++	map_cpu_to_node(cpu, apicid_to_node(apicid));
++}
++
++static void unmap_cpu_to_logical_apicid(int cpu)
++{
++	cpu_2_logical_apicid[cpu] = BAD_APICID;
++	unmap_cpu_to_node(cpu);
++}
++
++#if APIC_DEBUG
++static inline void __inquire_remote_apic(int apicid)
++{
++	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
++	char *names[] = { "ID", "VERSION", "SPIV" };
++	int timeout, status;
++
++	printk("Inquiring remote APIC #%d...\n", apicid);
++
++	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
++		printk("... APIC #%d %s: ", apicid, names[i]);
++
++		/*
++		 * Wait for idle.
++		 */
++		apic_wait_icr_idle();
++
++		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
++		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
++
++		timeout = 0;
++		do {
++			udelay(100);
++			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
++		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
++
++		switch (status) {
++		case APIC_ICR_RR_VALID:
++			status = apic_read(APIC_RRR);
++			printk("%08x\n", status);
++			break;
++		default:
++			printk("failed\n");
++		}
++	}
++}
++#endif
++
++#ifdef WAKE_SECONDARY_VIA_NMI
++/* 
++ * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
++ * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
++ * won't ... remember to clear down the APIC, etc later.
++ */
++static int __init
++wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
++{
++	unsigned long send_status = 0, accept_status = 0;
++	int timeout, maxlvt;
++
++	/* Target chip */
++	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
++
++	/* Boot on the stack */
++	/* Kick the second */
++	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
++
++	Dprintk("Waiting for send to finish...\n");
++	timeout = 0;
++	do {
++		Dprintk("+");
++		udelay(100);
++		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
++	} while (send_status && (timeout++ < 1000));
++
++	/*
++	 * Give the other CPU some time to accept the IPI.
++	 */
++	udelay(200);
++	/*
++	 * Due to the Pentium erratum 3AP.
++	 */
++	maxlvt = get_maxlvt();
++	if (maxlvt > 3) {
++		apic_read_around(APIC_SPIV);
++		apic_write(APIC_ESR, 0);
++	}
++	accept_status = (apic_read(APIC_ESR) & 0xEF);
++	Dprintk("NMI sent.\n");
++
++	if (send_status)
++		printk("APIC never delivered???\n");
++	if (accept_status)
++		printk("APIC delivery error (%lx).\n", accept_status);
++
++	return (send_status | accept_status);
++}
++#endif	/* WAKE_SECONDARY_VIA_NMI */
++
++#ifdef WAKE_SECONDARY_VIA_INIT
++static int __init
++wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
++{
++	unsigned long send_status = 0, accept_status = 0;
++	int maxlvt, timeout, num_starts, j;
++
++	/*
++	 * Be paranoid about clearing APIC errors.
++	 */
++	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
++		apic_read_around(APIC_SPIV);
++		apic_write(APIC_ESR, 0);
++		apic_read(APIC_ESR);
++	}
++
++	Dprintk("Asserting INIT.\n");
++
++	/*
++	 * Turn INIT on target chip
++	 */
++	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
++
++	/*
++	 * Send IPI
++	 */
++	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
++				| APIC_DM_INIT);
++
++	Dprintk("Waiting for send to finish...\n");
++	timeout = 0;
++	do {
++		Dprintk("+");
++		udelay(100);
++		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
++	} while (send_status && (timeout++ < 1000));
++
++	mdelay(10);
++
++	Dprintk("Deasserting INIT.\n");
++
++	/* Target chip */
++	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
++
++	/* Send IPI */
++	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
++
++	Dprintk("Waiting for send to finish...\n");
++	timeout = 0;
++	do {
++		Dprintk("+");
++		udelay(100);
++		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
++	} while (send_status && (timeout++ < 1000));
++
++	atomic_set(&init_deasserted, 1);
++
++	/*
++	 * Should we send STARTUP IPIs ?
++	 *
++	 * Determine this based on the APIC version.
++	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
++	 */
++	if (APIC_INTEGRATED(apic_version[phys_apicid]))
++		num_starts = 2;
++	else
++		num_starts = 0;
++
++	/*
++	 * Run STARTUP IPI loop.
++	 */
++	Dprintk("#startup loops: %d.\n", num_starts);
++
++	maxlvt = get_maxlvt();
++
++	for (j = 1; j <= num_starts; j++) {
++		Dprintk("Sending STARTUP #%d.\n",j);
++		apic_read_around(APIC_SPIV);
++		apic_write(APIC_ESR, 0);
++		apic_read(APIC_ESR);
++		Dprintk("After apic_write.\n");
++
++		/*
++		 * STARTUP IPI
++		 */
++
++		/* Target chip */
++		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
++
++		/* Boot on the stack */
++		/* Kick the second */
++		apic_write_around(APIC_ICR, APIC_DM_STARTUP
++					| (start_eip >> 12));
++
++		/*
++		 * Give the other CPU some time to accept the IPI.
++		 */
++		udelay(300);
++
++		Dprintk("Startup point 1.\n");
++
++		Dprintk("Waiting for send to finish...\n");
++		timeout = 0;
++		do {
++			Dprintk("+");
++			udelay(100);
++			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
++		} while (send_status && (timeout++ < 1000));
++
++		/*
++		 * Give the other CPU some time to accept the IPI.
++		 */
++		udelay(200);
++		/*
++		 * Due to the Pentium erratum 3AP.
++		 */
++		if (maxlvt > 3) {
++			apic_read_around(APIC_SPIV);
++			apic_write(APIC_ESR, 0);
++		}
++		accept_status = (apic_read(APIC_ESR) & 0xEF);
++		if (send_status || accept_status)
++			break;
++	}
++	Dprintk("After Startup.\n");
++
++	if (send_status)
++		printk("APIC never delivered???\n");
++	if (accept_status)
++		printk("APIC delivery error (%lx).\n", accept_status);
++
++	return (send_status | accept_status);
++}
++#endif	/* WAKE_SECONDARY_VIA_INIT */
++
++extern cpumask_t cpu_initialized;
++
++static int __init do_boot_cpu(int apicid)
++/*
++ * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
++ * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
++ * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
++ */
++{
++	struct task_struct *idle;
++	unsigned long boot_error;
++	int timeout, cpu;
++	unsigned long start_eip;
++	unsigned short nmi_high = 0, nmi_low = 0;
++
++	cpu = ++cpucount;
++	/*
++	 * We can't use kernel_thread since we must avoid to
++	 * reschedule the child.
++	 */
++	idle = fork_idle(cpu);
++	if (IS_ERR(idle))
++		panic("failed fork for CPU %d", cpu);
++	idle->thread.eip = (unsigned long) start_secondary;
++	/* start_eip had better be page-aligned! */
++	start_eip = setup_trampoline();
++
++	/* So we see what's up   */
++	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
++	/* Stack for startup_32 can be just as for start_secondary onwards */
++	stack_start.esp = (void *) idle->thread.esp;
++
++	irq_ctx_init(cpu);
++
++	/*
++	 * This grunge runs the startup process for
++	 * the targeted processor.
++	 */
++
++	atomic_set(&init_deasserted, 0);
++
++	Dprintk("Setting warm reset code and vector.\n");
++
++	store_NMI_vector(&nmi_high, &nmi_low);
++
++	smpboot_setup_warm_reset_vector(start_eip);
++
++	/*
++	 * Starting actual IPI sequence...
++	 */
++	boot_error = wakeup_secondary_cpu(apicid, start_eip);
++
++	if (!boot_error) {
++		/*
++		 * allow APs to start initializing.
++		 */
++		Dprintk("Before Callout %d.\n", cpu);
++		cpu_set(cpu, cpu_callout_map);
++		Dprintk("After Callout %d.\n", cpu);
++
++		/*
++		 * Wait 5s total for a response
++		 */
++		for (timeout = 0; timeout < 50000; timeout++) {
++			if (cpu_isset(cpu, cpu_callin_map))
++				break;	/* It has booted */
++			udelay(100);
++		}
++
++		if (cpu_isset(cpu, cpu_callin_map)) {
++			/* number CPUs logically, starting from 1 (BSP is 0) */
++			Dprintk("OK.\n");
++			printk("CPU%d: ", cpu);
++			print_cpu_info(&cpu_data[cpu]);
++			Dprintk("CPU has booted.\n");
++		} else {
++			boot_error= 1;
++			if (*((volatile unsigned char *)trampoline_base)
++					== 0xA5)
++				/* trampoline started but...? */
++				printk("Stuck ??\n");
++			else
++				/* trampoline code not run */
++				printk("Not responding.\n");
++			inquire_remote_apic(apicid);
++		}
++	}
++	x86_cpu_to_apicid[cpu] = apicid;
++	if (boot_error) {
++		/* Try to put things back the way they were before ... */
++		unmap_cpu_to_logical_apicid(cpu);
++		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
++		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
++		cpucount--;
++	}
++
++	/* mark "stuck" area as not stuck */
++	*((volatile unsigned long *)trampoline_base) = 0;
++
++	return boot_error;
++}
++
++static void smp_tune_scheduling (void)
++{
++	unsigned long cachesize;       /* kB   */
++	unsigned long bandwidth = 350; /* MB/s */
++	/*
++	 * Rough estimation for SMP scheduling, this is the number of
++	 * cycles it takes for a fully memory-limited process to flush
++	 * the SMP-local cache.
++	 *
++	 * (For a P5 this pretty much means we will choose another idle
++	 *  CPU almost always at wakeup time (this is due to the small
++	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
++	 *  the cache size)
++	 */
++
++	if (!cpu_khz) {
++		/*
++		 * this basically disables processor-affinity
++		 * scheduling on SMP without a TSC.
++		 */
++		return;
++	} else {
++		cachesize = boot_cpu_data.x86_cache_size;
++		if (cachesize == -1) {
++			cachesize = 16; /* Pentiums, 2x8kB cache */
++			bandwidth = 100;
++		}
++	}
++}
++
++/*
++ * Cycle through the processors sending APIC IPIs to boot each.
++ */
++
++static int boot_cpu_logical_apicid;
++/* Where the IO area was mapped on multiquad, always 0 otherwise */
++void *xquad_portio;
++
++cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
++cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_core_map);
++
++static void __init smp_boot_cpus(unsigned int max_cpus)
++{
++	int apicid, cpu, bit, kicked;
++	unsigned long bogosum = 0;
++
++	/*
++	 * Setup boot CPU information
++	 */
++	smp_store_cpu_info(0); /* Final full version of the data */
++	printk("CPU%d: ", 0);
++	print_cpu_info(&cpu_data[0]);
++
++	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
++	boot_cpu_logical_apicid = logical_smp_processor_id();
++	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
++
++	current_thread_info()->cpu = 0;
++	smp_tune_scheduling();
++	cpus_clear(cpu_sibling_map[0]);
++	cpu_set(0, cpu_sibling_map[0]);
++
++	cpus_clear(cpu_core_map[0]);
++	cpu_set(0, cpu_core_map[0]);
++
++	/*
++	 * If we couldn't find an SMP configuration at boot time,
++	 * get out of here now!
++	 */
++	if (!smp_found_config && !acpi_lapic) {
++		printk(KERN_NOTICE "SMP motherboard not detected.\n");
++		smpboot_clear_io_apic_irqs();
++		phys_cpu_present_map = physid_mask_of_physid(0);
++		if (APIC_init_uniprocessor())
++			printk(KERN_NOTICE "Local APIC not detected."
++					   " Using dummy APIC emulation.\n");
++		map_cpu_to_logical_apicid();
++		cpu_set(0, cpu_sibling_map[0]);
++		cpu_set(0, cpu_core_map[0]);
++		return;
++	}
++
++	/*
++	 * Should not be necessary because the MP table should list the boot
++	 * CPU too, but we do it for the sake of robustness anyway.
++	 * Makes no sense to do this check in clustered apic mode, so skip it
++	 */
++	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
++		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
++				boot_cpu_physical_apicid);
++		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
++	}
++
++	/*
++	 * If we couldn't find a local APIC, then get out of here now!
++	 */
++	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
++		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
++			boot_cpu_physical_apicid);
++		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
++		smpboot_clear_io_apic_irqs();
++		phys_cpu_present_map = physid_mask_of_physid(0);
++		cpu_set(0, cpu_sibling_map[0]);
++		cpu_set(0, cpu_core_map[0]);
++		return;
++	}
++
++	verify_local_APIC();
++
++	/*
++	 * If SMP should be disabled, then really disable it!
++	 */
++	if (!max_cpus) {
++		smp_found_config = 0;
++		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
++		smpboot_clear_io_apic_irqs();
++		phys_cpu_present_map = physid_mask_of_physid(0);
++		cpu_set(0, cpu_sibling_map[0]);
++		cpu_set(0, cpu_core_map[0]);
++		return;
++	}
++
++	connect_bsp_APIC();
++	setup_local_APIC();
++	map_cpu_to_logical_apicid();
++
++
++	setup_portio_remap();
++
++	/*
++	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
++	 *
++	 * In clustered apic mode, phys_cpu_present_map is a constructed thus:
++	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
++	 * clustered apic ID.
++	 */
++	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
++
++	kicked = 1;
++	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
++		apicid = cpu_present_to_apicid(bit);
++		/*
++		 * Don't even attempt to start the boot CPU!
++		 */
++		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
++			continue;
++
++		if (!check_apicid_present(bit))
++			continue;
++		if (max_cpus <= cpucount+1)
++			continue;
++
++		if (do_boot_cpu(apicid))
++			printk("CPU #%d not responding - cannot use it.\n",
++								apicid);
++		else
++			++kicked;
++	}
++
++	/*
++	 * Cleanup possible dangling ends...
++	 */
++	smpboot_restore_warm_reset_vector();
++
++	/*
++	 * Allow the user to impress friends.
++	 */
++	Dprintk("Before bogomips.\n");
++	for (cpu = 0; cpu < NR_CPUS; cpu++)
++		if (cpu_isset(cpu, cpu_callout_map))
++			bogosum += cpu_data[cpu].loops_per_jiffy;
++	printk(KERN_INFO
++		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
++		cpucount+1,
++		bogosum/(500000/HZ),
++		(bogosum/(5000/HZ))%100);
++	
++	Dprintk("Before bogocount - setting activated=1.\n");
++
++	if (smp_b_stepping)
++		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
++
++	/*
++	 * Don't taint if we are running SMP kernel on a single non-MP
++	 * approved Athlon
++	 */
++	if (tainted & TAINT_UNSAFE_SMP) {
++		if (cpucount)
++			printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
++		else
++			tainted &= ~TAINT_UNSAFE_SMP;
++	}
++
++	Dprintk("Boot done.\n");
++
++	/*
++	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
++	 * efficiently.
++	 */
++	for (cpu = 0; cpu < NR_CPUS; cpu++) {
++		cpus_clear(cpu_sibling_map[cpu]);
++		cpus_clear(cpu_core_map[cpu]);
++	}
++
++	for (cpu = 0; cpu < NR_CPUS; cpu++) {
++		struct cpuinfo_x86 *c = cpu_data + cpu;
++		int siblings = 0;
++		int i;
++		if (!cpu_isset(cpu, cpu_callout_map))
++			continue;
++
++		if (smp_num_siblings > 1) {
++			for (i = 0; i < NR_CPUS; i++) {
++				if (!cpu_isset(i, cpu_callout_map))
++					continue;
++				if (cpu_core_id[cpu] == cpu_core_id[i]) {
++					siblings++;
++					cpu_set(i, cpu_sibling_map[cpu]);
++				}
++			}
++		} else {
++			siblings++;
++			cpu_set(cpu, cpu_sibling_map[cpu]);
++		}
++
++		if (siblings != smp_num_siblings) {
++			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
++			smp_num_siblings = siblings;
++		}
++
++		if (c->x86_num_cores > 1) {
++			for (i = 0; i < NR_CPUS; i++) {
++				if (!cpu_isset(i, cpu_callout_map))
++					continue;
++				if (phys_proc_id[cpu] == phys_proc_id[i]) {
++					cpu_set(i, cpu_core_map[cpu]);
++				}
++			}
++		} else {
++			cpu_core_map[cpu] = cpu_sibling_map[cpu];
++		}
++	}
++
++	smpboot_setup_io_apic();
++
++	setup_boot_APIC_clock();
++
++	/*
++	 * Synchronize the TSC with the AP
++	 */
++	if (cpu_has_tsc && cpucount && cpu_khz)
++		synchronize_tsc_bp();
++}
++
++/* These are wrappers to interface to the new boot process.  Someone
++   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
++void __init smp_prepare_cpus(unsigned int max_cpus)
++{
++	smp_commenced_mask = cpumask_of_cpu(0);
++	cpu_callin_map = cpumask_of_cpu(0);
++	mb();
++	smp_boot_cpus(max_cpus);
++}
++
++void __devinit smp_prepare_boot_cpu(void)
++{
++	cpu_set(smp_processor_id(), cpu_online_map);
++	cpu_set(smp_processor_id(), cpu_callout_map);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/* must be called with the cpucontrol mutex held */
++static int __devinit cpu_enable(unsigned int cpu)
++{
++	/* get the target out of its holding state */
++	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
++	wmb();
++
++	/* wait for the processor to ack it. timeout? */
++	while (!cpu_online(cpu))
++		cpu_relax();
++
++	fixup_irqs(cpu_online_map);
++	/* counter the disable in fixup_irqs() */
++	local_irq_enable();
++	return 0;
++}
++
++int __cpu_disable(void)
++{
++	cpumask_t map = cpu_online_map;
++	int cpu = smp_processor_id();
++
++	/*
++	 * Perhaps use cpufreq to drop frequency, but that could go
++	 * into generic code.
++ 	 *
++	 * We won't take down the boot processor on i386 due to some
++	 * interrupts only being able to be serviced by the BSP.
++	 * Especially so if we're not using an IOAPIC	-zwane
++	 */
++	if (cpu == 0)
++		return -EBUSY;
++
++	/* We enable the timer again on the exit path of the death loop */
++	disable_APIC_timer();
++	/* Allow any queued timer interrupts to get serviced */
++	local_irq_enable();
++	mdelay(1);
++	local_irq_disable();
++
++	cpu_clear(cpu, map);
++	fixup_irqs(map);
++	/* It's now safe to remove this processor from the online map */
++	cpu_clear(cpu, cpu_online_map);
++	return 0;
++}
++
++void __cpu_die(unsigned int cpu)
++{
++	/* We don't do anything here: idle task is faking death itself. */
++	unsigned int i;
++
++	for (i = 0; i < 10; i++) {
++		/* They ack this in play_dead by setting CPU_DEAD */
++		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
++			return;
++		current->state = TASK_UNINTERRUPTIBLE;
++		schedule_timeout(HZ/10);
++	}
++ 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
++}
++#else /* ... !CONFIG_HOTPLUG_CPU */
++int __cpu_disable(void)
++{
++	return -ENOSYS;
++}
++
++void __cpu_die(unsigned int cpu)
++{
++	/* We said "no" in __cpu_disable */
++	BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __devinit __cpu_up(unsigned int cpu)
++{
++	/* In case one didn't come up */
++	if (!cpu_isset(cpu, cpu_callin_map)) {
++		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
++		local_irq_enable();
++		return -EIO;
++	}
++
++#ifdef CONFIG_HOTPLUG_CPU
++	/* Already up, and in cpu_quiescent now? */
++	if (cpu_isset(cpu, smp_commenced_mask)) {
++		cpu_enable(cpu);
++		return 0;
++	}
++#endif
++
++	local_irq_enable();
++	/* Unleash the CPU! */
++	cpu_set(cpu, smp_commenced_mask);
++	while (!cpu_isset(cpu, cpu_online_map))
++		mb();
++	return 0;
++}
++
++void __init smp_cpus_done(unsigned int max_cpus)
++{
++#ifdef CONFIG_X86_IO_APIC
++	setup_ioapic_dest();
++#endif
++	zap_low_mappings();
++	/*
++	 * Disable executability of the SMP trampoline:
++	 */
++	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
++}
++
++void __init smp_intr_init(void)
++{
++	/*
++	 * IRQ0 must be given a fixed assignment and initialized,
++	 * because it's used before the IO-APIC is set up.
++	 */
++	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
++
++	/*
++	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
++	 * IPI, driven by wakeup.
++	 */
++	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
++
++	/* IPI for invalidation */
++	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
++
++	/* IPI for generic function call */
++	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
++}
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/smp.c linux-2.6.12-xen/arch/i386/kernel/smp.c
+--- pristine-linux-2.6.12/arch/i386/kernel/smp.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/smp.c	2006-03-05 23:54:37.059042918 +0100
+@@ -19,6 +19,7 @@
+ #include <linux/mc146818rtc.h>
+ #include <linux/cache.h>
+ #include <linux/interrupt.h>
++#include <linux/cpu.h>
+ 
+ #include <asm/mtrr.h>
+ #include <asm/tlbflush.h>
+@@ -163,7 +164,7 @@ void send_IPI_mask_bitmask(cpumask_t cpu
+ 	unsigned long flags;
+ 
+ 	local_irq_save(flags);
+-		
++	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
+ 	/*
+ 	 * Wait for idle.
+ 	 */
+@@ -345,21 +346,21 @@ out:
+ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+ 						unsigned long va)
+ {
+-	cpumask_t tmp;
+ 	/*
+ 	 * A couple of (to be removed) sanity checks:
+ 	 *
+-	 * - we do not send IPIs to not-yet booted CPUs.
+ 	 * - current CPU must not be in mask
+ 	 * - mask must exist :)
+ 	 */
+ 	BUG_ON(cpus_empty(cpumask));
+-
+-	cpus_and(tmp, cpumask, cpu_online_map);
+-	BUG_ON(!cpus_equal(cpumask, tmp));
+ 	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ 	BUG_ON(!mm);
+ 
++	/* If a CPU which we ran on has gone down, OK. */
++	cpus_and(cpumask, cpumask, cpu_online_map);
++	if (cpus_empty(cpumask))
++		return;
++
+ 	/*
+ 	 * i'm not happy about this global shared spinlock in the
+ 	 * MM hot path, but we'll see how contended it is.
+@@ -474,6 +475,7 @@ void flush_tlb_all(void)
+  */
+ void smp_send_reschedule(int cpu)
+ {
++	WARN_ON(cpu_is_offline(cpu));
+ 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
+ }
+ 
+@@ -514,10 +516,16 @@ int smp_call_function (void (*func) (voi
+  */
+ {
+ 	struct call_data_struct data;
+-	int cpus = num_online_cpus()-1;
++	int cpus;
+ 
+-	if (!cpus)
++	/* Holding any lock stops cpus from going down. */
++	spin_lock(&call_lock);
++	cpus = num_online_cpus()-1;
++
++	if (!cpus) {
++		spin_unlock(&call_lock);
+ 		return 0;
++	}
+ 
+ 	/* Can deadlock when called with interrupts disabled */
+ 	WARN_ON(irqs_disabled());
+@@ -529,7 +537,6 @@ int smp_call_function (void (*func) (voi
+ 	if (wait)
+ 		atomic_set(&data.finished, 0);
+ 
+-	spin_lock(&call_lock);
+ 	call_data = &data;
+ 	mb();
+ 	
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/traps.c linux-2.6.12-xen/arch/i386/kernel/traps.c
+--- pristine-linux-2.6.12/arch/i386/kernel/traps.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/traps.c	2006-03-05 23:54:37.125033194 +0100
+@@ -521,18 +521,11 @@ static void mem_parity_error(unsigned ch
+ 
+ static void io_check_error(unsigned char reason, struct pt_regs * regs)
+ {
+-	unsigned long i;
+-
+ 	printk("NMI: IOCK error (debug interrupt?)\n");
+ 	show_registers(regs);
+ 
+ 	/* Re-enable the IOCK line, wait for a few seconds */
+-	reason = (reason & 0xf) | 8;
+-	outb(reason, 0x61);
+-	i = 2000;
+-	while (--i) udelay(1000);
+-	reason &= ~8;
+-	outb(reason, 0x61);
++	clear_io_check_error(reason);
+ }
+ 
+ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+@@ -624,6 +617,14 @@ fastcall void do_nmi(struct pt_regs * re
+ 	nmi_enter();
+ 
+ 	cpu = smp_processor_id();
++
++#ifdef CONFIG_HOTPLUG_CPU
++	if (!cpu_online(cpu)) {
++		nmi_exit();
++		return;
++	}
++#endif
++
+ 	++nmi_count(cpu);
+ 
+ 	if (!nmi_callback(regs, cpu))
+diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/vmlinux.lds.S linux-2.6.12-xen/arch/i386/kernel/vmlinux.lds.S
+--- pristine-linux-2.6.12/arch/i386/kernel/vmlinux.lds.S	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/kernel/vmlinux.lds.S	2006-03-05 23:54:37.191023469 +0100
+@@ -30,6 +30,13 @@ SECTIONS
+   __ex_table : { *(__ex_table) }
+   __stop___ex_table = .;
+ 
++  . = ALIGN(16);
++  __start_smp_alternatives_table = .;
++  __smp_alternatives : { *(__smp_alternatives) }
++  __stop_smp_alternatives_table = .;
++
++  __smp_replacements : { *(__smp_replacements) }
++
+   RODATA
+ 
+   /* writeable */
+diff -Nurp pristine-linux-2.6.12/arch/i386/mm/pageattr.c linux-2.6.12-xen/arch/i386/mm/pageattr.c
+--- pristine-linux-2.6.12/arch/i386/mm/pageattr.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/mm/pageattr.c	2006-03-05 23:54:37.182024795 +0100
+@@ -75,7 +75,7 @@ static void set_pmd_pte(pte_t *kpte, uns
+ 	unsigned long flags;
+ 
+ 	set_pte_atomic(kpte, pte); 	/* change init_mm */
+-	if (PTRS_PER_PMD > 1)
++	if (HAVE_SHARED_KERNEL_PMD)
+ 		return;
+ 
+ 	spin_lock_irqsave(&pgd_lock, flags);
+diff -Nurp pristine-linux-2.6.12/arch/i386/mm/pgtable.c linux-2.6.12-xen/arch/i386/mm/pgtable.c
+--- pristine-linux-2.6.12/arch/i386/mm/pgtable.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/i386/mm/pgtable.c	2006-03-05 23:54:37.183024648 +0100
+@@ -199,19 +199,20 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
+ {
+ 	unsigned long flags;
+ 
+-	if (PTRS_PER_PMD == 1)
++	if (PTRS_PER_PMD > 1) {
++		if (HAVE_SHARED_KERNEL_PMD)
++			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
++			       swapper_pg_dir + USER_PTRS_PER_PGD,
++			       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
++	} else {
+ 		spin_lock_irqsave(&pgd_lock, flags);
+-
+-	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+-			swapper_pg_dir + USER_PTRS_PER_PGD,
+-			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+-
+-	if (PTRS_PER_PMD > 1)
+-		return;
+-
+-	pgd_list_add(pgd);
+-	spin_unlock_irqrestore(&pgd_lock, flags);
+-	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++		memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
++		       swapper_pg_dir + USER_PTRS_PER_PGD,
++		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
++		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++		pgd_list_add(pgd);
++		spin_unlock_irqrestore(&pgd_lock, flags);
++	}
+ }
+ 
+ /* never called when PTRS_PER_PMD > 1 */
+@@ -238,6 +239,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+ 			goto out_oom;
+ 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+ 	}
++
++	if (!HAVE_SHARED_KERNEL_PMD) {
++		unsigned long flags;
++
++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++			if (!pmd)
++				goto out_oom;
++			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
++		}
++
++		spin_lock_irqsave(&pgd_lock, flags);
++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++			pgd_t *kpgd = pgd_offset_k(v);
++			pud_t *kpud = pud_offset(kpgd, v);
++			pmd_t *kpmd = pmd_offset(kpud, v);
++			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++			memcpy(pmd, kpmd, PAGE_SIZE);
++		}
++		pgd_list_add(pgd);
++		spin_unlock_irqrestore(&pgd_lock, flags);
++	}
++
+ 	return pgd;
+ 
+ out_oom:
+@@ -252,9 +277,23 @@ void pgd_free(pgd_t *pgd)
+ 	int i;
+ 
+ 	/* in the PAE case user pgd entries are overwritten before usage */
+-	if (PTRS_PER_PMD > 1)
+-		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
+-			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
++	if (PTRS_PER_PMD > 1) {
++		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++			kmem_cache_free(pmd_cache, pmd);
++		}
++		if (!HAVE_SHARED_KERNEL_PMD) {
++			unsigned long flags;
++			spin_lock_irqsave(&pgd_lock, flags);
++			pgd_list_del(pgd);
++			spin_unlock_irqrestore(&pgd_lock, flags);
++			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++				kmem_cache_free(pmd_cache, pmd);
++			}
++		}
++	}
+ 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
+ 	kmem_cache_free(pgd_cache, pgd);
+ }
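
The pgtable.c hunks above give every newly created page directory a private copy of the kernel mappings when the kernel PMD cannot be shared (the case Xen needs), copying the kernel half and clearing the user half under pgd_lock. A toy, single-level version of that constructor, with invented sizes and names and the locking left out:

    /*
     * Illustration only (sizes and names invented, locking omitted): each
     * new top-level table gets a private copy of the kernel half and a
     * cleared user half.
     */
    #include <stdio.h>
    #include <string.h>

    #define PTRS_PER_PGD    8
    #define USER_PTRS       5       /* slots below this index are user space */

    /* stand-in for swapper_pg_dir, the reference kernel mappings */
    static const unsigned long reference_pgd[PTRS_PER_PGD] = {
            0, 0, 0, 0, 0,
            0x1000, 0x2000, 0x3000,
    };

    static void pgd_ctor_sketch(unsigned long *pgd)
    {
            /* copy the kernel part from the reference table ... */
            memcpy(pgd + USER_PTRS, reference_pgd + USER_PTRS,
                   (PTRS_PER_PGD - USER_PTRS) * sizeof(*pgd));
            /* ... and start with an empty user part */
            memset(pgd, 0, USER_PTRS * sizeof(*pgd));
    }

    int main(void)
    {
            unsigned long pgd[PTRS_PER_PGD];

            pgd_ctor_sketch(pgd);
            for (int i = 0; i < PTRS_PER_PGD; i++)
                    printf("pgd[%d] = %#lx\n", i, pgd[i]);
            return 0;
    }
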
+diff -Nurp pristine-linux-2.6.12/arch/ia64/hp/sim/Makefile linux-2.6.12-xen/arch/ia64/hp/sim/Makefile
+--- pristine-linux-2.6.12/arch/ia64/hp/sim/Makefile	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ia64/hp/sim/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -14,3 +14,5 @@ obj-$(CONFIG_HP_SIMETH)	+= simeth.o
+ obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
+ obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o
+ obj-$(CONFIG_HP_SIMSCSI) += simscsi.o
++obj-$(CONFIG_XEN) += simserial.o
++obj-$(CONFIG_XEN) += hpsim_console.o
+diff -Nurp pristine-linux-2.6.12/arch/ia64/Kconfig linux-2.6.12-xen/arch/ia64/Kconfig
+--- pristine-linux-2.6.12/arch/ia64/Kconfig	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ia64/Kconfig	2006-03-05 23:36:30.000000000 +0100
+@@ -46,6 +46,53 @@ config GENERIC_IOMAP
+ 	bool
+ 	default y
+ 
++config XEN
++	bool
++	default y
++	help
++	  Enable Xen hypervisor support.  Resulting kernel runs
++	  both as a guest OS on Xen and natively on hardware.
++
++config ARCH_XEN
++	bool
++	default y
++	help
++	  TEMP ONLY. Needs to be on for drivers/xen to build.
++
++config XEN_PRIVILEGED_GUEST
++	bool "Privileged Guest"
++	default n
++	help
++	  Used in drivers/xen/privcmd.c.  Should go away?
++
++config XEN_PHYSDEV_ACCESS
++	depends on XEN
++	bool
++	default y
++
++config XEN_BLKDEV_GRANT
++	depends on XEN
++	bool
++	default y
++
++config XEN_BLKDEV_FRONTEND
++	depends on XEN
++	bool
++	default y
++
++config XEN_VT
++	bool "Override for turning on CONFIG_VT for domU"
++	default y
++	help
++	  Hack to turn off CONFIG_VT for domU
++
++config VT
++	bool
++	default y if XEN && XEN_VT
++	default n if XEN && !XEN_VT
++	help
++	  Hack to turn off CONFIG_VT for domU
++
+ config SCHED_NO_NO_OMIT_FRAME_POINTER
+ 	bool
+ 	default y
+diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/entry.S linux-2.6.12-xen/arch/ia64/kernel/entry.S
+--- pristine-linux-2.6.12/arch/ia64/kernel/entry.S	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ia64/kernel/entry.S	2006-03-05 23:36:30.000000000 +0100
+@@ -181,7 +181,7 @@ END(sys_clone)
+  *	called.  The code starting at .map relies on this.  The rest of the code
+  *	doesn't care about the interrupt masking status.
+  */
+-GLOBAL_ENTRY(ia64_switch_to)
++GLOBAL_ENTRY(__ia64_switch_to)
+ 	.prologue
+ 	alloc r16=ar.pfs,1,0,0,0
+ 	DO_SAVE_SWITCH_STACK
+@@ -235,7 +235,7 @@ GLOBAL_ENTRY(ia64_switch_to)
+ 	;;
+ 	itr.d dtr[r25]=r23		// wire in new mapping...
+ 	br.cond.sptk .done
+-END(ia64_switch_to)
++END(__ia64_switch_to)
+ 
+ /*
+  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
+@@ -376,7 +376,7 @@ END(save_switch_stack)
+  *	- b7 holds address to return to
+  *	- must not touch r8-r11
+  */
+-ENTRY(load_switch_stack)
++GLOBAL_ENTRY(load_switch_stack)
+ 	.prologue
+ 	.altrp b7
+ 
+@@ -500,7 +500,7 @@ END(clone)
+ 	 * because some system calls (such as ia64_execve) directly
+ 	 * manipulate ar.pfs.
+ 	 */
+-GLOBAL_ENTRY(ia64_trace_syscall)
++GLOBAL_ENTRY(__ia64_trace_syscall)
+ 	PT_REGS_UNWIND_INFO(0)
+ 	/*
+ 	 * We need to preserve the scratch registers f6-f11 in case the system
+@@ -570,7 +570,7 @@ strace_error:
+ (p6)	mov r10=-1
+ (p6)	mov r8=r9
+ 	br.cond.sptk .strace_save_retval
+-END(ia64_trace_syscall)
++END(__ia64_trace_syscall)
+ 
+ 	/*
+ 	 * When traced and returning from sigreturn, we invoke syscall_trace but then
+@@ -623,8 +623,11 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
+ 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
+ 	mov r10=r0				// clear error indication in r10
+ (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
++	;;
++	// don't fall through, ia64_leave_syscall may be #define'd
++	br.cond.sptk.few ia64_leave_syscall
++	;;
+ END(ia64_ret_from_syscall)
+-	// fall through
+ /*
+  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
+  *	need to switch to bank 0 and doesn't restore the scratch registers.
+@@ -669,7 +672,7 @@ END(ia64_ret_from_syscall)
+  *	      ar.csd: cleared
+  *	      ar.ssd: cleared
+  */
+-ENTRY(ia64_leave_syscall)
++GLOBAL_ENTRY(__ia64_leave_syscall)
+ 	PT_REGS_UNWIND_INFO(0)
+ 	/*
+ 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
+@@ -770,7 +773,7 @@ ENTRY(ia64_leave_syscall)
+ 	mov.m ar.ccv=r0		// clear ar.ccv
+ (pNonSys) br.cond.dpnt.many dont_preserve_current_frame
+ 	br.cond.sptk.many rbs_switch
+-END(ia64_leave_syscall)
++END(__ia64_leave_syscall)
+ 
+ #ifdef CONFIG_IA32_SUPPORT
+ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+@@ -782,10 +785,13 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+ 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
+ 	.mem.offset 8,0
+ 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
++	;;
++	// don't fall through, ia64_leave_kernel may be #define'd
++	br.cond.sptk.few ia64_leave_kernel
++	;;
+ END(ia64_ret_from_ia32_execve)
+-	// fall through
+ #endif /* CONFIG_IA32_SUPPORT */
+-GLOBAL_ENTRY(ia64_leave_kernel)
++GLOBAL_ENTRY(__ia64_leave_kernel)
+ 	PT_REGS_UNWIND_INFO(0)
+ 	/*
+ 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
+@@ -1131,7 +1137,7 @@ skip_rbs_switch:
+ 	ld8 r10=[r3]
+ 	br.cond.sptk.many .work_processed_syscall	// re-check
+ 
+-END(ia64_leave_kernel)
++END(__ia64_leave_kernel)
+ 
+ ENTRY(handle_syscall_error)
+ 	/*
+@@ -1171,7 +1177,7 @@ END(ia64_invoke_schedule_tail)
+ 	 * be set up by the caller.  We declare 8 input registers so the system call
+ 	 * args get preserved, in case we need to restart a system call.
+ 	 */
+-ENTRY(notify_resume_user)
++GLOBAL_ENTRY(notify_resume_user)
+ 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+ 	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+ 	mov r9=ar.unat
+@@ -1259,7 +1265,7 @@ ENTRY(sys_rt_sigreturn)
+ 	adds sp=16,sp
+ 	;;
+ 	ld8 r9=[sp]				// load new ar.unat
+-	mov.sptk b7=r8,ia64_leave_kernel
++	mov.sptk b7=r8,__ia64_leave_kernel
+ 	;;
+ 	mov ar.unat=r9
+ 	br.many b7
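
The entry.S hunks above rename the native entry points (ia64_switch_to, ia64_trace_syscall, ia64_leave_syscall, ia64_leave_kernel) to __-prefixed symbols and replace the fall-throughs with explicit branches, so that the Xen wrappers added later in this patch (xen_switch_to and friends) can test running_on_xen and branch back to the native body on bare metal. Reduced to plain C purely for illustration, with invented names, the dispatch looks like this:

    /*
     * Illustration only (invented names, not the real code): a wrapper that
     * checks a boot-time flag and falls back to the renamed native entry
     * point.
     */
    #include <stdio.h>

    static int running_on_xen;              /* set during early boot in the real code */

    static void __native_switch_to(void)    /* stands in for __ia64_switch_to */
    {
            puts("native context switch");
    }

    static void xen_switch_to_body(void)    /* stands in for the Xen-specific body */
    {
            puts("paravirtualised context switch");
    }

    static void switch_to(void)             /* stands in for the exported entry point */
    {
            if (!running_on_xen) {
                    __native_switch_to();   /* same early-out test as the asm wrapper */
                    return;
            }
            xen_switch_to_body();
    }

    int main(void)
    {
            switch_to();
            running_on_xen = 1;
            switch_to();
            return 0;
    }

The renamed __ia64_* bodies stay unchanged, so on bare metal the only added cost is the flag test and branch at the top of each wrapper.
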
+diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/head.S linux-2.6.12-xen/arch/ia64/kernel/head.S
+--- pristine-linux-2.6.12/arch/ia64/kernel/head.S	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ia64/kernel/head.S	2006-03-05 23:36:30.000000000 +0100
+@@ -370,6 +370,10 @@ start_ap:
+ 
+ 	// This is executed by the bootstrap processor (bsp) only:
+ 
++#ifdef CONFIG_XEN
++	br.call.sptk.many rp=early_xen_setup
++	;;
++#endif
+ #ifdef CONFIG_IA64_FW_EMU
+ 	// initialize PAL & SAL emulator:
+ 	br.call.sptk.many rp=sys_fw_init
+diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/pal.S linux-2.6.12-xen/arch/ia64/kernel/pal.S
+--- pristine-linux-2.6.12/arch/ia64/kernel/pal.S	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ia64/kernel/pal.S	2006-03-05 23:36:30.000000000 +0100
+@@ -16,6 +16,7 @@
+ #include <asm/processor.h>
+ 
+ 	.data
++	.globl pal_entry_point
+ pal_entry_point:
+ 	data8 ia64_pal_default_handler
+ 	.text
+@@ -53,7 +54,7 @@ END(ia64_pal_default_handler)
+  * in4	       1 ==> clear psr.ic,  0 ==> don't clear psr.ic
+  *
+  */
+-GLOBAL_ENTRY(ia64_pal_call_static)
++GLOBAL_ENTRY(__ia64_pal_call_static)
+ 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
+ 	alloc loc1 = ar.pfs,5,5,0,0
+ 	movl loc2 = pal_entry_point
+@@ -90,7 +91,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
+ 	;;
+ 	srlz.d				// seralize restoration of psr.l
+ 	br.ret.sptk.many b0
+-END(ia64_pal_call_static)
++END(__ia64_pal_call_static)
+ 
+ /*
+  * Make a PAL call using the stacked registers calling convention.
+diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/ptrace.c linux-2.6.12-xen/arch/ia64/kernel/ptrace.c
+--- pristine-linux-2.6.12/arch/ia64/kernel/ptrace.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ia64/kernel/ptrace.c	2006-03-05 23:54:35.275305760 +0100
+@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
+ 				*data = (pt->cr_ipsr & IPSR_MASK);
+ 			return 0;
+ 
++		      case PT_AR_RSC:
++			if (write_access)
++				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
++			else
++				*data = pt->ar_rsc;
++			return 0;
++
+ 		      case PT_AR_RNAT:
+ 			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
+ 			rnat_addr = (long) ia64_rse_rnat_addr((long *)
+@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
+ 		      case PT_AR_BSPSTORE:
+ 			ptr = pt_reg_addr(pt, ar_bspstore);
+ 			break;
+-		      case PT_AR_RSC:
+-			ptr = pt_reg_addr(pt, ar_rsc);
+-			break;
+ 		      case PT_AR_UNAT:
+ 			ptr = pt_reg_addr(pt, ar_unat);
+ 			break;
+@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
+ static long
+ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
+ {
+-	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
++	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
+ 	struct unw_frame_info info;
+ 	struct switch_stack *sw;
+ 	struct ia64_fpreg fpval;
+@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
+ 	/* app regs */
+ 
+ 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
+-	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
++	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
+ 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
+ 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
+ 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
+@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
+ 	retval |= __get_user(nat_bits, &ppr->nat);
+ 
+ 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
++	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
+ 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
+ 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
+ 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
+diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/setup.c linux-2.6.12-xen/arch/ia64/kernel/setup.c
+--- pristine-linux-2.6.12/arch/ia64/kernel/setup.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ia64/kernel/setup.c	2006-03-05 23:36:30.000000000 +0100
+@@ -273,6 +273,9 @@ io_port_init (void)
+ static inline int __init
+ early_console_setup (char *cmdline)
+ {
++#ifdef CONFIG_XEN
++	if (!early_xen_console_setup(cmdline)) return 0;
++#endif
+ #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
+ 	{
+ 		extern int sn_serial_console_early_setup(void);
+diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/signal.c linux-2.6.12-xen/arch/ia64/kernel/signal.c
+--- pristine-linux-2.6.12/arch/ia64/kernel/signal.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ia64/kernel/signal.c	2006-03-05 23:54:35.327298099 +0100
+@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
+ static long
+ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
+ {
+-	unsigned long ip, flags, nat, um, cfm;
++	unsigned long ip, flags, nat, um, cfm, rsc;
+ 	long err;
+ 
+ 	/* Always make any pending restarted system calls return -EINTR */
+@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
+ 	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
+ 	err |= __get_user(cfm, &sc->sc_cfm);
+ 	err |= __get_user(um, &sc->sc_um);			/* user mask */
+-	err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
++	err |= __get_user(rsc, &sc->sc_ar_rsc);
+ 	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
+ 	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
+ 	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
+@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
+ 	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */
+ 
+ 	scr->pt.cr_ifs = cfm | (1UL << 63);
++	scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
+ 
+ 	/* establish new instruction pointer: */
+ 	scr->pt.cr_iip = ip & ~0x3UL;
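
Both the ptrace.c hunk and the signal.c hunk above stop copying ar.rsc verbatim from user-controlled memory and instead OR in (3 << 2), pinning the two privilege-level bits of the register stack configuration (bits 2-3) to 3 so a traced or sigreturn-ing task always resumes at user level. A one-function illustration of that bit manipulation, with invented values:

    /*
     * Illustration only (invented values): OR-ing in (3 << 2) pins the
     * two-bit privilege-level field of ar.rsc to 3, i.e. user level.
     */
    #include <stdio.h>

    static unsigned long force_pl3(unsigned long rsc)
    {
            return rsc | (3UL << 2);        /* rsc.pl <- 3 */
    }

    int main(void)
    {
            unsigned long from_user = 0x3;  /* pl field cleared by the caller */
            printf("%#lx -> %#lx\n", from_user, force_pl3(from_user));
            return 0;
    }
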
+diff -Nurp pristine-linux-2.6.12/arch/ia64/Makefile linux-2.6.12-xen/arch/ia64/Makefile
+--- pristine-linux-2.6.12/arch/ia64/Makefile	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ia64/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -11,6 +11,9 @@
+ NM := $(CROSS_COMPILE)nm -B
+ READELF := $(CROSS_COMPILE)readelf
+ 
++# following is temporary pending xen directory restructuring
++NOSTDINC_FLAGS += -Iinclude/asm-xen
++
+ export AWK
+ 
+ CHECKFLAGS	+= -m64 -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
+@@ -57,9 +60,15 @@ core-$(CONFIG_IA64_GENERIC) 	+= arch/ia6
+ core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
+ core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
+ core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
++core-$(CONFIG_XEN)		+= arch/ia64/xen/
+ 
+ drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
++ifneq ($(CONFIG_XEN),y)
+ drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
++endif
++ifneq ($(CONFIG_IA64_GENERIC),y)
++drivers-$(CONFIG_XEN)		+= arch/ia64/hp/sim/
++endif
+ drivers-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/
+ drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
+ drivers-$(CONFIG_IA64_GENERIC)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
+@@ -83,6 +92,8 @@ archclean:
+ 	$(Q)$(MAKE) $(clean)=$(boot)
+ 
+ CLEAN_FILES += include/asm-ia64/.offsets.h.stamp vmlinux.gz bootloader
++#CLEAN_FILES += include/asm-xen/xen-public include/asm-ia64/xen/asm-xsi-offsets.h
++#CLEAN_FILES += include/asm-xen/linux-public include/asm-xen/asm-ia64/hypervisor.h
+ 
+ MRPROPER_FILES += include/asm-ia64/offsets.h
+ 
+@@ -95,11 +106,27 @@ include/asm-ia64/offsets.h: arch/ia64/ke
+ 
+ arch/ia64/kernel/asm-offsets.s: include/asm-ia64/.offsets.h.stamp
+ 
++#XEN_PATH ?= $(srctree)/../xen-ia64-unstable.hg/
+ include/asm-ia64/.offsets.h.stamp:
+ 	mkdir -p include/asm-ia64
+ 	[ -s include/asm-ia64/offsets.h ] \
+ 	 || echo "#define IA64_TASK_SIZE 0" > include/asm-ia64/offsets.h
+ 	touch $@
++	[ -e include/asm-xen/asm ] \
++	 || ln -s asm-ia64 include/asm-xen/asm
++#	[ -e include/asm-xen/xen-public ] \
++#	 || ln -s $(XEN_PATH)/xen/include/public \
++#		include/asm-xen/xen-public
++#	[ -e include/asm-ia64/xen/asm-xsi-offsets.h ] \
++#	 || ln -s $(XEN_PATH)/xen/include/asm-ia64/asm-xsi-offsets.h \
++#		include/asm-ia64/xen/asm-xsi-offsets.h
++#	[ -e include/asm-xen/linux-public ] \
++#	 || ln -s $(XEN_PATH)/linux-2.6-xen-sparse/include/asm-xen/linux-public \
+#		include/asm-xen/linux-public
++	[ -e include/asm-xen/asm-ia64/hypervisor.h ] \
++	 || ln -s $(XEN_PATH)/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypervisor.h \
++		include/asm-xen/asm-ia64/hypervisor.h
++
+ 
+ boot:	lib/lib.a vmlinux
+ 	$(Q)$(MAKE) $(build)=$(boot) $@
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/coreMakefile linux-2.6.12-xen/arch/ia64/xen/drivers/coreMakefile
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/coreMakefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/coreMakefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,24 @@
++#
++# Makefile for the linux kernel.
++#
++
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++CPPFLAGS_vmlinux.lds += -U$(XENARCH)
++
++$(obj)/vmlinux.lds.S:
++	@ln -fsn $(srctree)/arch/$(XENARCH)/kernel/vmlinux.lds.S $@
++
++
++obj-y   := gnttab.o
++obj-$(CONFIG_PROC_FS) += xen_proc.o
++
++ifeq ($(ARCH),ia64)
++obj-y   += evtchn_ia64.o
++obj-y   += xenia64_init.o
++else
++extra-y += vmlinux.lds
++obj-y   += reboot.o evtchn.o fixup.o 
++obj-$(CONFIG_SMP)     += smp.o		# setup_profiling_timer def'd in ia64
++obj-$(CONFIG_NET)     += skbuff.o	# until networking is up on ia64
++endif
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/evtchn_ia64.c linux-2.6.12-xen/arch/ia64/xen/drivers/evtchn_ia64.c
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/evtchn_ia64.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/evtchn_ia64.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,266 @@
++/* NOTE: This file was split off from evtchn.c because there was
++   some discussion that the mechanism is sufficiently different.
++   It may be possible to merge it back in the future... djm */
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <asm/hw_irq.h>
++#include <asm-xen/evtchn.h>
++
++#define MAX_EVTCHN 1024
++
++/* Xen will never allocate port zero for any purpose. */
++#define VALID_EVTCHN(_chn) (((_chn) != 0) && ((_chn) < MAX_EVTCHN))
++
++/* Binding types. Hey, only IRQT_VIRQ and IRQT_EVTCHN are supported now
++ * for XEN/IA64 - ktian1
++ */
++enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
++
++/* Constructor for packed IRQ information. */
++#define mk_irq_info(type, index, evtchn)				\
++	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
++/* Convenient shorthand for packed representation of an unbound IRQ. */
++#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
++/* Accessor macros for packed IRQ information. */
++#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
++#define index_from_irq(irq)  ((u8)(irq_info[irq] >> 16))
++#define type_from_irq(irq)   ((u8)(irq_info[irq] >> 24))
++
++/* Packed IRQ information: binding type, sub-type index, and event channel. */
++static u32 irq_info[NR_IRQS];
++
++/* One note for XEN/IA64: all event channels are bound to one physical irq
++ * vector, so in this context the evtchn vector is identical to the 'irq'
++ * vector. - ktian1
++ */
++static struct {
++	irqreturn_t (*handler)(int, void *, struct pt_regs *);
++	void *dev_id;
++	char opened;	/* Whether allocated */
++} evtchns[MAX_EVTCHN];
++
++/*
++ * This lock protects updates to the following mapping and reference-count
++ * arrays. The lock does not need to be acquired to read the mapping tables.
++ */
++static spinlock_t irq_mapping_update_lock;
++
++void mask_evtchn(int port)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	synch_set_bit(port, &s->evtchn_mask[0]);
++}
++EXPORT_SYMBOL(mask_evtchn);
++
++void unmask_evtchn(int port)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	unsigned int cpu = smp_processor_id();
++	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
++
++#if 0	// FIXME: diverged from x86 evtchn.c
++	/* Slow path (hypercall) if this is a non-local port. */
++	if (unlikely(cpu != cpu_from_evtchn(port))) {
++		evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
++				   .u.unmask.port = port };
++		(void)HYPERVISOR_event_channel_op(&op);
++		return;
++	}
++#endif
++
++	synch_clear_bit(port, &s->evtchn_mask[0]);
++
++	/*
++	 * The following is basically the equivalent of 'hw_resend_irq'. Just
++	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
++	 * masked.
++	 */
++	if (synch_test_bit(port, &s->evtchn_pending[0]) && 
++	    !synch_test_and_set_bit(port / BITS_PER_LONG,
++				    &vcpu_info->evtchn_pending_sel)) {
++		vcpu_info->evtchn_upcall_pending = 1;
++		if (!vcpu_info->evtchn_upcall_mask)
++			force_evtchn_callback();
++	}
++}
++EXPORT_SYMBOL(unmask_evtchn);
++
++
++#define unbound_irq(e) (VALID_EVTCHN(e) && (!evtchns[(e)].opened))
++int bind_virq_to_irqhandler(
++	unsigned int virq,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
++{
++    evtchn_op_t op;
++    int evtchn;
++
++    spin_lock(&irq_mapping_update_lock);
++
++    op.cmd = EVTCHNOP_bind_virq;
++    op.u.bind_virq.virq = virq;
++    op.u.bind_virq.vcpu = cpu;
++    BUG_ON(HYPERVISOR_event_channel_op(&op) != 0 );
++    evtchn = op.u.bind_virq.port;
++
++    if (!unbound_irq(evtchn))
++	return -EINVAL;
++
++    evtchns[evtchn].handler = handler;
++    evtchns[evtchn].dev_id = dev_id;
++    evtchns[evtchn].opened = 1;
++    irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++
++    unmask_evtchn(evtchn);
++    spin_unlock(&irq_mapping_update_lock);
++    return evtchn;
++}
++
++int bind_evtchn_to_irqhandler(unsigned int evtchn,
++                   irqreturn_t (*handler)(int, void *, struct pt_regs *),
++                   unsigned long irqflags, const char * devname, void *dev_id)
++{
++    spin_lock(&irq_mapping_update_lock);
++
++    if (!unbound_irq(evtchn))
++	return -EINVAL;
++
++    evtchns[evtchn].handler = handler;
++    evtchns[evtchn].dev_id = dev_id;
++    evtchns[evtchn].opened = 1;
++    irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
++
++    unmask_evtchn(evtchn);
++    spin_unlock(&irq_mapping_update_lock);
++    return evtchn;
++}
++
++int bind_ipi_to_irqhandler(
++	unsigned int ipi,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
++{
++    printk("%s called but not yet supported\n", __FUNCTION__);
++    while(1);
++}
++
++void unbind_from_irqhandler(unsigned int irq, void *dev_id)
++{
++    evtchn_op_t op;
++    int evtchn = evtchn_from_irq(irq);
++
++    spin_lock(&irq_mapping_update_lock);
++
++    if (unbound_irq(irq))
++        return;
++
++    op.cmd = EVTCHNOP_close;
++    op.u.close.port = evtchn;
++    BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
++
++    switch (type_from_irq(irq)) {
++	case IRQT_VIRQ:
++	    /* Add smp stuff later... */
++	    break;
++	case IRQT_IPI:
++	    /* Add smp stuff later... */
++	    break;
++	default:
++	    break;
++    }
++
++    mask_evtchn(evtchn);
++    evtchns[evtchn].handler = NULL;
++    evtchns[evtchn].opened = 0;
++
++    spin_unlock(&irq_mapping_update_lock);
++}
++
++void notify_remote_via_irq(int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (!unbound_irq(evtchn))
++		notify_remote_via_evtchn(evtchn);
++}
++
++irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++{
++    unsigned long  l1, l2;
++    unsigned int   l1i, l2i, port;
++    irqreturn_t (*handler)(int, void *, struct pt_regs *);
++    shared_info_t *s = HYPERVISOR_shared_info;
++    vcpu_info_t   *vcpu_info = &s->vcpu_info[smp_processor_id()];
++
++    vcpu_info->evtchn_upcall_mask = 1;
++    vcpu_info->evtchn_upcall_pending = 0;
++
++    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
++    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
++    while ( l1 != 0 )
++    {
++        l1i = __ffs(l1);
++        l1 &= ~(1UL << l1i);
++
++        while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
++        {
++            l2i = __ffs(l2);
++            l2 &= ~(1UL << l2i);
++
++            port = (l1i * BITS_PER_LONG) + l2i;
++            if ( (handler = evtchns[port].handler) != NULL )
++	    {
++		clear_evtchn(port);
++                handler(port, evtchns[port].dev_id, regs);
++	    }
++            else
++	    {
++                evtchn_device_upcall(port);
++	    }
++        }
++    }
++    vcpu_info->evtchn_upcall_mask = 0;
++    return IRQ_HANDLED;
++}
++
++void force_evtchn_callback(void)
++{
++	//(void)HYPERVISOR_xen_version(0, NULL);
++}
++
++static struct irqaction evtchn_irqaction = {
++	.handler =	evtchn_interrupt,
++	.flags =	SA_INTERRUPT,
++	.name =		"xen-event-channel"
++};
++
++int evtchn_irq = 0xe9;
++void __init evtchn_init(void)
++{
++    shared_info_t *s = HYPERVISOR_shared_info;
++    vcpu_info_t   *vcpu_info = &s->vcpu_info[smp_processor_id()];
++
++#if 0
++    int ret;
++    irq = assign_irq_vector(AUTO_ASSIGN);
++    ret = request_irq(irq, evtchn_interrupt, 0, "xen-event-channel", NULL);
++    if (ret < 0)
++    {
++	printk("xen-event-channel unable to get irq %d (%d)\n", irq, ret);
++	return;
++    }
++#endif
++    register_percpu_irq(evtchn_irq, &evtchn_irqaction);
++
++    vcpu_info->arch.evtchn_vector = evtchn_irq;
++    printk("xen-event-channel using irq %d\n", evtchn_irq);
++
++    spin_lock_init(&irq_mapping_update_lock);
++    memset(evtchns, 0, sizeof(evtchns));
++}
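
evtchn_interrupt() above scans a two-level pending structure: the per-VCPU selector word names the pending words that may be non-zero, and each flagged word is then scanned lowest-bit-first for pending, unmasked ports, clearing each port before its handler runs. The same scan reduced to portable C, with invented array sizes and port numbers and __builtin_ctzl standing in for the kernel's __ffs:

    /*
     * Illustration only: two-level event scan.  Array sizes, port numbers
     * and the GCC builtin __builtin_ctzl (used instead of the kernel's
     * __ffs) are all invented for the demo.
     */
    #include <stdio.h>

    #define WORD_BITS (8 * (int)sizeof(unsigned long))
    #define NWORDS    4

    static unsigned long pending[NWORDS];
    static unsigned long mask[NWORDS];

    static void handle_port(int port)
    {
            printf("event on port %d\n", port);
    }

    static void scan_events(unsigned long selector)
    {
            while (selector != 0) {
                    int w = __builtin_ctzl(selector);       /* lowest flagged word  */
                    selector &= ~(1UL << w);

                    unsigned long l2;
                    while ((l2 = pending[w] & ~mask[w]) != 0) {
                            int b = __builtin_ctzl(l2);     /* lowest ready port    */
                            pending[w] &= ~(1UL << b);      /* "clear_evtchn(port)" */
                            handle_port(w * WORD_BITS + b);
                    }
            }
    }

    int main(void)
    {
            pending[0] = (1UL << 3) | (1UL << 7);
            pending[2] = 1UL << 0;
            mask[0]    = 1UL << 7;                          /* port 7 stays masked   */
            scan_events((1UL << 0) | (1UL << 2));           /* words 0 and 2 flagged */
            return 0;
    }

On a 64-bit build this prints ports 3 and 128; port 7 stays pending but masked, just as the real handler leaves masked channels for a later unmask_evtchn().
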
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/Makefile linux-2.6.12-xen/arch/ia64/xen/drivers/Makefile
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,20 @@
++
++obj-y   += util.o
++
++obj-y	+= core/
++obj-y	+= console/
++obj-y	+= evtchn/
++#obj-y	+= balloon/
++obj-y	+= privcmd/
++obj-y	+= blkback/
++#obj-y	+= netback/
++obj-y	+= blkfront/
++obj-y	+= xenbus/
++#obj-y	+= netfront/
++#obj-$(CONFIG_XEN_PRIVILEGED_GUEST)	+= privcmd/
++#obj-$(CONFIG_XEN_BLKDEV_BACKEND)	+= blkback/
++#obj-$(CONFIG_XEN_NETDEV_BACKEND)	+= netback/
++#obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= blkfront/
++#obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
++#obj-$(CONFIG_XEN_BLKDEV_TAP)    	+= blktap/
++
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/blkback.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/blkback.c.patch
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/blkback.c.patch	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/blkback.c.patch	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,57 @@
++diff -Naur xen/blkback/blkback.c xen.patched/blkback/blkback.c
++--- xen/blkback/blkback.c	2005-09-23 10:54:50.000000000 -0600
+++++ xen.patched/blkback/blkback.c	2005-09-23 10:57:51.000000000 -0600
++@@ -30,10 +30,16 @@
++ static unsigned long mmap_vstart;
++ #define MMAP_PAGES						\
++ 	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
+++#ifdef __ia64__
+++static void *pending_vaddrs[MMAP_PAGES];
+++#define MMAP_VADDR(_idx, _i) \
+++	(unsigned long)(pending_vaddrs[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
+++#else
++ #define MMAP_VADDR(_req,_seg)						\
++ 	(mmap_vstart +							\
++ 	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
++ 	 ((_seg) * PAGE_SIZE))
+++#endif
++ 
++ /*
++  * Each outstanding request that we've passed to the lower device layers has a 
++@@ -377,9 +383,13 @@
++ 			goto bad_descriptor;
++ 		}
++ 
+++#ifdef __ia64__
+++		MMAP_VADDR(pending_idx,i) = gnttab_map_vaddr(map[i]);
+++#else
++ 		phys_to_machine_mapping[__pa(MMAP_VADDR(
++ 			pending_idx, i)) >> PAGE_SHIFT] =
++ 			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
+++#endif
++ 
++ 		pending_handle(pending_idx, i) = map[i].handle;
++ 	}
++@@ -500,9 +510,22 @@
++ 
++ 	blkif_interface_init();
++ 
+++#ifdef __ia64__
+++    {
+++	extern unsigned long alloc_empty_foreign_map_page_range(unsigned long pages);
+++	int i;
+++
+++	mmap_vstart =  alloc_empty_foreign_map_page_range(MMAP_PAGES);
+++	printk("Allocated mmap_vstart: 0x%lx\n", mmap_vstart);
+++	for(i = 0; i < MMAP_PAGES; i++)
+++	    pending_vaddrs[i] = mmap_vstart + (i << PAGE_SHIFT);
+++	BUG_ON(mmap_vstart == NULL);
+++    }
+++#else
++ 	page = balloon_alloc_empty_page_range(MMAP_PAGES);
++ 	BUG_ON(page == NULL);
++ 	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+++#endif
++ 
++ 	pending_cons = 0;
++ 	pending_prod = MAX_PENDING_REQS;
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/console.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/console.c.patch
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/console.c.patch	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/console.c.patch	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,18 @@
++--- xen/console/console.c	2005-11-02 14:13:07.000000000 +0100
+++++ xen.patched/console/console.c	2005-11-02 14:21:20.000000000 +0100
++@@ -768,9 +771,15 @@
++ #endif
++ 
++ 	if (xen_start_info->flags & SIF_INITDOMAIN) {
+++#ifdef __ia64__
+++		xencons_priv_irq = bind_virq_to_evtchn(VIRQ_CONSOLE);
+++		bind_evtchn_to_irqhandler(xencons_priv_irq,
+++				xencons_priv_interrupt, 0, "console", NULL);
+++#else
++ 		xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
++ 		(void)request_irq(xencons_priv_irq,
++ 				  xencons_priv_interrupt, 0, "console", NULL);
+++#endif
++ 	} else {
++ 		xencons_ring_register_receiver(xencons_rx);
++ 	}
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/devmem.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/devmem.c.patch
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/devmem.c.patch	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/devmem.c.patch	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,3 @@
++diff -Naur xen/core/devmem.c xen.patched/core/devmem.c
++--- xen/core/devmem.c	2005-09-23 10:54:50.000000000 -0600
+++++ xen.patched/core/devmem.c	2005-09-23 10:57:51.000000000 -0600
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/gnttab.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/gnttab.c.patch
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/gnttab.c.patch	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/gnttab.c.patch	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,46 @@
++diff -Naur xen/core/gnttab.c xen.patched/core/gnttab.c
++--- xen/core/gnttab.c	2005-09-23 10:54:50.000000000 -0600
+++++ xen.patched/core/gnttab.c	2005-09-23 10:57:51.000000000 -0600
++@@ -346,6 +350,10 @@
++ 	if ( hypercall.op != __HYPERVISOR_grant_table_op )
++ 		return -ENOSYS;
++ 
+++
+++#ifdef __ia64__
+++	ret = HYPERVISOR_grant_table_op(hypercall.arg[0], (void *)hypercall.arg[1], hypercall.arg[2]);
+++#else
++ 	/* hypercall-invoking asm taken from privcmd.c */
++ 	__asm__ __volatile__ (
++ 		"pushl %%ebx; pushl %%ecx; pushl %%edx; "
++@@ -359,6 +367,7 @@
++ 		TRAP_INSTR "; "
++ 		"popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
++ 		: "=a" (ret) : "0" (&hypercall) : "memory" );
+++#endif
++ 
++ 	return ret;
++ }
++@@ -423,8 +432,13 @@
++ 	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
++ 	BUG_ON(setup.status != 0);
++ 
+++#ifdef __ia64__
+++	shared = __va(frames[0] << PAGE_SHIFT);
+++	printk("grant table at %p\n", shared);
+++#else
++ 	for (i = 0; i < NR_GRANT_FRAMES; i++)
++ 		set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
+++#endif
++ 
++ 	return 0;
++ }
++@@ -450,7 +466,9 @@
++ 
++ 	BUG_ON(gnttab_resume());
++ 
+++#ifndef __ia64__
++ 	shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
+++#endif
++ 
++ 	for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
++ 		gnttab_list[i] = i + 1;
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/privcmd.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/privcmd.c.patch
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/privcmd.c.patch	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/privcmd.c.patch	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,43 @@
++diff -Naur xen/privcmd/privcmd.c xen.patched/privcmd/privcmd.c
++--- xen/privcmd/privcmd.c	2005-09-23 10:54:50.000000000 -0600
+++++ xen.patched/privcmd/privcmd.c	2005-09-23 10:57:51.000000000 -0600
++@@ -180,6 +183,15 @@
++ 		for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
++ 			if (get_user(mfn, p))
++ 				return -EFAULT;
+++#ifdef __ia64__
+++			ret = remap_pfn_range(vma,
+++					      addr&PAGE_MASK,
+++					      mfn,
+++					      1<<PAGE_SHIFT,
+++					      vma->vm_page_prot);
+++			if (ret < 0)
+++			    goto batch_err;
+++#else
++ 
++ 			ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
++ 			if (ret)
++@@ -190,6 +202,7 @@
++ 
++ 			if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
++ 				put_user(0xF0000000 | mfn, p);
+++#endif
++ 		}
++ 
++ 		ret = 0;
++@@ -205,6 +218,7 @@
++ 	break;
++ #endif
++ 
+++#ifndef __ia64__
++ 	case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: {
++ 		unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
++ 		pgd_t *pgd = pgd_offset_k(m2pv);
++@@ -216,6 +230,7 @@
++ 			-EFAULT: 0;
++ 	}
++ 	break;
+++#endif
++ 
++ 	default:
++ 		ret = -EINVAL;
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/README linux-2.6.12-xen/arch/ia64/xen/drivers/README
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/README	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/README	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,2 @@
++This is a temporary location for source/Makefiles that need to be
++patched/reworked in drivers/xen to work with xenlinux/ia64.
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/xenia64_init.c linux-2.6.12-xen/arch/ia64/xen/drivers/xenia64_init.c
+--- pristine-linux-2.6.12/arch/ia64/xen/drivers/xenia64_init.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/drivers/xenia64_init.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,55 @@
++#ifdef __ia64__
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <asm/sal.h>
++#include <asm/hypervisor.h>
++/* #include <asm-xen/evtchn.h> */
++#include <linux/vmalloc.h>
++
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)0xf100000000000000;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++static int initialized;
++start_info_t *xen_start_info;
++
++int xen_init(void)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++
++	if (initialized)
++		return running_on_xen ? 0 : -1;
++
++	if (!running_on_xen)
++		return -1;
++
++	xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
++	xen_start_info->flags = s->arch.flags;
++	printk("Running on Xen! start_info_pfn=0x%lx nr_pages=%d flags=0x%x\n",
++		s->arch.start_info_pfn, xen_start_info->nr_pages,
++		xen_start_info->flags);
++
++	evtchn_init();
++	initialized = 1;
++	return 0;
++}
++
++/* We just need a range of legal va here, though finally identity
++ * mapped one is instead used for gnttab mapping.
++ */
++unsigned long alloc_empty_foreign_map_page_range(unsigned long pages)
++{
++	struct vm_struct *vma;
++
++	if ( (vma = get_vm_area(PAGE_SIZE * pages, VM_ALLOC)) == NULL )
++		return NULL;
++
++	return (unsigned long)vma->addr;
++}
++
++#if 0
++/* These should be define'd but some drivers use them without
++ * a convenient arch include */
++unsigned long mfn_to_pfn(unsigned long mfn) { return mfn; }
++#endif
++#endif
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/hypercall.S linux-2.6.12-xen/arch/ia64/xen/hypercall.S
+--- pristine-linux-2.6.12/arch/ia64/xen/hypercall.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/hypercall.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,323 @@
++/*
++ * Support routines for Xen hypercalls
++ *
++ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer at hp.com>
++ */
++
++#include <linux/config.h>
++#include <asm/processor.h>
++#include <asm/asmmacro.h>
++
++GLOBAL_ENTRY(xen_get_ivr)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov r8=cr.ivr;;
++(p7)	br.ret.sptk.many rp
++	;;
++	movl r9=XSI_PSR_IC
++	;;
++	ld8 r10=[r9]
++	;;
++	st8 [r9]=r0
++	;;
++	XEN_HYPER_GET_IVR
++	;;
++	st8 [r9]=r10
++	br.ret.sptk.many rp
++	;;
++END(xen_get_ivr)
++
++GLOBAL_ENTRY(xen_get_tpr)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov r8=cr.tpr;;
++(p7)	br.ret.sptk.many rp
++	;;
++	movl r9=XSI_PSR_IC
++	;;
++	ld8 r10=[r9]
++	;;
++	st8 [r9]=r0
++	;;
++	XEN_HYPER_GET_TPR
++	;;
++	st8 [r9]=r10
++	br.ret.sptk.many rp
++	;;
++END(xen_get_tpr)
++
++GLOBAL_ENTRY(xen_set_tpr)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov cr.tpr=r32;;
++(p7)	br.ret.sptk.many rp
++	;;
++	movl r9=XSI_PSR_IC
++	mov r8=r32
++	;;
++	ld8 r10=[r9]
++	;;
++	st8 [r9]=r0
++	;;
++	XEN_HYPER_SET_TPR
++	;;
++	st8 [r9]=r10
++	br.ret.sptk.many rp
++	;;
++END(xen_set_tpr)
++
++GLOBAL_ENTRY(xen_eoi)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov cr.eoi=r0;;
++(p7)	br.ret.sptk.many rp
++	;;
++	movl r9=XSI_PSR_IC
++	mov r8=r32
++	;;
++	ld8 r10=[r9]
++	;;
++	st8 [r9]=r0
++	;;
++	XEN_HYPER_EOI
++	;;
++	st8 [r9]=r10
++	br.ret.sptk.many rp
++	;;
++END(xen_eoi)
++
++GLOBAL_ENTRY(xen_thash)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	thash r8=r32;;
++(p7)	br.ret.sptk.many rp
++	;;
++	movl r9=XSI_PSR_IC
++	mov r8=r32
++	;;
++	ld8 r10=[r9]
++	;;
++	st8 [r9]=r0
++	;;
++	XEN_HYPER_THASH
++	;;
++	st8 [r9]=r10
++	;;
++	br.ret.sptk.many rp
++	;;
++END(xen_thash)
++
++GLOBAL_ENTRY(xen_set_itm)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov cr.itm=r32;;
++(p7)	br.ret.sptk.many rp
++	;;
++	movl r9=XSI_PSR_IC
++	mov r8=r32
++	;;
++	ld8 r10=[r9]
++	;;
++	st8 [r9]=r0
++	;;
++	XEN_HYPER_SET_ITM
++	;;
++	st8 [r9]=r10
++	;;
++	br.ret.sptk.many rp
++	;;
++END(xen_set_itm)
++
++GLOBAL_ENTRY(xen_ptcga)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	ptc.ga r32,r33;;
++(p7)	br.ret.sptk.many rp
++	;;
++	movl r11=XSI_PSR_IC
++	mov r8=r32
++	mov r9=r33
++	;;
++	ld8 r10=[r11]
++	;;
++	st8 [r11]=r0
++	;;
++	XEN_HYPER_PTC_GA
++	;;
++	st8 [r11]=r10
++	;;
++	br.ret.sptk.many rp
++	;;
++END(xen_ptcga)
++
++GLOBAL_ENTRY(xen_get_rr)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov r8=rr[r32];;
++(p7)	br.ret.sptk.many rp
++	;;
++	movl r9=XSI_PSR_IC
++	mov r8=r32
++	;;
++	ld8 r10=[r9]
++	;;
++	st8 [r9]=r0
++	;;
++	XEN_HYPER_GET_RR
++	;;
++	st8 [r9]=r10
++	;;
++	br.ret.sptk.many rp
++	;;
++END(xen_get_rr)
++
++GLOBAL_ENTRY(xen_set_rr)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov rr[r32]=r33;;
++(p7)	br.ret.sptk.many rp
++	;;
++	movl r11=XSI_PSR_IC
++	mov r8=r32
++	mov r9=r33
++	;;
++	ld8 r10=[r11]
++	;;
++	st8 [r11]=r0
++	;;
++	XEN_HYPER_SET_RR
++	;;
++	st8 [r11]=r10
++	;;
++	br.ret.sptk.many rp
++	;;
++END(xen_set_rr)
++
++GLOBAL_ENTRY(xen_set_kr)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.ne p7,p0=r8,r0;;
++(p7)	br.cond.spnt.few 1f;
++	;;
++	cmp.eq p7,p0=r8,r0
++	adds r8=-1,r8;;
++(p7)	mov ar0=r9
++(p7)	br.ret.sptk.many rp;;
++	cmp.eq p7,p0=r8,r0
++	adds r8=-1,r8;;
++(p7)	mov ar1=r9
++(p7)	br.ret.sptk.many rp;;
++	cmp.eq p7,p0=r8,r0
++	adds r8=-1,r8;;
++(p7)	mov ar2=r9
++(p7)	br.ret.sptk.many rp;;
++	cmp.eq p7,p0=r8,r0
++	adds r8=-1,r8;;
++(p7)	mov ar3=r9
++(p7)	br.ret.sptk.many rp;;
++	cmp.eq p7,p0=r8,r0
++	adds r8=-1,r8;;
++(p7)	mov ar4=r9
++(p7)	br.ret.sptk.many rp;;
++	cmp.eq p7,p0=r8,r0
++	adds r8=-1,r8;;
++(p7)	mov ar5=r9
++(p7)	br.ret.sptk.many rp;;
++	cmp.eq p7,p0=r8,r0
++	adds r8=-1,r8;;
++(p7)	mov ar6=r9
++(p7)	br.ret.sptk.many rp;;
++	cmp.eq p7,p0=r8,r0
++	adds r8=-1,r8;;
++(p7)	mov ar7=r9
++(p7)	br.ret.sptk.many rp;;
++
++1:	movl r11=XSI_PSR_IC
++	mov r8=r32
++	mov r9=r33
++	;;
++	ld8 r10=[r11]
++	;;
++	st8 [r11]=r0
++	;;
++	XEN_HYPER_SET_KR
++	;;
++	st8 [r11]=r10
++	;;
++	br.ret.sptk.many rp
++	;;
++END(xen_set_kr)
++
++GLOBAL_ENTRY(xen_fc)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	fc r32;;
++(p7)	br.ret.sptk.many rp
++	;;
++	ptc.e r96		// this is a "privified" fc r32
++	;;
++	br.ret.sptk.many rp
++END(xen_fc)
++
++GLOBAL_ENTRY(xen_get_cpuid)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov r8=cpuid[r32];;
++(p7)	br.ret.sptk.many rp
++	;;
++	mov r72=rr[r32]		// this is a "privified" mov r8=cpuid[r32]
++	;;
++	br.ret.sptk.many rp
++END(xen_get_cpuid)
++
++GLOBAL_ENTRY(xen_get_pmd)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov r8=pmd[r32];;
++(p7)	br.ret.sptk.many rp
++	;;
++	mov r72=pmc[r32] 	// this is a "privified" mov r8=pmd[r32]
++	;;
++	br.ret.sptk.many rp
++END(xen_get_pmd)
++
++#ifdef CONFIG_IA32_SUPPORT
++GLOBAL_ENTRY(xen_get_eflag)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov r8=ar24;;
++(p7)	br.ret.sptk.many rp
++	;;
++	mov ar24=r72		// this is a "privified" mov r8=ar.eflg
++	;;
++	br.ret.sptk.many rp
++END(xen_get_eflag)
++// some bits aren't set if pl!=0, see SDM vol1 3.1.8
++GLOBAL_ENTRY(xen_set_eflag)
++	movl r8=running_on_xen;;
++	ld4 r8=[r8];;
++	cmp.eq p7,p0=r8,r0;;
++(p7)	mov ar24=r32
++(p7)	br.ret.sptk.many rp
++	;;
++	// FIXME: this remains no-op'd because it generates
++	// a privileged register (general exception) trap rather than
++	// a privileged operation fault
++	//mov ar24=r32
++	;;
++	br.ret.sptk.many rp
++END(xen_set_eflag)
++#endif
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/Makefile linux-2.6.12-xen/arch/ia64/xen/Makefile
+--- pristine-linux-2.6.12/arch/ia64/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,5 @@
++#
++# Makefile for Xen components
++#
++
++obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o xenconsole.o xen_ksyms.o
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenconsole.c linux-2.6.12-xen/arch/ia64/xen/xenconsole.c
+--- pristine-linux-2.6.12/arch/ia64/xen/xenconsole.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/xenconsole.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,19 @@
++#include <linux/config.h>
++#include <linux/console.h>
++
++int
++early_xen_console_setup (char *cmdline)
++{
++#ifdef CONFIG_XEN
++#ifndef CONFIG_IA64_HP_SIM
++	extern int running_on_xen;
++	if (running_on_xen) {
++		extern struct console hpsim_cons;
++		hpsim_cons.flags |= CON_BOOT;
++		register_console(&hpsim_cons);
++		return 0;
++	}
++#endif
++#endif
++	return -1;
++}
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenentry.S linux-2.6.12-xen/arch/ia64/xen/xenentry.S
+--- pristine-linux-2.6.12/arch/ia64/xen/xenentry.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/xenentry.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,850 @@
++/*
++ * ia64/xen/entry.S
++ *
++ * Alternate kernel routines for Xen.  Heavily leveraged from
++ *   ia64/kernel/entry.S
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co
++ *	Dan Magenheimer <dan.magenheimer at hp.com>
++ */
++
++#include <linux/config.h>
++
++#include <asm/asmmacro.h>
++#include <asm/cache.h>
++#include <asm/errno.h>
++#include <asm/kregs.h>
++#include <asm/offsets.h>
++#include <asm/pgtable.h>
++#include <asm/percpu.h>
++#include <asm/processor.h>
++#include <asm/thread_info.h>
++#include <asm/unistd.h>
++
++#ifdef CONFIG_XEN
++#include "xenminstate.h"
++#else
++#include "minstate.h"
++#endif
++
++/*
++ * prev_task <- ia64_switch_to(struct task_struct *next)
++ *	With Ingo's new scheduler, interrupts are disabled when this routine gets
++ *	called.  The code starting at .map relies on this.  The rest of the code
++ *	doesn't care about the interrupt masking status.
++ */
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_switch_to)
++	.prologue
++	alloc r16=ar.pfs,1,0,0,0
++	movl r22=running_on_xen;;
++	ld4 r22=[r22];;
++	cmp.eq p7,p0=r22,r0
++(p7)	br.cond.sptk.many __ia64_switch_to;;
++#else
++GLOBAL_ENTRY(ia64_switch_to)
++	.prologue
++	alloc r16=ar.pfs,1,0,0,0
++#endif
++	DO_SAVE_SWITCH_STACK
++	.body
++
++	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
++	movl r25=init_task
++	mov r27=IA64_KR(CURRENT_STACK)
++	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
++	dep r20=0,in0,61,3		// physical address of "next"
++	;;
++	st8 [r22]=sp			// save kernel stack pointer of old task
++	shr.u r26=r20,IA64_GRANULE_SHIFT
++	cmp.eq p7,p6=r25,in0
++	;;
++#ifdef CONFIG_XEN
++	movl r8=XSI_PSR_IC
++	;;
++	st4 [r8]=r0	// force psr.ic off for hyperprivop(s)
++	;;
++#endif
++	/*
++	 * If we've already mapped this task's page, we can skip doing it again.
++	 */
++(p6)	cmp.eq p7,p6=r26,r27
++(p6)	br.cond.dpnt .map
++	;;
++.done:
++#ifdef CONFIG_XEN
++	// psr.ic already off
++	// update "current" application register
++	mov r8=IA64_KR_CURRENT
++	mov r9=in0;;
++	XEN_HYPER_SET_KR
++	ld8 sp=[r21]			// load kernel stack pointer of new task
++	movl r27=XSI_PSR_IC
++	mov r8=1
++	;;
++	st4 [r27]=r8			// psr.ic back on
++	;;
++#else
++(p6)	ssm psr.ic			// if we had to map, reenable the psr.ic bit FIRST!!!
++	;;
++(p6)	srlz.d
++	ld8 sp=[r21]			// load kernel stack pointer of new task
++	mov IA64_KR(CURRENT)=in0	// update "current" application register
++#endif
++	mov r8=r13			// return pointer to previously running task
++	mov r13=in0			// set "current" pointer
++	;;
++	DO_LOAD_SWITCH_STACK
++
++#ifdef CONFIG_SMP
++	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
++#endif
++	br.ret.sptk.many rp		// boogie on out in new context
++
++.map:
++#ifdef CONFIG_XEN
++	// psr.ic already off
++#else
++	rsm psr.ic			// interrupts (psr.i) are already disabled here
++#endif
++	movl r25=PAGE_KERNEL
++	;;
++	srlz.d
++	or r23=r25,r20			// construct PA | page properties
++	mov r25=IA64_GRANULE_SHIFT<<2
++	;;
++#ifdef CONFIG_XEN
++	movl r8=XSI_ITIR
++	;;
++	st8 [r8]=r25
++	;;
++	movl r8=XSI_IFA
++	;;
++	st8 [r8]=in0			 // VA of next task...
++	;;
++	mov r25=IA64_TR_CURRENT_STACK
++	// remember last page we mapped...
++	mov r8=IA64_KR_CURRENT_STACK
++	mov r9=r26;;
++	XEN_HYPER_SET_KR;;
++#else
++	mov cr.itir=r25
++	mov cr.ifa=in0			// VA of next task...
++	;;
++	mov r25=IA64_TR_CURRENT_STACK
++	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
++#endif
++	;;
++	itr.d dtr[r25]=r23		// wire in new mapping...
++	br.cond.sptk .done
++#ifdef CONFIG_XEN
++END(xen_switch_to)
++#else
++END(ia64_switch_to)
++#endif
++
++	/*
++	 * Invoke a system call, but do some tracing before and after the call.
++	 * We MUST preserve the current register frame throughout this routine
++	 * because some system calls (such as ia64_execve) directly
++	 * manipulate ar.pfs.
++	 */
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_trace_syscall)
++	PT_REGS_UNWIND_INFO(0)
++	movl r16=running_on_xen;;
++	ld4 r16=[r16];;
++	cmp.eq p7,p0=r16,r0
++(p7)	br.cond.sptk.many __ia64_trace_syscall;;
++#else
++GLOBAL_ENTRY(ia64_trace_syscall)
++	PT_REGS_UNWIND_INFO(0)
++#endif
++	/*
++	 * We need to preserve the scratch registers f6-f11 in case the system
++	 * call is sigreturn.
++	 */
++	adds r16=PT(F6)+16,sp
++	adds r17=PT(F7)+16,sp
++	;;
++ 	stf.spill [r16]=f6,32
++ 	stf.spill [r17]=f7,32
++	;;
++ 	stf.spill [r16]=f8,32
++ 	stf.spill [r17]=f9,32
++	;;
++ 	stf.spill [r16]=f10
++ 	stf.spill [r17]=f11
++	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
++	adds r16=PT(F6)+16,sp
++	adds r17=PT(F7)+16,sp
++	;;
++	ldf.fill f6=[r16],32
++	ldf.fill f7=[r17],32
++	;;
++	ldf.fill f8=[r16],32
++	ldf.fill f9=[r17],32
++	;;
++	ldf.fill f10=[r16]
++	ldf.fill f11=[r17]
++	// the syscall number may have changed, so re-load it and re-calculate the
++	// syscall entry-point:
++	adds r15=PT(R15)+16,sp			// r15 = &pt_regs.r15 (syscall #)
++	;;
++	ld8 r15=[r15]
++	mov r3=NR_syscalls - 1
++	;;
++	adds r15=-1024,r15
++	movl r16=sys_call_table
++	;;
++	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
++	cmp.leu p6,p7=r15,r3
++	;;
++(p6)	ld8 r20=[r20]				// load address of syscall entry point
++(p7)	movl r20=sys_ni_syscall
++	;;
++	mov b6=r20
++	br.call.sptk.many rp=b6			// do the syscall
++.strace_check_retval:
++	cmp.lt p6,p0=r8,r0			// syscall failed?
++	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
++	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
++	mov r10=0
++(p6)	br.cond.sptk strace_error		// syscall failed ->
++	;;					// avoid RAW on r10
++.strace_save_retval:
++.mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
++.mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
++	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
++.ret3:	br.cond.sptk .work_pending_syscall_end
++
++strace_error:
++	ld8 r3=[r2]				// load pt_regs.r8
++	sub r9=0,r8				// negate return value to get errno value
++	;;
++	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
++	adds r3=16,r2				// r3=&pt_regs.r10
++	;;
++(p6)	mov r10=-1
++(p6)	mov r8=r9
++	br.cond.sptk .strace_save_retval
++#ifdef CONFIG_XEN
++END(xen_trace_syscall)
++#else
++END(ia64_trace_syscall)
++#endif
++
++/*
++ * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
++ *	need to switch to bank 0 and doesn't restore the scratch registers.
++ *	To avoid leaking kernel bits, the scratch registers are set to
++ *	the following known-to-be-safe values:
++ *
++ *		  r1: restored (global pointer)
++ *		  r2: cleared
++ *		  r3: 1 (when returning to user-level)
++ *	      r8-r11: restored (syscall return value(s))
++ *		 r12: restored (user-level stack pointer)
++ *		 r13: restored (user-level thread pointer)
++ *		 r14: cleared
++ *		 r15: restored (syscall #)
++ *	     r16-r17: cleared
++ *		 r18: user-level b6
++ *		 r19: cleared
++ *		 r20: user-level ar.fpsr
++ *		 r21: user-level b0
++ *		 r22: cleared
++ *		 r23: user-level ar.bspstore
++ *		 r24: user-level ar.rnat
++ *		 r25: user-level ar.unat
++ *		 r26: user-level ar.pfs
++ *		 r27: user-level ar.rsc
++ *		 r28: user-level ip
++ *		 r29: user-level psr
++ *		 r30: user-level cfm
++ *		 r31: user-level pr
++ *	      f6-f11: cleared
++ *		  pr: restored (user-level pr)
++ *		  b0: restored (user-level rp)
++ *	          b6: restored
++ *		  b7: cleared
++ *	     ar.unat: restored (user-level ar.unat)
++ *	      ar.pfs: restored (user-level ar.pfs)
++ *	      ar.rsc: restored (user-level ar.rsc)
++ *	     ar.rnat: restored (user-level ar.rnat)
++ *	 ar.bspstore: restored (user-level ar.bspstore)
++ *	     ar.fpsr: restored (user-level ar.fpsr)
++ *	      ar.ccv: cleared
++ *	      ar.csd: cleared
++ *	      ar.ssd: cleared
++ */
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_leave_syscall)
++	PT_REGS_UNWIND_INFO(0)
++	movl r22=running_on_xen;;
++	ld4 r22=[r22];;
++	cmp.eq p7,p0=r22,r0
++(p7)	br.cond.sptk.many __ia64_leave_syscall;;
++#else
++ENTRY(ia64_leave_syscall)
++	PT_REGS_UNWIND_INFO(0)
++#endif
++	/*
++	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
++	 * user- or fsys-mode, hence we disable interrupts early on.
++	 *
++	 * p6 controls whether current_thread_info()->flags needs to be check for
++	 * extra work.  We always check for extra work when returning to user-level.
++	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
++	 * is 0.  After extra work processing has been completed, execution
++	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
++	 * needs to be redone.
++	 */
++#ifdef CONFIG_PREEMPT
++	rsm psr.i				// disable interrupts
++	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
++(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
++	;;
++	.pred.rel.mutex pUStk,pKStk
++(pKStk) ld4 r21=[r20]			// r21 <- preempt_count
++(pUStk)	mov r21=0			// r21 <- 0
++	;;
++	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
++#else /* !CONFIG_PREEMPT */
++#ifdef CONFIG_XEN
++	movl r2=XSI_PSR_I
++	;;
++(pUStk)	st4 [r2]=r0
++#else
++(pUStk)	rsm psr.i
++#endif
++	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
++(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
++#endif
++.work_processed_syscall:
++	adds r2=PT(LOADRS)+16,r12
++	adds r3=PT(AR_BSPSTORE)+16,r12
++	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
++	;;
++(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
++	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
++	mov b7=r0		// clear b7
++	;;
++	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
++	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
++(p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
++	;;
++	mov r16=ar.bsp				// M2  get existing backing store pointer
++(p6)	cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
++(p6)	br.cond.spnt .work_pending_syscall
++	;;
++	// start restoring the state saved on the kernel stack (struct pt_regs):
++	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
++	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
++	mov f6=f0		// clear f6
++	;;
++	invala			// M0|1 invalidate ALAT
++#ifdef CONFIG_XEN
++	movl r29=XSI_PSR_IC
++	;;
++	st8	[r29]=r0	// note: clears both vpsr.i and vpsr.ic!
++	;;
++#else
++	rsm psr.i | psr.ic	// M2 initiate turning off of interrupt and interruption collection
++#endif
++	mov f9=f0		// clear f9
++
++	ld8 r29=[r2],16		// load cr.ipsr
++	ld8 r28=[r3],16			// load cr.iip
++	mov f8=f0		// clear f8
++	;;
++	ld8 r30=[r2],16		// M0|1 load cr.ifs
++	mov.m ar.ssd=r0		// M2 clear ar.ssd
++	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
++	;;
++	ld8 r25=[r3],16		// M0|1 load ar.unat
++	mov.m ar.csd=r0		// M2 clear ar.csd
++	mov r22=r0		// clear r22
++	;;
++	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
++(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
++	mov f10=f0		// clear f10
++	;;
++	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
++	ld8 r27=[r3],PT(PR)-PT(AR_RSC)	// load ar.rsc
++	mov f11=f0		// clear f11
++	;;
++	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// load ar.rnat (may be garbage)
++	ld8 r31=[r3],PT(R1)-PT(PR)		// load predicates
++(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
++	;;
++	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// load ar.fpsr
++	ld8.fill r1=[r3],16	// load r1
++(pUStk) mov r17=1
++	;;
++	srlz.d			// M0  ensure interruption collection is off
++	ld8.fill r13=[r3],16
++	mov f7=f0		// clear f7
++	;;
++	ld8.fill r12=[r2]	// restore r12 (sp)
++	ld8.fill r15=[r3]	// restore r15
++	addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
++	;;
++(pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
++(pUStk) st1 [r14]=r17
++	mov b6=r18		// I0  restore b6
++	;;
++	mov r14=r0		// clear r14
++	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
++(pKStk) br.cond.dpnt.many skip_rbs_switch
++
++	mov.m ar.ccv=r0		// clear ar.ccv
++(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
++	br.cond.sptk.many rbs_switch
++#ifdef CONFIG_XEN
++END(xen_leave_syscall)
++#else
++END(ia64_leave_syscall)
++#endif
++
++#ifdef CONFIG_XEN
++GLOBAL_ENTRY(xen_leave_kernel)
++	PT_REGS_UNWIND_INFO(0)
++	movl r22=running_on_xen;;
++	ld4 r22=[r22];;
++	cmp.eq p7,p0=r22,r0
++(p7)	br.cond.sptk.many __ia64_leave_kernel;;
++#else
++GLOBAL_ENTRY(ia64_leave_kernel)
++	PT_REGS_UNWIND_INFO(0)
++#endif
++	/*
++	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
++	 * user- or fsys-mode, hence we disable interrupts early on.
++	 *
++	 * p6 controls whether current_thread_info()->flags needs to be check for
++	 * extra work.  We always check for extra work when returning to user-level.
++	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
++	 * is 0.  After extra work processing has been completed, execution
++	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
++	 * needs to be redone.
++	 */
++#ifdef CONFIG_PREEMPT
++	rsm psr.i				// disable interrupts
++	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
++(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
++	;;
++	.pred.rel.mutex pUStk,pKStk
++(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
++(pUStk)	mov r21=0			// r21 <- 0
++	;;
++	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
++#else
++#ifdef CONFIG_XEN
++(pUStk)	movl r17=XSI_PSR_I
++	;;
++(pUStk)	st4 [r17]=r0
++	;;
++#else
++(pUStk)	rsm psr.i
++#endif
++	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
++(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
++#endif
++.work_processed_kernel:
++	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
++	;;
++(p6)	ld4 r31=[r17]				// load current_thread_info()->flags
++	adds r21=PT(PR)+16,r12
++	;;
++
++	lfetch [r21],PT(CR_IPSR)-PT(PR)
++	adds r2=PT(B6)+16,r12
++	adds r3=PT(R16)+16,r12
++	;;
++	lfetch [r21]
++	ld8 r28=[r2],8		// load b6
++	adds r29=PT(R24)+16,r12
++
++	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
++	adds r30=PT(AR_CCV)+16,r12
++(p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
++	;;
++	ld8.fill r24=[r29]
++	ld8 r15=[r30]		// load ar.ccv
++(p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
++	;;
++	ld8 r29=[r2],16		// load b7
++	ld8 r30=[r3],16		// load ar.csd
++(p6)	br.cond.spnt .work_pending
++	;;
++	ld8 r31=[r2],16		// load ar.ssd
++	ld8.fill r8=[r3],16
++	;;
++	ld8.fill r9=[r2],16
++	ld8.fill r10=[r3],PT(R17)-PT(R10)
++	;;
++	ld8.fill r11=[r2],PT(R18)-PT(R11)
++	ld8.fill r17=[r3],16
++	;;
++	ld8.fill r18=[r2],16
++	ld8.fill r19=[r3],16
++	;;
++	ld8.fill r20=[r2],16
++	ld8.fill r21=[r3],16
++	mov ar.csd=r30
++	mov ar.ssd=r31
++	;;
++#ifdef CONFIG_XEN
++	movl r22=XSI_PSR_IC
++	;;
++	st8 [r22]=r0		// note: clears both vpsr.i and vpsr.ic!
++	;;
++#else
++	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
++#endif
++	invala			// invalidate ALAT
++	;;
++	ld8.fill r22=[r2],24
++	ld8.fill r23=[r3],24
++	mov b6=r28
++	;;
++	ld8.fill r25=[r2],16
++	ld8.fill r26=[r3],16
++	mov b7=r29
++	;;
++	ld8.fill r27=[r2],16
++	ld8.fill r28=[r3],16
++	;;
++	ld8.fill r29=[r2],16
++	ld8.fill r30=[r3],24
++	;;
++	ld8.fill r31=[r2],PT(F9)-PT(R31)
++	adds r3=PT(F10)-PT(F6),r3
++	;;
++	ldf.fill f9=[r2],PT(F6)-PT(F9)
++	ldf.fill f10=[r3],PT(F8)-PT(F10)
++	;;
++	ldf.fill f6=[r2],PT(F7)-PT(F6)
++	;;
++	ldf.fill f7=[r2],PT(F11)-PT(F7)
++	ldf.fill f8=[r3],32
++	;;
++	srlz.i			// ensure interruption collection is off
++	mov ar.ccv=r15
++	;;
++	ldf.fill f11=[r2]
++#ifdef CONFIG_XEN
++	;;
++	// r16-r31 all now hold bank1 values
++	movl r2=XSI_BANK1_R16
++	movl r3=XSI_BANK1_R16+8
++	;;
++	st8.spill [r2]=r16,16
++	st8.spill [r3]=r17,16
++	;;
++	st8.spill [r2]=r18,16
++	st8.spill [r3]=r19,16
++	;;
++	st8.spill [r2]=r20,16
++	st8.spill [r3]=r21,16
++	;;
++	st8.spill [r2]=r22,16
++	st8.spill [r3]=r23,16
++	;;
++	st8.spill [r2]=r24,16
++	st8.spill [r3]=r25,16
++	;;
++	st8.spill [r2]=r26,16
++	st8.spill [r3]=r27,16
++	;;
++	st8.spill [r2]=r28,16
++	st8.spill [r3]=r29,16
++	;;
++	st8.spill [r2]=r30,16
++	st8.spill [r3]=r31,16
++	;;
++	movl r2=XSI_BANKNUM;;
++	st4 [r2]=r0;
++#else
++	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
++#endif
++	;;
++(pUStk)	mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
++	adds r16=PT(CR_IPSR)+16,r12
++	adds r17=PT(CR_IIP)+16,r12
++
++(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
++	nop.i 0
++	nop.i 0
++	;;
++	ld8 r29=[r16],16	// load cr.ipsr
++	ld8 r28=[r17],16	// load cr.iip
++	;;
++	ld8 r30=[r16],16	// load cr.ifs
++	ld8 r25=[r17],16	// load ar.unat
++	;;
++	ld8 r26=[r16],16	// load ar.pfs
++	ld8 r27=[r17],16	// load ar.rsc
++	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
++	;;
++	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
++	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
++	;;
++	ld8 r31=[r16],16	// load predicates
++	ld8 r21=[r17],16	// load b0
++	;;
++	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
++	ld8.fill r1=[r17],16	// load r1
++	;;
++	ld8.fill r12=[r16],16
++	ld8.fill r13=[r17],16
++(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
++	;;
++	ld8 r20=[r16],16	// ar.fpsr
++	ld8.fill r15=[r17],16
++	;;
++	ld8.fill r14=[r16],16
++	ld8.fill r2=[r17]
++(pUStk)	mov r17=1
++	;;
++	ld8.fill r3=[r16]
++(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
++	shr.u r18=r19,16	// get byte size of existing "dirty" partition
++	;;
++	mov r16=ar.bsp		// get existing backing store pointer
++	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
++	;;
++	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
++(pKStk)	br.cond.dpnt skip_rbs_switch
++
++	/*
++	 * Restore user backing store.
++	 *
++	 * NOTE: alloc, loadrs, and cover can't be predicated.
++	 */
++(pNonSys) br.cond.dpnt dont_preserve_current_frame
++
++rbs_switch:
++#ifdef CONFIG_XEN
++	XEN_HYPER_COVER;
++#else
++	cover				// add current frame into dirty partition and set cr.ifs
++#endif
++	;;
++	mov r19=ar.bsp			// get new backing store pointer
++	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
++	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
++	;;
++	sub r19=r19,r16			// calculate total byte size of dirty partition
++	add r18=64,r18			// don't force in0-in7 into memory...
++	;;
++	shl r19=r19,16			// shift size of dirty partition into loadrs position
++	;;
++dont_preserve_current_frame:
++	/*
++	 * To prevent leaking bits between the kernel and user-space,
++	 * we must clear the stacked registers in the "invalid" partition here.
++	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
++	 * 5 registers/cycle on McKinley).
++	 */
++#	define pRecurse	p6
++#	define pReturn	p7
++#ifdef CONFIG_ITANIUM
++#	define Nregs	10
++#else
++#	define Nregs	14
++#endif
++	alloc loc0=ar.pfs,2,Nregs-2,2,0
++	shr.u loc1=r18,9		// RNaTslots <= floor(dirtySize / (64*8))
++	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
++	;;
++	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
++	shladd in0=loc1,3,r17
++	mov in1=0
++	;;
++	TEXT_ALIGN(32)
++rse_clear_invalid:
++#ifdef CONFIG_ITANIUM
++	// cycle 0
++ { .mii
++	alloc loc0=ar.pfs,2,Nregs-2,2,0
++	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
++	add out0=-Nregs*8,in0
++}{ .mfb
++	add out1=1,in1			// increment recursion count
++	nop.f 0
++	nop.b 0				// can't do br.call here because of alloc (WAW on CFM)
++	;;
++}{ .mfi	// cycle 1
++	mov loc1=0
++	nop.f 0
++	mov loc2=0
++}{ .mib
++	mov loc3=0
++	mov loc4=0
++(pRecurse) br.call.sptk.many b0=rse_clear_invalid
++
++}{ .mfi	// cycle 2
++	mov loc5=0
++	nop.f 0
++	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
++}{ .mib
++	mov loc6=0
++	mov loc7=0
++(pReturn) br.ret.sptk.many b0
++}
++#else /* !CONFIG_ITANIUM */
++	alloc loc0=ar.pfs,2,Nregs-2,2,0
++	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
++	add out0=-Nregs*8,in0
++	add out1=1,in1			// increment recursion count
++	mov loc1=0
++	mov loc2=0
++	;;
++	mov loc3=0
++	mov loc4=0
++	mov loc5=0
++	mov loc6=0
++	mov loc7=0
++(pRecurse) br.call.sptk.few b0=rse_clear_invalid
++	;;
++	mov loc8=0
++	mov loc9=0
++	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
++	mov loc10=0
++	mov loc11=0
++(pReturn) br.ret.sptk.many b0
++#endif /* !CONFIG_ITANIUM */
++#	undef pRecurse
++#	undef pReturn
++	;;
++	alloc r17=ar.pfs,0,0,0,0	// drop current register frame
++	;;
++	loadrs
++	;;
++skip_rbs_switch:
++	mov ar.unat=r25		// M2
++(pKStk)	extr.u r22=r22,21,1	// I0 extract current value of psr.pp from r22
++(pLvSys)mov r19=r0		// A  clear r19 for leave_syscall, no-op otherwise
++	;;
++(pUStk)	mov ar.bspstore=r23	// M2
++(pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
++(pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
++	;;
++#ifdef CONFIG_XEN
++	movl r25=XSI_IPSR
++	;;
++	st8[r25]=r29,XSI_IFS-XSI_IPSR
++	;;
++#else
++	mov cr.ipsr=r29		// M2
++#endif
++	mov ar.pfs=r26		// I0
++(pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise
++
++#ifdef CONFIG_XEN
++(p9)	st8 [r25]=r30
++	;;
++	adds r25=XSI_IIP-XSI_IFS,r25
++	;;
++#else
++(p9)	mov cr.ifs=r30		// M2
++#endif
++	mov b0=r21		// I0
++(pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise
++
++	mov ar.fpsr=r20		// M2
++#ifdef CONFIG_XEN
++	st8	[r25]=r28
++#else
++	mov cr.iip=r28		// M2
++#endif
++	nop 0
++	;;
++(pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
++	nop 0
++(pLvSys)mov r2=r0
++
++	mov ar.rsc=r27		// M2
++	mov pr=r31,-1		// I0
++#ifdef CONFIG_XEN
++	;;
++	XEN_HYPER_RFI;
++#else
++	rfi			// B
++#endif
++
++	/*
++	 * On entry:
++	 *	r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
++	 *	r31 = current->thread_info->flags
++	 * On exit:
++	 *	p6 = TRUE if work-pending-check needs to be redone
++	 */
++.work_pending_syscall:
++	add r2=-8,r2
++	add r3=-8,r3
++	;;
++	st8 [r2]=r8
++	st8 [r3]=r10
++.work_pending:
++	tbit.nz p6,p0=r31,TIF_SIGDELAYED		// signal delayed from  MCA/INIT/NMI/PMI context?
++(p6)	br.cond.sptk.few .sigdelayed
++	;;
++	tbit.z p6,p0=r31,TIF_NEED_RESCHED		// current_thread_info()->need_resched==0?
++(p6)	br.cond.sptk.few .notify
++#ifdef CONFIG_PREEMPT
++(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
++	;;
++(pKStk) st4 [r20]=r21
++	ssm psr.i		// enable interrupts
++#endif
++	br.call.spnt.many rp=schedule
++.ret9:	cmp.eq p6,p0=r0,r0				// p6 <- 1
++#ifdef CONFIG_XEN
++	movl r2=XSI_PSR_I
++	;;
++	st4 [r2]=r0
++#else
++	rsm psr.i		// disable interrupts
++#endif
++	;;
++#ifdef CONFIG_PREEMPT
++(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
++	;;
++(pKStk)	st4 [r20]=r0		// preempt_count() <- 0
++#endif
++(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
++	br.cond.sptk.many .work_processed_kernel	// re-check
++
++.notify:
++(pUStk)	br.call.spnt.many rp=notify_resume_user
++.ret10:	cmp.ne p6,p0=r0,r0				// p6 <- 0
++(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
++	br.cond.sptk.many .work_processed_kernel	// don't re-check
++
++// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
++// it could not be delivered.  Deliver it now.  The signal might be for us and
++// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
++// signal.
++
++.sigdelayed:
++	br.call.sptk.many rp=do_sigdelayed
++	cmp.eq p6,p0=r0,r0				// p6 <- 1, always re-check
++(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
++	br.cond.sptk.many .work_processed_kernel	// re-check
++
++.work_pending_syscall_end:
++	adds r2=PT(R8)+16,r12
++	adds r3=PT(R10)+16,r12
++	;;
++	ld8 r8=[r2]
++	ld8 r10=[r3]
++	br.cond.sptk.many .work_processed_syscall	// re-check
++
++#ifdef CONFIG_XEN
++END(xen_leave_kernel)
++#else
++END(ia64_leave_kernel)
++#endif
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenhpski.c linux-2.6.12-xen/arch/ia64/xen/xenhpski.c
+--- pristine-linux-2.6.12/arch/ia64/xen/xenhpski.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/xenhpski.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,19 @@
++
++extern unsigned long xen_get_cpuid(int);
++
++int
++running_on_sim(void)
++{
++	int i;
++	long cpuid[6];
++
++	for (i = 0; i < 5; ++i)
++		cpuid[i] = xen_get_cpuid(i);
++	if ((cpuid[0] & 0xff) != 'H') return 0;
++	if ((cpuid[3] & 0xff) != 0x4) return 0;
++	if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
++	if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
++	if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
++	return 1;
++}
++
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenivt.S linux-2.6.12-xen/arch/ia64/xen/xenivt.S
+--- pristine-linux-2.6.12/arch/ia64/xen/xenivt.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/xenivt.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,2044 @@
++/*
++ * arch/ia64/xen/ivt.S
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co
++ *	Dan Magenheimer <dan.magenheimer at hp.com>
++ */
++/*
++ * This file defines the interruption vector table used by the CPU.
++ * It does not include one entry per possible cause of interruption.
++ *
++ * The first 20 entries of the table contain 64 bundles each while the
++ * remaining 48 entries contain only 16 bundles each.
++ *
++ * The 64 bundles are used to allow inlining the whole handler for critical
++ * interruptions like TLB misses.
++ *
++ *  For each entry, the comment is as follows:
++ *
++ *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
++ *  entry offset ----/     /         /                  /          /
++ *  entry number ---------/         /                  /          /
++ *  size of the entry -------------/                  /          /
++ *  vector name -------------------------------------/          /
++ *  interruptions triggering this vector ----------------------/
++ *
++ * The table is 32KB in size and must be aligned on 32KB boundary.
++ * (The CPU ignores the 15 lower bits of the address)
++ *
++ * Table is based upon EAS2.6 (Oct 1999)
++ */
++
++#include <linux/config.h>
++
++#include <asm/asmmacro.h>
++#include <asm/break.h>
++#include <asm/ia32.h>
++#include <asm/kregs.h>
++#include <asm/offsets.h>
++#include <asm/pgtable.h>
++#include <asm/processor.h>
++#include <asm/ptrace.h>
++#include <asm/system.h>
++#include <asm/thread_info.h>
++#include <asm/unistd.h>
++#include <asm/errno.h>
++
++#ifdef CONFIG_XEN
++#define ia64_ivt xen_ivt
++#endif
++
++#if 1
++# define PSR_DEFAULT_BITS	psr.ac
++#else
++# define PSR_DEFAULT_BITS	0
++#endif
++
++#if 0
++  /*
++   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
++   * needed for something else before enabling this...
++   */
++# define DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;mov ar.k2=r16
++#else
++# define DBG_FAULT(i)
++#endif
++
++#define MINSTATE_VIRT	/* needed by minstate.h */
++#include "xenminstate.h"
++
++#define FAULT(n)									\
++	mov r31=pr;									\
++	mov r19=n;;			/* prepare to save predicates */		\
++	br.sptk.many dispatch_to_fault_handler
++
++	.section .text.ivt,"ax"
++
++	.align 32768	// align on 32KB boundary
++	.global ia64_ivt
++ia64_ivt:
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
++ENTRY(vhpt_miss)
++	DBG_FAULT(0)
++	/*
++	 * The VHPT vector is invoked when the TLB entry for the virtual page table
++	 * is missing.  This happens only as a result of a previous
++	 * (the "original") TLB miss, which may either be caused by an instruction
++	 * fetch or a data access (or non-access).
++	 *
++	 * What we do here is normal TLB miss handling for the _original_ miss, followed
++	 * by inserting the TLB entry for the virtual page table page that the VHPT
++	 * walker was attempting to access.  The latter gets inserted as long
++	 * as both L1 and L2 have valid mappings for the faulting address.
++	 * The TLB entry for the original miss gets inserted only if
++	 * the L3 entry indicates that the page is present.
++	 *
++	 * do_page_fault gets invoked in the following cases:
++	 *	- the faulting virtual address uses unimplemented address bits
++	 *	- the faulting virtual address has no L1, L2, or L3 mapping
++	 */
++#ifdef CONFIG_XEN
++	movl r16=XSI_IFA
++	;;
++	ld8 r16=[r16]
++#ifdef CONFIG_HUGETLB_PAGE
++	movl r18=PAGE_SHIFT
++	movl r25=XSI_ITIR
++	;;
++	ld8 r25=[r25]
++#endif
++	;;
++#else
++	mov r16=cr.ifa				// get address that caused the TLB miss
++#ifdef CONFIG_HUGETLB_PAGE
++	movl r18=PAGE_SHIFT
++	mov r25=cr.itir
++#endif
++#endif
++	;;
++#ifdef CONFIG_XEN
++	XEN_HYPER_RSM_PSR_DT;
++#else
++	rsm psr.dt				// use physical addressing for data
++#endif
++	mov r31=pr				// save the predicate registers
++	mov r19=IA64_KR(PT_BASE)		// get page table base address
++	shl r21=r16,3				// shift bit 60 into sign bit
++	shr.u r17=r16,61			// get the region number into r17
++	;;
++	shr r22=r21,3
++#ifdef CONFIG_HUGETLB_PAGE
++	extr.u r26=r25,2,6
++	;;
++	cmp.ne p8,p0=r18,r26
++	sub r27=r26,r18
++	;;
++(p8)	dep r25=r18,r25,2,6
++(p8)	shr r22=r22,r27
++#endif
++	;;
++	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
++	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
++	;;
++(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
++
++	srlz.d
++	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir
++
++	.pred.rel "mutex", p6, p7
++(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
++(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
++	;;
++(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
++(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
++	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
++	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
++	;;
++	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
++	;;
++(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
++	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
++	;;
++(p7)	ld8 r20=[r17]				// fetch the L2 entry (may be 0)
++	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
++	;;
++(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L2 entry NULL?
++	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
++	;;
++#ifdef CONFIG_XEN
++(p7)	ld8 r18=[r21]				// read the L3 PTE
++	movl r19=XSI_ISR
++	;;
++	ld8 r19=[r19]
++	;;
++(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
++	movl r22=XSI_IHA
++	;;
++	ld8 r22=[r22]
++	;;
++#else
++(p7)	ld8 r18=[r21]				// read the L3 PTE
++	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
++	;;
++(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
++	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
++	;;					// avoid RAW on p7
++#endif
++(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
++	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
++	;;
++#ifdef CONFIG_XEN
++	mov r24=r8
++	mov r8=r18
++	;;
++(p10)	XEN_HYPER_ITC_D
++	;;
++(p11)	XEN_HYPER_ITC_I
++	;;
++	mov r8=r24
++	;;
++(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
++	;;
++	movl r24=XSI_IFA
++	;;
++	st8 [r24]=r22
++	;;
++#else
++(p10)	itc.i r18				// insert the instruction TLB entry
++(p11)	itc.d r18				// insert the data TLB entry
++(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
++	mov cr.ifa=r22
++#endif
++
++#ifdef CONFIG_HUGETLB_PAGE
++(p8)	mov cr.itir=r25				// change to default page-size for VHPT
++#endif
++
++	/*
++	 * Now compute and insert the TLB entry for the virtual page table.  We never
++	 * execute in a page table page so there is no need to set the exception deferral
++	 * bit.
++	 */
++	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
++	;;
++#ifdef CONFIG_XEN
++(p7)	mov r25=r8
++(p7)	mov r8=r24
++	;;
++(p7)	XEN_HYPER_ITC_D
++	;;
++(p7)	mov r8=r25
++	;;
++#else
++(p7)	itc.d r24
++#endif
++	;;
++#ifdef CONFIG_SMP
++	/*
++	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
++	 * cannot possibly affect the following loads:
++	 */
++	dv_serialize_data
++
++	/*
++	 * Re-check L2 and L3 pagetable.  If they changed, we may have received a ptc.g
++	 * between reading the pagetable and the "itc".  If so, flush the entry we
++	 * inserted and retry.
++	 */
++	ld8 r25=[r21]				// read L3 PTE again
++	ld8 r26=[r17]				// read L2 entry again
++	;;
++	cmp.ne p6,p7=r26,r20			// did L2 entry change
++	mov r27=PAGE_SHIFT<<2
++	;;
++(p6)	ptc.l r22,r27				// purge PTE page translation
++(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L3 PTE change
++	;;
++(p6)	ptc.l r16,r27				// purge translation
++#endif
++
++	mov pr=r31,-1				// restore predicate registers
++#ifdef CONFIG_XEN
++	XEN_HYPER_RFI;
++#else
++	rfi
++#endif
++END(vhpt_miss)
++
++	.org ia64_ivt+0x400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
++ENTRY(itlb_miss)
++	DBG_FAULT(1)
++	/*
++	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
++	 * page table.  If a nested TLB miss occurs, we switch into physical
++	 * mode, walk the page table, and then re-execute the L3 PTE read
++	 * and go on normally after that.
++	 */
++#ifdef CONFIG_XEN
++	movl r16=XSI_IFA
++	;;
++	ld8 r16=[r16]
++#else
++	mov r16=cr.ifa				// get virtual address
++#endif
++	mov r29=b0				// save b0
++	mov r31=pr				// save predicates
++.itlb_fault:
++#ifdef CONFIG_XEN
++	movl r17=XSI_IHA
++	;;
++	ld8 r17=[r17]				// get virtual address of L3 PTE
++#else
++	mov r17=cr.iha				// get virtual address of L3 PTE
++#endif
++	movl r30=1f				// load nested fault continuation point
++	;;
++1:	ld8 r18=[r17]				// read L3 PTE
++	;;
++	mov b0=r29
++	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
++(p6)	br.cond.spnt page_fault
++	;;
++#ifdef CONFIG_XEN
++	mov r19=r8
++	mov r8=r18
++	;;
++	XEN_HYPER_ITC_I
++	;;
++	mov r8=r19
++#else
++	itc.i r18
++#endif
++	;;
++#ifdef CONFIG_SMP
++	/*
++	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
++	 * cannot possibly affect the following loads:
++	 */
++	dv_serialize_data
++
++	ld8 r19=[r17]				// read L3 PTE again and see if same
++	mov r20=PAGE_SHIFT<<2			// setup page size for purge
++	;;
++	cmp.ne p7,p0=r18,r19
++	;;
++(p7)	ptc.l r16,r20
++#endif
++	mov pr=r31,-1
++#ifdef CONFIG_XEN
++	XEN_HYPER_RFI;
++#else
++	rfi
++#endif
++END(itlb_miss)
++
++	.org ia64_ivt+0x0800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
++ENTRY(dtlb_miss)
++	DBG_FAULT(2)
++	/*
++	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
++	 * page table.  If a nested TLB miss occurs, we switch into physical
++	 * mode, walk the page table, and then re-execute the L3 PTE read
++	 * and go on normally after that.
++	 */
++#ifdef CONFIG_XEN
++	movl r16=XSI_IFA
++	;;
++	ld8 r16=[r16]
++#else
++	mov r16=cr.ifa				// get virtual address
++#endif
++	mov r29=b0				// save b0
++	mov r31=pr				// save predicates
++dtlb_fault:
++#ifdef CONFIG_XEN
++	movl r17=XSI_IHA
++	;;
++	ld8 r17=[r17]				// get virtual address of L3 PTE
++#else
++	mov r17=cr.iha				// get virtual address of L3 PTE
++#endif
++	movl r30=1f				// load nested fault continuation point
++	;;
++1:	ld8 r18=[r17]				// read L3 PTE
++	;;
++	mov b0=r29
++	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
++(p6)	br.cond.spnt page_fault
++	;;
++#ifdef CONFIG_XEN
++	mov r19=r8
++	mov r8=r18
++	;;
++	XEN_HYPER_ITC_D
++	;;
++	mov r8=r19
++	;;
++#else
++	itc.d r18
++#endif
++	;;
++#ifdef CONFIG_SMP
++	/*
++	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
++	 * cannot possibly affect the following loads:
++	 */
++	dv_serialize_data
++
++	ld8 r19=[r17]				// read L3 PTE again and see if same
++	mov r20=PAGE_SHIFT<<2			// setup page size for purge
++	;;
++	cmp.ne p7,p0=r18,r19
++	;;
++(p7)	ptc.l r16,r20
++#endif
++	mov pr=r31,-1
++#ifdef CONFIG_XEN
++	XEN_HYPER_RFI;
++#else
++	rfi
++#endif
++END(dtlb_miss)
++
++	.org ia64_ivt+0x0c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
++ENTRY(alt_itlb_miss)
++	DBG_FAULT(3)
++#ifdef CONFIG_XEN
++	movl r31=XSI_IPSR
++	;;
++	ld8 r21=[r31],XSI_IFA-XSI_IPSR	// get ipsr, point to ifa
++	movl r17=PAGE_KERNEL
++	;;
++	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
++	;;
++	ld8 r16=[r31]		// get ifa
++	mov r31=pr
++	;;
++#else
++	mov r16=cr.ifa		// get address that caused the TLB miss
++	movl r17=PAGE_KERNEL
++	mov r21=cr.ipsr
++	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
++	mov r31=pr
++	;;
++#endif
++#ifdef CONFIG_DISABLE_VHPT
++	shr.u r22=r16,61			// get the region number into r22
++	;;
++	cmp.gt p8,p0=6,r22			// user mode
++	;;
++#ifndef CONFIG_XEN
++(p8)	thash r17=r16
++	;;
++(p8)	mov cr.iha=r17
++#endif
++(p8)	mov r29=b0				// save b0
++(p8)	br.cond.dptk .itlb_fault
++#endif
++	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
++	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
++	shr.u r18=r16,57	// move address bit 61 to bit 4
++	;;
++	andcm r18=0x10,r18	// bit 4=~address-bit(61)
++	cmp.ne p8,p0=r0,r23	// psr.cpl != 0?
++	or r19=r17,r19		// insert PTE control bits into r19
++	;;
++	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
++(p8)	br.cond.spnt page_fault
++	;;
++#ifdef CONFIG_XEN
++	mov r18=r8
++	mov r8=r19
++	;;
++	XEN_HYPER_ITC_I
++	;;
++	mov r8=r18
++	;;
++	mov pr=r31,-1
++	;;
++	XEN_HYPER_RFI;
++#else
++	itc.i r19		// insert the TLB entry
++	mov pr=r31,-1
++	rfi
++#endif
++END(alt_itlb_miss)
++
++	.org ia64_ivt+0x1000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
++ENTRY(alt_dtlb_miss)
++	DBG_FAULT(4)
++#ifdef CONFIG_XEN
++	movl r31=XSI_IPSR
++	;;
++	ld8 r21=[r31],XSI_ISR-XSI_IPSR	// get ipsr, point to isr
++	movl r17=PAGE_KERNEL
++	;;
++	ld8 r20=[r31],XSI_IFA-XSI_ISR	// get isr, point to ifa
++	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
++	;;
++	ld8 r16=[r31]		// get ifa
++	mov r31=pr
++	;;
++#else
++	mov r16=cr.ifa		// get address that caused the TLB miss
++	movl r17=PAGE_KERNEL
++	mov r20=cr.isr
++	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
++	mov r21=cr.ipsr
++	mov r31=pr
++	;;
++#endif
++#ifdef CONFIG_DISABLE_VHPT
++	shr.u r22=r16,61			// get the region number into r22
++	;;
++	cmp.gt p8,p0=6,r22			// access to region 0-5
++	;;
++#ifndef CONFIG_XEN
++(p8)	thash r17=r16
++	;;
++(p8)	mov cr.iha=r17
++#endif
++(p8)	mov r29=b0				// save b0
++(p8)	br.cond.dptk dtlb_fault
++#endif
++	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
++	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
++	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
++	shr.u r18=r16,57			// move address bit 61 to bit 4
++	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
++	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
++	;;
++	andcm r18=0x10,r18	// bit 4=~address-bit(61)
++	cmp.ne p8,p0=r0,r23
++(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
++(p8)	br.cond.spnt page_fault
++
++	dep r21=-1,r21,IA64_PSR_ED_BIT,1
++	or r19=r19,r17		// insert PTE control bits into r19
++	;;
++	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
++(p6)	mov cr.ipsr=r21
++	;;
++#ifdef CONFIG_XEN
++(p7)	mov r18=r8
++(p7)	mov r8=r19
++	;;
++(p7)	XEN_HYPER_ITC_D
++	;;
++(p7)	mov r8=r18
++	;;
++	mov pr=r31,-1
++	;;
++	XEN_HYPER_RFI;
++#else
++(p7)	itc.d r19		// insert the TLB entry
++	mov pr=r31,-1
++	rfi
++#endif
++END(alt_dtlb_miss)
++
++	.org ia64_ivt+0x1400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
++ENTRY(nested_dtlb_miss)
++	/*
++	 * In the absence of kernel bugs, we get here when the virtually mapped linear
++	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
++	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
++	 * table is missing, a nested TLB miss fault is triggered and control is
++	 * transferred to this point.  When this happens, we lookup the pte for the
++	 * faulting address by walking the page table in physical mode and return to the
++	 * continuation point passed in register r30 (or call page_fault if the address is
++	 * not mapped).
++	 *
++	 * Input:	r16:	faulting address
++	 *		r29:	saved b0
++	 *		r30:	continuation address
++	 *		r31:	saved pr
++	 *
++	 * Output:	r17:	physical address of L3 PTE of faulting address
++	 *		r29:	saved b0
++	 *		r30:	continuation address
++	 *		r31:	saved pr
++	 *
++	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
++	 */
++#ifdef CONFIG_XEN
++	XEN_HYPER_RSM_PSR_DT;
++#else
++	rsm psr.dt				// switch to using physical data addressing
++#endif
++	mov r19=IA64_KR(PT_BASE)		// get the page table base address
++	shl r21=r16,3				// shift bit 60 into sign bit
++	;;
++	shr.u r17=r16,61			// get the region number into r17
++	;;
++	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
++	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of faulting address
++	;;
++(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
++
++	srlz.d
++	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir
++
++	.pred.rel "mutex", p6, p7
++(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
++(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
++	;;
++(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
++(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
++	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
++	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
++	;;
++	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
++	;;
++(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
++	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
++	;;
++(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
++	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
++	;;
++(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
++	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
++(p6)	br.cond.spnt page_fault
++	mov b0=r30
++	br.sptk.many b0				// return to continuation point
++END(nested_dtlb_miss)
++
++	.org ia64_ivt+0x1800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
++ENTRY(ikey_miss)
++	DBG_FAULT(6)
++	FAULT(6)
++END(ikey_miss)
++
++	//-----------------------------------------------------------------------------------
++	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
++ENTRY(page_fault)
++#ifdef CONFIG_XEN
++	XEN_HYPER_SSM_PSR_DT;
++#else
++	ssm psr.dt
++	;;
++	srlz.i
++#endif
++	;;
++	SAVE_MIN_WITH_COVER
++	alloc r15=ar.pfs,0,0,3,0
++#ifdef CONFIG_XEN
++	movl r3=XSI_ISR
++	;;
++	ld8 out1=[r3],XSI_IFA-XSI_ISR		// get vcr.isr, point to ifa
++	;;
++	ld8 out0=[r3]				// get vcr.ifa
++	mov r14=1
++	;;
++	add r3=XSI_PSR_IC-XSI_IFA, r3		// point to vpsr.ic
++	;;
++	st4 [r3]=r14				// vpsr.ic = 1
++	adds r3=8,r2				// set up second base pointer
++	;;
++#else
++	mov out0=cr.ifa
++	mov out1=cr.isr
++	adds r3=8,r2				// set up second base pointer
++	;;
++	ssm psr.ic | PSR_DEFAULT_BITS
++	;;
++	srlz.i					// guarantee that interruption collection is on
++	;;
++#endif
++#ifdef CONFIG_XEN
++	br.cond.sptk.many	xen_page_fault
++	;;
++done_xen_page_fault:
++#endif
++(p15)	ssm psr.i				// restore psr.i
++	movl r14=ia64_leave_kernel
++	;;
++	SAVE_REST
++	mov rp=r14
++	;;
++	adds out2=16,r12			// out2 = pointer to pt_regs
++	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
++END(page_fault)
++
++	.org ia64_ivt+0x1c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
++ENTRY(dkey_miss)
++	DBG_FAULT(7)
++	FAULT(7)
++#ifdef CONFIG_XEN
++	// Leaving this code inline above results in an IVT section overflow
++	// There is no particular reason for this code to be here...
++xen_page_fault:
++(p15)	movl r3=XSI_PSR_I
++	;;
++(p15)	st4 [r3]=r14,XSI_PEND-XSI_PSR_I		// if (p15) vpsr.i = 1
++	mov r14=r0
++	;;
++(p15)	ld4 r14=[r3]				// if (pending_interrupts)
++	adds r3=8,r2				// re-set up second base pointer
++	;;
++(p15)	cmp.ne	p15,p0=r14,r0
++	;;
++	br.cond.sptk.many done_xen_page_fault
++	;;
++#endif
++END(dkey_miss)
++
++	.org ia64_ivt+0x2000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
++ENTRY(dirty_bit)
++	DBG_FAULT(8)
++	/*
++	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
++	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
++	 * we address it through the virtual page table.  Most likely, the TLB entry for
++	 * the relevant virtual page table page is still present in the TLB so we can
++	 * normally do this without additional TLB misses.  In case the necessary virtual
++	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
++	 * up the physical address of the L3 PTE and then continue at label 1 below.
++	 */
++#ifdef CONFIG_XEN
++	movl r16=XSI_IFA
++	;;
++	ld8 r16=[r16]
++	;;
++#else
++	mov r16=cr.ifa				// get the address that caused the fault
++#endif
++	movl r30=1f				// load continuation point in case of nested fault
++	;;
++#ifdef CONFIG_XEN
++#if 1
++	mov r18=r8;
++	mov r8=r16;
++	XEN_HYPER_THASH;;
++	mov r17=r8;
++	mov r8=r18;;
++#else
++	tak r17=r80				// "privified" thash
++#endif
++#else
++	thash r17=r16				// compute virtual address of L3 PTE
++#endif
++	mov r29=b0				// save b0 in case of nested fault
++	mov r31=pr				// save pr
++#ifdef CONFIG_SMP
++	mov r28=ar.ccv				// save ar.ccv
++	;;
++1:	ld8 r18=[r17]
++	;;					// avoid RAW on r18
++	mov ar.ccv=r18				// set compare value for cmpxchg
++	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
++	;;
++	cmpxchg8.acq r26=[r17],r25,ar.ccv
++	mov r24=PAGE_SHIFT<<2
++	;;
++	cmp.eq p6,p7=r26,r18
++	;;
++(p6)	itc.d r25				// install updated PTE
++	;;
++	/*
++	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
++	 * cannot possibly affect the following loads:
++	 */
++	dv_serialize_data
++
++	ld8 r18=[r17]				// read PTE again
++	;;
++	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
++	;;
++(p7)	ptc.l r16,r24
++	mov b0=r29				// restore b0
++	mov ar.ccv=r28
++#else
++	;;
++1:	ld8 r18=[r17]
++	;;					// avoid RAW on r18
++	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
++	mov b0=r29				// restore b0
++	;;
++	st8 [r17]=r18				// store back updated PTE
++	itc.d r18				// install updated PTE
++#endif
++	mov pr=r31,-1				// restore pr
++#ifdef CONFIG_XEN
++	XEN_HYPER_RFI;
++#else
++	rfi
++#endif
++END(dirty_bit)
++
++	.org ia64_ivt+0x2400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
++ENTRY(iaccess_bit)
++	DBG_FAULT(9)
++	// Like Entry 8, except for instruction access
++#ifdef CONFIG_XEN
++	movl r16=XSI_IFA
++	;;
++	ld8 r16=[r16]
++	;;
++#else
++	mov r16=cr.ifa				// get the address that caused the fault
++#endif
++	movl r30=1f				// load continuation point in case of nested fault
++	mov r31=pr				// save predicates
++#ifdef CONFIG_ITANIUM
++	/*
++	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
++	 */
++	mov r17=cr.ipsr
++	;;
++	mov r18=cr.iip
++	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
++	;;
++(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
++#endif /* CONFIG_ITANIUM */
++	;;
++#ifdef CONFIG_XEN
++#if 1
++	mov r18=r8;
++	mov r8=r16;
++	XEN_HYPER_THASH;;
++	mov r17=r8;
++	mov r8=r18;;
++#else
++	tak r17=r80				// "privified" thash
++#endif
++#else
++	thash r17=r16				// compute virtual address of L3 PTE
++#endif
++	mov r29=b0				// save b0 in case of nested fault
++#ifdef CONFIG_SMP
++	mov r28=ar.ccv				// save ar.ccv
++	;;
++1:	ld8 r18=[r17]
++	;;
++	mov ar.ccv=r18				// set compare value for cmpxchg
++	or r25=_PAGE_A,r18			// set the accessed bit
++	;;
++	cmpxchg8.acq r26=[r17],r25,ar.ccv
++	mov r24=PAGE_SHIFT<<2
++	;;
++	cmp.eq p6,p7=r26,r18
++	;;
++#ifdef CONFIG_XEN
++	mov r26=r8
++	mov r8=r25
++	;;
++(p6)	XEN_HYPER_ITC_I
++	;;
++	mov r8=r26
++	;;
++#else
++(p6)	itc.i r25				// install updated PTE
++#endif
++	;;
++	/*
++	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
++	 * cannot possibly affect the following loads:
++	 */
++	dv_serialize_data
++
++	ld8 r18=[r17]				// read PTE again
++	;;
++	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
++	;;
++(p7)	ptc.l r16,r24
++	mov b0=r29				// restore b0
++	mov ar.ccv=r28
++#else /* !CONFIG_SMP */
++	;;
++1:	ld8 r18=[r17]
++	;;
++	or r18=_PAGE_A,r18			// set the accessed bit
++	mov b0=r29				// restore b0
++	;;
++	st8 [r17]=r18				// store back updated PTE
++	itc.i r18				// install updated PTE
++#endif /* !CONFIG_SMP */
++	mov pr=r31,-1
++#ifdef CONFIG_XEN
++	XEN_HYPER_RFI;
++#else
++	rfi
++#endif
++END(iaccess_bit)
++
++	.org ia64_ivt+0x2800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
++ENTRY(daccess_bit)
++	DBG_FAULT(10)
++	// Like Entry 8, except for data access
++#ifdef CONFIG_XEN
++	movl r16=XSI_IFA
++	;;
++	ld8 r16=[r16]
++	;;
++#else
++	mov r16=cr.ifa				// get the address that caused the fault
++#endif
++	movl r30=1f				// load continuation point in case of nested fault
++	;;
++#ifdef CONFIG_XEN
++#if 1
++	mov r18=r8;
++	mov r8=r16;
++	XEN_HYPER_THASH;;
++	mov r17=r8;
++	mov r8=r18;;
++#else
++	tak r17=r80				// "privified" thash
++#endif
++#else
++	thash r17=r16				// compute virtual address of L3 PTE
++#endif
++	mov r31=pr
++	mov r29=b0				// save b0 in case of nested fault
++#ifdef CONFIG_SMP
++	mov r28=ar.ccv				// save ar.ccv
++	;;
++1:	ld8 r18=[r17]
++	;;					// avoid RAW on r18
++	mov ar.ccv=r18				// set compare value for cmpxchg
++	or r25=_PAGE_A,r18			// set the accessed bit
++	;;
++	cmpxchg8.acq r26=[r17],r25,ar.ccv
++	mov r24=PAGE_SHIFT<<2
++	;;
++	cmp.eq p6,p7=r26,r18
++	;;
++#ifdef CONFIG_XEN
++	mov r26=r8
++	mov r8=r25
++	;;
++(p6)	XEN_HYPER_ITC_D
++	;;
++	mov r8=r26
++	;;
++#else
++(p6)	itc.d r25				// install updated PTE
++#endif
++	/*
++	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
++	 * cannot possibly affect the following loads:
++	 */
++	dv_serialize_data
++	;;
++	ld8 r18=[r17]				// read PTE again
++	;;
++	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
++	;;
++(p7)	ptc.l r16,r24
++	mov ar.ccv=r28
++#else
++	;;
++1:	ld8 r18=[r17]
++	;;					// avoid RAW on r18
++	or r18=_PAGE_A,r18			// set the accessed bit
++	;;
++	st8 [r17]=r18				// store back updated PTE
++	itc.d r18				// install updated PTE
++#endif
++	mov b0=r29				// restore b0
++	mov pr=r31,-1
++#ifdef CONFIG_XEN
++	XEN_HYPER_RFI;
++#else
++	rfi
++#endif
++END(daccess_bit)
++
++	.org ia64_ivt+0x2c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
++ENTRY(break_fault)
++	/*
++	 * The streamlined system call entry/exit paths only save/restore the initial part
++	 * of pt_regs.  This implies that the callers of system-calls must adhere to the
++	 * normal procedure calling conventions.
++	 *
++	 *   Registers to be saved & restored:
++	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
++	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
++	 * 	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
++	 *   Registers to be restored only:
++	 * 	r8-r11: output value from the system call.
++	 *
++	 * During system call exit, scratch registers (including r15) are modified/cleared
++	 * to prevent leaking bits from kernel to user level.
++	 */
++	DBG_FAULT(11)
++	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
++#ifdef CONFIG_XEN
++	movl r31=XSI_IPSR
++	;;
++	ld8 r29=[r31],XSI_IIP-XSI_IPSR		// get ipsr, point to iip
++	mov r18=__IA64_BREAK_SYSCALL
++	mov r21=ar.fpsr
++	;;
++	ld8 r28=[r31],XSI_IIM-XSI_IIP		// get iip, point to iim
++	mov r19=b6
++	mov r25=ar.unat
++	;;
++	ld8 r17=[r31]				// get iim
++	mov r27=ar.rsc
++	mov r26=ar.pfs
++	;;
++#else
++	mov r17=cr.iim
++	mov r18=__IA64_BREAK_SYSCALL
++	mov r21=ar.fpsr
++	mov r29=cr.ipsr
++	mov r19=b6
++	mov r25=ar.unat
++	mov r27=ar.rsc
++	mov r26=ar.pfs
++	mov r28=cr.iip
++#endif
++	mov r31=pr				// prepare to save predicates
++	mov r20=r1
++	;;
++	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
++	cmp.eq p0,p7=r18,r17			// is this a system call? (p7 <- false, if so)
++(p7)	br.cond.spnt non_syscall
++	;;
++	ld1 r17=[r16]				// load current->thread.on_ustack flag
++	st1 [r16]=r0				// clear current->thread.on_ustack flag
++	add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16	// set r1 for MINSTATE_START_SAVE_MIN_VIRT
++	;;
++	invala
++
++	/* adjust return address so we skip over the break instruction: */
++
++	extr.u r8=r29,41,2			// extract ei field from cr.ipsr
++	;;
++	cmp.eq p6,p7=2,r8			// ipsr.ei==2?
++	mov r2=r1				// setup r2 for ia64_syscall_setup
++	;;
++(p6)	mov r8=0				// clear ei to 0
++(p6)	adds r28=16,r28				// switch cr.iip to next bundle cr.ipsr.ei wrapped
++(p7)	adds r8=1,r8				// increment ei to next slot
++	;;
++	cmp.eq pKStk,pUStk=r0,r17		// are we in kernel mode already?
++	dep r29=r8,r29,41,2			// insert new ei into cr.ipsr
++	;;
++
++	// switch from user to kernel RBS:
++	MINSTATE_START_SAVE_MIN_VIRT
++	br.call.sptk.many b7=ia64_syscall_setup
++	;;
++#ifdef CONFIG_XEN
++	mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;;
++#else
++	MINSTATE_END_SAVE_MIN_VIRT		// switch to bank 1
++#endif
++#ifdef CONFIG_XEN
++	movl r3=XSI_PSR_IC
++	mov r16=1
++	;;
++#if 1
++	st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC	// vpsr.ic = 1
++	;;
++(p15)	st4 [r3]=r16,XSI_PEND-XSI_PSR_I		// if (p15) vpsr.i = 1
++	mov r16=r0
++	;;
++(p15)	ld4 r16=[r3]				// if (pending_interrupts)
++	;;
++	cmp.ne	p6,p0=r16,r0
++	;;
++(p6)	ssm	psr.i				//   do a real ssm psr.i
++	;;
++#else
++//	st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC	// vpsr.ic = 1
++	adds r3=XSI_PSR_I-XSI_PSR_IC,r3		// SKIP vpsr.ic = 1
++	;;
++(p15)	st4 [r3]=r16,XSI_PEND-XSI_PSR_I		// if (p15) vpsr.i = 1
++	mov r16=r0
++	;;
++(p15)	ld4 r16=[r3]				// if (pending_interrupts)
++	;;
++	cmp.ne	p6,p0=r16,r0
++	;;
++//(p6)	ssm	psr.i				//   do a real ssm psr.i
++//(p6)	XEN_HYPER_SSM_I;
++(p6)	break 0x7;
++	;;
++#endif
++	mov r3=NR_syscalls - 1
++	;;
++#else
++	ssm psr.ic | PSR_DEFAULT_BITS
++	;;
++	srlz.i					// guarantee that interruption collection is on
++	mov r3=NR_syscalls - 1
++	;;
++(p15)	ssm psr.i				// restore psr.i
++#endif
++	// p10==true means out registers are more than 8 or r15's Nat is true
++(p10)	br.cond.spnt.many ia64_ret_from_syscall
++	;;
++	movl r16=sys_call_table
++
++	adds r15=-1024,r15			// r15 contains the syscall number---subtract 1024
++	movl r2=ia64_ret_from_syscall
++	;;
++	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
++	cmp.leu p6,p7=r15,r3			// (syscall > 0 && syscall < 1024 + NR_syscalls) ?
++	mov rp=r2				// set the real return addr
++	;;
++(p6)	ld8 r20=[r20]				// load address of syscall entry point
++(p7)	movl r20=sys_ni_syscall
++
++	add r2=TI_FLAGS+IA64_TASK_SIZE,r13
++	;;
++	ld4 r2=[r2]				// r2 = current_thread_info()->flags
++	;;
++	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
++	;;
++	cmp.eq p8,p0=r2,r0
++	mov b6=r20
++	;;
++(p8)	br.call.sptk.many b6=b6			// ignore this return addr
++	br.cond.sptk ia64_trace_syscall
++	// NOT REACHED
++END(break_fault)
++
++	.org ia64_ivt+0x3000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
++ENTRY(interrupt)
++	DBG_FAULT(12)
++	mov r31=pr		// prepare to save predicates
++	;;
++	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
++#ifdef CONFIG_XEN
++	movl r3=XSI_PSR_IC
++	mov r14=1
++	;;
++	st4 [r3]=r14
++#else
++	ssm psr.ic | PSR_DEFAULT_BITS
++#endif
++	;;
++	adds r3=8,r2		// set up second base pointer for SAVE_REST
++	srlz.i			// ensure everybody knows psr.ic is back on
++	;;
++	SAVE_REST
++	;;
++	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
++#ifdef CONFIG_XEN
++	;;
++	br.call.sptk.many rp=xen_get_ivr
++	;;
++	mov out0=r8		// pass cr.ivr as first arg
++#else
++	mov out0=cr.ivr		// pass cr.ivr as first arg
++#endif
++	add out1=16,sp		// pass pointer to pt_regs as second arg
++	;;
++	srlz.d			// make sure we see the effect of cr.ivr
++	movl r14=ia64_leave_kernel
++	;;
++	mov rp=r14
++	br.call.sptk.many b6=ia64_handle_irq
++END(interrupt)
++
++	.org ia64_ivt+0x3400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x3400 Entry 13 (size 64 bundles) Reserved
++	DBG_FAULT(13)
++	FAULT(13)
++
++	.org ia64_ivt+0x3800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x3800 Entry 14 (size 64 bundles) Reserved
++	DBG_FAULT(14)
++	FAULT(14)
++
++	/*
++	 * There is no particular reason for this code to be here, other than that
++	 * there happens to be space here that would go unused otherwise.  If this
++	 * fault ever gets "unreserved", simply move the following code to a more
++	 * suitable spot...
++	 *
++	 * ia64_syscall_setup() is a separate subroutine so that it can
++	 *	allocate stacked registers so it can safely demine any
++	 *	potential NaT values from the input registers.
++	 *
++	 * On entry:
++	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
++	 *	-  r1: stack pointer
++	 *	-  r2: current task pointer
++	 *	-  r3: preserved
++	 *	- r11: original contents (saved ar.pfs to be saved)
++	 *	- r12: original contents (sp to be saved)
++	 *	- r13: original contents (tp to be saved)
++	 *	- r15: original contents (syscall # to be saved)
++	 *	- r18: saved bsp (after switching to kernel stack)
++	 *	- r19: saved b6
++	 *	- r20: saved r1 (gp)
++	 *	- r21: saved ar.fpsr
++	 *	- r22: kernel's register backing store base (krbs_base)
++	 *	- r23: saved ar.bspstore
++	 *	- r24: saved ar.rnat
++	 *	- r25: saved ar.unat
++	 *	- r26: saved ar.pfs
++	 *	- r27: saved ar.rsc
++	 *	- r28: saved cr.iip
++	 *	- r29: saved cr.ipsr
++	 *	- r31: saved pr
++	 *	-  b0: original contents (to be saved)
++	 * On exit:
++	 *	- executing on bank 1 registers
++	 *	- psr.ic enabled, interrupts restored
++	 *	-  p10: TRUE if syscall is invoked with more than 8 out
++	 *		registers or r15's Nat is true
++	 *	-  r1: kernel's gp
++	 *	-  r3: preserved (same as on entry)
++	 *	-  r8: -EINVAL if p10 is true
++	 *	- r12: points to kernel stack
++	 *	- r13: points to current task
++	 *	- p15: TRUE if interrupts need to be re-enabled
++	 *	- ar.fpsr: set to kernel settings
++	 */
++#ifndef CONFIG_XEN
++GLOBAL_ENTRY(ia64_syscall_setup)
++#if PT(B6) != 0
++# error This code assumes that b6 is the first field in pt_regs.
++#endif
++	st8 [r1]=r19				// save b6
++	add r16=PT(CR_IPSR),r1			// initialize first base pointer
++	add r17=PT(R11),r1			// initialize second base pointer
++	;;
++	alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
++	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
++	tnat.nz p8,p0=in0
++
++	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
++	tnat.nz p9,p0=in1
++(pKStk)	mov r18=r0				// make sure r18 isn't NaT
++	;;
++
++	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
++	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
++	mov r28=b0				// save b0 (2 cyc)
++	;;
++
++	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
++	dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
++(p8)	mov in0=-1
++	;;
++
++	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
++	extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
++	and r8=0x7f,r19		// A		// get sof of ar.pfs
++
++	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
++	tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
++(p9)	mov in1=-1
++	;;
++
++(pUStk) sub r18=r18,r22				// r18=RSE.ndirty*8
++	tnat.nz p10,p0=in2
++	add r11=8,r11
++	;;
++(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
++(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
++	tnat.nz p11,p0=in3
++	;;
++(p10)	mov in2=-1
++	tnat.nz p12,p0=in4				// [I0]
++(p11)	mov in3=-1
++	;;
++(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
++(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
++	shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
++	;;
++	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
++	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
++	tnat.nz p13,p0=in5				// [I0]
++	;;
++	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
++	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
++(p12)	mov in4=-1
++	;;
++
++.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
++.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
++(p13)	mov in5=-1
++	;;
++	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
++	tnat.nz p14,p0=in6
++	cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
++	;;
++	stf8 [r16]=f1		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
++(p9)	tnat.nz p10,p0=r15
++	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)
++
++	st8.spill [r17]=r15			// save r15
++	tnat.nz p8,p0=in7
++	nop.i 0
++
++	mov r13=r2				// establish `current'
++	movl r1=__gp				// establish kernel global pointer
++	;;
++(p14)	mov in6=-1
++(p8)	mov in7=-1
++	nop.i 0
++
++	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
++	movl r17=FPSR_DEFAULT
++	;;
++	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
++(p10)	mov r8=-EINVAL
++	br.ret.sptk.many b7
++END(ia64_syscall_setup)
++#endif
++
++	.org ia64_ivt+0x3c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x3c00 Entry 15 (size 64 bundles) Reserved
++	DBG_FAULT(15)
++	FAULT(15)
++
++	/*
++	 * Squatting in this space ...
++	 *
++	 * This special case dispatcher for illegal operation faults allows preserved
++	 * registers to be modified through a callback function (asm only) that is handed
++	 * back from the fault handler in r8. Up to three arguments can be passed to the
++	 * callback function by returning an aggregate with the callback as its first
++	 * element, followed by the arguments.
++	 */
++ENTRY(dispatch_illegal_op_fault)
++	SAVE_MIN_WITH_COVER
++	ssm psr.ic | PSR_DEFAULT_BITS
++	;;
++	srlz.i		// guarantee that interruption collection is on
++	;;
++(p15)	ssm psr.i	// restore psr.i
++	adds r3=8,r2	// set up second base pointer for SAVE_REST
++	;;
++	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
++	mov out0=ar.ec
++	;;
++	SAVE_REST
++	;;
++	br.call.sptk.many rp=ia64_illegal_op_fault
++.ret0:	;;
++	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
++	mov out0=r9
++	mov out1=r10
++	mov out2=r11
++	movl r15=ia64_leave_kernel
++	;;
++	mov rp=r15
++	mov b6=r8
++	;;
++	cmp.ne p6,p0=0,r8
++(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
++	br.sptk.many ia64_leave_kernel
++END(dispatch_illegal_op_fault)
++
++	.org ia64_ivt+0x4000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x4000 Entry 16 (size 64 bundles) Reserved
++	DBG_FAULT(16)
++	FAULT(16)
++
++	.org ia64_ivt+0x4400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x4400 Entry 17 (size 64 bundles) Reserved
++	DBG_FAULT(17)
++	FAULT(17)
++
++ENTRY(non_syscall)
++	SAVE_MIN_WITH_COVER
++
++	// There is no particular reason for this code to be here, other than that
++	// there happens to be space here that would go unused otherwise.  If this
++	// fault ever gets "unreserved", simply move the following code to a more
++	// suitable spot...
++
++	alloc r14=ar.pfs,0,0,2,0
++	mov out0=cr.iim
++	add out1=16,sp
++	adds r3=8,r2			// set up second base pointer for SAVE_REST
++
++	ssm psr.ic | PSR_DEFAULT_BITS
++	;;
++	srlz.i				// guarantee that interruption collection is on
++	;;
++(p15)	ssm psr.i			// restore psr.i
++	movl r15=ia64_leave_kernel
++	;;
++	SAVE_REST
++	mov rp=r15
++	;;
++	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
++END(non_syscall)
++
++	.org ia64_ivt+0x4800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x4800 Entry 18 (size 64 bundles) Reserved
++	DBG_FAULT(18)
++	FAULT(18)
++
++	/*
++	 * There is no particular reason for this code to be here, other than that
++	 * there happens to be space here that would go unused otherwise.  If this
++	 * fault ever gets "unreserved", simply move the following code to a more
++	 * suitable spot...
++	 */
++
++ENTRY(dispatch_unaligned_handler)
++	SAVE_MIN_WITH_COVER
++	;;
++	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
++	mov out0=cr.ifa
++	adds out1=16,sp
++
++	ssm psr.ic | PSR_DEFAULT_BITS
++	;;
++	srlz.i					// guarantee that interruption collection is on
++	;;
++(p15)	ssm psr.i				// restore psr.i
++	adds r3=8,r2				// set up second base pointer
++	;;
++	SAVE_REST
++	movl r14=ia64_leave_kernel
++	;;
++	mov rp=r14
++	br.sptk.many ia64_prepare_handle_unaligned
++END(dispatch_unaligned_handler)
++
++	.org ia64_ivt+0x4c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x4c00 Entry 19 (size 64 bundles) Reserved
++	DBG_FAULT(19)
++	FAULT(19)
++
++	/*
++	 * There is no particular reason for this code to be here, other than that
++	 * there happens to be space here that would go unused otherwise.  If this
++	 * fault ever gets "unreserved", simply move the following code to a more
++	 * suitable spot...
++	 */
++
++ENTRY(dispatch_to_fault_handler)
++	/*
++	 * Input:
++	 *	psr.ic:	off
++	 *	r19:	fault vector number (e.g., 24 for General Exception)
++	 *	r31:	contains saved predicates (pr)
++	 */
++	SAVE_MIN_WITH_COVER_R19
++	alloc r14=ar.pfs,0,0,5,0
++	mov out0=r15
++#ifdef CONFIG_XEN
++	movl out1=XSI_ISR
++	;;
++	adds out2=XSI_IFA-XSI_ISR,out1
++	adds out3=XSI_IIM-XSI_ISR,out1
++	adds out4=XSI_ITIR-XSI_ISR,out1
++	;;
++	ld8 out1=[out1]
++	ld8 out2=[out2]
++	ld8 out3=[out3]
++	ld8 out4=[out4]
++	;;
++#else
++	mov out1=cr.isr
++	mov out2=cr.ifa
++	mov out3=cr.iim
++	mov out4=cr.itir
++	;;
++#endif
++	ssm psr.ic | PSR_DEFAULT_BITS
++	;;
++	srlz.i					// guarantee that interruption collection is on
++	;;
++(p15)	ssm psr.i				// restore psr.i
++	adds r3=8,r2				// set up second base pointer for SAVE_REST
++	;;
++	SAVE_REST
++	movl r14=ia64_leave_kernel
++	;;
++	mov rp=r14
++	br.call.sptk.many b6=ia64_fault
++END(dispatch_to_fault_handler)
++
++//
++// --- End of long entries, Beginning of short entries
++//
++
++	.org ia64_ivt+0x5000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
++ENTRY(page_not_present)
++	DBG_FAULT(20)
++	mov r16=cr.ifa
++	rsm psr.dt
++	/*
++	 * The Linux page fault handler doesn't expect non-present pages to be in
++	 * the TLB.  Flush the existing entry now, so we meet that expectation.
++	 */
++	mov r17=PAGE_SHIFT<<2
++	;;
++	ptc.l r16,r17
++	;;
++	mov r31=pr
++	srlz.d
++	br.sptk.many page_fault
++END(page_not_present)
++
++	.org ia64_ivt+0x5100
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
++ENTRY(key_permission)
++	DBG_FAULT(21)
++	mov r16=cr.ifa
++	rsm psr.dt
++	mov r31=pr
++	;;
++	srlz.d
++	br.sptk.many page_fault
++END(key_permission)
++
++	.org ia64_ivt+0x5200
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
++ENTRY(iaccess_rights)
++	DBG_FAULT(22)
++	mov r16=cr.ifa
++	rsm psr.dt
++	mov r31=pr
++	;;
++	srlz.d
++	br.sptk.many page_fault
++END(iaccess_rights)
++
++	.org ia64_ivt+0x5300
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
++ENTRY(daccess_rights)
++	DBG_FAULT(23)
++#ifdef CONFIG_XEN
++	movl r16=XSI_IFA
++	;;
++	ld8 r16=[r16]
++	;;
++	XEN_HYPER_RSM_PSR_DT;
++#else
++	mov r16=cr.ifa
++	rsm psr.dt
++#endif
++	mov r31=pr
++	;;
++	srlz.d
++	br.sptk.many page_fault
++END(daccess_rights)
++
++	.org ia64_ivt+0x5400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
++ENTRY(general_exception)
++	DBG_FAULT(24)
++	mov r16=cr.isr
++	mov r31=pr
++	;;
++	cmp4.eq p6,p0=0,r16
++(p6)	br.sptk.many dispatch_illegal_op_fault
++	;;
++	mov r19=24		// fault number
++	br.sptk.many dispatch_to_fault_handler
++END(general_exception)
++
++	.org ia64_ivt+0x5500
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
++ENTRY(disabled_fp_reg)
++	DBG_FAULT(25)
++	rsm psr.dfh		// ensure we can access fph
++	;;
++	srlz.d
++	mov r31=pr
++	mov r19=25
++	br.sptk.many dispatch_to_fault_handler
++END(disabled_fp_reg)
++
++	.org ia64_ivt+0x5600
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
++ENTRY(nat_consumption)
++	DBG_FAULT(26)
++	FAULT(26)
++END(nat_consumption)
++
++	.org ia64_ivt+0x5700
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
++ENTRY(speculation_vector)
++	DBG_FAULT(27)
++	/*
++	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
++	 * this part of the architecture is not implemented in hardware on some CPUs, such
++	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
++	 * the relative target (not yet sign extended).  So after sign extending it we
++	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
++	 * i.e., the slot to restart into.
++	 *
++	 * cr.iim contains zero_ext(imm21)
++	 */
++	mov r18=cr.iim
++	;;
++	mov r17=cr.iip
++	shl r18=r18,43			// put sign bit in position (43=64-21)
++	;;
++
++	mov r16=cr.ipsr
++	shr r18=r18,39			// sign extend (39=43-4)
++	;;
++
++	add r17=r17,r18			// now add the offset
++	;;
++	mov cr.iip=r17
++	dep r16=0,r16,41,2		// clear EI
++	;;
++
++	mov cr.ipsr=r16
++	;;
++
++#ifdef CONFIG_XEN
++	XEN_HYPER_RFI;
++#else
++	rfi
++#endif
++END(speculation_vector)
++
++	.org ia64_ivt+0x5800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5800 Entry 28 (size 16 bundles) Reserved
++	DBG_FAULT(28)
++	FAULT(28)
++
++	.org ia64_ivt+0x5900
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
++ENTRY(debug_vector)
++	DBG_FAULT(29)
++	FAULT(29)
++END(debug_vector)
++
++	.org ia64_ivt+0x5a00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
++ENTRY(unaligned_access)
++	DBG_FAULT(30)
++	mov r16=cr.ipsr
++	mov r31=pr		// prepare to save predicates
++	;;
++	br.sptk.many dispatch_unaligned_handler
++END(unaligned_access)
++
++	.org ia64_ivt+0x5b00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
++ENTRY(unsupported_data_reference)
++	DBG_FAULT(31)
++	FAULT(31)
++END(unsupported_data_reference)
++
++	.org ia64_ivt+0x5c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
++ENTRY(floating_point_fault)
++	DBG_FAULT(32)
++	FAULT(32)
++END(floating_point_fault)
++
++	.org ia64_ivt+0x5d00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
++ENTRY(floating_point_trap)
++	DBG_FAULT(33)
++	FAULT(33)
++END(floating_point_trap)
++
++	.org ia64_ivt+0x5e00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
++ENTRY(lower_privilege_trap)
++	DBG_FAULT(34)
++	FAULT(34)
++END(lower_privilege_trap)
++
++	.org ia64_ivt+0x5f00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
++ENTRY(taken_branch_trap)
++	DBG_FAULT(35)
++	FAULT(35)
++END(taken_branch_trap)
++
++	.org ia64_ivt+0x6000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
++ENTRY(single_step_trap)
++	DBG_FAULT(36)
++	FAULT(36)
++END(single_step_trap)
++
++	.org ia64_ivt+0x6100
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6100 Entry 37 (size 16 bundles) Reserved
++	DBG_FAULT(37)
++	FAULT(37)
++
++	.org ia64_ivt+0x6200
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6200 Entry 38 (size 16 bundles) Reserved
++	DBG_FAULT(38)
++	FAULT(38)
++
++	.org ia64_ivt+0x6300
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6300 Entry 39 (size 16 bundles) Reserved
++	DBG_FAULT(39)
++	FAULT(39)
++
++	.org ia64_ivt+0x6400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6400 Entry 40 (size 16 bundles) Reserved
++	DBG_FAULT(40)
++	FAULT(40)
++
++	.org ia64_ivt+0x6500
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6500 Entry 41 (size 16 bundles) Reserved
++	DBG_FAULT(41)
++	FAULT(41)
++
++	.org ia64_ivt+0x6600
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6600 Entry 42 (size 16 bundles) Reserved
++	DBG_FAULT(42)
++	FAULT(42)
++
++	.org ia64_ivt+0x6700
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6700 Entry 43 (size 16 bundles) Reserved
++	DBG_FAULT(43)
++	FAULT(43)
++
++	.org ia64_ivt+0x6800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6800 Entry 44 (size 16 bundles) Reserved
++	DBG_FAULT(44)
++	FAULT(44)
++
++	.org ia64_ivt+0x6900
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
++ENTRY(ia32_exception)
++	DBG_FAULT(45)
++	FAULT(45)
++END(ia32_exception)
++
++	.org ia64_ivt+0x6a00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
++ENTRY(ia32_intercept)
++	DBG_FAULT(46)
++#ifdef	CONFIG_IA32_SUPPORT
++	mov r31=pr
++	mov r16=cr.isr
++	;;
++	extr.u r17=r16,16,8	// get ISR.code
++	mov r18=ar.eflag
++	mov r19=cr.iim		// old eflag value
++	;;
++	cmp.ne p6,p0=2,r17
++(p6)	br.cond.spnt 1f		// not a system flag fault
++	xor r16=r18,r19
++	;;
++	extr.u r17=r16,18,1	// get the eflags.ac bit
++	;;
++	cmp.eq p6,p0=0,r17
++(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
++	;;
++	mov pr=r31,-1		// restore predicate registers
++#ifdef CONFIG_XEN
++	XEN_HYPER_RFI;
++#else
++	rfi
++#endif
++
++1:
++#endif	// CONFIG_IA32_SUPPORT
++	FAULT(46)
++END(ia32_intercept)
++
++	.org ia64_ivt+0x6b00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
++ENTRY(ia32_interrupt)
++	DBG_FAULT(47)
++#ifdef CONFIG_IA32_SUPPORT
++	mov r31=pr
++	br.sptk.many dispatch_to_ia32_handler
++#else
++	FAULT(47)
++#endif
++END(ia32_interrupt)
++
++	.org ia64_ivt+0x6c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6c00 Entry 48 (size 16 bundles) Reserved
++	DBG_FAULT(48)
++	FAULT(48)
++
++	.org ia64_ivt+0x6d00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6d00 Entry 49 (size 16 bundles) Reserved
++	DBG_FAULT(49)
++	FAULT(49)
++
++	.org ia64_ivt+0x6e00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6e00 Entry 50 (size 16 bundles) Reserved
++	DBG_FAULT(50)
++	FAULT(50)
++
++	.org ia64_ivt+0x6f00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x6f00 Entry 51 (size 16 bundles) Reserved
++	DBG_FAULT(51)
++	FAULT(51)
++
++	.org ia64_ivt+0x7000
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7000 Entry 52 (size 16 bundles) Reserved
++	DBG_FAULT(52)
++	FAULT(52)
++
++	.org ia64_ivt+0x7100
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7100 Entry 53 (size 16 bundles) Reserved
++	DBG_FAULT(53)
++	FAULT(53)
++
++	.org ia64_ivt+0x7200
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7200 Entry 54 (size 16 bundles) Reserved
++	DBG_FAULT(54)
++	FAULT(54)
++
++	.org ia64_ivt+0x7300
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7300 Entry 55 (size 16 bundles) Reserved
++	DBG_FAULT(55)
++	FAULT(55)
++
++	.org ia64_ivt+0x7400
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7400 Entry 56 (size 16 bundles) Reserved
++	DBG_FAULT(56)
++	FAULT(56)
++
++	.org ia64_ivt+0x7500
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7500 Entry 57 (size 16 bundles) Reserved
++	DBG_FAULT(57)
++	FAULT(57)
++
++	.org ia64_ivt+0x7600
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7600 Entry 58 (size 16 bundles) Reserved
++	DBG_FAULT(58)
++	FAULT(58)
++
++	.org ia64_ivt+0x7700
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7700 Entry 59 (size 16 bundles) Reserved
++	DBG_FAULT(59)
++	FAULT(59)
++
++	.org ia64_ivt+0x7800
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7800 Entry 60 (size 16 bundles) Reserved
++	DBG_FAULT(60)
++	FAULT(60)
++
++	.org ia64_ivt+0x7900
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7900 Entry 61 (size 16 bundles) Reserved
++	DBG_FAULT(61)
++	FAULT(61)
++
++	.org ia64_ivt+0x7a00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7a00 Entry 62 (size 16 bundles) Reserved
++	DBG_FAULT(62)
++	FAULT(62)
++
++	.org ia64_ivt+0x7b00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7b00 Entry 63 (size 16 bundles) Reserved
++	DBG_FAULT(63)
++	FAULT(63)
++
++	.org ia64_ivt+0x7c00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7c00 Entry 64 (size 16 bundles) Reserved
++	DBG_FAULT(64)
++	FAULT(64)
++
++	.org ia64_ivt+0x7d00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7d00 Entry 65 (size 16 bundles) Reserved
++	DBG_FAULT(65)
++	FAULT(65)
++
++	.org ia64_ivt+0x7e00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7e00 Entry 66 (size 16 bundles) Reserved
++	DBG_FAULT(66)
++	FAULT(66)
++
++#ifdef CONFIG_XEN
++	/*
++	 * There is no particular reason for this code to be here, other than that
++	 * there happens to be space here that would go unused otherwise.  If this
++	 * fault ever gets "unreserved", simply move the following code to a more
++	 * suitable spot...
++	 */
++
++GLOBAL_ENTRY(xen_bsw1)
++	/* FIXME: THIS CODE IS NOT NaT SAFE! */
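++	// Select register bank 1 via XSI_BANKNUM, then reload the bank-1 copies
++	// of r16-r31 from the shared-info area starting at XSI_BANK1_R16.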
++	movl r30=XSI_BANKNUM;
++	mov r31=1;;
++	st4 [r30]=r31;
++	movl r30=XSI_BANK1_R16;
++	movl r31=XSI_BANK1_R16+8;;
++	ld8 r16=[r30],16; ld8 r17=[r31],16;;
++	ld8 r18=[r30],16; ld8 r19=[r31],16;;
++	ld8 r20=[r30],16; ld8 r21=[r31],16;;
++	ld8 r22=[r30],16; ld8 r23=[r31],16;;
++	ld8 r24=[r30],16; ld8 r25=[r31],16;;
++	ld8 r26=[r30],16; ld8 r27=[r31],16;;
++	ld8 r28=[r30],16; ld8 r29=[r31],16;;
++	ld8 r30=[r30]; ld8 r31=[r31];;
++	br.ret.sptk.many b0
++#endif
++
++	.org ia64_ivt+0x7f00
++/////////////////////////////////////////////////////////////////////////////////////////
++// 0x7f00 Entry 67 (size 16 bundles) Reserved
++	DBG_FAULT(67)
++	FAULT(67)
++
++#ifdef CONFIG_IA32_SUPPORT
++
++	/*
++	 * There is no particular reason for this code to be here, other than that
++	 * there happens to be space here that would go unused otherwise.  If this
++	 * fault ever gets "unreserved", simply move the following code to a more
++	 * suitable spot...
++	 */
++
++	// IA32 interrupt entry point
++
++ENTRY(dispatch_to_ia32_handler)
++	SAVE_MIN
++	;;
++	mov r14=cr.isr
++	ssm psr.ic | PSR_DEFAULT_BITS
++	;;
++	srlz.i					// guarantee that interruption collection is on
++	;;
++(p15)	ssm psr.i
++	adds r3=8,r2		// Base pointer for SAVE_REST
++	;;
++	SAVE_REST
++	;;
++	mov r15=0x80
++	shr r14=r14,16		// Get interrupt number
++	;;
++	cmp.ne p6,p0=r14,r15
++(p6)	br.call.dpnt.many b6=non_ia32_syscall
++
++	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
++	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
++	;;
++	cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
++	ld8 r8=[r14]		// get r8
++	;;
++	st8 [r15]=r8		// save original EAX in r1 (IA32 procs don't use the GP)
++	;;
++	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
++	;;
++	ld4 r8=[r14],8		// r8 == eax (syscall number)
++	mov r15=IA32_NR_syscalls
++	;;
++	cmp.ltu.unc p6,p7=r8,r15
++	ld4 out1=[r14],8	// r9 == ecx
++	;;
++	ld4 out2=[r14],8	// r10 == edx
++	;;
++	ld4 out0=[r14]		// r11 == ebx
++	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
++	;;
++	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
++	;;
++	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
++	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
++	;;
++	ld4 out4=[r14]		// r15 == edi
++	movl r16=ia32_syscall_table
++	;;
++(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
++	ld4 r2=[r2]		// r2 = current_thread_info()->flags
++	;;
++	ld8 r16=[r16]
++	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
++	;;
++	mov b6=r16
++	movl r15=ia32_ret_from_syscall
++	cmp.eq p8,p0=r2,r0
++	;;
++	mov rp=r15
++(p8)	br.call.sptk.many b6=b6
++	br.cond.sptk ia32_trace_syscall
++
++non_ia32_syscall:
++	alloc r15=ar.pfs,0,0,2,0
++	mov out0=r14				// interrupt #
++	add out1=16,sp				// pointer to pt_regs
++	;;			// avoid WAW on CFM
++	br.call.sptk.many rp=ia32_bad_interrupt
++.ret1:	movl r15=ia64_leave_kernel
++	;;
++	mov rp=r15
++	br.ret.sptk.many rp
++END(dispatch_to_ia32_handler)
++
++#endif /* CONFIG_IA32_SUPPORT */
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xen_ksyms.c linux-2.6.12-xen/arch/ia64/xen/xen_ksyms.c
+--- pristine-linux-2.6.12/arch/ia64/xen/xen_ksyms.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/xen_ksyms.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,12 @@
++/*
++ * Architecture-specific kernel symbols
++ *
++ * Don't put any exports here unless it's defined in an assembler file.
++ * All other exports should be put directly after the definition.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++
++extern int is_running_on_xen(void);
++EXPORT_SYMBOL(is_running_on_xen);
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenminstate.h linux-2.6.12-xen/arch/ia64/xen/xenminstate.h
+--- pristine-linux-2.6.12/arch/ia64/xen/xenminstate.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/xenminstate.h	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,367 @@
++#include <linux/config.h>
++
++#include <asm/cache.h>
++
++#ifdef CONFIG_XEN
++#include "../kernel/entry.h"
++#else
++#include "entry.h"
++#endif
++
++/*
++ * For ivt.s we want to access the stack virtually so we don't have to disable translation
++ * on interrupts.
++ *
++ *  On entry:
++ *	r1:	pointer to current task (ar.k6)
++ */
++#define MINSTATE_START_SAVE_MIN_VIRT								\
++(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
++	;;											\
++(pUStk)	mov.m r24=ar.rnat;									\
++(pUStk)	addl r22=IA64_RBS_OFFSET,r1;			/* compute base of RBS */		\
++(pKStk) mov r1=sp;					/* get sp  */				\
++	;;											\
++(pUStk) lfetch.fault.excl.nt1 [r22];								\
++(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
++(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
++	;;											\
++(pUStk)	mov ar.bspstore=r22;				/* switch to kernel RBS */		\
++(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */	\
++	;;											\
++(pUStk)	mov r18=ar.bsp;										\
++(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
++
++#define MINSTATE_END_SAVE_MIN_VIRT								\
++	bsw.1;			/* switch back to bank 1 (must be last in insn group) */	\
++	;;
++
++/*
++ * For mca_asm.S we want to access the stack physically since the state is saved before we
++ * go virtual and don't want to destroy the iip or ipsr.
++ */
++#define MINSTATE_START_SAVE_MIN_PHYS								\
++(pKStk) mov r3=IA64_KR(PER_CPU_DATA);;								\
++(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;							\
++(pKStk) ld8 r3 = [r3];;										\
++(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;						\
++(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;						\
++(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
++(pUStk)	addl r22=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */	\
++	;;											\
++(pUStk)	mov r24=ar.rnat;									\
++(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
++(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
++(pUStk)	dep r22=-1,r22,61,3;			/* compute kernel virtual addr of RBS */	\
++	;;											\
++(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;		/* if in kernel mode, use sp (r12) */		\
++(pUStk)	mov ar.bspstore=r22;			/* switch to kernel RBS */			\
++	;;											\
++(pUStk)	mov r18=ar.bsp;										\
++(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
++
++#define MINSTATE_END_SAVE_MIN_PHYS								\
++	dep r12=-1,r12,61,3;		/* make sp a kernel virtual address */			\
++	;;
++
++#ifdef MINSTATE_VIRT
++# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT)
++# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_VIRT
++# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_VIRT
++#endif
++
++#ifdef MINSTATE_PHYS
++# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT);; tpa reg=reg
++# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
++# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
++#endif
++
++/*
++ * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
++ * the minimum state necessary that allows us to turn psr.ic back
++ * on.
++ *
++ * Assumed state upon entry:
++ *	psr.ic: off
++ *	r31:	contains saved predicates (pr)
++ *
++ * Upon exit, the state is as follows:
++ *	psr.ic: off
++ *	 r2 = points to &pt_regs.r16
++ *	 r8 = contents of ar.ccv
++ *	 r9 = contents of ar.csd
++ *	r10 = contents of ar.ssd
++ *	r11 = FPSR_DEFAULT
++ *	r12 = kernel sp (kernel virtual address)
++ *	r13 = points to current task_struct (kernel virtual address)
++ *	p15 = TRUE if psr.i is set in cr.ipsr
++ *	predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
++ *		preserved
++ * CONFIG_XEN note: p6/p7 are not preserved
++ *
++ * Note that psr.ic is NOT turned on by this macro.  This is so that
++ * we can pass interruption state as arguments to a handler.
++ */
++#ifdef CONFIG_XEN
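++/*
++ * Xen variant of DO_SAVE_MIN: cr.ipsr, cr.iip and cr.ifs are read from the
++ * XSI_* shared-info fields, and xen_bsw1 is called in place of bsw.1.
++ */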
++#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)							\
++	MINSTATE_GET_CURRENT(r16);	/* M (or M;;I) */					\
++	mov r27=ar.rsc;			/* M */							\
++	mov r20=r1;			/* A */							\
++	mov r25=ar.unat;		/* M */							\
++	/* mov r29=cr.ipsr;		/* M */							\
++	movl r29=XSI_IPSR;;									\
++	ld8 r29=[r29];;										\
++	mov r26=ar.pfs;			/* I */							\
++	/* mov r28=cr.iip;		/* M */							\
++	movl r28=XSI_IIP;;									\
++	ld8 r28=[r28];;										\
++	mov r21=ar.fpsr;		/* M */							\
++	COVER;			/* B;; (or nothing) */					\
++	;;											\
++	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16;						\
++	;;											\
++	ld1 r17=[r16];				/* load current->thread.on_ustack flag */	\
++	st1 [r16]=r0;				/* clear current->thread.on_ustack flag */	\
++	adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16						\
++	/* switch from user to kernel RBS: */							\
++	;;											\
++	invala;				/* M */							\
++	/* SAVE_IFS; /* see xen special handling below */						\
++	cmp.eq pKStk,pUStk=r0,r17;		/* are we in kernel mode already? */		\
++	;;											\
++	MINSTATE_START_SAVE_MIN									\
++	adds r17=2*L1_CACHE_BYTES,r1;		/* really: biggest cache-line size */		\
++	adds r16=PT(CR_IPSR),r1;								\
++	;;											\
++	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;						\
++	st8 [r16]=r29;		/* save cr.ipsr */						\
++	;;											\
++	lfetch.fault.excl.nt1 [r17];								\
++	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;							\
++	mov r29=b0										\
++	;;											\
++	adds r16=PT(R8),r1;	/* initialize first base pointer */				\
++	adds r17=PT(R9),r1;	/* initialize second base pointer */				\
++(pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */					\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r8,16;								\
++.mem.offset 8,0; st8.spill [r17]=r9,16;								\
++        ;;											\
++.mem.offset 0,0; st8.spill [r16]=r10,24;							\
++.mem.offset 8,0; st8.spill [r17]=r11,24;							\
++        ;;											\
++	/* xen special handling for possibly lazy cover */					\
++	movl r8=XSI_INCOMPL_REGFR;								\
++	;;											\
++	ld4 r30=[r8];										\
++	;;											\
++	cmp.eq	p6,p7=r30,r0;									\
++	;; /* not sure if this stop bit is necessary */						\
++(p6)	adds r8=XSI_PRECOVER_IFS-XSI_INCOMPL_REGFR,r8;						\
++(p7)	adds r8=XSI_IFS-XSI_INCOMPL_REGFR,r8;							\
++	;;											\
++	ld8 r30=[r8];										\
++	;;											\
++	st8 [r16]=r28,16;	/* save cr.iip */						\
++	st8 [r17]=r30,16;	/* save cr.ifs */						\
++(pUStk)	sub r18=r18,r22;	/* r18=RSE.ndirty*8 */						\
++	mov r8=ar.ccv;										\
++	mov r9=ar.csd;										\
++	mov r10=ar.ssd;										\
++	movl r11=FPSR_DEFAULT;   /* L-unit */							\
++	;;											\
++	st8 [r16]=r25,16;	/* save ar.unat */						\
++	st8 [r17]=r26,16;	/* save ar.pfs */						\
++	shl r18=r18,16;		/* compute ar.rsc to be used for "loadrs" */			\
++	;;											\
++	st8 [r16]=r27,16;	/* save ar.rsc */						\
++(pUStk)	st8 [r17]=r24,16;	/* save ar.rnat */						\
++(pKStk)	adds r17=16,r17;	/* skip over ar_rnat field */					\
++	;;			/* avoid RAW on r16 & r17 */					\
++(pUStk)	st8 [r16]=r23,16;	/* save ar.bspstore */						\
++	st8 [r17]=r31,16;	/* save predicates */						\
++(pKStk)	adds r16=16,r16;	/* skip over ar_bspstore field */				\
++	;;											\
++	st8 [r16]=r29,16;	/* save b0 */							\
++	st8 [r17]=r18,16;	/* save ar.rsc value for "loadrs" */				\
++	cmp.eq pNonSys,pSys=r0,r0	/* initialize pSys=0, pNonSys=1 */			\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r20,16;	/* save original r1 */				\
++.mem.offset 8,0; st8.spill [r17]=r12,16;							\
++	adds r12=-16,r1;	/* switch to kernel memory stack (with 16 bytes of scratch) */	\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r13,16;							\
++.mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
++	mov r13=IA64_KR(CURRENT);	/* establish `current' */				\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r15,16;							\
++.mem.offset 8,0; st8.spill [r17]=r14,16;							\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r2,16;								\
++.mem.offset 8,0; st8.spill [r17]=r3,16;								\
++	;;											\
++	EXTRA;											\
++	mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;					\
++	adds r2=IA64_PT_REGS_R16_OFFSET,r1;							\
++	;;											\
++	movl r1=__gp;		/* establish kernel global pointer */				\
++	;;											\
++	/* MINSTATE_END_SAVE_MIN */
++#else
++#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)							\
++	MINSTATE_GET_CURRENT(r16);	/* M (or M;;I) */					\
++	mov r27=ar.rsc;			/* M */							\
++	mov r20=r1;			/* A */							\
++	mov r25=ar.unat;		/* M */							\
++	mov r29=cr.ipsr;		/* M */							\
++	mov r26=ar.pfs;			/* I */							\
++	mov r28=cr.iip;			/* M */							\
++	mov r21=ar.fpsr;		/* M */							\
++	COVER;				/* B;; (or nothing) */					\
++	;;											\
++	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16;						\
++	;;											\
++	ld1 r17=[r16];				/* load current->thread.on_ustack flag */	\
++	st1 [r16]=r0;				/* clear current->thread.on_ustack flag */	\
++	adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16						\
++	/* switch from user to kernel RBS: */							\
++	;;											\
++	invala;				/* M */							\
++	SAVE_IFS;										\
++	cmp.eq pKStk,pUStk=r0,r17;		/* are we in kernel mode already? */		\
++	;;											\
++	MINSTATE_START_SAVE_MIN									\
++	adds r17=2*L1_CACHE_BYTES,r1;		/* really: biggest cache-line size */		\
++	adds r16=PT(CR_IPSR),r1;								\
++	;;											\
++	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;						\
++	st8 [r16]=r29;		/* save cr.ipsr */						\
++	;;											\
++	lfetch.fault.excl.nt1 [r17];								\
++	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;							\
++	mov r29=b0										\
++	;;											\
++	adds r16=PT(R8),r1;	/* initialize first base pointer */				\
++	adds r17=PT(R9),r1;	/* initialize second base pointer */				\
++(pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */					\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r8,16;								\
++.mem.offset 8,0; st8.spill [r17]=r9,16;								\
++        ;;											\
++.mem.offset 0,0; st8.spill [r16]=r10,24;							\
++.mem.offset 8,0; st8.spill [r17]=r11,24;							\
++        ;;											\
++	st8 [r16]=r28,16;	/* save cr.iip */						\
++	st8 [r17]=r30,16;	/* save cr.ifs */						\
++(pUStk)	sub r18=r18,r22;	/* r18=RSE.ndirty*8 */						\
++	mov r8=ar.ccv;										\
++	mov r9=ar.csd;										\
++	mov r10=ar.ssd;										\
++	movl r11=FPSR_DEFAULT;   /* L-unit */							\
++	;;											\
++	st8 [r16]=r25,16;	/* save ar.unat */						\
++	st8 [r17]=r26,16;	/* save ar.pfs */						\
++	shl r18=r18,16;		/* compute ar.rsc to be used for "loadrs" */			\
++	;;											\
++	st8 [r16]=r27,16;	/* save ar.rsc */						\
++(pUStk)	st8 [r17]=r24,16;	/* save ar.rnat */						\
++(pKStk)	adds r17=16,r17;	/* skip over ar_rnat field */					\
++	;;			/* avoid RAW on r16 & r17 */					\
++(pUStk)	st8 [r16]=r23,16;	/* save ar.bspstore */						\
++	st8 [r17]=r31,16;	/* save predicates */						\
++(pKStk)	adds r16=16,r16;	/* skip over ar_bspstore field */				\
++	;;											\
++	st8 [r16]=r29,16;	/* save b0 */							\
++	st8 [r17]=r18,16;	/* save ar.rsc value for "loadrs" */				\
++	cmp.eq pNonSys,pSys=r0,r0	/* initialize pSys=0, pNonSys=1 */			\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r20,16;	/* save original r1 */				\
++.mem.offset 8,0; st8.spill [r17]=r12,16;							\
++	adds r12=-16,r1;	/* switch to kernel memory stack (with 16 bytes of scratch) */	\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r13,16;							\
++.mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
++	mov r13=IA64_KR(CURRENT);	/* establish `current' */				\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r15,16;							\
++.mem.offset 8,0; st8.spill [r17]=r14,16;							\
++	;;											\
++.mem.offset 0,0; st8.spill [r16]=r2,16;								\
++.mem.offset 8,0; st8.spill [r17]=r3,16;								\
++	adds r2=IA64_PT_REGS_R16_OFFSET,r1;							\
++	;;											\
++	EXTRA;											\
++	movl r1=__gp;		/* establish kernel global pointer */				\
++	;;											\
++	MINSTATE_END_SAVE_MIN
++#endif
++
++/*
++ * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
++ *
++ * Assumed state upon entry:
++ *	psr.ic: on
++ *	r2:	points to &pt_regs.r16
++ *	r3:	points to &pt_regs.r17
++ *	r8:	contents of ar.ccv
++ *	r9:	contents of ar.csd
++ *	r10:	contents of ar.ssd
++ *	r11:	FPSR_DEFAULT
++ *
++ * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
++ */
++#define SAVE_REST				\
++.mem.offset 0,0; st8.spill [r2]=r16,16;		\
++.mem.offset 8,0; st8.spill [r3]=r17,16;		\
++	;;					\
++.mem.offset 0,0; st8.spill [r2]=r18,16;		\
++.mem.offset 8,0; st8.spill [r3]=r19,16;		\
++	;;					\
++.mem.offset 0,0; st8.spill [r2]=r20,16;		\
++.mem.offset 8,0; st8.spill [r3]=r21,16;		\
++	mov r18=b6;				\
++	;;					\
++.mem.offset 0,0; st8.spill [r2]=r22,16;		\
++.mem.offset 8,0; st8.spill [r3]=r23,16;		\
++	mov r19=b7;				\
++	;;					\
++.mem.offset 0,0; st8.spill [r2]=r24,16;		\
++.mem.offset 8,0; st8.spill [r3]=r25,16;		\
++	;;					\
++.mem.offset 0,0; st8.spill [r2]=r26,16;		\
++.mem.offset 8,0; st8.spill [r3]=r27,16;		\
++	;;					\
++.mem.offset 0,0; st8.spill [r2]=r28,16;		\
++.mem.offset 8,0; st8.spill [r3]=r29,16;		\
++	;;					\
++.mem.offset 0,0; st8.spill [r2]=r30,16;		\
++.mem.offset 8,0; st8.spill [r3]=r31,32;		\
++	;;					\
++	mov ar.fpsr=r11;	/* M-unit */	\
++	st8 [r2]=r8,8;		/* ar.ccv */	\
++	adds r24=PT(B6)-PT(F7),r3;		\
++	;;					\
++	stf.spill [r2]=f6,32;			\
++	stf.spill [r3]=f7,32;			\
++	;;					\
++	stf.spill [r2]=f8,32;			\
++	stf.spill [r3]=f9,32;			\
++	;;					\
++	stf.spill [r2]=f10;			\
++	stf.spill [r3]=f11;			\
++	adds r25=PT(B7)-PT(F11),r3;		\
++	;;					\
++	st8 [r24]=r18,16;       /* b6 */	\
++	st8 [r25]=r19,16;       /* b7 */	\
++	;;					\
++	st8 [r24]=r9;        	/* ar.csd */	\
++	st8 [r25]=r10;      	/* ar.ssd */	\
++	;;
++
++#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs,)
++#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
++#ifdef CONFIG_XEN
++#define SAVE_MIN		break 0;; /* FIXME: non-cover version only for ia32 support? */
++#else
++#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, )
++#endif
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenpal.S linux-2.6.12-xen/arch/ia64/xen/xenpal.S
+--- pristine-linux-2.6.12/arch/ia64/xen/xenpal.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/xenpal.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,73 @@
++/*
++ * ia64/xen/xenpal.S
++ *
++ * Alternate PAL  routines for Xen.  Heavily leveraged from
++ *   ia64/kernel/pal.S
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co
++ *	Dan Magenheimer <dan.magenheimer at .hp.com>
++ */
++
++#include <asm/asmmacro.h>
++#include <asm/processor.h>
++
++GLOBAL_ENTRY(xen_pal_call_static)
++	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
++	alloc loc1 = ar.pfs,5,5,0,0
++#ifdef CONFIG_XEN
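++	// Fall back to the native PAL call path when not actually running on Xen.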
++	movl r22=running_on_xen;;
++	ld4 r22=[r22];;
++	cmp.eq p7,p0=r22,r0
++(p7)	br.cond.spnt.many __ia64_pal_call_static;;
++#endif
++	movl loc2 = pal_entry_point
++1:	{
++	  mov r28 = in0
++	  mov r29 = in1
++	  mov r8 = ip
++	}
++	;;
++	ld8 loc2 = [loc2]		// loc2 <- entry point
++	tbit.nz p6,p7 = in4, 0
++	adds r8 = 1f-1b,r8
++	mov loc4=ar.rsc			// save RSE configuration
++	;;
++	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
++	mov loc3 = psr
++	mov loc0 = rp
++	.body
++	mov r30 = in2
++
++#ifdef CONFIG_XEN
++	// this is low priority for paravirtualization, but is called
++	// from the idle loop so confuses privop counting
++	movl r31=XSI_PSR_IC
++	;;
++(p6)	st8 [r31]=r0
++	;;
++(p7)	adds r31=XSI_PSR_I-XSI_PSR_IC,r31
++	;;
++(p7)	st4 [r31]=r0
++	;;
++	mov r31 = in3
++	mov b7 = loc2
++	;;
++#else
++(p6)	rsm psr.i | psr.ic
++	mov r31 = in3
++	mov b7 = loc2
++
++(p7)	rsm psr.i
++	;;
++(p6)	srlz.i
++#endif
++	mov rp = r8
++	br.cond.sptk.many b7
++1:	mov psr.l = loc3
++	mov ar.rsc = loc4		// restore RSE configuration
++	mov ar.pfs = loc1
++	mov rp = loc0
++	;;
++	srlz.d				// serialize restoration of psr.l
++	br.ret.sptk.many b0
++END(xen_pal_call_static)
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xensetup.S linux-2.6.12-xen/arch/ia64/xen/xensetup.S
+--- pristine-linux-2.6.12/arch/ia64/xen/xensetup.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen/xensetup.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,33 @@
++/*
++ * Support routines for Xen
++ *
++ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer at hp.com>
++ */
++
++#include <linux/config.h>
++#include <asm/processor.h>
++#include <asm/asmmacro.h>
++
++	.data
++	.align 8
++	.globl running_on_xen
++running_on_xen:
++	data4 0
++
++	.text
++GLOBAL_ENTRY(early_xen_setup)
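++	// Bit 63 of cr.dcr is used as the running-on-Xen indicator: record it
++	// and, if set, switch cr.iva to the paravirtualized IVT (xen_ivt).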
++	mov r8=cr.dcr;;
++	extr.u r8=r8,63,1
++	movl r9=running_on_xen;;
++	st4 [r9]=r8;;
++	cmp.ne p7,p0=r8,r0;;
++(p7)	movl r10=xen_ivt;;
++(p7)	mov cr.iva=r10
++	br.ret.sptk.many rp;;
++END(early_xen_setup)
++
++GLOBAL_ENTRY(is_running_on_xen)
++	movl r9=running_on_xen;;
++	ld4 r8=[r9];;
++	br.ret.sptk.many rp;;
++END(is_running_on_xen)
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen-mkbuildtree-post linux-2.6.12-xen/arch/ia64/xen-mkbuildtree-post
+--- pristine-linux-2.6.12/arch/ia64/xen-mkbuildtree-post	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen-mkbuildtree-post	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,2 @@
++#!/bin/bash
++echo 'NOTHING YET IN ' ${0}
+diff -Nurp pristine-linux-2.6.12/arch/ia64/xen-mkbuildtree-pre linux-2.6.12-xen/arch/ia64/xen-mkbuildtree-pre
+--- pristine-linux-2.6.12/arch/ia64/xen-mkbuildtree-pre	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/ia64/xen-mkbuildtree-pre	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,50 @@
++#!/bin/bash
++# restructure directories to match future drivers/xen plan
++# and move aside xen/x86 specific changes
++# WARNING!: This directory movement really confuses hg which makes
++# it difficult to do development in a directory which is being used
++# for building (as all files in mv'd directories are thought by hg
++# to have been deleted).  I don't know how to avoid this right now,
++# but if someone has a better way, I'm all ears
++if [ ! -e mm.xen-x86 ]
++then
++	mv mm mm.xen-x86
++	mkdir mm
++	mv net net.xen-x86
++	mv kernel kernel.xen-x86
++	mv drivers/acpi/tables.c drivers/acpi/tables.c.xen-x86
++	mv arch/xen/kernel drivers/xen/core
++	mv arch/xen arch/xen.xen-x86
++	mkdir arch/xen
++	mv arch/xen.xen-x86/configs arch/xen
++#	mv include/asm-generic include/asm-generic.xen-x86
++	mv include/linux include/linux.xen-x86
++	mkdir include/linux
++fi
++
++# need to grab a couple of xen-modified files for generic_page_range and
++# typedef pte_fn_t which are used by driver/xen blkif
++cp mm.xen-x86/memory.c mm/memory.c
++cp include/linux.xen-x86/mm.h include/linux/mm.h
++
++#eventually asm-xsi-offsets needs to be part of hypervisor.h/hypercall.h
++cp ../xen/include/asm-ia64/asm-xsi-offsets.h include/asm-ia64/xen/
++
++#ia64 drivers/xen isn't fully functional yet, workaround...
++#also ignore core/evtchn.c which uses a different irq mechanism than ia64
++#(warning: there be dragons here if these files diverge)
++cp arch/ia64/xen/drivers/Makefile drivers/xen/Makefile
++cp arch/ia64/xen/drivers/coreMakefile drivers/xen/core/Makefile
++
++#not sure where these ia64-specific files will end up in the future
++cp arch/ia64/xen/drivers/xenia64_init.c drivers/xen/core
++cp arch/ia64/xen/drivers/evtchn_ia64.c drivers/xen/core
++
++#still a few x86-ism's in various drivers/xen files, patch them
++#cd drivers/xen
++#if [ ! -e ia64.patch.semaphore ]
++#then
++#	cat ../../arch/ia64/xen/drivers/patches/* | patch -p1 -b
++#fi
++#touch ia64.patch.semaphore
++#cd ../..
+diff -Nurp pristine-linux-2.6.12/arch/ppc/kernel/time.c linux-2.6.12-xen/arch/ppc/kernel/time.c
+--- pristine-linux-2.6.12/arch/ppc/kernel/time.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ppc/kernel/time.c	2006-03-05 23:54:35.432282630 +0100
+@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
+ 
+ extern unsigned long wall_jiffies;
+ 
++/* used for timezone offset */
++static long timezone_offset;
++
+ DEFINE_SPINLOCK(rtc_lock);
+ 
+ EXPORT_SYMBOL(rtc_lock);
+@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
+ 		     xtime.tv_sec - last_rtc_update >= 659 &&
+ 		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
+ 		     jiffies - wall_jiffies == 1) {
+-		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
++		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
+ 				last_rtc_update = xtime.tv_sec+1;
+ 			else
+ 				/* Try again one minute later */
+@@ -286,7 +289,7 @@ void __init time_init(void)
+ 	unsigned old_stamp, stamp, elapsed;
+ 
+         if (ppc_md.time_init != NULL)
+-                time_offset = ppc_md.time_init();
++                timezone_offset = ppc_md.time_init();
+ 
+ 	if (__USE_RTC()) {
+ 		/* 601 processor: dec counts down by 128 every 128ns */
+@@ -331,10 +334,10 @@ void __init time_init(void)
+ 	set_dec(tb_ticks_per_jiffy);
+ 
+ 	/* If platform provided a timezone (pmac), we correct the time */
+-        if (time_offset) {
+-		sys_tz.tz_minuteswest = -time_offset / 60;
++        if (timezone_offset) {
++		sys_tz.tz_minuteswest = -timezone_offset / 60;
+ 		sys_tz.tz_dsttime = 0;
+-		xtime.tv_sec -= time_offset;
++		xtime.tv_sec -= timezone_offset;
+         }
+         set_normalized_timespec(&wall_to_monotonic,
+                                 -xtime.tv_sec, -xtime.tv_nsec);
+diff -Nurp pristine-linux-2.6.12/arch/ppc64/boot/zlib.c linux-2.6.12-xen/arch/ppc64/boot/zlib.c
+--- pristine-linux-2.6.12/arch/ppc64/boot/zlib.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ppc64/boot/zlib.c	2006-03-05 23:54:35.583260383 +0100
+@@ -1307,7 +1307,7 @@ local int huft_build(
+   {
+     *t = (inflate_huft *)Z_NULL;
+     *m = 0;
+-    return Z_OK;
++    return Z_DATA_ERROR;
+   }
+ 
+ 
+@@ -1351,6 +1351,7 @@ local int huft_build(
+     if ((j = *p++) != 0)
+       v[x[j]++] = i;
+   } while (++i < n);
++  n = x[g];			/* set n to length of v */
+ 
+ 
+   /* Generate the Huffman codes and for each, make the table entries */
+diff -Nurp pristine-linux-2.6.12/arch/ppc64/kernel/pSeries_smp.c linux-2.6.12-xen/arch/ppc64/kernel/pSeries_smp.c
+--- pristine-linux-2.6.12/arch/ppc64/kernel/pSeries_smp.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/ppc64/kernel/pSeries_smp.c	2006-03-05 23:54:37.097037319 +0100
+@@ -92,10 +92,13 @@ static int query_cpu_stopped(unsigned in
+ 
+ int pSeries_cpu_disable(void)
+ {
++	int cpu = smp_processor_id();
++
++	cpu_clear(cpu, cpu_online_map);
+ 	systemcfg->processorCount--;
+ 
+ 	/*fix boot_cpuid here*/
+-	if (smp_processor_id() == boot_cpuid)
++	if (cpu == boot_cpuid)
+ 		boot_cpuid = any_online_cpu(cpu_online_map);
+ 
+ 	/* FIXME: abstract this to not be platform specific later on */
+diff -Nurp pristine-linux-2.6.12/arch/s390/kernel/smp.c linux-2.6.12-xen/arch/s390/kernel/smp.c
+--- pristine-linux-2.6.12/arch/s390/kernel/smp.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/s390/kernel/smp.c	2006-03-05 23:54:37.122033636 +0100
+@@ -679,12 +679,14 @@ __cpu_disable(void)
+ {
+ 	unsigned long flags;
+ 	ec_creg_mask_parms cr_parms;
++	int cpu = smp_processor_id();
+ 
+ 	spin_lock_irqsave(&smp_reserve_lock, flags);
+-	if (smp_cpu_reserved[smp_processor_id()] != 0) {
++	if (smp_cpu_reserved[cpu] != 0) {
+ 		spin_unlock_irqrestore(&smp_reserve_lock, flags);
+ 		return -EBUSY;
+ 	}
++	cpu_clear(cpu, cpu_online_map);
+ 
+ #ifdef CONFIG_PFAULT
+ 	/* Disable pfault pseudo page faults on this cpu. */
+diff -Nurp pristine-linux-2.6.12/arch/um/kernel/process.c linux-2.6.12-xen/arch/um/kernel/process.c
+--- pristine-linux-2.6.12/arch/um/kernel/process.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/um/kernel/process.c	2006-03-05 23:54:35.631253312 +0100
+@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
+ 	return(arg.pid);
+ }
+ 
+-static int ptrace_child(void)
++static int ptrace_child(void *arg)
+ {
+ 	int ret;
+ 	int pid = os_getpid(), ppid = getppid();
+@@ -159,16 +159,20 @@ static int ptrace_child(void)
+ 	_exit(ret);
+ }
+ 
+-static int start_ptraced_child(void)
++static int start_ptraced_child(void **stack_out)
+ {
++	void *stack;
++	unsigned long sp;
+ 	int pid, n, status;
+ 	
+-	pid = fork();
+-	if(pid == 0)
+-		ptrace_child();
+-
++	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
++		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
++	if(stack == MAP_FAILED)
++		panic("check_ptrace : mmap failed, errno = %d", errno);
++	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
++	pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
+ 	if(pid < 0)
+-		panic("check_ptrace : fork failed, errno = %d", errno);
++		panic("check_ptrace : clone failed, errno = %d", errno);
+ 	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
+ 	if(n < 0)
+ 		panic("check_ptrace : wait failed, errno = %d", errno);
+@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
+ 		panic("check_ptrace : expected SIGSTOP, got status = %d",
+ 		      status);
+ 
++	*stack_out = stack;
+ 	return(pid);
+ }
+ 
+@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
+  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
+  * So only for SYSEMU features we test mustpanic, while normal host features
+  * must work anyway!*/
+-static int stop_ptraced_child(int pid, int exitcode, int mustexit)
++static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
+ {
+ 	int status, n, ret = 0;
+ 
+ 	if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
+-		panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
++		panic("check_ptrace : ptrace failed, errno = %d", errno);
+ 	CATCH_EINTR(n = waitpid(pid, &status, 0));
+ 	if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
+ 		int exit_with = WEXITSTATUS(status);
+@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
+ 		printk("check_ptrace : child exited with exitcode %d, while "
+ 		      "expecting %d; status 0x%x", exit_with,
+ 		      exitcode, status);
+-		if (mustexit)
++		if (mustpanic)
+ 			panic("\n");
+ 		else
+ 			printk("\n");
+ 		ret = -1;
+ 	}
+ 
++	if(munmap(stack, PAGE_SIZE) < 0)
++		panic("check_ptrace : munmap failed, errno = %d", errno);
+ 	return ret;
+ }
+ 
+@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
+ 
+ static void __init check_sysemu(void)
+ {
++	void *stack;
+ 	int pid, syscall, n, status, count=0;
+ 
+ 	printk("Checking syscall emulation patch for ptrace...");
+ 	sysemu_supported = 0;
+-	pid = start_ptraced_child();
++	pid = start_ptraced_child(&stack);
+ 
+ 	if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
+ 		goto fail;
+@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
+ 		panic("check_sysemu : failed to modify system "
+ 		      "call return, errno = %d", errno);
+ 
+-	if (stop_ptraced_child(pid, 0, 0) < 0)
++	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
+ 		goto fail_stopped;
+ 
+ 	sysemu_supported = 1;
+@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
+ 	set_using_sysemu(!force_sysemu_disabled);
+ 
+ 	printk("Checking advanced syscall emulation patch for ptrace...");
+-	pid = start_ptraced_child();
++	pid = start_ptraced_child(&stack);
+ 	while(1){
+ 		count++;
+ 		if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
+@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
+ 			break;
+ 		}
+ 	}
+-	if (stop_ptraced_child(pid, 0, 0) < 0)
++	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
+ 		goto fail_stopped;
+ 
+ 	sysemu_supported = 2;
+@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
+ 	return;
+ 
+ fail:
+-	stop_ptraced_child(pid, 1, 0);
++	stop_ptraced_child(pid, stack, 1, 0);
+ fail_stopped:
+ 	printk("missing\n");
+ }
+ 
+ void __init check_ptrace(void)
+ {
++	void *stack;
+ 	int pid, syscall, n, status;
+ 
+ 	printk("Checking that ptrace can change system call numbers...");
+-	pid = start_ptraced_child();
++	pid = start_ptraced_child(&stack);
+ 
+ 	if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
+ 		panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
+@@ -330,7 +339,7 @@ void __init check_ptrace(void)
+ 			break;
+ 		}
+ 	}
+-	stop_ptraced_child(pid, 0, 1);
++	stop_ptraced_child(pid, stack, 0, 1);
+ 	printk("OK\n");
+ 	check_sysemu();
+ }
+@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
+ static inline int check_skas3_ptrace_support(void)
+ {
+ 	struct ptrace_faultinfo fi;
++	void *stack;
+ 	int pid, n, ret = 1;
+ 
+ 	printf("Checking for the skas3 patch in the host...");
+-	pid = start_ptraced_child();
++	pid = start_ptraced_child(&stack);
+ 
+ 	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
+ 	if (n < 0) {
+@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
+ 	}
+ 
+ 	init_registers(pid);
+-	stop_ptraced_child(pid, 1, 1);
++	stop_ptraced_child(pid, stack, 1, 1);
+ 
+ 	return(ret);
+ }
+diff -Nurp pristine-linux-2.6.12/arch/x86_64/ia32/syscall32.c linux-2.6.12-xen/arch/x86_64/ia32/syscall32.c
+--- pristine-linux-2.6.12/arch/x86_64/ia32/syscall32.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/x86_64/ia32/syscall32.c	2006-03-05 23:54:35.632253164 +0100
+@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
+ 	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
+ 	struct vm_area_struct *vma;
+ 	struct mm_struct *mm = current->mm;
++	int ret;
+ 
+ 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ 	if (!vma)
+@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
+ 	vma->vm_mm = mm;
+ 
+ 	down_write(&mm->mmap_sem);
+-	insert_vm_struct(mm, vma);
++	if ((ret = insert_vm_struct(mm, vma))) {
++		up_write(&mm->mmap_sem);
++		kmem_cache_free(vm_area_cachep, vma);
++		return ret;
++	}
+ 	mm->total_vm += npages;
+ 	up_write(&mm->mmap_sem);
+ 	return 0;
+diff -Nurp pristine-linux-2.6.12/arch/x86_64/kernel/i387.c linux-2.6.12-xen/arch/x86_64/kernel/i387.c
+--- pristine-linux-2.6.12/arch/x86_64/kernel/i387.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/x86_64/kernel/i387.c	2006-03-05 23:54:36.948059273 +0100
+@@ -42,7 +42,7 @@ void mxcsr_feature_mask_init(void)
+  * Called at bootup to set up the initial FPU state that is later cloned
+  * into all processes.
+  */
+-void __init fpu_init(void)
++void __cpuinit fpu_init(void)
+ {
+ 	unsigned long oldcr0 = read_cr0();
+ 	extern void __bad_fxsave_alignment(void);
+diff -Nurp pristine-linux-2.6.12/arch/x86_64/kernel/setup.c linux-2.6.12-xen/arch/x86_64/kernel/setup.c
+--- pristine-linux-2.6.12/arch/x86_64/kernel/setup.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/x86_64/kernel/setup.c	2006-03-05 23:54:35.681245945 +0100
+@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
+ 	int cpu = smp_processor_id();
+ 	int node = 0;
+ 	unsigned bits;
+-	if (c->x86_num_cores == 1)
+-		return;
+ 
+ 	bits = 0;
+ 	while ((1 << bits) < c->x86_num_cores)
+diff -Nurp pristine-linux-2.6.12/arch/x86_64/kernel/smpboot.c linux-2.6.12-xen/arch/x86_64/kernel/smpboot.c
+--- pristine-linux-2.6.12/arch/x86_64/kernel/smpboot.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/x86_64/kernel/smpboot.c	2006-03-05 23:54:35.687245061 +0100
+@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
+ {
+ 	unsigned long flags, i;
+ 
+-	if (smp_processor_id() != boot_cpu_id)
+-		return;
+-
+ 	go[MASTER] = 0;
+ 
+ 	local_irq_save(flags);
+@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
+ 	return tcenter - best_tm;
+ }
+ 
+-static __cpuinit void sync_tsc(void)
++static __cpuinit void sync_tsc(unsigned int master)
+ {
+ 	int i, done = 0;
+ 	long delta, adj, adjust_latency = 0;
+@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
+ 	} t[NUM_ROUNDS] __cpuinitdata;
+ #endif
+ 
++	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
++		smp_processor_id(), master);
++
+ 	go[MASTER] = 1;
+ 
+-	smp_call_function(sync_master, NULL, 1, 0);
++	/* It is dangerous to broadcast IPI as cpus are coming up,
++	 * as they may not be ready to accept them.  So since
++	 * we only need to send the ipi to the boot cpu direct
++	 * the message, and avoid the race.
++	 */
++	smp_call_function_single(master, sync_master, NULL, 1, 0);
+ 
+ 	while (go[MASTER])	/* wait for master to be ready */
+ 		no_cpu_relax();
+@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
+ 	printk(KERN_INFO
+ 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
+ 	       "maxerr %lu cycles)\n",
+-	       smp_processor_id(), boot_cpu_id, delta, rt);
++	       smp_processor_id(), master, delta, rt);
+ }
+ 
+ static void __cpuinit tsc_sync_wait(void)
+ {
+ 	if (notscsync || !cpu_has_tsc)
+ 		return;
+-	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
+-			boot_cpu_id);
+-	sync_tsc();
++	sync_tsc(0);
+ }
+ 
+ static __init int notscsync_setup(char *s)
+diff -Nurp pristine-linux-2.6.12/arch/x86_64/kernel/smp.c linux-2.6.12-xen/arch/x86_64/kernel/smp.c
+--- pristine-linux-2.6.12/arch/x86_64/kernel/smp.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/arch/x86_64/kernel/smp.c	2006-03-05 23:54:35.682245798 +0100
+@@ -284,6 +284,71 @@ struct call_data_struct {
+ static struct call_data_struct * call_data;
+ 
+ /*
++ * this function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ */
++static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++				int nonatomic, int wait)
++{
++	struct call_data_struct data;
++	int cpus = 1;
++
++	data.func = func;
++	data.info = info;
++	atomic_set(&data.started, 0);
++	data.wait = wait;
++	if (wait)
++		atomic_set(&data.finished, 0);
++
++	call_data = &data;
++	wmb();
++	/* Send a message to all other CPUs and wait for them to respond */
++	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++
++	/* Wait for response */
++	while (atomic_read(&data.started) != cpus)
++		cpu_relax();
++
++	if (!wait)
++		return;
++
++	while (atomic_read(&data.finished) != cpus)
++		cpu_relax();
++}
++
++/*
++ * Run a function on another CPU
++ *  <func>	The function to run. This must be fast and non-blocking.
++ *  <info>	An arbitrary pointer to pass to the function.
++ *  <nonatomic>	Currently unused.
++ *  <wait>	If true, wait until function has completed on other CPUs.
++ *  [RETURNS]   0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>
++ * or is or has executed.
++ */
++
++int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
++	int nonatomic, int wait)
++{
++	
++	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
++
++	if (cpu == me) {
++		printk("%s: trying to call self\n", __func__);
++		put_cpu();
++		return -EBUSY;
++	}
++	spin_lock_bh(&call_lock);
++
++	__smp_call_function_single(cpu, func,info,nonatomic,wait);	
++
++	spin_unlock_bh(&call_lock);
++	put_cpu();
++	return 0;
++}
++
++/*
+  * this function sends a 'generic call function' IPI to all other CPUs
+  * in the system.
+  */
+diff -Nurp pristine-linux-2.6.12/arch/xen/boot/Makefile linux-2.6.12-xen/arch/xen/boot/Makefile
+--- pristine-linux-2.6.12/arch/xen/boot/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/boot/Makefile	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,11 @@
++
++OBJCOPYFLAGS := -g --strip-unneeded
++
++vmlinuz: vmlinux-stripped FORCE
++	$(call if_changed,gzip)
++
++vmlinux-stripped: vmlinux FORCE
++	$(call if_changed,objcopy)
++
++bzImage: vmlinuz
++	$(Q)ln -sf ../../../vmlinuz $(srctree)/arch/xen/boot/bzImage
+diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_ia64 linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_ia64
+--- pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_ia64	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_ia64	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,1405 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12.6-xen0
++# Wed Dec 21 11:17:02 2005
++#
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_CLEAN_COMPILE=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++# CONFIG_CPUSETS is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++CONFIG_KALLSYMS_ALL=y
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_OBSOLETE_MODPARM=y
++CONFIG_MODVERSIONS=y
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# Processor type and features
++#
++CONFIG_IA64=y
++CONFIG_64BIT=y
++CONFIG_MMU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_TIME_INTERPOLATION=y
++CONFIG_EFI=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_XEN=y
++CONFIG_ARCH_XEN=y
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_PHYSDEV_ACCESS=y
++CONFIG_XEN_BLKDEV_GRANT=y
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_VT=y
++CONFIG_VT=y
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++# CONFIG_IA64_GENERIC is not set
++CONFIG_IA64_DIG=y
++# CONFIG_IA64_HP_ZX1 is not set
++# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
++# CONFIG_IA64_SGI_SN2 is not set
++# CONFIG_IA64_HP_SIM is not set
++# CONFIG_ITANIUM is not set
++CONFIG_MCKINLEY=y
++# CONFIG_IA64_PAGE_SIZE_4KB is not set
++# CONFIG_IA64_PAGE_SIZE_8KB is not set
++CONFIG_IA64_PAGE_SIZE_16KB=y
++# CONFIG_IA64_PAGE_SIZE_64KB is not set
++CONFIG_IA64_L1_CACHE_SHIFT=7
++# CONFIG_NUMA is not set
++CONFIG_VIRTUAL_MEM_MAP=n
++CONFIG_IA64_CYCLONE=y
++CONFIG_IOSAPIC=y
++CONFIG_FORCE_MAX_ZONEORDER=18
++CONFIG_SMP=y
++CONFIG_NR_CPUS=4
++CONFIG_HOTPLUG_CPU=y
++# CONFIG_SCHED_SMT is not set
++# CONFIG_PREEMPT is not set
++CONFIG_HAVE_DEC_LOCK=y
++# CONFIG_IA32_SUPPORT is not set
++CONFIG_IA64_MCA_RECOVERY=y
++CONFIG_PERFMON=y
++CONFIG_IA64_PALINFO=y
++CONFIG_ACPI_DEALLOCATE_IRQ=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_PCDP=y
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Power management and ACPI
++#
++CONFIG_PM=y
++CONFIG_ACPI=y
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI_BOOT=y
++CONFIG_ACPI_INTERPRETER=y
++CONFIG_ACPI_BUTTON=y
++# CONFIG_ACPI_VIDEO is not set
++CONFIG_ACPI_FAN=y
++CONFIG_ACPI_PROCESSOR=y
++# CONFIG_ACPI_HOTPLUG_CPU is not set
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_BUS=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_PCI=y
++CONFIG_ACPI_SYSTEM=y
++# CONFIG_ACPI_CONTAINER is not set
++
++#
++# Bus options (PCI, PCMCIA)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY_PROC=y
++CONFIG_PCI_NAMES=y
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++CONFIG_HOTPLUG_PCI_ACPI=y
++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_CRYPTOLOOP=y
++CONFIG_BLK_DEV_NBD=y
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CDROM_PKTCDVD is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++CONFIG_BLK_DEV_IDEFLOPPY=y
++CONFIG_BLK_DEV_IDESCSI=y
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++CONFIG_BLK_DEV_CMD64X=y
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++# CONFIG_BLK_DEV_SVWKS is not set
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=y
++CONFIG_CHR_DEV_OSST=y
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++CONFIG_SCSI_FC_ATTRS=y
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_SYM53C8XX_2=y
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
++# CONFIG_SCSI_IPR is not set
++CONFIG_SCSI_QLOGIC_FC=y
++# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
++CONFIG_SCSI_QLOGIC_1280=y
++# CONFIG_SCSI_QLOGIC_1280_1040 is not set
++CONFIG_SCSI_QLA2XXX=y
++CONFIG_SCSI_QLA21XX=y
++CONFIG_SCSI_QLA22XX=y
++CONFIG_SCSI_QLA2300=y
++CONFIG_SCSI_QLA2322=y
++# CONFIG_SCSI_QLA6312 is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++# CONFIG_BLK_DEV_MD is not set
++# CONFIG_BLK_DEV_DM is not set
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_MAX_SGE=40
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++# CONFIG_IP_PNP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++CONFIG_ARPD=y
++CONFIG_SYN_COOKIES=y
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_IP_TCPDIAG=y
++# CONFIG_IP_TCPDIAG_IPV6 is not set
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_CONNTRACK is not set
++# CONFIG_IP_NF_CONNTRACK_MARK is not set
++# CONFIG_IP_NF_QUEUE is not set
++# CONFIG_IP_NF_IPTABLES is not set
++CONFIG_IP_NF_ARPTABLES=y
++# CONFIG_IP_NF_ARPFILTER is not set
++# CONFIG_IP_NF_ARP_MANGLE is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_NET_DIVERT is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++# CONFIG_NET_CLS_ROUTE is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++# CONFIG_NETPOLL_TRAP is not set
++CONFIG_NET_POLL_CONTROLLER=y
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=y
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=y
++# CONFIG_ARCNET_1201 is not set
++# CONFIG_ARCNET_1051 is not set
++# CONFIG_ARCNET_RAW is not set
++# CONFIG_ARCNET_CAP is not set
++# CONFIG_ARCNET_COM90xx is not set
++# CONFIG_ARCNET_COM90xxIO is not set
++# CONFIG_ARCNET_RIM_I is not set
++# CONFIG_ARCNET_COM20020 is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_NET_VENDOR_3COM is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++CONFIG_TULIP_MWI=y
++CONFIG_TULIP_MMIO=y
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++CONFIG_EEPRO100=y
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++CONFIG_NETCONSOLE=y
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++# CONFIG_ISDN_I4L is not set
++
++#
++# CAPI subsystem
++#
++# CONFIG_ISDN_CAPI is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=y
++# CONFIG_INPUT_TSDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++CONFIG_GAMEPORT=y
++# CONFIG_GAMEPORT_NS558 is not set
++# CONFIG_GAMEPORT_L4 is not set
++# CONFIG_GAMEPORT_EMU10K1 is not set
++# CONFIG_GAMEPORT_VORTEX is not set
++# CONFIG_GAMEPORT_FM801 is not set
++# CONFIG_GAMEPORT_CS461X is not set
++
++#
++# Character devices
++#
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++CONFIG_SERIAL_NONSTANDARD=y
++# CONFIG_ROCKETPORT is not set
++# CONFIG_CYCLADES is not set
++# CONFIG_MOXA_SMARTIO is not set
++# CONFIG_ISI is not set
++# CONFIG_SYNCLINKMP is not set
++# CONFIG_N_HDLC is not set
++# CONFIG_SPECIALIX is not set
++# CONFIG_SX is not set
++# CONFIG_STALDRV is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_ACPI=y
++CONFIG_SERIAL_8250_NR_UARTS=6
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_MULTIPORT is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_EFI_RTC=y
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++CONFIG_AGP_I460=y
++CONFIG_DRM=y
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_R128 is not set
++# CONFIG_DRM_RADEON is not set
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_SIS is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++CONFIG_I2C_ALGOPCF=y
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_ISA is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_SCx200_ACB is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
++
++#
++# Hardware Sensors Chip support
++#
++# CONFIG_I2C_SENSOR is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_FSCHER is not set
++# CONFIG_SENSORS_FSCPOS is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++
++#
++# Other I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_RTC8564 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# Dallas's 1-wire bus
++#
++# CONFIG_W1 is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=y
++
++#
++# Video For Linux
++#
++
++#
++# Video Adapters
++#
++# CONFIG_VIDEO_BT848 is not set
++# CONFIG_VIDEO_CPIA is not set
++# CONFIG_VIDEO_SAA5246A is not set
++# CONFIG_VIDEO_SAA5249 is not set
++# CONFIG_TUNER_3036 is not set
++# CONFIG_VIDEO_STRADIS is not set
++# CONFIG_VIDEO_ZORAN is not set
++# CONFIG_VIDEO_SAA7134 is not set
++# CONFIG_VIDEO_MXB is not set
++# CONFIG_VIDEO_DPC is not set
++# CONFIG_VIDEO_HEXIUM_ORION is not set
++# CONFIG_VIDEO_HEXIUM_GEMINI is not set
++# CONFIG_VIDEO_CX88 is not set
++# CONFIG_VIDEO_OVCAMCHIP is not set
++
++#
++# Radio Adapters
++#
++# CONFIG_RADIO_GEMTEK_PCI is not set
++# CONFIG_RADIO_MAXIRADIO is not set
++# CONFIG_RADIO_MAESTRO is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++CONFIG_FB_SOFT_CURSOR=y
++# CONFIG_FB_MACMODES is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON_OLD is not set
++CONFIG_FB_RADEON=y
++CONFIG_FB_RADEON_I2C=y
++CONFIG_FB_RADEON_DEBUG=y
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_VIRTUAL is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++CONFIG_DUMMY_CONSOLE=y
++# CONFIG_FRAMEBUFFER_CONSOLE is not set
++
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++CONFIG_SND_HWDEP=y
++CONFIG_SND_RAWMIDI=y
++CONFIG_SND_SEQUENCER=y
++CONFIG_SND_SEQ_DUMMY=y
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_SEQUENCER_OSS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=y
++CONFIG_SND_OPL3_LIB=y
++CONFIG_SND_DUMMY=y
++CONFIG_SND_VIRMIDI=y
++CONFIG_SND_MTPAV=y
++CONFIG_SND_SERIAL_U16550=y
++CONFIG_SND_MPU401=y
++
++#
++# PCI devices
++#
++CONFIG_SND_AC97_CODEC=y
++# CONFIG_SND_ALI5451 is not set
++CONFIG_SND_ATIIXP=y
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_YMFPCI is not set
++# CONFIG_SND_ALS4000 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++# CONFIG_SND_MAESTRO3 is not set
++CONFIG_SND_FM801=y
++CONFIG_SND_FM801_TEA575X=y
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_HDA_INTEL is not set
++
++#
++# USB devices
++#
++# CONFIG_SND_USB_AUDIO is not set
++
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=y
++# CONFIG_SOUND_BT878 is not set
++# CONFIG_SOUND_CMPCI is not set
++# CONFIG_SOUND_EMU10K1 is not set
++# CONFIG_SOUND_FUSION is not set
++# CONFIG_SOUND_CS4281 is not set
++# CONFIG_SOUND_ES1370 is not set
++# CONFIG_SOUND_ES1371 is not set
++# CONFIG_SOUND_ESSSOLO1 is not set
++# CONFIG_SOUND_MAESTRO is not set
++# CONFIG_SOUND_MAESTRO3 is not set
++# CONFIG_SOUND_ICH is not set
++# CONFIG_SOUND_SONICVIBES is not set
++# CONFIG_SOUND_TRIDENT is not set
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++# CONFIG_SOUND_VIA82CXXX is not set
++# CONFIG_SOUND_OSS is not set
++# CONFIG_SOUND_TVMIXER is not set
++# CONFIG_SOUND_ALI5455 is not set
++# CONFIG_SOUND_FORTE is not set
++# CONFIG_SOUND_RME96XX is not set
++# CONFIG_SOUND_AD1980 is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++CONFIG_USB_AUDIO=y
++# CONFIG_USB_BLUETOOTH_TTY is not set
++# CONFIG_USB_MIDI is not set
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_ISD200 is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_MTOUCH is not set
++# CONFIG_USB_EGALAX is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Multimedia devices
++#
++# CONFIG_USB_DABUSB is not set
++# CONFIG_USB_VICAM is not set
++# CONFIG_USB_DSBR is not set
++# CONFIG_USB_IBMCAM is not set
++# CONFIG_USB_KONICAWC is not set
++# CONFIG_USB_OV511 is not set
++# CONFIG_USB_SE401 is not set
++# CONFIG_USB_SN9C102 is not set
++# CONFIG_USB_STV680 is not set
++# CONFIG_USB_PWC is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_SISUSBVGA is not set
++# CONFIG_USB_TEST is not set
++
++#
++# USB ATM/DSL drivers
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++# CONFIG_JFS_FS is not set
++CONFIG_FS_POSIX_ACL=y
++
++#
++# XFS support
++#
++CONFIG_XFS_FS=y
++CONFIG_XFS_EXPORT=y
++# CONFIG_XFS_RT is not set
++# CONFIG_XFS_QUOTA is not set
++# CONFIG_XFS_SECURITY is not set
++# CONFIG_XFS_POSIX_ACL is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++# CONFIG_ZISOFS is not set
++CONFIG_UDF_FS=y
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++# CONFIG_DEVPTS_FS_XATTR is not set
++CONFIG_TMPFS=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_TMPFS_SECURITY=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=y
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++CONFIG_SMB_FS=y
++CONFIG_SMB_NLS_DEFAULT=y
++CONFIG_SMB_NLS_REMOTE="cp437"
++CONFIG_CIFS=y
++# CONFIG_CIFS_STATS is not set
++# CONFIG_CIFS_XATTR is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++CONFIG_SGI_PARTITION=y
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=y
++CONFIG_NLS_CODEPAGE_775=y
++CONFIG_NLS_CODEPAGE_850=y
++CONFIG_NLS_CODEPAGE_852=y
++CONFIG_NLS_CODEPAGE_855=y
++CONFIG_NLS_CODEPAGE_857=y
++CONFIG_NLS_CODEPAGE_860=y
++CONFIG_NLS_CODEPAGE_861=y
++CONFIG_NLS_CODEPAGE_862=y
++CONFIG_NLS_CODEPAGE_863=y
++CONFIG_NLS_CODEPAGE_864=y
++CONFIG_NLS_CODEPAGE_865=y
++CONFIG_NLS_CODEPAGE_866=y
++CONFIG_NLS_CODEPAGE_869=y
++CONFIG_NLS_CODEPAGE_936=y
++CONFIG_NLS_CODEPAGE_950=y
++CONFIG_NLS_CODEPAGE_932=y
++CONFIG_NLS_CODEPAGE_949=y
++CONFIG_NLS_CODEPAGE_874=y
++CONFIG_NLS_ISO8859_8=y
++# CONFIG_NLS_CODEPAGE_1250 is not set
++CONFIG_NLS_CODEPAGE_1251=y
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_2=y
++CONFIG_NLS_ISO8859_3=y
++CONFIG_NLS_ISO8859_4=y
++CONFIG_NLS_ISO8859_5=y
++CONFIG_NLS_ISO8859_6=y
++CONFIG_NLS_ISO8859_7=y
++CONFIG_NLS_ISO8859_9=y
++CONFIG_NLS_ISO8859_13=y
++CONFIG_NLS_ISO8859_14=y
++CONFIG_NLS_ISO8859_15=y
++CONFIG_NLS_KOI8_R=y
++CONFIG_NLS_KOI8_U=y
++CONFIG_NLS_UTF8=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++
++#
++# Profiling support
++#
++# CONFIG_PROFILING is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=20
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++CONFIG_IA64_GRANULE_16MB=y
++# CONFIG_IA64_GRANULE_64MB is not set
++CONFIG_IA64_PRINT_HAZARDS=y
++# CONFIG_DISABLE_VHPT is not set
++# CONFIG_IA64_DEBUG_CMPXCHG is not set
++# CONFIG_IA64_DEBUG_IRQ is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
+diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_x86_32 linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_x86_32
+--- pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_x86_32	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_x86_32	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,1272 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12.6-xen0
++# Mon Nov  7 17:22:05 2005
++#
++CONFIG_XEN=y
++CONFIG_ARCH_XEN=y
++CONFIG_NO_IDLE_HZ=y
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_PHYSDEV_ACCESS=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++# CONFIG_XEN_BLKDEV_TAP_BE is not set
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_TPMDEV_FRONTEND is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SHADOW_MODE is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_X86=y
++# CONFIG_XEN_X86_64 is not set
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++# CONFIG_CLEAN_COMPILE is not set
++CONFIG_BROKEN=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++# CONFIG_IKCONFIG is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_OBSOLETE_MODPARM=y
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++
++#
++# X86 Processor Configuration
++#
++CONFIG_XENARCH="i386"
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_UID16=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++# CONFIG_X86_GENERIC is not set
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=5
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++# CONFIG_HPET_TIMER is not set
++# CONFIG_HPET_EMULATE_RTC is not set
++# CONFIG_SMP is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++CONFIG_MICROCODE=y
++CONFIG_X86_CPUID=y
++CONFIG_SWIOTLB=y
++
++#
++# Firmware Drivers
++#
++# CONFIG_EDD is not set
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_HIGHMEM=y
++CONFIG_MTRR=y
++# CONFIG_REGPARM is not set
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++CONFIG_X86_UP_APIC=y
++CONFIG_X86_UP_IOAPIC=y
++CONFIG_PCI=y
++# CONFIG_PCI_GOMMCONFIG is not set
++# CONFIG_PCI_GODIRECT is not set
++CONFIG_PCI_GOANY=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY_PROC=y
++# CONFIG_PCI_NAMES is not set
++# CONFIG_PCI_DEBUG is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_ISA=y
++# CONFIG_EISA is not set
++# CONFIG_MCA is not set
++# CONFIG_SCx200 is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# PCI Hotplug Support
++#
++# CONFIG_HOTPLUG_PCI is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_PC=y
++CONFIG_SECCOMP=y
++CONFIG_EARLY_PRINTK=y
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_AOUT is not set
++# CONFIG_BINFMT_MISC is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++# CONFIG_STANDALONE is not set
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=y
++# CONFIG_BLK_DEV_XD is not set
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_LBD is not set
++# CONFIG_CDROM_PKTCDVD is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_CMD640 is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_RZ1000 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_ATIIXP is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++CONFIG_BLK_DEV_SVWKS=y
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SIS5513 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++# CONFIG_IDE_CHIPSETS is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++CONFIG_BLK_DEV_3W_XXXX_RAID=y
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_7000FASST is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AHA152X is not set
++# CONFIG_SCSI_AHA1542 is not set
++CONFIG_SCSI_AACRAID=y
++CONFIG_SCSI_AIC7XXX=y
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++CONFIG_SCSI_AIC79XX=y
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_DPT_I2O is not set
++# CONFIG_SCSI_ADVANSYS is not set
++# CONFIG_SCSI_IN2000 is not set
++CONFIG_MEGARAID_NEWGEN=y
++# CONFIG_MEGARAID_MM is not set
++CONFIG_SCSI_SATA=y
++# CONFIG_SCSI_SATA_AHCI is not set
++# CONFIG_SCSI_SATA_SVW is not set
++CONFIG_SCSI_ATA_PIIX=y
++# CONFIG_SCSI_SATA_NV is not set
++CONFIG_SCSI_SATA_PROMISE=y
++# CONFIG_SCSI_SATA_QSTOR is not set
++CONFIG_SCSI_SATA_SX4=y
++CONFIG_SCSI_SATA_SIL=y
++# CONFIG_SCSI_SATA_SIS is not set
++# CONFIG_SCSI_SATA_ULI is not set
++# CONFIG_SCSI_SATA_VIA is not set
++# CONFIG_SCSI_SATA_VITESSE is not set
++CONFIG_SCSI_BUSLOGIC=y
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++# CONFIG_SCSI_CPQFCTS is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_DTC3280 is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_EATA_PIO is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_GENERIC_NCR5380 is not set
++# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_NCR53C406A is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_PAS16 is not set
++# CONFIG_SCSI_PCI2000 is not set
++# CONFIG_SCSI_PCI2220I is not set
++# CONFIG_SCSI_PSI240I is not set
++# CONFIG_SCSI_QLOGIC_FAS is not set
++# CONFIG_SCSI_QLOGIC_ISP is not set
++# CONFIG_SCSI_QLOGIC_FC is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++CONFIG_SCSI_QLA2XXX=y
++# CONFIG_SCSI_QLA21XX is not set
++# CONFIG_SCSI_QLA22XX is not set
++# CONFIG_SCSI_QLA2300 is not set
++# CONFIG_SCSI_QLA2322 is not set
++# CONFIG_SCSI_QLA6312 is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_SEAGATE is not set
++# CONFIG_SCSI_SYM53C416 is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_T128 is not set
++# CONFIG_SCSI_U14_34F is not set
++# CONFIG_SCSI_ULTRASTOR is not set
++# CONFIG_SCSI_NSP32 is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Old CD-ROM drivers (not SCSI, not IDE)
++#
++# CONFIG_CD_NO_IDESCSI is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++# CONFIG_MD_LINEAR is not set
++CONFIG_MD_RAID0=y
++CONFIG_MD_RAID1=y
++# CONFIG_MD_RAID10 is not set
++CONFIG_MD_RAID5=y
++# CONFIG_MD_RAID6 is not set
++# CONFIG_MD_MULTIPATH is not set
++# CONFIG_MD_FAULTY is not set
++CONFIG_BLK_DEV_DM=y
++# CONFIG_DM_CRYPT is not set
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=y
++# CONFIG_DM_ZERO is not set
++# CONFIG_DM_MULTIPATH is not set
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_MAX_SGE=40
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_IP_TCPDIAG=y
++# CONFIG_IP_TCPDIAG_IPV6 is not set
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++# CONFIG_IP_NF_CONNTRACK_MARK is not set
++# CONFIG_IP_NF_CT_PROTO_SCTP is not set
++CONFIG_IP_NF_FTP=m
++# CONFIG_IP_NF_IRC is not set
++# CONFIG_IP_NF_TFTP is not set
++# CONFIG_IP_NF_AMANDA is not set
++# CONFIG_IP_NF_QUEUE is not set
++CONFIG_IP_NF_IPTABLES=m
++# CONFIG_IP_NF_MATCH_LIMIT is not set
++CONFIG_IP_NF_MATCH_IPRANGE=m
++# CONFIG_IP_NF_MATCH_MAC is not set
++# CONFIG_IP_NF_MATCH_PKTTYPE is not set
++# CONFIG_IP_NF_MATCH_MARK is not set
++# CONFIG_IP_NF_MATCH_MULTIPORT is not set
++# CONFIG_IP_NF_MATCH_TOS is not set
++# CONFIG_IP_NF_MATCH_RECENT is not set
++# CONFIG_IP_NF_MATCH_ECN is not set
++# CONFIG_IP_NF_MATCH_DSCP is not set
++# CONFIG_IP_NF_MATCH_AH_ESP is not set
++# CONFIG_IP_NF_MATCH_LENGTH is not set
++# CONFIG_IP_NF_MATCH_TTL is not set
++# CONFIG_IP_NF_MATCH_TCPMSS is not set
++# CONFIG_IP_NF_MATCH_HELPER is not set
++# CONFIG_IP_NF_MATCH_STATE is not set
++# CONFIG_IP_NF_MATCH_CONNTRACK is not set
++# CONFIG_IP_NF_MATCH_OWNER is not set
++CONFIG_IP_NF_MATCH_PHYSDEV=m
++# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
++# CONFIG_IP_NF_MATCH_REALM is not set
++# CONFIG_IP_NF_MATCH_SCTP is not set
++# CONFIG_IP_NF_MATCH_COMMENT is not set
++# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++# CONFIG_IP_NF_TARGET_LOG is not set
++# CONFIG_IP_NF_TARGET_ULOG is not set
++# CONFIG_IP_NF_TARGET_TCPMSS is not set
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++# CONFIG_IP_NF_TARGET_REDIRECT is not set
++# CONFIG_IP_NF_TARGET_NETMAP is not set
++# CONFIG_IP_NF_TARGET_SAME is not set
++# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
++CONFIG_IP_NF_NAT_FTP=m
++# CONFIG_IP_NF_MANGLE is not set
++# CONFIG_IP_NF_RAW is not set
++# CONFIG_IP_NF_ARPTABLES is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_NET_DIVERT is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++# CONFIG_NET_CLS_ROUTE is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++CONFIG_NET_VENDOR_3COM=y
++# CONFIG_EL1 is not set
++# CONFIG_EL2 is not set
++# CONFIG_ELPLUS is not set
++# CONFIG_EL16 is not set
++# CONFIG_EL3 is not set
++# CONFIG_3C515 is not set
++CONFIG_VORTEX=y
++# CONFIG_TYPHOON is not set
++# CONFIG_LANCE is not set
++# CONFIG_NET_VENDOR_SMC is not set
++# CONFIG_NET_VENDOR_RACAL is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_AT1700 is not set
++# CONFIG_DEPCA is not set
++# CONFIG_HP100 is not set
++# CONFIG_NET_ISA is not set
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=y
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_AC3200 is not set
++# CONFIG_APRICOT is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_CS89x0 is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++CONFIG_NE2K_PCI=y
++# CONFIG_8139CP is not set
++CONFIG_8139TOO=y
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++# CONFIG_8139TOO_8129 is not set
++# CONFIG_8139_OLD_RX_RESET is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_TLAN is not set
++CONFIG_VIA_RHINE=y
++# CONFIG_VIA_RHINE_MMIO is not set
++# CONFIG_NET_POCKET is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=y
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++CONFIG_SK98LIN=y
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_INPORT is not set
++# CONFIG_MOUSE_LOGIBM is not set
++# CONFIG_MOUSE_PC110PAD is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++# CONFIG_RTC is not set
++# CONFIG_GEN_RTC is not set
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++# CONFIG_SONYPI is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_FTAPE is not set
++CONFIG_AGP=m
++CONFIG_AGP_ALI=m
++CONFIG_AGP_ATI=m
++CONFIG_AGP_AMD=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_NVIDIA=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_SWORKS=m
++CONFIG_AGP_VIA=m
++# CONFIG_AGP_EFFICEON is not set
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++# CONFIG_DRM_GAMMA is not set
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++# CONFIG_MWAVE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# Dallas's 1-wire bus
++#
++# CONFIG_W1 is not set
++
++#
++# Misc devices
++#
++# CONFIG_IBM_ASM is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++# CONFIG_FB is not set
++# CONFIG_VIDEO_SELECT is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++# CONFIG_MDA_CONSOLE is not set
++CONFIG_DUMMY_CONSOLE=y
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++# CONFIG_USB_BANDWIDTH is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_EHCI_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_BLUETOOTH_TTY is not set
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
++#
++# CONFIG_USB_STORAGE is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_HID_FF is not set
++# CONFIG_USB_HIDDEV is not set
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_MTOUCH is not set
++# CONFIG_USB_EGALAX is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Multimedia devices
++#
++# CONFIG_USB_DABUSB is not set
++
++#
++# Video4Linux support is needed for USB Multimedia device support
++#
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++
++#
++# USB ATM/DSL drivers
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# Power management options
++#
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BOOT=y
++CONFIG_ACPI_INTERPRETER=y
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++# CONFIG_ACPI_CUSTOM_DSDT is not set
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_BUS=y
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_PCI=y
++CONFIG_ACPI_SYSTEM=y
++# CONFIG_X86_PM_TIMER is not set
++# CONFIG_ACPI_CONTAINER is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++
++#
++# XFS support
++#
++# CONFIG_XFS_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++# CONFIG_DEVPTS_FS_XATTR is not set
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_XATTR is not set
++# CONFIG_HUGETLBFS is not set
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V4 is not set
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++CONFIG_CRYPTO_SHA1=m
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=m
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES_586 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=y
++CONFIG_ZLIB_INFLATE=y
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_KPROBES is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
+diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_x86_64 linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_x86_64
+--- pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_x86_64	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_x86_64	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,1184 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12.6-xen0
++# Mon Nov  7 17:24:18 2005
++#
++CONFIG_XEN=y
++CONFIG_ARCH_XEN=y
++CONFIG_NO_IDLE_HZ=y
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_PHYSDEV_ACCESS=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++# CONFIG_XEN_BLKDEV_TAP_BE is not set
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_TPMDEV_FRONTEND is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SHADOW_MODE is not set
++CONFIG_XEN_SCRUB_PAGES=y
++# CONFIG_XEN_X86 is not set
++CONFIG_XEN_X86_64=y
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++# CONFIG_CLEAN_COMPILE is not set
++CONFIG_BROKEN=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++# CONFIG_IKCONFIG is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_OBSOLETE_MODPARM=y
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_XENARCH="x86_64"
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_UID16=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_GOOD_APIC=y
++# CONFIG_HPET_TIMER is not set
++# CONFIG_SMP is not set
++CONFIG_MICROCODE=y
++# CONFIG_X86_CPUID is not set
++CONFIG_SWIOTLB=y
++# CONFIG_NUMA is not set
++# CONFIG_MTRR is not set
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_PCI=y
++CONFIG_PCI_DIRECT=y
++# CONFIG_PCI_MMCONFIG is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_SECCOMP=y
++
++#
++# X86_64 processor configuration
++#
++CONFIG_X86_64=y
++CONFIG_64BIT=y
++CONFIG_EARLY_PRINTK=y
++
++#
++# Processor type and features
++#
++# CONFIG_MPSC is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_L1_CACHE_BYTES=128
++# CONFIG_X86_TSC is not set
++CONFIG_X86_XEN_GENAPIC=y
++# CONFIG_X86_MSR is not set
++# CONFIG_GART_IOMMU is not set
++CONFIG_DUMMY_IOMMU=y
++# CONFIG_X86_MCE is not set
++
++#
++# Power management options
++#
++# CONFIG_PM is not set
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI etc.)
++#
++# CONFIG_UNORDERED_IO is not set
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_IA32_EMULATION=y
++# CONFIG_IA32_AOUT is not set
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++# CONFIG_PREVENT_FIRMWARE_BUILD is not set
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=y
++# CONFIG_BLK_CPQ_DA is not set
++CONFIG_BLK_CPQ_CISS_DA=y
++# CONFIG_CISS_SCSI_TAPE is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_LBD is not set
++# CONFIG_CDROM_PKTCDVD is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++# CONFIG_IDEDISK_MULTI_MODE is not set
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++# CONFIG_BLK_DEV_IDEFLOPPY is not set
++# CONFIG_BLK_DEV_IDESCSI is not set
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++# CONFIG_BLK_DEV_CMD640 is not set
++CONFIG_BLK_DEV_IDEPCI=y
++# CONFIG_IDEPCI_SHARE_IRQ is not set
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++# CONFIG_BLK_DEV_RZ1000 is not set
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++# CONFIG_BLK_DEV_AEC62XX is not set
++# CONFIG_BLK_DEV_ALI15X3 is not set
++# CONFIG_BLK_DEV_AMD74XX is not set
++# CONFIG_BLK_DEV_ATIIXP is not set
++# CONFIG_BLK_DEV_CMD64X is not set
++# CONFIG_BLK_DEV_TRIFLEX is not set
++# CONFIG_BLK_DEV_CY82C693 is not set
++# CONFIG_BLK_DEV_CS5520 is not set
++# CONFIG_BLK_DEV_CS5530 is not set
++# CONFIG_BLK_DEV_HPT34X is not set
++# CONFIG_BLK_DEV_HPT366 is not set
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_NS87415 is not set
++# CONFIG_BLK_DEV_PDC202XX_OLD is not set
++# CONFIG_BLK_DEV_PDC202XX_NEW is not set
++CONFIG_BLK_DEV_SVWKS=y
++# CONFIG_BLK_DEV_SIIMAGE is not set
++# CONFIG_BLK_DEV_SIS5513 is not set
++# CONFIG_BLK_DEV_SLC90E66 is not set
++# CONFIG_BLK_DEV_TRM290 is not set
++# CONFIG_BLK_DEV_VIA82CXXX is not set
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++CONFIG_BLK_DEV_3W_XXXX_RAID=y
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++CONFIG_SCSI_AACRAID=y
++CONFIG_SCSI_AIC7XXX=y
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++CONFIG_SCSI_AIC79XX=y
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
++# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++# CONFIG_SCSI_ADVANSYS is not set
++CONFIG_MEGARAID_NEWGEN=y
++# CONFIG_MEGARAID_MM is not set
++CONFIG_SCSI_SATA=y
++# CONFIG_SCSI_SATA_AHCI is not set
++# CONFIG_SCSI_SATA_SVW is not set
++CONFIG_SCSI_ATA_PIIX=y
++# CONFIG_SCSI_SATA_NV is not set
++CONFIG_SCSI_SATA_PROMISE=y
++# CONFIG_SCSI_SATA_QSTOR is not set
++CONFIG_SCSI_SATA_SX4=y
++CONFIG_SCSI_SATA_SIL=y
++# CONFIG_SCSI_SATA_SIS is not set
++# CONFIG_SCSI_SATA_ULI is not set
++# CONFIG_SCSI_SATA_VIA is not set
++# CONFIG_SCSI_SATA_VITESSE is not set
++CONFIG_SCSI_BUSLOGIC=y
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++# CONFIG_SCSI_CPQFCTS is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_EATA_PIO is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_GDTH is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++# CONFIG_SCSI_SYM53C8XX_2 is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_PCI2000 is not set
++# CONFIG_SCSI_PCI2220I is not set
++# CONFIG_SCSI_QLOGIC_ISP is not set
++# CONFIG_SCSI_QLOGIC_FC is not set
++# CONFIG_SCSI_QLOGIC_1280 is not set
++CONFIG_SCSI_QLA2XXX=y
++# CONFIG_SCSI_QLA21XX is not set
++# CONFIG_SCSI_QLA22XX is not set
++# CONFIG_SCSI_QLA2300 is not set
++# CONFIG_SCSI_QLA2322 is not set
++# CONFIG_SCSI_QLA6312 is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=y
++CONFIG_MD_RAID0=y
++CONFIG_MD_RAID1=y
++# CONFIG_MD_RAID10 is not set
++# CONFIG_MD_RAID5 is not set
++# CONFIG_MD_RAID6 is not set
++CONFIG_MD_MULTIPATH=y
++# CONFIG_MD_FAULTY is not set
++CONFIG_BLK_DEV_DM=y
++CONFIG_DM_CRYPT=y
++CONFIG_DM_SNAPSHOT=y
++CONFIG_DM_MIRROR=y
++# CONFIG_DM_ZERO is not set
++CONFIG_DM_MULTIPATH=y
++CONFIG_DM_MULTIPATH_EMC=y
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_MAX_SGE=40
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_IP_TCPDIAG=y
++# CONFIG_IP_TCPDIAG_IPV6 is not set
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++# CONFIG_IP_NF_CONNTRACK_MARK is not set
++# CONFIG_IP_NF_CT_PROTO_SCTP is not set
++CONFIG_IP_NF_FTP=m
++# CONFIG_IP_NF_IRC is not set
++# CONFIG_IP_NF_TFTP is not set
++# CONFIG_IP_NF_AMANDA is not set
++# CONFIG_IP_NF_QUEUE is not set
++CONFIG_IP_NF_IPTABLES=m
++# CONFIG_IP_NF_MATCH_LIMIT is not set
++CONFIG_IP_NF_MATCH_IPRANGE=m
++# CONFIG_IP_NF_MATCH_MAC is not set
++# CONFIG_IP_NF_MATCH_PKTTYPE is not set
++# CONFIG_IP_NF_MATCH_MARK is not set
++# CONFIG_IP_NF_MATCH_MULTIPORT is not set
++# CONFIG_IP_NF_MATCH_TOS is not set
++# CONFIG_IP_NF_MATCH_RECENT is not set
++# CONFIG_IP_NF_MATCH_ECN is not set
++# CONFIG_IP_NF_MATCH_DSCP is not set
++# CONFIG_IP_NF_MATCH_AH_ESP is not set
++# CONFIG_IP_NF_MATCH_LENGTH is not set
++# CONFIG_IP_NF_MATCH_TTL is not set
++# CONFIG_IP_NF_MATCH_TCPMSS is not set
++# CONFIG_IP_NF_MATCH_HELPER is not set
++# CONFIG_IP_NF_MATCH_STATE is not set
++# CONFIG_IP_NF_MATCH_CONNTRACK is not set
++# CONFIG_IP_NF_MATCH_OWNER is not set
++CONFIG_IP_NF_MATCH_PHYSDEV=m
++# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
++# CONFIG_IP_NF_MATCH_REALM is not set
++# CONFIG_IP_NF_MATCH_SCTP is not set
++# CONFIG_IP_NF_MATCH_COMMENT is not set
++# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++# CONFIG_IP_NF_TARGET_LOG is not set
++# CONFIG_IP_NF_TARGET_ULOG is not set
++# CONFIG_IP_NF_TARGET_TCPMSS is not set
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++# CONFIG_IP_NF_TARGET_REDIRECT is not set
++# CONFIG_IP_NF_TARGET_NETMAP is not set
++# CONFIG_IP_NF_TARGET_SAME is not set
++# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
++CONFIG_IP_NF_NAT_FTP=m
++# CONFIG_IP_NF_MANGLE is not set
++# CONFIG_IP_NF_RAW is not set
++# CONFIG_IP_NF_ARPTABLES is not set
++
++#
++# Bridge: Netfilter Configuration
++#
++# CONFIG_BRIDGE_NF_EBTABLES is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++# CONFIG_ATM is not set
++CONFIG_BRIDGE=y
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_NET_DIVERT is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++# CONFIG_NET_CLS_ROUTE is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++CONFIG_TUN=y
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=y
++# CONFIG_TYPHOON is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=y
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++CONFIG_NE2K_PCI=y
++# CONFIG_8139CP is not set
++CONFIG_8139TOO=y
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++# CONFIG_8139TOO_8129 is not set
++# CONFIG_8139_OLD_RX_RESET is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++CONFIG_VIA_RHINE=y
++# CONFIG_VIA_RHINE_MMIO is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=y
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++CONFIG_SK98LIN=y
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++# CONFIG_INPUT_JOYDEV is not set
++# CONFIG_INPUT_TSDEV is not set
++# CONFIG_INPUT_EVDEV is not set
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++# CONFIG_MOUSE_SERIAL is not set
++# CONFIG_MOUSE_VSXXXAA is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++# CONFIG_NVRAM is not set
++CONFIG_RTC=y
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_FTAPE is not set
++CONFIG_AGP=m
++CONFIG_AGP_AMD64=m
++# CONFIG_AGP_INTEL is not set
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++# CONFIG_DRM_GAMMA is not set
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++# CONFIG_MWAVE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++
++#
++# I2C support
++#
++# CONFIG_I2C is not set
++
++#
++# Dallas's 1-wire bus
++#
++# CONFIG_W1 is not set
++
++#
++# Misc devices
++#
++# CONFIG_IBM_ASM is not set
++
++#
++# Multimedia devices
++#
++# CONFIG_VIDEO_DEV is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++# CONFIG_FB is not set
++# CONFIG_VIDEO_SELECT is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++CONFIG_DUMMY_CONSOLE=y
++
++#
++# Sound
++#
++# CONFIG_SOUND is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++# CONFIG_USB_BANDWIDTH is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++# CONFIG_USB_EHCI_HCD is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_BLUETOOTH_TTY is not set
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
++#
++# CONFIG_USB_STORAGE is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_HID_FF is not set
++# CONFIG_USB_HIDDEV is not set
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_MTOUCH is not set
++# CONFIG_USB_EGALAX is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Multimedia devices
++#
++# CONFIG_USB_DABUSB is not set
++
++#
++# Video4Linux support is needed for USB Multimedia device support
++#
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++
++#
++# USB ATM/DSL drivers
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# InfiniBand support
++#
++CONFIG_INFINIBAND=y
++CONFIG_INFINIBAND_MTHCA=y
++CONFIG_INFINIBAND_MTHCA_DEBUG=y
++CONFIG_INFINIBAND_IPOIB=y
++CONFIG_INFINIBAND_IPOIB_DEBUG=y
++CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
++
++#
++# Power management options
++#
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BOOT=y
++CONFIG_ACPI_INTERPRETER=y
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_BUS=y
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_PCI=y
++CONFIG_ACPI_SYSTEM=y
++# CONFIG_ACPI_CONTAINER is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++
++#
++# XFS support
++#
++# CONFIG_XFS_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++# CONFIG_DEVPTS_FS_XATTR is not set
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_XATTR is not set
++# CONFIG_HUGETLBFS is not set
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V4 is not set
++CONFIG_NFSD_TCP=y
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++CONFIG_CRYPTO_SHA1=m
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=m
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=15
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_KPROBES is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
++# CONFIG_CHECKING is not set
++# CONFIG_INIT_DEBUG is not set
+diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen_defconfig_x86_32 linux-2.6.12-xen/arch/xen/configs/xen_defconfig_x86_32
+--- pristine-linux-2.6.12/arch/xen/configs/xen_defconfig_x86_32	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/configs/xen_defconfig_x86_32	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,2965 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12.6-xen
++# Mon Dec 12 10:42:00 2005
++#
++CONFIG_XEN=y
++CONFIG_ARCH_XEN=y
++CONFIG_NO_IDLE_HZ=y
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_PHYSDEV_ACCESS=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++# CONFIG_XEN_BLKDEV_TAP_BE is not set
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_TPMDEV_FRONTEND is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SHADOW_MODE is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_X86=y
++# CONFIG_XEN_X86_64 is not set
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++# CONFIG_CLEAN_COMPILE is not set
++CONFIG_BROKEN=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++CONFIG_EMBEDDED=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_OBSOLETE_MODPARM=y
++CONFIG_MODVERSIONS=y
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# X86 Processor Configuration
++#
++CONFIG_XENARCH="i386"
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_UID16=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++# CONFIG_X86_GENERIC is not set
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=5
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++# CONFIG_HPET_TIMER is not set
++# CONFIG_HPET_EMULATE_RTC is not set
++CONFIG_SMP=y
++CONFIG_SMP_ALTERNATIVES=y
++CONFIG_NR_CPUS=8
++# CONFIG_SCHED_SMT is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++CONFIG_MICROCODE=y
++CONFIG_X86_CPUID=m
++CONFIG_SWIOTLB=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=m
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_HIGHMEM=y
++CONFIG_MTRR=y
++CONFIG_HAVE_DEC_LOCK=y
++# CONFIG_REGPARM is not set
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_HOTPLUG_CPU=y
++
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++CONFIG_PCI=y
++# CONFIG_PCI_GOMMCONFIG is not set
++# CONFIG_PCI_GODIRECT is not set
++CONFIG_PCI_GOANY=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_MSI is not set
++# CONFIG_PCI_LEGACY_PROC is not set
++CONFIG_PCI_NAMES=y
++# CONFIG_PCI_DEBUG is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_ISA=y
++# CONFIG_EISA is not set
++# CONFIG_MCA is not set
++CONFIG_SCx200=m
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++CONFIG_PCCARD=m
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_PCMCIA=m
++CONFIG_CARDBUS=y
++
++#
++# PC-card bridges
++#
++CONFIG_YENTA=m
++CONFIG_PD6729=m
++CONFIG_I82092=m
++CONFIG_I82365=m
++CONFIG_TCIC=m
++CONFIG_PCMCIA_PROBE=y
++CONFIG_PCCARD_NONSTATIC=m
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=m
++CONFIG_HOTPLUG_PCI_FAKE=m
++# CONFIG_HOTPLUG_PCI_ACPI is not set
++CONFIG_HOTPLUG_PCI_CPCI=y
++CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
++CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
++CONFIG_HOTPLUG_PCI_SHPC=m
++# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_SECCOMP=y
++# CONFIG_EARLY_PRINTK is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_AOUT=m
++CONFIG_BINFMT_MISC=m
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=m
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++CONFIG_MTD=m
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=m
++CONFIG_MTD_PARTITIONS=y
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_AMDSTD_RETRY=0
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++CONFIG_MTD_ROM=m
++CONFIG_MTD_ABSENT=m
++# CONFIG_MTD_OBSOLETE_CHIPS is not set
++
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_PHYSMAP=m
++CONFIG_MTD_PHYSMAP_START=0x8000000
++CONFIG_MTD_PHYSMAP_LEN=0x4000000
++CONFIG_MTD_PHYSMAP_BANKWIDTH=2
++CONFIG_MTD_PNC2000=m
++CONFIG_MTD_SC520CDP=m
++CONFIG_MTD_NETSC520=m
++CONFIG_MTD_TS5500=m
++CONFIG_MTD_SBC_GXX=m
++CONFIG_MTD_ELAN_104NC=m
++CONFIG_MTD_SCx200_DOCFLASH=m
++# CONFIG_MTD_AMD76XROM is not set
++# CONFIG_MTD_ICHXROM is not set
++# CONFIG_MTD_SCB2_FLASH is not set
++CONFIG_MTD_NETtel=m
++CONFIG_MTD_DILNETPC=m
++CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
++# CONFIG_MTD_L440GX is not set
++CONFIG_MTD_PCI=m
++CONFIG_MTD_PCMCIA=m
++
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++CONFIG_MTD_SLRAM=m
++CONFIG_MTD_PHRAM=m
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++CONFIG_MTD_BLKMTD=m
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOC2000=m
++CONFIG_MTD_DOC2001=m
++CONFIG_MTD_DOC2001PLUS=m
++CONFIG_MTD_DOCPROBE=m
++CONFIG_MTD_DOCECC=m
++# CONFIG_MTD_DOCPROBE_ADVANCED is not set
++CONFIG_MTD_DOCPROBE_ADDRESS=0
++
++#
++# NAND Flash Device Drivers
++#
++CONFIG_MTD_NAND=m
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++CONFIG_MTD_NAND_IDS=m
++CONFIG_MTD_NAND_DISKONCHIP=m
++# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
++CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
++# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
++# CONFIG_MTD_NAND_NANDSIM is not set
++
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++CONFIG_PARPORT_SERIAL=m
++CONFIG_PARPORT_PC_FIFO=y
++# CONFIG_PARPORT_PC_SUPERIO is not set
++CONFIG_PARPORT_PC_PCMCIA=m
++CONFIG_PARPORT_NOT_PC=y
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_1284=y
++
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++# CONFIG_PNP_DEBUG is not set
++
++#
++# Protocols
++#
++CONFIG_ISAPNP=y
++# CONFIG_PNPBIOS is not set
++# CONFIG_PNPACPI is not set
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=m
++CONFIG_BLK_DEV_XD=m
++CONFIG_PARIDE=m
++CONFIG_PARIDE_PARPORT=m
++
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
++
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_BPCK6=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++# CONFIG_PARIDE_EPATC8 is not set
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SX8=m
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_LBD=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_ATA_OVER_ETH=m
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++CONFIG_IDEDISK_MULTI_MODE=y
++CONFIG_BLK_DEV_IDECS=m
++CONFIG_BLK_DEV_IDECD=y
++CONFIG_BLK_DEV_IDETAPE=m
++CONFIG_BLK_DEV_IDEFLOPPY=y
++CONFIG_BLK_DEV_IDESCSI=m
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++CONFIG_BLK_DEV_CMD640=y
++CONFIG_BLK_DEV_CMD640_ENHANCED=y
++CONFIG_BLK_DEV_IDEPNP=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++CONFIG_BLK_DEV_OPTI621=m
++CONFIG_BLK_DEV_RZ1000=y
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++CONFIG_BLK_DEV_AEC62XX=y
++CONFIG_BLK_DEV_ALI15X3=y
++# CONFIG_WDC_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=y
++CONFIG_BLK_DEV_ATIIXP=y
++CONFIG_BLK_DEV_CMD64X=y
++CONFIG_BLK_DEV_TRIFLEX=y
++CONFIG_BLK_DEV_CY82C693=y
++CONFIG_BLK_DEV_CS5520=y
++CONFIG_BLK_DEV_CS5530=y
++CONFIG_BLK_DEV_HPT34X=y
++# CONFIG_HPT34X_AUTODMA is not set
++CONFIG_BLK_DEV_HPT366=y
++CONFIG_BLK_DEV_SC1200=m
++CONFIG_BLK_DEV_PIIX=y
++CONFIG_BLK_DEV_NS87415=m
++CONFIG_BLK_DEV_PDC202XX_OLD=y
++CONFIG_PDC202XX_BURST=y
++CONFIG_BLK_DEV_PDC202XX_NEW=y
++CONFIG_PDC202XX_FORCE=y
++CONFIG_BLK_DEV_SVWKS=y
++CONFIG_BLK_DEV_SIIMAGE=y
++CONFIG_BLK_DEV_SIS5513=y
++CONFIG_BLK_DEV_SLC90E66=y
++CONFIG_BLK_DEV_TRM290=m
++CONFIG_BLK_DEV_VIA82CXXX=y
++# CONFIG_IDE_ARM is not set
++# CONFIG_IDE_CHIPSETS is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=m
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++# CONFIG_SCSI_7000FASST is not set
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AHA152X=m
++# CONFIG_SCSI_AHA1542 is not set
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_AIC7XXX_OLD=m
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++CONFIG_AIC79XX_ENABLE_RD_STRM=y
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_DPT_I2O=m
++CONFIG_SCSI_ADVANSYS=m
++CONFIG_SCSI_IN2000=m
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_SCSI_SATA=y
++CONFIG_SCSI_SATA_AHCI=m
++CONFIG_SCSI_SATA_SVW=m
++CONFIG_SCSI_ATA_PIIX=m
++CONFIG_SCSI_SATA_NV=m
++CONFIG_SCSI_SATA_PROMISE=m
++# CONFIG_SCSI_SATA_QSTOR is not set
++CONFIG_SCSI_SATA_SX4=m
++CONFIG_SCSI_SATA_SIL=m
++CONFIG_SCSI_SATA_SIS=m
++CONFIG_SCSI_SATA_ULI=m
++CONFIG_SCSI_SATA_VIA=m
++CONFIG_SCSI_SATA_VITESSE=m
++CONFIG_SCSI_BUSLOGIC=m
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++# CONFIG_SCSI_CPQFCTS is not set
++CONFIG_SCSI_DMX3191D=m
++CONFIG_SCSI_DTC3280=m
++CONFIG_SCSI_EATA=m
++CONFIG_SCSI_EATA_TAGGED_QUEUE=y
++CONFIG_SCSI_EATA_LINKED_COMMANDS=y
++CONFIG_SCSI_EATA_MAX_TAGS=16
++CONFIG_SCSI_EATA_PIO=m
++CONFIG_SCSI_FUTURE_DOMAIN=m
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_GENERIC_NCR5380=m
++CONFIG_SCSI_GENERIC_NCR5380_MMIO=m
++CONFIG_SCSI_GENERIC_NCR53C400=y
++CONFIG_SCSI_IPS=m
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_NCR53C406A=m
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
++CONFIG_SCSI_IPR=m
++# CONFIG_SCSI_IPR_TRACE is not set
++# CONFIG_SCSI_IPR_DUMP is not set
++CONFIG_SCSI_PAS16=m
++# CONFIG_SCSI_PCI2000 is not set
++# CONFIG_SCSI_PCI2220I is not set
++CONFIG_SCSI_PSI240I=m
++CONFIG_SCSI_QLOGIC_FAS=m
++CONFIG_SCSI_QLOGIC_ISP=m
++CONFIG_SCSI_QLOGIC_FC=m
++CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLOGIC_1280_1040=y
++CONFIG_SCSI_QLA2XXX=m
++CONFIG_SCSI_QLA21XX=m
++CONFIG_SCSI_QLA22XX=m
++CONFIG_SCSI_QLA2300=m
++CONFIG_SCSI_QLA2322=m
++CONFIG_SCSI_QLA6312=m
++CONFIG_SCSI_LPFC=m
++# CONFIG_SCSI_SEAGATE is not set
++CONFIG_SCSI_SYM53C416=m
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_DC390T=m
++CONFIG_SCSI_T128=m
++CONFIG_SCSI_U14_34F=m
++CONFIG_SCSI_U14_34F_TAGGED_QUEUE=y
++CONFIG_SCSI_U14_34F_LINKED_COMMANDS=y
++CONFIG_SCSI_U14_34F_MAX_TAGS=8
++# CONFIG_SCSI_ULTRASTOR is not set
++CONFIG_SCSI_NSP32=m
++CONFIG_SCSI_DEBUG=m
++
++#
++# PCMCIA SCSI adapter support
++#
++CONFIG_PCMCIA_AHA152X=m
++CONFIG_PCMCIA_FDOMAIN=m
++CONFIG_PCMCIA_NINJA_SCSI=m
++CONFIG_PCMCIA_QLOGIC=m
++CONFIG_PCMCIA_SYM53C500=m
++
++#
++# Old CD-ROM drivers (not SCSI, not IDE)
++#
++CONFIG_CD_NO_IDESCSI=y
++CONFIG_AZTCD=m
++CONFIG_GSCD=m
++# CONFIG_SBPCD is not set
++CONFIG_MCDX=m
++CONFIG_OPTCD=m
++# CONFIG_CM206 is not set
++CONFIG_SJCD=m
++CONFIG_ISP16_CDI=m
++CONFIG_CDU31A=m
++CONFIG_CDU535=m
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=m
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID5=m
++CONFIG_MD_RAID6=m
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_BLK_DEV_DM=m
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=m
++CONFIG_FUSION_MAX_SGE=40
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_IEEE1394=m
++
++#
++# Subsystem Options
++#
++# CONFIG_IEEE1394_VERBOSEDEBUG is not set
++# CONFIG_IEEE1394_OUI_DB is not set
++CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
++CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
++
++#
++# Device Drivers
++#
++CONFIG_IEEE1394_PCILYNX=m
++CONFIG_IEEE1394_OHCI1394=m
++
++#
++# Protocol Drivers
++#
++CONFIG_IEEE1394_VIDEO1394=m
++CONFIG_IEEE1394_SBP2=m
++# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
++CONFIG_IEEE1394_ETH1394=m
++CONFIG_IEEE1394_DV1394=m
++CONFIG_IEEE1394_RAWIO=m
++CONFIG_IEEE1394_CMP=m
++CONFIG_IEEE1394_AMDTP=m
++
++#
++# I2O device support
++#
++CONFIG_I2O=m
++CONFIG_I2O_CONFIG=m
++CONFIG_I2O_BLOCK=m
++CONFIG_I2O_SCSI=m
++CONFIG_I2O_PROC=m
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=m
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=m
++CONFIG_NET_KEY=m
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++CONFIG_IP_ROUTE_VERBOSE=y
++# CONFIG_IP_PNP is not set
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_TUNNEL=m
++CONFIG_IP_TCPDIAG=m
++CONFIG_IP_TCPDIAG_IPV6=y
++
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IPV6=m
++CONFIG_IPV6_PRIVACY=y
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CT_PROTO_SCTP=m
++CONFIG_IP_NF_FTP=m
++CONFIG_IP_NF_IRC=m
++CONFIG_IP_NF_TFTP=m
++CONFIG_IP_NF_AMANDA=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_LIMIT=m
++CONFIG_IP_NF_MATCH_IPRANGE=m
++CONFIG_IP_NF_MATCH_MAC=m
++CONFIG_IP_NF_MATCH_PKTTYPE=m
++CONFIG_IP_NF_MATCH_MARK=m
++CONFIG_IP_NF_MATCH_MULTIPORT=m
++CONFIG_IP_NF_MATCH_TOS=m
++CONFIG_IP_NF_MATCH_RECENT=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_DSCP=m
++CONFIG_IP_NF_MATCH_AH_ESP=m
++CONFIG_IP_NF_MATCH_LENGTH=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_MATCH_TCPMSS=m
++CONFIG_IP_NF_MATCH_HELPER=m
++CONFIG_IP_NF_MATCH_STATE=m
++CONFIG_IP_NF_MATCH_CONNTRACK=m
++CONFIG_IP_NF_MATCH_OWNER=m
++CONFIG_IP_NF_MATCH_PHYSDEV=m
++CONFIG_IP_NF_MATCH_ADDRTYPE=m
++CONFIG_IP_NF_MATCH_REALM=m
++CONFIG_IP_NF_MATCH_SCTP=m
++CONFIG_IP_NF_MATCH_COMMENT=m
++CONFIG_IP_NF_MATCH_CONNMARK=m
++CONFIG_IP_NF_MATCH_HASHLIMIT=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_TCPMSS=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_SAME=m
++CONFIG_IP_NF_NAT_SNMP_BASIC=m
++CONFIG_IP_NF_NAT_IRC=m
++CONFIG_IP_NF_NAT_FTP=m
++CONFIG_IP_NF_NAT_TFTP=m
++CONFIG_IP_NF_NAT_AMANDA=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_TOS=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_DSCP=m
++CONFIG_IP_NF_TARGET_MARK=m
++CONFIG_IP_NF_TARGET_CLASSIFY=m
++CONFIG_IP_NF_TARGET_CONNMARK=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_TARGET_NOTRACK=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_LIMIT=m
++CONFIG_IP6_NF_MATCH_MAC=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_MULTIPORT=m
++CONFIG_IP6_NF_MATCH_OWNER=m
++CONFIG_IP6_NF_MATCH_MARK=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_AHESP=m
++CONFIG_IP6_NF_MATCH_LENGTH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_MATCH_PHYSDEV=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_TARGET_MARK=m
++CONFIG_IP6_NF_RAW=m
++
++#
++# DECnet: Netfilter Configuration
++#
++CONFIG_DECNET_NF_GRABULATOR=m
++
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++# CONFIG_BRIDGE_EBT_ULOG is not set
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
++CONFIG_ATM=y
++CONFIG_ATM_CLIP=y
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++CONFIG_ATM_LANE=m
++CONFIG_ATM_MPOA=m
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++CONFIG_DECNET=m
++# CONFIG_DECNET_ROUTER is not set
++CONFIG_LLC=y
++CONFIG_LLC2=m
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=y
++CONFIG_LTPC=m
++CONFIG_COPS=m
++CONFIG_COPS_DAYNA=y
++CONFIG_COPS_TANGENT=y
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++CONFIG_X25=m
++CONFIG_LAPB=m
++# CONFIG_NET_DIVERT is not set
++CONFIG_ECONET=m
++CONFIG_ECONET_AUNUDP=y
++CONFIG_ECONET_NATIVE=y
++CONFIG_WAN_ROUTER=m
++
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CLK_JIFFIES=y
++# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
++# CONFIG_NET_SCH_CLK_CPU is not set
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_INGRESS=m
++CONFIG_NET_QOS=y
++CONFIG_NET_ESTIMATOR=y
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++# CONFIG_CLS_U32_PERF is not set
++# CONFIG_NET_CLS_IND is not set
++# CONFIG_CLS_U32_MARK is not set
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_CLS_POLICE=y
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++# CONFIG_NETPOLL_TRAP is not set
++CONFIG_NET_POLL_CONTROLLER=y
++CONFIG_HAMRADIO=y
++
++#
++# Packet Radio protocols
++#
++CONFIG_AX25=m
++# CONFIG_AX25_DAMA_SLAVE is not set
++CONFIG_NETROM=m
++CONFIG_ROSE=m
++
++#
++# AX.25 network device drivers
++#
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++# CONFIG_DMASCC is not set
++CONFIG_SCC=m
++# CONFIG_SCC_DELAY is not set
++# CONFIG_SCC_TRXECHO is not set
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_BAYCOM_EPP=m
++CONFIG_YAM=m
++CONFIG_IRDA=m
++
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++# CONFIG_IRDA_ULTRA is not set
++
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++CONFIG_IRDA_DEBUG=y
++
++#
++# Infrared-port device drivers
++#
++
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
++
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++
++#
++# Old SIR device drivers
++#
++CONFIG_IRPORT_SIR=m
++
++#
++# Old Serial dongle support
++#
++# CONFIG_DONGLE_OLD is not set
++
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++# CONFIG_TOSHIBA_FIR is not set
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUSB=m
++CONFIG_BT_HCIUSB_SCO=y
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++# CONFIG_BT_HCIUART_BCSP_TXCRC is not set
++CONFIG_BT_HCIBCM203X=m
++# CONFIG_BT_HCIBPA10X is not set
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIDTL1=m
++CONFIG_BT_HCIBT3C=m
++CONFIG_BT_HCIBLUECARD=m
++CONFIG_BT_HCIBTUART=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++CONFIG_NET_SB1000=m
++
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=m
++CONFIG_ARCNET_1201=m
++CONFIG_ARCNET_1051=m
++CONFIG_ARCNET_RAW=m
++# CONFIG_ARCNET_CAP is not set
++CONFIG_ARCNET_COM90xx=m
++CONFIG_ARCNET_COM90xxIO=m
++CONFIG_ARCNET_RIM_I=m
++CONFIG_ARCNET_COM20020=m
++CONFIG_ARCNET_COM20020_ISA=m
++CONFIG_ARCNET_COM20020_PCI=m
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=m
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_EL1=m
++CONFIG_EL2=m
++# CONFIG_ELPLUS is not set
++CONFIG_EL16=m
++CONFIG_EL3=m
++# CONFIG_3C515 is not set
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++# CONFIG_LANCE is not set
++CONFIG_NET_VENDOR_SMC=y
++CONFIG_WD80x3=m
++CONFIG_ULTRA=m
++CONFIG_SMC9194=m
++CONFIG_NET_VENDOR_RACAL=y
++CONFIG_NI5010=m
++CONFIG_NI52=m
++# CONFIG_NI65 is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_PCMCIA_XIRCOM=m
++# CONFIG_PCMCIA_XIRTULIP is not set
++CONFIG_AT1700=m
++CONFIG_DEPCA=m
++CONFIG_HP100=m
++CONFIG_NET_ISA=y
++CONFIG_E2100=m
++CONFIG_EWRK3=m
++CONFIG_EEXPRESS=m
++CONFIG_EEXPRESS_PRO=m
++CONFIG_HPLAN_PLUS=m
++CONFIG_HPLAN=m
++CONFIG_LP486E=m
++CONFIG_ETH16I=m
++CONFIG_NE2000=m
++CONFIG_ZNET=m
++CONFIG_SEEQ8005=m
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++# CONFIG_AMD8111E_NAPI is not set
++CONFIG_ADAPTEC_STARFIRE=m
++# CONFIG_ADAPTEC_STARFIRE_NAPI is not set
++CONFIG_AC3200=m
++CONFIG_APRICOT=m
++CONFIG_B44=m
++CONFIG_FORCEDETH=m
++CONFIG_CS89x0=m
++# CONFIG_DGRS is not set
++CONFIG_EEPRO100=m
++CONFIG_E100=m
++CONFIG_FEALNX=m
++CONFIG_NATSEMI=m
++CONFIG_NE2K_PCI=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++CONFIG_8139TOO_PIO=y
++CONFIG_8139TOO_TUNE_TWISTER=y
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_SIS900=m
++CONFIG_EPIC100=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_TLAN=m
++CONFIG_VIA_RHINE=m
++# CONFIG_VIA_RHINE_MMIO is not set
++CONFIG_NET_POCKET=y
++CONFIG_ATP=m
++CONFIG_DE600=m
++CONFIG_DE620=m
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++CONFIG_DL2K=m
++CONFIG_E1000=m
++# CONFIG_E1000_NAPI is not set
++CONFIG_NS83820=m
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_R8169=m
++# CONFIG_R8169_NAPI is not set
++# CONFIG_R8169_VLAN is not set
++CONFIG_SK98LIN=m
++CONFIG_VIA_VELOCITY=m
++CONFIG_TIGON3=m
++CONFIG_BNX2=m
++
++#
++# Ethernet (10000 Mbit)
++#
++CONFIG_IXGB=m
++# CONFIG_IXGB_NAPI is not set
++CONFIG_S2IO=m
++# CONFIG_S2IO_NAPI is not set
++# CONFIG_2BUFF_MODE is not set
++
++#
++# Token Ring devices
++#
++CONFIG_TR=y
++CONFIG_IBMTR=m
++CONFIG_IBMOL=m
++CONFIG_IBMLS=m
++CONFIG_3C359=m
++CONFIG_TMS380TR=m
++CONFIG_TMSPCI=m
++CONFIG_SKISA=m
++CONFIG_PROTEON=m
++CONFIG_ABYSS=m
++# CONFIG_SMCTR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++CONFIG_NET_RADIO=y
++
++#
++# Obsolete Wireless cards support (pre-802.11)
++#
++CONFIG_STRIP=m
++CONFIG_ARLAN=m
++CONFIG_WAVELAN=m
++CONFIG_PCMCIA_WAVELAN=m
++CONFIG_PCMCIA_NETWAVE=m
++
++#
++# Wireless 802.11 Frequency Hopping cards support
++#
++CONFIG_PCMCIA_RAYCS=m
++
++#
++# Wireless 802.11b ISA/PCI cards support
++#
++CONFIG_AIRO=m
++CONFIG_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
++
++#
++# Wireless 802.11b Pcmcia/Cardbus cards support
++#
++CONFIG_PCMCIA_HERMES=m
++CONFIG_AIRO_CS=m
++CONFIG_PCMCIA_ATMEL=m
++CONFIG_PCMCIA_WL3501=m
++
++#
++# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
++#
++CONFIG_PRISM54=m
++CONFIG_NET_WIRELESS=y
++
++#
++# PCMCIA network device support
++#
++CONFIG_NET_PCMCIA=y
++CONFIG_PCMCIA_3C589=m
++CONFIG_PCMCIA_3C574=m
++CONFIG_PCMCIA_FMVJ18X=m
++CONFIG_PCMCIA_PCNET=m
++CONFIG_PCMCIA_NMCLAN=m
++CONFIG_PCMCIA_SMC91C92=m
++CONFIG_PCMCIA_XIRC2PS=m
++CONFIG_PCMCIA_AXNET=m
++CONFIG_ARCNET_COM20020_CS=m
++CONFIG_PCMCIA_IBMTR=m
++
++#
++# Wan interfaces
++#
++CONFIG_WAN=y
++CONFIG_HOSTESS_SV11=m
++CONFIG_COSA=m
++CONFIG_DSCC4=m
++CONFIG_DSCC4_PCISYNC=y
++CONFIG_DSCC4_PCI_RST=y
++CONFIG_LANMEDIA=m
++CONFIG_SEALEVEL_4021=m
++CONFIG_SYNCLINK_SYNCPPP=m
++CONFIG_HDLC=m
++CONFIG_HDLC_RAW=y
++CONFIG_HDLC_RAW_ETH=y
++CONFIG_HDLC_CISCO=y
++CONFIG_HDLC_FR=y
++CONFIG_HDLC_PPP=y
++CONFIG_HDLC_X25=y
++CONFIG_PCI200SYN=m
++CONFIG_WANXL=m
++CONFIG_PC300=m
++CONFIG_PC300_MLPPP=y
++CONFIG_N2=m
++CONFIG_C101=m
++CONFIG_FARSYNC=m
++CONFIG_DLCI=m
++CONFIG_DLCI_COUNT=24
++CONFIG_DLCI_MAX=8
++CONFIG_SDLA=m
++CONFIG_WAN_ROUTER_DRIVERS=y
++# CONFIG_VENDOR_SANGOMA is not set
++CONFIG_CYCLADES_SYNC=m
++CONFIG_CYCLOMX_X25=y
++CONFIG_LAPBETHER=m
++CONFIG_X25_ASY=m
++CONFIG_SBNI=m
++# CONFIG_SBNI_MULTILINE is not set
++
++#
++# ATM drivers
++#
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_FIRESTREAM=m
++CONFIG_ATM_ZATM=m
++# CONFIG_ATM_ZATM_DEBUG is not set
++CONFIG_ATM_NICSTAR=m
++# CONFIG_ATM_NICSTAR_USE_SUNI is not set
++# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_AMBASSADOR=m
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++CONFIG_ATM_HORIZON=m
++# CONFIG_ATM_HORIZON_DEBUG is not set
++CONFIG_ATM_IA=m
++# CONFIG_ATM_IA_DEBUG is not set
++CONFIG_ATM_FORE200E_MAYBE=m
++CONFIG_ATM_FORE200E_PCA=y
++CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
++# CONFIG_ATM_FORE200E_USE_TASKLET is not set
++CONFIG_ATM_FORE200E_TX_RETRY=16
++CONFIG_ATM_FORE200E_DEBUG=0
++CONFIG_ATM_FORE200E=m
++CONFIG_ATM_HE=m
++CONFIG_ATM_HE_USE_SUNI=y
++CONFIG_FDDI=y
++CONFIG_DEFXX=m
++CONFIG_SKFP=m
++CONFIG_HIPPI=y
++CONFIG_ROADRUNNER=m
++# CONFIG_ROADRUNNER_LARGE_RINGS is not set
++CONFIG_PLIP=m
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_PPP_BSDCOMP=m
++CONFIG_PPPOE=m
++CONFIG_PPPOATM=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++CONFIG_SLIP_MODE_SLIP6=y
++CONFIG_NET_FC=y
++CONFIG_SHAPER=m
++CONFIG_NETCONSOLE=m
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++CONFIG_ISDN_PPP_BSDCOMP=m
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_ISDN_X25=y
++
++#
++# ISDN feature submodules
++#
++# CONFIG_ISDN_DRV_LOOP is not set
++# CONFIG_ISDN_DIVERSION is not set
++
++#
++# ISDN4Linux hardware drivers
++#
++
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
++
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++# CONFIG_HISAX_NO_SENDCOMPLETE is not set
++# CONFIG_HISAX_NO_LLC is not set
++# CONFIG_HISAX_NO_KEYPAD is not set
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_0=y
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_AVM_A1=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_IX1MICROR2=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_ASUSCOM=y
++CONFIG_HISAX_TELEINT=y
++CONFIG_HISAX_HFCS=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_SPORTSTER=y
++CONFIG_HISAX_MIC=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_ISURF=y
++CONFIG_HISAX_HSTSAPHIR=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
++
++#
++# HiSax PCMCIA card service modules
++#
++CONFIG_HISAX_SEDLBAUER_CS=m
++CONFIG_HISAX_ELSA_CS=m
++CONFIG_HISAX_AVM_A1_CS=m
++CONFIG_HISAX_TELES_CS=m
++
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_HDLC=y
++
++#
++# Active cards
++#
++CONFIG_ISDN_DRV_ICN=m
++CONFIG_ISDN_DRV_PCBIT=m
++CONFIG_ISDN_DRV_SC=m
++CONFIG_ISDN_DRV_ACT2000=m
++# CONFIG_HYSDN is not set
++
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
++CONFIG_ISDN_CAPI_CAPIFS=m
++CONFIG_ISDN_CAPI_CAPIDRV=m
++
++#
++# CAPI hardware drivers
++#
++
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1ISA=m
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_T1ISA=m
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++
++#
++# Active Eicon DIVA Server cards
++#
++CONFIG_CAPI_EICON=y
++CONFIG_ISDN_DIVAS=m
++CONFIG_ISDN_DIVAS_BRIPCI=y
++CONFIG_ISDN_DIVAS_PRIPCI=y
++CONFIG_ISDN_DIVAS_DIVACAPI=m
++CONFIG_ISDN_DIVAS_USERIDI=m
++CONFIG_ISDN_DIVAS_MAINT=m
++
++#
++# Telephony Support
++#
++CONFIG_PHONE=m
++CONFIG_PHONE_IXJ=m
++CONFIG_PHONE_IXJ_PCMCIA=m
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_TSDEV=m
++CONFIG_INPUT_TSDEV_SCREEN_X=240
++CONFIG_INPUT_TSDEV_SCREEN_Y=320
++CONFIG_INPUT_EVDEV=m
++CONFIG_INPUT_EVBUG=m
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++CONFIG_KEYBOARD_SUNKBD=m
++CONFIG_KEYBOARD_LKKBD=m
++CONFIG_KEYBOARD_XTKBD=m
++CONFIG_KEYBOARD_NEWTON=m
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_INPORT=m
++# CONFIG_MOUSE_ATIXL is not set
++CONFIG_MOUSE_LOGIBM=m
++CONFIG_MOUSE_PC110PAD=m
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_GUNZE=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_UINPUT=m
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_SERIO_CT82C710=m
++CONFIG_SERIO_PARKBD=m
++CONFIG_SERIO_PCIPS2=m
++CONFIG_SERIO_LIBPS2=y
++CONFIG_SERIO_RAW=m
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_VORTEX=m
++CONFIG_GAMEPORT_FM801=m
++# CONFIG_GAMEPORT_CS461X is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=m
++# CONFIG_SERIAL_8250_CS is not set
++# CONFIG_SERIAL_8250_ACPI is not set
++CONFIG_SERIAL_8250_NR_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=m
++CONFIG_SERIAL_JSM=m
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++CONFIG_PRINTER=m
++# CONFIG_LP_CONSOLE is not set
++CONFIG_PPDEV=m
++CONFIG_TIPAR=m
++
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
++
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_ACQUIRE_WDT=m
++CONFIG_ADVANTECH_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_SC520_WDT=m
++CONFIG_EUROTECH_WDT=m
++CONFIG_IB700_WDT=m
++CONFIG_WAFER_WDT=m
++CONFIG_I8XX_TCO=m
++CONFIG_SC1200_WDT=m
++CONFIG_SCx200_WDT=m
++CONFIG_60XX_WDT=m
++CONFIG_CPU5_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_W83877F_WDT=m
++CONFIG_MACHZ_WDT=m
++
++#
++# ISA-based Watchdog Cards
++#
++CONFIG_PCWATCHDOG=m
++CONFIG_MIXCOMWD=m
++CONFIG_WDT=m
++CONFIG_WDT_501=y
++
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++CONFIG_WDT_501_PCI=y
++
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++CONFIG_HW_RANDOM=m
++CONFIG_NVRAM=m
++CONFIG_RTC=m
++CONFIG_GEN_RTC=m
++CONFIG_GEN_RTC_X=y
++CONFIG_DTLK=m
++CONFIG_R3964=m
++CONFIG_APPLICOM=m
++CONFIG_SONYPI=m
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_FTAPE is not set
++CONFIG_AGP=m
++CONFIG_AGP_ALI=m
++CONFIG_AGP_ATI=m
++CONFIG_AGP_AMD=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_NVIDIA=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_SWORKS=m
++CONFIG_AGP_VIA=m
++CONFIG_AGP_EFFICEON=m
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++# CONFIG_DRM_GAMMA is not set
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++
++#
++# PCMCIA character devices
++#
++CONFIG_SYNCLINK_CS=m
++CONFIG_MWAVE=m
++CONFIG_SCx200_GPIO=m
++CONFIG_RAW_DRIVER=m
++# CONFIG_HPET is not set
++CONFIG_MAX_RAW_DEVS=256
++CONFIG_HANGCHECK_TIMER=m
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=m
++CONFIG_I2C_CHARDEV=m
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_ALI1535=m
++CONFIG_I2C_ALI1563=m
++CONFIG_I2C_ALI15X3=m
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD756_S4882=m
++CONFIG_I2C_AMD8111=m
++CONFIG_I2C_ELEKTOR=m
++CONFIG_I2C_I801=m
++CONFIG_I2C_I810=m
++CONFIG_I2C_PIIX4=m
++CONFIG_I2C_ISA=m
++CONFIG_I2C_NFORCE2=m
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++CONFIG_I2C_PROSAVAGE=m
++CONFIG_I2C_SAVAGE4=m
++CONFIG_SCx200_I2C=m
++CONFIG_SCx200_I2C_SCL=12
++CONFIG_SCx200_I2C_SDA=13
++CONFIG_SCx200_ACB=m
++CONFIG_I2C_SIS5595=m
++CONFIG_I2C_SIS630=m
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++CONFIG_I2C_VOODOO3=m
++CONFIG_I2C_PCA_ISA=m
++
++#
++# Hardware Sensors Chip support
++#
++CONFIG_I2C_SENSOR=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_FSCHER=m
++CONFIG_SENSORS_FSCPOS=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_PC87360=m
++# CONFIG_SENSORS_SMSC47B397 is not set
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83627HF=m
++
++#
++# Other I2C Chip support
++#
++CONFIG_SENSORS_DS1337=m
++CONFIG_SENSORS_EEPROM=m
++CONFIG_SENSORS_PCF8574=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_RTC8564=m
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# Dallas's 1-wire bus
++#
++CONFIG_W1=m
++CONFIG_W1_MATROX=m
++CONFIG_W1_DS9490=m
++CONFIG_W1_DS9490_BRIDGE=m
++CONFIG_W1_THERM=m
++CONFIG_W1_SMEM=m
++
++#
++# Misc devices
++#
++CONFIG_IBM_ASM=m
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=m
++
++#
++# Video For Linux
++#
++
++#
++# Video Adapters
++#
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_PMS=m
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_CPIA=m
++CONFIG_VIDEO_CPIA_PP=m
++CONFIG_VIDEO_CPIA_USB=m
++CONFIG_VIDEO_SAA5246A=m
++CONFIG_VIDEO_SAA5249=m
++CONFIG_TUNER_3036=m
++CONFIG_VIDEO_STRADIS=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++# CONFIG_VIDEO_ZR36120 is not set
++CONFIG_VIDEO_MEYE=m
++# CONFIG_VIDEO_SAA7134 is not set
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DPC=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_CX88=m
++# CONFIG_VIDEO_CX88_DVB is not set
++CONFIG_VIDEO_OVCAMCHIP=m
++
++#
++# Radio Adapters
++#
++CONFIG_RADIO_CADET=m
++CONFIG_RADIO_RTRACK=m
++CONFIG_RADIO_RTRACK2=m
++CONFIG_RADIO_AZTECH=m
++CONFIG_RADIO_GEMTEK=m
++CONFIG_RADIO_GEMTEK_PCI=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_MAESTRO=m
++CONFIG_RADIO_MIROPCM20=m
++CONFIG_RADIO_MIROPCM20_RDS=m
++CONFIG_RADIO_SF16FMI=m
++CONFIG_RADIO_SF16FMR2=m
++CONFIG_RADIO_TERRATEC=m
++CONFIG_RADIO_TRUST=m
++CONFIG_RADIO_TYPHOON=m
++CONFIG_RADIO_TYPHOON_PROC_FS=y
++CONFIG_RADIO_ZOLTRIX=m
++
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB=y
++CONFIG_DVB_CORE=m
++
++#
++# Supported SAA7146 based PCI Adapters
++#
++CONFIG_DVB_AV7110=m
++# CONFIG_DVB_AV7110_OSD is not set
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++
++#
++# Supported USB Adapters
++#
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_DIBUSB=m
++CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES=y
++# CONFIG_DVB_DIBCOM_DEBUG is not set
++CONFIG_DVB_CINERGYT2=m
++# CONFIG_DVB_CINERGYT2_TUNING is not set
++
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
++CONFIG_DVB_B2C2_SKYSTAR=m
++
++#
++# Supported BT878 Adapters
++#
++CONFIG_DVB_BT8XX=m
++
++#
++# Supported DVB Frontends
++#
++
++#
++# Customise DVB Frontends
++#
++
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_TDA80XX=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_VES1X93=m
++
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
++
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_ATMEL_AT76C651=m
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_STV0297=m
++
++#
++# ATSC (North American/Korean Terresterial DTV) frontends
++#
++CONFIG_DVB_NXT2002=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_VIDEOBUF=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BUF=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_IR=m
++CONFIG_VIDEO_TVEEPROM=m
++
++#
++# Graphics support
++#
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=m
++CONFIG_FB_CFB_COPYAREA=m
++CONFIG_FB_CFB_IMAGEBLIT=m
++CONFIG_FB_SOFT_CURSOR=m
++# CONFIG_FB_MACMODES is not set
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_FB_CIRRUS=m
++CONFIG_FB_PM2=m
++CONFIG_FB_PM2_FIFO_DISCONNECT=y
++CONFIG_FB_CYBER2000=m
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++CONFIG_FB_VGA16=m
++# CONFIG_FB_VESA is not set
++CONFIG_VIDEO_SELECT=y
++CONFIG_FB_HGA=m
++# CONFIG_FB_HGA_ACCEL is not set
++CONFIG_FB_NVIDIA=m
++CONFIG_FB_NVIDIA_I2C=y
++CONFIG_FB_RIVA=m
++CONFIG_FB_RIVA_I2C=y
++CONFIG_FB_RIVA_DEBUG=y
++CONFIG_FB_I810=m
++# CONFIG_FB_I810_GTF is not set
++CONFIG_FB_INTEL=m
++# CONFIG_FB_INTEL_DEBUG is not set
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++# CONFIG_FB_MATROX_G is not set
++CONFIG_FB_MATROX_I2C=m
++CONFIG_FB_MATROX_MULTIHEAD=y
++CONFIG_FB_RADEON_OLD=m
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++CONFIG_FB_ATY_GENERIC_LCD=y
++CONFIG_FB_ATY_XL_INIT=y
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++CONFIG_FB_SAVAGE_ACCEL=y
++CONFIG_FB_SIS=m
++CONFIG_FB_SIS_300=y
++CONFIG_FB_SIS_315=y
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++# CONFIG_FB_3DFX_ACCEL is not set
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_TRIDENT=m
++# CONFIG_FB_TRIDENT_ACCEL is not set
++# CONFIG_FB_PM3 is not set
++CONFIG_FB_GEODE=y
++CONFIG_FB_GEODE_GX1=m
++CONFIG_FB_S1D13XXX=m
++CONFIG_FB_VIRTUAL=m
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++CONFIG_MDA_CONSOLE=m
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=m
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++# CONFIG_LOGO is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=m
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_RTCTIMER=m
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++CONFIG_SND_GENERIC_PM=y
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_OPL4_LIB=m
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_DUMMY=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
++
++#
++# ISA devices
++#
++CONFIG_SND_AD1848_LIB=m
++CONFIG_SND_CS4231_LIB=m
++CONFIG_SND_AD1816A=m
++CONFIG_SND_AD1848=m
++CONFIG_SND_CS4231=m
++CONFIG_SND_CS4232=m
++CONFIG_SND_CS4236=m
++CONFIG_SND_ES968=m
++CONFIG_SND_ES1688=m
++CONFIG_SND_ES18XX=m
++CONFIG_SND_GUS_SYNTH=m
++CONFIG_SND_GUSCLASSIC=m
++CONFIG_SND_GUSEXTREME=m
++CONFIG_SND_GUSMAX=m
++CONFIG_SND_INTERWAVE=m
++CONFIG_SND_INTERWAVE_STB=m
++CONFIG_SND_OPTI92X_AD1848=m
++CONFIG_SND_OPTI92X_CS4231=m
++CONFIG_SND_OPTI93X=m
++CONFIG_SND_SB8=m
++CONFIG_SND_SB16=m
++CONFIG_SND_SBAWE=m
++CONFIG_SND_SB16_CSP=y
++CONFIG_SND_WAVEFRONT=m
++CONFIG_SND_ALS100=m
++CONFIG_SND_AZT2320=m
++CONFIG_SND_CMI8330=m
++CONFIG_SND_DT019X=m
++CONFIG_SND_OPL3SA2=m
++CONFIG_SND_SGALAXY=m
++CONFIG_SND_SSCAPE=m
++
++#
++# PCI devices
++#
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CS4281=m
++CONFIG_SND_EMU10K1=m
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_CA0106 is not set
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_YMFPCI=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_VIA82XX=m
++# CONFIG_SND_VIA82XX_MODEM is not set
++CONFIG_SND_VX222=m
++CONFIG_SND_HDA_INTEL=m
++
++#
++# USB devices
++#
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_USX2Y=m
++
++#
++# PCMCIA devices
++#
++CONFIG_SND_VXPOCKET=m
++CONFIG_SND_VXP440=m
++CONFIG_SND_PDAUDIOCF=m
++
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=m
++CONFIG_SOUND_BT878=m
++CONFIG_SOUND_CMPCI=m
++# CONFIG_SOUND_CMPCI_FM is not set
++# CONFIG_SOUND_CMPCI_MIDI is not set
++CONFIG_SOUND_CMPCI_JOYSTICK=y
++CONFIG_SOUND_EMU10K1=m
++CONFIG_MIDI_EMU10K1=y
++CONFIG_SOUND_FUSION=m
++CONFIG_SOUND_CS4281=m
++CONFIG_SOUND_ES1370=m
++CONFIG_SOUND_ES1371=m
++CONFIG_SOUND_ESSSOLO1=m
++CONFIG_SOUND_MAESTRO=m
++CONFIG_SOUND_MAESTRO3=m
++CONFIG_SOUND_ICH=m
++CONFIG_SOUND_SONICVIBES=m
++CONFIG_SOUND_TRIDENT=m
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++CONFIG_SOUND_VIA82CXXX=m
++CONFIG_MIDI_VIA82CXXX=y
++CONFIG_SOUND_OSS=m
++# CONFIG_SOUND_TRACEINIT is not set
++# CONFIG_SOUND_DMAP is not set
++# CONFIG_SOUND_AD1816 is not set
++CONFIG_SOUND_AD1889=m
++CONFIG_SOUND_SGALAXY=m
++CONFIG_SOUND_ADLIB=m
++CONFIG_SOUND_ACI_MIXER=m
++CONFIG_SOUND_CS4232=m
++CONFIG_SOUND_SSCAPE=m
++CONFIG_SOUND_GUS=m
++CONFIG_SOUND_GUS16=y
++CONFIG_SOUND_GUSMAX=y
++CONFIG_SOUND_VMIDI=m
++CONFIG_SOUND_TRIX=m
++CONFIG_SOUND_MSS=m
++CONFIG_SOUND_MPU401=m
++CONFIG_SOUND_NM256=m
++CONFIG_SOUND_MAD16=m
++CONFIG_MAD16_OLDCARD=y
++CONFIG_SOUND_PAS=m
++CONFIG_SOUND_PSS=m
++CONFIG_PSS_MIXER=y
++CONFIG_SOUND_SB=m
++# CONFIG_SOUND_AWE32_SYNTH is not set
++CONFIG_SOUND_WAVEFRONT=m
++CONFIG_SOUND_MAUI=m
++CONFIG_SOUND_YM3812=m
++CONFIG_SOUND_OPL3SA1=m
++CONFIG_SOUND_OPL3SA2=m
++CONFIG_SOUND_YMFPCI=m
++# CONFIG_SOUND_YMFPCI_LEGACY is not set
++CONFIG_SOUND_UART6850=m
++CONFIG_SOUND_AEDSP16=m
++CONFIG_SC6600=y
++CONFIG_SC6600_JOY=y
++CONFIG_SC6600_CDROM=4
++CONFIG_SC6600_CDROMBASE=0x0
++# CONFIG_AEDSP16_MSS is not set
++# CONFIG_AEDSP16_SBPRO is not set
++# CONFIG_AEDSP16_MPU401 is not set
++CONFIG_SOUND_TVMIXER=m
++CONFIG_SOUND_KAHLUA=m
++CONFIG_SOUND_ALI5455=m
++CONFIG_SOUND_FORTE=m
++CONFIG_SOUND_RME96XX=m
++CONFIG_SOUND_AD1980=m
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_SPLIT_ISO=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_OHCI_HCD=m
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=m
++CONFIG_USB_SL811_HCD=m
++CONFIG_USB_SL811_CS=m
++
++#
++# USB Device Class drivers
++#
++CONFIG_USB_AUDIO=m
++
++#
++# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
++#
++CONFIG_USB_MIDI=m
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_DATAFAB=y
++CONFIG_USB_STORAGE_FREECOM=y
++CONFIG_USB_STORAGE_ISD200=y
++CONFIG_USB_STORAGE_DPCM=y
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_SDDR09=y
++CONFIG_USB_STORAGE_SDDR55=y
++CONFIG_USB_STORAGE_JUMPSHOT=y
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=m
++CONFIG_USB_HIDINPUT=y
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++
++#
++# USB HID Boot Protocol drivers
++#
++CONFIG_USB_KBD=m
++CONFIG_USB_MOUSE=m
++CONFIG_USB_AIPTEK=m
++CONFIG_USB_WACOM=m
++CONFIG_USB_KBTAB=m
++CONFIG_USB_POWERMATE=m
++CONFIG_USB_MTOUCH=m
++CONFIG_USB_EGALAX=m
++CONFIG_USB_XPAD=m
++CONFIG_USB_ATI_REMOTE=m
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++
++#
++# USB Multimedia devices
++#
++# CONFIG_USB_DABUSB is not set
++CONFIG_USB_VICAM=m
++CONFIG_USB_DSBR=m
++CONFIG_USB_IBMCAM=m
++CONFIG_USB_KONICAWC=m
++CONFIG_USB_OV511=m
++CONFIG_USB_SE401=m
++CONFIG_USB_SN9C102=m
++CONFIG_USB_STV680=m
++CONFIG_USB_W9968CF=m
++CONFIG_USB_PWC=m
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_USBNET=m
++
++#
++# USB Host-to-Host Cables
++#
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_GENESYS=y
++CONFIG_USB_NET1080=y
++CONFIG_USB_PL2301=y
++CONFIG_USB_KC2190=y
++
++#
++# Intelligent USB Devices/Gadgets
++#
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_ZAURUS=y
++CONFIG_USB_CDCETHER=y
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_AX8817X=y
++CONFIG_USB_ZD1201=m
++CONFIG_USB_MON=m
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_AIRPRIME=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP2101=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++# CONFIG_USB_SERIAL_GARMIN is not set
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_SAFE=m
++# CONFIG_USB_SERIAL_SAFE_PADDED is not set
++# CONFIG_USB_SERIAL_TI is not set
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_OPTION=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_EZUSB=y
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++CONFIG_USB_AUERSWALD=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LED=m
++CONFIG_USB_CYTHERM=m
++CONFIG_USB_PHIDGETKIT=m
++CONFIG_USB_PHIDGETSERVO=m
++# CONFIG_USB_IDMOUSE is not set
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_TEST=m
++
++#
++# USB ATM/DSL drivers
++#
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++
++#
++# USB Gadget Support
++#
++CONFIG_USB_GADGET=m
++# CONFIG_USB_GADGET_DEBUG_FILES is not set
++CONFIG_USB_GADGET_NET2280=y
++CONFIG_USB_NET2280=m
++# CONFIG_USB_GADGET_PXA2XX is not set
++# CONFIG_USB_GADGET_GOKU is not set
++# CONFIG_USB_GADGET_LH7A40X is not set
++# CONFIG_USB_GADGET_OMAP is not set
++# CONFIG_USB_GADGET_DUMMY_HCD is not set
++CONFIG_USB_GADGET_DUALSPEED=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_ETH_RNDIS=y
++CONFIG_USB_GADGETFS=m
++CONFIG_USB_FILE_STORAGE=m
++# CONFIG_USB_FILE_STORAGE_TEST is not set
++CONFIG_USB_G_SERIAL=m
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# Power management options
++#
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BOOT=y
++CONFIG_ACPI_INTERPRETER=y
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_PROCESSOR=m
++# CONFIG_ACPI_HOTPLUG_CPU is not set
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_BUS=y
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_PCI=y
++CONFIG_ACPI_SYSTEM=y
++# CONFIG_X86_PM_TIMER is not set
++# CONFIG_ACPI_CONTAINER is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=m
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=m
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++# CONFIG_JFS_SECURITY is not set
++# CONFIG_JFS_DEBUG is not set
++CONFIG_JFS_STATISTICS=y
++CONFIG_FS_POSIX_ACL=y
++
++#
++# XFS support
++#
++CONFIG_XFS_FS=m
++CONFIG_XFS_EXPORT=y
++CONFIG_XFS_RT=y
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_SECURITY=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++CONFIG_QUOTA=y
++CONFIG_QFMT_V1=m
++CONFIG_QFMT_V2=m
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=m
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=m
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=m
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++CONFIG_DEVPTS_FS_XATTR=y
++CONFIG_DEVPTS_FS_SECURITY=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_TMPFS_SECURITY=y
++# CONFIG_HUGETLBFS is not set
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_JFFS_FS=m
++CONFIG_JFFS_FS_VERBOSE=0
++CONFIG_JFFS_PROC_FS=y
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++# CONFIG_JFFS2_FS_NAND is not set
++# CONFIG_JFFS2_FS_NOR_ECC is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++CONFIG_VXFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++# CONFIG_QNX4FS_RW is not set
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_RPCSEC_GSS_SPKM3=m
++CONFIG_SMB_FS=m
++# CONFIG_SMB_NLS_DEFAULT is not set
++CONFIG_CIFS=m
++# CONFIG_CIFS_STATS is not set
++# CONFIG_CIFS_XATTR is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++# CONFIG_NCPFS_SMALLDOS is not set
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++# CONFIG_CODA_FS_OLD_API is not set
++CONFIG_AFS_FS=m
++CONFIG_RXRPC=m
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++CONFIG_ACORN_PARTITION=y
++CONFIG_ACORN_PARTITION_CUMANA=y
++# CONFIG_ACORN_PARTITION_EESOX is not set
++CONFIG_ACORN_PARTITION_ICS=y
++# CONFIG_ACORN_PARTITION_ADFS is not set
++# CONFIG_ACORN_PARTITION_POWERTEC is not set
++CONFIG_ACORN_PARTITION_RISCIX=y
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++CONFIG_ATARI_PARTITION=y
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_SGI_PARTITION=y
++CONFIG_ULTRIX_PARTITION=y
++CONFIG_SUN_PARTITION=y
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=m
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_UTF8=m
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
++CONFIG_SECURITY=y
++# CONFIG_SECURITY_NETWORK is not set
++CONFIG_SECURITY_CAPABILITIES=y
++CONFIG_SECURITY_ROOTPLUG=m
++CONFIG_SECURITY_SECLVL=m
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=m
++CONFIG_CRYPTO_SHA256=m
++CONFIG_CRYPTO_SHA512=m
++CONFIG_CRYPTO_WP512=m
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_AES_586=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_CRC32C=m
++CONFIG_CRYPTO_TEST=m
++
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=m
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=m
++CONFIG_REED_SOLOMON=m
++CONFIG_REED_SOLOMON_DEC16=y
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_KPROBES is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
+diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen_defconfig_x86_64 linux-2.6.12-xen/arch/xen/configs/xen_defconfig_x86_64
+--- pristine-linux-2.6.12/arch/xen/configs/xen_defconfig_x86_64	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/configs/xen_defconfig_x86_64	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,2425 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12.4-xen
++# Mon Aug 15 19:54:11 2005
++#
++CONFIG_XEN=y
++CONFIG_ARCH_XEN=y
++CONFIG_NO_IDLE_HZ=y
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_PHYSDEV_ACCESS=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++# CONFIG_XEN_BLKDEV_TAP_BE is not set
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_TPMDEV_FRONTEND is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SHADOW_MODE is not set
++CONFIG_XEN_SCRUB_PAGES=y
++# CONFIG_XEN_X86 is not set
++CONFIG_XEN_X86_64=y
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++# CONFIG_CLEAN_COMPILE is not set
++CONFIG_BROKEN=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_OBSOLETE_MODPARM=y
++# CONFIG_MODVERSIONS is not set
++CONFIG_MODULE_SRCVERSION_ALL=y
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++CONFIG_XENARCH="x86_64"
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_UID16=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_GOOD_APIC=y
++# CONFIG_HPET_TIMER is not set
++CONFIG_SMP=y
++CONFIG_NR_CPUS=8
++# CONFIG_SCHED_SMT is not set
++CONFIG_MICROCODE=y
++# CONFIG_X86_CPUID is not set
++# CONFIG_NUMA is not set
++# CONFIG_MTRR is not set
++CONFIG_HAVE_DEC_LOCK=y
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_PCI=y
++CONFIG_PCI_DIRECT=y
++# CONFIG_PCI_MMCONFIG is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_SECCOMP=y
++
++#
++# X86_64 processor configuration
++#
++CONFIG_X86_64=y
++CONFIG_64BIT=y
++CONFIG_EARLY_PRINTK=y
++
++#
++# Processor type and features
++#
++# CONFIG_MPSC is not set
++CONFIG_GENERIC_CPU=y
++CONFIG_X86_L1_CACHE_BYTES=128
++# CONFIG_X86_TSC is not set
++CONFIG_X86_XEN_GENAPIC=y
++# CONFIG_X86_MSR is not set
++CONFIG_X86_HT=y
++# CONFIG_K8_NUMA is not set
++# CONFIG_NUMA_EMU is not set
++# CONFIG_GART_IOMMU is not set
++CONFIG_DUMMY_IOMMU=y
++CONFIG_SWIOTLB=y
++# CONFIG_X86_MCE is not set
++
++#
++# Power management options
++#
++# CONFIG_PM is not set
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI etc.)
++#
++# CONFIG_UNORDERED_IO is not set
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_IA32_EMULATION=y
++# CONFIG_IA32_AOUT is not set
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++CONFIG_MTD=m
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=m
++CONFIG_MTD_PARTITIONS=y
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++CONFIG_MTD_CMDLINE_PARTS=y
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_AMDSTD_RETRY=3
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++CONFIG_MTD_ROM=m
++CONFIG_MTD_ABSENT=m
++# CONFIG_MTD_OBSOLETE_CHIPS is not set
++
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++# CONFIG_MTD_PHYSMAP is not set
++# CONFIG_MTD_PNC2000 is not set
++CONFIG_MTD_SC520CDP=m
++CONFIG_MTD_NETSC520=m
++CONFIG_MTD_TS5500=m
++CONFIG_MTD_SBC_GXX=m
++CONFIG_MTD_ELAN_104NC=m
++# CONFIG_MTD_AMD76XROM is not set
++# CONFIG_MTD_ICHXROM is not set
++CONFIG_MTD_SCB2_FLASH=m
++# CONFIG_MTD_NETtel is not set
++# CONFIG_MTD_DILNETPC is not set
++# CONFIG_MTD_L440GX is not set
++CONFIG_MTD_PCI=m
++
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++# CONFIG_MTD_SLRAM is not set
++# CONFIG_MTD_PHRAM is not set
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++# CONFIG_MTD_BLKMTD is not set
++CONFIG_MTD_BLOCK2MTD=m
++
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOC2000=m
++# CONFIG_MTD_DOC2001 is not set
++CONFIG_MTD_DOC2001PLUS=m
++CONFIG_MTD_DOCPROBE=m
++CONFIG_MTD_DOCECC=m
++# CONFIG_MTD_DOCPROBE_ADVANCED is not set
++CONFIG_MTD_DOCPROBE_ADDRESS=0
++
++#
++# NAND Flash Device Drivers
++#
++CONFIG_MTD_NAND=m
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++CONFIG_MTD_NAND_IDS=m
++# CONFIG_MTD_NAND_DISKONCHIP is not set
++# CONFIG_MTD_NAND_NANDSIM is not set
++
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++# CONFIG_PARPORT_PC_FIFO is not set
++# CONFIG_PARPORT_PC_SUPERIO is not set
++CONFIG_PARPORT_NOT_PC=y
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_1284=y
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=m
++CONFIG_PARIDE=m
++CONFIG_PARIDE_PARPORT=m
++
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
++
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++CONFIG_PARIDE_EPATC8=y
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SX8=m
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_LBD=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_ATA_OVER_ETH=m
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++CONFIG_IDEDISK_MULTI_MODE=y
++CONFIG_BLK_DEV_IDECD=y
++# CONFIG_BLK_DEV_IDETAPE is not set
++CONFIG_BLK_DEV_IDEFLOPPY=y
++CONFIG_BLK_DEV_IDESCSI=m
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++CONFIG_BLK_DEV_CMD640=y
++CONFIG_BLK_DEV_CMD640_ENHANCED=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++# CONFIG_BLK_DEV_OPTI621 is not set
++CONFIG_BLK_DEV_RZ1000=y
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++CONFIG_BLK_DEV_AEC62XX=y
++CONFIG_BLK_DEV_ALI15X3=y
++# CONFIG_WDC_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=y
++CONFIG_BLK_DEV_ATIIXP=y
++CONFIG_BLK_DEV_CMD64X=y
++CONFIG_BLK_DEV_TRIFLEX=y
++CONFIG_BLK_DEV_CY82C693=y
++CONFIG_BLK_DEV_CS5520=y
++CONFIG_BLK_DEV_CS5530=y
++CONFIG_BLK_DEV_HPT34X=y
++# CONFIG_HPT34X_AUTODMA is not set
++CONFIG_BLK_DEV_HPT366=y
++# CONFIG_BLK_DEV_SC1200 is not set
++CONFIG_BLK_DEV_PIIX=y
++# CONFIG_BLK_DEV_NS87415 is not set
++CONFIG_BLK_DEV_PDC202XX_OLD=y
++# CONFIG_PDC202XX_BURST is not set
++CONFIG_BLK_DEV_PDC202XX_NEW=y
++CONFIG_PDC202XX_FORCE=y
++CONFIG_BLK_DEV_SVWKS=y
++CONFIG_BLK_DEV_SIIMAGE=y
++CONFIG_BLK_DEV_SIS5513=y
++CONFIG_BLK_DEV_SLC90E66=y
++# CONFIG_BLK_DEV_TRM290 is not set
++CONFIG_BLK_DEV_VIA82CXXX=y
++# CONFIG_IDE_ARM is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=m
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++CONFIG_SCSI_ISCSI_ATTRS=m
++
++#
++# SCSI low-level drivers
++#
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
++CONFIG_AIC7XXX_DEBUG_MASK=0
++# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
++CONFIG_SCSI_AIC7XXX_OLD=m
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=4
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
++# CONFIG_AIC79XX_DEBUG_ENABLE is not set
++CONFIG_AIC79XX_DEBUG_MASK=0
++# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
++# CONFIG_SCSI_ADVANSYS is not set
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_SCSI_SATA=y
++CONFIG_SCSI_SATA_AHCI=m
++CONFIG_SCSI_SATA_SVW=m
++CONFIG_SCSI_ATA_PIIX=y
++CONFIG_SCSI_SATA_NV=m
++CONFIG_SCSI_SATA_PROMISE=m
++CONFIG_SCSI_SATA_QSTOR=m
++CONFIG_SCSI_SATA_SX4=m
++CONFIG_SCSI_SATA_SIL=m
++CONFIG_SCSI_SATA_SIS=m
++CONFIG_SCSI_SATA_ULI=m
++CONFIG_SCSI_SATA_VIA=m
++CONFIG_SCSI_SATA_VITESSE=m
++CONFIG_SCSI_BUSLOGIC=m
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++# CONFIG_SCSI_CPQFCTS is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA is not set
++# CONFIG_SCSI_EATA_PIO is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_IPS=m
++CONFIG_SCSI_INITIO=m
++CONFIG_SCSI_INIA100=m
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_PCI2000 is not set
++# CONFIG_SCSI_PCI2220I is not set
++# CONFIG_SCSI_QLOGIC_ISP is not set
++# CONFIG_SCSI_QLOGIC_FC is not set
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLOGIC_1280_1040=y
++CONFIG_SCSI_QLA2XXX=y
++CONFIG_SCSI_QLA21XX=m
++CONFIG_SCSI_QLA22XX=m
++CONFIG_SCSI_QLA2300=m
++CONFIG_SCSI_QLA2322=m
++CONFIG_SCSI_QLA6312=m
++CONFIG_SCSI_LPFC=m
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_DC390T=m
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID5=m
++CONFIG_MD_RAID6=m
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_BLK_DEV_DM=m
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=m
++CONFIG_FUSION_MAX_SGE=40
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_IEEE1394=m
++
++#
++# Subsystem Options
++#
++# CONFIG_IEEE1394_VERBOSEDEBUG is not set
++CONFIG_IEEE1394_OUI_DB=y
++CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
++CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
++
++#
++# Device Drivers
++#
++CONFIG_IEEE1394_PCILYNX=m
++CONFIG_IEEE1394_OHCI1394=m
++
++#
++# Protocol Drivers
++#
++CONFIG_IEEE1394_VIDEO1394=m
++CONFIG_IEEE1394_SBP2=m
++# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
++CONFIG_IEEE1394_ETH1394=m
++CONFIG_IEEE1394_DV1394=m
++CONFIG_IEEE1394_RAWIO=m
++CONFIG_IEEE1394_CMP=m
++CONFIG_IEEE1394_AMDTP=m
++
++#
++# I2O device support
++#
++CONFIG_I2O=m
++CONFIG_I2O_CONFIG=m
++CONFIG_I2O_BLOCK=m
++CONFIG_I2O_SCSI=m
++CONFIG_I2O_PROC=m
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=y
++CONFIG_NET_KEY=m
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++CONFIG_IP_ROUTE_VERBOSE=y
++# CONFIG_IP_PNP is not set
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_TUNNEL=m
++CONFIG_IP_TCPDIAG=m
++CONFIG_IP_TCPDIAG_IPV6=y
++
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IPV6=m
++CONFIG_IPV6_PRIVACY=y
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CT_PROTO_SCTP=m
++CONFIG_IP_NF_FTP=m
++CONFIG_IP_NF_IRC=m
++CONFIG_IP_NF_TFTP=m
++CONFIG_IP_NF_AMANDA=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_LIMIT=m
++CONFIG_IP_NF_MATCH_IPRANGE=m
++CONFIG_IP_NF_MATCH_MAC=m
++CONFIG_IP_NF_MATCH_PKTTYPE=m
++CONFIG_IP_NF_MATCH_MARK=m
++CONFIG_IP_NF_MATCH_MULTIPORT=m
++CONFIG_IP_NF_MATCH_TOS=m
++CONFIG_IP_NF_MATCH_RECENT=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_DSCP=m
++CONFIG_IP_NF_MATCH_AH_ESP=m
++CONFIG_IP_NF_MATCH_LENGTH=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_MATCH_TCPMSS=m
++CONFIG_IP_NF_MATCH_HELPER=m
++CONFIG_IP_NF_MATCH_STATE=m
++CONFIG_IP_NF_MATCH_CONNTRACK=m
++CONFIG_IP_NF_MATCH_OWNER=m
++CONFIG_IP_NF_MATCH_PHYSDEV=m
++CONFIG_IP_NF_MATCH_ADDRTYPE=m
++CONFIG_IP_NF_MATCH_REALM=m
++CONFIG_IP_NF_MATCH_SCTP=m
++CONFIG_IP_NF_MATCH_COMMENT=m
++CONFIG_IP_NF_MATCH_CONNMARK=m
++CONFIG_IP_NF_MATCH_HASHLIMIT=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_TCPMSS=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_SAME=m
++CONFIG_IP_NF_NAT_SNMP_BASIC=m
++CONFIG_IP_NF_NAT_IRC=m
++CONFIG_IP_NF_NAT_FTP=m
++CONFIG_IP_NF_NAT_TFTP=m
++CONFIG_IP_NF_NAT_AMANDA=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_TOS=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_DSCP=m
++CONFIG_IP_NF_TARGET_MARK=m
++CONFIG_IP_NF_TARGET_CLASSIFY=m
++CONFIG_IP_NF_TARGET_CONNMARK=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_TARGET_NOTRACK=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_LIMIT=m
++CONFIG_IP6_NF_MATCH_MAC=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_MULTIPORT=m
++CONFIG_IP6_NF_MATCH_OWNER=m
++CONFIG_IP6_NF_MATCH_MARK=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_AHESP=m
++CONFIG_IP6_NF_MATCH_LENGTH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_MATCH_PHYSDEV=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_TARGET_MARK=m
++CONFIG_IP6_NF_RAW=m
++
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++CONFIG_BRIDGE_EBT_ULOG=m
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=y
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
++CONFIG_ATM=m
++CONFIG_ATM_CLIP=m
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++CONFIG_ATM_LANE=m
++# CONFIG_ATM_MPOA is not set
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++# CONFIG_DECNET is not set
++CONFIG_LLC=y
++# CONFIG_LLC2 is not set
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=y
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++CONFIG_NET_DIVERT=y
++# CONFIG_ECONET is not set
++CONFIG_WAN_ROUTER=m
++
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CLK_JIFFIES=y
++# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
++# CONFIG_NET_SCH_CLK_CPU is not set
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_INGRESS=m
++CONFIG_NET_QOS=y
++CONFIG_NET_ESTIMATOR=y
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++CONFIG_CLS_U32_PERF=y
++CONFIG_NET_CLS_IND=y
++CONFIG_CLS_U32_MARK=y
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_CLS_POLICE=y
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++CONFIG_NETPOLL_TRAP=y
++CONFIG_NET_POLL_CONTROLLER=y
++# CONFIG_HAMRADIO is not set
++CONFIG_IRDA=m
++
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++# CONFIG_IRDA_ULTRA is not set
++
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++# CONFIG_IRDA_DEBUG is not set
++
++#
++# Infrared-port device drivers
++#
++
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
++
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++
++#
++# Old SIR device drivers
++#
++CONFIG_IRPORT_SIR=m
++
++#
++# Old Serial dongle support
++#
++# CONFIG_DONGLE_OLD is not set
++
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUSB=m
++CONFIG_BT_HCIUSB_SCO=y
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIUART_BCSP_TXCRC=y
++CONFIG_BT_HCIBCM203X=m
++CONFIG_BT_HCIBPA10X=m
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=m
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++CONFIG_TULIP_MMIO=y
++# CONFIG_TULIP_NAPI is not set
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++CONFIG_AMD8111E_NAPI=y
++CONFIG_ADAPTEC_STARFIRE=m
++CONFIG_ADAPTEC_STARFIRE_NAPI=y
++CONFIG_B44=m
++CONFIG_FORCEDETH=m
++CONFIG_DGRS=m
++CONFIG_EEPRO100=m
++CONFIG_E100=m
++CONFIG_FEALNX=m
++CONFIG_NATSEMI=m
++CONFIG_NE2K_PCI=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++CONFIG_8139TOO_PIO=y
++# CONFIG_8139TOO_TUNE_TWISTER is not set
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_SIS900=m
++CONFIG_EPIC100=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_VIA_RHINE=m
++CONFIG_VIA_RHINE_MMIO=y
++
++#
++# Ethernet (1000 Mbit)
++#
++CONFIG_ACENIC=m
++# CONFIG_ACENIC_OMIT_TIGON_I is not set
++CONFIG_DL2K=m
++CONFIG_E1000=m
++CONFIG_E1000_NAPI=y
++CONFIG_NS83820=m
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_R8169=m
++CONFIG_R8169_NAPI=y
++CONFIG_R8169_VLAN=y
++CONFIG_SK98LIN=m
++CONFIG_VIA_VELOCITY=m
++CONFIG_TIGON3=m
++CONFIG_BNX2=m
++
++#
++# Ethernet (10000 Mbit)
++#
++CONFIG_IXGB=m
++CONFIG_IXGB_NAPI=y
++CONFIG_S2IO=m
++CONFIG_S2IO_NAPI=y
++# CONFIG_2BUFF_MODE is not set
++
++#
++# Token Ring devices
++#
++CONFIG_TR=y
++CONFIG_IBMOL=m
++CONFIG_3C359=m
++CONFIG_TMS380TR=m
++CONFIG_TMSPCI=m
++CONFIG_ABYSS=m
++
++#
++# Wireless LAN (non-hamradio)
++#
++CONFIG_NET_RADIO=y
++
++#
++# Obsolete Wireless cards support (pre-802.11)
++#
++# CONFIG_STRIP is not set
++
++#
++# Wireless 802.11b ISA/PCI cards support
++#
++CONFIG_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
++
++#
++# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
++#
++CONFIG_PRISM54=m
++CONFIG_NET_WIRELESS=y
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++
++#
++# ATM drivers
++#
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_FIRESTREAM=m
++# CONFIG_ATM_ZATM is not set
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_AMBASSADOR=m
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++CONFIG_ATM_HORIZON=m
++# CONFIG_ATM_HORIZON_DEBUG is not set
++CONFIG_ATM_FORE200E_MAYBE=m
++# CONFIG_ATM_FORE200E_PCA is not set
++CONFIG_ATM_HE=m
++# CONFIG_ATM_HE_USE_SUNI is not set
++CONFIG_FDDI=y
++# CONFIG_DEFXX is not set
++CONFIG_SKFP=m
++# CONFIG_HIPPI is not set
++CONFIG_PLIP=m
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++# CONFIG_PPP_BSDCOMP is not set
++CONFIG_PPPOE=m
++CONFIG_PPPOATM=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++# CONFIG_SLIP_MODE_SLIP6 is not set
++CONFIG_NET_FC=y
++# CONFIG_SHAPER is not set
++CONFIG_NETCONSOLE=m
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++# CONFIG_ISDN_PPP_BSDCOMP is not set
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++
++#
++# ISDN feature submodules
++#
++CONFIG_ISDN_DRV_LOOP=m
++CONFIG_ISDN_DIVERSION=m
++
++#
++# ISDN4Linux hardware drivers
++#
++
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
++
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++CONFIG_HISAX_NO_SENDCOMPLETE=y
++CONFIG_HISAX_NO_LLC=y
++CONFIG_HISAX_NO_KEYPAD=y
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
++
++#
++# HiSax PCMCIA card service modules
++#
++
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_HDLC=y
++
++#
++# Active cards
++#
++CONFIG_HYSDN=m
++CONFIG_HYSDN_CAPI=y
++
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
++CONFIG_ISDN_CAPI_CAPIFS=m
++CONFIG_ISDN_CAPI_CAPIDRV=m
++
++#
++# CAPI hardware drivers
++#
++
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++
++#
++# Active Eicon DIVA Server cards
++#
++# CONFIG_CAPI_EICON is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++# CONFIG_INPUT_TSDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++# CONFIG_KEYBOARD_SUNKBD is not set
++# CONFIG_KEYBOARD_LKKBD is not set
++# CONFIG_KEYBOARD_XTKBD is not set
++# CONFIG_KEYBOARD_NEWTON is not set
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_GUNZE=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_UINPUT=m
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=y
++# CONFIG_SERIO_CT82C710 is not set
++# CONFIG_SERIO_PARKBD is not set
++# CONFIG_SERIO_PCIPS2 is not set
++CONFIG_SERIO_LIBPS2=y
++# CONFIG_SERIO_RAW is not set
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_VORTEX=m
++CONFIG_GAMEPORT_FM801=m
++CONFIG_GAMEPORT_CS461X=m
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++# CONFIG_SERIAL_8250 is not set
++
++#
++# Non-8250 serial port support
++#
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++# CONFIG_LEGACY_PTYS is not set
++CONFIG_PRINTER=m
++CONFIG_LP_CONSOLE=y
++CONFIG_PPDEV=m
++CONFIG_TIPAR=m
++
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
++
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_ACQUIRE_WDT=m
++CONFIG_ADVANTECH_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_SC520_WDT=m
++CONFIG_EUROTECH_WDT=m
++CONFIG_IB700_WDT=m
++CONFIG_WAFER_WDT=m
++CONFIG_I8XX_TCO=m
++CONFIG_SC1200_WDT=m
++# CONFIG_60XX_WDT is not set
++CONFIG_CPU5_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_W83877F_WDT=m
++CONFIG_MACHZ_WDT=m
++
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++CONFIG_WDT_501_PCI=y
++
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++CONFIG_HW_RANDOM=m
++# CONFIG_NVRAM is not set
++CONFIG_RTC=y
++CONFIG_DTLK=m
++CONFIG_R3964=m
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_FTAPE is not set
++# CONFIG_AGP is not set
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++# CONFIG_DRM_GAMMA is not set
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++# CONFIG_MWAVE is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++CONFIG_HANGCHECK_TIMER=m
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=m
++CONFIG_I2C_CHARDEV=m
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD756_S4882=m
++CONFIG_I2C_AMD8111=m
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++CONFIG_I2C_ISA=m
++CONFIG_I2C_NFORCE2=m
++# CONFIG_I2C_PARPORT is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++CONFIG_I2C_PROSAVAGE=m
++CONFIG_I2C_SAVAGE4=m
++# CONFIG_SCx200_ACB is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++CONFIG_I2C_VOODOO3=m
++CONFIG_I2C_PCA_ISA=m
++
++#
++# Hardware Sensors Chip support
++#
++CONFIG_I2C_SENSOR=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_FSCHER=m
++CONFIG_SENSORS_FSCPOS=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_PC87360=m
++CONFIG_SENSORS_SMSC47B397=m
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83627HF=m
++
++#
++# Other I2C Chip support
++#
++CONFIG_SENSORS_DS1337=m
++CONFIG_SENSORS_EEPROM=m
++CONFIG_SENSORS_PCF8574=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_RTC8564=m
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# Dallas's 1-wire bus
++#
++CONFIG_W1=m
++CONFIG_W1_MATROX=m
++CONFIG_W1_DS9490=m
++CONFIG_W1_DS9490_BRIDGE=m
++CONFIG_W1_THERM=m
++CONFIG_W1_SMEM=m
++
++#
++# Misc devices
++#
++# CONFIG_IBM_ASM is not set
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=m
++
++#
++# Video For Linux
++#
++
++#
++# Video Adapters
++#
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_CPIA=m
++CONFIG_VIDEO_CPIA_PP=m
++CONFIG_VIDEO_CPIA_USB=m
++CONFIG_VIDEO_SAA5246A=m
++CONFIG_VIDEO_SAA5249=m
++CONFIG_TUNER_3036=m
++CONFIG_VIDEO_STRADIS=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++# CONFIG_VIDEO_ZR36120 is not set
++CONFIG_VIDEO_SAA7134=m
++CONFIG_VIDEO_SAA7134_DVB=m
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DPC=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_CX88=m
++CONFIG_VIDEO_CX88_DVB=m
++CONFIG_VIDEO_OVCAMCHIP=m
++
++#
++# Radio Adapters
++#
++CONFIG_RADIO_GEMTEK_PCI=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_MAESTRO=m
++
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB=y
++CONFIG_DVB_CORE=m
++
++#
++# Supported SAA7146 based PCI Adapters
++#
++CONFIG_DVB_AV7110=m
++CONFIG_DVB_AV7110_OSD=y
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++
++#
++# Supported USB Adapters
++#
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_DIBUSB=m
++CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES=y
++# CONFIG_DVB_DIBCOM_DEBUG is not set
++CONFIG_DVB_CINERGYT2=m
++CONFIG_DVB_CINERGYT2_TUNING=y
++CONFIG_DVB_CINERGYT2_STREAM_URB_COUNT=32
++CONFIG_DVB_CINERGYT2_STREAM_BUF_SIZE=512
++CONFIG_DVB_CINERGYT2_QUERY_INTERVAL=250
++CONFIG_DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE=y
++CONFIG_DVB_CINERGYT2_RC_QUERY_INTERVAL=100
++
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
++CONFIG_DVB_B2C2_SKYSTAR=m
++
++#
++# Supported BT878 Adapters
++#
++CONFIG_DVB_BT8XX=m
++
++#
++# Supported DVB Frontends
++#
++
++#
++# Customise DVB Frontends
++#
++
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_TDA80XX=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_VES1X93=m
++
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
++
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_ATMEL_AT76C651=m
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_STV0297=m
++
++#
++# ATSC (North American/Korean Terresterial DTV) frontends
++#
++CONFIG_DVB_NXT2002=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_VIDEOBUF=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BUF=m
++CONFIG_VIDEO_BUF_DVB=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_IR=m
++CONFIG_VIDEO_TVEEPROM=m
++
++#
++# Graphics support
++#
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++CONFIG_FB_SOFT_CURSOR=y
++# CONFIG_FB_MACMODES is not set
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_FB_CIRRUS=m
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++CONFIG_FB_VGA16=m
++CONFIG_FB_VESA=y
++CONFIG_VIDEO_SELECT=y
++# CONFIG_FB_HGA is not set
++# CONFIG_FB_NVIDIA is not set
++CONFIG_FB_RIVA=m
++# CONFIG_FB_RIVA_I2C is not set
++# CONFIG_FB_RIVA_DEBUG is not set
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++CONFIG_FB_MATROX_G=y
++CONFIG_FB_MATROX_I2C=m
++CONFIG_FB_MATROX_MAVEN=m
++CONFIG_FB_MATROX_MULTIHEAD=y
++# CONFIG_FB_RADEON_OLD is not set
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++CONFIG_FB_ATY_GENERIC_LCD=y
++# CONFIG_FB_ATY_XL_INIT is not set
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++CONFIG_FB_SAVAGE_ACCEL=y
++# CONFIG_FB_SIS is not set
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++CONFIG_FB_3DFX_ACCEL=y
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_TRIDENT=m
++CONFIG_FB_TRIDENT_ACCEL=y
++# CONFIG_FB_PM3 is not set
++# CONFIG_FB_GEODE is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_VIRTUAL is not set
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++CONFIG_BACKLIGHT_LCD_SUPPORT=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=m
++CONFIG_BACKLIGHT_DEVICE=y
++CONFIG_LCD_CLASS_DEVICE=m
++CONFIG_LCD_DEVICE=y
++
++#
++# Sound
++#
++CONFIG_SOUND=m
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_RTCTIMER=m
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_DUMMY=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++# CONFIG_SND_SERIAL_U16550 is not set
++CONFIG_SND_MPU401=m
++
++#
++# PCI devices
++#
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CS4281=m
++CONFIG_SND_EMU10K1=m
++CONFIG_SND_EMU10K1X=m
++CONFIG_SND_CA0106=m
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_YMFPCI=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_VIA82XX=m
++CONFIG_SND_VIA82XX_MODEM=m
++CONFIG_SND_VX222=m
++CONFIG_SND_HDA_INTEL=m
++
++#
++# USB devices
++#
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_USX2Y=m
++
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++# CONFIG_USB_BANDWIDTH is not set
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=m
++CONFIG_USB_EHCI_SPLIT_ISO=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_OHCI_HCD=m
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=m
++CONFIG_USB_SL811_HCD=m
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_AUDIO is not set
++
++#
++# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
++#
++CONFIG_USB_MIDI=m
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_DATAFAB=y
++CONFIG_USB_STORAGE_FREECOM=y
++CONFIG_USB_STORAGE_ISD200=y
++CONFIG_USB_STORAGE_DPCM=y
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_SDDR09=y
++CONFIG_USB_STORAGE_SDDR55=y
++CONFIG_USB_STORAGE_JUMPSHOT=y
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++CONFIG_HID_FF=y
++CONFIG_HID_PID=y
++CONFIG_LOGITECH_FF=y
++CONFIG_THRUSTMASTER_FF=y
++CONFIG_USB_HIDDEV=y
++CONFIG_USB_AIPTEK=m
++CONFIG_USB_WACOM=m
++CONFIG_USB_KBTAB=m
++CONFIG_USB_POWERMATE=m
++CONFIG_USB_MTOUCH=m
++CONFIG_USB_EGALAX=m
++CONFIG_USB_XPAD=m
++CONFIG_USB_ATI_REMOTE=m
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++
++#
++# USB Multimedia devices
++#
++CONFIG_USB_DABUSB=m
++CONFIG_USB_VICAM=m
++CONFIG_USB_DSBR=m
++CONFIG_USB_IBMCAM=m
++CONFIG_USB_KONICAWC=m
++CONFIG_USB_OV511=m
++CONFIG_USB_SE401=m
++CONFIG_USB_SN9C102=m
++CONFIG_USB_STV680=m
++CONFIG_USB_W9968CF=m
++CONFIG_USB_PWC=m
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_USBNET=m
++
++#
++# USB Host-to-Host Cables
++#
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_GENESYS=y
++CONFIG_USB_NET1080=y
++CONFIG_USB_PL2301=y
++CONFIG_USB_KC2190=y
++
++#
++# Intelligent USB Devices/Gadgets
++#
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_ZAURUS=y
++CONFIG_USB_CDCETHER=y
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_AX8817X=y
++CONFIG_USB_ZD1201=m
++CONFIG_USB_MON=m
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_AIRPRIME=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP2101=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++CONFIG_USB_SERIAL_GARMIN=m
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++CONFIG_USB_SERIAL_KEYSPAN_MPR=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
++CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19=y
++CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
++CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
++CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_SAFE=m
++CONFIG_USB_SERIAL_SAFE_PADDED=y
++CONFIG_USB_SERIAL_TI=m
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_EZUSB=y
++
++#
++# USB Miscellaneous drivers
++#
++CONFIG_USB_EMI62=m
++# CONFIG_USB_EMI26 is not set
++CONFIG_USB_AUERSWALD=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LED=m
++# CONFIG_USB_CYTHERM is not set
++CONFIG_USB_PHIDGETKIT=m
++CONFIG_USB_PHIDGETSERVO=m
++CONFIG_USB_IDMOUSE=m
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_TEST=m
++
++#
++# USB ATM/DSL drivers
++#
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++CONFIG_MMC=m
++# CONFIG_MMC_DEBUG is not set
++CONFIG_MMC_BLOCK=m
++CONFIG_MMC_WBSD=m
++
++#
++# InfiniBand support
++#
++CONFIG_INFINIBAND=m
++CONFIG_INFINIBAND_MTHCA=m
++# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
++CONFIG_INFINIBAND_IPOIB=m
++# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
++
++#
++# Power management options
++#
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BOOT=y
++CONFIG_ACPI_INTERPRETER=y
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_PROCESSOR=m
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_BUS=y
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_PCI=y
++CONFIG_ACPI_SYSTEM=y
++# CONFIG_ACPI_CONTAINER is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=m
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=m
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++CONFIG_REISERFS_PROC_INFO=y
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++CONFIG_JFS_SECURITY=y
++# CONFIG_JFS_DEBUG is not set
++# CONFIG_JFS_STATISTICS is not set
++CONFIG_FS_POSIX_ACL=y
++
++#
++# XFS support
++#
++CONFIG_XFS_FS=m
++CONFIG_XFS_EXPORT=y
++# CONFIG_XFS_RT is not set
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_SECURITY=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++CONFIG_QUOTA=y
++# CONFIG_QFMT_V1 is not set
++CONFIG_QFMT_V2=y
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=m
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++CONFIG_DEVPTS_FS_XATTR=y
++CONFIG_DEVPTS_FS_SECURITY=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_TMPFS_SECURITY=y
++# CONFIG_HUGETLBFS is not set
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++# CONFIG_JFFS_FS is not set
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++CONFIG_JFFS2_FS_NAND=y
++# CONFIG_JFFS2_FS_NOR_ECC is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++CONFIG_VXFS_FS=m
++# CONFIG_HPFS_FS is not set
++CONFIG_QNX4FS_FS=m
++# CONFIG_QNX4FS_RW is not set
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_RPCSEC_GSS_SPKM3=m
++CONFIG_SMB_FS=m
++# CONFIG_SMB_NLS_DEFAULT is not set
++CONFIG_CIFS=m
++# CONFIG_CIFS_STATS is not set
++CONFIG_CIFS_XATTR=y
++CONFIG_CIFS_POSIX=y
++# CONFIG_CIFS_EXPERIMENTAL is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++CONFIG_NCPFS_SMALLDOS=y
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++# CONFIG_ATARI_PARTITION is not set
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++# CONFIG_LDM_PARTITION is not set
++CONFIG_SGI_PARTITION=y
++# CONFIG_ULTRIX_PARTITION is not set
++CONFIG_SUN_PARTITION=y
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_UTF8=m
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++CONFIG_KEYS_DEBUG_PROC_KEYS=y
++CONFIG_SECURITY=y
++CONFIG_SECURITY_NETWORK=y
++CONFIG_SECURITY_CAPABILITIES=y
++# CONFIG_SECURITY_ROOTPLUG is not set
++# CONFIG_SECURITY_SECLVL is not set
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=m
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA256=m
++CONFIG_CRYPTO_SHA512=m
++CONFIG_CRYPTO_WP512=m
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_AES=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=m
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=m
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=15
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_KPROBES is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
++# CONFIG_INIT_DEBUG is not set
+diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_ia64 linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_ia64
+--- pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_ia64	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_ia64	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,1261 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12
++# Thu Sep 15 11:04:33 2005
++#
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++# CONFIG_CLEAN_COMPILE is not set
++CONFIG_BROKEN=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++# CONFIG_MODULE_UNLOAD is not set
++CONFIG_OBSOLETE_MODPARM=y
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++# CONFIG_KMOD is not set
++
++#
++# Processor type and features
++#
++CONFIG_IA64=y
++CONFIG_64BIT=y
++CONFIG_MMU=y
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_TIME_INTERPOLATION=y
++CONFIG_EFI=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_XEN=y
++CONFIG_XEN_VT=n
++CONFIG_ARCH_XEN=y
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_PHYSDEV_ACCESS=y
++CONFIG_XEN_BLKDEV_GRANT=y
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_VGA_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=n
++CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
++# CONFIG_IA64_GENERIC is not set
++# CONFIG_IA64_DIG is not set
++CONFIG_IA64_HP_ZX1=y
++# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
++# CONFIG_IA64_SGI_SN2 is not set
++# CONFIG_IA64_HP_SIM is not set
++# CONFIG_ITANIUM is not set
++CONFIG_MCKINLEY=y
++# CONFIG_IA64_PAGE_SIZE_4KB is not set
++# CONFIG_IA64_PAGE_SIZE_8KB is not set
++CONFIG_IA64_PAGE_SIZE_16KB=y
++# CONFIG_IA64_PAGE_SIZE_64KB is not set
++CONFIG_IA64_L1_CACHE_SHIFT=7
++# CONFIG_NUMA is not set
++CONFIG_VIRTUAL_MEM_MAP=y
++CONFIG_HOLES_IN_ZONE=y
++# CONFIG_IA64_CYCLONE is not set
++CONFIG_IOSAPIC=y
++CONFIG_FORCE_MAX_ZONEORDER=18
++CONFIG_SMP=y
++CONFIG_NR_CPUS=16
++# CONFIG_HOTPLUG_CPU is not set
++# CONFIG_SCHED_SMT is not set
++# CONFIG_PREEMPT is not set
++CONFIG_HAVE_DEC_LOCK=y
++# CONFIG_IA32_SUPPORT is not set
++CONFIG_IA64_MCA_RECOVERY=y
++CONFIG_PERFMON=y
++CONFIG_IA64_PALINFO=y
++CONFIG_ACPI_DEALLOCATE_IRQ=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EFI_VARS=y
++CONFIG_EFI_PCDP=y
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Power management and ACPI
++#
++CONFIG_PM=y
++CONFIG_ACPI=y
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI_BOOT=y
++CONFIG_ACPI_INTERPRETER=y
++CONFIG_ACPI_BUTTON=y
++# CONFIG_ACPI_VIDEO is not set
++CONFIG_ACPI_FAN=y
++CONFIG_ACPI_PROCESSOR=y
++CONFIG_ACPI_THERMAL=y
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_BUS=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_PCI=y
++CONFIG_ACPI_SYSTEM=y
++# CONFIG_ACPI_CONTAINER is not set
++
++#
++# Bus options (PCI, PCMCIA)
++#
++CONFIG_PCI=y
++CONFIG_PCI_DOMAINS=y
++# CONFIG_PCI_MSI is not set
++CONFIG_PCI_LEGACY_PROC=y
++CONFIG_PCI_NAMES=y
++# CONFIG_PCI_DEBUG is not set
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=y
++# CONFIG_HOTPLUG_PCI_FAKE is not set
++CONFIG_HOTPLUG_PCI_ACPI=y
++# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
++# CONFIG_HOTPLUG_PCI_CPCI is not set
++# CONFIG_HOTPLUG_PCI_SHPC is not set
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++# CONFIG_PCCARD is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++# CONFIG_MTD is not set
++
++#
++# Parallel port support
++#
++# CONFIG_PARPORT is not set
++
++#
++# Plug and Play support
++#
++# CONFIG_PNP is not set
++
++#
++# Block devices
++#
++# CONFIG_BLK_CPQ_DA is not set
++# CONFIG_BLK_CPQ_CISS_DA is not set
++# CONFIG_BLK_DEV_DAC960 is not set
++# CONFIG_BLK_DEV_UMEM is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=y
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++# CONFIG_BLK_DEV_NBD is not set
++# CONFIG_BLK_DEV_SX8 is not set
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_CDROM_PKTCDVD is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++# CONFIG_IDE is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=y
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=y
++CONFIG_CHR_DEV_ST=y
++CONFIG_CHR_DEV_OSST=y
++CONFIG_BLK_DEV_SR=y
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=y
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=y
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
++# CONFIG_SCSI_3W_9XXX is not set
++# CONFIG_SCSI_ACARD is not set
++# CONFIG_SCSI_AACRAID is not set
++# CONFIG_SCSI_AIC7XXX is not set
++# CONFIG_SCSI_AIC7XXX_OLD is not set
++# CONFIG_SCSI_AIC79XX is not set
++# CONFIG_SCSI_ADVANSYS is not set
++# CONFIG_MEGARAID_NEWGEN is not set
++# CONFIG_MEGARAID_LEGACY is not set
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_CPQFCTS is not set
++# CONFIG_SCSI_DMX3191D is not set
++# CONFIG_SCSI_EATA_PIO is not set
++# CONFIG_SCSI_FUTURE_DOMAIN is not set
++# CONFIG_SCSI_IPS is not set
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_SYM53C8XX_2=y
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
++# CONFIG_SCSI_IPR is not set
++# CONFIG_SCSI_PCI2000 is not set
++# CONFIG_SCSI_PCI2220I is not set
++# CONFIG_SCSI_QLOGIC_ISP is not set
++# CONFIG_SCSI_QLOGIC_FC is not set
++CONFIG_SCSI_QLOGIC_1280=y
++# CONFIG_SCSI_QLOGIC_1280_1040 is not set
++CONFIG_SCSI_QLA2XXX=y
++# CONFIG_SCSI_QLA21XX is not set
++# CONFIG_SCSI_QLA22XX is not set
++# CONFIG_SCSI_QLA2300 is not set
++# CONFIG_SCSI_QLA2322 is not set
++# CONFIG_SCSI_QLA6312 is not set
++# CONFIG_SCSI_LPFC is not set
++# CONFIG_SCSI_DC395x is not set
++# CONFIG_SCSI_DC390T is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=y
++CONFIG_FUSION_MAX_SGE=40
++# CONFIG_FUSION_CTL is not set
++
++#
++# IEEE 1394 (FireWire) support
++#
++# CONFIG_IEEE1394 is not set
++
++#
++# I2O device support
++#
++# CONFIG_I2O is not set
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++# CONFIG_IP_ADVANCED_ROUTER is not set
++# CONFIG_IP_PNP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_IP_MROUTE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_TUNNEL is not set
++# CONFIG_IP_TCPDIAG is not set
++# CONFIG_IP_TCPDIAG_IPV6 is not set
++
++#
++# IP: Virtual Server Configuration
++#
++# CONFIG_IP_VS is not set
++# CONFIG_IPV6 is not set
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++
++#
++# IP: Netfilter Configuration
++#
++# CONFIG_IP_NF_CONNTRACK is not set
++# CONFIG_IP_NF_CONNTRACK_MARK is not set
++# CONFIG_IP_NF_QUEUE is not set
++# CONFIG_IP_NF_IPTABLES is not set
++CONFIG_IP_NF_ARPTABLES=y
++# CONFIG_IP_NF_ARPFILTER is not set
++# CONFIG_IP_NF_ARP_MANGLE is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_NET_DIVERT is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++# CONFIG_NET_CLS_ROUTE is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=y
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# ARCnet devices
++#
++# CONFIG_ARCNET is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=y
++# CONFIG_HAPPYMEAL is not set
++# CONFIG_SUNGEM is not set
++# CONFIG_NET_VENDOR_3COM is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++# CONFIG_DE2104X is not set
++CONFIG_TULIP=y
++CONFIG_TULIP_MWI=y
++CONFIG_TULIP_MMIO=y
++CONFIG_TULIP_NAPI=y
++CONFIG_TULIP_NAPI_HW_MITIGATION=y
++# CONFIG_DE4X5 is not set
++# CONFIG_WINBOND_840 is not set
++# CONFIG_DM9102 is not set
++# CONFIG_HP100 is not set
++CONFIG_NET_PCI=y
++# CONFIG_PCNET32 is not set
++# CONFIG_AMD8111_ETH is not set
++# CONFIG_ADAPTEC_STARFIRE is not set
++# CONFIG_B44 is not set
++# CONFIG_FORCEDETH is not set
++# CONFIG_DGRS is not set
++# CONFIG_EEPRO100 is not set
++CONFIG_E100=y
++# CONFIG_FEALNX is not set
++# CONFIG_NATSEMI is not set
++# CONFIG_NE2K_PCI is not set
++# CONFIG_8139CP is not set
++# CONFIG_8139TOO is not set
++# CONFIG_SIS900 is not set
++# CONFIG_EPIC100 is not set
++# CONFIG_SUNDANCE is not set
++# CONFIG_VIA_RHINE is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++# CONFIG_DL2K is not set
++CONFIG_E1000=y
++# CONFIG_E1000_NAPI is not set
++# CONFIG_NS83820 is not set
++# CONFIG_HAMACHI is not set
++# CONFIG_YELLOWFIN is not set
++# CONFIG_R8169 is not set
++# CONFIG_SK98LIN is not set
++# CONFIG_VIA_VELOCITY is not set
++CONFIG_TIGON3=y
++# CONFIG_BNX2 is not set
++
++#
++# Ethernet (10000 Mbit)
++#
++# CONFIG_IXGB is not set
++# CONFIG_S2IO is not set
++
++#
++# Token Ring devices
++#
++# CONFIG_TR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_FDDI is not set
++# CONFIG_HIPPI is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_NET_FC is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++
++#
++# ISDN subsystem
++#
++# CONFIG_ISDN is not set
++
++#
++# Telephony Support
++#
++# CONFIG_PHONE is not set
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=y
++# CONFIG_INPUT_TSDEV is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_INPUT_EVBUG is not set
++
++#
++# Input Device Drivers
++#
++# CONFIG_INPUT_KEYBOARD is not set
++# CONFIG_INPUT_MOUSE is not set
++# CONFIG_INPUT_JOYSTICK is not set
++# CONFIG_INPUT_TOUCHSCREEN is not set
++# CONFIG_INPUT_MISC is not set
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++# CONFIG_SERIO_I8042 is not set
++# CONFIG_SERIO_SERPORT is not set
++# CONFIG_SERIO_PCIPS2 is not set
++# CONFIG_SERIO_RAW is not set
++# CONFIG_GAMEPORT is not set
++
++#
++# Character devices
++#
++# CONFIG_VT is not set
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_ACPI=y
++CONFIG_SERIAL_8250_NR_UARTS=8
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++# CONFIG_SERIAL_8250_DETECT_IRQ is not set
++# CONFIG_SERIAL_8250_MULTIPORT is not set
++# CONFIG_SERIAL_8250_RSA is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=y
++CONFIG_SERIAL_CORE_CONSOLE=y
++# CONFIG_SERIAL_JSM is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++
++#
++# IPMI
++#
++# CONFIG_IPMI_HANDLER is not set
++
++#
++# Watchdog Cards
++#
++# CONFIG_WATCHDOG is not set
++# CONFIG_HW_RANDOM is not set
++CONFIG_EFI_RTC=y
++# CONFIG_DTLK is not set
++# CONFIG_R3964 is not set
++# CONFIG_APPLICOM is not set
++
++#
++# Ftape, the floppy tape device driver
++#
++CONFIG_AGP=y
++CONFIG_AGP_HP_ZX1=y
++CONFIG_DRM=y
++# CONFIG_DRM_TDFX is not set
++# CONFIG_DRM_GAMMA is not set
++# CONFIG_DRM_R128 is not set
++CONFIG_DRM_RADEON=y
++# CONFIG_DRM_MGA is not set
++# CONFIG_DRM_SIS is not set
++# CONFIG_RAW_DRIVER is not set
++# CONFIG_HPET is not set
++# CONFIG_HANGCHECK_TIMER is not set
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=y
++CONFIG_I2C_CHARDEV=y
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=y
++CONFIG_I2C_ALGOPCF=y
++# CONFIG_I2C_ALGOPCA is not set
++
++#
++# I2C Hardware Bus support
++#
++# CONFIG_I2C_ALI1535 is not set
++# CONFIG_I2C_ALI1563 is not set
++# CONFIG_I2C_ALI15X3 is not set
++# CONFIG_I2C_AMD756 is not set
++# CONFIG_I2C_AMD8111 is not set
++# CONFIG_I2C_I801 is not set
++# CONFIG_I2C_I810 is not set
++# CONFIG_I2C_PIIX4 is not set
++# CONFIG_I2C_ISA is not set
++# CONFIG_I2C_NFORCE2 is not set
++# CONFIG_I2C_PARPORT_LIGHT is not set
++# CONFIG_I2C_PROSAVAGE is not set
++# CONFIG_I2C_SAVAGE4 is not set
++# CONFIG_SCx200_ACB is not set
++# CONFIG_I2C_SIS5595 is not set
++# CONFIG_I2C_SIS630 is not set
++# CONFIG_I2C_SIS96X is not set
++# CONFIG_I2C_STUB is not set
++# CONFIG_I2C_VIA is not set
++# CONFIG_I2C_VIAPRO is not set
++# CONFIG_I2C_VOODOO3 is not set
++# CONFIG_I2C_PCA_ISA is not set
++
++#
++# Hardware Sensors Chip support
++#
++# CONFIG_I2C_SENSOR is not set
++# CONFIG_SENSORS_ADM1021 is not set
++# CONFIG_SENSORS_ADM1025 is not set
++# CONFIG_SENSORS_ADM1026 is not set
++# CONFIG_SENSORS_ADM1031 is not set
++# CONFIG_SENSORS_ASB100 is not set
++# CONFIG_SENSORS_DS1621 is not set
++# CONFIG_SENSORS_FSCHER is not set
++# CONFIG_SENSORS_FSCPOS is not set
++# CONFIG_SENSORS_GL518SM is not set
++# CONFIG_SENSORS_GL520SM is not set
++# CONFIG_SENSORS_IT87 is not set
++# CONFIG_SENSORS_LM63 is not set
++# CONFIG_SENSORS_LM75 is not set
++# CONFIG_SENSORS_LM77 is not set
++# CONFIG_SENSORS_LM78 is not set
++# CONFIG_SENSORS_LM80 is not set
++# CONFIG_SENSORS_LM83 is not set
++# CONFIG_SENSORS_LM85 is not set
++# CONFIG_SENSORS_LM87 is not set
++# CONFIG_SENSORS_LM90 is not set
++# CONFIG_SENSORS_LM92 is not set
++# CONFIG_SENSORS_MAX1619 is not set
++# CONFIG_SENSORS_PC87360 is not set
++# CONFIG_SENSORS_SMSC47B397 is not set
++# CONFIG_SENSORS_SIS5595 is not set
++# CONFIG_SENSORS_SMSC47M1 is not set
++# CONFIG_SENSORS_VIA686A is not set
++# CONFIG_SENSORS_W83781D is not set
++# CONFIG_SENSORS_W83L785TS is not set
++# CONFIG_SENSORS_W83627HF is not set
++
++#
++# Other I2C Chip support
++#
++# CONFIG_SENSORS_DS1337 is not set
++# CONFIG_SENSORS_EEPROM is not set
++# CONFIG_SENSORS_PCF8574 is not set
++# CONFIG_SENSORS_PCF8591 is not set
++# CONFIG_SENSORS_RTC8564 is not set
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# Dallas's 1-wire bus
++#
++# CONFIG_W1 is not set
++
++#
++# Misc devices
++#
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=y
++
++#
++# Video For Linux
++#
++
++#
++# Video Adapters
++#
++# CONFIG_VIDEO_BT848 is not set
++# CONFIG_VIDEO_CPIA is not set
++# CONFIG_VIDEO_SAA5246A is not set
++# CONFIG_VIDEO_SAA5249 is not set
++# CONFIG_TUNER_3036 is not set
++# CONFIG_VIDEO_STRADIS is not set
++# CONFIG_VIDEO_ZORAN is not set
++# CONFIG_VIDEO_ZR36120 is not set
++# CONFIG_VIDEO_SAA7134 is not set
++# CONFIG_VIDEO_MXB is not set
++# CONFIG_VIDEO_DPC is not set
++# CONFIG_VIDEO_HEXIUM_ORION is not set
++# CONFIG_VIDEO_HEXIUM_GEMINI is not set
++# CONFIG_VIDEO_CX88 is not set
++# CONFIG_VIDEO_OVCAMCHIP is not set
++
++#
++# Radio Adapters
++#
++# CONFIG_RADIO_GEMTEK_PCI is not set
++# CONFIG_RADIO_MAXIRADIO is not set
++# CONFIG_RADIO_MAESTRO is not set
++
++#
++# Digital Video Broadcasting Devices
++#
++# CONFIG_DVB is not set
++
++#
++# Graphics support
++#
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=y
++CONFIG_FB_CFB_COPYAREA=y
++CONFIG_FB_CFB_IMAGEBLIT=y
++CONFIG_FB_SOFT_CURSOR=y
++# CONFIG_FB_MACMODES is not set
++CONFIG_FB_MODE_HELPERS=y
++# CONFIG_FB_TILEBLITTING is not set
++# CONFIG_FB_CIRRUS is not set
++# CONFIG_FB_PM2 is not set
++# CONFIG_FB_CYBER2000 is not set
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++# CONFIG_FB_NVIDIA is not set
++# CONFIG_FB_RIVA is not set
++# CONFIG_FB_MATROX is not set
++# CONFIG_FB_RADEON_OLD is not set
++CONFIG_FB_RADEON=y
++CONFIG_FB_RADEON_I2C=y
++CONFIG_FB_RADEON_DEBUG=y
++# CONFIG_FB_ATY128 is not set
++# CONFIG_FB_ATY is not set
++# CONFIG_FB_SAVAGE is not set
++# CONFIG_FB_SIS is not set
++# CONFIG_FB_NEOMAGIC is not set
++# CONFIG_FB_KYRO is not set
++# CONFIG_FB_3DFX is not set
++# CONFIG_FB_VOODOO1 is not set
++# CONFIG_FB_TRIDENT is not set
++# CONFIG_FB_PM3 is not set
++# CONFIG_FB_S1D13XXX is not set
++# CONFIG_FB_VIRTUAL is not set
++
++#
++# Logo configuration
++#
++CONFIG_LOGO=y
++# CONFIG_LOGO_LINUX_MONO is not set
++# CONFIG_LOGO_LINUX_VGA16 is not set
++CONFIG_LOGO_LINUX_CLUT224=y
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=y
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=y
++CONFIG_SND_TIMER=y
++CONFIG_SND_PCM=y
++CONFIG_SND_HWDEP=y
++CONFIG_SND_RAWMIDI=y
++CONFIG_SND_SEQUENCER=y
++# CONFIG_SND_SEQ_DUMMY is not set
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=y
++CONFIG_SND_PCM_OSS=y
++CONFIG_SND_SEQUENCER_OSS=y
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=y
++CONFIG_SND_OPL3_LIB=y
++# CONFIG_SND_DUMMY is not set
++# CONFIG_SND_VIRMIDI is not set
++# CONFIG_SND_MTPAV is not set
++# CONFIG_SND_SERIAL_U16550 is not set
++# CONFIG_SND_MPU401 is not set
++
++#
++# PCI devices
++#
++CONFIG_SND_AC97_CODEC=y
++# CONFIG_SND_ALI5451 is not set
++# CONFIG_SND_ATIIXP is not set
++# CONFIG_SND_ATIIXP_MODEM is not set
++# CONFIG_SND_AU8810 is not set
++# CONFIG_SND_AU8820 is not set
++# CONFIG_SND_AU8830 is not set
++# CONFIG_SND_AZT3328 is not set
++# CONFIG_SND_BT87X is not set
++# CONFIG_SND_CS46XX is not set
++# CONFIG_SND_CS4281 is not set
++# CONFIG_SND_EMU10K1 is not set
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_CA0106 is not set
++# CONFIG_SND_KORG1212 is not set
++# CONFIG_SND_MIXART is not set
++# CONFIG_SND_NM256 is not set
++# CONFIG_SND_RME32 is not set
++# CONFIG_SND_RME96 is not set
++# CONFIG_SND_RME9652 is not set
++# CONFIG_SND_HDSP is not set
++# CONFIG_SND_TRIDENT is not set
++# CONFIG_SND_YMFPCI is not set
++# CONFIG_SND_ALS4000 is not set
++# CONFIG_SND_CMIPCI is not set
++# CONFIG_SND_ENS1370 is not set
++# CONFIG_SND_ENS1371 is not set
++# CONFIG_SND_ES1938 is not set
++# CONFIG_SND_ES1968 is not set
++# CONFIG_SND_MAESTRO3 is not set
++CONFIG_SND_FM801=y
++CONFIG_SND_FM801_TEA575X=y
++# CONFIG_SND_ICE1712 is not set
++# CONFIG_SND_ICE1724 is not set
++# CONFIG_SND_INTEL8X0 is not set
++# CONFIG_SND_INTEL8X0M is not set
++# CONFIG_SND_SONICVIBES is not set
++# CONFIG_SND_VIA82XX is not set
++# CONFIG_SND_VIA82XX_MODEM is not set
++# CONFIG_SND_VX222 is not set
++# CONFIG_SND_HDA_INTEL is not set
++
++#
++# USB devices
++#
++# CONFIG_SND_USB_AUDIO is not set
++
++#
++# Open Sound System
++#
++# CONFIG_SOUND_PRIME is not set
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++# CONFIG_USB_DEVICEFS is not set
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_SUSPEND is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++# CONFIG_USB_EHCI_SPLIT_ISO is not set
++# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
++CONFIG_USB_OHCI_HCD=y
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=y
++# CONFIG_USB_SL811_HCD is not set
++
++#
++# USB Device Class drivers
++#
++# CONFIG_USB_AUDIO is not set
++# CONFIG_USB_BLUETOOTH_TTY is not set
++# CONFIG_USB_MIDI is not set
++# CONFIG_USB_ACM is not set
++# CONFIG_USB_PRINTER is not set
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=y
++# CONFIG_USB_STORAGE_DEBUG is not set
++# CONFIG_USB_STORAGE_DATAFAB is not set
++# CONFIG_USB_STORAGE_FREECOM is not set
++# CONFIG_USB_STORAGE_DPCM is not set
++# CONFIG_USB_STORAGE_USBAT is not set
++# CONFIG_USB_STORAGE_SDDR09 is not set
++# CONFIG_USB_STORAGE_SDDR55 is not set
++# CONFIG_USB_STORAGE_JUMPSHOT is not set
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=y
++CONFIG_USB_HIDINPUT=y
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++# CONFIG_USB_AIPTEK is not set
++# CONFIG_USB_WACOM is not set
++# CONFIG_USB_KBTAB is not set
++# CONFIG_USB_POWERMATE is not set
++# CONFIG_USB_MTOUCH is not set
++# CONFIG_USB_EGALAX is not set
++# CONFIG_USB_XPAD is not set
++# CONFIG_USB_ATI_REMOTE is not set
++
++#
++# USB Imaging devices
++#
++# CONFIG_USB_MDC800 is not set
++# CONFIG_USB_MICROTEK is not set
++
++#
++# USB Multimedia devices
++#
++# CONFIG_USB_DABUSB is not set
++# CONFIG_USB_VICAM is not set
++# CONFIG_USB_DSBR is not set
++# CONFIG_USB_IBMCAM is not set
++# CONFIG_USB_KONICAWC is not set
++# CONFIG_USB_OV511 is not set
++# CONFIG_USB_SE401 is not set
++# CONFIG_USB_SN9C102 is not set
++# CONFIG_USB_STV680 is not set
++# CONFIG_USB_PWC is not set
++
++#
++# USB Network Adapters
++#
++# CONFIG_USB_CATC is not set
++# CONFIG_USB_KAWETH is not set
++# CONFIG_USB_PEGASUS is not set
++# CONFIG_USB_RTL8150 is not set
++# CONFIG_USB_USBNET is not set
++CONFIG_USB_MON=y
++
++#
++# USB port drivers
++#
++
++#
++# USB Serial Converter support
++#
++# CONFIG_USB_SERIAL is not set
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++# CONFIG_USB_AUERSWALD is not set
++# CONFIG_USB_RIO500 is not set
++# CONFIG_USB_LEGOTOWER is not set
++# CONFIG_USB_LCD is not set
++# CONFIG_USB_LED is not set
++# CONFIG_USB_CYTHERM is not set
++# CONFIG_USB_PHIDGETKIT is not set
++# CONFIG_USB_PHIDGETSERVO is not set
++# CONFIG_USB_IDMOUSE is not set
++# CONFIG_USB_SISUSBVGA is not set
++
++#
++# USB ATM/DSL drivers
++#
++
++#
++# USB Gadget Support
++#
++# CONFIG_USB_GADGET is not set
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++# CONFIG_EXT2_FS_POSIX_ACL is not set
++# CONFIG_EXT2_FS_SECURITY is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++# CONFIG_REISERFS_FS is not set
++# CONFIG_JFS_FS is not set
++
++#
++# XFS support
++#
++# CONFIG_XFS_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++# CONFIG_AUTOFS4_FS is not set
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++# CONFIG_ZISOFS is not set
++CONFIG_UDF_FS=y
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=y
++CONFIG_MSDOS_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++# CONFIG_DEVPTS_FS_XATTR is not set
++CONFIG_TMPFS=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_TMPFS_SECURITY=y
++CONFIG_HUGETLBFS=y
++CONFIG_HUGETLB_PAGE=y
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++# CONFIG_CRAMFS is not set
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++CONFIG_NFS_V4=y
++# CONFIG_NFS_DIRECTIO is not set
++CONFIG_NFSD=y
++CONFIG_NFSD_V3=y
++# CONFIG_NFSD_V4 is not set
++# CONFIG_NFSD_TCP is not set
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=y
++CONFIG_SUNRPC=y
++CONFIG_SUNRPC_GSS=y
++CONFIG_RPCSEC_GSS_KRB5=y
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++# CONFIG_OSF_PARTITION is not set
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++# CONFIG_MAC_PARTITION is not set
++CONFIG_MSDOS_PARTITION=y
++# CONFIG_BSD_DISKLABEL is not set
++# CONFIG_MINIX_SUBPARTITION is not set
++# CONFIG_SOLARIS_X86_PARTITION is not set
++# CONFIG_UNIXWARE_DISKLABEL is not set
++# CONFIG_LDM_PARTITION is not set
++# CONFIG_SGI_PARTITION is not set
++# CONFIG_ULTRIX_PARTITION is not set
++# CONFIG_SUN_PARTITION is not set
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=y
++CONFIG_NLS_CODEPAGE_775=y
++CONFIG_NLS_CODEPAGE_850=y
++CONFIG_NLS_CODEPAGE_852=y
++CONFIG_NLS_CODEPAGE_855=y
++CONFIG_NLS_CODEPAGE_857=y
++CONFIG_NLS_CODEPAGE_860=y
++CONFIG_NLS_CODEPAGE_861=y
++CONFIG_NLS_CODEPAGE_862=y
++CONFIG_NLS_CODEPAGE_863=y
++CONFIG_NLS_CODEPAGE_864=y
++CONFIG_NLS_CODEPAGE_865=y
++CONFIG_NLS_CODEPAGE_866=y
++CONFIG_NLS_CODEPAGE_869=y
++CONFIG_NLS_CODEPAGE_936=y
++CONFIG_NLS_CODEPAGE_950=y
++CONFIG_NLS_CODEPAGE_932=y
++CONFIG_NLS_CODEPAGE_949=y
++CONFIG_NLS_CODEPAGE_874=y
++CONFIG_NLS_ISO8859_8=y
++# CONFIG_NLS_CODEPAGE_1250 is not set
++CONFIG_NLS_CODEPAGE_1251=y
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++CONFIG_NLS_ISO8859_2=y
++CONFIG_NLS_ISO8859_3=y
++CONFIG_NLS_ISO8859_4=y
++CONFIG_NLS_ISO8859_5=y
++CONFIG_NLS_ISO8859_6=y
++CONFIG_NLS_ISO8859_7=y
++CONFIG_NLS_ISO8859_9=y
++CONFIG_NLS_ISO8859_13=y
++CONFIG_NLS_ISO8859_14=y
++CONFIG_NLS_ISO8859_15=y
++CONFIG_NLS_KOI8_R=y
++CONFIG_NLS_KOI8_U=y
++CONFIG_NLS_UTF8=y
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++CONFIG_CRC32=y
++# CONFIG_LIBCRC32C is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++
++#
++# Profiling support
++#
++# CONFIG_PROFILING is not set
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=17
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++CONFIG_IA64_GRANULE_16MB=y
++# CONFIG_IA64_GRANULE_64MB is not set
++CONFIG_IA64_PRINT_HAZARDS=y
++# CONFIG_DISABLE_VHPT is not set
++# CONFIG_IA64_DEBUG_CMPXCHG is not set
++# CONFIG_IA64_DEBUG_IRQ is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=y
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=y
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++# CONFIG_CRYPTO_CRC32C is not set
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
+diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_x86_32 linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_x86_32
+--- pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_x86_32	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_x86_32	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,562 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12-xenU
++# Wed Aug  3 09:57:44 2005
++#
++CONFIG_XEN=y
++CONFIG_ARCH_XEN=y
++CONFIG_NO_IDLE_HZ=y
++
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++# CONFIG_XEN_PHYSDEV_ACCESS is not set
++# CONFIG_XEN_TPMDEV_FRONTEND is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SHADOW_MODE is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_X86=y
++# CONFIG_XEN_X86_64 is not set
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_CLEAN_COMPILE=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++# CONFIG_POSIX_MQUEUE is not set
++# CONFIG_BSD_PROCESS_ACCT is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_OBSOLETE_MODPARM=y
++# CONFIG_MODVERSIONS is not set
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# X86 Processor Configuration
++#
++CONFIG_XENARCH="i386"
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_UID16=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++# CONFIG_X86_GENERIC is not set
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=5
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++# CONFIG_HPET_TIMER is not set
++# CONFIG_HPET_EMULATE_RTC is not set
++CONFIG_SMP=y
++CONFIG_SMP_ALTERNATIVES=y
++CONFIG_NR_CPUS=8
++# CONFIG_SCHED_SMT is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++CONFIG_X86_CPUID=y
++
++#
++# Firmware Drivers
++#
++# CONFIG_EDD is not set
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_HIGHMEM=y
++CONFIG_HAVE_DEC_LOCK=y
++# CONFIG_REGPARM is not set
++CONFIG_HOTPLUG_CPU=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_PC=y
++CONFIG_SECCOMP=y
++CONFIG_EARLY_PRINTK=y
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++# CONFIG_BINFMT_AOUT is not set
++# CONFIG_BINFMT_MISC is not set
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++# CONFIG_FW_LOADER is not set
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Block devices
++#
++# CONFIG_BLK_DEV_FD is not set
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++# CONFIG_BLK_DEV_CRYPTOLOOP is not set
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=4096
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++# CONFIG_LBD is not set
++# CONFIG_CDROM_PKTCDVD is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++# CONFIG_CHR_DEV_ST is not set
++# CONFIG_CHR_DEV_OSST is not set
++# CONFIG_BLK_DEV_SR is not set
++# CONFIG_CHR_DEV_SG is not set
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++# CONFIG_SCSI_CONSTANTS is not set
++# CONFIG_SCSI_LOGGING is not set
++
++#
++# SCSI Transport Attributes
++#
++# CONFIG_SCSI_SPI_ATTRS is not set
++# CONFIG_SCSI_FC_ATTRS is not set
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++# CONFIG_SCSI_SATA is not set
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++# CONFIG_MD is not set
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++# CONFIG_PACKET_MMAP is not set
++CONFIG_UNIX=y
++# CONFIG_NET_KEY is not set
++CONFIG_INET=y
++# CONFIG_IP_MULTICAST is not set
++# CONFIG_IP_ADVANCED_ROUTER is not set
++CONFIG_IP_PNP=y
++# CONFIG_IP_PNP_DHCP is not set
++# CONFIG_IP_PNP_BOOTP is not set
++# CONFIG_IP_PNP_RARP is not set
++# CONFIG_NET_IPIP is not set
++# CONFIG_NET_IPGRE is not set
++# CONFIG_ARPD is not set
++# CONFIG_SYN_COOKIES is not set
++# CONFIG_INET_AH is not set
++# CONFIG_INET_ESP is not set
++# CONFIG_INET_IPCOMP is not set
++# CONFIG_INET_TUNNEL is not set
++CONFIG_IP_TCPDIAG=y
++# CONFIG_IP_TCPDIAG_IPV6 is not set
++# CONFIG_IPV6 is not set
++# CONFIG_NETFILTER is not set
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP_SCTP is not set
++# CONFIG_ATM is not set
++# CONFIG_BRIDGE is not set
++# CONFIG_VLAN_8021Q is not set
++# CONFIG_DECNET is not set
++# CONFIG_LLC2 is not set
++# CONFIG_IPX is not set
++# CONFIG_ATALK is not set
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++# CONFIG_NET_DIVERT is not set
++# CONFIG_ECONET is not set
++# CONFIG_WAN_ROUTER is not set
++
++#
++# QoS and/or fair queueing
++#
++# CONFIG_NET_SCHED is not set
++# CONFIG_NET_CLS_ROUTE is not set
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++# CONFIG_NETPOLL is not set
++# CONFIG_NET_POLL_CONTROLLER is not set
++# CONFIG_HAMRADIO is not set
++# CONFIG_IRDA is not set
++# CONFIG_BT is not set
++CONFIG_NETDEVICES=y
++# CONFIG_DUMMY is not set
++# CONFIG_BONDING is not set
++# CONFIG_EQUALIZER is not set
++# CONFIG_TUN is not set
++
++#
++# Ethernet (10 or 100Mbit)
++#
++# CONFIG_NET_ETHERNET is not set
++
++#
++# Ethernet (1000 Mbit)
++#
++
++#
++# Ethernet (10000 Mbit)
++#
++
++#
++# Token Ring devices
++#
++
++#
++# Wireless LAN (non-hamradio)
++#
++# CONFIG_NET_RADIO is not set
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++# CONFIG_PPP is not set
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++# CONFIG_NETCONSOLE is not set
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++# CONFIG_TCG_TPM is not set
++
++#
++# Character devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++# CONFIG_EXT2_FS_XATTR is not set
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=y
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++# CONFIG_JFS_FS is not set
++
++#
++# XFS support
++#
++# CONFIG_XFS_FS is not set
++# CONFIG_MINIX_FS is not set
++# CONFIG_ROMFS_FS is not set
++# CONFIG_QUOTA is not set
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=y
++CONFIG_AUTOFS4_FS=y
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++# CONFIG_UDF_FS is not set
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++CONFIG_DEVPTS_FS_XATTR=y
++# CONFIG_DEVPTS_FS_SECURITY is not set
++CONFIG_TMPFS=y
++# CONFIG_TMPFS_XATTR is not set
++# CONFIG_HUGETLBFS is not set
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++# CONFIG_AFFS_FS is not set
++# CONFIG_HFS_FS is not set
++# CONFIG_HFSPLUS_FS is not set
++# CONFIG_BEFS_FS is not set
++# CONFIG_BFS_FS is not set
++# CONFIG_EFS_FS is not set
++CONFIG_CRAMFS=y
++# CONFIG_VXFS_FS is not set
++# CONFIG_HPFS_FS is not set
++# CONFIG_QNX4FS_FS is not set
++# CONFIG_SYSV_FS is not set
++# CONFIG_UFS_FS is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=y
++CONFIG_NFS_V3=y
++# CONFIG_NFS_V4 is not set
++# CONFIG_NFS_DIRECTIO is not set
++# CONFIG_NFSD is not set
++CONFIG_ROOT_NFS=y
++CONFIG_LOCKD=y
++CONFIG_LOCKD_V4=y
++CONFIG_SUNRPC=y
++# CONFIG_RPCSEC_GSS_KRB5 is not set
++# CONFIG_RPCSEC_GSS_SPKM3 is not set
++# CONFIG_SMB_FS is not set
++# CONFIG_CIFS is not set
++# CONFIG_NCP_FS is not set
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++# CONFIG_PARTITION_ADVANCED is not set
++CONFIG_MSDOS_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="iso8859-1"
++CONFIG_NLS_CODEPAGE_437=y
++# CONFIG_NLS_CODEPAGE_737 is not set
++# CONFIG_NLS_CODEPAGE_775 is not set
++# CONFIG_NLS_CODEPAGE_850 is not set
++# CONFIG_NLS_CODEPAGE_852 is not set
++# CONFIG_NLS_CODEPAGE_855 is not set
++# CONFIG_NLS_CODEPAGE_857 is not set
++# CONFIG_NLS_CODEPAGE_860 is not set
++# CONFIG_NLS_CODEPAGE_861 is not set
++# CONFIG_NLS_CODEPAGE_862 is not set
++# CONFIG_NLS_CODEPAGE_863 is not set
++# CONFIG_NLS_CODEPAGE_864 is not set
++# CONFIG_NLS_CODEPAGE_865 is not set
++# CONFIG_NLS_CODEPAGE_866 is not set
++# CONFIG_NLS_CODEPAGE_869 is not set
++# CONFIG_NLS_CODEPAGE_936 is not set
++# CONFIG_NLS_CODEPAGE_950 is not set
++# CONFIG_NLS_CODEPAGE_932 is not set
++# CONFIG_NLS_CODEPAGE_949 is not set
++# CONFIG_NLS_CODEPAGE_874 is not set
++# CONFIG_NLS_ISO8859_8 is not set
++# CONFIG_NLS_CODEPAGE_1250 is not set
++# CONFIG_NLS_CODEPAGE_1251 is not set
++# CONFIG_NLS_ASCII is not set
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_NLS_ISO8859_2 is not set
++# CONFIG_NLS_ISO8859_3 is not set
++# CONFIG_NLS_ISO8859_4 is not set
++# CONFIG_NLS_ISO8859_5 is not set
++# CONFIG_NLS_ISO8859_6 is not set
++# CONFIG_NLS_ISO8859_7 is not set
++# CONFIG_NLS_ISO8859_9 is not set
++# CONFIG_NLS_ISO8859_13 is not set
++# CONFIG_NLS_ISO8859_14 is not set
++# CONFIG_NLS_ISO8859_15 is not set
++# CONFIG_NLS_KOI8_R is not set
++# CONFIG_NLS_KOI8_U is not set
++# CONFIG_NLS_UTF8 is not set
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++# CONFIG_CRYPTO_HMAC is not set
++# CONFIG_CRYPTO_NULL is not set
++# CONFIG_CRYPTO_MD4 is not set
++CONFIG_CRYPTO_MD5=m
++# CONFIG_CRYPTO_SHA1 is not set
++# CONFIG_CRYPTO_SHA256 is not set
++# CONFIG_CRYPTO_SHA512 is not set
++# CONFIG_CRYPTO_WP512 is not set
++# CONFIG_CRYPTO_TGR192 is not set
++# CONFIG_CRYPTO_DES is not set
++# CONFIG_CRYPTO_BLOWFISH is not set
++# CONFIG_CRYPTO_TWOFISH is not set
++# CONFIG_CRYPTO_SERPENT is not set
++# CONFIG_CRYPTO_AES_586 is not set
++# CONFIG_CRYPTO_CAST5 is not set
++# CONFIG_CRYPTO_CAST6 is not set
++# CONFIG_CRYPTO_TEA is not set
++# CONFIG_CRYPTO_ARC4 is not set
++# CONFIG_CRYPTO_KHAZAD is not set
++# CONFIG_CRYPTO_ANUBIS is not set
++# CONFIG_CRYPTO_DEFLATE is not set
++# CONFIG_CRYPTO_MICHAEL_MIC is not set
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++
++#
++# Library routines
++#
++# CONFIG_CRC_CCITT is not set
++# CONFIG_CRC32 is not set
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++CONFIG_DEBUG_BUGVERBOSE=y
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_KPROBES is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_4KSTACKS is not set
+diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_x86_64 linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_x86_64
+--- pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_x86_64	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_x86_64	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,939 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12-xenU
++# Thu Aug 18 11:15:14 2005
++#
++CONFIG_XEN=y
++CONFIG_ARCH_XEN=y
++CONFIG_NO_IDLE_HZ=y
++
++#
++# XEN
++#
++# CONFIG_XEN_PRIVILEGED_GUEST is not set
++# CONFIG_XEN_PHYSDEV_ACCESS is not set
++# CONFIG_XEN_TPMDEV_FRONTEND is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SHADOW_MODE is not set
++CONFIG_XEN_SCRUB_PAGES=y
++# CONFIG_XEN_X86 is not set
++CONFIG_XEN_X86_64=y
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++CONFIG_CLEAN_COMPILE=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++CONFIG_SYSCTL=y
++CONFIG_AUDIT=y
++CONFIG_AUDITSYSCALL=y
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++# CONFIG_IKCONFIG is not set
++# CONFIG_CPUSETS is not set
++# CONFIG_EMBEDDED is not set
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++CONFIG_KALLSYMS_EXTRA_PASS=y
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++# CONFIG_MODULE_FORCE_UNLOAD is not set
++CONFIG_OBSOLETE_MODPARM=y
++CONFIG_MODVERSIONS=y
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++CONFIG_XENARCH="x86_64"
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_UID16=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_L1_CACHE_SHIFT=7
++CONFIG_RWSEM_GENERIC_SPINLOCK=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_GOOD_APIC=y
++# CONFIG_HPET_TIMER is not set
++CONFIG_SMP=y
++CONFIG_NR_CPUS=8
++# CONFIG_SCHED_SMT is not set
++# CONFIG_MICROCODE is not set
++CONFIG_X86_CPUID=y
++# CONFIG_NUMA is not set
++# CONFIG_MTRR is not set
++CONFIG_HAVE_DEC_LOCK=y
++# CONFIG_X86_LOCAL_APIC is not set
++# CONFIG_X86_IO_APIC is not set
++# CONFIG_PCI is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_SECCOMP=y
++
++#
++# X86_64 processor configuration
++#
++CONFIG_X86_64=y
++CONFIG_64BIT=y
++CONFIG_EARLY_PRINTK=y
++
++#
++# Processor type and features
++#
++CONFIG_MPSC=y
++# CONFIG_GENERIC_CPU is not set
++CONFIG_X86_L1_CACHE_BYTES=128
++# CONFIG_X86_TSC is not set
++CONFIG_X86_XEN_GENAPIC=y
++# CONFIG_X86_MSR is not set
++CONFIG_X86_HT=y
++# CONFIG_K8_NUMA is not set
++# CONFIG_NUMA_EMU is not set
++CONFIG_DUMMY_IOMMU=y
++# CONFIG_X86_MCE is not set
++
++#
++# Power management options
++#
++# CONFIG_PM is not set
++
++#
++# CPU Frequency scaling
++#
++# CONFIG_CPU_FREQ is not set
++
++#
++# Bus options (PCI etc.)
++#
++# CONFIG_UNORDERED_IO is not set
++
++#
++# Executable file formats / Emulations
++#
++CONFIG_IA32_EMULATION=y
++# CONFIG_IA32_AOUT is not set
++CONFIG_COMPAT=y
++CONFIG_SYSVIPC_COMPAT=y
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_MISC=y
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=y
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_LBD=y
++# CONFIG_CDROM_PKTCDVD is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++# CONFIG_ATA_OVER_ETH is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=m
++CONFIG_BLK_DEV_SR_VENDOR=y
++CONFIG_CHR_DEV_SG=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++# CONFIG_SCSI_MULTI_LUN is not set
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++CONFIG_SCSI_SATA=y
++# CONFIG_SCSI_DEBUG is not set
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=y
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID5=m
++CONFIG_MD_RAID6=m
++CONFIG_MD_MULTIPATH=m
++# CONFIG_MD_FAULTY is not set
++CONFIG_BLK_DEV_DM=m
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++# CONFIG_DM_MULTIPATH is not set
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=y
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=y
++CONFIG_NET_KEY=m
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++CONFIG_IP_ROUTE_VERBOSE=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_TUNNEL=m
++CONFIG_IP_TCPDIAG=y
++# CONFIG_IP_TCPDIAG_IPV6 is not set
++
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IPV6=m
++CONFIG_IPV6_PRIVACY=y
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++# CONFIG_IP_NF_CONNTRACK_MARK is not set
++CONFIG_IP_NF_CT_PROTO_SCTP=m
++CONFIG_IP_NF_FTP=m
++CONFIG_IP_NF_IRC=m
++CONFIG_IP_NF_TFTP=m
++CONFIG_IP_NF_AMANDA=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_LIMIT=m
++CONFIG_IP_NF_MATCH_IPRANGE=m
++CONFIG_IP_NF_MATCH_MAC=m
++CONFIG_IP_NF_MATCH_PKTTYPE=m
++CONFIG_IP_NF_MATCH_MARK=m
++CONFIG_IP_NF_MATCH_MULTIPORT=m
++CONFIG_IP_NF_MATCH_TOS=m
++CONFIG_IP_NF_MATCH_RECENT=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_DSCP=m
++CONFIG_IP_NF_MATCH_AH_ESP=m
++CONFIG_IP_NF_MATCH_LENGTH=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_MATCH_TCPMSS=m
++CONFIG_IP_NF_MATCH_HELPER=m
++CONFIG_IP_NF_MATCH_STATE=m
++CONFIG_IP_NF_MATCH_CONNTRACK=m
++CONFIG_IP_NF_MATCH_OWNER=m
++CONFIG_IP_NF_MATCH_PHYSDEV=m
++CONFIG_IP_NF_MATCH_ADDRTYPE=m
++CONFIG_IP_NF_MATCH_REALM=m
++CONFIG_IP_NF_MATCH_SCTP=m
++CONFIG_IP_NF_MATCH_COMMENT=m
++# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_TCPMSS=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_SAME=m
++CONFIG_IP_NF_NAT_SNMP_BASIC=m
++CONFIG_IP_NF_NAT_IRC=m
++CONFIG_IP_NF_NAT_FTP=m
++CONFIG_IP_NF_NAT_TFTP=m
++CONFIG_IP_NF_NAT_AMANDA=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_TOS=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_DSCP=m
++CONFIG_IP_NF_TARGET_MARK=m
++CONFIG_IP_NF_TARGET_CLASSIFY=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_TARGET_NOTRACK=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++# CONFIG_IP6_NF_QUEUE is not set
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_LIMIT=m
++CONFIG_IP6_NF_MATCH_MAC=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_MULTIPORT=m
++CONFIG_IP6_NF_MATCH_OWNER=m
++CONFIG_IP6_NF_MATCH_MARK=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_AHESP=m
++CONFIG_IP6_NF_MATCH_LENGTH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_MATCH_PHYSDEV=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_TARGET_MARK=m
++CONFIG_IP6_NF_RAW=m
++
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++# CONFIG_BRIDGE_EBT_ULOG is not set
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=y
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
++CONFIG_ATM=m
++CONFIG_ATM_CLIP=m
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++CONFIG_ATM_LANE=m
++# CONFIG_ATM_MPOA is not set
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++# CONFIG_DECNET is not set
++CONFIG_LLC=m
++# CONFIG_LLC2 is not set
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=y
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++# CONFIG_X25 is not set
++# CONFIG_LAPB is not set
++CONFIG_NET_DIVERT=y
++# CONFIG_ECONET is not set
++CONFIG_WAN_ROUTER=m
++
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CLK_JIFFIES=y
++# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
++# CONFIG_NET_SCH_CLK_CPU is not set
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_INGRESS=m
++CONFIG_NET_QOS=y
++CONFIG_NET_ESTIMATOR=y
++CONFIG_NET_CLS=y
++# CONFIG_NET_CLS_BASIC is not set
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++CONFIG_CLS_U32_PERF=y
++CONFIG_NET_CLS_IND=y
++# CONFIG_CLS_U32_MARK is not set
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++# CONFIG_NET_EMATCH is not set
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_CLS_POLICE=y
++
++#
++# Network testing
++#
++# CONFIG_NET_PKTGEN is not set
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++CONFIG_NETPOLL_TRAP=y
++CONFIG_NET_POLL_CONTROLLER=y
++# CONFIG_HAMRADIO is not set
++CONFIG_IRDA=m
++
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++# CONFIG_IRDA_ULTRA is not set
++
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++# CONFIG_IRDA_DEBUG is not set
++
++#
++# Infrared-port device drivers
++#
++
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
++
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++
++#
++# Old SIR device drivers
++#
++
++#
++# Old Serial dongle support
++#
++
++#
++# FIR device drivers
++#
++# CONFIG_NSC_FIR is not set
++# CONFIG_WINBOND_FIR is not set
++# CONFIG_SMC_IRCC_FIR is not set
++# CONFIG_ALI_FIR is not set
++# CONFIG_VIA_FIR is not set
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++CONFIG_BT_HCIUART_BCSP_TXCRC=y
++CONFIG_BT_HCIVHCI=m
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=m
++
++#
++# Ethernet (1000 Mbit)
++#
++
++#
++# Ethernet (10000 Mbit)
++#
++
++#
++# Token Ring devices
++#
++
++#
++# Wireless LAN (non-hamradio)
++#
++CONFIG_NET_RADIO=y
++
++#
++# Obsolete Wireless cards support (pre-802.11)
++#
++# CONFIG_STRIP is not set
++CONFIG_ATMEL=m
++
++#
++# Wan interfaces
++#
++# CONFIG_WAN is not set
++
++#
++# ATM drivers
++#
++CONFIG_ATM_TCP=m
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++# CONFIG_PPP_BSDCOMP is not set
++CONFIG_PPPOE=m
++CONFIG_PPPOATM=m
++# CONFIG_SLIP is not set
++# CONFIG_SHAPER is not set
++CONFIG_NETCONSOLE=m
++CONFIG_INPUT=m
++CONFIG_UNIX98_PTYS=y
++# CONFIG_LEGACY_PTYS is not set
++# CONFIG_TCG_TPM is not set
++
++#
++# Character devices
++#
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=y
++CONFIG_EXT3_FS_XATTR=y
++# CONFIG_EXT3_FS_POSIX_ACL is not set
++# CONFIG_EXT3_FS_SECURITY is not set
++CONFIG_JBD=m
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=y
++# CONFIG_REISERFS_CHECK is not set
++CONFIG_REISERFS_PROC_INFO=y
++CONFIG_REISERFS_FS_XATTR=y
++CONFIG_REISERFS_FS_POSIX_ACL=y
++CONFIG_REISERFS_FS_SECURITY=y
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++# CONFIG_JFS_SECURITY is not set
++# CONFIG_JFS_DEBUG is not set
++# CONFIG_JFS_STATISTICS is not set
++CONFIG_FS_POSIX_ACL=y
++
++#
++# XFS support
++#
++CONFIG_XFS_FS=m
++CONFIG_XFS_EXPORT=y
++# CONFIG_XFS_RT is not set
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_SECURITY=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++CONFIG_QUOTA=y
++# CONFIG_QFMT_V1 is not set
++CONFIG_QFMT_V2=y
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=m
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=y
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=y
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
++# CONFIG_NTFS_FS is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++CONFIG_DEVPTS_FS_XATTR=y
++CONFIG_DEVPTS_FS_SECURITY=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_TMPFS_SECURITY=y
++# CONFIG_HUGETLBFS is not set
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++# CONFIG_ADFS_FS is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_CRAMFS=y
++CONFIG_VXFS_FS=m
++# CONFIG_HPFS_FS is not set
++CONFIG_QNX4FS_FS=m
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_RPCSEC_GSS_SPKM3=m
++CONFIG_SMB_FS=m
++# CONFIG_SMB_NLS_DEFAULT is not set
++CONFIG_CIFS=m
++# CONFIG_CIFS_STATS is not set
++CONFIG_CIFS_XATTR=y
++CONFIG_CIFS_POSIX=y
++# CONFIG_CIFS_EXPERIMENTAL is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++CONFIG_NCPFS_SMALLDOS=y
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++# CONFIG_CODA_FS is not set
++# CONFIG_AFS_FS is not set
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_ACORN_PARTITION is not set
++CONFIG_OSF_PARTITION=y
++# CONFIG_AMIGA_PARTITION is not set
++# CONFIG_ATARI_PARTITION is not set
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++# CONFIG_LDM_PARTITION is not set
++CONFIG_SGI_PARTITION=y
++# CONFIG_ULTRIX_PARTITION is not set
++CONFIG_SUN_PARTITION=y
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="utf8"
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=y
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_UTF8=m
++
++#
++# Security options
++#
++# CONFIG_KEYS is not set
++# CONFIG_SECURITY is not set
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=m
++CONFIG_CRYPTO_SHA1=y
++CONFIG_CRYPTO_SHA256=m
++CONFIG_CRYPTO_SHA512=m
++CONFIG_CRYPTO_WP512=m
++# CONFIG_CRYPTO_TGR192 is not set
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_SERPENT=m
++# CONFIG_CRYPTO_AES is not set
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_KHAZAD=m
++# CONFIG_CRYPTO_ANUBIS is not set
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_CRC32C=m
++# CONFIG_CRYPTO_TEST is not set
++
++#
++# Hardware crypto devices
++#
++
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=m
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=m
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=15
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_KPROBES is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_4KSTACKS is not set
++# CONFIG_INIT_DEBUG is not set
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/Kconfig linux-2.6.12-xen/arch/xen/i386/Kconfig
+--- pristine-linux-2.6.12/arch/xen/i386/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/Kconfig	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,960 @@
++#
++# For a description of the syntax of this configuration file,
++# see Documentation/kbuild/kconfig-language.txt.
++#
++
++menu "X86 Processor Configuration"
++
++config XENARCH
++	string
++	default i386
++
++config X86
++	bool
++	default y
++	help
++	  This is Linux's home port.  Linux was originally native to the Intel
++	  386, and runs on all the later x86 processors including the Intel
++	  486, 586, Pentiums, and various instruction-set-compatible chips by
++	  AMD, Cyrix, and others.
++
++config MMU
++	bool
++	default y
++
++config SBUS
++	bool
++
++config UID16
++	bool
++	default y
++
++config GENERIC_ISA_DMA
++	bool
++	default y
++
++config GENERIC_IOMAP
++	bool
++	default y
++
++choice
++	prompt "Processor family"
++	default M686
++
++config M386
++	bool "386"
++	---help---
++	  This is the processor type of your CPU. This information is used for
++	  optimizing purposes. In order to compile a kernel that can run on
++	  all x86 CPU types (albeit not optimally fast), you can specify
++	  "386" here.
++
++	  The kernel will not necessarily run on earlier architectures than
++	  the one you have chosen, e.g. a Pentium optimized kernel will run on
++	  a PPro, but not necessarily on a i486.
++
++	  Here are the settings recommended for greatest speed:
++	  - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
++	  486DLC/DLC2, UMC 486SX-S and NexGen Nx586.  Only "386" kernels
++	  will run on a 386 class machine.
++	  - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
++	  SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
++	  - "586" for generic Pentium CPUs lacking the TSC
++	  (time stamp counter) register.
++	  - "Pentium-Classic" for the Intel Pentium.
++	  - "Pentium-MMX" for the Intel Pentium MMX.
++	  - "Pentium-Pro" for the Intel Pentium Pro.
++	  - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
++	  - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
++	  - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
++	  - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
++	  - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
++	  - "Crusoe" for the Transmeta Crusoe series.
++	  - "Efficeon" for the Transmeta Efficeon series.
++	  - "Winchip-C6" for original IDT Winchip.
++	  - "Winchip-2" for IDT Winchip 2.
++	  - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
++	  - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
++	  - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
++	  - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
++
++	  If you don't know what to do, choose "386".
++
++config M486
++	bool "486"
++	help
++	  Select this for a 486 series processor, either Intel or one of the
++	  compatible processors from AMD, Cyrix, IBM, or Intel.  Includes DX,
++	  DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
++	  U5S.
++
++config M586
++	bool "586/K5/5x86/6x86/6x86MX"
++	help
++	  Select this for an 586 or 686 series processor such as the AMD K5,
++	  the Cyrix 5x86, 6x86 and 6x86MX.  This choice does not
++	  assume the RDTSC (Read Time Stamp Counter) instruction.
++
++config M586TSC
++	bool "Pentium-Classic"
++	help
++	  Select this for a Pentium Classic processor with the RDTSC (Read
++	  Time Stamp Counter) instruction for benchmarking.
++
++config M586MMX
++	bool "Pentium-MMX"
++	help
++	  Select this for a Pentium with the MMX graphics/multimedia
++	  extended instructions.
++
++config M686
++	bool "Pentium-Pro"
++	help
++	  Select this for Intel Pentium Pro chips.  This enables the use of
++	  Pentium Pro extended instructions, and disables the init-time guard
++	  against the f00f bug found in earlier Pentiums.
++
++config MPENTIUMII
++	bool "Pentium-II/Celeron(pre-Coppermine)"
++	help
++	  Select this for Intel chips based on the Pentium-II and
++	  pre-Coppermine Celeron core.  This option enables an unaligned
++	  copy optimization, compiles the kernel with optimization flags
++	  tailored for the chip, and applies any applicable Pentium Pro
++	  optimizations.
++
++config MPENTIUMIII
++	bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
++	help
++	  Select this for Intel chips based on the Pentium-III and
++	  Celeron-Coppermine core.  This option enables use of some
++	  extended prefetch instructions in addition to the Pentium II
++	  extensions.
++
++config MPENTIUMM
++	bool "Pentium M"
++	help
++	  Select this for Intel Pentium M (not Pentium-4 M)
++	  notebook chips.
++
++config MPENTIUM4
++	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
++	help
++	  Select this for Intel Pentium 4 chips.  This includes the
++	  Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
++	  (not Pentium M) chips.  This option enables compile flags
++	  optimized for the chip, uses the correct cache shift, and
++	  applies any applicable Pentium III optimizations.
++
++config MK6
++	bool "K6/K6-II/K6-III"
++	help
++	  Select this for an AMD K6-family processor.  Enables use of
++	  some extended instructions, and passes appropriate optimization
++	  flags to GCC.
++
++config MK7
++	bool "Athlon/Duron/K7"
++	help
++	  Select this for an AMD Athlon K7-family processor.  Enables use of
++	  some extended instructions, and passes appropriate optimization
++	  flags to GCC.
++
++config MK8
++	bool "Opteron/Athlon64/Hammer/K8"
++	help
++	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.  Enables
++	  use of some extended instructions, and passes appropriate optimization
++	  flags to GCC.
++
++config MCRUSOE
++	bool "Crusoe"
++	help
++	  Select this for a Transmeta Crusoe processor.  Treats the processor
++	  like a 586 with TSC, and sets some GCC optimization flags (like a
++	  Pentium Pro with no alignment requirements).
++
++config MEFFICEON
++	bool "Efficeon"
++	help
++	  Select this for a Transmeta Efficeon processor.
++
++config MWINCHIPC6
++	bool "Winchip-C6"
++	help
++	  Select this for an IDT Winchip C6 chip.  Linux and GCC
++	  treat this chip as a 586TSC with some extended instructions
++	  and alignment requirements.
++
++config MWINCHIP2
++	bool "Winchip-2"
++	help
++	  Select this for an IDT Winchip-2.  Linux and GCC
++	  treat this chip as a 586TSC with some extended instructions
++	  and alignment requirements.
++
++config MWINCHIP3D
++	bool "Winchip-2A/Winchip-3"
++	help
++	  Select this for an IDT Winchip-2A or 3.  Linux and GCC
++	  treat this chip as a 586TSC with some extended instructions
++	  and alignment requirements.  Also enable out of order memory
++	  stores for this CPU, which can increase performance of some
++	  operations.
++
++config MGEODEGX1
++	bool "GeodeGX1"
++	help
++	  Select this for a Geode GX1 (Cyrix MediaGX) chip.
++
++config MCYRIXIII
++	bool "CyrixIII/VIA-C3"
++	help
++	  Select this for a Cyrix III or C3 chip.  Presently Linux and GCC
++	  treat this chip as a generic 586. Whilst the CPU is 686 class,
++	  it lacks the cmov extension which gcc assumes is present when
++	  generating 686 code.
++	  Note that Nehemiah (Model 9) and above will not boot with this
++	  kernel due to them lacking the 3DNow! instructions used in earlier
++	  incarnations of the CPU.
++
++config MVIAC3_2
++	bool "VIA C3-2 (Nehemiah)"
++	help
++	  Select this for a VIA C3 "Nehemiah". Selecting this enables usage
++	  of SSE and tells gcc to treat the CPU as a 686.
++	  Note, this kernel will not boot on older (pre model 9) C3s.
++
++endchoice
++
++config X86_GENERIC
++       bool "Generic x86 support"
++       help
++	  Instead of just including optimizations for the selected
++	  x86 variant (e.g. PII, Crusoe or Athlon), include some more
++	  generic optimizations as well. This will make the kernel
++	  perform better on x86 CPUs other than that selected.
++
++	  This is really intended for distributors who need more
++	  generic optimizations.
++
++#
++# Define implied options from the CPU selection here
++#
++config X86_CMPXCHG
++	bool
++	depends on !M386
++	default y
++
++config X86_XADD
++	bool
++	depends on !M386
++	default y
++
++config X86_L1_CACHE_SHIFT
++	int
++	default "7" if MPENTIUM4 || X86_GENERIC
++	default "4" if X86_ELAN || M486 || M386
++	default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
++	default "6" if MK7 || MK8 || MPENTIUMM
++
++config RWSEM_GENERIC_SPINLOCK
++	bool
++	depends on M386
++	default y
++
++config RWSEM_XCHGADD_ALGORITHM
++	bool
++	depends on !M386
++	default y
++
++config GENERIC_CALIBRATE_DELAY
++	bool
++	default y
++
++config X86_PPRO_FENCE
++	bool
++	depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
++	default y
++
++config X86_F00F_BUG
++	bool
++	depends on M586MMX || M586TSC || M586 || M486 || M386
++	default y
++
++config X86_WP_WORKS_OK
++	bool
++	depends on !M386
++	default y
++
++config X86_INVLPG
++	bool
++	depends on !M386
++	default y
++
++config X86_BSWAP
++	bool
++	depends on !M386
++	default y
++
++config X86_POPAD_OK
++	bool
++	depends on !M386
++	default y
++
++config X86_ALIGNMENT_16
++	bool
++	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++	default y
++
++config X86_GOOD_APIC
++	bool
++	depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
++	default y
++
++config X86_INTEL_USERCOPY
++	bool
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
++	default y
++
++config X86_USE_PPRO_CHECKSUM
++	bool
++	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
++	default y
++
++config X86_USE_3DNOW
++	bool
++	depends on MCYRIXIII || MK7
++	default y
++
++config X86_OOSTORE
++	bool
++	depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
++	default y
++
++config HPET_TIMER
++	bool
++	default n
++#config HPET_TIMER
++#	bool "HPET Timer Support"
++#	help
++#	  This enables the use of the HPET for the kernel's internal timer.
++#	  HPET is the next generation timer replacing legacy 8254s.
++#	  You can safely choose Y here.  However, HPET will only be
++#	  activated if the platform and the BIOS support this feature.
++#	  Otherwise the 8254 will be used for timing services.
++#
++#	  Choose N to continue using the legacy 8254 timer.
++
++config HPET_EMULATE_RTC
++	def_bool HPET_TIMER && RTC=y
++
++config SMP
++	bool "Symmetric multi-processing support"
++	---help---
++	  This enables support for systems with more than one CPU. If you have
++	  a system with only one CPU, like most personal computers, say N. If
++	  you have a system with more than one CPU, say Y.
++
++	  If you say N here, the kernel will run on single and multiprocessor
++	  machines, but will use only one CPU of a multiprocessor machine. If
++	  you say Y here, the kernel will run on many, but not all,
++	  singleprocessor machines. On a singleprocessor machine, the kernel
++	  will run faster if you say N here.
++
++	  Note that if you say Y here and choose architecture "586" or
++	  "Pentium" under "Processor family", the kernel will not work on 486
++	  architectures. Similarly, multiprocessor kernels for the "PPro"
++	  architecture may not work on all Pentium based boards.
++
++	  People using multiprocessor machines who say Y here should also say
++	  Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
++	  Management" code will be disabled if you say Y here.
++
++	  See also the <file:Documentation/smp.txt>,
++	  <file:Documentation/i386/IO-APIC.txt>,
++	  <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
++	  <http://www.tldp.org/docs.html#howto>.
++
++	  If you don't know what to do here, say N.
++
++config SMP_ALTERNATIVES
++	bool "SMP alternatives support (EXPERIMENTAL)"
++	depends on SMP && EXPERIMENTAL
++	help
++	  Try to reduce the overhead of running an SMP kernel on a uniprocessor
++	  host slightly by replacing certain key instruction sequences
++	  according to whether we currently have more than one CPU available.
++	  This should provide a noticeable boost to performance when
++	  running SMP kernels on UP machines, and have negligible impact
++	  when running on a true SMP host.
++
++          If unsure, say N.
++	  
++config NR_CPUS
++	int "Maximum number of CPUs (2-255)"
++	range 2 255
++	depends on SMP
++	default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
++	default "8"
++	help
++	  This allows you to specify the maximum number of CPUs which this
++	  kernel will support.  The maximum supported value is 255 and the
++	  minimum value which makes sense is 2.
++
++	  This is purely to save memory - each supported CPU adds
++	  approximately eight kilobytes to the kernel image.
++
++config SCHED_SMT
++	bool "SMT (Hyperthreading) scheduler support"
++	depends on SMP
++	default n
++	help
++	  SMT scheduler support improves the CPU scheduler's decision making
++	  when dealing with Intel Pentium 4 chips with HyperThreading at a
++	  cost of slightly increased overhead in some places. If unsure say
++	  N here.
++
++#config PREEMPT
++#	bool "Preemptible Kernel"
++#	help
++#	  This option reduces the latency of the kernel when reacting to
++#	  real-time or interactive events by allowing a low priority process to
++#	  be preempted even if it is in kernel mode executing a system call.
++#	  This allows applications to run more reliably even when the system is
++#	  under load.
++#
++#	  Say Y here if you are building a kernel for a desktop, embedded
++#	  or real-time system.  Say N if you are unsure.
++
++config PREEMPT_BKL
++	bool "Preempt The Big Kernel Lock"
++	depends on PREEMPT
++	default y
++	help
++	  This option reduces the latency of the kernel by making the
++	  big kernel lock preemptible.
++
++	  Say Y here if you are building a kernel for a desktop system.
++	  Say N if you are unsure.
++
++#config X86_TSC
++#	 bool
++# 	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ
++#	 default y
++
++#config X86_MCE
++#	 bool "Machine Check Exception"
++#	depends on !X86_VOYAGER
++#	 ---help---
++#	   Machine Check Exception support allows the processor to notify the
++#	   kernel if it detects a problem (e.g. overheating, component failure).
++#	   The action the kernel takes depends on the severity of the problem,
++#	   ranging from a warning message on the console, to halting the machine.
++#	   Your processor must be a Pentium or newer to support this - check the
++#	   flags in /proc/cpuinfo for mce.  Note that some older Pentium systems
++#	   have a design flaw which leads to false MCE events - hence MCE is
++#	   disabled on all P5 processors, unless explicitly enabled with "mce"
++#	   as a boot argument.  Similarly, if MCE is built in and creates a
++#	   problem on some new non-standard machine, you can boot with "nomce"
++#	   to disable it.  MCE support simply ignores non-MCE processors like
++#	   the 386 and 486, so nearly everyone can say Y here.
++
++#config X86_MCE_NONFATAL
++#	tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
++#	 depends on X86_MCE
++#	 help
++#	   Enabling this feature starts a timer that triggers every 5 seconds which
++#	   will look at the machine check registers to see if anything happened.
++#	   Non-fatal problems automatically get corrected (but still logged).
++#	   Disable this if you don't want to see these messages.
++#	   Seeing the messages this option prints out may be indicative of dying hardware,
++#	   or out-of-spec (ie, overclocked) hardware.
++#	   This option only does something on certain CPUs.
++#	   (AMD Athlon/Duron and Intel Pentium 4)
++
++#config X86_MCE_P4THERMAL
++#	 bool "check for P4 thermal throttling interrupt."
++#	 depends on X86_MCE && (X86_UP_APIC || SMP)
++#	 help
++#	   Enabling this feature will cause a message to be printed when the P4
++#	   enters thermal throttling.
++
++config X86_REBOOTFIXUPS
++	bool "Enable X86 board specific fixups for reboot"
++	depends on X86
++	default n
++	---help---
++	  This enables chipset and/or board specific fixups to be done
++	  in order to get reboot to work correctly. This is only needed on
++	  some combinations of hardware and BIOS. The symptom this option
++	  addresses is a reboot that ends with a stalled or hung system.
++
++	  Currently, the only fixup is for the Geode GX1/CS5530A/TROM2.1
++	  combination.
++
++	  Say Y if you want to enable the fixup. Currently, it's safe to
++	  enable this option even if you don't need it.
++	  Say N otherwise.
++
++config MICROCODE
++	tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
++	depends on XEN_PRIVILEGED_GUEST
++	---help---
++	  If you say Y here and also to "/dev file system support" in the
++	  'File systems' section, you will be able to update the microcode on
++	  Intel processors in the IA32 family, e.g. Pentium Pro, Pentium II,
++	  Pentium III, Pentium 4, Xeon etc.  You will obviously need the
++	  actual microcode binary data itself which is not shipped with the
++	  Linux kernel.
++
++	  For latest news and information on obtaining all the required
++	  ingredients for this driver, check:
++	  <http://www.urbanmyth.org/microcode/>.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called microcode.
++
++#config X86_MSR
++#	 tristate "/dev/cpu/*/msr - Model-specific register support"
++#	 help
++#	   This device gives privileged processes access to the x86
++#	   Model-Specific Registers (MSRs).  It is a character device with
++#	   major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
++#	   MSR accesses are directed to a specific CPU on multi-processor
++#	   systems.
++
++config X86_CPUID
++	tristate "/dev/cpu/*/cpuid - CPU information support"
++	help
++	  This device gives processes access to the x86 CPUID instruction to
++	  be executed on a specific processor.  It is a character device
++	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
++	  /dev/cpu/31/cpuid.
++
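The /dev/cpu/*/cpuid character devices described in the X86_CPUID help text above are read-only: by convention the file offset selects the input CPUID leaf and each read returns the 16 bytes EAX, EBX, ECX, EDX. A minimal user-space sketch under that assumption (the offset-as-leaf convention is the standard cpuid driver behaviour, not something introduced by this patch):

    /* Sketch: query CPUID leaf 0 of CPU 0 through /dev/cpu/0/cpuid.
     * Assumes the usual cpuid driver convention: file offset = input EAX,
     * each read returns 16 bytes (EAX, EBX, ECX, EDX). */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned int regs[4];
        int fd = open("/dev/cpu/0/cpuid", O_RDONLY);

        if (fd < 0 || pread(fd, regs, sizeof(regs), 0) != sizeof(regs)) {
            perror("cpuid");
            return 1;
        }
        printf("leaf 0: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
               regs[0], regs[1], regs[2], regs[3]);
        close(fd);
        return 0;
    }

Leaf 0 reports the highest supported standard leaf in EAX and the vendor identification string in EBX, EDX, ECX.
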
++config SWIOTLB
++       bool
++       depends on PCI
++       default y
++
++source "drivers/firmware/Kconfig"
++
++choice
++	prompt "High Memory Support"
++	default NOHIGHMEM
++
++config NOHIGHMEM
++	bool "off"
++	---help---
++	  Linux can use up to 64 Gigabytes of physical memory on x86 systems.
++	  However, the address space of 32-bit x86 processors is only 4
++	  Gigabytes large. That means that, if you have a large amount of
++	  physical memory, not all of it can be "permanently mapped" by the
++	  kernel. The physical memory that's not permanently mapped is called
++	  "high memory".
++
++	  If you are compiling a kernel which will never run on a machine with
++	  more than 1 Gigabyte total physical RAM, answer "off" here (default
++	  choice and suitable for most users). This will result in a "3GB/1GB"
++	  split: 3GB are mapped so that each process sees a 3GB virtual memory
++	  space and the remaining part of the 4GB virtual memory space is used
++	  by the kernel to permanently map as much physical memory as
++	  possible.
++
++	  If the machine has between 1 and 4 Gigabytes physical RAM, then
++	  answer "4GB" here.
++
++	  If more than 4 Gigabytes is used then answer "64GB" here. This
++	  selection turns Intel PAE (Physical Address Extension) mode on.
++	  PAE implements 3-level paging on IA32 processors. PAE is fully
++	  supported by Linux; PAE mode is implemented on all recent Intel
++	  processors (Pentium Pro and better). NOTE: If you say "64GB" here,
++	  then the kernel will not boot on CPUs that don't support PAE!
++
++	  The actual amount of total physical memory will either be
++	  auto detected or can be forced by using a kernel command line option
++	  such as "mem=256M". (Try "man bootparam" or see the documentation of
++	  your boot loader (lilo or loadlin) about how to pass options to the
++	  kernel at boot time.)
++
++	  If unsure, say "off".
++
++config HIGHMEM4G
++	bool "4GB"
++	help
++	  Select this if you have a 32-bit processor and between 1 and 4
++	  gigabytes of physical RAM.
++
++config HIGHMEM64G
++	bool "64GB"
++	help
++	  Select this if you have a 32-bit processor and more than 4
++	  gigabytes of physical RAM.
++
++endchoice
++
++config HIGHMEM
++	bool
++	depends on HIGHMEM64G || HIGHMEM4G
++	default y
++
++config X86_PAE
++	bool
++	depends on HIGHMEM64G
++	default y
++
++# Common NUMA Features
++config NUMA
++	bool "NUMA Memory Allocation and Scheduler Support"
++	depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
++	default n if X86_PC
++	default y if (X86_NUMAQ || X86_SUMMIT)
++
++# Need comments to help the hapless user trying to turn on NUMA support
++comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
++	depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
++
++comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
++	depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
++
++config DISCONTIGMEM
++	bool
++	depends on NUMA
++	default y
++
++config HAVE_ARCH_BOOTMEM_NODE
++	bool
++	depends on NUMA
++	default y
++
++config HAVE_MEMORY_PRESENT
++	bool
++	depends on DISCONTIGMEM
++	default y
++
++config NEED_NODE_MEMMAP_SIZE
++	bool
++	depends on DISCONTIGMEM
++	default y
++
++#config HIGHPTE
++#	bool "Allocate 3rd-level pagetables from highmem"
++#	depends on HIGHMEM4G || HIGHMEM64G
++#	help
++#	  The VM uses one page table entry for each page of physical memory.
++#	  For systems with a lot of RAM, this can be wasteful of precious
++#	  low memory.  Setting this option will put user-space page table
++#	  entries in high memory.
++
++config MTRR
++	bool
++	depends on XEN_PRIVILEGED_GUEST
++	default y
++
++#config MTRR
++#	 bool "MTRR (Memory Type Range Register) support"
++#	 ---help---
++#	   On Intel P6 family processors (Pentium Pro, Pentium II and later)
++#	   the Memory Type Range Registers (MTRRs) may be used to control
++#	   processor access to memory ranges. This is most useful if you have
++#	   a video (VGA) card on a PCI or AGP bus. Enabling write-combining
++#	   allows bus write transfers to be combined into a larger transfer
++#	   before bursting over the PCI/AGP bus. This can increase performance
++#	   of image write operations 2.5 times or more. Saying Y here creates a
++#	   /proc/mtrr file which may be used to manipulate your processor's
++#	   MTRRs. Typically the X server should use this.
++#
++#	   This code has a reasonably generic interface so that similar
++#	   control registers on other processors can be easily supported
++#	   as well:
++#
++#	   The Cyrix 6x86, 6x86MX and M II processors have Address Range
++#	   Registers (ARRs) which provide a similar functionality to MTRRs. For
++#	   these, the ARRs are used to emulate the MTRRs.
++#	   The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
++#	   MTRRs. The Centaur C6 (WinChip) has 8 MCRs, allowing
++#	   write-combining. All of these processors are supported by this code
++#	   and it makes sense to say Y here if you have one of them.
++#
++#	   Saying Y here also fixes a problem with buggy SMP BIOSes which only
++#	   set the MTRRs for the boot CPU and not for the secondary CPUs. This
++#	   can lead to all sorts of problems, so it's good to say Y here.
++#
++#	   You can safely say Y even if your machine doesn't have MTRRs, you'll
++#	   just add about 9 KB to your kernel.
++#
++#	   See <file:Documentation/mtrr.txt> for more information.
++
++config IRQBALANCE
++	bool "Enable kernel irq balancing"
++	depends on SMP && X86_IO_APIC && !XEN
++	default y
++	help
++	  The default yes will allow the kernel to do irq load balancing.
++	  Saying no will keep the kernel from doing irq load balancing.
++
++config HAVE_DEC_LOCK
++	bool
++	depends on (SMP || PREEMPT) && X86_CMPXCHG
++	default y
++
++# turning this on wastes a bunch of space.
++# Summit needs it only when NUMA is on
++config BOOT_IOREMAP
++	bool
++	depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
++	default y
++
++config REGPARM
++	bool "Use register arguments (EXPERIMENTAL)"
++	depends on EXPERIMENTAL
++	default n
++	help
++	  Compile the kernel with -mregparm=3. This uses a different ABI
++	  and passes the first three arguments of a function call in registers.
++	  This will probably break binary-only modules.
++
++	  This feature is only enabled for gcc-3.0 and later - earlier compilers
++	  generate incorrect output with certain kernel constructs when
++	  -mregparm=3 is used.
++
++config X86_LOCAL_APIC
++	bool
++	depends on XEN_PRIVILEGED_GUEST && (X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER))
++	default y
++
++config X86_IO_APIC
++	bool
++	depends on XEN_PRIVILEGED_GUEST && (X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)))
++	default y
++
++config X86_VISWS_APIC
++	bool
++	depends on X86_VISWS
++	default y
++
++config HOTPLUG_CPU
++	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
++	depends on SMP && HOTPLUG && EXPERIMENTAL
++	---help---
++	  Say Y here to experiment with turning CPUs off and on.  CPUs
++	  can be controlled through /sys/devices/system/cpu.
++
++	  Say N.
++
++
++if XEN_PHYSDEV_ACCESS
++
++menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
++
++config X86_UP_APIC
++	bool "Local APIC support on uniprocessors"
++	depends on !SMP && !(X86_VISWS || X86_VOYAGER)
++	help
++	  A local APIC (Advanced Programmable Interrupt Controller) is an
++	  integrated interrupt controller in the CPU. If you have a single-CPU
++	  system which has a processor with a local APIC, you can say Y here to
++	  enable and use it. If you say Y here even though your machine doesn't
++	  have a local APIC, then the kernel will still run with no slowdown at
++	  all. The local APIC supports CPU-generated self-interrupts (timer,
++	  performance counters), and the NMI watchdog which detects hard
++	  lockups.
++
++config X86_UP_IOAPIC
++	bool "IO-APIC support on uniprocessors"
++	depends on X86_UP_APIC
++	help
++	  An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
++	  SMP-capable replacement for PC-style interrupt controllers. Most
++	  SMP systems and many recent uniprocessor systems have one.
++
++	  If you have a single-CPU system with an IO-APIC, you can say Y here
++	  to use it. If you say Y here even though your machine doesn't have
++	  an IO-APIC, then the kernel will still run with no slowdown at all.
++
++config PCI
++	bool "PCI support" if !X86_VISWS
++	depends on !X86_VOYAGER
++	default y if X86_VISWS
++	help
++	  Find out whether you have a PCI motherboard. PCI is the name of a
++	  bus system, i.e. the way the CPU talks to the other stuff inside
++	  your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
++	  VESA. If you have PCI, say Y, otherwise N.
++
++	  The PCI-HOWTO, available from
++	  <http://www.tldp.org/docs.html#howto>, contains valuable
++	  information about which PCI hardware does work under Linux and which
++	  doesn't.
++
++choice
++	prompt "PCI access mode"
++	depends on PCI && !X86_VISWS
++	default PCI_GOANY
++	---help---
++	  On PCI systems, the BIOS can be used to detect the PCI devices and
++	  determine their configuration. However, some old PCI motherboards
++	  have BIOS bugs and may crash if this is done. Also, some embedded
++	  PCI-based systems don't have any BIOS at all. Linux can also try to
++	  detect the PCI hardware directly without using the BIOS.
++
++	  With this option, you can specify how Linux should detect the
++	  PCI devices. If you choose "BIOS", the BIOS will be used,
++	  if you choose "Direct", the BIOS won't be used, and if you
++	  choose "MMConfig", then PCI Express MMCONFIG will be used.
++	  If you choose "Any", the kernel will try MMCONFIG, then the
++	  direct access method and falls back to the BIOS if that doesn't
++	  work. If unsure, go with the default, which is "Any".
++
++#config PCI_GOBIOS
++#	bool "BIOS"
++
++config PCI_GOMMCONFIG
++	bool "MMConfig"
++
++config PCI_GODIRECT
++	bool "Direct"
++
++config PCI_GOANY
++	bool "Any"
++
++endchoice
++
++#config PCI_BIOS
++#	bool
++#	depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
++#	default y
++
++config PCI_DIRECT
++	bool
++	depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
++	default y
++
++config PCI_MMCONFIG
++	bool
++	depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
++	select ACPI_BOOT
++	default y
++
++source "drivers/pci/pcie/Kconfig"
++
++source "drivers/pci/Kconfig"
++
++config ISA_DMA_API
++	bool
++	default y
++
++config ISA
++	bool "ISA support"
++	depends on !(X86_VOYAGER || X86_VISWS)
++	help
++	  Find out whether you have ISA slots on your motherboard.  ISA is the
++	  name of a bus system, i.e. the way the CPU talks to the other stuff
++	  inside your box.  Other bus systems are PCI, EISA, MicroChannel
++	  (MCA) or VESA.  ISA is an older system, now being displaced by PCI;
++	  newer boards don't support it.  If you have ISA, say Y, otherwise N.
++
++config EISA
++	bool "EISA support"
++	depends on ISA
++	---help---
++	  The Extended Industry Standard Architecture (EISA) bus was
++	  developed as an open alternative to the IBM MicroChannel bus.
++
++	  The EISA bus provided some of the features of the IBM MicroChannel
++	  bus while maintaining backward compatibility with cards made for
++	  the older ISA bus.  The EISA bus saw limited use between 1988 and
++	  1995 when it was made obsolete by the PCI bus.
++
++	  Say Y here if you are building a kernel for an EISA-based machine.
++
++	  Otherwise, say N.
++
++source "drivers/eisa/Kconfig"
++
++config MCA
++	bool "MCA support" if !(X86_VISWS || X86_VOYAGER)
++	default y if X86_VOYAGER
++	help
++	  MicroChannel Architecture is found in some IBM PS/2 machines and
++	  laptops.  It is a bus system similar to PCI or ISA. See
++	  <file:Documentation/mca.txt> (and especially the web page given
++	  there) before attempting to build an MCA bus kernel.
++
++source "drivers/mca/Kconfig"
++
++config SCx200
++	tristate "NatSemi SCx200 support"
++	depends on !X86_VOYAGER
++	help
++	  This provides basic support for the National Semiconductor SCx200
++	  processor.  Right now this is just a driver for the GPIO pins.
++
++	  If you don't know what to do here, say N.
++
++	  This support is also available as a module.  If compiled as a
++	  module, it will be called scx200.
++
++source "drivers/pcmcia/Kconfig"
++
++source "drivers/pci/hotplug/Kconfig"
++
++endmenu
++
++endif
++
++#
++# Use the generic interrupt handling code in kernel/irq/:
++#
++config GENERIC_HARDIRQS
++	bool
++	default y
++
++config GENERIC_IRQ_PROBE
++	bool
++	default y
++
++config X86_SMP
++	bool
++	depends on SMP && !X86_VOYAGER
++	default y
++
++#config X86_HT
++#	bool
++#	depends on SMP && !(X86_VISWS || X86_VOYAGER)
++#	default y
++
++config X86_BIOS_REBOOT
++	bool
++	depends on !(X86_VISWS || X86_VOYAGER)
++	default y
++
++config X86_TRAMPOLINE
++	bool
++	depends on X86_SMP || (X86_VOYAGER && SMP)
++	default y
++
++config PC
++	bool
++	depends on X86 && !EMBEDDED
++	default y
++
++config SECCOMP
++	bool "Enable seccomp to safely compute untrusted bytecode"
++	depends on PROC_FS
++	default y
++	help
++	  This kernel feature is useful for number crunching applications
++	  that may need to compute untrusted bytecode during their
++	  execution. By using pipes or other transports made available to
++	  the process as file descriptors supporting the read/write
++	  syscalls, it's possible to isolate those applications in
++	  their own address space using seccomp. Once seccomp is
++	  enabled via /proc/<pid>/seccomp, it cannot be disabled
++	  and the task is only allowed to execute a few safe syscalls
++	  defined by each seccomp mode.
++
++	  If unsure, say Y. Only embedded systems should say N here.
++
++endmenu
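
To make the SECCOMP help text above concrete: a task opts in by writing "1" to its own /proc/<pid>/seccomp entry, and from that point only the classic mode-1 whitelist (read, write, exit, sigreturn) is honoured. A minimal sketch of that sequence, assuming the /proc interface named in the help text and a kernel built with this option:

    /* Sketch: enter seccomp mode 1 via /proc/self/seccomp as described in
     * the SECCOMP help text.  Once active, only read, write, exit and
     * sigreturn are allowed, so the fd is left open (close() would be
     * fatal) and we leave through the raw exit syscall. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
        int fd = open("/proc/self/seccomp", O_WRONLY);

        if (fd < 0 || write(fd, "1", 1) != 1) {
            perror("seccomp");
            return 1;
        }
        write(1, "sandboxed\n", 10);
        syscall(SYS_exit, 0);
        return 0;    /* not reached */
    }
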
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/acpi/boot.c linux-2.6.12-xen/arch/xen/i386/kernel/acpi/boot.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/acpi/boot.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/acpi/boot.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,920 @@
++/*
++ *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
++ *
++ *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh at intel.com>
++ *  Copyright (C) 2001 Jun Nakajima <jun.nakajima at intel.com>
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ *  This program is free software; you can redistribute it and/or modify
++ *  it under the terms of the GNU General Public License as published by
++ *  the Free Software Foundation; either version 2 of the License, or
++ *  (at your option) any later version.
++ *
++ *  This program is distributed in the hope that it will be useful,
++ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *  GNU General Public License for more details.
++ *
++ *  You should have received a copy of the GNU General Public License
++ *  along with this program; if not, write to the Free Software
++ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/init.h>
++#include <linux/config.h>
++#include <linux/acpi.h>
++#include <linux/efi.h>
++#include <linux/irq.h>
++#include <linux/module.h>
++
++#include <asm/pgtable.h>
++#include <asm/io_apic.h>
++#include <asm/apic.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_XEN
++#include <asm/fixmap.h>
++#endif
++
++#ifdef	CONFIG_X86_64
++
++static inline void  acpi_madt_oem_check(char *oem_id, char *oem_table_id) { }
++extern void __init clustered_apic_check(void);
++static inline int ioapic_setup_disabled(void) { return 0; }
++#include <asm/proto.h>
++
++#else	/* X86 */
++
++#ifdef	CONFIG_X86_LOCAL_APIC
++#include <mach_apic.h>
++#include <mach_mpparse.h>
++#endif	/* CONFIG_X86_LOCAL_APIC */
++
++#endif	/* X86 */
++
++#define BAD_MADT_ENTRY(entry, end) (					    \
++		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
++		((acpi_table_entry_header *)entry)->length != sizeof(*entry))
++
++#define PREFIX			"ACPI: "
++
++#ifdef CONFIG_ACPI_PCI
++int acpi_noirq __initdata;	/* skip ACPI IRQ initialization */
++int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
++#else
++int acpi_noirq __initdata = 1;
++int acpi_pci_disabled __initdata = 1;
++#endif
++int acpi_ht __initdata = 1;	/* enable HT */
++
++int acpi_lapic;
++int acpi_ioapic;
++int acpi_strict;
++EXPORT_SYMBOL(acpi_strict);
++
++acpi_interrupt_flags acpi_sci_flags __initdata;
++int acpi_sci_override_gsi __initdata;
++int acpi_skip_timer_override __initdata;
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
++#endif
++
++#ifndef __HAVE_ARCH_CMPXCHG
++#warning ACPI uses CMPXCHG, i486 and later hardware
++#endif
++
++#define MAX_MADT_ENTRIES	256
++u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
++			{ [0 ... MAX_MADT_ENTRIES-1] = 0xff };
++EXPORT_SYMBOL(x86_acpiid_to_apicid);
++
++/* --------------------------------------------------------------------------
++                              Boot-time Configuration
++   -------------------------------------------------------------------------- */
++
++/*
++ * The default interrupt routing model is PIC (8259).  This gets
++ * overridden if IOAPICs are enumerated (below).
++ */
++enum acpi_irq_model_id		acpi_irq_model = ACPI_IRQ_MODEL_PIC;
++
++#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
++
++/* rely on all ACPI tables being in the direct mapping */
++char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
++{
++	if (!phys_addr || !size)
++		return NULL;
++
++	if (phys_addr < (end_pfn_map << PAGE_SHIFT))
++		return __va(phys_addr);
++
++	return NULL;
++}
++
++#else
++
++/*
++ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
++ * to map the target physical address. The problem is that set_fixmap()
++ * provides a single page, and it is possible that the page is not
++ * sufficient.
++ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
++ * i.e. until the next __va_range() call.
++ *
++ * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
++ * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
++ * count idx down while incrementing the phys address.
++ */
++char *__acpi_map_table(unsigned long phys, unsigned long size)
++{
++	unsigned long base, offset, mapped_size;
++	int idx;
++
++#ifndef CONFIG_XEN
++	if (phys + size < 8*1024*1024) 
++		return __va(phys); 
++#endif
++
++	offset = phys & (PAGE_SIZE - 1);
++	mapped_size = PAGE_SIZE - offset;
++	set_fixmap(FIX_ACPI_END, phys);
++	base = fix_to_virt(FIX_ACPI_END);
++
++	/*
++	 * Most cases can be covered by the below.
++	 */
++	idx = FIX_ACPI_END;
++	while (mapped_size < size) {
++		if (--idx < FIX_ACPI_BEGIN)
++			return NULL;	/* cannot handle this */
++		phys += PAGE_SIZE;
++		set_fixmap(idx, phys);
++		mapped_size += PAGE_SIZE;
++	}
++
++	return ((unsigned char *) base + offset);
++}
++#endif
++
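The comment above __acpi_map_table() notes that a single fixmap page may not cover the whole table, which is why the loop keeps claiming slots downward from FIX_ACPI_END. The number of slots it ends up using is just the page span of the region. As a rough illustration only (the helper name and the hard-coded 4 KiB i386 page size are ours, not part of the patch):

    /* Illustration of the accounting in __acpi_map_table(): how many fixmap
     * slots a table of 'size' bytes starting at physical address 'phys'
     * occupies, counting the partially covered first page. */
    #define ACPI_DEMO_PAGE_SIZE 4096UL

    static unsigned long acpi_fixmap_slots(unsigned long phys, unsigned long size)
    {
        unsigned long offset = phys & (ACPI_DEMO_PAGE_SIZE - 1);

        return (offset + size + ACPI_DEMO_PAGE_SIZE - 1) / ACPI_DEMO_PAGE_SIZE;
    }

If that count exceeds the FIX_ACPI_BEGIN..FIX_ACPI_END range, the function above gives up and returns NULL ("cannot handle this").
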
++#ifdef CONFIG_PCI_MMCONFIG
++static int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
++{
++	struct acpi_table_mcfg *mcfg;
++
++	if (!phys_addr || !size)
++		return -EINVAL;
++
++	mcfg = (struct acpi_table_mcfg *) __acpi_map_table(phys_addr, size);
++	if (!mcfg) {
++		printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
++		return -ENODEV;
++	}
++
++	if (mcfg->base_reserved) {
++		printk(KERN_ERR PREFIX "MMCONFIG not in low 4GB of memory\n");
++		return -ENODEV;
++	}
++
++	pci_mmcfg_base_addr = mcfg->base_address;
++
++	return 0;
++}
++#else
++#define	acpi_parse_mcfg NULL
++#endif /* !CONFIG_PCI_MMCONFIG */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static int __init
++acpi_parse_madt (
++	unsigned long		phys_addr,
++	unsigned long		size)
++{
++	struct acpi_table_madt	*madt = NULL;
++
++	if (!phys_addr || !size)
++		return -EINVAL;
++
++	madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
++	if (!madt) {
++		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
++		return -ENODEV;
++	}
++
++	if (madt->lapic_address) {
++		acpi_lapic_addr = (u64) madt->lapic_address;
++
++		printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
++			madt->lapic_address);
++	}
++
++	acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
++	
++	return 0;
++}
++
++
++static int __init
++acpi_parse_lapic (
++	acpi_table_entry_header *header, const unsigned long end)
++{
++	struct acpi_table_lapic	*processor = NULL;
++
++	processor = (struct acpi_table_lapic*) header;
++
++	if (BAD_MADT_ENTRY(processor, end))
++		return -EINVAL;
++
++	acpi_table_print_madt_entry(header);
++
++	/* no utility in registering a disabled processor */
++	if (processor->flags.enabled == 0)
++		return 0;
++
++	x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
++
++	mp_register_lapic (
++		processor->id,					   /* APIC ID */
++		processor->flags.enabled);			  /* Enabled? */
++
++	return 0;
++}
++
++static int __init
++acpi_parse_lapic_addr_ovr (
++	acpi_table_entry_header *header, const unsigned long end)
++{
++	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
++
++	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
++
++	if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
++		return -EINVAL;
++
++	acpi_lapic_addr = lapic_addr_ovr->address;
++
++	return 0;
++}
++
++static int __init
++acpi_parse_lapic_nmi (
++	acpi_table_entry_header *header, const unsigned long end)
++{
++	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
++
++	lapic_nmi = (struct acpi_table_lapic_nmi*) header;
++
++	if (BAD_MADT_ENTRY(lapic_nmi, end))
++		return -EINVAL;
++
++	acpi_table_print_madt_entry(header);
++
++	if (lapic_nmi->lint != 1)
++		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
++
++	return 0;
++}
++
++
++#endif /*CONFIG_X86_LOCAL_APIC*/
++
++#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
++
++static int __init
++acpi_parse_ioapic (
++	acpi_table_entry_header *header, const unsigned long end)
++{
++	struct acpi_table_ioapic *ioapic = NULL;
++
++	ioapic = (struct acpi_table_ioapic*) header;
++
++	if (BAD_MADT_ENTRY(ioapic, end))
++		return -EINVAL;
++ 
++	acpi_table_print_madt_entry(header);
++
++	mp_register_ioapic (
++		ioapic->id,
++		ioapic->address,
++		ioapic->global_irq_base);
++ 
++	return 0;
++}
++
++/*
++ * Parse Interrupt Source Override for the ACPI SCI
++ */
++static void
++acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
++{
++	if (trigger == 0)	/* compatible SCI trigger is level */
++		trigger = 3;
++
++	if (polarity == 0)	/* compatible SCI polarity is low */
++		polarity = 3;
++
++	/* Command-line over-ride via acpi_sci= */
++	if (acpi_sci_flags.trigger)
++		trigger = acpi_sci_flags.trigger;
++
++	if (acpi_sci_flags.polarity)
++		polarity = acpi_sci_flags.polarity;
++
++	/*
++ 	 * mp_config_acpi_legacy_irqs() already setup IRQs < 16
++	 * If GSI is < 16, this will update its flags,
++	 * else it will create a new mp_irqs[] entry.
++	 */
++	mp_override_legacy_irq(gsi, polarity, trigger, gsi);
++
++	/*
++	 * stash over-ride to indicate we've been here
++	 * and for later update of acpi_fadt
++	 */
++	acpi_sci_override_gsi = gsi;
++	return;
++}
++
++static int __init
++acpi_parse_int_src_ovr (
++	acpi_table_entry_header *header, const unsigned long end)
++{
++	struct acpi_table_int_src_ovr *intsrc = NULL;
++
++	intsrc = (struct acpi_table_int_src_ovr*) header;
++
++	if (BAD_MADT_ENTRY(intsrc, end))
++		return -EINVAL;
++
++	acpi_table_print_madt_entry(header);
++
++	if (intsrc->bus_irq == acpi_fadt.sci_int) {
++		acpi_sci_ioapic_setup(intsrc->global_irq,
++			intsrc->flags.polarity, intsrc->flags.trigger);
++		return 0;
++	}
++
++	if (acpi_skip_timer_override &&
++		intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
++			printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
++			return 0;
++	}
++
++	mp_override_legacy_irq (
++		intsrc->bus_irq,
++		intsrc->flags.polarity,
++		intsrc->flags.trigger,
++		intsrc->global_irq);
++
++	return 0;
++}
++
++
++static int __init
++acpi_parse_nmi_src (
++	acpi_table_entry_header *header, const unsigned long end)
++{
++	struct acpi_table_nmi_src *nmi_src = NULL;
++
++	nmi_src = (struct acpi_table_nmi_src*) header;
++
++	if (BAD_MADT_ENTRY(nmi_src, end))
++		return -EINVAL;
++
++	acpi_table_print_madt_entry(header);
++
++	/* TBD: Support nmisrc entries? */
++
++	return 0;
++}
++
++#endif /* CONFIG_X86_IO_APIC */
++
++#ifdef	CONFIG_ACPI_BUS
++
++/*
++ * acpi_pic_sci_set_trigger()
++ * 
++ * use ELCR to set PIC-mode trigger type for SCI
++ *
++ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
++ * it may require Edge Trigger -- use "acpi_sci=edge"
++ *
++ * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
++ * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
++ * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
++ * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
++ */
++
++void __init
++acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
++{
++	unsigned int mask = 1 << irq;
++	unsigned int old, new;
++
++	/* Real old ELCR mask */
++	old = inb(0x4d0) | (inb(0x4d1) << 8);
++
++	/*
++	 * If we use ACPI to set PCI irq's, then we should clear ELCR
++	 * since we will set it correctly as we enable the PCI irq
++	 * routing.
++	 */
++	new = acpi_noirq ? old : 0;
++
++	/*
++	 * Update SCI information in the ELCR, it isn't in the PCI
++	 * routing tables..
++	 */
++	switch (trigger) {
++	case 1:	/* Edge - clear */
++		new &= ~mask;
++		break;
++	case 3: /* Level - set */
++		new |= mask;
++		break;
++	}
++
++	if (old == new)
++		return;
++
++	printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
++	outb(new, 0x4d0);
++	outb(new >> 8, 0x4d1);
++}
++
++
++#endif /* CONFIG_ACPI_BUS */
++
++int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
++{
++#ifdef CONFIG_X86_IO_APIC
++	if (use_pci_vector() && !platform_legacy_irq(gsi))
++ 		*irq = IO_APIC_VECTOR(gsi);
++	else
++#endif
++		*irq = gsi;
++	return 0;
++}
++
++unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
++{
++	unsigned int irq;
++	unsigned int plat_gsi = gsi;
++
++#ifdef CONFIG_PCI
++	/*
++	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
++	 */
++	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
++		extern void eisa_set_level_irq(unsigned int irq);
++
++		if (edge_level == ACPI_LEVEL_SENSITIVE)
++				eisa_set_level_irq(gsi);
++	}
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
++		plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
++	}
++#endif
++	acpi_gsi_to_irq(plat_gsi, &irq);
++	return irq;
++}
++EXPORT_SYMBOL(acpi_register_gsi);
++
++/*
++ *  ACPI based hotplug support for CPU
++ */
++#ifdef CONFIG_ACPI_HOTPLUG_CPU
++int
++acpi_map_lsapic(acpi_handle handle, int *pcpu)
++{
++	/* TBD */
++	return -EINVAL;
++}
++EXPORT_SYMBOL(acpi_map_lsapic);
++
++
++int
++acpi_unmap_lsapic(int cpu)
++{
++	/* TBD */
++	return -EINVAL;
++}
++EXPORT_SYMBOL(acpi_unmap_lsapic);
++#endif /* CONFIG_ACPI_HOTPLUG_CPU */
++
++static unsigned long __init
++acpi_scan_rsdp (
++	unsigned long		start,
++	unsigned long		length)
++{
++	unsigned long		offset = 0;
++	unsigned long		sig_len = sizeof("RSD PTR ") - 1;
++	unsigned long		vstart = (unsigned long)isa_bus_to_virt(start);
++
++	/*
++	 * Scan all 16-byte boundaries of the physical memory region for the
++	 * RSDP signature.
++	 */
++	for (offset = 0; offset < length; offset += 16) {
++		if (strncmp((char *) (vstart + offset), "RSD PTR ", sig_len))
++			continue;
++		return (start + offset);
++	}
++
++	return 0;
++}
++
++static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
++{
++	struct acpi_table_sbf *sb;
++
++	if (!phys_addr || !size)
++		return -EINVAL;
++
++	sb = (struct acpi_table_sbf *) __acpi_map_table(phys_addr, size);
++	if (!sb) {
++		printk(KERN_WARNING PREFIX "Unable to map SBF\n");
++		return -ENODEV;
++	}
++
++	sbf_port = sb->sbf_cmos; /* Save CMOS port */
++
++	return 0;
++}
++
++
++#ifdef CONFIG_HPET_TIMER
++
++static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
++{
++	struct acpi_table_hpet *hpet_tbl;
++
++	if (!phys || !size)
++		return -EINVAL;
++
++	hpet_tbl = (struct acpi_table_hpet *) __acpi_map_table(phys, size);
++	if (!hpet_tbl) {
++		printk(KERN_WARNING PREFIX "Unable to map HPET\n");
++		return -ENODEV;
++	}
++
++	if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
++		printk(KERN_WARNING PREFIX "HPET timers must be located in "
++		       "memory.\n");
++		return -1;
++	}
++
++#ifdef	CONFIG_X86_64
++        vxtime.hpet_address = hpet_tbl->addr.addrl |
++                ((long) hpet_tbl->addr.addrh << 32);
++
++        printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
++               hpet_tbl->id, vxtime.hpet_address);
++#else	/* X86 */
++	{
++		extern unsigned long hpet_address;
++
++		hpet_address = hpet_tbl->addr.addrl;
++		printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
++			hpet_tbl->id, hpet_address);
++	}
++#endif	/* X86 */
++
++	return 0;
++}
++#else
++#define	acpi_parse_hpet	NULL
++#endif
++
++#ifdef CONFIG_X86_PM_TIMER
++extern u32 pmtmr_ioport;
++#endif
++
++static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
++{
++	struct fadt_descriptor_rev2 *fadt = NULL;
++
++	fadt = (struct fadt_descriptor_rev2*) __acpi_map_table(phys,size);
++	if(!fadt) {
++		printk(KERN_WARNING PREFIX "Unable to map FADT\n");
++		return 0;
++	}
++
++#ifdef	CONFIG_ACPI_INTERPRETER
++	/* initialize sci_int early for INT_SRC_OVR MADT parsing */
++	acpi_fadt.sci_int = fadt->sci_int;
++#endif
++
++#ifdef CONFIG_ACPI_BUS
++	/* initialize rev and apic_phys_dest_mode for x86_64 genapic */
++	acpi_fadt.revision = fadt->revision;
++	acpi_fadt.force_apic_physical_destination_mode = fadt->force_apic_physical_destination_mode;
++#endif
++
++#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
++	/* detect the location of the ACPI PM Timer */
++	if (fadt->revision >= FADT2_REVISION_ID) {
++		/* FADT rev. 2 */
++		if (fadt->xpm_tmr_blk.address_space_id != ACPI_ADR_SPACE_SYSTEM_IO)
++			return 0;
++
++		pmtmr_ioport = fadt->xpm_tmr_blk.address;
++	} else {
++		/* FADT rev. 1 */
++		pmtmr_ioport = fadt->V1_pm_tmr_blk;
++	}
++	if (pmtmr_ioport)
++		printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport);
++#endif
++	return 0;
++}
++
++
++unsigned long __init
++acpi_find_rsdp (void)
++{
++	unsigned long		rsdp_phys = 0;
++
++	if (efi_enabled) {
++		if (efi.acpi20)
++			return __pa(efi.acpi20);
++		else if (efi.acpi)
++			return __pa(efi.acpi);
++	}
++	/*
++	 * Scan memory looking for the RSDP signature. First search EBDA (low
++	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
++	 */
++	rsdp_phys = acpi_scan_rsdp (0, 0x400);
++	if (!rsdp_phys)
++		rsdp_phys = acpi_scan_rsdp (0xE0000, 0x20000);
++
++	return rsdp_phys;
++}
++
++#ifdef	CONFIG_X86_LOCAL_APIC
++/*
++ * Parse LAPIC entries in MADT
++ * returns 0 on success, < 0 on error
++ */
++static int __init
++acpi_parse_madt_lapic_entries(void)
++{
++	int count;
++
++	/* 
++	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
++	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
++	 */
++
++	count = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0);
++	if (count < 0) {
++		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
++		return count;
++	}
++
++	mp_register_lapic_address(acpi_lapic_addr);
++
++	count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
++				       MAX_APICS);
++	if (!count) { 
++		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
++		/* TBD: Cleanup to allow fallback to MPS */
++		return -ENODEV;
++	}
++	else if (count < 0) {
++		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
++		/* TBD: Cleanup to allow fallback to MPS */
++		return count;
++	}
++
++	count = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
++	if (count < 0) {
++		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
++		/* TBD: Cleanup to allow fallback to MPS */
++		return count;
++	}
++	return 0;
++}
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
++/*
++ * Parse IOAPIC related entries in MADT
++ * returns 0 on success, < 0 on error
++ */
++static int __init
++acpi_parse_madt_ioapic_entries(void)
++{
++	int count;
++
++	/*
++	 * ACPI interpreter is required to complete interrupt setup,
++	 * so if it is off, don't enumerate the io-apics with ACPI.
++	 * If MPS is present, it will handle them,
++	 * otherwise the system will stay in PIC mode
++	 */
++	if (acpi_disabled || acpi_noirq) {
++		return -ENODEV;
++	}
++
++	/*
++ 	 * if "noapic" boot option, don't look for IO-APICs
++	 */
++	if (skip_ioapic_setup) {
++		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
++			"due to 'noapic' option.\n");
++		return -ENODEV;
++	}
++
++	count = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, MAX_IO_APICS);
++	if (!count) {
++		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
++		return -ENODEV;
++	}
++	else if (count < 0) {
++		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
++		return count;
++	}
++
++	count = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, NR_IRQ_VECTORS);
++	if (count < 0) {
++		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
++		/* TBD: Cleanup to allow fallback to MPS */
++		return count;
++	}
++
++	/*
++	 * If BIOS did not supply an INT_SRC_OVR for the SCI
++	 * pretend we got one so we can set the SCI flags.
++	 */
++	if (!acpi_sci_override_gsi)
++		acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
++
++	/* Fill in identity legacy mappings where there is no override */
++	mp_config_acpi_legacy_irqs();
++
++	count = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, NR_IRQ_VECTORS);
++	if (count < 0) {
++		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
++		/* TBD: Cleanup to allow fallback to MPS */
++		return count;
++	}
++
++	return 0;
++}
++#else
++static inline int acpi_parse_madt_ioapic_entries(void)
++{
++	return -1;
++}
++#endif /* !(CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER) */
++
++
++static void __init
++acpi_process_madt(void)
++{
++#ifdef CONFIG_X86_LOCAL_APIC
++	int count, error;
++
++	count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
++	if (count >= 1) {
++
++		/*
++		 * Parse MADT LAPIC entries
++		 */
++		error = acpi_parse_madt_lapic_entries();
++		if (!error) {
++			acpi_lapic = 1;
++
++			/*
++			 * Parse MADT IO-APIC entries
++			 */
++			error = acpi_parse_madt_ioapic_entries();
++			if (!error) {
++				acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
++				acpi_irq_balance_set(NULL);
++				acpi_ioapic = 1;
++
++				smp_found_config = 1;
++				clustered_apic_check();
++			}
++		}
++		if (error == -EINVAL) {
++			/*
++			 * Dell Precision Workstation 410, 610 come here.
++			 */
++			printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n");
++			disable_acpi();
++		}
++	}
++#endif
++	return;
++}
++
++/*
++ * acpi_boot_table_init() and acpi_boot_init()
++ *  called from setup_arch(), always.
++ *	1. checksums all tables
++ *	2. enumerates lapics
++ *	3. enumerates io-apics
++ *
++ * acpi_table_init() is separate to allow reading SRAT without
++ * other side effects.
++ *
++ * side effects of acpi_boot_init:
++ *	acpi_lapic = 1 if LAPIC found
++ *	acpi_ioapic = 1 if IOAPIC found
++ *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
++ *	if acpi_blacklisted() acpi_disabled = 1;
++ *	acpi_irq_model=...
++ *	...
++ *
++ * return value: (currently ignored)
++ *	0: success
++ *	!0: failure
++ */
++
++int __init
++acpi_boot_table_init(void)
++{
++	int error;
++
++	/*
++	 * If acpi_disabled, bail out
++	 * One exception: acpi=ht continues far enough to enumerate LAPICs
++	 */
++	if (acpi_disabled && !acpi_ht)
++		 return 1;
++
++	/* 
++	 * Initialize the ACPI boot-time table parser.
++	 */
++	error = acpi_table_init();
++	if (error) {
++		disable_acpi();
++		return error;
++	}
++
++#ifdef __i386__
++	check_acpi_pci();
++#endif
++
++	acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++
++	/*
++	 * blacklist may disable ACPI entirely
++	 */
++	error = acpi_blacklisted();
++	if (error) {
++		extern int acpi_force;
++
++		if (acpi_force) {
++			printk(KERN_WARNING PREFIX "acpi=force override\n");
++		} else {
++			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
++			disable_acpi();
++			return error;
++		}
++	}
++
++	return 0;
++}
++
++
++int __init acpi_boot_init(void)
++{
++	/*
++	 * If acpi_disabled, bail out
++	 * One exception: acpi=ht continues far enough to enumerate LAPICs
++	 */
++	if (acpi_disabled && !acpi_ht)
++		 return 1;
++
++	acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
++
++	/*
++	 * set sci_int and PM timer address
++	 */
++	acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
++
++	/*
++	 * Process the Multiple APIC Description Table (MADT), if present
++	 */
++	acpi_process_madt();
++
++	acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
++	acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
++
++	return 0;
++}
++
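acpi_find_rsdp() above reduces to scanning 16-byte-aligned offsets for the literal 8-byte signature "RSD PTR ", first in low memory and then in the E0000h-FFFFFh BIOS window. Stripped of the isa_bus_to_virt() plumbing, the scan is simply the following (an illustrative reduction with a name of our own, not the kernel code):

    /* Illustrative reduction of acpi_scan_rsdp(): find the "RSD PTR "
     * signature on a 16-byte boundary within a buffer; returns the offset,
     * or -1 if the signature is absent. */
    #include <stddef.h>
    #include <string.h>

    static long scan_rsdp(const char *buf, size_t len)
    {
        size_t off;

        for (off = 0; off + 8 <= len; off += 16)
            if (memcmp(buf + off, "RSD PTR ", 8) == 0)
                return (long)off;
        return -1;
    }
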
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/acpi/Makefile linux-2.6.12-xen/arch/xen/i386/kernel/acpi/Makefile
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/acpi/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/acpi/Makefile	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,13 @@
++obj-$(CONFIG_ACPI_BOOT)		        := boot.o
++c-obj-$(CONFIG_X86_IO_APIC)	        += earlyquirk.o
++c-obj-$(CONFIG_ACPI_SLEEP)	        += sleep.o wakeup.o
++
++c-link                                  :=
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
++	@ln -fsn $(srctree)/arch/i386/kernel/acpi/$(notdir $@) $@
++
++obj-y	+= $(c-obj-y) $(s-obj-y)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
++clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/apic.c linux-2.6.12-xen/arch/xen/i386/kernel/apic.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/apic.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/apic.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,83 @@
++/*
++ *	Local APIC handling, local APIC timers
++ *
++ *	(c) 1999, 2000 Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
++ *					thanks to Eric Gilmore
++ *					and Rolf G. Tews
++ *					for testing these extensively.
++ *	Maciej W. Rozycki	:	Various updates and fixes.
++ *	Mikael Pettersson	:	Power Management for UP-APIC.
++ *	Pavel Machek and
++ *	Mikael Pettersson	:	PM converted to driver model.
++ */
++
++#include <linux/config.h>
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++
++#include <mach_apic.h>
++
++#include "io_ports.h"
++
++/*
++ * Debug level
++ */
++int apic_verbosity;
++
++int get_physical_broadcast(void)
++{
++        return 0xff;
++}
++
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++	printk("unexpected IRQ trap at vector %02x\n", irq);
++	/*
++	 * Currently unexpected vectors happen only on SMP and APIC.
++	 * We _must_ ack these because every local APIC has only N
++	 * irq slots per priority level, and a 'hanging, unacked' IRQ
++	 * holds up an irq slot - in excessive cases (when multiple
++	 * unexpected vectors occur) that might lock up the APIC
++	 * completely.
++	 */
++	ack_APIC_irq();
++}
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++	if (smp_found_config)
++		if (!skip_ioapic_setup && nr_ioapics)
++			setup_IO_APIC();
++#endif
++
++	return 0;
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/common.c linux-2.6.12-xen/arch/xen/i386/kernel/cpu/common.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/common.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/cpu/common.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,653 @@
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/msr.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <asm/mpspec.h>
++#include <asm/apic.h>
++#include <mach_apic.h>
++#endif
++#include <asm/hypervisor.h>
++
++#include "cpu.h"
++
++#ifndef CONFIG_XEN
++DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
++EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
++
++DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
++#endif
++
++static int cachesize_override __initdata = -1;
++static int disable_x86_fxsr __initdata = 0;
++static int disable_x86_serial_nr __initdata = 1;
++
++struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
++
++extern void mcheck_init(struct cpuinfo_x86 *c);
++
++extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
++
++extern int disable_pse;
++
++static void default_init(struct cpuinfo_x86 * c)
++{
++	/* Not much we can do here... */
++	/* Check if at least it has cpuid */
++	if (c->cpuid_level == -1) {
++		/* No cpuid. It must be an ancient CPU */
++		if (c->x86 == 4)
++			strcpy(c->x86_model_id, "486");
++		else if (c->x86 == 3)
++			strcpy(c->x86_model_id, "386");
++	}
++}
++
++static struct cpu_dev default_cpu = {
++	.c_init	= default_init,
++};
++static struct cpu_dev * this_cpu = &default_cpu;
++
++static int __init cachesize_setup(char *str)
++{
++	get_option (&str, &cachesize_override);
++	return 1;
++}
++__setup("cachesize=", cachesize_setup);
++
++int __init get_model_name(struct cpuinfo_x86 *c)
++{
++	unsigned int *v;
++	char *p, *q;
++
++	if (cpuid_eax(0x80000000) < 0x80000004)
++		return 0;
++
++	v = (unsigned int *) c->x86_model_id;
++	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++	c->x86_model_id[48] = 0;
++
++	/* Intel chips right-justify this string for some dumb reason;
++	   undo that brain damage */
++	p = q = &c->x86_model_id[0];
++	while ( *p == ' ' )
++	     p++;
++	if ( p != q ) {
++	     while ( *p )
++		  *q++ = *p++;
++	     while ( q <= &c->x86_model_id[48] )
++		  *q++ = '\0';	/* Zero-pad the rest */
++	}
++
++	return 1;
++}
++
++
++void __init display_cacheinfo(struct cpuinfo_x86 *c)
++{
++	unsigned int n, dummy, ecx, edx, l2size;
++
++	n = cpuid_eax(0x80000000);
++
++	if (n >= 0x80000005) {
++		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
++		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++		c->x86_cache_size=(ecx>>24)+(edx>>24);	
++	}
++
++	if (n < 0x80000006)	/* Some chips just have a large L1. */
++		return;
++
++	ecx = cpuid_ecx(0x80000006);
++	l2size = ecx >> 16;
++	
++	/* do processor-specific cache resizing */
++	if (this_cpu->c_size_cache)
++		l2size = this_cpu->c_size_cache(c,l2size);
++
++	/* Allow user to override all this if necessary. */
++	if (cachesize_override != -1)
++		l2size = cachesize_override;
++
++	if ( l2size == 0 )
++		return;		/* Again, no L2 cache is possible */
++
++	c->x86_cache_size = l2size;
++
++	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++	       l2size, ecx & 0xFF);
++}
++
++/* Naming convention should be: <Name> [(<Codename>)] */
++/* This table only is used unless init_<vendor>() below doesn't set it; */
++/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
++
++/* Look up CPU names by table lookup. */
++static char __init *table_lookup_model(struct cpuinfo_x86 *c)
++{
++	struct cpu_model_info *info;
++
++	if ( c->x86_model >= 16 )
++		return NULL;	/* Range check */
++
++	if (!this_cpu)
++		return NULL;
++
++	info = this_cpu->c_models;
++
++	while (info && info->family) {
++		if (info->family == c->x86)
++			return info->model_names[c->x86_model];
++		info++;
++	}
++	return NULL;		/* Not found */
++}
++
++
++void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
++{
++	char *v = c->x86_vendor_id;
++	int i;
++
++	for (i = 0; i < X86_VENDOR_NUM; i++) {
++		if (cpu_devs[i]) {
++			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
++			    (cpu_devs[i]->c_ident[1] && 
++			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
++				c->x86_vendor = i;
++				if (!early)
++					this_cpu = cpu_devs[i];
++				break;
++			}
++		}
++	}
++}
++
++
++static int __init x86_fxsr_setup(char * s)
++{
++	disable_x86_fxsr = 1;
++	return 1;
++}
++__setup("nofxsr", x86_fxsr_setup);
++
++
++/* Standard macro to see if a specific flag is changeable */
++static inline int flag_is_changeable_p(u32 flag)
++{
++	u32 f1, f2;
++
++	asm("pushfl\n\t"
++	    "pushfl\n\t"
++	    "popl %0\n\t"
++	    "movl %0,%1\n\t"
++	    "xorl %2,%0\n\t"
++	    "pushl %0\n\t"
++	    "popfl\n\t"
++	    "pushfl\n\t"
++	    "popl %0\n\t"
++	    "popfl\n\t"
++	    : "=&r" (f1), "=&r" (f2)
++	    : "ir" (flag));
++
++	return ((f1^f2) & flag) != 0;
++}
++
++
++/* Probe for the CPUID instruction */
++static int __init have_cpuid_p(void)
++{
++	return flag_is_changeable_p(X86_EFLAGS_ID);
++}
++
++/* Do minimum CPU detection early.
++   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
++   The others are not touched to avoid unwanted side effects. */
++static void __init early_cpu_detect(void)
++{
++	struct cpuinfo_x86 *c = &boot_cpu_data;
++
++	c->x86_cache_alignment = 32;
++
++	if (!have_cpuid_p())
++		return;
++
++	/* Get vendor name */
++	cpuid(0x00000000, &c->cpuid_level,
++	      (int *)&c->x86_vendor_id[0],
++	      (int *)&c->x86_vendor_id[8],
++	      (int *)&c->x86_vendor_id[4]);
++
++	get_cpu_vendor(c, 1);
++
++	c->x86 = 4;
++	if (c->cpuid_level >= 0x00000001) {
++		u32 junk, tfms, cap0, misc;
++		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
++		c->x86 = (tfms >> 8) & 15;
++		c->x86_model = (tfms >> 4) & 15;
++		if (c->x86 == 0xf) {
++			c->x86 += (tfms >> 20) & 0xff;
++			c->x86_model += ((tfms >> 16) & 0xF) << 4;
++		}
++		c->x86_mask = tfms & 15;
++		if (cap0 & (1<<19))
++			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
++	}
++
++	early_intel_workaround(c);
++
++#ifdef CONFIG_X86_HT
++	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
++
++void __init generic_identify(struct cpuinfo_x86 * c)
++{
++	u32 tfms, xlvl;
++	int junk;
++
++	if (have_cpuid_p()) {
++		/* Get vendor name */
++		cpuid(0x00000000, &c->cpuid_level,
++		      (int *)&c->x86_vendor_id[0],
++		      (int *)&c->x86_vendor_id[8],
++		      (int *)&c->x86_vendor_id[4]);
++		
++		get_cpu_vendor(c, 0);
++		/* Initialize the standard set of capabilities */
++		/* Note that the vendor-specific code below might override */
++	
++		/* Intel-defined flags: level 0x00000001 */
++		if ( c->cpuid_level >= 0x00000001 ) {
++			u32 capability, excap;
++			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
++			c->x86_capability[0] = capability;
++			c->x86_capability[4] = excap;
++			c->x86 = (tfms >> 8) & 15;
++			c->x86_model = (tfms >> 4) & 15;
++			if (c->x86 == 0xf) {
++				c->x86 += (tfms >> 20) & 0xff;
++				c->x86_model += ((tfms >> 16) & 0xF) << 4;
++			} 
++			c->x86_mask = tfms & 15;
++		} else {
++			/* Have CPUID level 0 only - unheard of */
++			c->x86 = 4;
++		}
++
++		/* AMD-defined flags: level 0x80000001 */
++		xlvl = cpuid_eax(0x80000000);
++		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
++			if ( xlvl >= 0x80000001 ) {
++				c->x86_capability[1] = cpuid_edx(0x80000001);
++				c->x86_capability[6] = cpuid_ecx(0x80000001);
++			}
++			if ( xlvl >= 0x80000004 )
++				get_model_name(c); /* Default name */
++		}
++	}
++}
++
++static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
++{
++	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
++		/* Disable processor serial number */
++		unsigned long lo,hi;
++		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++		lo |= 0x200000;
++		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++		printk(KERN_NOTICE "CPU serial number disabled.\n");
++		clear_bit(X86_FEATURE_PN, c->x86_capability);
++
++		/* Disabling the serial number may affect the cpuid level */
++		c->cpuid_level = cpuid_eax(0);
++	}
++}
++
++static int __init x86_serial_nr_setup(char *s)
++{
++	disable_x86_serial_nr = 0;
++	return 1;
++}
++__setup("serialnumber", x86_serial_nr_setup);
++
++
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __init identify_cpu(struct cpuinfo_x86 *c)
++{
++	int i;
++
++	c->loops_per_jiffy = loops_per_jiffy;
++	c->x86_cache_size = -1;
++	c->x86_vendor = X86_VENDOR_UNKNOWN;
++	c->cpuid_level = -1;	/* CPUID not detected */
++	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
++	c->x86_vendor_id[0] = '\0'; /* Unset */
++	c->x86_model_id[0] = '\0';  /* Unset */
++	c->x86_num_cores = 1;
++	memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++	if (!have_cpuid_p()) {
++		/* First of all, decide if this is a 486 or higher */
++		/* It's a 486 if we can modify the AC flag */
++		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
++			c->x86 = 4;
++		else
++			c->x86 = 3;
++	}
++
++	generic_identify(c);
++
++	printk(KERN_DEBUG "CPU: After generic identify, caps:");
++	for (i = 0; i < NCAPINTS; i++)
++		printk(" %08lx", c->x86_capability[i]);
++	printk("\n");
++
++	if (this_cpu->c_identify) {
++		this_cpu->c_identify(c);
++
++		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
++		for (i = 0; i < NCAPINTS; i++)
++			printk(" %08lx", c->x86_capability[i]);
++		printk("\n");
++	}
++
++	/*
++	 * Vendor-specific initialization.  In this section we
++	 * canonicalize the feature flags, meaning if there are
++	 * features a certain CPU supports which CPUID doesn't
++	 * tell us, CPUID claiming incorrect flags, or other bugs,
++	 * we handle them here.
++	 *
++	 * At the end of this section, c->x86_capability better
++	 * indicate the features this CPU genuinely supports!
++	 */
++	if (this_cpu->c_init)
++		this_cpu->c_init(c);
++
++	/* Disable the PN if appropriate */
++	squash_the_stupid_serial_number(c);
++
++	/*
++	 * The vendor-specific functions might have changed features.  Now
++	 * we do "generic changes."
++	 */
++
++	/* TSC disabled? */
++	if ( tsc_disable )
++		clear_bit(X86_FEATURE_TSC, c->x86_capability);
++
++	/* FXSR disabled? */
++	if (disable_x86_fxsr) {
++		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
++		clear_bit(X86_FEATURE_XMM, c->x86_capability);
++	}
++
++	if (disable_pse)
++		clear_bit(X86_FEATURE_PSE, c->x86_capability);
++
++	/* If the model name is still unset, do table lookup. */
++	if ( !c->x86_model_id[0] ) {
++		char *p;
++		p = table_lookup_model(c);
++		if ( p )
++			strcpy(c->x86_model_id, p);
++		else
++			/* Last resort... */
++			sprintf(c->x86_model_id, "%02x/%02x",
++				c->x86_vendor, c->x86_model);
++	}
++
++	machine_specific_modify_cpu_capabilities(c);
++
++	/* Now the feature flags better reflect actual CPU features! */
++
++	printk(KERN_DEBUG "CPU: After all inits, caps:");
++	for (i = 0; i < NCAPINTS; i++)
++		printk(" %08lx", c->x86_capability[i]);
++	printk("\n");
++
++	/*
++	 * On SMP, boot_cpu_data holds the common feature set between
++	 * all CPUs; so make sure that we indicate which features are
++	 * common between the CPUs.  The first time this routine gets
++	 * executed, c == &boot_cpu_data.
++	 */
++	if ( c != &boot_cpu_data ) {
++		/* AND the already accumulated flags with these */
++		for ( i = 0 ; i < NCAPINTS ; i++ )
++			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++	}
++
++	/* Init Machine Check Exception if available. */
++#ifdef CONFIG_X86_MCE
++	mcheck_init(c);
++#endif
++}
++
++#ifdef CONFIG_X86_HT
++void __init detect_ht(struct cpuinfo_x86 *c)
++{
++	u32 	eax, ebx, ecx, edx;
++	int 	index_msb, tmp;
++	int 	cpu = smp_processor_id();
++
++	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
++		return;
++
++	cpuid(1, &eax, &ebx, &ecx, &edx);
++	smp_num_siblings = (ebx & 0xff0000) >> 16;
++
++	if (smp_num_siblings == 1) {
++		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
++	} else if (smp_num_siblings > 1 ) {
++		index_msb = 31;
++
++		if (smp_num_siblings > NR_CPUS) {
++			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
++			smp_num_siblings = 1;
++			return;
++		}
++		tmp = smp_num_siblings;
++		while ((tmp & 0x80000000 ) == 0) {
++			tmp <<=1 ;
++			index_msb--;
++		}
++		if (smp_num_siblings & (smp_num_siblings - 1))
++			index_msb++;
++		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
++
++		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
++		       phys_proc_id[cpu]);
++
++		smp_num_siblings = smp_num_siblings / c->x86_num_cores;
++
++		tmp = smp_num_siblings;
++		index_msb = 31;
++		while ((tmp & 0x80000000) == 0) {
++			tmp <<=1 ;
++			index_msb--;
++		}
++
++		if (smp_num_siblings & (smp_num_siblings - 1))
++			index_msb++;
++
++		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
++
++		if (c->x86_num_cores > 1)
++			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
++			       cpu_core_id[cpu]);
++	}
++}
++#endif
++
++void __init print_cpu_info(struct cpuinfo_x86 *c)
++{
++	char *vendor = NULL;
++
++	if (c->x86_vendor < X86_VENDOR_NUM)
++		vendor = this_cpu->c_vendor;
++	else if (c->cpuid_level >= 0)
++		vendor = c->x86_vendor_id;
++
++	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
++		printk("%s ", vendor);
++
++	if (!c->x86_model_id[0])
++		printk("%d86", c->x86);
++	else
++		printk("%s", c->x86_model_id);
++
++	if (c->x86_mask || c->cpuid_level >= 0) 
++		printk(" stepping %02x\n", c->x86_mask);
++	else
++		printk("\n");
++}
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++/* This is hacky. :)
++ * We're emulating future behavior.
++ * In the future, the cpu-specific init functions will be called implicitly
++ * via the magic of initcalls.
++ * They will insert themselves into the cpu_devs structure.
++ * Then, when cpu_init() is called, we can just iterate over that array.
++ */
++
++extern int intel_cpu_init(void);
++extern int cyrix_init_cpu(void);
++extern int nsc_init_cpu(void);
++extern int amd_init_cpu(void);
++extern int centaur_init_cpu(void);
++extern int transmeta_init_cpu(void);
++extern int rise_init_cpu(void);
++extern int nexgen_init_cpu(void);
++extern int umc_init_cpu(void);
++
++void __init early_cpu_init(void)
++{
++	intel_cpu_init();
++	cyrix_init_cpu();
++	nsc_init_cpu();
++	amd_init_cpu();
++	centaur_init_cpu();
++	transmeta_init_cpu();
++	rise_init_cpu();
++	nexgen_init_cpu();
++	umc_init_cpu();
++	early_cpu_detect();
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	/* pse is not compatible with on-the-fly unmapping,
++	 * disable it even if the cpus claim to support it.
++	 */
++	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++	disable_pse = 1;
++#endif
++}
++
++void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
++{
++	unsigned long frames[16];
++	unsigned long va;
++	int f;
++
++	for (va = gdt_descr->address, f = 0;
++	     va < gdt_descr->address + gdt_descr->size;
++	     va += PAGE_SIZE, f++) {
++		frames[f] = virt_to_mfn(va);
++		make_lowmem_page_readonly(
++			(void *)va, XENFEAT_writable_descriptor_tables);
++	}
++	if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
++		BUG();
++	lgdt_finish();
++}
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ */
++void __cpuinit cpu_init (void)
++{
++	int cpu = smp_processor_id();
++	struct tss_struct * t = &per_cpu(init_tss, cpu);
++	struct thread_struct *thread = &current->thread;
++
++	if (cpu_test_and_set(cpu, cpu_initialized)) {
++		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
++		for (;;) local_irq_enable();
++	}
++	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
++
++	if (cpu_has_vme || cpu_has_de)
++		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++	if (tsc_disable && cpu_has_tsc) {
++		printk(KERN_NOTICE "Disabling TSC...\n");
++		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
++		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
++		set_in_cr4(X86_CR4_TSD);
++	}
++
++	/*
++	 * Set up the per-thread TLS descriptor cache:
++	 */
++	memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
++	       GDT_ENTRY_TLS_ENTRIES * 8);
++
++	cpu_gdt_init(&cpu_gdt_descr[cpu]);
++
++	/*
++	 * Delete NT
++	 */
++	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
++
++	/*
++	 * Set up and load the per-CPU TSS and LDT
++	 */
++	atomic_inc(&init_mm.mm_count);
++	current->active_mm = &init_mm;
++	if (current->mm)
++		BUG();
++	enter_lazy_tlb(&init_mm, current);
++
++	load_esp0(t, thread);
++
++	load_LDT(&init_mm.context);
++
++	/* Clear %fs and %gs. */
++	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
++
++	/* Clear all 6 debug registers: */
++
++#define CD(register) HYPERVISOR_set_debugreg(register, 0)
++
++	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
++
++#undef CD
++
++	/*
++	 * Force FPU initialization:
++	 */
++	current_thread_info()->status = 0;
++	clear_used_math();
++	mxcsr_feature_mask_init();
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/Makefile linux-2.6.12-xen/arch/xen/i386/kernel/cpu/Makefile
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/cpu/Makefile	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,31 @@
++#
++# Makefile for x86-compatible CPU details and quirks
++#
++
++CFLAGS	+= -Iarch/i386/kernel/cpu
++
++obj-y	:=	common.o
++c-obj-y	+=	proc.o
++
++c-obj-y	+=	amd.o
++c-obj-y	+=	cyrix.o
++c-obj-y	+=	centaur.o
++c-obj-y	+=	transmeta.o
++c-obj-y	+=	intel.o intel_cacheinfo.o
++c-obj-y	+=	rise.o
++c-obj-y	+=	nexgen.o
++c-obj-y	+=	umc.o
++
++#obj-$(CONFIG_X86_MCE)	+=	../../../../i386/kernel/cpu/mcheck/
++
++obj-$(CONFIG_MTRR)	+= 	mtrr/
++#obj-$(CONFIG_CPU_FREQ)	+=	../../../../i386/kernel/cpu/cpufreq/
++
++c-link	:=
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
++	@ln -fsn $(srctree)/arch/i386/kernel/cpu/$(notdir $@) $@
++
++obj-y	+= $(c-obj-y)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/mtrr/main.c linux-2.6.12-xen/arch/xen/i386/kernel/cpu/mtrr/main.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/mtrr/main.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/cpu/mtrr/main.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,172 @@
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++
++#include <asm/mtrr.h>
++#include "mtrr.h"
++
++void generic_get_mtrr(unsigned int reg, unsigned long *base,
++		      unsigned int *size, mtrr_type * type)
++{
++	dom0_op_t op;
++
++	op.cmd = DOM0_READ_MEMTYPE;
++	op.u.read_memtype.reg = reg;
++	(void)HYPERVISOR_dom0_op(&op);
++
++	*size = op.u.read_memtype.nr_pfns;
++	*base = op.u.read_memtype.pfn;
++	*type = op.u.read_memtype.type;
++}
++
++struct mtrr_ops generic_mtrr_ops = {
++	.use_intel_if      = 1,
++	.get               = generic_get_mtrr,
++};
++
++struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
++unsigned int num_var_ranges;
++unsigned int *usage_table;
++
++static void __init set_num_var_ranges(void)
++{
++	dom0_op_t op;
++
++	for (num_var_ranges = 0; ; num_var_ranges++) {
++		op.cmd = DOM0_READ_MEMTYPE;
++		op.u.read_memtype.reg = num_var_ranges;
++		if (HYPERVISOR_dom0_op(&op) != 0)
++			break;
++	}
++}
++
++static void __init init_table(void)
++{
++	int i, max;
++
++	max = num_var_ranges;
++	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
++	    == NULL) {
++		printk(KERN_ERR "mtrr: could not allocate\n");
++		return;
++	}
++	for (i = 0; i < max; i++)
++		usage_table[i] = 0;
++}
++
++int mtrr_add_page(unsigned long base, unsigned long size, 
++		  unsigned int type, char increment)
++{
++	int error;
++	dom0_op_t op;
++
++	op.cmd = DOM0_ADD_MEMTYPE;
++	op.u.add_memtype.pfn     = base;
++	op.u.add_memtype.nr_pfns = size;
++	op.u.add_memtype.type    = type;
++	error = HYPERVISOR_dom0_op(&op);
++	if (error) {
++		BUG_ON(error > 0);
++		return error;
++	}
++
++	if (increment)
++		++usage_table[op.u.add_memtype.reg];
++
++	return op.u.add_memtype.reg;
++}
++
++int
++mtrr_add(unsigned long base, unsigned long size, unsigned int type,
++	 char increment)
++{
++	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
++		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
++		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
++		return -EINVAL;
++	}
++	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
++			     increment);
++}
++
++int mtrr_del_page(int reg, unsigned long base, unsigned long size)
++{
++	int i, max;
++	mtrr_type ltype;
++	unsigned long lbase;
++	unsigned int lsize;
++	int error = -EINVAL;
++	dom0_op_t op;
++
++	max = num_var_ranges;
++	if (reg < 0) {
++		/*  Search for existing MTRR  */
++		for (i = 0; i < max; ++i) {
++			mtrr_if->get(i, &lbase, &lsize, &ltype);
++			if (lbase == base && lsize == size) {
++				reg = i;
++				break;
++			}
++		}
++		if (reg < 0) {
++			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
++			       size);
++			goto out;
++		}
++	}
++	if (usage_table[reg] < 1) {
++		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
++		goto out;
++	}
++	if (--usage_table[reg] < 1) {
++		op.cmd = DOM0_DEL_MEMTYPE;
++		op.u.del_memtype.handle = 0;
++		op.u.del_memtype.reg    = reg;
++		error = HYPERVISOR_dom0_op(&op);
++		if (error) {
++			BUG_ON(error > 0);
++			goto out;
++		}
++	}
++	error = reg;
++ out:
++	return error;
++}
++
++int
++mtrr_del(int reg, unsigned long base, unsigned long size)
++{
++	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
++		printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
++		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
++		return -EINVAL;
++	}
++	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
++}
++
++EXPORT_SYMBOL(mtrr_add);
++EXPORT_SYMBOL(mtrr_del);
++
++static int __init mtrr_init(void)
++{
++	struct cpuinfo_x86 *c = &boot_cpu_data;
++
++	if (!(xen_start_info->flags & SIF_PRIVILEGED))
++		return -ENODEV;
++
++	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
++	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
++	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
++	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
++		return -ENODEV;
++
++	set_num_var_ranges();
++	init_table();
++
++	return 0;
++}
++
++subsys_initcall(mtrr_init);
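
The mtrr_add()/mtrr_del() entry points above keep the stock in-kernel MTRR interface; only the backend changes, turning each request into a DOM0_ADD_MEMTYPE/DOM0_DEL_MEMTYPE hypercall, so the calls can only succeed in a privileged (dom0) guest. A caller therefore looks the same as on native Linux. A hypothetical driver fragment, with made-up function names and framebuffer values, purely for illustration:

#include <linux/kernel.h>
#include <asm/mtrr.h>

static int fb_mtrr = -1;	/* register handle returned by mtrr_add() */

/* base and size must be multiples of PAGE_SIZE, as checked above. */
static void fb_enable_wc(unsigned long fb_base, unsigned long fb_size)
{
	fb_mtrr = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, 1);
	if (fb_mtrr < 0)
		printk(KERN_INFO "fb: write-combining not enabled (%d)\n", fb_mtrr);
}

static void fb_disable_wc(unsigned long fb_base, unsigned long fb_size)
{
	if (fb_mtrr >= 0)
		mtrr_del(fb_mtrr, fb_base, fb_size);
}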
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/mtrr/Makefile linux-2.6.12-xen/arch/xen/i386/kernel/cpu/mtrr/Makefile
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/mtrr/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/cpu/mtrr/Makefile	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,16 @@
++obj-y	:= main.o
++c-obj-y	:= if.o
++
++c-link	:=
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)): $(obj)/mtrr.h
++	@ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/$(notdir $@) $@
++
++$(patsubst %.o,$(obj)/%.c,$(obj-y)): $(obj)/mtrr.h
++
++$(obj)/mtrr.h:
++	@ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/mtrr.h $@
++
++obj-y	+= $(c-obj-y)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/entry.S linux-2.6.12-xen/arch/xen/i386/kernel/entry.S
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/entry.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/entry.S	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,842 @@
++/*
++ *  linux/arch/i386/entry.S
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'ret_from_system_call':
++ * 	ptrace needs to have all regs on the stack.
++ *	if the order here is changed, it needs to be
++ *	updated in fork.c:copy_process, signal.c:do_signal,
++ *	ptrace.c and ptrace.h
++ *
++ *	 0(%esp) - %ebx
++ *	 4(%esp) - %ecx
++ *	 8(%esp) - %edx
++ *       C(%esp) - %esi
++ *	10(%esp) - %edi
++ *	14(%esp) - %ebp
++ *	18(%esp) - %eax
++ *	1C(%esp) - %ds
++ *	20(%esp) - %es
++ *	24(%esp) - orig_eax
++ *	28(%esp) - %eip
++ *	2C(%esp) - %cs
++ *	30(%esp) - %eflags
++ *	34(%esp) - %oldesp
++ *	38(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
++
++#include <linux/config.h>
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include "irq_vectors.h"
++#include <asm-xen/xen-public/xen.h>
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++EBX		= 0x00
++ECX		= 0x04
++EDX		= 0x08
++ESI		= 0x0C
++EDI		= 0x10
++EBP		= 0x14
++EAX		= 0x18
++DS		= 0x1C
++ES		= 0x20
++ORIG_EAX	= 0x24
++EIP		= 0x28
++CS		= 0x2C
++EVENT_MASK	= 0x2E
++EFLAGS		= 0x30
++OLDESP		= 0x34
++OLDSS		= 0x38
++
++CF_MASK		= 0x00000001
++TF_MASK		= 0x00000100
++IF_MASK		= 0x00000200
++DF_MASK		= 0x00000400 
++NT_MASK		= 0x00004000
++VM_MASK		= 0x00020000
++/* Pseudo-eflags. */
++NMI_MASK	= 0x80000000
++	
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending		/* 0 */
++#define evtchn_upcall_mask		1
++
++#define sizeof_vcpu_shift		6
++
++#ifdef CONFIG_SMP
++#define preempt_disable(reg)	incl TI_preempt_count(reg)
++#define preempt_enable(reg)	decl TI_preempt_count(reg)
++#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%ebp)			; \
++				movl TI_cpu(%ebp),reg			; \
++				shl  $sizeof_vcpu_shift,reg		; \
++				addl HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
++#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
++#else
++#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg)
++#define XEN_PUT_VCPU_INFO_fixup
++#endif
++
++#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
++#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
++#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
++				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
++    				XEN_PUT_VCPU_INFO(reg)
++#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
++				XEN_LOCKED_UNBLOCK_EVENTS(reg)		; \
++    				XEN_PUT_VCPU_INFO(reg)
++#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop		GET_THREAD_INFO(%ebp)			; \
++				XEN_BLOCK_EVENTS(%esi)
++#else
++#define preempt_stop
++#define resume_kernel		restore_nocheck
++#endif
++
++#define SAVE_ALL \
++	cld; \
++	pushl %es; \
++	pushl %ds; \
++	pushl %eax; \
++	pushl %ebp; \
++	pushl %edi; \
++	pushl %esi; \
++	pushl %edx; \
++	pushl %ecx; \
++	pushl %ebx; \
++	movl $(__USER_DS), %edx; \
++	movl %edx, %ds; \
++	movl %edx, %es;
++
++#define RESTORE_INT_REGS \
++	popl %ebx;	\
++	popl %ecx;	\
++	popl %edx;	\
++	popl %esi;	\
++	popl %edi;	\
++	popl %ebp;	\
++	popl %eax
++
++#define RESTORE_REGS	\
++	RESTORE_INT_REGS; \
++1:	popl %ds;	\
++2:	popl %es;	\
++.section .fixup,"ax";	\
++3:	movl $0,(%esp);	\
++	jmp 1b;		\
++4:	movl $0,(%esp);	\
++	jmp 2b;		\
++.previous;		\
++.section __ex_table,"a";\
++	.align 4;	\
++	.long 1b,3b;	\
++	.long 2b,4b;	\
++.previous
++
++
++#define RESTORE_ALL	\
++	RESTORE_REGS	\
++	addl $4, %esp;	\
++1:	iret;		\
++.section .fixup,"ax";   \
++2:	pushl $0;	\
++	pushl $do_iret_error;	\
++	jmp error_code;	\
++.previous;		\
++.section __ex_table,"a";\
++	.align 4;	\
++	.long 1b,2b;	\
++.previous
++
++
++ENTRY(ret_from_fork)
++	pushl %eax
++	call schedule_tail
++	GET_THREAD_INFO(%ebp)
++	popl %eax
++	jmp syscall_exit
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++	# userspace resumption stub bypassing syscall exit tracing
++	ALIGN
++ret_from_exception:
++	preempt_stop
++ret_from_intr:
++	GET_THREAD_INFO(%ebp)
++	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
++	movb CS(%esp), %al
++	testl $(VM_MASK | 2), %eax
++	jz resume_kernel
++ENTRY(resume_userspace)
++	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
++					# setting need_resched or sigpending
++					# between sampling and the iret
++	movl TI_flags(%ebp), %ecx
++	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
++					# int/exception return?
++	jne work_pending
++	jmp restore_all
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++	XEN_BLOCK_EVENTS(%esi)
++	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
++	jnz restore_nocheck
++need_resched:
++	movl TI_flags(%ebp), %ecx	# need_resched set ?
++	testb $_TIF_NEED_RESCHED, %cl
++	jz restore_all
++	testb $0xFF,EVENT_MASK(%esp)	# interrupts off (exception path) ?
++	jnz restore_all
++	call preempt_schedule_irq
++	jmp need_resched
++#endif
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
++
++	# sysenter call handler stub
++ENTRY(sysenter_entry)
++	movl TSS_sysenter_esp0(%esp),%esp
++sysenter_past_esp:
++	sti
++	pushl $(__USER_DS)
++	pushl %ebp
++	pushfl
++	pushl $(__USER_CS)
++	pushl $SYSENTER_RETURN
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++	cmpl $__PAGE_OFFSET-3,%ebp
++	jae syscall_fault
++1:	movl (%ebp),%ebp
++.section __ex_table,"a"
++	.align 4
++	.long 1b,syscall_fault
++.previous
++
++	pushl %eax
++	SAVE_ALL
++	GET_THREAD_INFO(%ebp)
++
++	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
++	jnz syscall_trace_entry
++	cmpl $(nr_syscalls), %eax
++	jae syscall_badsys
++	call *sys_call_table(,%eax,4)
++	movl %eax,EAX(%esp)
++	cli
++	movl TI_flags(%ebp), %ecx
++	testw $_TIF_ALLWORK_MASK, %cx
++	jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++	movl EIP(%esp), %edx
++	movl OLDESP(%esp), %ecx
++	xorl %ebp,%ebp
++	sti
++	sysexit
++
++
++	# system call handler stub
++ENTRY(system_call)
++	pushl %eax			# save orig_eax
++	SAVE_ALL
++	GET_THREAD_INFO(%ebp)
++					# system call tracing in operation
++	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
++	jnz syscall_trace_entry
++	cmpl $(nr_syscalls), %eax
++	jae syscall_badsys
++syscall_call:
++	call *sys_call_table(,%eax,4)
++	movl %eax,EAX(%esp)		# store the return value
++syscall_exit:
++	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
++					# setting need_resched or sigpending
++					# between sampling and the iret
++	movl TI_flags(%ebp), %ecx
++	testw $_TIF_ALLWORK_MASK, %cx	# current->work
++	jne syscall_exit_work
++
++restore_all:
++#if 0 /* XEN */
++	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
++	# Warning: OLDSS(%esp) contains the wrong/random values if we
++	# are returning to the kernel.
++	# See comments in process.c:copy_thread() for details.
++	movb OLDSS(%esp), %ah
++	movb CS(%esp), %al
++	andl $(VM_MASK | (4 << 8) | 3), %eax
++	cmpl $((4 << 8) | 3), %eax
++	je ldt_ss			# returning to user-space with LDT SS
++#endif /* XEN */
++restore_nocheck:
++	testl $(VM_MASK|NMI_MASK), EFLAGS(%esp)
++	jnz hypervisor_iret
++	movb EVENT_MASK(%esp), %al
++	notb %al			# %al == ~saved_mask
++	XEN_GET_VCPU_INFO(%esi)
++	andb evtchn_upcall_mask(%esi),%al
++	andb $1,%al			# %al == mask & ~saved_mask
++	jnz restore_all_enable_events	#     != 0 => reenable event delivery
++	XEN_PUT_VCPU_INFO(%esi)
++	RESTORE_REGS
++	addl $4, %esp
++1:	iret
++.section .fixup,"ax"
++iret_exc:
++	pushl $0			# no error code
++	pushl $do_iret_error
++	jmp error_code
++.previous
++.section __ex_table,"a"
++	.align 4
++	.long 1b,iret_exc
++.previous
++
++hypervisor_iret:
++	andl $~NMI_MASK, EFLAGS(%esp)
++	RESTORE_REGS
++	addl $4, %esp
++	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
++
++#if 0 /* XEN */
++ldt_ss:
++	larl OLDSS(%esp), %eax
++	jnz restore_nocheck
++	testl $0x00400000, %eax		# returning to 32bit stack?
++	jnz restore_nocheck		# allright, normal return
++	/* If returning to userspace with 16bit stack,
++	 * try to fix the higher word of ESP, as the CPU
++	 * won't restore it.
++	 * This is an "official" bug of all the x86-compatible
++	 * CPUs, which we can try to work around to make
++	 * dosemu and wine happy. */
++	subl $8, %esp		# reserve space for switch16 pointer
++	cli
++	movl %esp, %eax
++	/* Set up the 16bit stack frame with switch32 pointer on top,
++	 * and a switch16 pointer on top of the current frame. */
++	call setup_x86_bogus_stack
++	RESTORE_REGS
++	lss 20+4(%esp), %esp	# switch to 16bit stack
++1:	iret
++.section __ex_table,"a"
++	.align 4
++	.long 1b,iret_exc
++.previous
++#endif /* XEN */
++
++	# perform work that needs to be done immediately before resumption
++	ALIGN
++work_pending:
++	testb $_TIF_NEED_RESCHED, %cl
++	jz work_notifysig
++work_resched:
++	call schedule
++	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
++					# setting need_resched or sigpending
++					# between sampling and the iret
++	movl TI_flags(%ebp), %ecx
++	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
++					# than syscall tracing?
++	jz restore_all
++	testb $_TIF_NEED_RESCHED, %cl
++	jnz work_resched
++
++work_notifysig:				# deal with pending signals and
++					# notify-resume requests
++	testl $VM_MASK, EFLAGS(%esp)
++	movl %esp, %eax
++	jne work_notifysig_v86		# returning to kernel-space or
++					# vm86-space
++	xorl %edx, %edx
++	call do_notify_resume
++	jmp restore_all
++
++	ALIGN
++work_notifysig_v86:
++	pushl %ecx			# save ti_flags for do_notify_resume
++	call save_v86_state		# %eax contains pt_regs pointer
++	popl %ecx
++	movl %eax, %esp
++	xorl %edx, %edx
++	call do_notify_resume
++	jmp restore_all
++
++	# perform syscall exit tracing
++	ALIGN
++syscall_trace_entry:
++	movl $-ENOSYS,EAX(%esp)
++	movl %esp, %eax
++	xorl %edx,%edx
++	call do_syscall_trace
++	movl ORIG_EAX(%esp), %eax
++	cmpl $(nr_syscalls), %eax
++	jnae syscall_call
++	jmp syscall_exit
++
++	# perform syscall exit tracing
++	ALIGN
++syscall_exit_work:
++	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++	jz work_pending
++	XEN_UNBLOCK_EVENTS(%esi)	# could let do_syscall_trace() call
++					# schedule() instead
++	movl %esp, %eax
++	movl $1, %edx
++	call do_syscall_trace
++	jmp resume_userspace
++
++	ALIGN
++syscall_fault:
++	pushl %eax			# save orig_eax
++	SAVE_ALL
++	GET_THREAD_INFO(%ebp)
++	movl $-EFAULT,EAX(%esp)
++	jmp resume_userspace
++
++	ALIGN
++syscall_badsys:
++	movl $-ENOSYS,EAX(%esp)
++	jmp resume_userspace
++
++#if 0 /* XEN */
++#define FIXUP_ESPFIX_STACK \
++	movl %esp, %eax; \
++	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
++	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
++	/* copy data from 16bit stack to 32bit stack */ \
++	call fixup_x86_bogus_stack; \
++	/* put ESP to the proper location */ \
++	movl %eax, %esp;
++#define UNWIND_ESPFIX_STACK \
++	pushl %eax; \
++	movl %ss, %eax; \
++	/* see if on 16bit stack */ \
++	cmpw $__ESPFIX_SS, %ax; \
++	jne 28f; \
++	movl $__KERNEL_DS, %edx; \
++	movl %edx, %ds; \
++	movl %edx, %es; \
++	/* switch to 32bit stack */ \
++	FIXUP_ESPFIX_STACK \
++28:	popl %eax;
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.data
++ENTRY(interrupt)
++.text
++
++vector=0
++ENTRY(irq_entries_start)
++.rept NR_IRQS
++	ALIGN
++1:	pushl $vector-256
++	jmp common_interrupt
++.data
++	.long 1b
++.text
++vector=vector+1
++.endr
++
++	ALIGN
++common_interrupt:
++	SAVE_ALL
++	movl %esp,%eax
++	call do_IRQ
++	jmp ret_from_intr
++
++#define BUILD_INTERRUPT(name, nr)	\
++ENTRY(name)				\
++	pushl $nr-256;			\
++	SAVE_ALL			\
++	movl %esp,%eax;			\
++	call smp_/**/name;		\
++	jmp ret_from_intr;
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++#endif /* XEN */
++
++ENTRY(divide_error)
++	pushl $0			# no error code
++	pushl $do_divide_error
++	ALIGN
++error_code:
++	pushl %ds
++	pushl %eax
++	xorl %eax, %eax
++	pushl %ebp
++	pushl %edi
++	pushl %esi
++	pushl %edx
++	decl %eax			# eax = -1
++	pushl %ecx
++	pushl %ebx
++	cld
++	pushl %es
++#	UNWIND_ESPFIX_STACK
++	popl %ecx
++	movl ES(%esp), %edi		# get the function address
++	movl ORIG_EAX(%esp), %edx	# get the error code
++	movl %eax, ORIG_EAX(%esp)
++	movl %ecx, ES(%esp)
++	movl $(__USER_DS), %ecx
++	movl %ecx, %ds
++	movl %ecx, %es
++	movl %esp,%eax			# pt_regs pointer
++	call *%edi
++	jmp ret_from_exception
++
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++ENTRY(hypervisor_callback)
++	pushl %eax
++	SAVE_ALL
++	movl EIP(%esp),%eax
++	cmpl $scrit,%eax
++	jb   11f
++	cmpl $ecrit,%eax
++	jb   critical_region_fixup
++11:	push %esp
++	call evtchn_do_upcall
++	add  $4,%esp
++	jmp  ret_from_intr
++
++        ALIGN
++restore_all_enable_events:  
++	XEN_LOCKED_UNBLOCK_EVENTS(%esi)
++scrit:	/**** START OF CRITICAL REGION ****/
++	XEN_TEST_PENDING(%esi)
++	jnz  14f			# process more events if necessary...
++	XEN_PUT_VCPU_INFO(%esi)
++	RESTORE_ALL
++14:	XEN_LOCKED_BLOCK_EVENTS(%esi)
++	XEN_PUT_VCPU_INFO(%esi)
++	jmp  11b
++ecrit:  /**** END OF CRITICAL REGION ****/
++# [How we do the fixup]. We want to merge the current stack frame with the
++# just-interrupted frame. How we do this depends on where in the critical
++# region the interrupted handler was executing, and so how many saved
++# registers are in each frame. We do this quickly using the lookup table
++# 'critical_fixup_table'. For each byte offset in the critical region, it
++# provides the number of bytes which have already been popped from the
++# interrupted stack frame. 
++critical_region_fixup:
++	addl $critical_fixup_table-scrit,%eax
++	movzbl (%eax),%eax		# %eax contains num bytes popped
++	cmpb $0xff,%al                  # 0xff => vcpu_info critical region
++	jne  15f
++	GET_THREAD_INFO(%ebp)
++	XEN_PUT_VCPU_INFO(%esi)         # abort vcpu_info critical region
++        xorl %eax,%eax
++15:	mov  %esp,%esi
++	add  %eax,%esi			# %esi points at end of src region
++	mov  %esp,%edi
++	add  $0x34,%edi			# %edi points at end of dst region
++	mov  %eax,%ecx
++	shr  $2,%ecx			# convert byte count to 32-bit words
++	je   17f			# skip loop if nothing to copy
++16:	subl $4,%esi			# pre-decrementing copy loop
++	subl $4,%edi
++	movl (%esi),%eax
++	movl %eax,(%edi)
++	loop 16b
++17:	movl %edi,%esp			# final %edi is top of merged stack
++	jmp  11b
++
++critical_fixup_table:
++	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = XEN_TEST_PENDING
++	.byte 0xff,0xff			# jnz  14f
++	XEN_PUT_VCPU_INFO_fixup
++	.byte 0x00			# pop  %ebx
++	.byte 0x04			# pop  %ecx
++	.byte 0x08			# pop  %edx
++	.byte 0x0c			# pop  %esi
++	.byte 0x10			# pop  %edi
++	.byte 0x14			# pop  %ebp
++	.byte 0x18			# pop  %eax
++	.byte 0x1c			# pop  %ds
++	.byte 0x20			# pop  %es
++	.byte 0x24,0x24,0x24		# add  $4,%esp
++	.byte 0x28			# iret
++	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
++	XEN_PUT_VCPU_INFO_fixup
++	.byte 0x00,0x00			# jmp  11b
++
++# Hypervisor uses this for application faults while it executes.
++ENTRY(failsafe_callback)
++1:	popl %ds
++2:	popl %es
++3:	popl %fs
++4:	popl %gs
++	subl $4,%esp
++	SAVE_ALL
++	jmp  ret_from_exception
++.section .fixup,"ax";	\
++6:	movl $0,(%esp);	\
++	jmp 1b;		\
++7:	movl $0,(%esp);	\
++	jmp 2b;		\
++8:	movl $0,(%esp);	\
++	jmp 3b;		\
++9:	movl $0,(%esp);	\
++	jmp 4b;		\
++.previous;		\
++.section __ex_table,"a";\
++	.align 4;	\
++	.long 1b,6b;	\
++	.long 2b,7b;	\
++	.long 3b,8b;	\
++	.long 4b,9b;	\
++.previous
++
++ENTRY(coprocessor_error)
++	pushl $0
++	pushl $do_coprocessor_error
++	jmp error_code
++
++ENTRY(simd_coprocessor_error)
++	pushl $0
++	pushl $do_simd_coprocessor_error
++	jmp error_code
++
++ENTRY(device_not_available)
++	pushl $-1			# mark this as an int
++	SAVE_ALL
++	#preempt_stop /* This is already an interrupt gate on Xen. */
++	call math_state_restore
++	jmp ret_from_exception
++
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label)		\
++	cmpw $__KERNEL_CS,4(%esp);		\
++	jne ok;					\
++label:						\
++	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
++	pushfl;					\
++	pushl $__KERNEL_CS;			\
++	pushl $sysenter_past_esp
++
++ENTRY(debug)
++	cmpl $sysenter_entry,(%esp)
++	jne debug_stack_correct
++	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++	pushl $-1			# mark this as an int
++	SAVE_ALL
++	xorl %edx,%edx			# error code 0
++	movl %esp,%eax			# pt_regs pointer
++	call do_debug
++	jmp ret_from_exception
++
++ENTRY(nmi)
++	pushl %eax
++	SAVE_ALL
++	xorl %edx,%edx		# zero error code
++	movl %esp,%eax		# pt_regs pointer
++	call do_nmi
++	orl  $NMI_MASK, EFLAGS(%esp)
++	jmp restore_all
++
++#if 0 /* XEN */
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got  an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++ENTRY(nmi)
++	pushl %eax
++	movl %ss, %eax
++	cmpw $__ESPFIX_SS, %ax
++	popl %eax
++	je nmi_16bit_stack
++	cmpl $sysenter_entry,(%esp)
++	je nmi_stack_fixup
++	pushl %eax
++	movl %esp,%eax
++	/* Do not access memory above the end of our stack page,
++	 * it might not exist.
++	 */
++	andl $(THREAD_SIZE-1),%eax
++	cmpl $(THREAD_SIZE-20),%eax
++	popl %eax
++	jae nmi_stack_correct
++	cmpl $sysenter_entry,12(%esp)
++	je nmi_debug_stack_check
++nmi_stack_correct:
++	pushl %eax
++	SAVE_ALL
++	xorl %edx,%edx		# zero error code
++	movl %esp,%eax		# pt_regs pointer
++	call do_nmi
++	jmp restore_all
++
++nmi_stack_fixup:
++	FIX_STACK(12,nmi_stack_correct, 1)
++	jmp nmi_stack_correct
++nmi_debug_stack_check:
++	cmpw $__KERNEL_CS,16(%esp)
++	jne nmi_stack_correct
++	cmpl $debug - 1,(%esp)
++	jle nmi_stack_correct
++	cmpl $debug_esp_fix_insn,(%esp)
++	jle nmi_debug_stack_fixup
++nmi_debug_stack_fixup:
++	FIX_STACK(24,nmi_stack_correct, 1)
++	jmp nmi_stack_correct
++
++nmi_16bit_stack:
++	/* create the pointer to lss back */
++	pushl %ss
++	pushl %esp
++	movzwl %sp, %esp
++	addw $4, (%esp)
++	/* copy the iret frame of 12 bytes */
++	.rept 3
++	pushl 16(%esp)
++	.endr
++	pushl %eax
++	SAVE_ALL
++	FIXUP_ESPFIX_STACK		# %eax == %esp
++	xorl %edx,%edx			# zero error code
++	call do_nmi
++	RESTORE_REGS
++	lss 12+4(%esp), %esp		# back to 16bit stack
++1:	iret
++.section __ex_table,"a"
++	.align 4
++	.long 1b,iret_exc
++.previous
++#endif /* XEN */
++
++ENTRY(int3)
++	pushl $-1			# mark this as an int
++	SAVE_ALL
++	xorl %edx,%edx		# zero error code
++	movl %esp,%eax		# pt_regs pointer
++	call do_int3
++	jmp ret_from_exception
++
++ENTRY(overflow)
++	pushl $0
++	pushl $do_overflow
++	jmp error_code
++
++ENTRY(bounds)
++	pushl $0
++	pushl $do_bounds
++	jmp error_code
++
++ENTRY(invalid_op)
++	pushl $0
++	pushl $do_invalid_op
++	jmp error_code
++
++ENTRY(coprocessor_segment_overrun)
++	pushl $0
++	pushl $do_coprocessor_segment_overrun
++	jmp error_code
++
++ENTRY(invalid_TSS)
++	pushl $do_invalid_TSS
++	jmp error_code
++
++ENTRY(segment_not_present)
++	pushl $do_segment_not_present
++	jmp error_code
++
++ENTRY(stack_segment)
++	pushl $do_stack_segment
++	jmp error_code
++
++ENTRY(general_protection)
++	pushl $do_general_protection
++	jmp error_code
++
++ENTRY(alignment_check)
++	pushl $do_alignment_check
++	jmp error_code
++
++ENTRY(page_fault)
++	pushl $do_page_fault
++	jmp error_code
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++	pushl $0
++	pushl machine_check_vector
++	jmp error_code
++#endif
++
++ENTRY(fixup_4gb_segment)
++	pushl $do_fixup_4gb_segment
++	jmp error_code
++
++#include "syscall_table.S"
++
++syscall_table_size=(.-sys_call_table)
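
For readers unfamiliar with the Xen event model: the XEN_BLOCK_EVENTS/XEN_UNBLOCK_EVENTS macros used throughout this entry.S stand in for cli/sti. Instead of touching EFLAGS.IF, the guest sets or clears evtchn_upcall_mask in its shared vcpu_info structure, and after unmasking it must check evtchn_upcall_pending for events that arrived while masked (which is what the restore_all_enable_events/critical-region code above does on the exit path). A rough C sketch of the same idea, a simplification rather than the code the kernel actually runs:

/* Sketch only; field names follow the vcpu_info layout this patch uses,
 * and force_evtchn_callback() stands for "ask Xen to deliver pending
 * events now". */
static inline void xen_block_events(vcpu_info_t *v)
{
	v->evtchn_upcall_mask = 1;		/* analogous to cli */
	barrier();
}

static inline void xen_unblock_events(vcpu_info_t *v)
{
	barrier();
	v->evtchn_upcall_mask = 0;		/* analogous to sti */
	barrier();
	if (v->evtchn_upcall_pending)		/* missed an event while masked? */
		force_evtchn_callback();
}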
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/fixup.c linux-2.6.12-xen/arch/xen/i386/kernel/fixup.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/fixup.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/fixup.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,95 @@
++/******************************************************************************
++ * fixup.c
++ * 
++ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
++ * Used to avoid repeated slow emulation of common instructions used by the
++ * user-space TLS (Thread-Local Storage) libraries.
++ * 
++ * **** NOTE ****
++ *  Issues with the binary rewriting have caused it to be removed. Instead
++ *  we rely on Xen's emulator to boot the kernel, and then print a banner
++ *  message recommending that the user disables /lib/tls.
++ * 
++ * Copyright (c) 2004, K A Fraser
++ * 
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ * 
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ * 
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ */
++
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/version.h>
++
++#define DP(_f, _args...) printk(KERN_ALERT "  " _f "\n" , ## _args )
++
++fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
++{
++	static unsigned long printed = 0;
++	char info[100];
++	int i;
++
++	if (test_and_set_bit(0, &printed))
++		return;
++
++	HYPERVISOR_vm_assist(
++		VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
++
++	sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
++
++
++	DP("");
++	DP("***************************************************************");
++	DP("***************************************************************");
++	DP("** WARNING: Currently emulating unsupported memory accesses  **");
++	DP("**          in /lib/tls glibc libraries. The emulation is    **");
++	DP("**          slow. To ensure full performance you should      **");
++	DP("**          install a 'xen-friendly' (nosegneg) version of   **");
++	DP("**          the library, or disable tls support by executing **");
++	DP("**          the following as root:                           **");
++	DP("**          mv /lib/tls /lib/tls.disabled                    **");
++	DP("** Offending process: %-38.38s **", info);
++	DP("***************************************************************");
++	DP("***************************************************************");
++	DP("");
++
++	for (i = 5; i > 0; i--) {
++		printk("Pausing... %d", i);
++		mdelay(1000);
++		printk("\b\b\b\b\b\b\b\b\b\b\b\b");
++	}
++
++	printk("Continuing...\n\n");
++}
++
++static int __init fixup_init(void)
++{
++	HYPERVISOR_vm_assist(
++		VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
++	return 0;
++}
++__initcall(fixup_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/head.S linux-2.6.12-xen/arch/xen/i386/kernel/head.S
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/head.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/head.S	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,198 @@
++
++#include <linux/config.h>
++
++.section __xen_guest
++	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
++	.ascii	",XEN_VER=xen-3.0"
++	.ascii	",VIRT_BASE=0xC0000000"
++	.ascii	",HYPERCALL_PAGE=0x104" /* __pa(hypercall_page) >> 12 */
++#ifdef CONFIG_X86_PAE
++	.ascii	",PAE=yes"
++#else
++	.ascii	",PAE=no"
++#endif
++#ifdef CONFIG_XEN_SHADOW_MODE
++	.ascii	",SHADOW=translate"
++#endif
++	.ascii	",LOADER=generic"
++	.byte	0
++
++.text
++#include <linux/threads.h>
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/thread_info.h>
++#include <asm/asm_offsets.h>
++#include <asm-xen/xen-public/arch-x86_32.h>
++
++/*
++ * References to members of the new_cpu_data structure.
++ */
++
++#define X86		new_cpu_data+CPUINFO_x86
++#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
++#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
++#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
++#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
++#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
++#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
++#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id
++
++ENTRY(startup_32)
++	movl %esi,xen_start_info
++
++#if 0
++ENTRY(startup_32_smp)
++#endif /* CONFIG_SMP */
++
++	cld
++
++	/* Set up the stack pointer */
++	lss stack_start,%esp
++
++checkCPUtype:
++
++	/* get vendor info */
++	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
++	cpuid
++	movl %eax,X86_CPUID		# save CPUID level
++	movl %ebx,X86_VENDOR_ID		# lo 4 chars
++	movl %edx,X86_VENDOR_ID+4	# next 4 chars
++	movl %ecx,X86_VENDOR_ID+8	# last 4 chars
++
++	movl $1,%eax		# Use the CPUID instruction to get CPU type
++	cpuid
++	movb %al,%cl		# save reg for future use
++	andb $0x0f,%ah		# mask processor family
++	movb %ah,X86
++	andb $0xf0,%al		# mask model
++	shrb $4,%al
++	movb %al,X86_MODEL
++	andb $0x0f,%cl		# mask stepping ("mask") revision
++	movb %cl,X86_MASK
++	movl %edx,X86_CAPABILITY
++
++	incb ready
++
++	xorl %eax,%eax			# Clear FS/GS and LDT
++	movl %eax,%fs
++	movl %eax,%gs
++	cld			# gcc2 wants the direction flag cleared at all times
++
++#if 0
++	movb ready, %cl	
++	cmpb $1,%cl
++	je 1f			# the first CPU calls start_kernel
++				# all other CPUs call initialize_secondary
++	call initialize_secondary
++	jmp L6
++1:
++#endif /* CONFIG_SMP */
++	call start_kernel
++L6:
++	jmp L6			# main should never return here, but
++				# just in case, we know what happens.
++
++ENTRY(lgdt_finish)
++	movl $(__KERNEL_DS),%eax	# reload all the segment registers
++	movw %ax,%ss			# after changing gdt.
++
++	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
++	movw %ax,%ds
++	movw %ax,%es
++
++	popl %eax			# reload CS by intersegment return
++	pushl $(__KERNEL_CS)
++	pushl %eax
++	lret
++
++ENTRY(stack_start)
++	.long init_thread_union+THREAD_SIZE
++	.long __BOOT_DS
++
++ready:	.byte 0
++
++.globl idt_descr
++.globl cpu_gdt_descr
++
++	ALIGN
++	.word 0				# 32-bit align idt_desc.address
++idt_descr:
++	.word IDT_ENTRIES*8-1		# idt contains 256 entries
++	.long idt_table
++
++# boot GDT descriptor (later on used by CPU#0):
++	.word 0				# 32 bit align gdt_desc.address
++cpu_gdt_descr:
++	.word GDT_SIZE
++	.long cpu_gdt_table
++
++	.fill NR_CPUS-1,8,0		# space for the other GDT descriptors
++
++.org 0x1000
++ENTRY(empty_zero_page)
++
++.org 0x2000
++ENTRY(cpu_gdt_table)
++	.quad 0x0000000000000000	/* NULL descriptor */
++	.quad 0x0000000000000000	/* 0x0b reserved */
++	.quad 0x0000000000000000	/* 0x13 reserved */
++	.quad 0x0000000000000000	/* 0x1b reserved */
++	.quad 0x0000000000000000	/* 0x20 unused */
++	.quad 0x0000000000000000	/* 0x28 unused */
++	.quad 0x0000000000000000	/* 0x33 TLS entry 1 */
++	.quad 0x0000000000000000	/* 0x3b TLS entry 2 */
++	.quad 0x0000000000000000	/* 0x43 TLS entry 3 */
++	.quad 0x0000000000000000	/* 0x4b reserved */
++	.quad 0x0000000000000000	/* 0x53 reserved */
++	.quad 0x0000000000000000	/* 0x5b reserved */
++
++#ifdef CONFIG_X86_PAE
++	.quad 0x00cfbb00000067ff	/* 0x60 kernel 4GB code at 0x00000000 */
++	.quad 0x00cfb300000067ff	/* 0x68 kernel 4GB data at 0x00000000 */
++	.quad 0x00cffb00000067ff	/* 0x73 user 4GB code at 0x00000000 */
++	.quad 0x00cff300000067ff	/* 0x7b user 4GB data at 0x00000000 */
++#else
++	.quad 0x00cfbb000000c3ff	/* 0x60 kernel 4GB code at 0x00000000 */
++	.quad 0x00cfb3000000c3ff	/* 0x68 kernel 4GB data at 0x00000000 */
++	.quad 0x00cffb000000c3ff	/* 0x73 user 4GB code at 0x00000000 */
++	.quad 0x00cff3000000c3ff	/* 0x7b user 4GB data at 0x00000000 */
++#endif
++
++	.quad 0x0000000000000000	/* 0x80 TSS descriptor */
++	.quad 0x0000000000000000	/* 0x88 LDT descriptor */
++
++	/* Segments used for calling PnP BIOS */
++	.quad 0x0000000000000000	/* 0x90 32-bit code */
++	.quad 0x0000000000000000	/* 0x98 16-bit code */
++	.quad 0x0000000000000000	/* 0xa0 16-bit data */
++	.quad 0x0000000000000000	/* 0xa8 16-bit data */
++	.quad 0x0000000000000000	/* 0xb0 16-bit data */
++	/*
++	 * The APM segments have byte granularity and their bases
++	 * and limits are set at run time.
++	 */
++	.quad 0x0000000000000000	/* 0xb8 APM CS    code */
++	.quad 0x0000000000000000	/* 0xc0 APM CS 16 code (16 bit) */
++	.quad 0x0000000000000000	/* 0xc8 APM DS    data */
++
++	.quad 0x0000000000000000	/* 0xd0 - ESPFIX 16-bit SS */
++	.quad 0x0000000000000000	/* 0xd8 - unused */
++	.quad 0x0000000000000000	/* 0xe0 - unused */
++	.quad 0x0000000000000000	/* 0xe8 - unused */
++	.quad 0x0000000000000000	/* 0xf0 - unused */
++	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */
++	.fill GDT_ENTRIES-32,8,0
++
++.org 0x3000
++ENTRY(default_ldt)
++
++.org 0x4000
++ENTRY(hypercall_page)
++
++.org 0x5000
++/*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/i386_ksyms.c linux-2.6.12-xen/arch/xen/i386/kernel/i386_ksyms.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/i386_ksyms.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/i386_ksyms.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,185 @@
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/smp.h>
++#include <linux/user.h>
++#include <linux/elfcore.h>
++#include <linux/mca.h>
++#include <linux/sched.h>
++#include <linux/in6.h>
++#include <linux/interrupt.h>
++#include <linux/smp_lock.h>
++#include <linux/pm.h>
++#include <linux/pci.h>
++#include <linux/apm_bios.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/tty.h>
++#include <linux/highmem.h>
++#include <linux/time.h>
++
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/uaccess.h>
++#include <asm/checksum.h>
++#include <asm/io.h>
++#include <asm/delay.h>
++#include <asm/irq.h>
++#include <asm/mmx.h>
++#include <asm/desc.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
++#include <asm/nmi.h>
++#include <asm/ist.h>
++#include <asm/kdebug.h>
++
++extern void dump_thread(struct pt_regs *, struct user *);
++extern spinlock_t rtc_lock;
++
++/* This is definitely a GPL-only symbol */
++EXPORT_SYMBOL_GPL(cpu_gdt_table);
++
++#if defined(CONFIG_APM_MODULE)
++extern void machine_real_restart(unsigned char *, int);
++EXPORT_SYMBOL(machine_real_restart);
++extern void default_idle(void);
++EXPORT_SYMBOL(default_idle);
++#endif
++
++#ifdef CONFIG_SMP
++extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
++extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
++#endif
++
++#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
++extern struct drive_info_struct drive_info;
++EXPORT_SYMBOL(drive_info);
++#endif
++
++extern unsigned long cpu_khz;
++extern unsigned long get_cmos_time(void);
++
++/* platform dependent support */
++EXPORT_SYMBOL(boot_cpu_data);
++#ifdef CONFIG_DISCONTIGMEM
++EXPORT_SYMBOL(node_data);
++EXPORT_SYMBOL(physnode_map);
++#endif
++#ifdef CONFIG_X86_NUMAQ
++EXPORT_SYMBOL(xquad_portio);
++#endif
++EXPORT_SYMBOL(dump_thread);
++EXPORT_SYMBOL(dump_fpu);
++EXPORT_SYMBOL_GPL(kernel_fpu_begin);
++EXPORT_SYMBOL(__ioremap);
++EXPORT_SYMBOL(ioremap_nocache);
++EXPORT_SYMBOL(iounmap);
++EXPORT_SYMBOL(kernel_thread);
++EXPORT_SYMBOL(pm_idle);
++EXPORT_SYMBOL(pm_power_off);
++EXPORT_SYMBOL(get_cmos_time);
++EXPORT_SYMBOL(cpu_khz);
++EXPORT_SYMBOL(apm_info);
++
++EXPORT_SYMBOL(__down_failed);
++EXPORT_SYMBOL(__down_failed_interruptible);
++EXPORT_SYMBOL(__down_failed_trylock);
++EXPORT_SYMBOL(__up_wakeup);
++/* Networking helper routines. */
++EXPORT_SYMBOL(csum_partial_copy_generic);
++/* Delay loops */
++EXPORT_SYMBOL(__ndelay);
++EXPORT_SYMBOL(__udelay);
++EXPORT_SYMBOL(__delay);
++EXPORT_SYMBOL(__const_udelay);
++
++EXPORT_SYMBOL(__get_user_1);
++EXPORT_SYMBOL(__get_user_2);
++EXPORT_SYMBOL(__get_user_4);
++
++EXPORT_SYMBOL(__put_user_1);
++EXPORT_SYMBOL(__put_user_2);
++EXPORT_SYMBOL(__put_user_4);
++EXPORT_SYMBOL(__put_user_8);
++
++EXPORT_SYMBOL(strpbrk);
++EXPORT_SYMBOL(strstr);
++
++EXPORT_SYMBOL(strncpy_from_user);
++EXPORT_SYMBOL(__strncpy_from_user);
++EXPORT_SYMBOL(clear_user);
++EXPORT_SYMBOL(__clear_user);
++EXPORT_SYMBOL(__copy_from_user_ll);
++EXPORT_SYMBOL(__copy_to_user_ll);
++EXPORT_SYMBOL(strnlen_user);
++
++#ifdef CONFIG_PCI
++EXPORT_SYMBOL(pci_mem_start);
++#endif
++
++#ifdef CONFIG_PCI_BIOS
++EXPORT_SYMBOL(pcibios_set_irq_routing);
++EXPORT_SYMBOL(pcibios_get_irq_routing_table);
++#endif
++
++#ifdef CONFIG_X86_USE_3DNOW
++EXPORT_SYMBOL(_mmx_memcpy);
++EXPORT_SYMBOL(mmx_clear_page);
++EXPORT_SYMBOL(mmx_copy_page);
++#endif
++
++#ifdef CONFIG_X86_HT
++EXPORT_SYMBOL(smp_num_siblings);
++EXPORT_SYMBOL(cpu_sibling_map);
++#endif
++
++#ifdef CONFIG_SMP
++EXPORT_SYMBOL(__write_lock_failed);
++EXPORT_SYMBOL(__read_lock_failed);
++
++/* Global SMP stuff */
++EXPORT_SYMBOL(smp_call_function);
++
++/* TLB flushing */
++EXPORT_SYMBOL(flush_tlb_page);
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
++#endif
++
++#ifdef CONFIG_MCA
++EXPORT_SYMBOL(machine_id);
++#endif
++
++#ifdef CONFIG_VT
++EXPORT_SYMBOL(screen_info);
++#endif
++
++EXPORT_SYMBOL(get_wchan);
++
++EXPORT_SYMBOL(rtc_lock);
++
++EXPORT_SYMBOL_GPL(set_nmi_callback);
++EXPORT_SYMBOL_GPL(unset_nmi_callback);
++
++EXPORT_SYMBOL(register_die_notifier);
++#ifdef CONFIG_HAVE_DEC_LOCK
++EXPORT_SYMBOL(_atomic_dec_and_lock);
++#endif
++
++EXPORT_SYMBOL(__PAGE_KERNEL);
++
++#ifdef CONFIG_HIGHMEM
++EXPORT_SYMBOL(kmap);
++EXPORT_SYMBOL(kunmap);
++EXPORT_SYMBOL(kmap_atomic);
++EXPORT_SYMBOL(kunmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_to_page);
++#endif
++
++#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
++EXPORT_SYMBOL(ist_info);
++#endif
++
++EXPORT_SYMBOL(csum_partial);
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/init_task.c linux-2.6.12-xen/arch/xen/i386/kernel/init_task.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/init_task.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/init_task.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,49 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/init.h>
++#include <linux/init_task.h>
++#include <linux/fs.h>
++#include <linux/mqueue.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/desc.h>
++
++static struct fs_struct init_fs = INIT_FS;
++static struct files_struct init_files = INIT_FILES;
++static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
++static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
++
++#define swapper_pg_dir ((pgd_t *)NULL)
++struct mm_struct init_mm = INIT_MM(init_mm);
++#undef swapper_pg_dir
++
++EXPORT_SYMBOL(init_mm);
++
++/*
++ * Initial thread structure.
++ *
++ * We need to make sure that this is THREAD_SIZE aligned due to the
++ * way process stacks are handled. This is done by having a special
++ * "init_task" linker map entry..
++ */
++union thread_union init_thread_union 
++	__attribute__((__section__(".data.init_task"))) =
++		{ INIT_THREAD_INFO(init_task) };
++
++/*
++ * Initial task structure.
++ *
++ * All other task structs will be allocated on slabs in fork.c
++ */
++struct task_struct init_task = INIT_TASK(init_task);
++
++EXPORT_SYMBOL(init_task);
++
++/*
++ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
++ * no more per-task TSS's.
++ */ 
++DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS;
++
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/io_apic.c linux-2.6.12-xen/arch/xen/i386/kernel/io_apic.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/io_apic.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/io_apic.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,2611 @@
++/*
++ *	Intel IO-APIC support for multi-Pentium hosts.
++ *
++ *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ *	Many thanks to Stig Venaas for trying out countless experimental
++ *	patches and reporting/debugging problems patiently!
++ *
++ *	(c) 1999, Multiple IO-APIC support, developed by
++ *	Ken-ichi Yaku <yaku at css1.kbnes.nec.co.jp> and
++ *      Hidemi Kishimoto <kisimoto at css1.kbnes.nec.co.jp>,
++ *	further tested and cleaned up by Zach Brown <zab at redhat.com>
++ *	and Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
++ *					thanks to Eric Gilmore
++ *					and Rolf G. Tews
++ *					for testing these extensively
++ *	Paul Diefenbaugh	:	Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/config.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/compiler.h>
++#include <linux/acpi.h>
++
++#include <linux/sysdev.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/timer.h>
++
++#include <mach_apic.h>
++
++#include "io_ports.h"
++
++#ifdef CONFIG_XEN
++
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/physdev.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq)  ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++	physdev_op_t op;
++	int ret;
++
++	op.cmd = PHYSDEVOP_APIC_READ;
++	op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
++	op.u.apic_op.offset = reg;
++	ret = HYPERVISOR_physdev_op(&op);
++	if (ret)
++		return ret;
++	return op.u.apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++	physdev_op_t op;
++
++	op.cmd = PHYSDEVOP_APIC_WRITE;
++	op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
++	op.u.apic_op.offset = reg;
++	op.u.apic_op.value = value;
++	HYPERVISOR_physdev_op(&op);
++}
++
++#define io_apic_read(a,r)    xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#endif /* CONFIG_XEN */
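
With the two wrappers above in place, the rest of this file keeps using io_apic_read()/io_apic_write() unchanged; under Xen each access simply becomes a PHYSDEVOP_APIC_READ/PHYSDEVOP_APIC_WRITE hypercall instead of an MMIO window access. A small illustration (the helper name is made up; register 0x01 is the IO-APIC version register, whose bits 16-23 hold the number of redirection entries minus one):

/* Sketch: how many redirection entries does IO-APIC 'apic' have? */
static int xen_ioapic_redir_entries(int apic)
{
	unsigned int ver = io_apic_read(apic, 0x01);

	return ((ver >> 16) & 0xff) + 1;
}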
++
++int (*ioapic_renumber_irq)(int ioapic, int irq);
++atomic_t irq_mis_count;
++
++static DEFINE_SPINLOCK(ioapic_lock);
++
++/*
++ *	Is the SiS APIC rmw bug present ?
++ *	-1 = don't know, 0 = no, 1 = yes
++ */
++int sis_apic_bug = -1;
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++/*
++ * Rough estimation of how many shared IRQs there are, can
++ * be changed anytime.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical, we want to do it O(1)
++ *
++ * the indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++	int apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) 	\
++	(platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector)	(vector)
++#endif
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++	static int first_free_entry = NR_IRQS;
++	struct irq_pin_list *entry = irq_2_pin + irq;
++
++	while (entry->next)
++		entry = irq_2_pin + entry->next;
++
++	if (entry->pin != -1) {
++		entry->next = first_free_entry;
++		entry = irq_2_pin + entry->next;
++		if (++first_free_entry >= PIN_MAP_SIZE)
++			panic("io_apic.c: whoops");
++	}
++	entry->apic = apic;
++	entry->pin = pin;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Reroute an IRQ to a different pin.
++ */
++static void __init replace_pin_at_irq(unsigned int irq,
++				      int oldapic, int oldpin,
++				      int newapic, int newpin)
++{
++	struct irq_pin_list *entry = irq_2_pin + irq;
++
++	while (1) {
++		if (entry->apic == oldapic && entry->pin == oldpin) {
++			entry->apic = newapic;
++			entry->pin = newpin;
++		}
++		if (!entry->next)
++			break;
++		entry = irq_2_pin + entry->next;
++	}
++}
++
++static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
++{
++	struct irq_pin_list *entry = irq_2_pin + irq;
++	unsigned int pin, reg;
++
++	for (;;) {
++		pin = entry->pin;
++		if (pin == -1)
++			break;
++		reg = io_apic_read(entry->apic, 0x10 + pin*2);
++		reg &= ~disable;
++		reg |= enable;
++		io_apic_modify(entry->apic, 0x10 + pin*2, reg);
++		if (!entry->next)
++			break;
++		entry = irq_2_pin + entry->next;
++	}
++}
++
++/* mask = 1 */
++static void __mask_IO_APIC_irq (unsigned int irq)
++{
++	__modify_IO_APIC_irq(irq, 0x00010000, 0);
++}
++
++/* mask = 0 */
++static void __unmask_IO_APIC_irq (unsigned int irq)
++{
++	__modify_IO_APIC_irq(irq, 0, 0x00010000);
++}
++
++/* mask = 1, trigger = 0 */
++static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
++{
++	__modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
++}
++
++/* mask = 0, trigger = 1 */
++static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
++{
++	__modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
++}
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__mask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__unmask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
++	
++	/* Check delivery_mode to be sure we're not clearing an SMI pin */
++	spin_lock_irqsave(&ioapic_lock, flags);
++	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++	if (entry.delivery_mode == dest_SMI)
++		return;
++
++	/*
++	 * Disable it in the IO-APIC irq-routing table:
++	 */
++	memset(&entry, 0, sizeof(entry));
++	entry.mask = 1;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++	int apic, pin;
++
++	for (apic = 0; apic < nr_ioapics; apic++)
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++			clear_IO_APIC_pin(apic, pin);
++}
++
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
++{
++	unsigned long flags;
++	int pin;
++	struct irq_pin_list *entry = irq_2_pin + irq;
++	unsigned int apicid_value;
++	
++	apicid_value = cpu_mask_to_apicid(cpumask);
++	/* Prepare to do the io_apic_write */
++	apicid_value = apicid_value << 24;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	for (;;) {
++		pin = entry->pin;
++		if (pin == -1)
++			break;
++		io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
++		if (!entry->next)
++			break;
++		entry = irq_2_pin + entry->next;
++	}
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++#else
++#define clear_IO_APIC() ((void)0)
++#endif
++
++#if defined(CONFIG_IRQBALANCE)
++# include <asm/processor.h>	/* kernel_thread() */
++# include <linux/kernel_stat.h>	/* kstat */
++# include <linux/slab.h>		/* kmalloc() */
++# include <linux/timer.h>	/* time_after() */
++ 
++# ifdef CONFIG_BALANCED_IRQ_DEBUG
++#  define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
++#  define Dprintk(x...) do { TDprintk(x); } while (0)
++# else
++#  define TDprintk(x...) 
++#  define Dprintk(x...) 
++# endif
++
++cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
++
++#define IRQBALANCE_CHECK_ARCH -999
++static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
++static int physical_balance = 0;
++
++static struct irq_cpu_info {
++	unsigned long * last_irq;
++	unsigned long * irq_delta;
++	unsigned long irq;
++} irq_cpu_data[NR_CPUS];
++
++#define CPU_IRQ(cpu)		(irq_cpu_data[cpu].irq)
++#define LAST_CPU_IRQ(cpu,irq)   (irq_cpu_data[cpu].last_irq[irq])
++#define IRQ_DELTA(cpu,irq) 	(irq_cpu_data[cpu].irq_delta[irq])
++
++#define IDLE_ENOUGH(cpu,now) \
++	(idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
++
++#define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
++
++#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
++
++#define MAX_BALANCED_IRQ_INTERVAL	(5*HZ)
++#define MIN_BALANCED_IRQ_INTERVAL	(HZ/2)
++#define BALANCED_IRQ_MORE_DELTA		(HZ/10)
++#define BALANCED_IRQ_LESS_DELTA		(HZ)
++
++static long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;
++
++static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
++			unsigned long now, int direction)
++{
++	int search_idle = 1;
++	int cpu = curr_cpu;
++
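++	/* Jump into the loop body so the first pass already advances past curr_cpu. */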
++	goto inside;
++
++	do {
++		if (unlikely(cpu == curr_cpu))
++			search_idle = 0;
++inside:
++		if (direction == 1) {
++			cpu++;
++			if (cpu >= NR_CPUS)
++				cpu = 0;
++		} else {
++			cpu--;
++			if (cpu == -1)
++				cpu = NR_CPUS-1;
++		}
++	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
++			(search_idle && !IDLE_ENOUGH(cpu,now)));
++
++	return cpu;
++}
++
++static inline void balance_irq(int cpu, int irq)
++{
++	unsigned long now = jiffies;
++	cpumask_t allowed_mask;
++	unsigned int new_cpu;
++		
++	if (irqbalance_disabled)
++		return; 
++
++	cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
++	new_cpu = move(cpu, allowed_mask, now, 1);
++	if (cpu != new_cpu) {
++		irq_desc_t *desc = irq_desc + irq;
++		unsigned long flags;
++
++		spin_lock_irqsave(&desc->lock, flags);
++		pending_irq_balance_cpumask[irq] = cpumask_of_cpu(new_cpu);
++		spin_unlock_irqrestore(&desc->lock, flags);
++	}
++}
++
++static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
++{
++	int i, j;
++	Dprintk("Rotating IRQs among CPUs.\n");
++	for (i = 0; i < NR_CPUS; i++) {
++		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
++			if (!irq_desc[j].action)
++				continue;
++			/* Is it a significant load ?  */
++			if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
++						useful_load_threshold)
++				continue;
++			balance_irq(i, j);
++		}
++	}
++	balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++		balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
++	return;
++}
++
++static void do_irq_balance(void)
++{
++	int i, j;
++	unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
++	unsigned long move_this_load = 0;
++	int max_loaded = 0, min_loaded = 0;
++	int load;
++	unsigned long useful_load_threshold = balanced_irq_interval + 10;
++	int selected_irq;
++	int tmp_loaded, first_attempt = 1;
++	unsigned long tmp_cpu_irq;
++	unsigned long imbalance = 0;
++	cpumask_t allowed_mask, target_cpu_mask, tmp;
++
++	for (i = 0; i < NR_CPUS; i++) {
++		int package_index;
++		CPU_IRQ(i) = 0;
++		if (!cpu_online(i))
++			continue;
++		package_index = CPU_TO_PACKAGEINDEX(i);
++		for (j = 0; j < NR_IRQS; j++) {
++			unsigned long value_now, delta;
++			/* Is this an active IRQ? */
++			if (!irq_desc[j].action)
++				continue;
++			if ( package_index == i )
++				IRQ_DELTA(package_index,j) = 0;
++			/* Determine the total count per processor per IRQ */
++			value_now = (unsigned long) kstat_cpu(i).irqs[j];
++
++			/* Determine the activity per processor per IRQ */
++			delta = value_now - LAST_CPU_IRQ(i,j);
++
++			/* Update last_cpu_irq[][] for the next time */
++			LAST_CPU_IRQ(i,j) = value_now;
++
++			/* Ignore IRQs whose rate is less than the clock */
++			if (delta < useful_load_threshold)
++				continue;
++			/* update the load for the processor or package total */
++			IRQ_DELTA(package_index,j) += delta;
++
++			/* Keep track of the higher numbered sibling as well */
++			if (i != package_index)
++				CPU_IRQ(i) += delta;
++			/*
++			 * We have sibling A and sibling B in the package
++			 *
++			 * cpu_irq[A] = load for cpu A + load for cpu B
++			 * cpu_irq[B] = load for cpu B
++			 */
++			CPU_IRQ(package_index) += delta;
++		}
++	}
++	/* Find the least loaded processor package */
++	for (i = 0; i < NR_CPUS; i++) {
++		if (!cpu_online(i))
++			continue;
++		if (i != CPU_TO_PACKAGEINDEX(i))
++			continue;
++		if (min_cpu_irq > CPU_IRQ(i)) {
++			min_cpu_irq = CPU_IRQ(i);
++			min_loaded = i;
++		}
++	}
++	max_cpu_irq = ULONG_MAX;
++
++tryanothercpu:
++	/* Look for heaviest loaded processor.
++	 * We may come back to get the next heaviest loaded processor.
++	 * Skip processors with trivial loads.
++	 */
++	tmp_cpu_irq = 0;
++	tmp_loaded = -1;
++	for (i = 0; i < NR_CPUS; i++) {
++		if (!cpu_online(i))
++			continue;
++		if (i != CPU_TO_PACKAGEINDEX(i))
++			continue;
++		if (max_cpu_irq <= CPU_IRQ(i)) 
++			continue;
++		if (tmp_cpu_irq < CPU_IRQ(i)) {
++			tmp_cpu_irq = CPU_IRQ(i);
++			tmp_loaded = i;
++		}
++	}
++
++	if (tmp_loaded == -1) {
++	/* In the case of a small number of heavy interrupt sources, we may
++	 * load some of the cpus too much. We use Ingo's original
++	 * approach of rotating them around.
++	 */
++		if (!first_attempt && imbalance >= useful_load_threshold) {
++			rotate_irqs_among_cpus(useful_load_threshold);
++			return;
++		}
++		goto not_worth_the_effort;
++	}
++	
++	first_attempt = 0;		/* heaviest search */
++	max_cpu_irq = tmp_cpu_irq;	/* load */
++	max_loaded = tmp_loaded;	/* processor */
++	imbalance = (max_cpu_irq - min_cpu_irq) / 2;
++	
++	Dprintk("max_loaded cpu = %d\n", max_loaded);
++	Dprintk("min_loaded cpu = %d\n", min_loaded);
++	Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
++	Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
++	Dprintk("load imbalance = %lu\n", imbalance);
++
++	/* if imbalance is less than approx 10% of max load, then
++	 * observe diminishing returns action. - quit
++	 */
++	if (imbalance < (max_cpu_irq >> 3)) {
++		Dprintk("Imbalance too trivial\n");
++		goto not_worth_the_effort;
++	}
++
++tryanotherirq:
++	/* if we select an IRQ to move that can't go where we want, then
++	 * see if there is another one to try.
++	 */
++	move_this_load = 0;
++	selected_irq = -1;
++	for (j = 0; j < NR_IRQS; j++) {
++		/* Is this an active IRQ? */
++		if (!irq_desc[j].action)
++			continue;
++		if (imbalance <= IRQ_DELTA(max_loaded,j))
++			continue;
++		/* Try to find the IRQ that is closest to the imbalance
++		 * without going over.
++		 */
++		if (move_this_load < IRQ_DELTA(max_loaded,j)) {
++			move_this_load = IRQ_DELTA(max_loaded,j);
++			selected_irq = j;
++		}
++	}
++	if (selected_irq == -1) {
++		goto tryanothercpu;
++	}
++
++	imbalance = move_this_load;
++	
++	/* For the physical_balance case, we accumulated both load
++	 * values in one of the siblings' cpu_irq[], so that the same
++	 * code can be used for physical and logical processors
++	 * as much as possible.
++	 *
++	 * NOTE: the cpu_irq[] array holds the sum of the load for
++	 * sibling A and sibling B in the slot for the lowest numbered
++	 * sibling (A), _AND_ the load for sibling B in the slot for
++	 * the higher numbered sibling.
++	 *
++	 * We seek the least loaded sibling by making the comparison
++	 * (A+B)/2 vs B
++	 */
++	load = CPU_IRQ(min_loaded) >> 1;
++	for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
++		if (load > CPU_IRQ(j)) {
++			/* This won't change cpu_sibling_map[min_loaded] */
++			load = CPU_IRQ(j);
++			min_loaded = j;
++		}
++	}
++
++	cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]);
++	target_cpu_mask = cpumask_of_cpu(min_loaded);
++	cpus_and(tmp, target_cpu_mask, allowed_mask);
++
++	if (!cpus_empty(tmp)) {
++		irq_desc_t *desc = irq_desc + selected_irq;
++		unsigned long flags;
++
++		Dprintk("irq = %d moved to cpu = %d\n",
++				selected_irq, min_loaded);
++		/* mark for change destination */
++		spin_lock_irqsave(&desc->lock, flags);
++		pending_irq_balance_cpumask[selected_irq] =
++					cpumask_of_cpu(min_loaded);
++		spin_unlock_irqrestore(&desc->lock, flags);
++		/* Since we made a change, come back sooner to 
++		 * check for more variation.
++		 */
++		balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++			balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
++		return;
++	}
++	goto tryanotherirq;
++
++not_worth_the_effort:
++	/*
++	 * if we did not find an IRQ to move, then adjust the time interval
++	 * upward
++	 */
++	balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
++		balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);	
++	Dprintk("IRQ worth rotating not found\n");
++	return;
++}
++
++static int balanced_irq(void *unused)
++{
++	int i;
++	unsigned long prev_balance_time = jiffies;
++	long time_remaining = balanced_irq_interval;
++
++	daemonize("kirqd");
++	
++	/* push everything to CPU 0 to give us a starting point.  */
++	for (i = 0 ; i < NR_IRQS ; i++) {
++		pending_irq_balance_cpumask[i] = cpumask_of_cpu(0);
++	}
++
++	for ( ; ; ) {
++		set_current_state(TASK_INTERRUPTIBLE);
++		time_remaining = schedule_timeout(time_remaining);
++		try_to_freeze(PF_FREEZE);
++		if (time_after(jiffies,
++				prev_balance_time+balanced_irq_interval)) {
++			preempt_disable();
++			do_irq_balance();
++			prev_balance_time = jiffies;
++			time_remaining = balanced_irq_interval;
++			preempt_enable();
++		}
++	}
++	return 0;
++}
++
++static int __init balanced_irq_init(void)
++{
++	int i;
++	struct cpuinfo_x86 *c;
++	cpumask_t tmp;
++
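++	/*
++	 * tmp is non-empty when CPUs numbered 2 or higher are online; together
++	 * with smp_num_siblings > 1 below this serves as a rough test for more
++	 * than one physical (hyperthreaded) package.
++	 */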
++	cpus_shift_right(tmp, cpu_online_map, 2);
++	c = &boot_cpu_data;
++	/* When not overwritten by the command line ask subarchitecture. */
++	if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
++		irqbalance_disabled = NO_BALANCE_IRQ;
++	if (irqbalance_disabled)
++		return 0;
++	
++	 /* disable irqbalance completely if there is only one processor online */
++	if (num_online_cpus() < 2) {
++		irqbalance_disabled = 1;
++		return 0;
++	}
++	/*
++	 * Enable physical balance only if more than 1 physical processor
++	 * is present
++	 */
++	if (smp_num_siblings > 1 && !cpus_empty(tmp))
++		physical_balance = 1;
++
++	for (i = 0; i < NR_CPUS; i++) {
++		if (!cpu_online(i))
++			continue;
++		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
++			printk(KERN_ERR "balanced_irq_init: out of memory");
++			goto failed;
++		}
++		memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
++		memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
++	}
++	
++	printk(KERN_INFO "Starting balanced_irq\n");
++	if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0) 
++		return 0;
++	else 
++		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
++failed:
++	for (i = 0; i < NR_CPUS; i++) {
++		if(irq_cpu_data[i].irq_delta)
++			kfree(irq_cpu_data[i].irq_delta);
++		if(irq_cpu_data[i].last_irq)
++			kfree(irq_cpu_data[i].last_irq);
++	}
++	return 0;
++}
++
++int __init irqbalance_disable(char *str)
++{
++	irqbalance_disabled = 1;
++	return 0;
++}
++
++__setup("noirqbalance", irqbalance_disable);
++
++static inline void move_irq(int irq)
++{
++	/* note - we hold the desc->lock */
++	if (unlikely(!cpus_empty(pending_irq_balance_cpumask[irq]))) {
++		set_ioapic_affinity_irq(irq, pending_irq_balance_cpumask[irq]);
++		cpus_clear(pending_irq_balance_cpumask[irq]);
++	}
++}
++
++late_initcall(balanced_irq_init);
++
++#else /* !CONFIG_IRQBALANCE */
++static inline void move_irq(int irq) { }
++#endif /* CONFIG_IRQBALANCE */
++
++#ifndef CONFIG_SMP
++void fastcall send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++	unsigned int cfg;
++
++	/*
++	 * Wait for idle.
++	 */
++	apic_wait_icr_idle();
++	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
++	/*
++	 * Send the IPI. The write to APIC_ICR fires this off.
++	 */
++	apic_write_around(APIC_ICR, cfg);
++#endif
++}
++#endif /* !CONFIG_SMP */
++
++
++/*
++ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++
++static int __init ioapic_setup(char *str)
++{
++	skip_ioapic_setup = 1;
++	return 1;
++}
++
++__setup("noapic", ioapic_setup);
++
++static int __init ioapic_pirq_setup(char *str)
++{
++	int i, max;
++	int ints[MAX_PIRQS+1];
++
++	get_options(str, ARRAY_SIZE(ints), ints);
++
++	for (i = 0; i < MAX_PIRQS; i++)
++		pirq_entries[i] = -1;
++
++	pirqs_enabled = 1;
++	apic_printk(APIC_VERBOSE, KERN_INFO
++			"PIRQ redirection, working around broken MP-BIOS.\n");
++	max = MAX_PIRQS;
++	if (ints[0] < MAX_PIRQS)
++		max = ints[0];
++
++	for (i = 0; i < max; i++) {
++		apic_printk(APIC_VERBOSE, KERN_DEBUG
++				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++		/*
++		 * PIRQs are mapped upside down, usually.
++		 */
++		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++	}
++	return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++	int i;
++
++	for (i = 0; i < mp_irq_entries; i++)
++		if (mp_irqs[i].mpc_irqtype == type &&
++		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++		    mp_irqs[i].mpc_dstirq == pin)
++			return i;
++
++	return -1;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int find_isa_irq_pin(int irq, int type)
++{
++	int i;
++
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
++
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++		    ) &&
++		    (mp_irqs[i].mpc_irqtype == type) &&
++		    (mp_irqs[i].mpc_srcbusirq == irq))
++
++			return mp_irqs[i].mpc_dstirq;
++	}
++	return -1;
++}
++#endif
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++	int apic, i, best_guess = -1;
++
++	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
++		"slot:%d, pin:%d.\n", bus, slot, pin);
++	if (mp_bus_id_to_pci_bus[bus] == -1) {
++		printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++		return -1;
++	}
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
++
++		for (apic = 0; apic < nr_ioapics; apic++)
++			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++				break;
++
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++		    !mp_irqs[i].mpc_irqtype &&
++		    (bus == lbus) &&
++		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++			if (!(apic || IO_APIC_IRQ(irq)))
++				continue;
++
++			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++				return irq;
++			/*
++			 * Use the first all-but-pin matching entry as a
++			 * best-guess fuzzy result for broken mptables.
++			 */
++			if (best_guess < 0)
++				best_guess = irq;
++		}
++	}
++	return best_guess;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * This function currently is only a helper for the i386 smp boot process where 
++ * we need to reprogram the ioredtbls to cater for the cpus which have come online
++ * so mask in all cases should simply be TARGET_CPUS
++ */
++void __init setup_ioapic_dest(void)
++{
++	int pin, ioapic, irq, irq_entry;
++
++	if (skip_ioapic_setup == 1)
++		return;
++
++	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++			if (irq_entry == -1)
++				continue;
++			irq = pin_2_irq(irq_entry, ioapic, pin);
++			set_ioapic_affinity_irq(irq, TARGET_CPUS);
++		}
++
++	}
++}
++#endif /* !CONFIG_XEN */
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++	if (irq < 16) {
++		unsigned int port = 0x4d0 + (irq >> 3);
++		return (inb(port) >> (irq & 7)) & 1;
++	}
++	apic_printk(APIC_VERBOSE, KERN_INFO
++			"Broken MPtable reports ISA irq %d\n", irq);
++	return 0;
++}
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value.  If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx)	(0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx)	(0)
++#define default_ISA_polarity(idx)	(0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx)	(1)
++#define default_PCI_polarity(idx)	(1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx)	(1)
++#define default_MCA_polarity(idx)	(0)
++
++/* NEC98 interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_NEC98_trigger(idx)     (0)
++#define default_NEC98_polarity(idx)    (0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++	int bus = mp_irqs[idx].mpc_srcbus;
++	int polarity;
++
++	/*
++	 * Determine IRQ line polarity (high active or low active):
++	 */
++	switch (mp_irqs[idx].mpc_irqflag & 3)
++	{
++		case 0: /* conforms, i.e. bus-type dependent polarity */
++		{
++			switch (mp_bus_id_to_type[bus])
++			{
++				case MP_BUS_ISA: /* ISA pin */
++				{
++					polarity = default_ISA_polarity(idx);
++					break;
++				}
++				case MP_BUS_EISA: /* EISA pin */
++				{
++					polarity = default_EISA_polarity(idx);
++					break;
++				}
++				case MP_BUS_PCI: /* PCI pin */
++				{
++					polarity = default_PCI_polarity(idx);
++					break;
++				}
++				case MP_BUS_MCA: /* MCA pin */
++				{
++					polarity = default_MCA_polarity(idx);
++					break;
++				}
++				case MP_BUS_NEC98: /* NEC 98 pin */
++				{
++					polarity = default_NEC98_polarity(idx);
++					break;
++				}
++				default:
++				{
++					printk(KERN_WARNING "broken BIOS!!\n");
++					polarity = 1;
++					break;
++				}
++			}
++			break;
++		}
++		case 1: /* high active */
++		{
++			polarity = 0;
++			break;
++		}
++		case 2: /* reserved */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			polarity = 1;
++			break;
++		}
++		case 3: /* low active */
++		{
++			polarity = 1;
++			break;
++		}
++		default: /* invalid */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			polarity = 1;
++			break;
++		}
++	}
++	return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++	int bus = mp_irqs[idx].mpc_srcbus;
++	int trigger;
++
++	/*
++	 * Determine IRQ trigger mode (edge or level sensitive):
++	 */
++	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++	{
++		case 0: /* conforms, i.e. bus-type dependent */
++		{
++			switch (mp_bus_id_to_type[bus])
++			{
++				case MP_BUS_ISA: /* ISA pin */
++				{
++					trigger = default_ISA_trigger(idx);
++					break;
++				}
++				case MP_BUS_EISA: /* EISA pin */
++				{
++					trigger = default_EISA_trigger(idx);
++					break;
++				}
++				case MP_BUS_PCI: /* PCI pin */
++				{
++					trigger = default_PCI_trigger(idx);
++					break;
++				}
++				case MP_BUS_MCA: /* MCA pin */
++				{
++					trigger = default_MCA_trigger(idx);
++					break;
++				}
++				case MP_BUS_NEC98: /* NEC 98 pin */
++				{
++					trigger = default_NEC98_trigger(idx);
++					break;
++				}
++				default:
++				{
++					printk(KERN_WARNING "broken BIOS!!\n");
++					trigger = 1;
++					break;
++				}
++			}
++			break;
++		}
++		case 1: /* edge */
++		{
++			trigger = 0;
++			break;
++		}
++		case 2: /* reserved */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			trigger = 1;
++			break;
++		}
++		case 3: /* level */
++		{
++			trigger = 1;
++			break;
++		}
++		default: /* invalid */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			trigger = 0;
++			break;
++		}
++	}
++	return trigger;
++}
++
++static inline int irq_polarity(int idx)
++{
++	return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++	return MPBIOS_trigger(idx);
++}
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++	int irq, i;
++	int bus = mp_irqs[idx].mpc_srcbus;
++
++	/*
++	 * Debugging check, we are in big trouble if this message pops up!
++	 */
++	if (mp_irqs[idx].mpc_dstirq != pin)
++		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++	switch (mp_bus_id_to_type[bus])
++	{
++		case MP_BUS_ISA: /* ISA pin */
++		case MP_BUS_EISA:
++		case MP_BUS_MCA:
++		case MP_BUS_NEC98:
++		{
++			irq = mp_irqs[idx].mpc_srcbusirq;
++			break;
++		}
++		case MP_BUS_PCI: /* PCI pin */
++		{
++			/*
++			 * PCI IRQs are mapped in order
++			 */
++			i = irq = 0;
++			while (i < apic)
++				irq += nr_ioapic_registers[i++];
++			irq += pin;
++
++			/*
++			 * For MPS mode, so far only needed by ES7000 platform
++			 */
++			if (ioapic_renumber_irq)
++				irq = ioapic_renumber_irq(apic, irq);
++
++			break;
++		}
++		default:
++		{
++			printk(KERN_ERR "unknown bus type %d.\n",bus); 
++			irq = 0;
++			break;
++		}
++	}
++
++	/*
++	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
++	 */
++	if ((pin >= 16) && (pin <= 23)) {
++		if (pirq_entries[pin-16] != -1) {
++			if (!pirq_entries[pin-16]) {
++				apic_printk(APIC_VERBOSE, KERN_DEBUG
++						"disabling PIRQ%d\n", pin-16);
++			} else {
++				irq = pirq_entries[pin-16];
++				apic_printk(APIC_VERBOSE, KERN_DEBUG
++						"using PIRQ%d -> IRQ %d\n",
++						pin-16, irq);
++			}
++		}
++	}
++	return irq;
++}
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++	int apic, idx, pin;
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++			idx = find_irq_entry(apic,pin,mp_INT);
++			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++				return irq_trigger(idx);
++		}
++	}
++	/*
++	 * nonexistent IRQs are edge default
++	 */
++	return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS]; /* = { FIRST_DEVICE_VECTOR , 0 }; */
++
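++/*
++ * Under Xen the hypervisor owns the vector space, so instead of picking a
++ * vector locally we request one through the PHYSDEVOP_ASSIGN_VECTOR
++ * physdev hypercall.
++ */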
++int assign_irq_vector(int irq)
++{
++	static int current_vector = FIRST_DEVICE_VECTOR;
++	physdev_op_t op;
++
++	BUG_ON(irq >= NR_IRQ_VECTORS);
++	if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
++		return IO_APIC_VECTOR(irq);
++
++	op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
++	op.u.irq_op.irq = irq;
++	if (HYPERVISOR_physdev_op(&op))
++		return -ENOSPC;
++	current_vector = op.u.irq_op.vector;
++
++	vector_irq[current_vector] = irq;
++	if (irq != AUTO_ASSIGN)
++		IO_APIC_VECTOR(irq) = current_vector;
++
++	return current_vector;
++}
++
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO	-1
++#define IOAPIC_EDGE	0
++#define IOAPIC_LEVEL	1
++
++static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++	if (use_pci_vector() && !platform_legacy_irq(irq)) {
++		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++				trigger == IOAPIC_LEVEL)
++			irq_desc[vector].handler = &ioapic_level_type;
++		else
++			irq_desc[vector].handler = &ioapic_edge_type;
++		set_intr_gate(vector, interrupt[vector]);
++	} else	{
++		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++				trigger == IOAPIC_LEVEL)
++			irq_desc[irq].handler = &ioapic_level_type;
++		else
++			irq_desc[irq].handler = &ioapic_edge_type;
++		set_intr_gate(vector, interrupt[irq]);
++	}
++}
++#else
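++/*
++ * Under Xen, interrupts reach the kernel through event channels rather than
++ * through vectors installed in our own IDT, so no handler registration is
++ * needed here.
++ */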
++#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#endif
++
++static void __init setup_IO_APIC_irqs(void)
++{
++	struct IO_APIC_route_entry entry;
++	int apic, pin, idx, irq, first_notcon = 1, vector;
++	unsigned long flags;
++
++	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++		/*
++		 * add it to the IO-APIC irq-routing table:
++		 */
++		memset(&entry,0,sizeof(entry));
++
++		entry.delivery_mode = INT_DELIVERY_MODE;
++		entry.dest_mode = INT_DEST_MODE;
++		entry.mask = 0;				/* enable IRQ */
++		entry.dest.logical.logical_dest = 
++					cpu_mask_to_apicid(TARGET_CPUS);
++
++		idx = find_irq_entry(apic,pin,mp_INT);
++		if (idx == -1) {
++			if (first_notcon) {
++				apic_printk(APIC_VERBOSE, KERN_DEBUG
++						" IO-APIC (apicid-pin) %d-%d",
++						mp_ioapics[apic].mpc_apicid,
++						pin);
++				first_notcon = 0;
++			} else
++				apic_printk(APIC_VERBOSE, ", %d-%d",
++					mp_ioapics[apic].mpc_apicid, pin);
++			continue;
++		}
++
++		entry.trigger = irq_trigger(idx);
++		entry.polarity = irq_polarity(idx);
++
++		if (irq_trigger(idx)) {
++			entry.trigger = 1;
++			entry.mask = 1;
++		}
++
++		irq = pin_2_irq(idx, apic, pin);
++		/*
++		 * skip adding the timer int on secondary nodes, which causes
++		 * a small but painful rift in the time-space continuum
++		 */
++		if (multi_timer_check(apic, irq))
++			continue;
++		else
++			add_pin_to_irq(irq, apic, pin);
++
++		if (/*!apic &&*/ !IO_APIC_IRQ(irq))
++			continue;
++
++		if (IO_APIC_IRQ(irq)) {
++			vector = assign_irq_vector(irq);
++			entry.vector = vector;
++			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++		
++			if (!apic && (irq < 16))
++				disable_8259A_irq(irq);
++		}
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++	}
++	}
++
++	if (!first_notcon)
++		apic_printk(APIC_VERBOSE, " not connected.\n");
++}
++
++/*
++ * Set up the 8259A-master output pin:
++ */
++#ifndef CONFIG_XEN
++static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
++
++	memset(&entry,0,sizeof(entry));
++
++	disable_8259A_irq(0);
++
++	/* mask LVT0 */
++	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++	/*
++	 * We use logical delivery to get the timer IRQ
++	 * to the first CPU.
++	 */
++	entry.dest_mode = INT_DEST_MODE;
++	entry.mask = 0;					/* unmask IRQ now */
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++	entry.delivery_mode = INT_DELIVERY_MODE;
++	entry.polarity = 0;
++	entry.trigger = 0;
++	entry.vector = vector;
++
++	/*
++	 * The timer IRQ doesn't have to know that behind the
++	 * scene we have a 8259A-master in AEOI mode ...
++	 */
++	irq_desc[0].handler = &ioapic_edge_type;
++
++	/*
++	 * Add it to the IO-APIC irq-routing table:
++	 */
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
++	io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	enable_8259A_irq(0);
++}
++
++static inline void UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __init print_IO_APIC(void)
++{
++	int apic, i;
++	union IO_APIC_reg_00 reg_00;
++	union IO_APIC_reg_01 reg_01;
++	union IO_APIC_reg_02 reg_02;
++	union IO_APIC_reg_03 reg_03;
++	unsigned long flags;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++ 	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++	for (i = 0; i < nr_ioapics; i++)
++		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++	/*
++	 * We are a bit conservative about what we expect.  We have to
++	 * know about every hardware change ASAP.
++	 */
++	printk(KERN_INFO "testing the IO APIC.......................\n");
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(apic, 0);
++	reg_01.raw = io_apic_read(apic, 1);
++	if (reg_01.bits.version >= 0x10)
++		reg_02.raw = io_apic_read(apic, 2);
++	if (reg_01.bits.version >= 0x20)
++		reg_03.raw = io_apic_read(apic, 3);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
++	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
++	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);
++	if (reg_00.bits.ID >= get_physical_broadcast())
++		UNEXPECTED_IO_APIC();
++	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++		UNEXPECTED_IO_APIC();
++
++	printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
++	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
++	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++		(reg_01.bits.entries != 0x2E) &&
++		(reg_01.bits.entries != 0x3F)
++	)
++		UNEXPECTED_IO_APIC();
++
++	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
++	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
++	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
++	)
++		UNEXPECTED_IO_APIC();
++	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++		UNEXPECTED_IO_APIC();
++
++	/*
++	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
++	 * but the value of reg_02 is read as the previous read register
++	 * value, so ignore it if reg_02 == reg_01.
++	 */
++	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
++		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
++		if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++			UNEXPECTED_IO_APIC();
++	}
++
++	/*
++	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
++	 * or reg_03, but the value of reg_0[23] is read as the previous read
++	 * register value, so ignore it if reg_03 == reg_0[12].
++	 */
++	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
++	    reg_03.raw != reg_01.raw) {
++		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
++		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
++		if (reg_03.bits.__reserved_1)
++			UNEXPECTED_IO_APIC();
++	}
++
++	printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++			  " Stat Dest Deli Vect:   \n");
++
++	for (i = 0; i <= reg_01.bits.entries; i++) {
++		struct IO_APIC_route_entry entry;
++
++		spin_lock_irqsave(&ioapic_lock, flags);
++		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++
++		printk(KERN_DEBUG " %02x %03X %02X  ",
++			i,
++			entry.dest.logical.logical_dest,
++			entry.dest.physical.physical_dest
++		);
++
++		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
++			entry.mask,
++			entry.trigger,
++			entry.irr,
++			entry.polarity,
++			entry.delivery_status,
++			entry.dest_mode,
++			entry.delivery_mode,
++			entry.vector
++		);
++	}
++	}
++	if (use_pci_vector())
++		printk(KERN_INFO "Using vector-based indexing\n");
++	printk(KERN_DEBUG "IRQ to pin mappings:\n");
++	for (i = 0; i < NR_IRQS; i++) {
++		struct irq_pin_list *entry = irq_2_pin + i;
++		if (entry->pin < 0)
++			continue;
++ 		if (use_pci_vector() && !platform_legacy_irq(i))
++			printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++		else
++			printk(KERN_DEBUG "IRQ%d ", i);
++		for (;;) {
++			printk("-> %d:%d", entry->apic, entry->pin);
++			if (!entry->next)
++				break;
++			entry = irq_2_pin + entry->next;
++		}
++		printk("\n");
++	}
++
++	printk(KERN_INFO ".................................... done.\n");
++
++	return;
++}
++
++static void print_APIC_bitfield (int base)
++{
++	unsigned int v;
++	int i, j;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++	for (i = 0; i < 8; i++) {
++		v = apic_read(base + i*0x10);
++		for (j = 0; j < 32; j++) {
++			if (v & (1<<j))
++				printk("1");
++			else
++				printk("0");
++		}
++		printk("\n");
++	}
++}
++
++void /*__init*/ print_local_APIC(void * dummy)
++{
++	unsigned int v, ver, maxlvt;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++		smp_processor_id(), hard_smp_processor_id());
++	v = apic_read(APIC_ID);
++	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
++	v = apic_read(APIC_LVR);
++	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++	ver = GET_APIC_VERSION(v);
++	maxlvt = get_maxlvt();
++
++	v = apic_read(APIC_TASKPRI);
++	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
++		v = apic_read(APIC_ARBPRI);
++		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++			v & APIC_ARBPRI_MASK);
++		v = apic_read(APIC_PROCPRI);
++		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++	}
++
++	v = apic_read(APIC_EOI);
++	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++	v = apic_read(APIC_RRR);
++	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++	v = apic_read(APIC_LDR);
++	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++	v = apic_read(APIC_DFR);
++	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++	v = apic_read(APIC_SPIV);
++	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++	printk(KERN_DEBUG "... APIC ISR field:\n");
++	print_APIC_bitfield(APIC_ISR);
++	printk(KERN_DEBUG "... APIC TMR field:\n");
++	print_APIC_bitfield(APIC_TMR);
++	printk(KERN_DEBUG "... APIC IRR field:\n");
++	print_APIC_bitfield(APIC_IRR);
++
++	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
++		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
++			apic_write(APIC_ESR, 0);
++		v = apic_read(APIC_ESR);
++		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++	}
++
++	v = apic_read(APIC_ICR);
++	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++	v = apic_read(APIC_ICR2);
++	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++	v = apic_read(APIC_LVTT);
++	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++	if (maxlvt > 3) {                       /* PC is LVT#4. */
++		v = apic_read(APIC_LVTPC);
++		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++	}
++	v = apic_read(APIC_LVT0);
++	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++	v = apic_read(APIC_LVT1);
++	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++	if (maxlvt > 2) {			/* ERR is LVT#3. */
++		v = apic_read(APIC_LVTERR);
++		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++	}
++
++	v = apic_read(APIC_TMICT);
++	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++	v = apic_read(APIC_TMCCT);
++	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++	v = apic_read(APIC_TDCR);
++	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++	printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++	on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void /*__init*/ print_PIC(void)
++{
++	extern spinlock_t i8259A_lock;
++	unsigned int v;
++	unsigned long flags;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++	spin_lock_irqsave(&i8259A_lock, flags);
++
++	v = inb(0xa1) << 8 | inb(0x21);
++	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
++
++	v = inb(0xa0) << 8 | inb(0x20);
++	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
++
++	outb(0x0b,0xa0);
++	outb(0x0b,0x20);
++	v = inb(0xa0) << 8 | inb(0x20);
++	outb(0x0a,0xa0);
++	outb(0x0a,0x20);
++
++	spin_unlock_irqrestore(&i8259A_lock, flags);
++
++	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
++
++	v = inb(0x4d1) << 8 | inb(0x4d0);
++	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++#else
++void __init print_IO_APIC(void) { }
++#endif /* !CONFIG_XEN */
++
++static void __init enable_IO_APIC(void)
++{
++	union IO_APIC_reg_01 reg_01;
++	int i;
++	unsigned long flags;
++
++	for (i = 0; i < PIN_MAP_SIZE; i++) {
++		irq_2_pin[i].pin = -1;
++		irq_2_pin[i].next = 0;
++	}
++	if (!pirqs_enabled)
++		for (i = 0; i < MAX_PIRQS; i++)
++			pirq_entries[i] = -1;
++
++	/*
++	 * The number of IO-APIC IRQ registers (== #pins):
++	 */
++	for (i = 0; i < nr_ioapics; i++) {
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_01.raw = io_apic_read(i, 1);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		nr_ioapic_registers[i] = reg_01.bits.entries+1;
++	}
++
++	/*
++	 * Do not trust the IO-APIC being empty at bootup
++	 */
++	clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++	/*
++	 * Clear the IO-APIC before rebooting:
++	 */
++	clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++	disconnect_bsp_APIC();
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch at dell.com>  Tue Dec 21 12:25:05 CST 1999
++ */
++
++#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
++static void __init setup_ioapic_ids_from_mpc(void)
++{
++	union IO_APIC_reg_00 reg_00;
++	physid_mask_t phys_id_present_map;
++	int apic;
++	int i;
++	unsigned char old_id;
++	unsigned long flags;
++
++	/*
++	 * This is broken; anything with a real cpu count has to
++	 * circumvent this idiocy regardless.
++	 */
++	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++	/*
++	 * Set the IOAPIC ID to the value stored in the MPC table.
++	 */
++	for (apic = 0; apic < nr_ioapics; apic++) {
++
++		/* Read the register 0 value */
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_00.raw = io_apic_read(apic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		
++		old_id = mp_ioapics[apic].mpc_apicid;
++
++		if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
++			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
++				apic, mp_ioapics[apic].mpc_apicid);
++			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++				reg_00.bits.ID);
++			mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
++		}
++
++		/* Don't check I/O APIC IDs for some xAPIC systems.  They have
++		 * no meaning without the serial APIC bus. */
++		if (NO_IOAPIC_CHECK)
++			continue;
++		/*
++		 * Sanity check, is the ID really free? Every APIC in a
++		 * system must have a unique ID or we get lots of nice
++		 * 'stuck on smp_invalidate_needed IPI wait' messages.
++		 */
++		if (check_apicid_used(phys_id_present_map,
++					mp_ioapics[apic].mpc_apicid)) {
++			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
++				apic, mp_ioapics[apic].mpc_apicid);
++			for (i = 0; i < get_physical_broadcast(); i++)
++				if (!physid_isset(i, phys_id_present_map))
++					break;
++			if (i >= get_physical_broadcast())
++				panic("Max APIC ID exceeded!\n");
++			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++				i);
++			physid_set(i, phys_id_present_map);
++			mp_ioapics[apic].mpc_apicid = i;
++		} else {
++			physid_mask_t tmp;
++			tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
++			apic_printk(APIC_VERBOSE, "Setting %d in the "
++					"phys_id_present_map\n",
++					mp_ioapics[apic].mpc_apicid);
++			physids_or(phys_id_present_map, phys_id_present_map, tmp);
++		}
++
++
++		/*
++		 * We need to adjust the IRQ routing table
++		 * if the ID changed.
++		 */
++		if (old_id != mp_ioapics[apic].mpc_apicid)
++			for (i = 0; i < mp_irq_entries; i++)
++				if (mp_irqs[i].mpc_dstapic == old_id)
++					mp_irqs[i].mpc_dstapic
++						= mp_ioapics[apic].mpc_apicid;
++
++		/*
++		 * Read the right value from the MPC table and
++		 * write it into the ID register.
++	 	 */
++		apic_printk(APIC_VERBOSE, KERN_INFO
++			"...changing IO-APIC physical APIC ID to %d ...",
++			mp_ioapics[apic].mpc_apicid);
++
++		reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(apic, 0, reg_00.raw);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++
++		/*
++		 * Sanity check
++		 */
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_00.raw = io_apic_read(apic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++			printk("could not set ID!\n");
++		else
++			apic_printk(APIC_VERBOSE, " ok.\n");
++	}
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++#ifndef CONFIG_XEN
++/*
++ * There is a nasty bug in some older SMP boards, their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ *	- timer IRQ defaults to IO-APIC IRQ
++ *	- if this function detects that timer IRQs are defunct, then we fall
++ *	  back to ISA timer IRQs
++ */
++static int __init timer_irq_works(void)
++{
++	unsigned long t1 = jiffies;
++
++	local_irq_enable();
++	/* Let ten ticks pass... */
++	mdelay((10 * 1000) / HZ);
++
++	/*
++	 * Expect a few ticks at least, to be sure some possible
++	 * glue logic does not lock up after one or two first
++	 * glue logic does not lock up after the first one or two
++	 * ticks in a non-ExtINT mode.  Also the local APIC
++	 * least one tick may be lost due to delays.
++	 */
++	if (jiffies - t1 > 4)
++		return 1;
++
++	return 0;
++}
++
++/*
++ * In the SMP+IOAPIC case it might happen that there are an unspecified
++ * number of pending IRQ events unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way as thus we do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++	int was_pending = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	if (irq < 16) {
++		disable_8259A_irq(irq);
++		if (i8259A_irq_pending(irq))
++			was_pending = 1;
++	}
++	__unmask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++	move_irq(irq);
++	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++					== (IRQ_PENDING | IRQ_DISABLED))
++		mask_IO_APIC_irq(irq);
++	ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * with a startup need to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++	unmask_IO_APIC_irq(irq);
++
++	return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++	unsigned long v;
++	int i;
++
++	move_irq(irq);
++/*
++ * It appears there is an erratum which affects at least version 0x11
++ * of I/O APIC (that's the 82093AA and cores integrated into various
++ * chipsets).  Under certain conditions a level-triggered interrupt is
++ * erroneously delivered as edge-triggered one but the respective IRR
++ * bit gets set nevertheless.  As a result the I/O unit expects an EOI
++ * message but it will never arrive and further interrupts are blocked
++ * from the source.  The exact reason is so far unknown, but the
++ * phenomenon was observed when two consecutive interrupt requests
++ * from a given source get delivered to the same CPU and the source is
++ * temporarily disabled in between.
++ *
++ * A workaround is to simulate an EOI message manually.  We achieve it
++ * by setting the trigger mode to edge and then to level when the edge
++ * trigger mode gets detected in the TMR of a local APIC for a
++ * level-triggered interrupt.  We mask the source for the time of the
++ * operation to prevent an edge-triggered interrupt escaping meanwhile.
++ * The idea is from Manfred Spraul.  --macro
++ */
++	i = IO_APIC_VECTOR(irq);
++
++	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
++
++	ack_APIC_irq();
++
++	if (!(v & (1 << (i & 0x1f)))) {
++		atomic_inc(&irq_mis_count);
++		spin_lock(&ioapic_lock);
++		__mask_and_edge_IO_APIC_irq(irq);
++		__unmask_and_level_IO_APIC_irq(irq);
++		spin_unlock(&ioapic_lock);
++	}
++}
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	unmask_IO_APIC_irq(irq);
++}
++
++static void set_ioapic_affinity_vector (unsigned int vector,
++					cpumask_t cpu_mask)
++{
++	int irq = vector_to_irq(vector);
++
++	set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++static struct hw_interrupt_type ioapic_edge_type = {
++	.typename 	= "IO-APIC-edge",
++	.startup 	= startup_edge_ioapic,
++	.shutdown 	= shutdown_edge_ioapic,
++	.enable 	= enable_edge_ioapic,
++	.disable 	= disable_edge_ioapic,
++	.ack 		= ack_edge_ioapic,
++	.end 		= end_edge_ioapic,
++	.set_affinity 	= set_ioapic_affinity,
++};
++
++static struct hw_interrupt_type ioapic_level_type = {
++	.typename 	= "IO-APIC-level",
++	.startup 	= startup_level_ioapic,
++	.shutdown 	= shutdown_level_ioapic,
++	.enable 	= enable_level_ioapic,
++	.disable 	= disable_level_ioapic,
++	.ack 		= mask_and_ack_level_ioapic,
++	.end 		= end_level_ioapic,
++	.set_affinity 	= set_ioapic_affinity,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++	int irq;
++
++	/*
++	 * NOTE! The local APIC isn't very good at handling
++	 * multiple interrupts at the same interrupt level.
++	 * As the interrupt level is determined by taking the
++	 * vector number and shifting that right by 4, we
++	 * want to spread these out a bit so that they don't
++	 * all fall in the same interrupt level.
++	 *
++	 * Also, we've got to be careful not to trash gate
++	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
++	 */
++	for (irq = 0; irq < NR_IRQS ; irq++) {
++		int tmp = irq;
++		if (use_pci_vector()) {
++			if (!platform_legacy_irq(tmp))
++				if ((tmp = vector_to_irq(tmp)) == -1)
++					continue;
++		}
++		if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++			/*
++			 * Hmm.. We don't have an entry for this,
++			 * so default to an old-fashioned 8259
++			 * interrupt if we can..
++			 */
++			if (irq < 16)
++				make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++			else
++				/* Strange. Oh, well.. */
++				irq_desc[irq].handler = &no_irq_type;
++#endif
++		}
++	}
++}
++
++#ifndef CONFIG_XEN
++static void enable_lapic_irq (unsigned int irq)
++{
++	unsigned long v;
++
++	v = apic_read(APIC_LVT0);
++	apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++	unsigned long v;
++
++	v = apic_read(APIC_LVT0);
++	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++	ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type = {
++	.typename 	= "local-APIC-edge",
++	.startup 	= NULL, /* startup_irq() not used for IRQ0 */
++	.shutdown 	= NULL, /* shutdown_irq() not used for IRQ0 */
++	.enable 	= enable_lapic_irq,
++	.disable 	= disable_lapic_irq,
++	.ack 		= ack_lapic_irq,
++	.end 		= end_lapic_irq
++};
++
++static void setup_nmi (void)
++{
++	/*
++ 	 * Dirty trick to enable the NMI watchdog ...
++	 * We put the 8259A master into AEOI mode and
++	 * unmask on all local APICs LVT0 as NMI.
++	 *
++	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++	 * is from Maciej W. Rozycki - so we do not have to EOI from
++	 * the NMI handler or the timer interrupt.
++	 */ 
++	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
++
++	on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
++
++	apic_printk(APIC_VERBOSE, " done.\n");
++}
++
++/*
++ * This looks a bit hackish but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic.  ICR does
++ * not support the ExtINT mode, unfortunately.  We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA.  --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++	int pin, i;
++	struct IO_APIC_route_entry entry0, entry1;
++	unsigned char save_control, save_freq_select;
++	unsigned long flags;
++
++	pin = find_isa_irq_pin(8, mp_INT);
++	if (pin == -1)
++		return;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	*(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
++	*(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++	clear_IO_APIC_pin(0, pin);
++
++	memset(&entry1, 0, sizeof(entry1));
++
++	entry1.dest_mode = 0;			/* physical delivery */
++	entry1.mask = 0;			/* unmask IRQ now */
++	entry1.dest.physical.physical_dest = hard_smp_processor_id();
++	entry1.delivery_mode = dest_ExtINT;
++	entry1.polarity = entry0.polarity;
++	entry1.trigger = 0;
++	entry1.vector = 0;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	save_control = CMOS_READ(RTC_CONTROL);
++	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++		   RTC_FREQ_SELECT);
++	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
++	i = 100;
++	while (i-- > 0) {
++		mdelay(10);
++		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++			i -= 10;
++	}
++
++	CMOS_WRITE(save_control, RTC_CONTROL);
++	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++	clear_IO_APIC_pin(0, pin);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
++ * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ */
++static inline void check_timer(void)
++{
++	int pin1, pin2;
++	int vector;
++
++	/*
++	 * get/set the timer IRQ vector:
++	 */
++	disable_8259A_irq(0);
++	vector = assign_irq_vector(0);
++	set_intr_gate(vector, interrupt[0]);
++
++	/*
++	 * Subtle, code in do_timer_interrupt() expects an AEOI
++	 * mode for the 8259A whenever interrupts are routed
++	 * through I/O APICs.  Also IRQ0 has to be enabled in
++	 * the 8259A which implies the virtual wire has to be
++	 * disabled in the local APIC.
++	 */
++	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++	init_8259A(1);
++	timer_ack = 1;
++	enable_8259A_irq(0);
++
++	pin1 = find_isa_irq_pin(0, mp_INT);
++	pin2 = find_isa_irq_pin(0, mp_ExtINT);
++
++	printk(KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
++
++	if (pin1 != -1) {
++		/*
++		 * Ok, does IRQ0 through the IOAPIC work?
++		 */
++		unmask_IO_APIC_irq(0);
++		if (timer_irq_works()) {
++			if (nmi_watchdog == NMI_IO_APIC) {
++				disable_8259A_irq(0);
++				setup_nmi();
++				enable_8259A_irq(0);
++			}
++			return;
++		}
++		clear_IO_APIC_pin(0, pin1);
++		printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
++	}
++
++	printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
++	if (pin2 != -1) {
++		printk("\n..... (found pin %d) ...", pin2);
++		/*
++		 * legacy devices should be connected to IO APIC #0
++		 */
++		setup_ExtINT_IRQ0_pin(pin2, vector);
++		if (timer_irq_works()) {
++			printk("works.\n");
++			if (pin1 != -1)
++				replace_pin_at_irq(0, 0, pin1, 0, pin2);
++			else
++				add_pin_to_irq(0, 0, pin2);
++			if (nmi_watchdog == NMI_IO_APIC) {
++				setup_nmi();
++			}
++			return;
++		}
++		/*
++		 * Cleanup, just in case ...
++		 */
++		clear_IO_APIC_pin(0, pin2);
++	}
++	printk(" failed.\n");
++
++	if (nmi_watchdog == NMI_IO_APIC) {
++		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++		nmi_watchdog = 0;
++	}
++
++	printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++	disable_8259A_irq(0);
++	irq_desc[0].handler = &lapic_irq_type;
++	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
++	enable_8259A_irq(0);
++
++	if (timer_irq_works()) {
++		printk(" works.\n");
++		return;
++	}
++	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++	printk(" failed.\n");
++
++	printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++	timer_ack = 0;
++	init_8259A(0);
++	make_8259A_irq(0);
++	apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
++
++	unlock_ExtINT_logic();
++
++	if (timer_irq_works()) {
++		printk(" works.\n");
++		return;
++	}
++	printk(" failed :(.\n");
++	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
++		"report.  Then try booting with the 'noapic' option");
++}
++#else
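++/*
++ * Xen delivers the timer as a virtual (event-channel) interrupt, so the
++ * 8254/IO-APIC timer routing above does not need to be verified.
++ */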
++#define check_timer() ((void)0)
++#endif
++
++/*
++ *
++ * IRQs that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ *   Linux doesn't really care, as it's not actually used
++ *   for any interrupt handling anyway.
++ */
++#define PIC_IRQS	(1 << PIC_CASCADE_IR)
++
++void __init setup_IO_APIC(void)
++{
++	enable_IO_APIC();
++
++	if (acpi_ioapic)
++		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
++	else
++		io_apic_irqs = ~PIC_IRQS;
++
++	printk("ENABLING IO-APIC IRQs\n");
++
++	/*
++	 * Set up IO-APIC IRQ routing.
++	 */
++	if (!acpi_ioapic)
++		setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++	sync_Arb_IDs();
++#endif
++	setup_IO_APIC_irqs();
++	init_IO_APIC_traps();
++	check_timer();
++	if (!acpi_ioapic)
++		print_IO_APIC();
++}
++
++/*
++ *	Called after all the initialization is done. If we didn't find any
++ *	APIC bugs then we can allow the modify fast path.
++ */
++ 
++static int __init io_apic_bug_finalize(void)
++{
++	if(sis_apic_bug == -1)
++		sis_apic_bug = 0;
++	return 0;
++}
++
++late_initcall(io_apic_bug_finalize);
++
++struct sysfs_ioapic_data {
++	struct sys_device dev;
++	struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++	struct IO_APIC_route_entry *entry;
++	struct sysfs_ioapic_data *data;
++	unsigned long flags;
++	int i;
++	
++	data = container_of(dev, struct sysfs_ioapic_data, dev);
++	entry = data->entry;
++	spin_lock_irqsave(&ioapic_lock, flags);
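++	/*
++	 * Each redirection entry spans two 32-bit I/O APIC registers:
++	 * 0x10 + 2*i holds the low word, 0x11 + 2*i the high word.
++	 */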
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++		*(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++		*(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++	}
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++	struct IO_APIC_route_entry *entry;
++	struct sysfs_ioapic_data *data;
++	unsigned long flags;
++	union IO_APIC_reg_00 reg_00;
++	int i;
++	
++	data = container_of(dev, struct sysfs_ioapic_data, dev);
++	entry = data->entry;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(dev->id, 0);
++	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++		io_apic_write(dev->id, 0, reg_00.raw);
++	}
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++		io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++		io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++	}
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++	set_kset_name("ioapic"),
++	.suspend = ioapic_suspend,
++	.resume = ioapic_resume,
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++	struct sys_device * dev;
++	int i, size, error = 0;
++
++	error = sysdev_class_register(&ioapic_sysdev_class);
++	if (error)
++		return error;
++
++	for (i = 0; i < nr_ioapics; i++ ) {
++		size = sizeof(struct sys_device) + nr_ioapic_registers[i] 
++			* sizeof(struct IO_APIC_route_entry);
++		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++		if (!mp_ioapic_data[i]) {
++			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++			continue;
++		}
++		memset(mp_ioapic_data[i], 0, size);
++		dev = &mp_ioapic_data[i]->dev;
++		dev->id = i; 
++		dev->cls = &ioapic_sysdev_class;
++		error = sysdev_register(dev);
++		if (error) {
++			kfree(mp_ioapic_data[i]);
++			mp_ioapic_data[i] = NULL;
++			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++			continue;
++		}
++	}
++
++	return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++                          ACPI-based IOAPIC Configuration
++   -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI_BOOT
++
++int __init io_apic_get_unique_id (int ioapic, int apic_id)
++{
++#ifndef CONFIG_XEN
++	union IO_APIC_reg_00 reg_00;
++	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
++	physid_mask_t tmp;
++	unsigned long flags;
++	int i = 0;
++
++	/*
++	 * The P4 platform supports up to 256 APIC IDs on two separate APIC 
++	 * buses (one for LAPICs, one for IOAPICs), where predecessors only 
++	 * support up to 16 on one shared APIC bus.
++	 * 
++	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
++	 *      advantage of new APIC bus architecture.
++	 */
++
++	if (physids_empty(apic_id_map))
++		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(ioapic, 0);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	if (apic_id >= get_physical_broadcast()) {
++		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
++			"%d\n", ioapic, apic_id, reg_00.bits.ID);
++		apic_id = reg_00.bits.ID;
++	}
++
++	/*
++	 * Every APIC in a system must have a unique ID or we get lots of nice 
++	 * 'stuck on smp_invalidate_needed IPI wait' messages.
++	 */
++	if (check_apicid_used(apic_id_map, apic_id)) {
++
++		for (i = 0; i < get_physical_broadcast(); i++) {
++			if (!check_apicid_used(apic_id_map, i))
++				break;
++		}
++
++		if (i == get_physical_broadcast())
++			panic("Max apic_id exceeded!\n");
++
++		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
++			"trying %d\n", ioapic, apic_id, i);
++
++		apic_id = i;
++	} 
++
++	tmp = apicid_to_cpu_present(apic_id);
++	physids_or(apic_id_map, apic_id_map, tmp);
++
++	if (reg_00.bits.ID != apic_id) {
++		reg_00.bits.ID = apic_id;
++
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(ioapic, 0, reg_00.raw);
++		reg_00.raw = io_apic_read(ioapic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++
++		/* Sanity check */
++		if (reg_00.bits.ID != apic_id)
++			panic("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
++	}
++
++	apic_printk(APIC_VERBOSE, KERN_INFO
++			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
++#endif /* !CONFIG_XEN */
++
++	return apic_id;
++}
++
++
++int __init io_apic_get_version (int ioapic)
++{
++	union IO_APIC_reg_01	reg_01;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_01.raw = io_apic_read(ioapic, 1);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++	union IO_APIC_reg_01	reg_01;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_01.raw = io_apic_read(ioapic, 1);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
++
++	if (!IO_APIC_IRQ(irq)) {
++		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++			ioapic);
++		return -EINVAL;
++	}
++
++	/*
++	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++	 * Note that we mask (disable) IRQs now -- these get enabled when the
++	 * corresponding device driver registers for this IRQ.
++	 */
++
++	memset(&entry,0,sizeof(entry));
++
++	entry.delivery_mode = INT_DELIVERY_MODE;
++	entry.dest_mode = INT_DEST_MODE;
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++	entry.trigger = edge_level;
++	entry.polarity = active_high_low;
++	entry.mask  = 1;
++
++	/*
++	 * IRQs < 16 are already in the irq_2_pin[] map
++	 */
++	if (irq >= 16)
++		add_pin_to_irq(irq, ioapic, pin);
++
++	entry.vector = assign_irq_vector(irq);
++
++	apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
++		"(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
++		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++		edge_level, active_high_low);
++
++	ioapic_register_intr(irq, entry.vector, edge_level);
++
++	if (!ioapic && (irq < 16))
++		disable_8259A_irq(irq);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
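++	/* The high dword (destination) is written first, the low dword
++	 * (vector, trigger, polarity, mask) last. */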
++	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return 0;
++}
++
++#endif /*CONFIG_ACPI_BOOT*/
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/ioport.c linux-2.6.12-xen/arch/xen/i386/kernel/ioport.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/ioport.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/ioport.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,125 @@
++/*
++ *	linux/arch/i386/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <asm-xen/xen-public/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++	unsigned long mask;
++	unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
++	unsigned int low_index = base & (BITS_PER_LONG-1);
++	int length = low_index + extent;
++
++	if (low_index != 0) {
++		mask = (~0UL << low_index);
++		if (length < BITS_PER_LONG)
++			mask &= ~(~0UL << length);
++		if (new_value)
++			*bitmap_base++ |= mask;
++		else
++			*bitmap_base++ &= ~mask;
++		length -= BITS_PER_LONG;
++	}
++
++	mask = (new_value ? ~0UL : 0UL);
++	while (length >= BITS_PER_LONG) {
++		*bitmap_base++ = mask;
++		length -= BITS_PER_LONG;
++	}
++
++	if (length > 0) {
++		mask = ~(~0UL << length);
++		if (new_value)
++			*bitmap_base++ |= mask;
++		else
++			*bitmap_base++ &= ~mask;
++	}
++}
++
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++	struct thread_struct * t = &current->thread;
++	unsigned long *bitmap;
++	physdev_op_t op;
++
++	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++		return -EINVAL;
++	if (turn_on && !capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
++	/*
++	 * If it's the first ioperm() call in this thread's lifetime, set the
++	 * IO bitmap up. ioperm() is much less timing critical than clone(),
++	 * this is why we delay this operation until now:
++	 */
++	if (!t->io_bitmap_ptr) {
++		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++		if (!bitmap)
++			return -ENOMEM;
++
++		memset(bitmap, 0xff, IO_BITMAP_BYTES);
++		t->io_bitmap_ptr = bitmap;
++
++		op.cmd = PHYSDEVOP_SET_IOBITMAP;
++		op.u.set_iobitmap.bitmap   = (char *)bitmap;
++		op.u.set_iobitmap.nr_ports = IO_BITMAP_BITS;
++		HYPERVISOR_physdev_op(&op);
++	}
++
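++	/*
++	 * In the I/O bitmap a cleared bit grants port access and a set bit
++	 * denies it (the bitmap starts out all-ones above), so the requested
++	 * value is inverted before being written.
++	 */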
++	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++	return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ * Here we just change the eflags value on the stack: we allow
++ * only the super-user to do it. This depends on the stack-layout
++ * on system-call entry - see also fork() and the signal handling
++ * code.
++ */
++
++asmlinkage long sys_iopl(unsigned int new_io_pl)
++{
++	unsigned int old_io_pl = current->thread.io_pl;
++	physdev_op_t op;
++
++	if (new_io_pl > 3)
++		return -EINVAL;
++
++	/* Need "raw I/O" privileges for direct port access. */
++	if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
++	/* Change our version of the privilege levels. */
++	current->thread.io_pl = new_io_pl;
++
++	/* Force the change at ring 0. */
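++	/* An IOPL of 0 is passed to the hypervisor as 1, presumably because
++	 * the guest kernel itself does not run in ring 0 under Xen and must
++	 * keep access for its own ring. */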
++	op.cmd             = PHYSDEVOP_SET_IOPL;
++	op.u.set_iopl.iopl = (new_io_pl == 0) ? 1 : new_io_pl;
++	HYPERVISOR_physdev_op(&op);
++
++	return 0;
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/irq.c linux-2.6.12-xen/arch/xen/i386/kernel/irq.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/irq.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/irq.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,300 @@
++/*
++ *	linux/arch/i386/kernel/irq.c
++ *
++ *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86-specific interrupt
++ * entry, irq-stacks and irq statistics code. All the remaining
++ * irq logic is done by the generic kernel/irq/ code and
++ * by the x86-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <asm/uaccess.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/interrupt.h>
++#include <linux/kernel_stat.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/delay.h>
++
++DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp;
++EXPORT_PER_CPU_SYMBOL(irq_stat);
++
++#ifndef CONFIG_X86_LOCAL_APIC
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * Each architecture has to answer this itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++	printk("unexpected IRQ trap at vector %02x\n", irq);
++}
++#endif
++
++#ifdef CONFIG_4KSTACKS
++/*
++ * per-CPU IRQ handling contexts (thread information and stack)
++ */
++union irq_ctx {
++	struct thread_info      tinfo;
++	u32                     stack[THREAD_SIZE/sizeof(u32)];
++};
++
++static union irq_ctx *hardirq_ctx[NR_CPUS];
++static union irq_ctx *softirq_ctx[NR_CPUS];
++#endif
++
++/*
++ * do_IRQ handles all normal device IRQ's (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++fastcall unsigned int do_IRQ(struct pt_regs *regs)
++{	
++	/* high bits used in ret_from_ code */
++	int irq = regs->orig_eax & __IRQ_MASK(HARDIRQ_BITS);
++#ifdef CONFIG_4KSTACKS
++	union irq_ctx *curctx, *irqctx;
++	u32 *isp;
++#endif
++
++	irq_enter();
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++	/* Debugging check for stack overflow: is there less than 1KB free? */
++	{
++		long esp;
++
++		__asm__ __volatile__("andl %%esp,%0" :
++					"=r" (esp) : "0" (THREAD_SIZE - 1));
++		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
++			printk("do_IRQ: stack overflow: %ld\n",
++				esp - sizeof(struct thread_info));
++			dump_stack();
++		}
++	}
++#endif
++
++#ifdef CONFIG_4KSTACKS
++
++	curctx = (union irq_ctx *) current_thread_info();
++	irqctx = hardirq_ctx[smp_processor_id()];
++
++	/*
++	 * this is where we switch to the IRQ stack. However, if we are
++	 * already using the IRQ stack (because we interrupted a hardirq
++	 * handler) we can't do that and just have to keep using the
++	 * current stack (which is the irq stack already after all)
++	 */
++	if (curctx != irqctx) {
++		int arg1, arg2, ebx;
++
++		/* build the stack frame on the IRQ stack */
++		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++		irqctx->tinfo.task = curctx->tinfo.task;
++		irqctx->tinfo.previous_esp = current_stack_pointer;
++
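++		/*
++		 * Swap %esp with the prepared IRQ-stack pointer (held in
++		 * %ebx), run __do_IRQ() on that stack, then restore the
++		 * original stack pointer.
++		 */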
++		asm volatile(
++			"       xchgl   %%ebx,%%esp      \n"
++			"       call    __do_IRQ         \n"
++			"       movl   %%ebx,%%esp      \n"
++			: "=a" (arg1), "=d" (arg2), "=b" (ebx)
++			:  "0" (irq),   "1" (regs),  "2" (isp)
++			: "memory", "cc", "ecx"
++		);
++	} else
++#endif
++		__do_IRQ(irq, regs);
++
++	irq_exit();
++
++	return 1;
++}
++
++#ifdef CONFIG_4KSTACKS
++
++/*
++ * These should really be __section__(".bss.page_aligned") as well, but
++ * gcc 3.0 and earlier don't handle that correctly.
++ */
++static char softirq_stack[NR_CPUS * THREAD_SIZE]
++		__attribute__((__aligned__(THREAD_SIZE)));
++
++static char hardirq_stack[NR_CPUS * THREAD_SIZE]
++		__attribute__((__aligned__(THREAD_SIZE)));
++
++/*
++ * allocate per-cpu stacks for hardirq and for softirq processing
++ */
++void irq_ctx_init(int cpu)
++{
++	union irq_ctx *irqctx;
++
++	if (hardirq_ctx[cpu])
++		return;
++
++	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
++	irqctx->tinfo.task              = NULL;
++	irqctx->tinfo.exec_domain       = NULL;
++	irqctx->tinfo.cpu               = cpu;
++	irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
++	irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
++
++	hardirq_ctx[cpu] = irqctx;
++
++	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
++	irqctx->tinfo.task              = NULL;
++	irqctx->tinfo.exec_domain       = NULL;
++	irqctx->tinfo.cpu               = cpu;
++	irqctx->tinfo.preempt_count     = SOFTIRQ_OFFSET;
++	irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
++
++	softirq_ctx[cpu] = irqctx;
++
++	printk("CPU %u irqstacks, hard=%p soft=%p\n",
++		cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
++}
++
++extern asmlinkage void __do_softirq(void);
++
++asmlinkage void do_softirq(void)
++{
++	unsigned long flags;
++	struct thread_info *curctx;
++	union irq_ctx *irqctx;
++	u32 *isp;
++
++	if (in_interrupt())
++		return;
++
++	local_irq_save(flags);
++
++	if (local_softirq_pending()) {
++		curctx = current_thread_info();
++		irqctx = softirq_ctx[smp_processor_id()];
++		irqctx->tinfo.task = curctx->task;
++		irqctx->tinfo.previous_esp = current_stack_pointer;
++
++		/* build the stack frame on the softirq stack */
++		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++
++		asm volatile(
++			"       xchgl   %%ebx,%%esp     \n"
++			"       call    __do_softirq    \n"
++			"       movl    %%ebx,%%esp     \n"
++			: "=b"(isp)
++			: "0"(isp)
++			: "memory", "cc", "edx", "ecx", "eax"
++		);
++	}
++
++	local_irq_restore(flags);
++}
++
++EXPORT_SYMBOL(do_softirq);
++#endif
++
++/*
++ * Interrupt statistics:
++ */
++
++atomic_t irq_err_count;
++
++/*
++ * /proc/interrupts printing:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++	int i = *(loff_t *) v, j;
++	struct irqaction * action;
++	unsigned long flags;
++
++	if (i == 0) {
++		seq_printf(p, "           ");
++		for_each_cpu(j)
++			seq_printf(p, "CPU%d       ",j);
++		seq_putc(p, '\n');
++	}
++
++	if (i < NR_IRQS) {
++		spin_lock_irqsave(&irq_desc[i].lock, flags);
++		action = irq_desc[i].action;
++		if (!action)
++			goto skip;
++		seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++		seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++		for_each_cpu(j)
++			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++#endif
++		seq_printf(p, " %14s", irq_desc[i].handler->typename);
++		seq_printf(p, "  %s", action->name);
++
++		for (action=action->next; action; action = action->next)
++			seq_printf(p, ", %s", action->name);
++
++		seq_putc(p, '\n');
++skip:
++		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++	} else if (i == NR_IRQS) {
++		seq_printf(p, "NMI: ");
++		for_each_cpu(j)
++ 			seq_printf(p, "%10u ", nmi_count(j));
++		seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++		seq_printf(p, "LOC: ");
++		for_each_cpu(j)
++			seq_printf(p, "%10u ", per_cpu(irq_stat,j).apic_timer_irqs);
++		seq_putc(p, '\n');
++#endif
++		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#if defined(CONFIG_X86_IO_APIC)
++		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++	}
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
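++/*
++ * Re-target interrupts away from a CPU that is going offline; 'map' is the
++ * set of CPUs that remain online.
++ */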
++void fixup_irqs(cpumask_t map)
++{
++	unsigned int irq;
++	static int warned;
++
++	for (irq = 0; irq < NR_IRQS; irq++) {
++		cpumask_t mask;
++		if (irq == 2)
++			continue;
++
++		cpus_and(mask, irq_affinity[irq], map);
++		if (any_online_cpu(mask) == NR_CPUS) {
++			/*printk("Breaking affinity for irq %i\n", irq);*/
++			mask = map;
++		}
++		if (irq_desc[irq].handler->set_affinity)
++			irq_desc[irq].handler->set_affinity(irq, mask);
++		else if (irq_desc[irq].action && !(warned++))
++			printk("Cannot set affinity for irq %i\n", irq);
++	}
++
++#if 0
++	barrier();
++	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
++	   [note the nop - the interrupt-enable boundary on x86 is two
++	   instructions from sti] - to flush out pending hardirqs and
++	   IPIs. After this point nothing is supposed to reach this CPU." */
++	__asm__ __volatile__("sti; nop; cli");
++	barrier();
++#else
++	/* That doesn't seem sufficient.  Give it 1ms. */
++	local_irq_enable();
++	mdelay(1);
++	local_irq_disable();
++#endif
++}
++#endif
++
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/ldt.c linux-2.6.12-xen/arch/xen/i386/kernel/ldt.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/ldt.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/ldt.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,268 @@
++/*
++ * linux/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++	if (current->active_mm)
++		load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++{
++	void *oldldt;
++	void *newldt;
++	int oldsize;
++
++	if (mincount <= pc->size)
++		return 0;
++	oldsize = pc->size;
++	mincount = (mincount+511)&(~511);
++	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++	else
++		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++	if (!newldt)
++		return -ENOMEM;
++
++	if (oldsize)
++		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++	oldldt = pc->ldt;
++	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++	pc->ldt = newldt;
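++	/*
++	 * Publish the new table before the new size, so a concurrent reader
++	 * never sees a size larger than the table it is looking at.
++	 */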
++	wmb();
++	pc->size = mincount;
++	wmb();
++
++	if (reload) {
++#ifdef CONFIG_SMP
++		cpumask_t mask;
++		preempt_disable();
++#endif
++		make_pages_readonly(
++			pc->ldt,
++			(pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		load_LDT(pc);
++#ifdef CONFIG_SMP
++		mask = cpumask_of_cpu(smp_processor_id());
++		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++			smp_call_function(flush_ldt, NULL, 1, 1);
++		preempt_enable();
++#endif
++	}
++	if (oldsize) {
++		make_pages_writable(
++			oldldt,
++			(oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(oldldt);
++		else
++			kfree(oldldt);
++	}
++	return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++	int err = alloc_ldt(new, old->size, 0);
++	if (err < 0)
++		return err;
++	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++	make_pages_readonly(
++		new->ldt,
++		(new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++		XENFEAT_writable_descriptor_tables);
++	return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++	struct mm_struct * old_mm;
++	int retval = 0;
++
++	init_MUTEX(&mm->context.sem);
++	mm->context.size = 0;
++	old_mm = current->mm;
++	if (old_mm && old_mm->context.size > 0) {
++		down(&old_mm->context.sem);
++		retval = copy_ldt(&mm->context, &old_mm->context);
++		up(&old_mm->context.sem);
++	}
++	return retval;
++}
++
++/*
++ * No need to lock the MM as we are the last user
++ */
++void destroy_context(struct mm_struct *mm)
++{
++	if (mm->context.size) {
++		if (mm == current->active_mm)
++			clear_LDT();
++		make_pages_writable(
++			mm->context.ldt,
++			(mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(mm->context.ldt);
++		else
++			kfree(mm->context.ldt);
++		mm->context.size = 0;
++	}
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++	int err;
++	unsigned long size;
++	struct mm_struct * mm = current->mm;
++
++	if (!mm->context.size)
++		return 0;
++	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++	down(&mm->context.sem);
++	size = mm->context.size*LDT_ENTRY_SIZE;
++	if (size > bytecount)
++		size = bytecount;
++
++	err = 0;
++	if (copy_to_user(ptr, mm->context.ldt, size))
++		err = -EFAULT;
++	up(&mm->context.sem);
++	if (err < 0)
++		goto error_return;
++	if (size != bytecount) {
++		/* zero-fill the rest */
++		if (clear_user(ptr+size, bytecount-size) != 0) {
++			err = -EFAULT;
++			goto error_return;
++		}
++	}
++	return bytecount;
++error_return:
++	return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++	int err;
++	unsigned long size;
++	void *address;
++
++	err = 0;
++	address = &default_ldt[0];
++	size = 5*sizeof(struct desc_struct);
++	if (size > bytecount)
++		size = bytecount;
++
++	err = size;
++	if (copy_to_user(ptr, address, size))
++		err = -EFAULT;
++
++	return err;
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++	struct mm_struct * mm = current->mm;
++	__u32 entry_1, entry_2;
++	int error;
++	struct user_desc ldt_info;
++
++	error = -EINVAL;
++	if (bytecount != sizeof(ldt_info))
++		goto out;
++	error = -EFAULT; 	
++	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
++		goto out;
++
++	error = -EINVAL;
++	if (ldt_info.entry_number >= LDT_ENTRIES)
++		goto out;
++	if (ldt_info.contents == 3) {
++		if (oldmode)
++			goto out;
++		if (ldt_info.seg_not_present == 0)
++			goto out;
++	}
++
++	down(&mm->context.sem);
++	if (ldt_info.entry_number >= mm->context.size) {
++		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++		if (error < 0)
++			goto out_unlock;
++	}
++
++   	/* Allow LDTs to be cleared by the user. */
++   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++		if (oldmode || LDT_empty(&ldt_info)) {
++			entry_1 = 0;
++			entry_2 = 0;
++			goto install;
++		}
++	}
++
++	entry_1 = LDT_entry_a(&ldt_info);
++	entry_2 = LDT_entry_b(&ldt_info);
++	if (oldmode)
++		entry_2 &= ~(1 << 20);
++
++	/* Install the new entry ...  */
++install:
++	error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
++				entry_1, entry_2);
++
++out_unlock:
++	up(&mm->context.sem);
++out:
++	return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++	int ret = -ENOSYS;
++
++	switch (func) {
++	case 0:
++		ret = read_ldt(ptr, bytecount);
++		break;
++	case 1:
++		ret = write_ldt(ptr, bytecount, 1);
++		break;
++	case 2:
++		ret = read_default_ldt(ptr, bytecount);
++		break;
++	case 0x11:
++		ret = write_ldt(ptr, bytecount, 0);
++		break;
++	}
++	return ret;
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/Makefile linux-2.6.12-xen/arch/xen/i386/kernel/Makefile
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/Makefile	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,100 @@
++#
++# Makefile for the linux kernel.
++#
++
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++CFLAGS	+= -Iarch/$(XENARCH)/kernel
++AFLAGS	+= -Iarch/$(XENARCH)/kernel
++
++extra-y := head.o init_task.o
++
++obj-y	:= process.o signal.o entry.o traps.o \
++		time.o ioport.o ldt.o setup.o \
++		pci-dma.o i386_ksyms.o irq.o quirks.o fixup.o
++
++c-obj-y	:= semaphore.o vm86.o \
++		ptrace.o sys_i386.o \
++		i387.o dmi_scan.o bootflag.o
++s-obj-y	:=
++
++obj-y				+= cpu/
++#obj-y				+= timers/
++obj-$(CONFIG_ACPI_BOOT)		+= acpi/
++#c-obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot.o
++c-obj-$(CONFIG_MCA)		+= mca.o
++c-obj-$(CONFIG_X86_MSR)		+= msr.o
++c-obj-$(CONFIG_X86_CPUID)	+= cpuid.o
++obj-$(CONFIG_MICROCODE)		+= microcode.o
++c-obj-$(CONFIG_APM)		+= apm.o
++obj-$(CONFIG_X86_SMP)		+= smp.o
++#obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
++obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
++obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o
++c-obj-$(CONFIG_X86_LOCAL_APIC)	+= nmi.o
++obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
++c-obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups.o
++c-obj-$(CONFIG_X86_NUMAQ)	+= numaq.o
++c-obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit.o
++c-obj-$(CONFIG_MODULES)		+= module.o
++c-obj-y				+= sysenter.o
++obj-y				+= vsyscall.o
++c-obj-$(CONFIG_ACPI_SRAT) 	+= srat.o
++c-obj-$(CONFIG_HPET_TIMER) 	+= time_hpet.o
++c-obj-$(CONFIG_EFI) 		+= efi.o efi_stub.o
++c-obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
++c-obj-$(CONFIG_SMP_ALTERNATIVES)+= smpalts.o
++obj-$(CONFIG_SWIOTLB)		+= swiotlb.o
++
++EXTRA_AFLAGS   := -traditional
++
++c-obj-$(CONFIG_SCx200)		+= scx200.o
++
++# vsyscall.o contains the vsyscall DSO images as __initdata.
++# We must build both images before we can assemble it.
++# Note: kbuild does not track this dependency due to usage of .incbin
++$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
++targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
++targets += vsyscall-note.o vsyscall.lds
++
++# The DSO images are built using a special linker script.
++quiet_cmd_syscall = SYSCALL $@
++      cmd_syscall = $(CC) -m elf_i386 -nostdlib $(SYSCFLAGS_$(@F)) \
++		          -Wl,-T,$(filter-out FORCE,$^) -o $@
++
++export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH)
++
++vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
++SYSCFLAGS_vsyscall-sysenter.so	= $(vsyscall-flags)
++SYSCFLAGS_vsyscall-int80.so	= $(vsyscall-flags)
++
++$(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
++$(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
++		      $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
++	$(call if_changed,syscall)
++
++# We also create a special relocatable object that should mirror the symbol
++# table and layout of the linked DSO.  With ld -R we can then refer to
++# these symbols in the kernel code rather than hand-coded addresses.
++extra-y += vsyscall-syms.o
++$(obj)/built-in.o: $(obj)/vsyscall-syms.o
++$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
++
++SYSCFLAGS_vsyscall-syms.o = -r
++$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
++			$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
++	$(call if_changed,syscall)
++
++c-link	:=
++s-link	:= vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o vsyscall.lds.o
++
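++# Files listed in c-obj-*/s-obj-* are unmodified i386 sources; the rule below
++# symlinks them in from arch/i386/kernel so they can be built here.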
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-obj-m) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
++	@ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
++
++$(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
++
++obj-y	+= $(c-obj-y) $(s-obj-y)
++obj-m	+= $(c-obj-m)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-m) $(c-obj-) $(c-link))
++clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/microcode.c linux-2.6.12-xen/arch/xen/i386/kernel/microcode.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/microcode.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/microcode.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,163 @@
++/*
++ *	Intel CPU Microcode Update Driver for Linux
++ *
++ *	Copyright (C) 2000-2004 Tigran Aivazian
++ *
++ *	This driver allows upgrading the microcode on Intel processors
++ *	belonging to IA-32 family - PentiumPro, Pentium II, 
++ *	Pentium III, Xeon, Pentium 4, etc.
++ *
++ *	Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual, 
++ *	Order Number 245472 or free download from:
++ *		
++ *	http://developer.intel.com/design/pentium4/manuals/245472.htm
++ *
++ *	For more information, go to http://www.urbanmyth.org/microcode
++ *
++ *	This program is free software; you can redistribute it and/or
++ *	modify it under the terms of the GNU General Public License
++ *	as published by the Free Software Foundation; either version
++ *	2 of the License, or (at your option) any later version.
++ */
++
++//#define DEBUG /* pr_debug */
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/miscdevice.h>
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <linux/syscalls.h>
++
++#include <asm/msr.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++
++MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
++MODULE_AUTHOR("Tigran Aivazian <tigran at veritas.com>");
++MODULE_LICENSE("GPL");
++
++#define MICROCODE_VERSION 	"1.14-xen"
++
++#define DEFAULT_UCODE_DATASIZE 	(2000) 	  /* 2000 bytes */
++#define MC_HEADER_SIZE		(sizeof (microcode_header_t))  	  /* 48 bytes */
++#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
++
++/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
++static DECLARE_MUTEX(microcode_sem);
++
++static void __user *user_buffer;	/* user area microcode data buffer */
++static unsigned int user_buffer_size;	/* its size */
++				
++static int microcode_open (struct inode *unused1, struct file *unused2)
++{
++	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++
++static int do_microcode_update (void)
++{
++	int err;
++	dom0_op_t op;
++
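++	/*
++	 * Pin the user buffer so it stays resident while the hypervisor
++	 * reads it, and unpin it again once the update has been applied.
++	 */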
++	err = sys_mlock((unsigned long)user_buffer, user_buffer_size);
++	if (err != 0)
++		return err;
++
++	op.cmd = DOM0_MICROCODE;
++	op.u.microcode.data = user_buffer;
++	op.u.microcode.length = user_buffer_size;
++	err = HYPERVISOR_dom0_op(&op);
++
++	(void)sys_munlock((unsigned long)user_buffer, user_buffer_size);
++
++	return err;
++}
++
++static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
++{
++	ssize_t ret;
++
++	if (len < DEFAULT_UCODE_TOTALSIZE) {
++		printk(KERN_ERR "microcode: not enough data\n"); 
++		return -EINVAL;
++	}
++
++	if ((len >> PAGE_SHIFT) > num_physpages) {
++		printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages);
++		return -EINVAL;
++	}
++
++	down(&microcode_sem);
++
++	user_buffer = (void __user *) buf;
++	user_buffer_size = (int) len;
++
++	ret = do_microcode_update();
++	if (!ret)
++		ret = (ssize_t)len;
++
++	up(&microcode_sem);
++
++	return ret;
++}
++
++static int microcode_ioctl (struct inode *inode, struct file *file, 
++		unsigned int cmd, unsigned long arg)
++{
++	switch (cmd) {
++		/* 
++		 *  XXX: will be removed after microcode_ctl 
++		 *  is updated to ignore failure of this ioctl()
++		 */
++		case MICROCODE_IOCFREE:
++			return 0;
++		default:
++			return -EINVAL;
++	}
++	return -EINVAL;
++}
++
++static struct file_operations microcode_fops = {
++	.owner		= THIS_MODULE,
++	.write		= microcode_write,
++	.ioctl		= microcode_ioctl,
++	.open		= microcode_open,
++};
++
++static struct miscdevice microcode_dev = {
++	.minor		= MICROCODE_MINOR,
++	.name		= "microcode",
++	.devfs_name	= "cpu/microcode",
++	.fops		= &microcode_fops,
++};
++
++static int __init microcode_init (void)
++{
++	int error;
++
++	error = misc_register(&microcode_dev);
++	if (error) {
++		printk(KERN_ERR
++			"microcode: can't misc_register on minor=%d\n",
++			MICROCODE_MINOR);
++		return error;
++	}
++
++	printk(KERN_INFO 
++		"IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran at veritas.com>\n");
++	return 0;
++}
++
++static void __exit microcode_exit (void)
++{
++	misc_deregister(&microcode_dev);
++	printk(KERN_INFO "IA-32 Microcode Update Driver v" MICROCODE_VERSION " unregistered\n");
++}
++
++module_init(microcode_init)
++module_exit(microcode_exit)
++MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/mpparse.c linux-2.6.12-xen/arch/xen/i386/kernel/mpparse.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/mpparse.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/mpparse.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,1126 @@
++/*
++ *	Intel Multiprocessor Specification 1.1 and 1.4
++ *	compliant MP-table parsing routines.
++ *
++ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
++ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *		Erich Boleyn	:	MP v1.4 and additional changes.
++ *		Alan Cox	:	Added EBDA scanning
++ *		Ingo Molnar	:	various cleanups and rewrites
++ *		Maciej W. Rozycki:	Bits for default MP configurations
++ *		Paul Diefenbaugh:	Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/init.h>
++#include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/config.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/bitops.h>
++
++#include <asm/smp.h>
++#include <asm/acpi.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/io_apic.h>
++
++#include <mach_apic.h>
++#include <mach_mpparse.h>
++#include <bios_ebda.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++int apic_version [MAX_APICS];
++int mp_bus_id_to_type [MAX_MP_BUSSES];
++int mp_bus_id_to_node [MAX_MP_BUSSES];
++int mp_bus_id_to_local [MAX_MP_BUSSES];
++int quad_local_to_mp_bus_id [NR_CPUS/4][4];
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++static int mp_current_pci_id;
++
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++
++int pic_mode;
++unsigned long mp_lapic_addr;
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_physical_apicid = -1U;
++unsigned int boot_cpu_logical_apicid = -1U;
++/* Internal processor count */
++static unsigned int __initdata num_processors;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map;
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++
++/*
++ * Checksum an MP configuration block.
++ */
++
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++	int sum = 0;
++
++	while (len--)
++		sum += *mp++;
++
++	return sum & 0xFF;
++}
++
++/*
++ * Have to match translation table entries to main table entries by counter
++ * hence the mpc_record variable .... can't see a less disgusting way of
++ * doing this ....
++ */
++
++static int mpc_record; 
++static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
++
++#ifdef CONFIG_X86_NUMAQ
++static int MP_valid_apicid(int apicid, int version)
++{
++	return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf;
++}
++#elif !defined(CONFIG_XEN)
++static int MP_valid_apicid(int apicid, int version)
++{
++	if (version >= 0x14)
++		return apicid < 0xff;
++	else
++		return apicid < 0xf;
++}
++#endif
++
++#ifndef CONFIG_XEN
++static void __init MP_processor_info (struct mpc_config_processor *m)
++{
++ 	int ver, apicid;
++	physid_mask_t tmp;
++ 	
++	if (!(m->mpc_cpuflag & CPU_ENABLED))
++		return;
++
++	apicid = mpc_apic_id(m, translation_table[mpc_record]);
++
++	if (m->mpc_featureflag&(1<<0))
++		Dprintk("    Floating point unit present.\n");
++	if (m->mpc_featureflag&(1<<7))
++		Dprintk("    Machine Exception supported.\n");
++	if (m->mpc_featureflag&(1<<8))
++		Dprintk("    64 bit compare & exchange supported.\n");
++	if (m->mpc_featureflag&(1<<9))
++		Dprintk("    Internal APIC present.\n");
++	if (m->mpc_featureflag&(1<<11))
++		Dprintk("    SEP present.\n");
++	if (m->mpc_featureflag&(1<<12))
++		Dprintk("    MTRR  present.\n");
++	if (m->mpc_featureflag&(1<<13))
++		Dprintk("    PGE  present.\n");
++	if (m->mpc_featureflag&(1<<14))
++		Dprintk("    MCA  present.\n");
++	if (m->mpc_featureflag&(1<<15))
++		Dprintk("    CMOV  present.\n");
++	if (m->mpc_featureflag&(1<<16))
++		Dprintk("    PAT  present.\n");
++	if (m->mpc_featureflag&(1<<17))
++		Dprintk("    PSE  present.\n");
++	if (m->mpc_featureflag&(1<<18))
++		Dprintk("    PSN  present.\n");
++	if (m->mpc_featureflag&(1<<19))
++		Dprintk("    Cache Line Flush Instruction present.\n");
++	/* 20 Reserved */
++	if (m->mpc_featureflag&(1<<21))
++		Dprintk("    Debug Trace and EMON Store present.\n");
++	if (m->mpc_featureflag&(1<<22))
++		Dprintk("    ACPI Thermal Throttle Registers  present.\n");
++	if (m->mpc_featureflag&(1<<23))
++		Dprintk("    MMX  present.\n");
++	if (m->mpc_featureflag&(1<<24))
++		Dprintk("    FXSR  present.\n");
++	if (m->mpc_featureflag&(1<<25))
++		Dprintk("    XMM  present.\n");
++	if (m->mpc_featureflag&(1<<26))
++		Dprintk("    Willamette New Instructions  present.\n");
++	if (m->mpc_featureflag&(1<<27))
++		Dprintk("    Self Snoop  present.\n");
++	if (m->mpc_featureflag&(1<<28))
++		Dprintk("    HT  present.\n");
++	if (m->mpc_featureflag&(1<<29))
++		Dprintk("    Thermal Monitor present.\n");
++	/* 30, 31 Reserved */
++
++
++	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++		Dprintk("    Bootup CPU\n");
++		boot_cpu_physical_apicid = m->mpc_apicid;
++		boot_cpu_logical_apicid = apicid;
++	}
++
++	if (num_processors >= NR_CPUS) {
++		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++			"  Processor ignored.\n", NR_CPUS); 
++		return;
++	}
++
++	if (num_processors >= maxcpus) {
++		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
++			" Processor ignored.\n", maxcpus); 
++		return;
++	}
++	num_processors++;
++	ver = m->mpc_apicver;
++
++	if (!MP_valid_apicid(apicid, ver)) {
++		printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n",
++			m->mpc_apicid, MAX_APICS);
++		--num_processors;
++		return;
++	}
++
++	tmp = apicid_to_cpu_present(apicid);
++	physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
++	
++	/*
++	 * Validate version
++	 */
++	if (ver == 0x0) {
++		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
++		ver = 0x10;
++	}
++	apic_version[m->mpc_apicid] = ver;
++	bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
++}
++#else
++void __init MP_processor_info (struct mpc_config_processor *m)
++{
++	num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++	char str[7];
++
++	memcpy(str, m->mpc_bustype, 6);
++	str[6] = 0;
++
++	mpc_oem_bus_info(m, str, translation_table[mpc_record]);
++
++	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
++		mpc_oem_pci_bus(m, translation_table[mpc_record]);
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++		mp_current_pci_id++;
++	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++	} else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
++	} else {
++		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
++	}
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++	if (!(m->mpc_flags & MPC_APIC_USABLE))
++		return;
++
++	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
++		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++	if (nr_ioapics >= MAX_IO_APICS) {
++		printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
++			MAX_IO_APICS, nr_ioapics);
++		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
++	}
++	if (!m->mpc_apicaddr) {
++		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++			" found in MP table, skipping!\n");
++		return;
++	}
++	mp_ioapics[nr_ioapics] = *m;
++	nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++	mp_irqs [mp_irq_entries] = *m;
++	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
++			m->mpc_irqtype, m->mpc_irqflag & 3,
++			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++	if (++mp_irq_entries == MAX_IRQ_SOURCES)
++		panic("Max # of irq sources exceeded!!\n");
++}
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++			m->mpc_irqtype, m->mpc_irqflag & 3,
++			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++	/*
++	 * Well it seems all SMP boards in existence
++	 * use ExtINT/LVT1 == LINT0 and
++	 * NMI/LVT2 == LINT1 - the following check
++	 * will show us if this assumption is false.
++	 * Until then we do not have to add baggage.
++	 */
++	if ((m->mpc_irqtype == mp_ExtINT) &&
++		(m->mpc_destapiclint != 0))
++			BUG();
++	if ((m->mpc_irqtype == mp_NMI) &&
++		(m->mpc_destapiclint != 1))
++			BUG();
++}
++
++#ifdef CONFIG_X86_NUMAQ
++static void __init MP_translation_info (struct mpc_config_translation *m)
++{
++	printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
++
++	if (mpc_record >= MAX_MPC_ENTRY) 
++		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
++	else
++		translation_table[mpc_record] = m; /* stash this for later */
++	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
++		node_set_online(m->trans_quad);
++}
++
++/*
++ * Read/parse the MPC oem tables
++ */
++
++static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
++	unsigned short oemsize)
++{
++	int count = sizeof (*oemtable); /* the header size */
++	unsigned char *oemptr = ((unsigned char *)oemtable)+count;
++	
++	mpc_record = 0;
++	printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
++	if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
++	{
++		printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
++			oemtable->oem_signature[0],
++			oemtable->oem_signature[1],
++			oemtable->oem_signature[2],
++			oemtable->oem_signature[3]);
++		return;
++	}
++	if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
++	{
++		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
++		return;
++	}
++	while (count < oemtable->oem_length) {
++		switch (*oemptr) {
++			case MP_TRANSLATION:
++			{
++				struct mpc_config_translation *m=
++					(struct mpc_config_translation *)oemptr;
++				MP_translation_info(m);
++				oemptr += sizeof(*m);
++				count += sizeof(*m);
++				++mpc_record;
++				break;
++			}
++			default:
++			{
++				printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
++				return;
++			}
++		}
++       }
++}
++
++static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
++		char *productid)
++{
++	if (strncmp(oem, "IBM NUMA", 8))
++		printk("Warning!  May not be a NUMA-Q system!\n");
++	if (mpc->mpc_oemptr)
++		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
++				mpc->mpc_oemsize);
++}
++#endif	/* CONFIG_X86_NUMAQ */
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++	char str[16];
++	char oem[10];
++	int count=sizeof(*mpc);
++	unsigned char *mpt=((unsigned char *)mpc)+count;
++
++	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++		printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
++			*(u32 *)mpc->mpc_signature);
++		return 0;
++	}
++	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++		printk(KERN_ERR "SMP mptable: checksum error!\n");
++		return 0;
++	}
++	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++			mpc->mpc_spec);
++		return 0;
++	}
++	if (!mpc->mpc_lapic) {
++		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++		return 0;
++	}
++	memcpy(oem,mpc->mpc_oem,8);
++	oem[8]=0;
++	printk(KERN_INFO "OEM ID: %s ",oem);
++
++	memcpy(str,mpc->mpc_productid,12);
++	str[12]=0;
++	printk("Product ID: %s ",str);
++
++	mps_oem_check(mpc, oem, str);
++
++	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
++
++	/* 
++	 * Save the local APIC address (it might be non-default) -- but only
++	 * if we're not using ACPI.
++	 */
++	if (!acpi_lapic)
++		mp_lapic_addr = mpc->mpc_lapic;
++
++	/*
++	 *	Now process the configuration blocks.
++	 */
++	mpc_record = 0;
++	while (count < mpc->mpc_length) {
++		switch(*mpt) {
++			case MP_PROCESSOR:
++			{
++				struct mpc_config_processor *m=
++					(struct mpc_config_processor *)mpt;
++				/* ACPI may have already provided this data */
++				if (!acpi_lapic)
++					MP_processor_info(m);
++				mpt += sizeof(*m);
++				count += sizeof(*m);
++				break;
++			}
++			case MP_BUS:
++			{
++				struct mpc_config_bus *m=
++					(struct mpc_config_bus *)mpt;
++				MP_bus_info(m);
++				mpt += sizeof(*m);
++				count += sizeof(*m);
++				break;
++			}
++			case MP_IOAPIC:
++			{
++				struct mpc_config_ioapic *m=
++					(struct mpc_config_ioapic *)mpt;
++				MP_ioapic_info(m);
++				mpt+=sizeof(*m);
++				count+=sizeof(*m);
++				break;
++			}
++			case MP_INTSRC:
++			{
++				struct mpc_config_intsrc *m=
++					(struct mpc_config_intsrc *)mpt;
++
++				MP_intsrc_info(m);
++				mpt+=sizeof(*m);
++				count+=sizeof(*m);
++				break;
++			}
++			case MP_LINTSRC:
++			{
++				struct mpc_config_lintsrc *m=
++					(struct mpc_config_lintsrc *)mpt;
++				MP_lintsrc_info(m);
++				mpt+=sizeof(*m);
++				count+=sizeof(*m);
++				break;
++			}
++			default:
++			{
++				count = mpc->mpc_length;
++				break;
++			}
++		}
++		++mpc_record;
++	}
++	clustered_apic_check();
++	if (!num_processors)
++		printk(KERN_ERR "SMP mptable: no processors registered!\n");
++	return num_processors;
++}
++
++static int __init ELCR_trigger(unsigned int irq)
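++/*
++ * Read the ISA edge/level control registers at 0x4d0/0x4d1: a set bit means
++ * the corresponding IRQ is level triggered.
++ */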
++{
++	unsigned int port;
++
++	port = 0x4d0 + (irq >> 3);
++	return (inb(port) >> (irq & 7)) & 1;
++}
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++	struct mpc_config_intsrc intsrc;
++	int i;
++	int ELCR_fallback = 0;
++
++	intsrc.mpc_type = MP_INTSRC;
++	intsrc.mpc_irqflag = 0;			/* conforming */
++	intsrc.mpc_srcbus = 0;
++	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++	intsrc.mpc_irqtype = mp_INT;
++
++	/*
++	 *  If true, we have an ISA/PCI system with no IRQ entries
++	 *  in the MP table. To prevent the PCI interrupts from being set up
++	 *  incorrectly, we try to use the ELCR. The sanity check to see if
++	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++	 *  never be level sensitive, so we simply see if the ELCR agrees.
++	 *  If it does, we assume it's valid.
++	 */
++	if (mpc_default_type == 5) {
++		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++			printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
++		else {
++			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++			ELCR_fallback = 1;
++		}
++	}
++
++	for (i = 0; i < 16; i++) {
++		switch (mpc_default_type) {
++		case 2:
++			if (i == 0 || i == 13)
++				continue;	/* IRQ0 & IRQ13 not connected */
++			/* fall through */
++		default:
++			if (i == 2)
++				continue;	/* IRQ2 is never connected */
++		}
++
++		if (ELCR_fallback) {
++			/*
++			 *  If the ELCR indicates a level-sensitive interrupt, we
++			 *  copy that information over to the MP table in the
++			 *  irqflag field (level sensitive, active high polarity).
++			 */
++			if (ELCR_trigger(i))
++				intsrc.mpc_irqflag = 13;
++			else
++				intsrc.mpc_irqflag = 0;
++		}
++
++		intsrc.mpc_srcbusirq = i;
++		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
++		MP_intsrc_info(&intsrc);
++	}
++
++	intsrc.mpc_irqtype = mp_ExtINT;
++	intsrc.mpc_srcbusirq = 0;
++	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
++	MP_intsrc_info(&intsrc);
++}
++
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++	struct mpc_config_processor processor;
++	struct mpc_config_bus bus;
++	struct mpc_config_ioapic ioapic;
++	struct mpc_config_lintsrc lintsrc;
++	int linttypes[2] = { mp_ExtINT, mp_NMI };
++	int i;
++
++	/*
++	 * local APIC has default address
++	 */
++	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++	/*
++	 * 2 CPUs, numbered 0 & 1.
++	 */
++	processor.mpc_type = MP_PROCESSOR;
++	/* Either an integrated APIC or a discrete 82489DX. */
++	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++	processor.mpc_cpuflag = CPU_ENABLED;
++	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++				   (boot_cpu_data.x86_model << 4) |
++				   boot_cpu_data.x86_mask;
++	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++	processor.mpc_reserved[0] = 0;
++	processor.mpc_reserved[1] = 0;
++	for (i = 0; i < 2; i++) {
++		processor.mpc_apicid = i;
++		MP_processor_info(&processor);
++	}
++
++	bus.mpc_type = MP_BUS;
++	bus.mpc_busid = 0;
++	switch (mpc_default_type) {
++		default:
++			printk("???\n");
++			printk(KERN_ERR "Unknown standard configuration %d\n",
++				mpc_default_type);
++			/* fall through */
++		case 1:
++		case 5:
++			memcpy(bus.mpc_bustype, "ISA   ", 6);
++			break;
++		case 2:
++		case 6:
++		case 3:
++			memcpy(bus.mpc_bustype, "EISA  ", 6);
++			break;
++		case 4:
++		case 7:
++			memcpy(bus.mpc_bustype, "MCA   ", 6);
++	}
++	MP_bus_info(&bus);
++	if (mpc_default_type > 4) {
++		bus.mpc_busid = 1;
++		memcpy(bus.mpc_bustype, "PCI   ", 6);
++		MP_bus_info(&bus);
++	}
++
++	ioapic.mpc_type = MP_IOAPIC;
++	ioapic.mpc_apicid = 2;
++	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++	ioapic.mpc_flags = MPC_APIC_USABLE;
++	ioapic.mpc_apicaddr = 0xFEC00000;
++	MP_ioapic_info(&ioapic);
++
++	/*
++	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
++	 */
++	construct_default_ioirq_mptable(mpc_default_type);
++
++	lintsrc.mpc_type = MP_LINTSRC;
++	lintsrc.mpc_irqflag = 0;		/* conforming */
++	lintsrc.mpc_srcbusid = 0;
++	lintsrc.mpc_srcbusirq = 0;
++	lintsrc.mpc_destapic = MP_APIC_ALL;
++	for (i = 0; i < 2; i++) {
++		lintsrc.mpc_irqtype = linttypes[i];
++		lintsrc.mpc_destapiclint = i;
++		MP_lintsrc_info(&lintsrc);
++	}
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++	struct intel_mp_floating *mpf = mpf_found;
++
++	/*
++	 * ACPI may be used to obtain the entire SMP configuration or just to 
++	 * enumerate/configure processors (CONFIG_ACPI_BOOT).  Note that 
++	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
++	 * processors, where MPS only supports physical.
++	 */
++	if (acpi_lapic && acpi_ioapic) {
++		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++		return;
++	}
++	else if (acpi_lapic)
++		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++	if (mpf->mpf_feature2 & (1<<7)) {
++		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
++		pic_mode = 1;
++	} else {
++		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
++		pic_mode = 0;
++	}
++
++	/*
++	 * Now see if we need to read further.
++	 */
++	if (mpf->mpf_feature1 != 0) {
++
++		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++		construct_default_ISA_mptable(mpf->mpf_feature1);
++
++	} else if (mpf->mpf_physptr) {
++
++		/*
++		 * Read the physical hardware table.  Anything here will
++		 * override the defaults.
++		 */
++		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++			smp_found_config = 0;
++			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++			return;
++		}
++		/*
++		 * If there are no explicit MP IRQ entries, then we are
++		 * broken.  We set up most of the low 16 IO-APIC pins to
++		 * ISA defaults and hope it will work.
++		 */
++		if (!mp_irq_entries) {
++			struct mpc_config_bus bus;
++
++			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++			bus.mpc_type = MP_BUS;
++			bus.mpc_busid = 0;
++			memcpy(bus.mpc_bustype, "ISA   ", 6);
++			MP_bus_info(&bus);
++
++			construct_default_ioirq_mptable(0);
++		}
++
++	} else
++		BUG();
++
++	printk(KERN_INFO "Processors: %d\n", num_processors);
++	/*
++	 * Only use the first configuration found.
++	 */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++	unsigned long *bp = isa_bus_to_virt(base);
++	struct intel_mp_floating *mpf;
++
++	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++	if (sizeof(*mpf) != 16)
++		printk("Error: MPF size\n");
++
++	while (length > 0) {
++		mpf = (struct intel_mp_floating *)bp;
++		if ((*bp == SMP_MAGIC_IDENT) &&
++			(mpf->mpf_length == 1) &&
++			!mpf_checksum((unsigned char *)bp, 16) &&
++			((mpf->mpf_specification == 1)
++				|| (mpf->mpf_specification == 4)) ) {
++
++			smp_found_config = 1;
++#ifndef CONFIG_XEN
++			printk(KERN_INFO "found SMP MP-table at %08lx\n",
++						virt_to_phys(mpf));
++			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
++			if (mpf->mpf_physptr) {
++				/*
++				 * We cannot access the MPC table to compute
++				 * its size yet, as only a few megabytes from
++				 * the bottom are mapped at this point.
++				 * The PC-9800's MPC table sits at the very end
++				 * of physical memory, so simply reserving
++				 * PAGE_SIZE from mpf->mpf_physptr yields BUG()
++				 * in reserve_bootmem.
++				 */
++				unsigned long size = PAGE_SIZE;
++				unsigned long end = max_low_pfn * PAGE_SIZE;
++				if (mpf->mpf_physptr + size > end)
++					size = end - mpf->mpf_physptr;
++				reserve_bootmem(mpf->mpf_physptr, size);
++			}
++#else
++			printk(KERN_INFO "found SMP MP-table at %08lx\n",
++				((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
++#endif
++
++			mpf_found = mpf;
++			return 1;
++		}
++		bp += 4;
++		length -= 16;
++	}
++	return 0;
++}
++
++void __init find_smp_config (void)
++{
++#ifndef CONFIG_XEN
++	unsigned int address;
++#endif
++
++	/*
++	 * FIXME: Linux assumes you have 640K of base ram..
++	 * this continues the error...
++	 *
++	 * 1) Scan the bottom 1K for a signature
++	 * 2) Scan the top 1K of base RAM
++	 * 3) Scan the 64K of bios
++	 */
++	if (smp_scan_config(0x0,0x400) ||
++		smp_scan_config(639*0x400,0x400) ||
++			smp_scan_config(0xF0000,0x10000))
++		return;
++	/*
++	 * If it is an SMP machine we should know now, unless the
++	 * configuration is in an EISA/MCA bus machine with an
++	 * extended bios data area.
++	 *
++	 * there is a real-mode segmented pointer pointing to the
++	 * 4K EBDA area at 0x40E, calculate and scan it here.
++	 *
++	 * NOTE! There are Linux loaders that will corrupt the EBDA
++	 * area, and as such this kind of SMP config may be less
++	 * trustworthy, simply because the SMP table may have been
++	 * stomped on during early boot. These loaders are buggy and
++	 * should be fixed.
++	 *
++	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
++	 */
++
++#ifndef CONFIG_XEN
++	address = get_bios_ebda();
++	if (address)
++		smp_scan_config(address, 0x400);
++#endif
++}
++
++/* --------------------------------------------------------------------------
++                            ACPI-based MP Configuration
++   -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI_BOOT
++
++void __init mp_register_lapic_address (
++	u64			address)
++{
++#ifndef CONFIG_XEN
++	mp_lapic_addr = (unsigned long) address;
++
++	if (boot_cpu_physical_apicid == -1U)
++		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
++
++	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __init mp_register_lapic (
++	u8			id, 
++	u8			enabled)
++{
++	struct mpc_config_processor processor;
++	int			boot_cpu = 0;
++	
++	if (MAX_APICS - id <= 0) {
++		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++			id, MAX_APICS);
++		return;
++	}
++
++	if (id == boot_cpu_physical_apicid)
++		boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++	processor.mpc_type = MP_PROCESSOR;
++	processor.mpc_apicid = id;
++	processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
++		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++	processor.mpc_reserved[0] = 0;
++	processor.mpc_reserved[1] = 0;
++#endif
++
++	MP_processor_info(&processor);
++}
++
++#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT))
++
++#define MP_ISA_BUS		0
++#define MP_MAX_IOAPIC_PIN	127
++
++static struct mp_ioapic_routing {
++	int			apic_id;
++	int			gsi_base;
++	int			gsi_end;
++	u32			pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++	int			gsi)
++{
++	int			i = 0;
++
++	/* Find the IOAPIC that manages this GSI. */
++	for (i = 0; i < nr_ioapics; i++) {
++		if ((gsi >= mp_ioapic_routing[i].gsi_base)
++			&& (gsi <= mp_ioapic_routing[i].gsi_end))
++			return i;
++	}
++
++	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++	return -1;
++}
++	
++
++void __init mp_register_ioapic (
++	u8			id, 
++	u32			address,
++	u32			gsi_base)
++{
++	int			idx = 0;
++
++	if (nr_ioapics >= MAX_IO_APICS) {
++		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
++		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++	}
++	if (!address) {
++		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++			" found in MADT table, skipping!\n");
++		return;
++	}
++
++	idx = nr_ioapics++;
++
++	mp_ioapics[idx].mpc_type = MP_IOAPIC;
++	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++	mp_ioapics[idx].mpc_apicaddr = address;
++
++	mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
++	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++	
++	/* 
++	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
++	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
++	 */
++	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++	mp_ioapic_routing[idx].gsi_base = gsi_base;
++	mp_ioapic_routing[idx].gsi_end = gsi_base + 
++		io_apic_get_redir_entries(idx);
++
++	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
++		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
++		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++		mp_ioapic_routing[idx].gsi_base,
++		mp_ioapic_routing[idx].gsi_end);
++
++	return;
++}
++
++
++void __init mp_override_legacy_irq (
++	u8			bus_irq,
++	u8			polarity, 
++	u8			trigger, 
++	u32			gsi)
++{
++	struct mpc_config_intsrc intsrc;
++	int			ioapic = -1;
++	int			pin = -1;
++
++	/* 
++	 * Convert 'gsi' to 'ioapic.pin'.
++	 */
++	ioapic = mp_find_ioapic(gsi);
++	if (ioapic < 0)
++		return;
++	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++	/*
++	 * TBD: This check is for faulty timer entries, where the override
++	 *      erroneously sets the trigger to level, resulting in a HUGE 
++	 *      increase of timer interrupts!
++	 */
++	if ((bus_irq == 0) && (trigger == 3))
++		trigger = 1;
++
++	intsrc.mpc_type = MP_INTSRC;
++	intsrc.mpc_irqtype = mp_INT;
++	intsrc.mpc_irqflag = (trigger << 2) | polarity;
++	intsrc.mpc_srcbus = MP_ISA_BUS;
++	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
++	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
++	intsrc.mpc_dstirq = pin;				    /* INTIN# */
++
++	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
++		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
++		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
++		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++	mp_irqs[mp_irq_entries] = intsrc;
++	if (++mp_irq_entries == MAX_IRQ_SOURCES)
++		panic("Max # of irq sources exceeded!\n");
++
++	return;
++}
++
++int es7000_plat;
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++	struct mpc_config_intsrc intsrc;
++	int			i = 0;
++	int			ioapic = -1;
++
++	/* 
++	 * Fabricate the legacy ISA bus (bus #31).
++	 */
++	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++	/*
++	 * Older generations of ES7000 have no legacy identity mappings
++	 */
++	if (es7000_plat == 1)
++		return;
++
++	/* 
++	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
++	 */
++	ioapic = mp_find_ioapic(0);
++	if (ioapic < 0)
++		return;
++
++	intsrc.mpc_type = MP_INTSRC;
++	intsrc.mpc_irqflag = 0;					/* Conforming */
++	intsrc.mpc_srcbus = MP_ISA_BUS;
++	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++	/* 
++	 * Use the default configuration for the IRQs 0-15, unless
++	 * overridden by (MADT) interrupt source override entries.
++	 */
++	for (i = 0; i < 16; i++) {
++		int idx;
++
++		for (idx = 0; idx < mp_irq_entries; idx++) {
++			struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++			/* Do we already have a mapping for this ISA IRQ? */
++			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++				break;
++
++			/* Do we already have a mapping for this IOAPIC pin */
++			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++				(irq->mpc_dstirq == i))
++				break;
++		}
++
++		if (idx != mp_irq_entries) {
++			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++			continue;			/* IRQ already used */
++		}
++
++		intsrc.mpc_irqtype = mp_INT;
++		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
++		intsrc.mpc_dstirq = i;
++
++		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
++			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
++			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
++			intsrc.mpc_dstirq);
++
++		mp_irqs[mp_irq_entries] = intsrc;
++		if (++mp_irq_entries == MAX_IRQ_SOURCES)
++			panic("Max # of irq sources exceeded!\n");
++	}
++}
++
++int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
++{
++	int			ioapic = -1;
++	int			ioapic_pin = 0;
++	int			idx, bit = 0;
++
++#ifdef CONFIG_ACPI_BUS
++	/* Don't set up the ACPI SCI because it's already set up */
++	if (acpi_fadt.sci_int == gsi)
++		return gsi;
++#endif
++
++	ioapic = mp_find_ioapic(gsi);
++	if (ioapic < 0) {
++		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++		return gsi;
++	}
++
++	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++	if (ioapic_renumber_irq)
++		gsi = ioapic_renumber_irq(ioapic, gsi);
++
++	/* 
++	 * Avoid pin reprogramming.  PRTs typically include entries  
++	 * with redundant pin->gsi mappings (but unique PCI devices);
++	 * we only program the IOAPIC on the first.
++	 */
++	bit = ioapic_pin % 32;
++	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++	if (idx > 3) {
++		printk(KERN_ERR "Invalid reference to IOAPIC pin "
++			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
++			ioapic_pin);
++		return gsi;
++	}
++	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++		return gsi;
++	}
++
++	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++		    edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
++		    active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
++	return gsi;
++}
++
++#endif /*CONFIG_X86_IO_APIC && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)*/
++#endif /*CONFIG_ACPI_BOOT*/
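
The smp_scan_config() hunk above accepts a candidate MP floating pointer only
if its 16 bytes pass mpf_checksum(), a helper that is not part of this hunk.
Assuming the usual MP-specification rule that the structure's bytes must sum
to zero modulo 256, a minimal sketch of that helper might look roughly like
the following (illustration only, not text from the committed patch):

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	/* byte-wise sum over the 16-byte floating pointer structure */
	while (len--)
		sum += *mp++;

	/* a valid structure sums to zero; the caller tests !mpf_checksum() */
	return sum & 0xFF;
}
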
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/pci-dma.c linux-2.6.12-xen/arch/xen/i386/kernel/pci-dma.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/pci-dma.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/pci-dma.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,319 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * On i386 there is no hardware dynamic DMA address translation,
++ * so consistent alloc/free are merely page allocation/freeing.
++ * The rest of the dynamic DMA mapping interface is implemented
++ * in asm/pci.h.
++ */
++
++#include <linux/types.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/pci.h>
++#include <linux/version.h>
++#include <asm/io.h>
++#include <asm-xen/balloon.h>
++#include <asm/tlbflush.h>
++#include <asm/swiotlb.h>
++
++struct dma_coherent_mem {
++	void		*virt_base;
++	u32		device_base;
++	int		size;
++	int		flags;
++	unsigned long	*bitmap;
++};
++
++#define IOMMU_BUG_ON(test)				\
++do {							\
++	if (unlikely(test)) {				\
++		printk(KERN_ALERT "Fatal DMA error! "	\
++		       "Please use 'swiotlb=force'\n");	\
++		BUG();					\
++	}						\
++} while (0)
++
++int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++	   enum dma_data_direction direction)
++{
++	int i, rc;
++
++	BUG_ON(direction == DMA_NONE);
++
++	if (swiotlb) {
++		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
++	} else {
++		for (i = 0; i < nents; i++ ) {
++			sg[i].dma_address =
++				page_to_phys(sg[i].page) + sg[i].offset;
++			sg[i].dma_length  = sg[i].length;
++			BUG_ON(!sg[i].page);
++			IOMMU_BUG_ON(address_needs_mapping(
++				hwdev, sg[i].dma_address));
++		}
++		rc = nents;
++	}
++
++	flush_write_buffers();
++	return rc;
++}
++EXPORT_SYMBOL(dma_map_sg);
++
++void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++	     enum dma_data_direction direction)
++{
++	BUG_ON(direction == DMA_NONE);
++	if (swiotlb)
++		swiotlb_unmap_sg(hwdev, sg, nents, direction);
++}
++EXPORT_SYMBOL(dma_unmap_sg);
++
++dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++	     size_t size, enum dma_data_direction direction)
++{
++	dma_addr_t dma_addr;
++
++	BUG_ON(direction == DMA_NONE);
++
++	if (swiotlb) {
++		dma_addr = swiotlb_map_page(
++			dev, page, offset, size, direction);
++	} else {
++		dma_addr = page_to_phys(page) + offset;
++		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++	}
++
++	return dma_addr;
++}
++EXPORT_SYMBOL(dma_map_page);
++
++void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++	       enum dma_data_direction direction)
++{
++	BUG_ON(direction == DMA_NONE);
++	if (swiotlb)
++		swiotlb_unmap_page(dev, dma_address, size, direction);
++}
++EXPORT_SYMBOL(dma_unmap_page);
++
++int
++dma_mapping_error(dma_addr_t dma_addr)
++{
++	if (swiotlb)
++		return swiotlb_dma_mapping_error(dma_addr);
++	return 0;
++}
++EXPORT_SYMBOL(dma_mapping_error);
++
++int
++dma_supported(struct device *dev, u64 mask)
++{
++	if (swiotlb)
++		return swiotlb_dma_supported(dev, mask);
++	/*
++	 * By default we'll BUG when an infeasible DMA is requested, and
++	 * request swiotlb=force (see IOMMU_BUG_ON).
++	 */
++	return 1;
++}
++EXPORT_SYMBOL(dma_supported);
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++			   dma_addr_t *dma_handle, unsigned int __nocast gfp)
++{
++	void *ret;
++	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++	unsigned int order = get_order(size);
++	unsigned long vstart;
++	/* ignore region specifiers */
++	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
++
++	if (mem) {
++		int page = bitmap_find_free_region(mem->bitmap, mem->size,
++						     order);
++		if (page >= 0) {
++			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
++			ret = mem->virt_base + (page << PAGE_SHIFT);
++			memset(ret, 0, size);
++			return ret;
++		}
++		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
++			return NULL;
++	}
++
++	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
++		gfp |= GFP_DMA;
++
++	vstart = __get_free_pages(gfp, order);
++	ret = (void *)vstart;
++
++	if (ret != NULL) {
++		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
++		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
++			free_pages(vstart, order);
++			return NULL;
++		}
++		memset(ret, 0, size);
++		*dma_handle = virt_to_bus(ret);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(dma_alloc_coherent);
++
++void dma_free_coherent(struct device *dev, size_t size,
++			 void *vaddr, dma_addr_t dma_handle)
++{
++	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++	int order = get_order(size);
++	
++	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
++		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
++
++		bitmap_release_region(mem->bitmap, page, order);
++	} else {
++		xen_destroy_contiguous_region((unsigned long)vaddr, order);
++		free_pages((unsigned long)vaddr, order);
++	}
++}
++EXPORT_SYMBOL(dma_free_coherent);
++
++int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++				dma_addr_t device_addr, size_t size, int flags)
++{
++	void __iomem *mem_base;
++	int pages = size >> PAGE_SHIFT;
++	int bitmap_size = (pages + 31)/32;
++
++	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
++		goto out;
++	if (!size)
++		goto out;
++	if (dev->dma_mem)
++		goto out;
++
++	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
++
++	mem_base = ioremap(bus_addr, size);
++	if (!mem_base)
++		goto out;
++
++	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
++	if (!dev->dma_mem)
++		goto out;
++	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
++	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
++	if (!dev->dma_mem->bitmap)
++		goto free1_out;
++	memset(dev->dma_mem->bitmap, 0, bitmap_size);
++
++	dev->dma_mem->virt_base = mem_base;
++	dev->dma_mem->device_base = device_addr;
++	dev->dma_mem->size = pages;
++	dev->dma_mem->flags = flags;
++
++	if (flags & DMA_MEMORY_MAP)
++		return DMA_MEMORY_MAP;
++
++	return DMA_MEMORY_IO;
++
++ free1_out:
++	kfree(dev->dma_mem->bitmap);
++ out:
++	return 0;
++}
++EXPORT_SYMBOL(dma_declare_coherent_memory);
++
++void dma_release_declared_memory(struct device *dev)
++{
++	struct dma_coherent_mem *mem = dev->dma_mem;
++	
++	if(!mem)
++		return;
++	dev->dma_mem = NULL;
++	iounmap(mem->virt_base);
++	kfree(mem->bitmap);
++	kfree(mem);
++}
++EXPORT_SYMBOL(dma_release_declared_memory);
++
++void *dma_mark_declared_memory_occupied(struct device *dev,
++					dma_addr_t device_addr, size_t size)
++{
++	struct dma_coherent_mem *mem = dev->dma_mem;
++	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	int pos, err;
++
++	if (!mem)
++		return ERR_PTR(-EINVAL);
++
++	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
++	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
++	if (err != 0)
++		return ERR_PTR(err);
++	return mem->virt_base + (pos << PAGE_SHIFT);
++}
++EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
++
++dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++	       enum dma_data_direction direction)
++{
++	dma_addr_t dma;
++
++	BUG_ON(direction == DMA_NONE);
++
++	if (swiotlb) {
++		dma = swiotlb_map_single(dev, ptr, size, direction);
++	} else {
++		dma = virt_to_bus(ptr);
++		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
++		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
++	}
++
++	flush_write_buffers();
++	return dma;
++}
++EXPORT_SYMBOL(dma_map_single);
++
++void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++		 enum dma_data_direction direction)
++{
++	BUG_ON(direction == DMA_NONE);
++	if (swiotlb)
++		swiotlb_unmap_single(dev, dma_addr, size, direction);
++}
++EXPORT_SYMBOL(dma_unmap_single);
++
++void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++			enum dma_data_direction direction)
++{
++	if (swiotlb)
++		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_cpu);
++
++void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++                           enum dma_data_direction direction)
++{
++	if (swiotlb)
++		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_device);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
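
The pci-dma.c file added above provides Xen-aware versions of the generic i386
DMA-mapping entry points: each call either routes through swiotlb or falls back
to direct physical addressing guarded by IOMMU_BUG_ON(). Drivers consume these
entry points through the usual streaming-DMA calling sequence; the sketch below
only illustrates that sequence, and the function and variable names in it are
placeholders rather than part of this patch:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustration: map one buffer for a device-bound transfer, then unmap it. */
static int example_single_transfer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -EIO;

	/* ... program the device with 'handle' and wait for completion ... */

	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
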
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/process.c linux-2.6.12-xen/arch/xen/i386/kernel/process.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/process.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/process.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,770 @@
++/*
++ *  linux/arch/i386/kernel/process.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/config.h>
++#include <linux/utsname.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/init.h>
++#include <linux/mc146818rtc.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/random.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/ldt.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/irq.h>
++#include <asm/desc.h>
++#include <asm-xen/xen-public/physdev.h>
++#include <asm-xen/xen-public/vcpu.h>
++#ifdef CONFIG_MATH_EMULATION
++#include <asm/math_emu.h>
++#endif
++
++#include <linux/irq.h>
++#include <linux/err.h>
++
++#include <asm/tlbflush.h>
++#include <asm/cpu.h>
++
++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
++
++static int hlt_counter;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Return saved PC of a blocked thread.
++ */
++unsigned long thread_saved_pc(struct task_struct *tsk)
++{
++	return ((unsigned long *)tsk->thread.esp)[3];
++}
++
++/*
++ * Powermanagement idle function, if any..
++ */
++void (*pm_idle)(void);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++void disable_hlt(void)
++{
++	hlt_counter++;
++}
++
++EXPORT_SYMBOL(disable_hlt);
++
++void enable_hlt(void)
++{
++	hlt_counter--;
++}
++
++EXPORT_SYMBOL(enable_hlt);
++
++/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
++extern void stop_hz_timer(void);
++extern void start_hz_timer(void);
++void xen_idle(void)
++{
++	local_irq_disable();
++
++	if (need_resched()) {
++		local_irq_enable();
++	} else {
++		stop_hz_timer();
++		/* Blocking includes an implicit local_irq_enable(). */
++		HYPERVISOR_sched_op(SCHEDOP_block, 0);
++		start_hz_timer();
++	}
++}
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle (void)
++{
++#if defined(CONFIG_HOTPLUG_CPU)
++	int cpu = _smp_processor_id();
++#endif
++
++	/* endless idle loop with no priority at all */
++	while (1) {
++		while (!need_resched()) {
++
++			if (__get_cpu_var(cpu_idle_state))
++				__get_cpu_var(cpu_idle_state) = 0;
++			rmb();
++
++#if defined(CONFIG_HOTPLUG_CPU)
++			if (cpu_is_offline(cpu)) {
++				HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
++				local_irq_enable();
++			}
++#endif
++
++			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
++			xen_idle();
++		}
++		schedule();
++	}
++}
++
++void cpu_idle_wait(void)
++{
++	unsigned int cpu, this_cpu = get_cpu();
++	cpumask_t map;
++
++	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++	put_cpu();
++
++	cpus_clear(map);
++	for_each_online_cpu(cpu) {
++		per_cpu(cpu_idle_state, cpu) = 1;
++		cpu_set(cpu, map);
++	}
++
++	__get_cpu_var(cpu_idle_state) = 0;
++
++	wmb();
++	do {
++		ssleep(1);
++		for_each_online_cpu(cpu) {
++			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
++				cpu_clear(cpu, map);
++		}
++		cpus_and(map, map, cpu_online_map);
++	} while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
++/* Always use xen_idle() instead. */
++void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
++
++void show_regs(struct pt_regs * regs)
++{
++	printk("\n");
++	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
++	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
++	print_symbol("EIP is at %s\n", regs->eip);
++
++	if (regs->xcs & 2)
++		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
++	printk(" EFLAGS: %08lx    %s  (%s)\n",
++	       regs->eflags, print_tainted(), system_utsname.release);
++	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
++		regs->eax,regs->ebx,regs->ecx,regs->edx);
++	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
++		regs->esi, regs->edi, regs->ebp);
++	printk(" DS: %04x ES: %04x\n",
++		0xffff & regs->xds,0xffff & regs->xes);
++
++	show_trace(NULL, &regs->esp);
++}
++
++/*
++ * This gets run with %ebx containing the
++ * function to call, and %edx containing
++ * the "args".
++ */
++extern void kernel_thread_helper(void);
++__asm__(".section .text\n"
++	".align 4\n"
++	"kernel_thread_helper:\n\t"
++	"movl %edx,%eax\n\t"
++	"pushl %edx\n\t"
++	"call *%ebx\n\t"
++	"pushl %eax\n\t"
++	"call do_exit\n"
++	".previous");
++
++/*
++ * Create a kernel thread
++ */
++int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++{
++	struct pt_regs regs;
++
++	memset(&regs, 0, sizeof(regs));
++
++	regs.ebx = (unsigned long) fn;
++	regs.edx = (unsigned long) arg;
++
++	regs.xds = __USER_DS;
++	regs.xes = __USER_DS;
++	regs.orig_eax = -1;
++	regs.eip = (unsigned long) kernel_thread_helper;
++	regs.xcs = __KERNEL_CS;
++	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
++
++	/* Ok, create the new process.. */
++	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++}
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++	struct task_struct *tsk = current;
++	struct thread_struct *t = &tsk->thread;
++
++	/* The process may have allocated an io port bitmap... nuke it. */
++	if (unlikely(NULL != t->io_bitmap_ptr)) {
++		physdev_op_t op = { 0 };
++		op.cmd = PHYSDEVOP_SET_IOBITMAP;
++		HYPERVISOR_physdev_op(&op);
++		kfree(t->io_bitmap_ptr);
++		t->io_bitmap_ptr = NULL;
++	}
++}
++
++void flush_thread(void)
++{
++	struct task_struct *tsk = current;
++
++	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
++	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
++	/*
++	 * Forget coprocessor state..
++	 */
++	clear_fpu(tsk);
++	clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++	if (dead_task->mm) {
++		// temporary debugging check
++		if (dead_task->mm->context.size) {
++			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
++					dead_task->comm,
++					dead_task->mm->context.ldt,
++					dead_task->mm->context.size);
++			BUG();
++		}
++	}
++
++	release_vm86_irqs(dead_task);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++	unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
++	unsigned long unused,
++	struct task_struct * p, struct pt_regs * regs)
++{
++	struct pt_regs * childregs;
++	struct task_struct *tsk;
++	int err;
++
++	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
++	/*
++	 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
++	 * This is necessary to guarantee that the entire "struct pt_regs"
++	 * is accessible even if the CPU hasn't stored the SS/ESP registers
++	 * on the stack (interrupt gate does not save these registers
++	 * when switching to the same priv ring).
++	 * Therefore beware: accessing the xss/esp fields of the
++	 * "struct pt_regs" is possible, but they may contain the
++	 * completely wrong values.
++	 */
++	childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
++	*childregs = *regs;
++	childregs->eax = 0;
++	childregs->esp = esp;
++
++	p->thread.esp = (unsigned long) childregs;
++	p->thread.esp0 = (unsigned long) (childregs+1);
++
++	p->thread.eip = (unsigned long) ret_from_fork;
++
++	savesegment(fs,p->thread.fs);
++	savesegment(gs,p->thread.gs);
++
++	tsk = current;
++	if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
++		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++		if (!p->thread.io_bitmap_ptr) {
++			p->thread.io_bitmap_max = 0;
++			return -ENOMEM;
++		}
++		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
++			IO_BITMAP_BYTES);
++	}
++
++	/*
++	 * Set a new TLS for the child thread?
++	 */
++	if (clone_flags & CLONE_SETTLS) {
++		struct desc_struct *desc;
++		struct user_desc info;
++		int idx;
++
++		err = -EFAULT;
++		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
++			goto out;
++		err = -EINVAL;
++		if (LDT_empty(&info))
++			goto out;
++
++		idx = info.entry_number;
++		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++			goto out;
++
++		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++		desc->a = LDT_entry_a(&info);
++		desc->b = LDT_entry_b(&info);
++	}
++
++	p->thread.io_pl = current->thread.io_pl;
++
++	err = 0;
++ out:
++	if (err && p->thread.io_bitmap_ptr) {
++		kfree(p->thread.io_bitmap_ptr);
++		p->thread.io_bitmap_max = 0;
++	}
++	return err;
++}
++
++/*
++ * fill in the user structure for a core dump..
++ */
++void dump_thread(struct pt_regs * regs, struct user * dump)
++{
++	int i;
++
++/* changed the size calculations - should hopefully work better. lbt */
++	dump->magic = CMAGIC;
++	dump->start_code = 0;
++	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
++	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
++	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
++	dump->u_dsize -= dump->u_tsize;
++	dump->u_ssize = 0;
++	for (i = 0; i < 8; i++)
++		dump->u_debugreg[i] = current->thread.debugreg[i];  
++
++	if (dump->start_stack < TASK_SIZE)
++		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
++
++	dump->regs.ebx = regs->ebx;
++	dump->regs.ecx = regs->ecx;
++	dump->regs.edx = regs->edx;
++	dump->regs.esi = regs->esi;
++	dump->regs.edi = regs->edi;
++	dump->regs.ebp = regs->ebp;
++	dump->regs.eax = regs->eax;
++	dump->regs.ds = regs->xds;
++	dump->regs.es = regs->xes;
++	savesegment(fs,dump->regs.fs);
++	savesegment(gs,dump->regs.gs);
++	dump->regs.orig_eax = regs->orig_eax;
++	dump->regs.eip = regs->eip;
++	dump->regs.cs = regs->xcs;
++	dump->regs.eflags = regs->eflags;
++	dump->regs.esp = regs->esp;
++	dump->regs.ss = regs->xss;
++
++	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
++}
++
++/* 
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++	struct pt_regs ptregs;
++	
++	ptregs = *(struct pt_regs *)
++		((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
++	ptregs.xcs &= 0xffff;
++	ptregs.xds &= 0xffff;
++	ptregs.xes &= 0xffff;
++	ptregs.xss &= 0xffff;
++
++	elf_core_copy_regs(regs, &ptregs);
++
++	boot_option_idle_override = 1;
++	return 1;
++}
++
++/*
++ *	switch_to(x,y) should switch tasks from x to y.
++ *
++ * We fsave/fwait so that an exception goes off at the right time
++ * (as a call from the fsave or fwait in effect) rather than to
++ * the wrong process. Lazy FP saving no longer makes any sense
++ * with modern CPUs, and this simplifies a lot of things (SMP
++ * and UP become the same).
++ *
++ * NOTE! We used to use the x86 hardware context switching. The
++ * reason for not using it any more becomes apparent when you
++ * try to recover gracefully from saved state that is no longer
++ * valid (stale segment register values in particular). With the
++ * hardware task-switch, there is no way to fix up bad state in
++ * a reasonable manner.
++ *
++ * The fact that Intel documents the hardware task-switching to
++ * be slow is a fairly red herring - this code is not noticeably
++ * faster. However, there _is_ some room for improvement here,
++ * so the performance issues may eventually be a valid point.
++ * More important, however, is the fact that this allows us much
++ * more flexibility.
++ *
++ * The return value (in %eax) will be the "prev" task after
++ * the task-switch, and shows up in ret_from_fork in entry.S,
++ * for example.
++ */
++struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++	struct thread_struct *prev = &prev_p->thread,
++				 *next = &next_p->thread;
++	int cpu = smp_processor_id();
++	struct tss_struct *tss = &per_cpu(init_tss, cpu);
++	physdev_op_t iopl_op, iobmp_op;
++	multicall_entry_t _mcl[8], *mcl = _mcl;
++
++	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
++
++	/*
++	 * This is basically '__unlazy_fpu', except that we queue a
++	 * multicall to indicate FPU task switch, rather than
++	 * synchronously trapping to Xen.
++	 */
++	if (prev_p->thread_info->status & TS_USEDFPU) {
++		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++		mcl->op      = __HYPERVISOR_fpu_taskswitch;
++		mcl->args[0] = 1;
++		mcl++;
++	}
++#if 0 /* lazy fpu sanity check */
++	else BUG_ON(!(read_cr0() & 8));
++#endif
++
++	/*
++	 * Reload esp0, LDT and the page table pointer:
++	 * This is load_esp0(tss, next) with a multicall.
++	 */
++	tss->esp0 = next->esp0;
++	mcl->op      = __HYPERVISOR_stack_switch;
++	mcl->args[0] = tss->ss0;
++	mcl->args[1] = tss->esp0;
++	mcl++;
++
++	/*
++	 * Load the per-thread Thread-Local Storage descriptor.
++	 * This is load_TLS(next, cpu) with multicalls.
++	 */
++#define C(i) do {							\
++	if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||	\
++		     next->tls_array[i].b != prev->tls_array[i].b)) {	\
++		mcl->op = __HYPERVISOR_update_descriptor;		\
++		*(u64 *)&mcl->args[0] =	virt_to_machine(		\
++			&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
++		*(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i];	\
++		mcl++;							\
++	}								\
++} while (0)
++	C(0); C(1); C(2);
++#undef C
++
++	if (unlikely(prev->io_pl != next->io_pl)) {
++		iopl_op.cmd             = PHYSDEVOP_SET_IOPL;
++		iopl_op.u.set_iopl.iopl = (next->io_pl == 0) ? 1 : next->io_pl;
++		mcl->op      = __HYPERVISOR_physdev_op;
++		mcl->args[0] = (unsigned long)&iopl_op;
++		mcl++;
++	}
++
++	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++		iobmp_op.cmd                     =
++			PHYSDEVOP_SET_IOBITMAP;
++		iobmp_op.u.set_iobitmap.bitmap   =
++			(char *)next->io_bitmap_ptr;
++		iobmp_op.u.set_iobitmap.nr_ports =
++			next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++		mcl->op      = __HYPERVISOR_physdev_op;
++		mcl->args[0] = (unsigned long)&iobmp_op;
++		mcl++;
++	}
++
++	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
++
++	/*
++	 * Restore %fs and %gs if needed.
++	 */
++	if (unlikely(next->fs | next->gs)) {
++		loadsegment(fs, next->fs);
++		loadsegment(gs, next->gs);
++	}
++
++	/*
++	 * Now maybe reload the debug registers
++	 */
++	if (unlikely(next->debugreg[7])) {
++		loaddebug(next, 0);
++		loaddebug(next, 1);
++		loaddebug(next, 2);
++		loaddebug(next, 3);
++		/* no 4 and 5 */
++		loaddebug(next, 6);
++		loaddebug(next, 7);
++	}
++
++	return prev_p;
++}
++
++asmlinkage int sys_fork(struct pt_regs regs)
++{
++	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++asmlinkage int sys_clone(struct pt_regs regs)
++{
++	unsigned long clone_flags;
++	unsigned long newsp;
++	int __user *parent_tidptr, *child_tidptr;
++
++	clone_flags = regs.ebx;
++	newsp = regs.ecx;
++	parent_tidptr = (int __user *)regs.edx;
++	child_tidptr = (int __user *)regs.edi;
++	if (!newsp)
++		newsp = regs.esp;
++	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage int sys_vfork(struct pt_regs regs)
++{
++	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage int sys_execve(struct pt_regs regs)
++{
++	int error;
++	char * filename;
++
++	filename = getname((char __user *) regs.ebx);
++	error = PTR_ERR(filename);
++	if (IS_ERR(filename))
++		goto out;
++	error = do_execve(filename,
++			(char __user * __user *) regs.ecx,
++			(char __user * __user *) regs.edx,
++			&regs);
++	if (error == 0) {
++		task_lock(current);
++		current->ptrace &= ~PT_DTRACE;
++		task_unlock(current);
++		/* Make sure we don't return using sysenter.. */
++		set_thread_flag(TIF_IRET);
++	}
++	putname(filename);
++out:
++	return error;
++}
++
++#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
++#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long ebp, esp, eip;
++	unsigned long stack_page;
++	int count = 0;
++	if (!p || p == current || p->state == TASK_RUNNING)
++		return 0;
++	stack_page = (unsigned long)p->thread_info;
++	esp = p->thread.esp;
++	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
++		return 0;
++	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
++	ebp = *(unsigned long *) esp;
++	do {
++		if (ebp < stack_page || ebp > top_ebp+stack_page)
++			return 0;
++		eip = *(unsigned long *) (ebp+4);
++		if (!in_sched_functions(eip))
++			return eip;
++		ebp = *(unsigned long *) ebp;
++	} while (count++ < 16);
++	return 0;
++}
++
++/*
++ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
++ */
++static int get_free_idx(void)
++{
++	struct thread_struct *t = &current->thread;
++	int idx;
++
++	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
++		if (desc_empty(t->tls_array + idx))
++			return idx + GDT_ENTRY_TLS_MIN;
++	return -ESRCH;
++}
++
++/*
++ * Set a given TLS descriptor:
++ */
++asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
++{
++	struct thread_struct *t = &current->thread;
++	struct user_desc info;
++	struct desc_struct *desc;
++	int cpu, idx;
++
++	if (copy_from_user(&info, u_info, sizeof(info)))
++		return -EFAULT;
++	idx = info.entry_number;
++
++	/*
++	 * index -1 means the kernel should try to find and
++	 * allocate an empty descriptor:
++	 */
++	if (idx == -1) {
++		idx = get_free_idx();
++		if (idx < 0)
++			return idx;
++		if (put_user(idx, &u_info->entry_number))
++			return -EFAULT;
++	}
++
++	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++		return -EINVAL;
++
++	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++	/*
++	 * We must not get preempted while modifying the TLS.
++	 */
++	cpu = get_cpu();
++
++	if (LDT_empty(&info)) {
++		desc->a = 0;
++		desc->b = 0;
++	} else {
++		desc->a = LDT_entry_a(&info);
++		desc->b = LDT_entry_b(&info);
++	}
++	load_TLS(t, cpu);
++
++	put_cpu();
++
++	return 0;
++}
++
++/*
++ * Get the current Thread-Local Storage area:
++ */
++
++#define GET_BASE(desc) ( \
++	(((desc)->a >> 16) & 0x0000ffff) | \
++	(((desc)->b << 16) & 0x00ff0000) | \
++	( (desc)->b        & 0xff000000)   )
++
++#define GET_LIMIT(desc) ( \
++	((desc)->a & 0x0ffff) | \
++	 ((desc)->b & 0xf0000) )
++	
++#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
++#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
++#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
++#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
++#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
++#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
++
++asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
++{
++	struct user_desc info;
++	struct desc_struct *desc;
++	int idx;
++
++	if (get_user(idx, &u_info->entry_number))
++		return -EFAULT;
++	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++		return -EINVAL;
++
++	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++	info.entry_number = idx;
++	info.base_addr = GET_BASE(desc);
++	info.limit = GET_LIMIT(desc);
++	info.seg_32bit = GET_32BIT(desc);
++	info.contents = GET_CONTENTS(desc);
++	info.read_exec_only = !GET_WRITABLE(desc);
++	info.limit_in_pages = GET_LIMIT_PAGES(desc);
++	info.seg_not_present = !GET_PRESENT(desc);
++	info.useable = GET_USEABLE(desc);
++
++	if (copy_to_user(u_info, &info, sizeof(info)))
++		return -EFAULT;
++	return 0;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++	if (randomize_va_space)
++		sp -= get_random_int() % 8192;
++	return sp & ~0xf;
++}
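
A pattern worth noting in the process.c hunk above is hypercall batching:
__switch_to() queues the stack switch, the FPU task switch, TLS descriptor
updates and iopl/iobitmap changes into a multicall_entry_t array and submits
them with a single HYPERVISOR_multicall(), so a context switch traps into Xen
only once. Stripped of the surrounding bookkeeping, the pattern looks roughly
as follows; this is a sketch for illustration, and new_ss0/new_esp0 are
placeholder values rather than code from the patch:

/* A sketch only: assumes the Xen headers this patch uses elsewhere
 * (e.g. <asm/hypervisor.h>) for multicall_entry_t and the hypercall stubs. */
static void example_batched_switch(unsigned long new_ss0, unsigned long new_esp0)
{
	multicall_entry_t mcl[2], *m = mcl;

	/* queue a kernel stack switch */
	m->op      = __HYPERVISOR_stack_switch;
	m->args[0] = new_ss0;
	m->args[1] = new_esp0;
	m++;

	/* queue an FPU task-switch notification */
	m->op      = __HYPERVISOR_fpu_taskswitch;
	m->args[0] = 1;
	m++;

	/* a single trap into Xen executes both queued operations in order */
	(void)HYPERVISOR_multicall(mcl, m - mcl);
}
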
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/quirks.c linux-2.6.12-xen/arch/xen/i386/kernel/quirks.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/quirks.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/quirks.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,49 @@
++/*
++ * This file contains work-arounds for x86 and x86_64 platform bugs.
++ */
++#include <linux/config.h>
++#include <linux/pci.h>
++#include <linux/irq.h>
++
++#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
++
++static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
++{
++	u8 config, rev;
++	u32 word;
++
++	/* BIOS may enable hardware IRQ balancing for
++	 * E7520/E7320/E7525(revision ID 0x9 and below)
++	 * based platforms.
++	 * Disable SW irqbalance/affinity on those platforms.
++	 */
++	pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
++	if (rev > 0x9)
++		return;
++
++	printk(KERN_INFO "Intel E7520/7320/7525 detected.");
++
++	/* enable access to config space*/
++	pci_read_config_byte(dev, 0xf4, &config);
++	config |= 0x2;
++	pci_write_config_byte(dev, 0xf4, config);
++
++	/* read xTPR register */
++	raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
++
++	if (!(word & (1 << 13))) {
++		dom0_op_t op;
++		printk(KERN_INFO "Disabling irq balancing and affinity\n");
++		op.cmd = DOM0_PLATFORM_QUIRK;
++		op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
++		(void)HYPERVISOR_dom0_op(&op);
++	}
++
++	config &= ~0x2;
++	/* disable access to config space*/
++	pci_write_config_byte(dev, 0xf4, config);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7320_MCH,	quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7525_MCH,	quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7520_MCH,	quirk_intel_irqbalance);
++#endif
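
The quirk above follows the standard PCI fixup pattern: a probe-and-tweak
handler plus one DECLARE_PCI_FIXUP_FINAL() registration per affected device
ID, with pci_read_config_byte()/pci_write_config_byte() used to toggle the
extended config-space access bit. Reduced to its skeleton, and with a made-up
device ID and handler body standing in for the real ones, such a fixup looks
roughly like this:

#include <linux/init.h>
#include <linux/pci.h>

static void __devinit quirk_example(struct pci_dev *dev)
{
	u8 val;

	/* read a config byte, set a bit, write it back */
	pci_read_config_byte(dev, 0xf4, &val);
	pci_write_config_byte(dev, 0xf4, val | 0x2);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example);
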
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/setup.c linux-2.6.12-xen/arch/xen/i386/kernel/setup.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/setup.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/setup.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,1790 @@
++/*
++ *  linux/arch/i386/kernel/setup.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ *
++ *  Memory region support
++ *	David Parsons <orc at pell.chi.il.us>, July-August 1999
++ *
++ *  Added E820 sanitization routine (removes overlapping memory regions);
++ *  Brian Moyle <bmoyle at mvista.com>, February 2001
++ *
++ * Moved CPU detection code to cpu/${cpu}.c
++ *    Patrick Mochel <mochel at osdl.org>, March 2002
++ *
++ *  Provisions for empty E820 memory regions (reported by certain BIOSes).
++ *  Alex Achenbach <xela at slit.de>, December 2002.
++ *
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/ioport.h>
++#include <linux/acpi.h>
++#include <linux/apm_bios.h>
++#include <linux/initrd.h>
++#include <linux/bootmem.h>
++#include <linux/seq_file.h>
++#include <linux/console.h>
++#include <linux/mca.h>
++#include <linux/root_dev.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <linux/nodemask.h>
++#include <linux/kernel.h>
++#include <linux/percpu.h>
++#include <linux/notifier.h>
++#include <video/edid.h>
++#include <asm/e820.h>
++#include <asm/mpspec.h>
++#include <asm/setup.h>
++#include <asm/arch_hooks.h>
++#include <asm/sections.h>
++#include <asm/io_apic.h>
++#include <asm/ist.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/xen-public/physdev.h>
++#include <asm-xen/xen-public/memory.h>
++#include <asm-xen/features.h>
++#include "setup_arch_pre.h"
++#include <bios_ebda.h>
++
++/* Allows setting of maximum possible memory size  */
++static unsigned long xen_override_max_pfn;
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++	xen_panic_event, NULL, 0 /* try to go last */
++};
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++int disable_pse __initdata = 0;
++
++/*
++ * Machine setup..
++ */
++
++#ifdef CONFIG_EFI
++int efi_enabled = 0;
++EXPORT_SYMBOL(efi_enabled);
++#endif
++
++/* cpu data as detected by the assembly code in head.S */
++struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
++/* common cpu data for all cpus */
++struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
++
++unsigned long mmu_cr4_features;
++
++#ifdef	CONFIG_ACPI_INTERPRETER
++	int acpi_disabled = 0;
++#else
++	int acpi_disabled = 1;
++#endif
++EXPORT_SYMBOL(acpi_disabled);
++
++#ifdef	CONFIG_ACPI_BOOT
++int __initdata acpi_force = 0;
++extern acpi_interrupt_flags	acpi_sci_flags;
++#endif
++
++/* for MCA, but anyone else can use it if they want */
++unsigned int machine_id;
++unsigned int machine_submodel_id;
++unsigned int BIOS_revision;
++unsigned int mca_pentium_flag;
++
++/* For PCI or other memory-mapped resources */
++unsigned long pci_mem_start = 0x10000000;
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++/* user-defined highmem size */
++static unsigned int highmem_pages = -1;
++
++/*
++ * Setup options
++ */
++struct drive_info_struct { char dummy[32]; } drive_info;
++struct screen_info screen_info;
++struct apm_info apm_info;
++struct sys_desc_table_struct {
++	unsigned short length;
++	unsigned char table[0];
++};
++struct edid_info edid_info;
++struct ist_info ist_info;
++struct e820map e820;
++
++extern void early_cpu_init(void);
++extern void dmi_scan_machine(void);
++extern void generic_apic_probe(char *);
++extern int root_mountflags;
++
++unsigned long saved_videomode;
++
++#define RAMDISK_IMAGE_START_MASK  	0x07FF
++#define RAMDISK_PROMPT_FLAG		0x8000
++#define RAMDISK_LOAD_FLAG		0x4000	
++
++static char command_line[COMMAND_LINE_SIZE];
++
++unsigned char __initdata boot_params[PARAM_SIZE];
++
++static struct resource data_resource = {
++	.name	= "Kernel data",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource code_resource = {
++	.name	= "Kernel code",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++static struct resource system_rom_resource = {
++	.name	= "System ROM",
++	.start	= 0xf0000,
++	.end	= 0xfffff,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource extension_rom_resource = {
++	.name	= "Extension ROM",
++	.start	= 0xe0000,
++	.end	= 0xeffff,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource adapter_rom_resources[] = { {
++	.name 	= "Adapter ROM",
++	.start	= 0xc8000,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++	.name 	= "Adapter ROM",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++	.name 	= "Adapter ROM",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++	.name 	= "Adapter ROM",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++	.name 	= "Adapter ROM",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++	.name 	= "Adapter ROM",
++	.start	= 0,
++	.end	= 0,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++} };
++
++#define ADAPTER_ROM_RESOURCES \
++	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++static struct resource video_rom_resource = {
++	.name 	= "Video ROM",
++	.start	= 0xc0000,
++	.end	= 0xc7fff,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++#endif
++
++static struct resource video_ram_resource = {
++	.name	= "Video RAM area",
++	.start	= 0xa0000,
++	.end	= 0xbffff,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource standard_io_resources[] = { {
++	.name	= "dma1",
++	.start	= 0x0000,
++	.end	= 0x001f,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "pic1",
++	.start	= 0x0020,
++	.end	= 0x0021,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name   = "timer0",
++	.start	= 0x0040,
++	.end    = 0x0043,
++	.flags  = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name   = "timer1",
++	.start  = 0x0050,
++	.end    = 0x0053,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "keyboard",
++	.start	= 0x0060,
++	.end	= 0x006f,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "dma page reg",
++	.start	= 0x0080,
++	.end	= 0x008f,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "pic2",
++	.start	= 0x00a0,
++	.end	= 0x00a1,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "dma2",
++	.start	= 0x00c0,
++	.end	= 0x00df,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++	.name	= "fpu",
++	.start	= 0x00f0,
++	.end	= 0x00ff,
++	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
++} };
++
++#define STANDARD_IO_RESOURCES \
++	(sizeof standard_io_resources / sizeof standard_io_resources[0])
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++	unsigned char *p, sum = 0;
++
++	for (p = rom; p < rom + length; p++)
++		sum += *p;
++	return sum == 0;
++}
++
++static void __init probe_roms(void)
++{
++	unsigned long start, length, upper;
++	unsigned char *rom;
++	int	      i;
++
++	/* Nothing to do if not running in dom0. */
++	if (!(xen_start_info->flags & SIF_INITDOMAIN))
++		return;
++
++	/* video rom */
++	upper = adapter_rom_resources[0].start;
++	for (start = video_rom_resource.start; start < upper; start += 2048) {
++		rom = isa_bus_to_virt(start);
++		if (!romsignature(rom))
++			continue;
++
++		video_rom_resource.start = start;
++
++		/* 0 < length <= 0x7f * 512, historically */
++		length = rom[2] * 512;
++
++		/* if checksum okay, trust length byte */
++		if (length && romchecksum(rom, length))
++			video_rom_resource.end = start + length - 1;
++
++		request_resource(&iomem_resource, &video_rom_resource);
++		break;
++	}
++
++	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++	if (start < upper)
++		start = upper;
++
++	/* system rom */
++	request_resource(&iomem_resource, &system_rom_resource);
++	upper = system_rom_resource.start;
++
++	/* check for extension rom (ignore length byte!) */
++	rom = isa_bus_to_virt(extension_rom_resource.start);
++	if (romsignature(rom)) {
++		length = extension_rom_resource.end - extension_rom_resource.start + 1;
++		if (romchecksum(rom, length)) {
++			request_resource(&iomem_resource, &extension_rom_resource);
++			upper = extension_rom_resource.start;
++		}
++	}
++
++	/* check for adapter roms on 2k boundaries */
++	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++		rom = isa_bus_to_virt(start);
++		if (!romsignature(rom))
++			continue;
++
++		/* 0 < length <= 0x7f * 512, historically */
++		length = rom[2] * 512;
++
++		/* but accept any length that fits if checksum okay */
++		if (!length || start + length > upper || !romchecksum(rom, length))
++			continue;
++
++		adapter_rom_resources[i].start = start;
++		adapter_rom_resources[i].end = start + length - 1;
++		request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++		start = adapter_rom_resources[i++].end & ~2047UL;
++	}
++}
++#endif
++
++/*
++ * Point at the empty zero page to start with. We map the real shared_info
++ * page as soon as fixmap is up and running.
++ */
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++static void __init limit_regions(unsigned long long size)
++{
++	unsigned long long current_addr = 0;
++	int i;
++
++	if (efi_enabled) {
++		for (i = 0; i < memmap.nr_map; i++) {
++			current_addr = memmap.map[i].phys_addr +
++				       (memmap.map[i].num_pages << 12);
++			if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
++				if (current_addr >= size) {
++					memmap.map[i].num_pages -=
++						(((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
++					memmap.nr_map = i + 1;
++					return;
++				}
++			}
++		}
++	}
++	for (i = 0; i < e820.nr_map; i++) {
++		if (e820.map[i].type == E820_RAM) {
++			current_addr = e820.map[i].addr + e820.map[i].size;
++			if (current_addr >= size) {
++				e820.map[i].size -= current_addr-size;
++				e820.nr_map = i + 1;
++				return;
++			}
++		}
++	}
++}
++
++static void __init add_memory_region(unsigned long long start,
++                                  unsigned long long size, int type)
++{
++	int x;
++
++	if (!efi_enabled) {
++       		x = e820.nr_map;
++
++		if (x == E820MAX) {
++		    printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++		    return;
++		}
++
++		e820.map[x].addr = start;
++		e820.map[x].size = size;
++		e820.map[x].type = type;
++		e820.nr_map++;
++	}
++} /* add_memory_region */
++
++#define E820_DEBUG	1
++
++static void __init print_memory_map(char *who)
++{
++	int i;
++
++	for (i = 0; i < e820.nr_map; i++) {
++		printk(" %s: %016Lx - %016Lx ", who,
++			e820.map[i].addr,
++			e820.map[i].addr + e820.map[i].size);
++		switch (e820.map[i].type) {
++		case E820_RAM:	printk("(usable)\n");
++				break;
++		case E820_RESERVED:
++				printk("(reserved)\n");
++				break;
++		case E820_ACPI:
++				printk("(ACPI data)\n");
++				break;
++		case E820_NVS:
++				printk("(ACPI NVS)\n");
++				break;
++		default:	printk("type %lu\n", e820.map[i].type);
++				break;
++		}
++	}
++}
++
++#if 0
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries.  The following 
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++struct change_member {
++	struct e820entry *pbios; /* pointer to original bios entry */
++	unsigned long long addr; /* address for this change point */
++};
++static struct change_member change_point_list[2*E820MAX] __initdata;
++static struct change_member *change_point[2*E820MAX] __initdata;
++static struct e820entry *overlap_list[E820MAX] __initdata;
++static struct e820entry new_bios[E820MAX] __initdata;
++
++static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++	struct change_member *change_tmp;
++	unsigned long current_type, last_type;
++	unsigned long long last_addr;
++	int chgidx, still_changing;
++	int overlap_entries;
++	int new_bios_entry;
++	int old_nr, new_nr, chg_nr;
++	int i;
++
++	/*
++		Visually we're performing the following (1,2,3,4 = memory types)...
++
++		Sample memory map (w/overlaps):
++		   ____22__________________
++		   ______________________4_
++		   ____1111________________
++		   _44_____________________
++		   11111111________________
++		   ____________________33__
++		   ___________44___________
++		   __________33333_________
++		   ______________22________
++		   ___________________2222_
++		   _________111111111______
++		   _____________________11_
++		   _________________4______
++
++		Sanitized equivalent (no overlap):
++		   1_______________________
++		   _44_____________________
++		   ___1____________________
++		   ____22__________________
++		   ______11________________
++		   _________1______________
++		   __________3_____________
++		   ___________44___________
++		   _____________33_________
++		   _______________2________
++		   ________________1_______
++		   _________________4______
++		   ___________________2____
++		   ____________________33__
++		   ______________________4_
++	*/
++
++	/* if there's only one memory region, don't bother */
++	if (*pnr_map < 2)
++		return -1;
++
++	old_nr = *pnr_map;
++
++	/* bail out if we find any unreasonable addresses in bios map */
++	for (i=0; i<old_nr; i++)
++		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++			return -1;
++
++	/* create pointers for initial change-point information (for sorting) */
++	for (i=0; i < 2*old_nr; i++)
++		change_point[i] = &change_point_list[i];
++
++	/* record all known change-points (starting and ending addresses),
++	   omitting those that are for empty memory regions */
++	chgidx = 0;
++	for (i=0; i < old_nr; i++)	{
++		if (biosmap[i].size != 0) {
++			change_point[chgidx]->addr = biosmap[i].addr;
++			change_point[chgidx++]->pbios = &biosmap[i];
++			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++			change_point[chgidx++]->pbios = &biosmap[i];
++		}
++	}
++	chg_nr = chgidx;    	/* true number of change-points */
++
++	/* sort change-point list by memory addresses (low -> high) */
++	still_changing = 1;
++	while (still_changing)	{
++		still_changing = 0;
++		for (i=1; i < chg_nr; i++)  {
++			/* if <current_addr> > <last_addr>, swap */
++			/* or, if current=<start_addr> & last=<end_addr>, swap */
++			if ((change_point[i]->addr < change_point[i-1]->addr) ||
++				((change_point[i]->addr == change_point[i-1]->addr) &&
++				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
++				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++			   )
++			{
++				change_tmp = change_point[i];
++				change_point[i] = change_point[i-1];
++				change_point[i-1] = change_tmp;
++				still_changing=1;
++			}
++		}
++	}
++
++	/* create a new bios memory map, removing overlaps */
++	overlap_entries=0;	 /* number of entries in the overlap table */
++	new_bios_entry=0;	 /* index for creating new bios map entries */
++	last_type = 0;		 /* start with undefined memory type */
++	last_addr = 0;		 /* start with 0 as last starting address */
++	/* loop through change-points, determining the effect on the new bios map */
++	for (chgidx=0; chgidx < chg_nr; chgidx++)
++	{
++		/* keep track of all overlapping bios entries */
++		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++		{
++			/* add map entry to overlap list (> 1 entry implies an overlap) */
++			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++		}
++		else
++		{
++			/* remove entry from list (order independent, so swap with last) */
++			for (i=0; i<overlap_entries; i++)
++			{
++				if (overlap_list[i] == change_point[chgidx]->pbios)
++					overlap_list[i] = overlap_list[overlap_entries-1];
++			}
++			overlap_entries--;
++		}
++		/* if there are overlapping entries, decide which "type" to use */
++		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++		current_type = 0;
++		for (i=0; i<overlap_entries; i++)
++			if (overlap_list[i]->type > current_type)
++				current_type = overlap_list[i]->type;
++		/* continue building up new bios map based on this information */
++		if (current_type != last_type)	{
++			if (last_type != 0)	 {
++				new_bios[new_bios_entry].size =
++					change_point[chgidx]->addr - last_addr;
++				/* move forward only if the new size was non-zero */
++				if (new_bios[new_bios_entry].size != 0)
++					if (++new_bios_entry >= E820MAX)
++						break; 	/* no more space left for new bios entries */
++			}
++			if (current_type != 0)	{
++				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++				new_bios[new_bios_entry].type = current_type;
++				last_addr=change_point[chgidx]->addr;
++			}
++			last_type = current_type;
++		}
++	}
++	new_nr = new_bios_entry;   /* retain count for new bios entries */
++
++	/* copy new bios mapping into original location */
++	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++	*pnr_map = new_nr;
++
++	return 0;
++}
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it..
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory.  If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and most every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++	/* Only one memory region (or negative)? Ignore it */
++	if (nr_map < 2)
++		return -1;
++
++	do {
++		unsigned long long start = biosmap->addr;
++		unsigned long long size = biosmap->size;
++		unsigned long long end = start + size;
++		unsigned long type = biosmap->type;
++
++		/* Overflow in 64 bits? Ignore the memory map. */
++		if (start > end)
++			return -1;
++
++		/*
++		 * Some BIOSes claim RAM in the 640k - 1M region.
++		 * Not right. Fix it up.
++		 */
++		if (type == E820_RAM) {
++			if (start < 0x100000ULL && end > 0xA0000ULL) {
++				if (start < 0xA0000ULL)
++					add_memory_region(start, 0xA0000ULL-start, type);
++				if (end <= 0x100000ULL)
++					continue;
++				start = 0x100000ULL;
++				size = end - start;
++			}
++		}
++		add_memory_region(start, size, type);
++	} while (biosmap++,--nr_map);
++	return 0;
++}
++#endif
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ *              from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++     edd.edd_info_nr = EDD_NR;
++}
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++/*
++ * Do NOT EVER look at the BIOS memory size location.
++ * It does not work on many machines.
++ */
++#define LOWMEMSIZE()	(0x9f000)
++
++static void __init parse_cmdline_early (char ** cmdline_p)
++{
++	char c = ' ', *to = command_line, *from = saved_command_line;
++	int len = 0, max_cmdline;
++	int userdef = 0;
++
++	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++		max_cmdline = COMMAND_LINE_SIZE;
++	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++	/* Save unparsed command line copy for /proc/cmdline */
++	saved_command_line[max_cmdline-1] = '\0';
++
++	for (;;) {
++		if (c != ' ')
++			goto next_char;
++		/*
++		 * "mem=nopentium" disables the 4MB page tables.
++		 * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
++		 * to <mem>, overriding the bios size.
++		 * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
++		 * <start> to <start>+<mem>, overriding the bios size.
++		 *
++		 * HPA tells me bootloaders need to parse mem=, so no new
++		 * option should be mem=  [also see Documentation/i386/boot.txt]
++		 */
++		if (!memcmp(from, "mem=", 4)) {
++			if (to != command_line)
++				to--;
++			if (!memcmp(from+4, "nopentium", 9)) {
++				from += 9+4;
++				clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++				disable_pse = 1;
++			} else {
++				/* If the user specifies memory size, we
++				 * limit the BIOS-provided memory map to
++				 * that size. exactmap can be used to specify
++				 * the exact map. mem=number can be used to
++				 * trim the existing memory map.
++				 */
++				unsigned long long mem_size;
++ 
++				mem_size = memparse(from+4, &from);
++#if 0
++				limit_regions(mem_size);
++				userdef=1;
++#else
++				xen_override_max_pfn =
++					(unsigned long)(mem_size>>PAGE_SHIFT);
++#endif
++			}
++		}
++
++		else if (!memcmp(from, "memmap=", 7)) {
++			if (to != command_line)
++				to--;
++			if (!memcmp(from+7, "exactmap", 8)) {
++				from += 8+7;
++				e820.nr_map = 0;
++				userdef = 1;
++			} else {
++				/* If the user specifies memory size, we
++				 * limit the BIOS-provided memory map to
++				 * that size. exactmap can be used to specify
++				 * the exact map. mem=number can be used to
++				 * trim the existing memory map.
++				 */
++				unsigned long long start_at, mem_size;
++ 
++				mem_size = memparse(from+7, &from);
++				if (*from == '@') {
++					start_at = memparse(from+1, &from);
++					add_memory_region(start_at, mem_size, E820_RAM);
++				} else if (*from == '#') {
++					start_at = memparse(from+1, &from);
++					add_memory_region(start_at, mem_size, E820_ACPI);
++				} else if (*from == '$') {
++					start_at = memparse(from+1, &from);
++					add_memory_region(start_at, mem_size, E820_RESERVED);
++				} else {
++					limit_regions(mem_size);
++					userdef=1;
++				}
++			}
++		}
++
++		else if (!memcmp(from, "noexec=", 7))
++			noexec_setup(from + 7);
++
++
++#ifdef  CONFIG_X86_MPPARSE
++		/*
++		 * If the BIOS enumerates physical processors before logical,
++		 * maxcpus=N at enumeration-time can be used to disable HT.
++		 */
++		else if (!memcmp(from, "maxcpus=", 8)) {
++			extern unsigned int maxcpus;
++
++			maxcpus = simple_strtoul(from + 8, NULL, 0);
++		}
++#endif
++
++#ifdef CONFIG_ACPI_BOOT
++		/* "acpi=off" disables both ACPI table parsing and interpreter */
++		else if (!memcmp(from, "acpi=off", 8)) {
++			disable_acpi();
++		}
++
++		/* acpi=force to over-ride black-list */
++		else if (!memcmp(from, "acpi=force", 10)) {
++			acpi_force = 1;
++			acpi_ht = 1;
++			acpi_disabled = 0;
++		}
++
++		/* acpi=strict disables out-of-spec workarounds */
++		else if (!memcmp(from, "acpi=strict", 11)) {
++			acpi_strict = 1;
++		}
++
++		/* Limit ACPI just to boot-time to enable HT */
++		else if (!memcmp(from, "acpi=ht", 7)) {
++			if (!acpi_force)
++				disable_acpi();
++			acpi_ht = 1;
++		}
++		
++		/* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
++		else if (!memcmp(from, "pci=noacpi", 10)) {
++			acpi_disable_pci();
++		}
++		/* "acpi=noirq" disables ACPI interrupt routing */
++		else if (!memcmp(from, "acpi=noirq", 10)) {
++			acpi_noirq_set();
++		}
++
++		else if (!memcmp(from, "acpi_sci=edge", 13))
++			acpi_sci_flags.trigger =  1;
++
++		else if (!memcmp(from, "acpi_sci=level", 14))
++			acpi_sci_flags.trigger = 3;
++
++		else if (!memcmp(from, "acpi_sci=high", 13))
++			acpi_sci_flags.polarity = 1;
++
++		else if (!memcmp(from, "acpi_sci=low", 12))
++			acpi_sci_flags.polarity = 3;
++
++#ifdef CONFIG_X86_IO_APIC
++		else if (!memcmp(from, "acpi_skip_timer_override", 24))
++			acpi_skip_timer_override = 1;
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++		/* disable IO-APIC */
++		else if (!memcmp(from, "noapic", 6))
++			disable_ioapic_setup();
++#endif /* CONFIG_X86_LOCAL_APIC */
++#endif /* CONFIG_ACPI_BOOT */
++
++		/*
++		 * highmem=size forces highmem to be exactly 'size' bytes.
++		 * This works even on boxes that have no highmem otherwise.
++		 * This also works to reduce highmem size on bigger boxes.
++		 */
++		else if (!memcmp(from, "highmem=", 8))
++			highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
++	
++		/*
++		 * vmalloc=size forces the vmalloc area to be exactly 'size'
++		 * bytes. This can be used to increase (or decrease) the
++		 * vmalloc area - the default is 128m.
++		 */
++		else if (!memcmp(from, "vmalloc=", 8))
++			__VMALLOC_RESERVE = memparse(from+8, &from);
++
++	next_char:
++		c = *(from++);
++		if (!c)
++			break;
++		if (COMMAND_LINE_SIZE <= ++len)
++			break;
++		*(to++) = c;
++	}
++	*to = '\0';
++	*cmdline_p = command_line;
++	if (userdef) {
++		printk(KERN_INFO "user-defined physical RAM map:\n");
++		print_memory_map("user");
++	}
++}
++
++#if 0 /* !XEN */
++/*
++ * Callback for efi_memory_walk.
++ */
++static int __init
++efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
++{
++	unsigned long *max_pfn = arg, pfn;
++
++	if (start < end) {
++		pfn = PFN_UP(end -1);
++		if (pfn > *max_pfn)
++			*max_pfn = pfn;
++	}
++	return 0;
++}
++
++
++/*
++ * Find the highest page frame number we have available
++ */
++void __init find_max_pfn(void)
++{
++	int i;
++
++	max_pfn = 0;
++	if (efi_enabled) {
++		efi_memmap_walk(efi_find_max_pfn, &max_pfn);
++		return;
++	}
++
++	for (i = 0; i < e820.nr_map; i++) {
++		unsigned long start, end;
++		/* RAM? */
++		if (e820.map[i].type != E820_RAM)
++			continue;
++		start = PFN_UP(e820.map[i].addr);
++		end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++		if (start >= end)
++			continue;
++		if (end > max_pfn)
++			max_pfn = end;
++	}
++}
++#else
++/* We don't use the fake e820 because we need to respond to user override. */
++void __init find_max_pfn(void)
++{
++	if (xen_override_max_pfn == 0) {
++		max_pfn = xen_start_info->nr_pages;
++		/* Default 8MB slack (to balance backend allocations). */
++		max_pfn += 8 << (20 - PAGE_SHIFT);
++	} else if (xen_override_max_pfn > xen_start_info->nr_pages) {
++		max_pfn = xen_override_max_pfn;
++	} else {
++		max_pfn = xen_start_info->nr_pages;
++	}
++}
++#endif /* XEN */
++
++/*
++ * Determine low and high memory ranges:
++ */
++unsigned long __init find_max_low_pfn(void)
++{
++	unsigned long max_low_pfn;
++
++	max_low_pfn = max_pfn;
++	if (max_low_pfn > MAXMEM_PFN) {
++		if (highmem_pages == -1)
++			highmem_pages = max_pfn - MAXMEM_PFN;
++		if (highmem_pages + MAXMEM_PFN < max_pfn)
++			max_pfn = MAXMEM_PFN + highmem_pages;
++		if (highmem_pages + MAXMEM_PFN > max_pfn) {
++			printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
++			highmem_pages = 0;
++		}
++		max_low_pfn = MAXMEM_PFN;
++#ifndef CONFIG_HIGHMEM
++		/* Maximum memory usable is what is directly addressable */
++		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
++					MAXMEM>>20);
++		if (max_pfn > MAX_NONPAE_PFN)
++			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++		else
++			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
++		max_pfn = MAXMEM_PFN;
++#else /* !CONFIG_HIGHMEM */
++#ifndef CONFIG_X86_PAE
++		if (max_pfn > MAX_NONPAE_PFN) {
++			max_pfn = MAX_NONPAE_PFN;
++			printk(KERN_WARNING "Warning only 4GB will be used.\n");
++			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++		}
++#endif /* !CONFIG_X86_PAE */
++#endif /* !CONFIG_HIGHMEM */
++	} else {
++		if (highmem_pages == -1)
++			highmem_pages = 0;
++#ifdef CONFIG_HIGHMEM
++		if (highmem_pages >= max_pfn) {
++			printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
++			highmem_pages = 0;
++		}
++		if (highmem_pages) {
++			if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
++				printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
++				highmem_pages = 0;
++			}
++			max_low_pfn -= highmem_pages;
++		}
++#else
++		if (highmem_pages)
++			printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
++#endif
++	}
++	return max_low_pfn;
++}
++
++/*
++ * Free all available memory for boot time allocation.  Used
++ * as a callback function by efi_memory_walk()
++ */
++
++static int __init
++free_available_memory(unsigned long start, unsigned long end, void *arg)
++{
++	/* check max_low_pfn */
++	if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
++		return 0;
++	if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
++		end = (max_low_pfn + 1) << PAGE_SHIFT;
++	if (start < end)
++		free_bootmem(start, end - start);
++
++	return 0;
++}
++/*
++ * Register fully available low RAM pages with the bootmem allocator.
++ */
++static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
++{
++	int i;
++
++	if (efi_enabled) {
++		efi_memmap_walk(free_available_memory, NULL);
++		return;
++	}
++	for (i = 0; i < e820.nr_map; i++) {
++		unsigned long curr_pfn, last_pfn, size;
++		/*
++		 * Reserve usable low memory
++		 */
++		if (e820.map[i].type != E820_RAM)
++			continue;
++		/*
++		 * We are rounding up the start address of usable memory:
++		 */
++		curr_pfn = PFN_UP(e820.map[i].addr);
++		if (curr_pfn >= max_low_pfn)
++			continue;
++		/*
++		 * ... and at the end of the usable range downwards:
++		 */
++		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++
++		if (last_pfn > max_low_pfn)
++			last_pfn = max_low_pfn;
++
++		/*
++		 * .. finally, did all the rounding and playing
++		 * around just make the area go away?
++		 */
++		if (last_pfn <= curr_pfn)
++			continue;
++
++		size = last_pfn - curr_pfn;
++		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
++	}
++}
++
++#ifndef CONFIG_XEN
++/*
++ * workaround for Dell systems that neglect to reserve EBDA
++ */
++static void __init reserve_ebda_region(void)
++{
++	unsigned int addr;
++	addr = get_bios_ebda();
++	if (addr)
++		reserve_bootmem(addr, PAGE_SIZE);	
++}
++#endif
++
++#ifndef CONFIG_DISCONTIGMEM
++void __init setup_bootmem_allocator(void);
++static unsigned long __init setup_memory(void)
++{
++	/*
++	 * partially used pages are not usable - thus
++	 * we are rounding upwards:
++	 */
++ 	min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
++		xen_start_info->nr_pt_frames;
++
++	find_max_pfn();
++
++	max_low_pfn = find_max_low_pfn();
++
++#ifdef CONFIG_HIGHMEM
++	highstart_pfn = highend_pfn = max_pfn;
++	if (max_pfn > max_low_pfn) {
++		highstart_pfn = max_low_pfn;
++	}
++	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
++		pages_to_mb(highend_pfn - highstart_pfn));
++#endif
++	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
++			pages_to_mb(max_low_pfn));
++
++	setup_bootmem_allocator();
++
++	return max_low_pfn;
++}
++
++void __init zone_sizes_init(void)
++{
++	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
++	unsigned int max_dma, low;
++
++	/*
++	 * XEN: Our notion of "DMA memory" is fake when running over Xen.
++	 * We simply put all RAM in the DMA zone so that those drivers which
++	 * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
++	 * Those drivers that *do* require lowmem are screwed anyway when
++	 * running over Xen!
++	 */
++	max_dma = max_low_pfn;
++	low = max_low_pfn;
++
++	if (low < max_dma)
++		zones_size[ZONE_DMA] = low;
++	else {
++		zones_size[ZONE_DMA] = max_dma;
++		zones_size[ZONE_NORMAL] = low - max_dma;
++#ifdef CONFIG_HIGHMEM
++		zones_size[ZONE_HIGHMEM] = highend_pfn - low;
++#endif
++	}
++	free_area_init(zones_size);
++}
++#else
++extern unsigned long setup_memory(void);
++extern void zone_sizes_init(void);
++#endif /* !CONFIG_DISCONTIGMEM */
++
++void __init setup_bootmem_allocator(void)
++{
++	unsigned long bootmap_size;
++	/*
++	 * Initialize the boot-time allocator (with low memory only):
++	 */
++	bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
++
++	register_bootmem_low_pages(max_low_pfn);
++
++	/*
++	 * Reserve the bootmem bitmap itself as well. We do this in two
++	 * steps (first step was init_bootmem()) because this catches
++	 * the (very unlikely) case of us accidentally initializing the
++	 * bootmem allocator with an invalid RAM area.
++	 */
++	reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(min_low_pfn) +
++			 bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
++
++#ifndef CONFIG_XEN
++	/*
++	 * reserve physical page 0 - it's a special BIOS page on many boxes,
++	 * enabling clean reboots, SMP operation, laptop functions.
++	 */
++	reserve_bootmem(0, PAGE_SIZE);
++
++	/* reserve EBDA region, it's a 4K region */
++	reserve_ebda_region();
++
++    /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
++       PCI prefetch into it (erratum #56). Usually the page is reserved anyway,
++       unless you have no PS/2 mouse plugged in. */
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++	    boot_cpu_data.x86 == 6)
++	     reserve_bootmem(0xa0000 - 4096, 4096);
++
++#ifdef CONFIG_SMP
++	/*
++	 * But first pinch a few for the stack/trampoline stuff
++	 * FIXME: Don't need the extra page at 4K, but need to fix
++	 * trampoline before removing it. (see the GDT stuff)
++	 */
++	reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
++#endif
++#ifdef CONFIG_ACPI_SLEEP
++	/*
++	 * Reserve low memory region for sleep support.
++	 */
++	acpi_reserve_bootmem();
++#endif
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (xen_start_info->mod_start) {
++		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
++			/*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
++			initrd_start = INITRD_START + PAGE_OFFSET;
++			initrd_end = initrd_start+INITRD_SIZE;
++			initrd_below_start_ok = 1;
++		}
++		else {
++			printk(KERN_ERR "initrd extends beyond end of memory "
++			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++			    INITRD_START + INITRD_SIZE,
++			    max_low_pfn << PAGE_SHIFT);
++			initrd_start = 0;
++		}
++	}
++#endif
++
++	phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;
++}
++
++/*
++ * The node 0 pgdat is initialized before all of these because
++ * it's needed for bootmem.  node>0 pgdats have their virtual
++ * space allocated before the pagetables are in place to access
++ * them, so they can't be cleared then.
++ *
++ * This should all compile down to nothing when NUMA is off.
++ */
++void __init remapped_pgdat_init(void)
++{
++	int nid;
++
++	for_each_online_node(nid) {
++		if (nid != 0)
++			memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
++	}
++}
++
++/*
++ * Request address space for all standard RAM and ROM resources
++ * and also for regions reported as reserved by the e820.
++ */
++static void __init
++legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
++{
++	int i;
++#ifdef CONFIG_XEN
++	dom0_op_t op;
++	struct dom0_memory_map_entry *map;
++	unsigned long gapstart, gapsize;
++	unsigned long long last;
++#endif
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++	probe_roms();
++#endif
++
++#ifdef CONFIG_XEN
++	map = alloc_bootmem_low_pages(PAGE_SIZE);
++	op.cmd = DOM0_PHYSICAL_MEMORY_MAP;
++	op.u.physical_memory_map.memory_map = map;
++	op.u.physical_memory_map.max_map_entries =
++		PAGE_SIZE / sizeof(struct dom0_memory_map_entry);
++	BUG_ON(HYPERVISOR_dom0_op(&op));
++
++	last = 0x100000000ULL;
++	gapstart = 0x10000000;
++	gapsize = 0x400000;
++
++	for (i = op.u.physical_memory_map.nr_map_entries - 1; i >= 0; i--) {
++		struct resource *res;
++
++		if ((last > map[i].end) && ((last - map[i].end) > gapsize)) {
++			gapsize = last - map[i].end;
++			gapstart = map[i].end;
++		}
++		if (map[i].start < last)
++			last = map[i].start;
++
++		if (map[i].end > 0x100000000ULL)
++			continue;
++		res = alloc_bootmem_low(sizeof(struct resource));
++		res->name = map[i].is_ram ? "System RAM" : "reserved";
++		res->start = map[i].start;
++		res->end = map[i].end - 1;
++		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++		request_resource(&iomem_resource, res);
++	}
++
++	free_bootmem(__pa(map), PAGE_SIZE);
++
++	/*
++	 * Start allocating dynamic PCI memory a bit into the gap,
++	 * aligned up to the nearest megabyte.
++	 *
++	 * Question: should we try to pad it up a bit (do something
++	 * like " + (gapsize >> 3)" in there too?). We now have the
++	 * technology.
++	 */
++	pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
++
++	printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
++		pci_mem_start, gapstart, gapsize);
++#else
++	for (i = 0; i < e820.nr_map; i++) {
++		struct resource *res;
++		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
++			continue;
++		res = alloc_bootmem_low(sizeof(struct resource));
++		switch (e820.map[i].type) {
++		case E820_RAM:	res->name = "System RAM"; break;
++		case E820_ACPI:	res->name = "ACPI Tables"; break;
++		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
++		default:	res->name = "reserved";
++		}
++		res->start = e820.map[i].addr;
++		res->end = res->start + e820.map[i].size - 1;
++		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++		request_resource(&iomem_resource, res);
++		if (e820.map[i].type == E820_RAM) {
++			/*
++			 *  We don't know which RAM region contains kernel data,
++			 *  so we try it repeatedly and let the resource manager
++			 *  test it.
++			 */
++			request_resource(res, code_resource);
++			request_resource(res, data_resource);
++		}
++	}
++#endif
++}
++
++/*
++ * Request address space for all standard resources
++ */
++static void __init register_memory(void)
++{
++#ifndef CONFIG_XEN
++	unsigned long gapstart, gapsize;
++	unsigned long long last;
++#endif
++	int	      i;
++
++	/* Nothing to do if not running in dom0. */
++	if (!(xen_start_info->flags & SIF_INITDOMAIN))
++		return;
++
++	if (efi_enabled)
++		efi_initialize_iomem_resources(&code_resource, &data_resource);
++	else
++		legacy_init_iomem_resources(&code_resource, &data_resource);
++
++	/* EFI systems may still have VGA */
++	request_resource(&iomem_resource, &video_ram_resource);
++
++	/* request I/O space for devices used on all i[345]86 PCs */
++	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++		request_resource(&ioport_resource, &standard_io_resources[i]);
++
++#ifndef CONFIG_XEN
++	/*
++	 * Search for the biggest gap in the low 32 bits of the e820
++	 * memory space.
++	 */
++	last = 0x100000000ull;
++	gapstart = 0x10000000;
++	gapsize = 0x400000;
++	i = e820.nr_map;
++	while (--i >= 0) {
++		unsigned long long start = e820.map[i].addr;
++		unsigned long long end = start + e820.map[i].size;
++
++		/*
++		 * Since "last" is at most 4GB, we know we'll
++		 * fit in 32 bits if this condition is true
++		 */
++		if (last > end) {
++			unsigned long gap = last - end;
++
++			if (gap > gapsize) {
++				gapsize = gap;
++				gapstart = end;
++			}
++		}
++		if (start < last)
++			last = start;
++	}
++
++	/*
++	 * Start allocating dynamic PCI memory a bit into the gap,
++	 * aligned up to the nearest megabyte.
++	 *
++	 * Question: should we try to pad it up a bit (do something
++	 * like " + (gapsize >> 3)" in there too?). We now have the
++	 * technology.
++	 */
++	pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
++
++	printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
++		pci_mem_start, gapstart, gapsize);
++#endif
++}
++
++/* Use inline assembly to define this because the nops are defined 
++   as inline assembly strings in the include files and we cannot 
++   get them easily into strings. */
++asm("\t.data\nintelnops: " 
++    GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
++    GENERIC_NOP7 GENERIC_NOP8); 
++asm("\t.data\nk8nops: " 
++    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
++    K8_NOP7 K8_NOP8); 
++asm("\t.data\nk7nops: " 
++    K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
++    K7_NOP7 K7_NOP8); 
++    
++extern unsigned char intelnops[], k8nops[], k7nops[];
++static unsigned char *intel_nops[ASM_NOP_MAX+1] = { 
++     NULL,
++     intelnops,
++     intelnops + 1,
++     intelnops + 1 + 2,
++     intelnops + 1 + 2 + 3,
++     intelnops + 1 + 2 + 3 + 4,
++     intelnops + 1 + 2 + 3 + 4 + 5,
++     intelnops + 1 + 2 + 3 + 4 + 5 + 6,
++     intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
++}; 
++static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 
++     NULL,
++     k8nops,
++     k8nops + 1,
++     k8nops + 1 + 2,
++     k8nops + 1 + 2 + 3,
++     k8nops + 1 + 2 + 3 + 4,
++     k8nops + 1 + 2 + 3 + 4 + 5,
++     k8nops + 1 + 2 + 3 + 4 + 5 + 6,
++     k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
++}; 
++static unsigned char *k7_nops[ASM_NOP_MAX+1] = { 
++     NULL,
++     k7nops,
++     k7nops + 1,
++     k7nops + 1 + 2,
++     k7nops + 1 + 2 + 3,
++     k7nops + 1 + 2 + 3 + 4,
++     k7nops + 1 + 2 + 3 + 4 + 5,
++     k7nops + 1 + 2 + 3 + 4 + 5 + 6,
++     k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
++}; 
++static struct nop { 
++     int cpuid; 
++     unsigned char **noptable; 
++} noptypes[] = { 
++     { X86_FEATURE_K8, k8_nops }, 
++     { X86_FEATURE_K7, k7_nops }, 
++     { -1, NULL }
++}; 
++
++/* Replace instructions with better alternatives for this CPU type.
++
++   This runs before SMP is initialized to avoid SMP problems with
++   self-modifying code. This implies that asymmetric systems where
++   APs have fewer capabilities than the boot processor are not handled.
++   In this case boot with "noreplacement". */ 
++void apply_alternatives(void *start, void *end) 
++{ 
++	struct alt_instr *a; 
++	int diff, i, k;
++        unsigned char **noptable = intel_nops; 
++	for (i = 0; noptypes[i].cpuid >= 0; i++) { 
++		if (boot_cpu_has(noptypes[i].cpuid)) { 
++			noptable = noptypes[i].noptable;
++			break;
++		}
++	} 
++	for (a = start; (void *)a < end; a++) { 
++		if (!boot_cpu_has(a->cpuid))
++			continue;
++		BUG_ON(a->replacementlen > a->instrlen); 
++		memcpy(a->instr, a->replacement, a->replacementlen); 
++		diff = a->instrlen - a->replacementlen; 
++		/* Pad the rest with nops */
++		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
++			k = diff;
++			if (k > ASM_NOP_MAX)
++				k = ASM_NOP_MAX;
++			memcpy(a->instr + i, noptable[k], k); 
++		} 
++	}
++} 
++
++static int no_replacement __initdata = 0; 
++ 
++void __init alternative_instructions(void)
++{
++	extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
++	if (no_replacement) 
++		return;
++	apply_alternatives(__alt_instructions, __alt_instructions_end);
++}
++
++static int __init noreplacement_setup(char *s)
++{ 
++     no_replacement = 1; 
++     return 0; 
++} 
++
++__setup("noreplacement", noreplacement_setup); 
++
++static char * __init machine_specific_memory_setup(void);
++
++#ifdef CONFIG_MCA
++static void set_mca_bus(int x)
++{
++	MCA_bus = x;
++}
++#else
++static void set_mca_bus(int x) { }
++#endif
++
++/*
++ * Determine if we were loaded by an EFI loader.  If so, then we have also been
++ * passed the efi memmap, systab, etc., so we should use these data structures
++ * for initialization.  Note, the efi init code path is determined by the
++ * global efi_enabled. This allows the same kernel image to be used on existing
++ * systems (with a traditional BIOS) as well as on EFI systems.
++ */
++void __init setup_arch(char **cmdline_p)
++{
++	int i, j, k, fpp;
++	physdev_op_t op;
++	unsigned long max_low_pfn;
++
++	/* Force a quick death if the kernel panics. */
++	extern int panic_timeout;
++	if (panic_timeout == 0)
++		panic_timeout = 1;
++
++	/* Register a call for panic conditions. */
++	notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
++	HYPERVISOR_vm_assist(VMASST_CMD_enable,
++			     VMASST_TYPE_writable_pagetables);
++
++	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
++	early_cpu_init();
++
++	/*
++	 * FIXME: This isn't an official loader_type right
++	 * now but does currently work with elilo.
++	 * If we were configured as an EFI kernel, check to make
++	 * sure that we were loaded correctly from elilo and that
++	 * the system table is valid.  If not, then initialize normally.
++	 */
++#ifdef CONFIG_EFI
++	if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
++		efi_enabled = 1;
++#endif
++
++	/* This must be initialized to UNNAMED_MAJOR for ipconfig to work
++	   properly.  Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
++	*/
++	ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
++ 	drive_info = DRIVE_INFO;
++ 	screen_info = SCREEN_INFO;
++	edid_info = EDID_INFO;
++	apm_info.bios = APM_BIOS_INFO;
++	ist_info = IST_INFO;
++	saved_videomode = VIDEO_MODE;
++	if( SYS_DESC_TABLE.length != 0 ) {
++		set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
++		machine_id = SYS_DESC_TABLE.table[0];
++		machine_submodel_id = SYS_DESC_TABLE.table[1];
++		BIOS_revision = SYS_DESC_TABLE.table[2];
++	}
++	bootloader_type = LOADER_TYPE;
++
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++	/* This is drawn from a dump from vgacon:startup in standard Linux. */
++	screen_info.orig_video_mode = 3; 
++	screen_info.orig_video_isVGA = 1;
++	screen_info.orig_video_lines = 25;
++	screen_info.orig_video_cols = 80;
++	screen_info.orig_video_ega_bx = 3;
++	screen_info.orig_video_points = 16;
++#endif
++
++#ifdef CONFIG_BLK_DEV_RAM
++	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++
++	setup_xen_features();
++
++	ARCH_SETUP
++	if (efi_enabled)
++		efi_init();
++	else {
++		printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++		print_memory_map(machine_specific_memory_setup());
++	}
++
++	copy_edd();
++
++	if (!MOUNT_ROOT_RDONLY)
++		root_mountflags &= ~MS_RDONLY;
++	init_mm.start_code = (unsigned long) _text;
++	init_mm.end_code = (unsigned long) _etext;
++	init_mm.end_data = (unsigned long) _edata;
++	init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
++		       xen_start_info->nr_pt_frames) << PAGE_SHIFT;
++
++	/* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
++	/*code_resource.start = virt_to_phys(_text);*/
++	/*code_resource.end = virt_to_phys(_etext)-1;*/
++	/*data_resource.start = virt_to_phys(_etext);*/
++	/*data_resource.end = virt_to_phys(_edata)-1;*/
++
++	parse_cmdline_early(cmdline_p);
++
++	max_low_pfn = setup_memory();
++
++	/*
++	 * NOTE: before this point _nobody_ is allowed to allocate
++	 * any memory using the bootmem allocator.  Although the
++	 * allocator is now initialised, only the first 8MB of the kernel
++	 * virtual address space has been mapped.  All allocations before
++	 * paging_init() has completed must use the alloc_bootmem_low_pages()
++	 * variant (which allocates DMA'able memory) and care must be taken
++	 * not to exceed the 8MB limit.
++	 */
++
++#ifdef CONFIG_SMP
++	smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
++#endif
++	paging_init();
++	remapped_pgdat_init();
++	zone_sizes_init();
++
++#ifdef CONFIG_X86_FIND_SMP_CONFIG
++	/*
++	 * Find and reserve possible boot-time SMP configuration:
++	 */
++	find_smp_config();
++#endif
++
++	/* Make sure we have a correctly sized P->M table. */
++	phys_to_machine_mapping = alloc_bootmem_low_pages(
++		max_pfn * sizeof(unsigned long));
++	memset(phys_to_machine_mapping, ~0,
++		max_pfn * sizeof(unsigned long));
++	memcpy(phys_to_machine_mapping,
++		(unsigned long *)xen_start_info->mfn_list,
++		xen_start_info->nr_pages * sizeof(unsigned long));
++	free_bootmem(
++		__pa(xen_start_info->mfn_list), 
++		PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++		sizeof(unsigned long))));
++
++	/*
++	 * Initialise the frame-list-list: each entry points to a page that
++	 * lists the frames making up the p2m table. Used by save/restore.
++	 */
++	pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
++	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++	  virt_to_mfn(pfn_to_mfn_frame_list_list);
++	       
++	fpp = PAGE_SIZE/sizeof(unsigned long);
++	for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
++	{
++	    if ( (j % fpp) == 0 )
++	    {
++	        k++;
++		BUG_ON(k>=16);
++		pfn_to_mfn_frame_list[k] = alloc_bootmem_low_pages(PAGE_SIZE);
++		pfn_to_mfn_frame_list_list[k] = 
++		    virt_to_mfn(pfn_to_mfn_frame_list[k]);
++		j=0;
++	    }
++	    pfn_to_mfn_frame_list[k][j] = 
++	        virt_to_mfn(&phys_to_machine_mapping[i]);
++	}
++	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++
++	/*
++	 * NOTE: at this point the bootmem allocator is fully available.
++	 */
++
++#ifdef CONFIG_EARLY_PRINTK
++	{
++		char *s = strstr(*cmdline_p, "earlyprintk=");
++		if (s) {
++			extern void setup_early_printk(char *);
++
++			setup_early_printk(s);
++			printk("early console enabled\n");
++		}
++	}
++#endif
++
++	if (xen_start_info->flags & SIF_INITDOMAIN)
++		dmi_scan_machine();
++
++#ifdef CONFIG_X86_GENERICARCH
++	generic_apic_probe(*cmdline_p);
++#endif	
++	if (efi_enabled)
++		efi_map_memmap();
++
++	op.cmd             = PHYSDEVOP_SET_IOPL;
++	op.u.set_iopl.iopl = 1;
++	HYPERVISOR_physdev_op(&op);
++
++#ifdef CONFIG_ACPI_BOOT
++	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
++		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
++		acpi_disabled = 1;
++		acpi_ht = 0;
++	}
++#endif
++
++#ifdef CONFIG_ACPI_BOOT
++	/*
++	 * Parse the ACPI tables for possible boot-time SMP configuration.
++	 */
++	acpi_boot_table_init();
++	acpi_boot_init();
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++	if (smp_found_config)
++		get_smp_config();
++#endif
++
++	/* XXX Disable irqdebug until we have a way to avoid interrupt
++	 * conflicts. */
++	noirqdebug_setup("");
++
++	register_memory();
++
++	if (xen_start_info->flags & SIF_INITDOMAIN) {
++		if (!(xen_start_info->flags & SIF_PRIVILEGED))
++			panic("Xen granted us console access "
++			      "but not privileged status");
++
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++		if (!efi_enabled ||
++		    (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++			conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++		conswitchp = &dummy_con;
++#endif
++#endif
++	} else {
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++		extern const struct consw xennull_con;
++		extern int console_use_vt;
++#if defined(CONFIG_VGA_CONSOLE)
++		/* disable VGA driver */
++		ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
++#endif
++		conswitchp = &xennull_con;
++		console_use_vt = 0;
++#endif
++	}
++}
++
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++	HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_crash);
++	/* we're never actually going to get here... */
++	return NOTIFY_DONE;
++}
++
++#include "setup_arch_post.h"
++/*
++ * Local Variables:
++ * mode:c
++ * c-file-style:"k&r"
++ * c-basic-offset:8
++ * End:
++ */
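
For readers following the Xen i386 setup hunk above, here is a minimal,
self-contained user-space sketch (not part of the patch) of the e820
bookkeeping that add_memory_region() and print_memory_map() perform there.
E820MAX, the entry layout and the type constants below are simplified
stand-ins, and the map built in main() is an arbitrary example.

/*
 * User-space sketch of the e820 bookkeeping from the setup code above.
 * E820MAX, struct e820entry and the E820_* values are simplified stand-ins,
 * not the kernel's own definitions.
 */
#include <stdio.h>

#define E820MAX        32
#define E820_RAM        1
#define E820_RESERVED   2
#define E820_ACPI       3
#define E820_NVS        4

struct e820entry {
	unsigned long long addr;
	unsigned long long size;
	unsigned long type;
};

static struct {
	int nr_map;
	struct e820entry map[E820MAX];
} e820;

/* Append one region, refusing to overflow the fixed-size table. */
static void add_memory_region(unsigned long long start,
			      unsigned long long size, unsigned long type)
{
	int x = e820.nr_map;

	if (x == E820MAX) {
		fprintf(stderr, "too many entries in the memory map\n");
		return;
	}
	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
}

/* Dump the table in the same "start - end (type)" shape as the kernel. */
static void print_memory_map(const char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++)
		printf(" %s: %016llx - %016llx type %lu\n", who,
		       e820.map[i].addr,
		       e820.map[i].addr + e820.map[i].size,
		       e820.map[i].type);
}

int main(void)
{
	/* Arbitrary example map: low RAM, a reserved BIOS hole, high RAM. */
	add_memory_region(0x0ULL,      0xa0000ULL,    E820_RAM);
	add_memory_region(0xf0000ULL,  0x10000ULL,    E820_RESERVED);
	add_memory_region(0x100000ULL, 0x3ff00000ULL, E820_RAM);
	print_memory_map("demo");
	return 0;
}
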
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/signal.c linux-2.6.12-xen/arch/xen/i386/kernel/signal.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/signal.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/signal.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,665 @@
++/*
++ *  linux/arch/i386/kernel/signal.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *
++ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
++ *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel.h>
++#include <linux/signal.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/unistd.h>
++#include <linux/stddef.h>
++#include <linux/personality.h>
++#include <linux/suspend.h>
++#include <linux/ptrace.h>
++#include <linux/elf.h>
++#include <asm/processor.h>
++#include <asm/ucontext.h>
++#include <asm/uaccess.h>
++#include <asm/i387.h>
++#include "sigframe.h"
++
++#define DEBUG_SIG 0
++
++#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
++
++/*
++ * Atomically swap in the new signal mask, and wait for a signal.
++ */
++asmlinkage int
++sys_sigsuspend(int history0, int history1, old_sigset_t mask)
++{
++	struct pt_regs * regs = (struct pt_regs *) &history0;
++	sigset_t saveset;
++
++	mask &= _BLOCKABLE;
++	spin_lock_irq(&current->sighand->siglock);
++	saveset = current->blocked;
++	siginitset(&current->blocked, mask);
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
++
++	regs->eax = -EINTR;
++	while (1) {
++		current->state = TASK_INTERRUPTIBLE;
++		schedule();
++		if (do_signal(regs, &saveset))
++			return -EINTR;
++	}
++}
++
++asmlinkage int
++sys_rt_sigsuspend(struct pt_regs regs)
++{
++	sigset_t saveset, newset;
++
++	/* XXX: Don't preclude handling different sized sigset_t's.  */
++	if (regs.ecx != sizeof(sigset_t))
++		return -EINVAL;
++
++	if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
++		return -EFAULT;
++	sigdelsetmask(&newset, ~_BLOCKABLE);
++
++	spin_lock_irq(&current->sighand->siglock);
++	saveset = current->blocked;
++	current->blocked = newset;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
++
++	regs.eax = -EINTR;
++	while (1) {
++		current->state = TASK_INTERRUPTIBLE;
++		schedule();
++		if (do_signal(&regs, &saveset))
++			return -EINTR;
++	}
++}
++
++asmlinkage int 
++sys_sigaction(int sig, const struct old_sigaction __user *act,
++	      struct old_sigaction __user *oact)
++{
++	struct k_sigaction new_ka, old_ka;
++	int ret;
++
++	if (act) {
++		old_sigset_t mask;
++		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
++		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
++		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
++			return -EFAULT;
++		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
++		__get_user(mask, &act->sa_mask);
++		siginitset(&new_ka.sa.sa_mask, mask);
++	}
++
++	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
++
++	if (!ret && oact) {
++		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
++		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
++		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
++			return -EFAULT;
++		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
++		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
++	}
++
++	return ret;
++}
++
++asmlinkage int
++sys_sigaltstack(unsigned long ebx)
++{
++	/* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
++	struct pt_regs *regs = (struct pt_regs *)&ebx;
++	const stack_t __user *uss = (const stack_t __user *)ebx;
++	stack_t __user *uoss = (stack_t __user *)regs->ecx;
++
++	return do_sigaltstack(uss, uoss, regs->esp);
++}
++
++
++/*
++ * Do a signal return; undo the signal stack.
++ */
++
++static int
++restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax)
++{
++	unsigned int err = 0;
++
++	/* Always make any pending restarted system calls return -EINTR */
++	current_thread_info()->restart_block.fn = do_no_restart_syscall;
++
++#define COPY(x)		err |= __get_user(regs->x, &sc->x)
++
++#define COPY_SEG(seg)							\
++	{ unsigned short tmp;						\
++	  err |= __get_user(tmp, &sc->seg);				\
++	  regs->x##seg = tmp; }
++
++#define COPY_SEG_STRICT(seg)						\
++	{ unsigned short tmp;						\
++	  err |= __get_user(tmp, &sc->seg);				\
++	  regs->x##seg = tmp|3; }
++
++#define GET_SEG(seg)							\
++	{ unsigned short tmp;						\
++	  err |= __get_user(tmp, &sc->seg);				\
++	  loadsegment(seg,tmp); }
++
++#define	FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \
++			 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
++			 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
++
++	GET_SEG(gs);
++	GET_SEG(fs);
++	COPY_SEG(es);
++	COPY_SEG(ds);
++	COPY(edi);
++	COPY(esi);
++	COPY(ebp);
++	COPY(esp);
++	COPY(ebx);
++	COPY(edx);
++	COPY(ecx);
++	COPY(eip);
++	COPY_SEG_STRICT(cs);
++	COPY_SEG_STRICT(ss);
++	
++	{
++		unsigned int tmpflags;
++		err |= __get_user(tmpflags, &sc->eflags);
++		regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
++		regs->orig_eax = -1;		/* disable syscall checks */
++	}
++
++	{
++		struct _fpstate __user * buf;
++		err |= __get_user(buf, &sc->fpstate);
++		if (buf) {
++			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
++				goto badframe;
++			err |= restore_i387(buf);
++		} else {
++			struct task_struct *me = current;
++			if (used_math()) {
++				clear_fpu(me);
++				clear_used_math();
++			}
++		}
++	}
++
++	err |= __get_user(*peax, &sc->eax);
++	return err;
++
++badframe:
++	return 1;
++}
++
++asmlinkage int sys_sigreturn(unsigned long __unused)
++{
++	struct pt_regs *regs = (struct pt_regs *) &__unused;
++	struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
++	sigset_t set;
++	int eax;
++
++	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
++		goto badframe;
++	if (__get_user(set.sig[0], &frame->sc.oldmask)
++	    || (_NSIG_WORDS > 1
++		&& __copy_from_user(&set.sig[1], &frame->extramask,
++				    sizeof(frame->extramask))))
++		goto badframe;
++
++	sigdelsetmask(&set, ~_BLOCKABLE);
++	spin_lock_irq(&current->sighand->siglock);
++	current->blocked = set;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
++	
++	if (restore_sigcontext(regs, &frame->sc, &eax))
++		goto badframe;
++	return eax;
++
++badframe:
++	force_sig(SIGSEGV, current);
++	return 0;
++}	
++
++asmlinkage int sys_rt_sigreturn(unsigned long __unused)
++{
++	struct pt_regs *regs = (struct pt_regs *) &__unused;
++	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4);
++	sigset_t set;
++	int eax;
++
++	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
++		goto badframe;
++	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
++		goto badframe;
++
++	sigdelsetmask(&set, ~_BLOCKABLE);
++	spin_lock_irq(&current->sighand->siglock);
++	current->blocked = set;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
++	
++	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
++		goto badframe;
++
++	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT)
++		goto badframe;
++
++	return eax;
++
++badframe:
++	force_sig(SIGSEGV, current);
++	return 0;
++}	
++
++/*
++ * Set up a signal frame.
++ */
++
++static int
++setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
++		 struct pt_regs *regs, unsigned long mask)
++{
++	int tmp, err = 0;
++
++	tmp = 0;
++	__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
++	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
++	__asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
++	err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
++
++	err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
++	err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
++	err |= __put_user(regs->edi, &sc->edi);
++	err |= __put_user(regs->esi, &sc->esi);
++	err |= __put_user(regs->ebp, &sc->ebp);
++	err |= __put_user(regs->esp, &sc->esp);
++	err |= __put_user(regs->ebx, &sc->ebx);
++	err |= __put_user(regs->edx, &sc->edx);
++	err |= __put_user(regs->ecx, &sc->ecx);
++	err |= __put_user(regs->eax, &sc->eax);
++	err |= __put_user(current->thread.trap_no, &sc->trapno);
++	err |= __put_user(current->thread.error_code, &sc->err);
++	err |= __put_user(regs->eip, &sc->eip);
++	err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
++	err |= __put_user(regs->eflags, &sc->eflags);
++	err |= __put_user(regs->esp, &sc->esp_at_signal);
++	err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);
++
++	tmp = save_i387(fpstate);
++	if (tmp < 0)
++	  err = 1;
++	else
++	  err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
++
++	/* non-iBCS2 extensions.. */
++	err |= __put_user(mask, &sc->oldmask);
++	err |= __put_user(current->thread.cr2, &sc->cr2);
++
++	return err;
++}
++
++/*
++ * Determine which stack to use..
++ */
++static inline void __user *
++get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
++{
++	unsigned long esp;
++
++	/* Default to using normal stack */
++	esp = regs->esp;
++
++	/* This is the X/Open sanctioned signal stack switching.  */
++	if (ka->sa.sa_flags & SA_ONSTACK) {
++		if (sas_ss_flags(esp) == 0)
++			esp = current->sas_ss_sp + current->sas_ss_size;
++	}
++
++	/* This is the legacy signal stack switching. */
++	else if ((regs->xss & 0xffff) != __USER_DS &&
++		 !(ka->sa.sa_flags & SA_RESTORER) &&
++		 ka->sa.sa_restorer) {
++		esp = (unsigned long) ka->sa.sa_restorer;
++	}
++
++	return (void __user *)((esp - frame_size) & -8ul);
++}
++
++/* These symbols are defined with the addresses in the vsyscall page.
++   See vsyscall-sigreturn.S.  */
++extern void __user __kernel_sigreturn;
++extern void __user __kernel_rt_sigreturn;
++
++static void setup_frame(int sig, struct k_sigaction *ka,
++			sigset_t *set, struct pt_regs * regs)
++{
++	void __user *restorer;
++	struct sigframe __user *frame;
++	int err = 0;
++	int usig;
++
++	frame = get_sigframe(ka, regs, sizeof(*frame));
++
++	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
++		goto give_sigsegv;
++
++	usig = current_thread_info()->exec_domain
++		&& current_thread_info()->exec_domain->signal_invmap
++		&& sig < 32
++		? current_thread_info()->exec_domain->signal_invmap[sig]
++		: sig;
++
++	err = __put_user(usig, &frame->sig);
++	if (err)
++		goto give_sigsegv;
++
++	err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
++	if (err)
++		goto give_sigsegv;
++
++	if (_NSIG_WORDS > 1) {
++		err = __copy_to_user(&frame->extramask, &set->sig[1],
++				      sizeof(frame->extramask));
++		if (err)
++			goto give_sigsegv;
++	}
++
++	restorer = &__kernel_sigreturn;
++	if (ka->sa.sa_flags & SA_RESTORER)
++		restorer = ka->sa.sa_restorer;
++
++	/* Set up to return from userspace.  */
++	err |= __put_user(restorer, &frame->pretcode);
++	 
++	/*
++	 * This is popl %eax ; movl $,%eax ; int $0x80
++	 *
++	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
++	 * reasons and because gdb uses it as a signature to notice
++	 * signal handler stack frames.
++	 */
++	err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
++	err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
++	err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
++
++	if (err)
++		goto give_sigsegv;
++
++	/* Set up registers for signal handler */
++	regs->esp = (unsigned long) frame;
++	regs->eip = (unsigned long) ka->sa.sa_handler;
++	regs->eax = (unsigned long) sig;
++	regs->edx = (unsigned long) 0;
++	regs->ecx = (unsigned long) 0;
++
++	set_fs(USER_DS);
++	regs->xds = __USER_DS;
++	regs->xes = __USER_DS;
++	regs->xss = __USER_DS;
++	regs->xcs = __USER_CS;
++
++	/*
++	 * Clear TF when entering the signal handler, but
++	 * notify any tracer that was single-stepping it.
++	 * The tracer may want to single-step inside the
++	 * handler too.
++	 */
++	regs->eflags &= ~TF_MASK;
++	if (test_thread_flag(TIF_SINGLESTEP))
++		ptrace_notify(SIGTRAP);
++
++#if DEBUG_SIG
++	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
++		current->comm, current->pid, frame, regs->eip, frame->pretcode);
++#endif
++
++	return;
++
++give_sigsegv:
++	force_sigsegv(sig, current);
++}
++
++static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
++			   sigset_t *set, struct pt_regs * regs)
++{
++	void __user *restorer;
++	struct rt_sigframe __user *frame;
++	int err = 0;
++	int usig;
++
++	frame = get_sigframe(ka, regs, sizeof(*frame));
++
++	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
++		goto give_sigsegv;
++
++	usig = current_thread_info()->exec_domain
++		&& current_thread_info()->exec_domain->signal_invmap
++		&& sig < 32
++		? current_thread_info()->exec_domain->signal_invmap[sig]
++		: sig;
++
++	err |= __put_user(usig, &frame->sig);
++	err |= __put_user(&frame->info, &frame->pinfo);
++	err |= __put_user(&frame->uc, &frame->puc);
++	err |= copy_siginfo_to_user(&frame->info, info);
++	if (err)
++		goto give_sigsegv;
++
++	/* Create the ucontext.  */
++	err |= __put_user(0, &frame->uc.uc_flags);
++	err |= __put_user(0, &frame->uc.uc_link);
++	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
++	err |= __put_user(sas_ss_flags(regs->esp),
++			  &frame->uc.uc_stack.ss_flags);
++	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
++	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
++			        regs, set->sig[0]);
++	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
++	if (err)
++		goto give_sigsegv;
++
++	/* Set up to return from userspace.  */
++	restorer = &__kernel_rt_sigreturn;
++	if (ka->sa.sa_flags & SA_RESTORER)
++		restorer = ka->sa.sa_restorer;
++	err |= __put_user(restorer, &frame->pretcode);
++	 
++	/*
++	 * This is movl $,%eax ; int $0x80
++	 *
++	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
++	 * reasons and because gdb uses it as a signature to notice
++	 * signal handler stack frames.
++	 */
++	err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
++	err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
++	err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
++
++	if (err)
++		goto give_sigsegv;
++
++	/* Set up registers for signal handler */
++	regs->esp = (unsigned long) frame;
++	regs->eip = (unsigned long) ka->sa.sa_handler;
++	regs->eax = (unsigned long) usig;
++	regs->edx = (unsigned long) &frame->info;
++	regs->ecx = (unsigned long) &frame->uc;
++
++	set_fs(USER_DS);
++	regs->xds = __USER_DS;
++	regs->xes = __USER_DS;
++	regs->xss = __USER_DS;
++	regs->xcs = __USER_CS;
++
++	/*
++	 * Clear TF when entering the signal handler, but
++	 * notify any tracer that was single-stepping it.
++	 * The tracer may want to single-step inside the
++	 * handler too.
++	 */
++	regs->eflags &= ~TF_MASK;
++	if (test_thread_flag(TIF_SINGLESTEP))
++		ptrace_notify(SIGTRAP);
++
++#if DEBUG_SIG
++	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
++		current->comm, current->pid, frame, regs->eip, frame->pretcode);
++#endif
++
++	return;
++
++give_sigsegv:
++	force_sigsegv(sig, current);
++}
++
++/*
++ * OK, we're invoking a handler
++ */	
++
++static void
++handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
++	      sigset_t *oldset,	struct pt_regs * regs)
++{
++	/* Are we from a system call? */
++	if (regs->orig_eax >= 0) {
++		/* If so, check system call restarting.. */
++		switch (regs->eax) {
++		        case -ERESTART_RESTARTBLOCK:
++			case -ERESTARTNOHAND:
++				regs->eax = -EINTR;
++				break;
++
++			case -ERESTARTSYS:
++				if (!(ka->sa.sa_flags & SA_RESTART)) {
++					regs->eax = -EINTR;
++					break;
++				}
++			/* fallthrough */
++			case -ERESTARTNOINTR:
++				regs->eax = regs->orig_eax;
++				regs->eip -= 2;
++		}
++	}
++
++	/*
++	 * If TF is set due to a debugger (PT_DTRACE), clear the TF flag so
++	 * that register information in the sigcontext is correct.
++	 */
++	if (unlikely(regs->eflags & TF_MASK)
++	    && likely(current->ptrace & PT_DTRACE)) {
++		current->ptrace &= ~PT_DTRACE;
++		regs->eflags &= ~TF_MASK;
++	}
++
++	/* Set up the stack frame */
++	if (ka->sa.sa_flags & SA_SIGINFO)
++		setup_rt_frame(sig, ka, info, oldset, regs);
++	else
++		setup_frame(sig, ka, oldset, regs);
++
++	if (!(ka->sa.sa_flags & SA_NODEFER)) {
++		spin_lock_irq(&current->sighand->siglock);
++		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
++		sigaddset(&current->blocked,sig);
++		recalc_sigpending();
++		spin_unlock_irq(&current->sighand->siglock);
++	}
++}
++
++/*
++ * Note that 'init' is a special process: it doesn't get signals it doesn't
++ * want to handle. Thus you cannot kill init even with a SIGKILL even by
++ * mistake.
++ */
++int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
++{
++	siginfo_t info;
++	int signr;
++	struct k_sigaction ka;
++
++	/*
++	 * We want the common case to go fast, which
++	 * is why we may in certain cases get here from
++	 * kernel mode. Just return without doing anything
++	 * if so.
++	 */
++	if ((regs->xcs & 2) != 2)
++		return 1;
++
++	if (current->flags & PF_FREEZE) {
++		refrigerator(0);
++		goto no_signal;
++	}
++
++	if (!oldset)
++		oldset = &current->blocked;
++
++	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
++	if (signr > 0) {
++		/* Reenable any watchpoints before delivering the
++		 * signal to user space. The processor register will
++		 * have been cleared if the watchpoint triggered
++		 * inside the kernel.
++		 */
++		if (unlikely(current->thread.debugreg[7])) {
++			loaddebug(&current->thread, 7);
++		}
++
++		/* Whee!  Actually deliver the signal.  */
++		handle_signal(signr, &info, &ka, oldset, regs);
++		return 1;
++	}
++
++ no_signal:
++	/* Did we come from a system call? */
++	if (regs->orig_eax >= 0) {
++		/* Restart the system call - no handlers present */
++		if (regs->eax == -ERESTARTNOHAND ||
++		    regs->eax == -ERESTARTSYS ||
++		    regs->eax == -ERESTARTNOINTR) {
++			regs->eax = regs->orig_eax;
++			regs->eip -= 2;
++		}
++		if (regs->eax == -ERESTART_RESTARTBLOCK){
++			regs->eax = __NR_restart_syscall;
++			regs->eip -= 2;
++		}
++	}
++	return 0;
++}
++
++/*
++ * notification of userspace execution resumption
++ * - triggered by current->work.notify_resume
++ */
++__attribute__((regparm(3)))
++void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
++		      __u32 thread_info_flags)
++{
++	/* Pending single-step? */
++	if (thread_info_flags & _TIF_SINGLESTEP) {
++		regs->eflags |= TF_MASK;
++		clear_thread_flag(TIF_SINGLESTEP);
++	}
++	/* deal with pending signal delivery */
++	if (thread_info_flags & _TIF_SIGPENDING)
++		do_signal(regs,oldset);
++	
++	clear_thread_flag(TIF_IRET);
++}
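
As a complement to the signal-delivery code just above, here is a small
user-space example (not part of the patch) showing the POSIX side of the
machinery that setup_rt_frame() and get_sigframe() service: a handler
registered with SA_SIGINFO and delivered on an alternate stack via
SA_ONSTACK. Only standard sigaction()/sigaltstack() calls are used; the
stack size and the choice of SIGUSR1 are arbitrary.

/*
 * User-space example: exercises the SA_SIGINFO and SA_ONSTACK delivery
 * paths that setup_rt_frame() and get_sigframe() above implement on the
 * kernel side.  Plain POSIX; stack size is arbitrary.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char altstack[64 * 1024];	/* alternate signal stack */

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)ucontext;
	/* printf() is not async-signal-safe; acceptable for a demo only. */
	printf("caught signal %d (si_code=%d) on the alternate stack\n",
	       sig, info->si_code);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	memset(&ss, 0, sizeof(ss));
	ss.ss_sp = altstack;
	ss.ss_size = sizeof(altstack);
	if (sigaltstack(&ss, NULL) < 0) {
		perror("sigaltstack");
		return EXIT_FAILURE;
	}

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;	/* rt frame + alternate stack */
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
		perror("sigaction");
		return EXIT_FAILURE;
	}

	raise(SIGUSR1);
	return 0;
}
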
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/smp.c linux-2.6.12-xen/arch/xen/i386/kernel/smp.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/smp.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/smp.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,606 @@
++/*
++ *	Intel SMP support routines.
++ *
++ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
++ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
++ *
++ *	This code is released under the GNU General Public License version 2 or
++ *	later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/cache.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++
++#include <asm/mtrr.h>
++#include <asm/tlbflush.h>
++#if 0
++#include <mach_apic.h>
++#endif
++#include <asm-xen/evtchn.h>
++
++/*
++ *	Some notes on x86 processor bugs affecting SMP operation:
++ *
++ *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
++ *	The Linux implications for SMP are handled as follows:
++ *
++ *	Pentium III / [Xeon]
++ *		None of the E1AP-E3AP errata are visible to the user.
++ *
++ *	E1AP.	see PII A1AP
++ *	E2AP.	see PII A2AP
++ *	E3AP.	see PII A3AP
++ *
++ *	Pentium II / [Xeon]
++ *		None of the A1AP-A3AP errata are visible to the user.
++ *
++ *	A1AP.	see PPro 1AP
++ *	A2AP.	see PPro 2AP
++ *	A3AP.	see PPro 7AP
++ *
++ *	Pentium Pro
++ *		None of 1AP-9AP errata are visible to the normal user,
++ *	except occasional delivery of 'spurious interrupt' as trap #15.
++ *	This is very rare and a non-problem.
++ *
++ *	1AP.	Linux maps APIC as non-cacheable
++ *	2AP.	worked around in hardware
++ *	3AP.	fixed in C0 and above steppings microcode update.
++ *		Linux does not use excessive STARTUP_IPIs.
++ *	4AP.	worked around in hardware
++ *	5AP.	symmetric IO mode (normal Linux operation) not affected.
++ *		'noapic' mode has vector 0xf filled out properly.
++ *	6AP.	'noapic' mode might be affected - fixed in later steppings
++ *	7AP.	We do not assume writes to the LVT deasserting IRQs
++ *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
++ *	9AP.	We do not use mixed mode
++ *
++ *	Pentium
++ *		There is a marginal case where REP MOVS on 100MHz SMP
++ *	machines with B stepping processors can fail. XXX should provide
++ *	an L1cache=Writethrough or L1cache=off option.
++ *
++ *		B stepping CPUs may hang. There are hardware workarounds
++ *	for this. We warn about it in case your board doesn't have the
++ *	workarounds. Basically that's so I can tell anyone with a B stepping
++ *	CPU and SMP problems "tough".
++ *
++ *	Specific items [From Pentium Processor Specification Update]
++ *
++ *	1AP.	Linux doesn't use remote read
++ *	2AP.	Linux doesn't trust APIC errors
++ *	3AP.	We work around this
++ *	4AP.	Linux never generated 3 interrupts of the same priority
++ *		to cause a lost local interrupt.
++ *	5AP.	Remote read is never used
++ *	6AP.	not affected - worked around in hardware
++ *	7AP.	not affected - worked around in hardware
++ *	8AP.	worked around in hardware - we get explicit CS errors if not
++ *	9AP.	only 'noapic' mode affected. Might generate spurious
++ *		interrupts, we log only the first one and count the
++ *		rest silently.
++ *	10AP.	not affected - worked around in hardware
++ *	11AP.	Linux reads the APIC between writes to avoid this, as per
++ *		the documentation. Make sure you preserve this as it affects
++ *		the C stepping chips too.
++ *	12AP.	not affected - worked around in hardware
++ *	13AP.	not affected - worked around in hardware
++ *	14AP.	we always deassert INIT during bootup
++ *	15AP.	not affected - worked around in hardware
++ *	16AP.	not affected - worked around in hardware
++ *	17AP.	not affected - worked around in hardware
++ *	18AP.	not affected - worked around in hardware
++ *	19AP.	not affected - worked around in BIOS
++ *
++ *	If this sounds worrying, believe me these bugs are either ___RARE___
++ *	or are signal timing bugs worked around in hardware, and there's
++ *	nothing of note with C stepping upwards.
++ */
++
++DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
++
++/*
++ * the following functions deal with sending IPIs between CPUs.
++ *
++ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
++ */
++
++static inline int __prepare_ICR (unsigned int shortcut, int vector)
++{
++	return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
++}
++
++static inline int __prepare_ICR2 (unsigned int mask)
++{
++	return SET_APIC_DEST_FIELD(mask);
++}
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
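/*
 * A clarifying note, inferred from the code below: under Xen there is no
 * local APIC to program, so each IPI "vector" is simply an index into the
 * per-CPU ipi_to_irq table, which maps it to the event-channel-backed irq it
 * was bound to (the binding itself is set up elsewhere). Sending an IPI
 * therefore reduces to an event-channel notification via
 * notify_remote_via_irq().
 */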
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++	int irq = per_cpu(ipi_to_irq, cpu)[vector];
++	BUG_ON(irq < 0);
++	notify_remote_via_irq(irq);
++}
++
++void __send_IPI_shortcut(unsigned int shortcut, int vector)
++{
++	int cpu;
++
++	switch (shortcut) {
++	case APIC_DEST_SELF:
++		__send_IPI_one(smp_processor_id(), vector);
++		break;
++	case APIC_DEST_ALLBUT:
++		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++			if (cpu == smp_processor_id())
++				continue;
++			if (cpu_isset(cpu, cpu_online_map)) {
++				__send_IPI_one(cpu, vector);
++			}
++		}
++		break;
++	default:
++		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++		       vector);
++		break;
++	}
++}
++
++void fastcall send_IPI_self(int vector)
++{
++	__send_IPI_shortcut(APIC_DEST_SELF, vector);
++}
++
++/*
++ * This is only used on smaller machines.
++ */
++void send_IPI_mask_bitmask(cpumask_t mask, int vector)
++{
++	unsigned long flags;
++	unsigned int cpu;
++
++	local_irq_save(flags);
++	WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
++
++	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++		if (cpu_isset(cpu, mask)) {
++			__send_IPI_one(cpu, vector);
++		}
++	}
++
++	local_irq_restore(flags);
++}
++
++void send_IPI_mask_sequence(cpumask_t mask, int vector)
++{
++
++	send_IPI_mask_bitmask(mask, vector);
++}
++
++#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
++
++#if 0 /* XEN */
++/*
++ *	Smarter SMP flushing macros. 
++ *		c/o Linus Torvalds.
++ *
++ *	These mean you can really definitely utterly forget about
++ *	writing to user space from interrupts. (It's not allowed anyway).
++ *
++ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
++ */
++
++static cpumask_t flush_cpumask;
++static struct mm_struct * flush_mm;
++static unsigned long flush_va;
++static DEFINE_SPINLOCK(tlbstate_lock);
++#define FLUSH_ALL	0xffffffff
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context, 
++ * instead update mm->cpu_vm_mask.
++ *
++ * We need to reload %cr3 since the page tables may be going
++ * away from under us..
++ */
++static inline void leave_mm (unsigned long cpu)
++{
++	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++		BUG();
++	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
++	load_cr3(swapper_pg_dir);
++}
++
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * 	Stop ipi delivery for the old mm. This is not synchronized with
++ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * 	for the wrong mm, and in the worst case we perform a superfluous
++ * 	tlb flush.
++ * 1a2) set cpu_tlbstate to TLBSTATE_OK
++ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ *	was in lazy tlb mode.
++ * 1a3) update cpu_tlbstate[].active_mm
++ * 	Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * 	Now the other cpus will send tlb flush ipis.
++ * 1a4) change cr3.
++ * 1b) thread switch without mm change
++ *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
++ *	flush ipis.
++ * 1b1) set cpu_tlbstate to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * 	Atomically set the bit [other cpus will start sending flush ipis],
++ * 	and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ *   runs in kernel space, the cpu could load tlb entries for user space
++ *   pages.
++ *
++ * The good news is that cpu_tlbstate is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ */
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++				     struct pt_regs *regs)
++{
++	unsigned long cpu;
++
++	cpu = get_cpu();
++
++	if (!cpu_isset(cpu, flush_cpumask))
++		goto out;
++		/* 
++		 * This was a BUG() but until someone can quote me the
++		 * line from the intel manual that guarantees an IPI to
++		 * multiple CPUs is retried _only_ on the erroring CPUs
++		 * it's staying as a return
++		 *
++		 * BUG();
++		 */
++		 
++	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
++		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
++			if (flush_va == FLUSH_ALL)
++				local_flush_tlb();
++			else
++				__flush_tlb_one(flush_va);
++		} else
++			leave_mm(cpu);
++	}
++	smp_mb__before_clear_bit();
++	cpu_clear(cpu, flush_cpumask);
++	smp_mb__after_clear_bit();
++out:
++	put_cpu_no_resched();
++
++	return IRQ_HANDLED;
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++						unsigned long va)
++{
++	/*
++	 * A couple of (to be removed) sanity checks:
++	 *
++	 * - current CPU must not be in mask
++	 * - mask must exist :)
++	 */
++	BUG_ON(cpus_empty(cpumask));
++	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
++	BUG_ON(!mm);
++
++	/* If a CPU which we ran on has gone down, OK. */
++	cpus_and(cpumask, cpumask, cpu_online_map);
++	if (cpus_empty(cpumask))
++		return;
++
++	/*
++	 * I'm not happy about this global shared spinlock in the
++	 * MM hot path, but we'll see how contended it is.
++	 * Temporarily this turns IRQs off, so that lockups are
++	 * detected by the NMI watchdog.
++	 */
++	spin_lock(&tlbstate_lock);
++	
++	flush_mm = mm;
++	flush_va = va;
++#if NR_CPUS <= BITS_PER_LONG
++	atomic_set_mask(cpumask, &flush_cpumask);
++#else
++	{
++		int k;
++		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
++		unsigned long *cpu_mask = (unsigned long *)&cpumask;
++		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
++			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
++	}
++#endif
++	/*
++	 * We have to send the IPI only to
++	 * CPUs affected.
++	 */
++	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
++
++	while (!cpus_empty(flush_cpumask))
++		/* nothing. lockup detection does not belong here */
++		mb();
++
++	flush_mm = NULL;
++	flush_va = 0;
++	spin_unlock(&tlbstate_lock);
++}
++	
++void flush_tlb_current_task(void)
++{
++	struct mm_struct *mm = current->mm;
++	cpumask_t cpu_mask;
++
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	local_flush_tlb();
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++	preempt_enable();
++}
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++	cpumask_t cpu_mask;
++
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	if (current->active_mm == mm) {
++		if (current->mm)
++			local_flush_tlb();
++		else
++			leave_mm(smp_processor_id());
++	}
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++	preempt_enable();
++}
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++	struct mm_struct *mm = vma->vm_mm;
++	cpumask_t cpu_mask;
++
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	if (current->active_mm == mm) {
++		if(current->mm)
++			__flush_tlb_one(va);
++		else
++		 	leave_mm(smp_processor_id());
++	}
++
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, va);
++
++	preempt_enable();
++}
++
++static void do_flush_tlb_all(void* info)
++{
++	unsigned long cpu = smp_processor_id();
++
++	__flush_tlb_all();
++	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
++		leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++
++#else
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++				     struct pt_regs *regs)
++{ return 0; }
++void flush_tlb_current_task(void)
++{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
++void flush_tlb_mm(struct mm_struct * mm)
++{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
++void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
++{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
++void flush_tlb_all(void)
++{ xen_tlb_flush_all(); }
++
++#endif /* XEN */
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++void smp_send_reschedule(int cpu)
++{
++	WARN_ON(cpu_is_offline(cpu));
++	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++	void (*func) (void *info);
++	void *info;
++	atomic_t started;
++	atomic_t finished;
++	int wait;
++};
++
++static struct call_data_struct * call_data;
++
++/*
++ * this function sends a 'generic call function' IPI to all other CPUs
++ * in the system.
++ */
++
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++			int wait)
++/*
++ * [SUMMARY] Run a function on all other CPUs.
++ * <func> The function to run. This must be fast and non-blocking.
++ * <info> An arbitrary pointer to pass to the function.
++ * <nonatomic> currently unused.
++ * <wait> If true, wait (atomically) until function has completed on other CPUs.
++ * [RETURNS] 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute <<func>>, are executing it, or have executed it.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ */
++{
++	struct call_data_struct data;
++	int cpus;
++
++	/* Holding any lock stops cpus from going down. */
++	spin_lock(&call_lock);
++	cpus = num_online_cpus()-1;
++
++	if (!cpus) {
++		spin_unlock(&call_lock);
++		return 0;
++	}
++
++	/* Can deadlock when called with interrupts disabled */
++	WARN_ON(irqs_disabled());
++
++	data.func = func;
++	data.info = info;
++	atomic_set(&data.started, 0);
++	data.wait = wait;
++	if (wait)
++		atomic_set(&data.finished, 0);
++
++	call_data = &data;
++	mb();
++	
++	/* Send a message to all other CPUs and wait for them to respond */
++	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++	/* Wait for response */
++	while (atomic_read(&data.started) != cpus)
++		barrier();
++
++	if (wait)
++		while (atomic_read(&data.finished) != cpus)
++			barrier();
++	spin_unlock(&call_lock);
++
++	return 0;
++}
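/*
 * A minimal usage sketch for smp_call_function() as defined above. The
 * helper names (drain_local_counter, drain_all_cpus) are hypothetical and
 * only illustrate the calling convention: a fast, non-blocking handler,
 * invoked with interrupts enabled, and wait=1 to block until every other
 * online CPU has run it.
 */
static atomic_t total_drained = ATOMIC_INIT(0);

static void drain_local_counter(void *unused)
{
	/* Runs on each remote CPU in interrupt context; keep it short. */
	atomic_inc(&total_drained);
}

static int drain_all_cpus(void)
{
	/* nonatomic is unused; wait=1 waits until all remote CPUs finish. */
	return smp_call_function(drain_local_counter, NULL, 1, 1);
}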
++
++static void stop_this_cpu (void * dummy)
++{
++	/*
++	 * Remove this CPU:
++	 */
++	cpu_clear(smp_processor_id(), cpu_online_map);
++	local_irq_disable();
++#if 0
++	disable_local_APIC();
++#endif
++	if (cpu_data[smp_processor_id()].hlt_works_ok)
++		for(;;) __asm__("hlt");
++	for (;;);
++}
++
++/*
++ * this function calls the 'stop' function on all other CPUs in the system.
++ */
++
++void smp_send_stop(void)
++{
++	smp_call_function(stop_this_cpu, NULL, 1, 0);
++
++	local_irq_disable();
++#if 0
++	disable_local_APIC();
++#endif
++	local_irq_enable();
++}
++
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
++				     struct pt_regs *regs)
++{
++
++	return IRQ_HANDLED;
++}
++
++#include <linux/kallsyms.h>
++irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
++					struct pt_regs *regs)
++{
++	void (*func) (void *info) = call_data->func;
++	void *info = call_data->info;
++	int wait = call_data->wait;
++
++	/*
++	 * Notify initiating CPU that I've grabbed the data and am
++	 * about to execute the function
++	 */
++	mb();
++	atomic_inc(&call_data->started);
++	/*
++	 * At this point the info structure may be out of scope unless wait==1
++	 */
++	irq_enter();
++	(*func)(info);
++	irq_exit();
++
++	if (wait) {
++		mb();
++		atomic_inc(&call_data->finished);
++	}
++
++	return IRQ_HANDLED;
++}
++
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/swiotlb.c linux-2.6.12-xen/arch/xen/i386/kernel/swiotlb.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/swiotlb.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/swiotlb.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,671 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * This implementation is a fallback for platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ *	David Mosberger-Tang <davidm at hpl.hp.com>
++ * Copyright (C) 2005 Keir Fraser <keir at xensource.com>
++ */
++
++#include <linux/cache.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <asm/io.h>
++#include <asm/pci.h>
++#include <asm/dma.h>
++#include <asm/uaccess.h>
++#include <asm-xen/xen-public/memory.h>
++
++#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
++
++#define SG_ENT_PHYS_ADDRESS(sg)	(page_to_phys((sg)->page) + (sg)->offset)
++
++/*
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2.  What is the appropriate value ?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
++ */
++#define IO_TLB_SEGSIZE	128
++
++/*
++ * log of the size of each IO TLB slab.  The number of slabs is command line
++ * controllable.
++ */
++#define IO_TLB_SHIFT 11
++
++static int swiotlb_force;
++static char *iotlb_virt_start;
++static unsigned long iotlb_nslabs;
++
++/*
++ * Used to do a quick range check in swiotlb_unmap_single and
++ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static dma_addr_t iotlb_bus_start, iotlb_bus_end, iotlb_bus_mask;
++
++/* Does the given dma address reside within the swiotlb aperture? */
++#define in_swiotlb_aperture(a) (!(((a) ^ iotlb_bus_start) & iotlb_bus_mask))
++
++/*
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
++ */
++static unsigned long io_tlb_overflow = 32*1024;
++
++void *io_tlb_overflow_buffer;
++
++/*
++ * This is a free list describing the number of free entries available from
++ * each index
++ */
++static unsigned int *io_tlb_list;
++static unsigned int io_tlb_index;
++
++/*
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
++ */
++static struct phys_addr {
++	struct page *page;
++	unsigned int offset;
++} *io_tlb_orig_addr;
++
++/*
++ * Protect the above data structures in the map and unmap calls
++ */
++static DEFINE_SPINLOCK(io_tlb_lock);
++
++static int __init
++setup_io_tlb_npages(char *str)
++{
++	/* Unlike ia64, the size is the aperture in megabytes, not 'slabs'! */
++	if (isdigit(*str)) {
++		iotlb_nslabs = simple_strtoul(str, &str, 0) <<
++			(20 - IO_TLB_SHIFT);
++		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++		/* Round up to power of two (xen_create_contiguous_region). */
++		while (iotlb_nslabs & (iotlb_nslabs-1))
++			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
++	}
++	if (*str == ',')
++		++str;
++	/*
++         * NB. 'force' enables the swiotlb, but doesn't force its use for
++         * every DMA like it does on native Linux. 'off' forcibly disables
++         * use of the swiotlb.
++         */
++	if (!strcmp(str, "force"))
++		swiotlb_force = 1;
++	else if (!strcmp(str, "off"))
++		swiotlb_force = -1;
++	return 1;
++}
++__setup("swiotlb=", setup_io_tlb_npages);
++/* make io_tlb_overflow tunable too? */
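/*
 * Usage sketch for the parser above (illustrative values): booting with
 * "swiotlb=64,force" parses 64 as the aperture size in megabytes, giving
 * iotlb_nslabs = 64 << (20 - IO_TLB_SHIFT) = 32768 slabs (already a power of
 * two and a multiple of IO_TLB_SEGSIZE), and sets swiotlb_force = 1, while
 * "swiotlb=off" sets swiotlb_force = -1 and disables the software IO TLB.
 */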
++
++/*
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the PCI DMA API.
++ */
++void
++swiotlb_init_with_default_size (size_t default_size)
++{
++	unsigned long i, bytes;
++	int rc;
++
++	if (!iotlb_nslabs) {
++		iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
++		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++		/* Round up to power of two (xen_create_contiguous_region). */
++		while (iotlb_nslabs & (iotlb_nslabs-1))
++			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
++	}
++
++	bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
++
++	/*
++	 * Get IO TLB memory from the low pages
++	 */
++	iotlb_virt_start = alloc_bootmem_low_pages(bytes);
++	if (!iotlb_virt_start)
++		panic("Cannot allocate SWIOTLB buffer!\n"
++		      "Use dom0_mem Xen boot parameter to reserve\n"
++		      "some DMA memory (e.g., dom0_mem=-128M).\n");
++
++	/* Hardcode 31 address bits for now: aacraid limitation. */
++	rc = xen_create_contiguous_region(
++		(unsigned long)iotlb_virt_start, get_order(bytes), 31);
++	BUG_ON(rc);
++
++	/*
++	 * Allocate and initialize the free list array.  This array is used
++	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
++	 */
++	io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
++	for (i = 0; i < iotlb_nslabs; i++)
++ 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++	io_tlb_index = 0;
++	io_tlb_orig_addr = alloc_bootmem(
++		iotlb_nslabs * sizeof(*io_tlb_orig_addr));
++
++	/*
++	 * Get the overflow emergency buffer
++	 */
++	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++
++	iotlb_bus_start = virt_to_bus(iotlb_virt_start);
++	iotlb_bus_end   = iotlb_bus_start + bytes;
++	iotlb_bus_mask  = ~(dma_addr_t)(bytes - 1);
++
++	printk(KERN_INFO "Software IO TLB enabled: \n"
++	       " Aperture:     %lu megabytes\n"
++	       " Bus range:    0x%016lx - 0x%016lx\n"
++	       " Kernel range: 0x%016lx - 0x%016lx\n",
++	       bytes >> 20,
++	       (unsigned long)iotlb_bus_start,
++	       (unsigned long)iotlb_bus_end,
++	       (unsigned long)iotlb_virt_start,
++	       (unsigned long)iotlb_virt_start + bytes);
++}
++
++void
++swiotlb_init(void)
++{
++	long ram_end;
++	size_t defsz = 64 * (1 << 20); /* 64MB default size */
++
++	if (swiotlb_force == 1) {
++		swiotlb = 1;
++	} else if ((swiotlb_force != -1) &&
++		   (xen_start_info->flags & SIF_INITDOMAIN)) {
++		/* Domain 0 always has a swiotlb. */
++		ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++		if (ram_end <= 0x7ffff)
++			defsz = 2 * (1 << 20); /* 2MB on systems with <2GB RAM. */
++		swiotlb = 1;
++	}
++
++	if (swiotlb)
++		swiotlb_init_with_default_size(defsz);
++	else
++		printk(KERN_INFO "Software IO TLB disabled\n");
++}
++
++/*
++ * We use __copy_to_user to transfer to the host buffer because the buffer
++ * may be mapped read-only (e.g., in the blkback driver) but lower-level
++ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
++ * unnecessary copy from the aperture to the host buffer, and a page fault.
++ */
++static void
++__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
++{
++	if (PageHighMem(buffer.page)) {
++		size_t len, bytes;
++		char *dev, *host, *kmp;
++		len = size;
++		while (len != 0) {
++			if (((bytes = len) + buffer.offset) > PAGE_SIZE)
++				bytes = PAGE_SIZE - buffer.offset;
++			kmp  = kmap_atomic(buffer.page, KM_SWIOTLB);
++			dev  = dma_addr + size - len;
++			host = kmp + buffer.offset;
++			if (dir == DMA_FROM_DEVICE) {
++				if (__copy_to_user(host, dev, bytes))
++					/* inaccessible */;
++			} else
++				memcpy(dev, host, bytes);
++			kunmap_atomic(kmp, KM_SWIOTLB);
++			len -= bytes;
++			buffer.page++;
++			buffer.offset = 0;
++		}
++	} else {
++		char *host = (char *)phys_to_virt(
++			page_to_pseudophys(buffer.page)) + buffer.offset;
++		if (dir == DMA_FROM_DEVICE) {
++			if (__copy_to_user(host, dma_addr, size))
++				/* inaccessible */;
++		} else if (dir == DMA_TO_DEVICE)
++			memcpy(dma_addr, host, size);
++	}
++}
++
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
++{
++	unsigned long flags;
++	char *dma_addr;
++	unsigned int nslots, stride, index, wrap;
++	int i;
++
++	/*
++	 * For mappings greater than a page, we limit the stride (and
++	 * hence alignment) to a page size.
++	 */
++	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++	if (size > PAGE_SIZE)
++		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
++	else
++		stride = 1;
++
++	BUG_ON(!nslots);
++
++	/*
++	 * Find suitable number of IO TLB entries size that will fit this
++	 * request and allocate a buffer from that IO TLB pool.
++	 */
++	spin_lock_irqsave(&io_tlb_lock, flags);
++	{
++		wrap = index = ALIGN(io_tlb_index, stride);
++
++		if (index >= iotlb_nslabs)
++			wrap = index = 0;
++
++		do {
++			/*
++			 * If we find a slot that indicates we have 'nslots'
++			 * number of contiguous buffers, we allocate the
++			 * buffers from that slot and mark the entries as '0'
++			 * indicating unavailable.
++			 */
++			if (io_tlb_list[index] >= nslots) {
++				int count = 0;
++
++				for (i = index; i < (int)(index + nslots); i++)
++					io_tlb_list[i] = 0;
++				for (i = index - 1;
++				     (OFFSET(i, IO_TLB_SEGSIZE) !=
++				      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++				     i--)
++					io_tlb_list[i] = ++count;
++				dma_addr = iotlb_virt_start +
++					(index << IO_TLB_SHIFT);
++
++				/*
++				 * Update the indices to avoid searching in
++				 * the next round.
++				 */
++				io_tlb_index = 
++					((index + nslots) < iotlb_nslabs
++					 ? (index + nslots) : 0);
++
++				goto found;
++			}
++			index += stride;
++			if (index >= iotlb_nslabs)
++				index = 0;
++		} while (index != wrap);
++
++		spin_unlock_irqrestore(&io_tlb_lock, flags);
++		return NULL;
++	}
++  found:
++	spin_unlock_irqrestore(&io_tlb_lock, flags);
++
++	/*
++	 * Save away the mapping from the original address to the DMA address.
++	 * This is needed when we sync the memory.  Then we sync the buffer if
++	 * needed.
++	 */
++	io_tlb_orig_addr[index] = buffer;
++	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
++
++	return dma_addr;
++}
++
++/*
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ */
++static void
++unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++	unsigned long flags;
++	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++	struct phys_addr buffer = io_tlb_orig_addr[index];
++
++	/*
++	 * First, sync the memory before unmapping the entry
++	 */
++	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
++
++	/*
++	 * Return the buffer to the free list by setting the corresponding
++	 * entries to indicate the number of contiguous entries available.
++	 * While returning the entries to the free list, we merge the entries
++	 * with slots below and above the pool being returned.
++	 */
++	spin_lock_irqsave(&io_tlb_lock, flags);
++	{
++		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++			 io_tlb_list[index + nslots] : 0);
++		/*
++		 * Step 1: return the slots to the free list, merging the
++		 * slots with succeeding slots
++		 */
++		for (i = index + nslots - 1; i >= index; i--)
++			io_tlb_list[i] = ++count;
++		/*
++		 * Step 2: merge the returned slots with the preceding slots,
++		 * if available (non zero)
++		 */
++		for (i = index - 1;
++		     (OFFSET(i, IO_TLB_SEGSIZE) !=
++		      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++		     i--)
++			io_tlb_list[i] = ++count;
++	}
++	spin_unlock_irqrestore(&io_tlb_lock, flags);
++}
++
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++	struct phys_addr buffer = io_tlb_orig_addr[index];
++	BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
++	__sync_single(buffer, dma_addr, size, dir);
++}
++
++static void
++swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++{
++	/*
++	 * Ran out of IOMMU space for this operation. This is very bad.
++	 * Unfortunately the drivers cannot handle this operation properly.
++	 * Unfortunately the drivers cannot handle this operation properly
++	 * unless they check for pci_dma_mapping_error (most don't).
++	 * When the mapping is small enough, return a static buffer to limit
++	 */
++	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
++	       "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
++
++	if (size > io_tlb_overflow && do_panic) {
++		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++			panic("PCI-DMA: Memory would be corrupted\n");
++		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++			panic("PCI-DMA: Random memory would be DMAed\n");
++	}
++}
++
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode.  The
++ * PCI address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
++ */
++dma_addr_t
++swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
++{
++	dma_addr_t dev_addr = virt_to_bus(ptr);
++	void *map;
++	struct phys_addr buffer;
++
++	BUG_ON(dir == DMA_NONE);
++
++	/*
++	 * If the pointer passed in happens to be in the device's DMA window,
++	 * we can safely return the device addr and not worry about bounce
++	 * buffering it.
++	 */
++	if (!range_straddles_page_boundary(ptr, size) &&
++	    !address_needs_mapping(hwdev, dev_addr))
++		return dev_addr;
++
++	/*
++	 * Oh well, have to allocate and map a bounce buffer.
++	 */
++	buffer.page   = virt_to_page(ptr);
++	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
++	map = map_single(hwdev, buffer, size, dir);
++	if (!map) {
++		swiotlb_full(hwdev, size, dir, 1);
++		map = io_tlb_overflow_buffer;
++	}
++
++	dev_addr = virt_to_bus(map);
++	return dev_addr;
++}
++
++/*
++ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
++ * match what was provided for in a previous swiotlb_map_single call.  All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++void
++swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
++		     int dir)
++{
++	BUG_ON(dir == DMA_NONE);
++	if (in_swiotlb_aperture(dev_addr))
++		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
++ * call this function before doing so.  At the next point you give the PCI dma
++ * address back to the card, you must first perform a
++ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
++ */
++void
++swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++			    size_t size, int dir)
++{
++	BUG_ON(dir == DMA_NONE);
++	if (in_swiotlb_aperture(dev_addr))
++		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++void
++swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++			       size_t size, int dir)
++{
++	BUG_ON(dir == DMA_NONE);
++	if (in_swiotlb_aperture(dev_addr))
++		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++/*
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above swiotlb_map_single
++ * interface.  Here the scatter gather list elements are each tagged with the
++ * appropriate dma address and length.  They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ *       DMA address/length pairs than there are SG table elements.
++ *       (for example via virtual mapping capabilities)
++ *       The routine returns the number of addr/length pairs actually
++ *       used, at most nents.
++ *
++ * Device ownership issues as mentioned above for swiotlb_map_single are the
++ * same here.
++ */
++int
++swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++	       int dir)
++{
++	struct phys_addr buffer;
++	dma_addr_t dev_addr;
++	char *map;
++	int i;
++
++	BUG_ON(dir == DMA_NONE);
++
++	for (i = 0; i < nelems; i++, sg++) {
++		dev_addr = SG_ENT_PHYS_ADDRESS(sg);
++		if (address_needs_mapping(hwdev, dev_addr)) {
++			buffer.page   = sg->page;
++			buffer.offset = sg->offset;
++			map = map_single(hwdev, buffer, sg->length, dir);
++			if (!map) {
++				/* Don't panic here, we expect map_sg users
++				   to do proper error handling. */
++				swiotlb_full(hwdev, sg->length, dir, 0);
++				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
++				sg[0].dma_length = 0;
++				return 0;
++			}
++			sg->dma_address = (dma_addr_t)virt_to_bus(map);
++		} else
++			sg->dma_address = dev_addr;
++		sg->dma_length = sg->length;
++	}
++	return nelems;
++}
++
++/*
++ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
++ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ */
++void
++swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++		 int dir)
++{
++	int i;
++
++	BUG_ON(dir == DMA_NONE);
++
++	for (i = 0; i < nelems; i++, sg++)
++		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++			unmap_single(hwdev, 
++				     (void *)bus_to_virt(sg->dma_address),
++				     sg->dma_length, dir);
++}
++
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
++ *
++ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
++ */
++void
++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++			int nelems, int dir)
++{
++	int i;
++
++	BUG_ON(dir == DMA_NONE);
++
++	for (i = 0; i < nelems; i++, sg++)
++		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++			sync_single(hwdev,
++				    (void *)bus_to_virt(sg->dma_address),
++				    sg->dma_length, dir);
++}
++
++void
++swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++			   int nelems, int dir)
++{
++	int i;
++
++	BUG_ON(dir == DMA_NONE);
++
++	for (i = 0; i < nelems; i++, sg++)
++		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++			sync_single(hwdev,
++				    (void *)bus_to_virt(sg->dma_address),
++				    sg->dma_length, dir);
++}
++
++dma_addr_t
++swiotlb_map_page(struct device *hwdev, struct page *page,
++		 unsigned long offset, size_t size,
++		 enum dma_data_direction direction)
++{
++	struct phys_addr buffer;
++	dma_addr_t dev_addr;
++	char *map;
++
++	dev_addr = page_to_phys(page) + offset;
++	if (address_needs_mapping(hwdev, dev_addr)) {
++		buffer.page   = page;
++		buffer.offset = offset;
++		map = map_single(hwdev, buffer, size, direction);
++		if (!map) {
++			swiotlb_full(hwdev, size, direction, 1);
++			map = io_tlb_overflow_buffer;
++		}
++		dev_addr = (dma_addr_t)virt_to_bus(map);
++	}
++
++	return dev_addr;
++}
++
++void
++swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++		   size_t size, enum dma_data_direction direction)
++{
++	BUG_ON(direction == DMA_NONE);
++	if (in_swiotlb_aperture(dma_address))
++		unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
++}
++
++int
++swiotlb_dma_mapping_error(dma_addr_t dma_addr)
++{
++	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
++}
++
++/*
++ * Return whether the given PCI device DMA address mask can be supported
++ * properly.  For example, if your device can only drive the low 24-bits
++ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
++ * this function.
++ */
++int
++swiotlb_dma_supported (struct device *hwdev, u64 mask)
++{
++	return (mask >= (iotlb_bus_end - 1));
++}
++
++EXPORT_SYMBOL(swiotlb_init);
++EXPORT_SYMBOL(swiotlb_map_single);
++EXPORT_SYMBOL(swiotlb_unmap_single);
++EXPORT_SYMBOL(swiotlb_map_sg);
++EXPORT_SYMBOL(swiotlb_unmap_sg);
++EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_single_for_device);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
++EXPORT_SYMBOL(swiotlb_map_page);
++EXPORT_SYMBOL(swiotlb_unmap_page);
++EXPORT_SYMBOL(swiotlb_dma_mapping_error);
++EXPORT_SYMBOL(swiotlb_dma_supported);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/time.c linux-2.6.12-xen/arch/xen/i386/kernel/time.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/time.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/time.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,961 @@
++/*
++ *  linux/arch/i386/kernel/time.c
++ *
++ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
++ *
++ * This file contains the PC-specific time handling details:
++ * reading the RTC at bootup, etc..
++ * 1994-07-02    Alan Modra
++ *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
++ * 1995-03-26    Markus Kuhn
++ *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
++ *      precision CMOS clock update
++ * 1996-05-03    Ingo Molnar
++ *      fixed time warps in do_[slow|fast]_gettimeoffset()
++ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
++ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
++ * 1998-09-05    (Various)
++ *	More robust do_fast_gettimeoffset() algorithm implemented
++ *	(works with APM, Cyrix 6x86MX and Centaur C6),
++ *	monotonic gettimeofday() with fast_get_timeoffset(),
++ *	drift-proof precision TSC calibration on boot
++ *	(C. Scott Ananian <cananian at alumni.princeton.edu>, Andrew D.
++ *	Balsa <andrebalsa at altern.org>, Philip Gladstone <philip at raptor.com>;
++ *	ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause at tu-harburg.de>).
++ * 1998-12-16    Andrea Arcangeli
++ *	Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
++ *	because was not accounting lost_ticks.
++ * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
++ *	Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
++ *	serialize accesses to xtime/lost_ticks).
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/bcd.h>
++#include <linux/efi.h>
++#include <linux/mca.h>
++#include <linux/sysctl.h>
++#include <linux/percpu.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/irq.h>
++#include <asm/msr.h>
++#include <asm/delay.h>
++#include <asm/mpspec.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/timer.h>
++
++#include "mach_time.h"
++
++#include <linux/timex.h>
++#include <linux/config.h>
++
++#include <asm/hpet.h>
++
++#include <asm/arch_hooks.h>
++
++#include "io_ports.h"
++
++#include <asm-xen/evtchn.h>
++
++extern spinlock_t i8259A_lock;
++int pit_latch_buggy;              /* extern */
++
++u64 jiffies_64 = INITIAL_JIFFIES;
++
++EXPORT_SYMBOL(jiffies_64);
++
++#if defined(__x86_64__)
++unsigned long vxtime_hz = PIT_TICK_RATE;
++struct vxtime_data __vxtime __section_vxtime;   /* for vsyscalls */
++volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
++unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
++struct timespec __xtime __section_xtime;
++struct timezone __sys_tz __section_sys_tz;
++#endif
++
++#if defined(__x86_64__)
++unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
++#else
++unsigned long cpu_khz;	/* Detected as we calibrate the TSC */
++#endif
++
++extern unsigned long wall_jiffies;
++
++DEFINE_SPINLOCK(rtc_lock);
++
++DEFINE_SPINLOCK(i8253_lock);
++EXPORT_SYMBOL(i8253_lock);
++
++extern struct init_timer_opts timer_tsc_init;
++extern struct timer_opts timer_tsc;
++struct timer_opts *cur_timer = &timer_tsc;
++
++/* These are periodically updated in shared_info, and then copied here. */
++struct shadow_time_info {
++	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
++	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
++	u32 tsc_to_nsec_mul;
++	u32 tsc_to_usec_mul;
++	int tsc_shift;
++	u32 version;
++};
++static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
++static struct timespec shadow_tv;
++static u32 shadow_tv_version;
++
++/* Keep track of last time we did processing/updating of jiffies and xtime. */
++static u64 processed_system_time;   /* System time (ns) at last processing. */
++static DEFINE_PER_CPU(u64, processed_system_time);
++
++/* Must be signed, as it's compared with s64 quantities which can be -ve. */
++#define NS_PER_TICK (1000000000LL/HZ)
++
++static inline void __normalize_time(time_t *sec, s64 *nsec)
++{
++	while (*nsec >= NSEC_PER_SEC) {
++		(*nsec) -= NSEC_PER_SEC;
++		(*sec)++;
++	}
++	while (*nsec < 0) {
++		(*nsec) += NSEC_PER_SEC;
++		(*sec)--;
++	}
++}
++
++/* Does this guest OS track Xen time, or set its wall clock independently? */
++static int independent_wallclock = 0;
++static int __init __independent_wallclock(char *str)
++{
++	independent_wallclock = 1;
++	return 1;
++}
++__setup("independent_wallclock", __independent_wallclock);
++
++int tsc_disable __initdata = 0;
++
++static void delay_tsc(unsigned long loops)
++{
++	unsigned long bclock, now;
++	
++	rdtscl(bclock);
++	do
++	{
++		rep_nop();
++		rdtscl(now);
++	} while ((now-bclock) < loops);
++}
++
++struct timer_opts timer_tsc = {
++	.name = "tsc",
++	.delay = delay_tsc,
++};
++
++/*
++ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
++ * yielding a 64-bit result.
++ */
++static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
++{
++	u64 product;
++#ifdef __i386__
++	u32 tmp1, tmp2;
++#endif
++
++	if ( shift < 0 )
++		delta >>= -shift;
++	else
++		delta <<= shift;
++
++#ifdef __i386__
++	__asm__ (
++		"mul  %5       ; "
++		"mov  %4,%%eax ; "
++		"mov  %%edx,%4 ; "
++		"mul  %5       ; "
++		"xor  %5,%5    ; "
++		"add  %4,%%eax ; "
++		"adc  %5,%%edx ; "
++		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
++		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
++#else
++	__asm__ (
++		"mul %%rdx ; shrd $32,%%rdx,%%rax"
++		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
++#endif
++
++	return product;
++}
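/*
 * Worked example for scale_delta() above (illustrative numbers): the result
 * is ((delta << shift) * mul_frac) >> 32, i.e. mul_frac is a fraction with
 * 32 fractional bits. With delta = 0x100000000ULL, mul_frac = 0x80000000
 * (0.5) and shift = 0, the product is 0x80000000ULL, half the input delta.
 * Xen publishes tsc_to_system_mul/tsc_shift in exactly this fixed-point
 * form, which is how get_nsec_offset() below turns TSC ticks into ns.
 */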
++
++void init_cpu_khz(void)
++{
++	u64 __cpu_khz = 1000000ULL << 32;
++	struct vcpu_time_info *info;
++	info = &HYPERVISOR_shared_info->vcpu_info[0].time;
++	do_div(__cpu_khz, info->tsc_to_system_mul);
++	if ( info->tsc_shift < 0 )
++		cpu_khz = __cpu_khz << -info->tsc_shift;
++	else
++		cpu_khz = __cpu_khz >> info->tsc_shift;
++}
++
++static u64 get_nsec_offset(struct shadow_time_info *shadow)
++{
++	u64 now, delta;
++	rdtscll(now);
++	delta = now - shadow->tsc_timestamp;
++	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++}
++
++static unsigned long get_usec_offset(struct shadow_time_info *shadow)
++{
++	u64 now, delta;
++	rdtscll(now);
++	delta = now - shadow->tsc_timestamp;
++	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
++}
++
++static void __update_wallclock(time_t sec, long nsec)
++{
++	long wtm_nsec, xtime_nsec;
++	time_t wtm_sec, xtime_sec;
++	u64 tmp, wc_nsec;
++
++	/* Adjust wall-clock time base based on wall_jiffies ticks. */
++	wc_nsec = processed_system_time;
++	wc_nsec += sec * (u64)NSEC_PER_SEC;
++	wc_nsec += nsec;
++	wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
++
++	/* Split wallclock base into seconds and nanoseconds. */
++	tmp = wc_nsec;
++	xtime_nsec = do_div(tmp, 1000000000);
++	xtime_sec  = (time_t)tmp;
++
++	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
++	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
++
++	set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
++	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++
++	time_adjust = 0;		/* stop active adjtime() */
++	time_status |= STA_UNSYNC;
++	time_maxerror = NTP_PHASE_LIMIT;
++	time_esterror = NTP_PHASE_LIMIT;
++}
++
++static void update_wallclock(void)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++
++	do {
++		shadow_tv_version = s->wc_version;
++		rmb();
++		shadow_tv.tv_sec  = s->wc_sec;
++		shadow_tv.tv_nsec = s->wc_nsec;
++		rmb();
++	}
++	while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++
++	if (!independent_wallclock)
++		__update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
++}
++
++/*
++ * Reads a consistent set of time-base values from Xen, into a shadow data
++ * area.
++ */
++static void get_time_values_from_xen(void)
++{
++	shared_info_t           *s = HYPERVISOR_shared_info;
++	struct vcpu_time_info   *src;
++	struct shadow_time_info *dst;
++
++	src = &s->vcpu_info[smp_processor_id()].time;
++	dst = &per_cpu(shadow_time, smp_processor_id());
++
++	do {
++		dst->version = src->version;
++		rmb();
++		dst->tsc_timestamp     = src->tsc_timestamp;
++		dst->system_timestamp  = src->system_time;
++		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
++		dst->tsc_shift         = src->tsc_shift;
++		rmb();
++	}
++	while ((src->version & 1) | (dst->version ^ src->version));
++
++	dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++}
++
++static inline int time_values_up_to_date(int cpu)
++{
++	struct vcpu_time_info   *src;
++	struct shadow_time_info *dst;
++
++	src = &HYPERVISOR_shared_info->vcpu_info[cpu].time;
++	dst = &per_cpu(shadow_time, cpu); 
++
++	return (dst->version == src->version);
++}
++
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with.  It is required for NMI access to the
++ * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
++
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
++{
++	unsigned char val;
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	val = inb_p(RTC_PORT(1));
++	lock_cmos_suffix(addr);
++	return val;
++}
++EXPORT_SYMBOL(rtc_cmos_read);
++
++void rtc_cmos_write(unsigned char val, unsigned char addr)
++{
++	lock_cmos_prefix(addr);
++	outb_p(addr, RTC_PORT(0));
++	outb_p(val, RTC_PORT(1));
++	lock_cmos_suffix(addr);
++}
++EXPORT_SYMBOL(rtc_cmos_write);
++
++/*
++ * This version of gettimeofday has microsecond resolution
++ * and better than microsecond precision on fast x86 machines with TSC.
++ */
++void do_gettimeofday(struct timeval *tv)
++{
++	unsigned long seq;
++	unsigned long usec, sec;
++	unsigned long max_ntp_tick;
++	s64 nsec;
++	unsigned int cpu;
++	struct shadow_time_info *shadow;
++	u32 local_time_version;
++
++	cpu = get_cpu();
++	shadow = &per_cpu(shadow_time, cpu);
++
++	do {
++		unsigned long lost;
++
++		local_time_version = shadow->version;
++		seq = read_seqbegin(&xtime_lock);
++
++		usec = get_usec_offset(shadow);
++		lost = jiffies - wall_jiffies;
++
++		/*
++		 * If time_adjust is negative then NTP is slowing the clock
++		 * so make sure not to go into the next possible interval.
++		 * Better to lose some accuracy than have time go backwards.
++		 */
++		if (unlikely(time_adjust < 0)) {
++			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
++			usec = min(usec, max_ntp_tick);
++
++			if (lost)
++				usec += lost * max_ntp_tick;
++		}
++		else if (unlikely(lost))
++			usec += lost * (USEC_PER_SEC / HZ);
++
++		sec = xtime.tv_sec;
++		usec += (xtime.tv_nsec / NSEC_PER_USEC);
++
++		nsec = shadow->system_timestamp - processed_system_time;
++		__normalize_time(&sec, &nsec);
++		usec += (long)nsec / NSEC_PER_USEC;
++
++		if (unlikely(!time_values_up_to_date(cpu))) {
++			/*
++			 * We may have blocked for a long time,
++			 * rendering our calculations invalid
++			 * (e.g. the time delta may have
++			 * overflowed). Detect that and recalculate
++			 * with fresh values.
++			 */
++			get_time_values_from_xen();
++			continue;
++		}
++	} while (read_seqretry(&xtime_lock, seq) ||
++		 (local_time_version != shadow->version));
++
++	put_cpu();
++
++	while (usec >= USEC_PER_SEC) {
++		usec -= USEC_PER_SEC;
++		sec++;
++	}
++
++	tv->tv_sec = sec;
++	tv->tv_usec = usec;
++}
++
++EXPORT_SYMBOL(do_gettimeofday);
++
++int do_settimeofday(struct timespec *tv)
++{
++	time_t sec;
++	s64 nsec;
++	unsigned int cpu;
++	struct shadow_time_info *shadow;
++	dom0_op_t op;
++
++	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++		return -EINVAL;
++
++	cpu = get_cpu();
++	shadow = &per_cpu(shadow_time, cpu);
++
++	write_seqlock_irq(&xtime_lock);
++
++	/*
++	 * Ensure we don't get blocked for a long time so that our time delta
++	 * overflows. If that were to happen then our shadow time values would
++	 * be stale, so we can retry with fresh ones.
++	 */
++	for ( ; ; ) {
++		nsec = tv->tv_nsec - get_nsec_offset(shadow);
++		if (time_values_up_to_date(cpu))
++			break;
++		get_time_values_from_xen();
++	}
++	sec = tv->tv_sec;
++	__normalize_time(&sec, &nsec);
++
++	if ((xen_start_info->flags & SIF_INITDOMAIN) &&
++	    !independent_wallclock) {
++		op.cmd = DOM0_SETTIME;
++		op.u.settime.secs        = sec;
++		op.u.settime.nsecs       = nsec;
++		op.u.settime.system_time = shadow->system_timestamp;
++		HYPERVISOR_dom0_op(&op);
++		update_wallclock();
++	} else if (independent_wallclock) {
++		nsec -= shadow->system_timestamp;
++		__normalize_time(&sec, &nsec);
++		__update_wallclock(sec, nsec);
++	}
++
++	write_sequnlock_irq(&xtime_lock);
++
++	put_cpu();
++
++	clock_was_set();
++	return 0;
++}
++
++EXPORT_SYMBOL(do_settimeofday);
++
++static void sync_xen_wallclock(unsigned long dummy);
++static struct timer_list sync_xen_wallclock_timer =
++	TIMER_INITIALIZER(sync_xen_wallclock, 0, 0);
++static void sync_xen_wallclock(unsigned long dummy)
++{
++	time_t sec;
++	s64 nsec;
++	dom0_op_t op;
++
++	if (((time_status & STA_UNSYNC) != 0) || independent_wallclock ||
++	    !(xen_start_info->flags & SIF_INITDOMAIN))
++		return;
++
++	write_seqlock_irq(&xtime_lock);
++
++	sec  = xtime.tv_sec;
++	nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
++	__normalize_time(&sec, &nsec);
++
++	op.cmd = DOM0_SETTIME;
++	op.u.settime.secs        = sec;
++	op.u.settime.nsecs       = nsec;
++	op.u.settime.system_time = processed_system_time;
++	HYPERVISOR_dom0_op(&op);
++
++	update_wallclock();
++
++	write_sequnlock_irq(&xtime_lock);
++
++	/* Once per minute. */
++	mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
++}
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++static int set_rtc_mmss(unsigned long nowtime)
++{
++	int retval;
++
++	WARN_ON(irqs_disabled());
++
++	if (!(xen_start_info->flags & SIF_INITDOMAIN))
++		return 0;
++
++	/* gets recalled with irq locally disabled */
++	spin_lock_irq(&rtc_lock);
++	if (efi_enabled)
++		retval = efi_set_rtc_mmss(nowtime);
++	else
++		retval = mach_set_rtc_mmss(nowtime);
++	spin_unlock_irq(&rtc_lock);
++
++	return retval;
++}
++#else
++static int set_rtc_mmss(unsigned long nowtime)
++{
++	return 0;
++}
++#endif
++
++/* monotonic_clock(): returns # of nanoseconds passed since time_init()
++ *		Note: This function is required to return accurate
++ *		time even in the absence of multiple timer ticks.
++ */
++unsigned long long monotonic_clock(void)
++{
++	int cpu = get_cpu();
++	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++	u64 time;
++	u32 local_time_version;
++
++	do {
++		local_time_version = shadow->version;
++		smp_rmb();
++		time = shadow->system_timestamp + get_nsec_offset(shadow);
++		if (!time_values_up_to_date(cpu))
++			get_time_values_from_xen();
++		smp_rmb();
++	} while (local_time_version != shadow->version);
++
++	put_cpu();
++
++	return time;
++}
++EXPORT_SYMBOL(monotonic_clock);
++
++unsigned long long sched_clock(void)
++{
++	return monotonic_clock();
++}
++
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++unsigned long profile_pc(struct pt_regs *regs)
++{
++	unsigned long pc = instruction_pointer(regs);
++
++	if (in_lock_functions(pc))
++		return *(unsigned long *)(regs->ebp + 4);
++
++	return pc;
++}
++EXPORT_SYMBOL(profile_pc);
++#endif
++
++irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++{
++	s64 delta, delta_cpu;
++	int i, cpu = smp_processor_id();
++	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++
++	write_seqlock(&xtime_lock);
++
++	do {
++		get_time_values_from_xen();
++
++		delta = delta_cpu = 
++			shadow->system_timestamp + get_nsec_offset(shadow);
++		delta     -= processed_system_time;
++		delta_cpu -= per_cpu(processed_system_time, cpu);
++	}
++	while (!time_values_up_to_date(cpu));
++
++	if ((unlikely(delta < -1000000LL) || unlikely(delta_cpu < 0))
++	    && printk_ratelimit()) {
++		printk("Timer ISR/%d: Time went backwards: "
++		       "delta=%lld cpu_delta=%lld shadow=%lld "
++		       "off=%lld processed=%lld cpu_processed=%lld\n",
++		       cpu, delta, delta_cpu, shadow->system_timestamp,
++		       (s64)get_nsec_offset(shadow),
++		       processed_system_time,
++		       per_cpu(processed_system_time, cpu));
++		for (i = 0; i < num_online_cpus(); i++)
++			printk(" %d: %lld\n", i,
++			       per_cpu(processed_system_time, i));
++	}
++
++	/* System-wide jiffy work. */
++	while (delta >= NS_PER_TICK) {
++		delta -= NS_PER_TICK;
++		processed_system_time += NS_PER_TICK;
++		do_timer(regs);
++	}
++
++	if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
++		update_wallclock();
++		clock_was_set();
++	}
++
++	write_sequnlock(&xtime_lock);
++
++	/*
++         * Local CPU jiffy work. No need to hold xtime_lock, and I'm not sure
++         * if there is a risk of deadlock if we do (since update_process_times
++         * may do scheduler rebalancing work and thus acquire runqueue locks).
++         */
++	while (delta_cpu >= NS_PER_TICK) {
++		delta_cpu -= NS_PER_TICK;
++		per_cpu(processed_system_time, cpu) += NS_PER_TICK;
++		update_process_times(user_mode(regs));
++		profile_tick(CPU_PROFILING, regs);
++	}
++
++	return IRQ_HANDLED;
++}
++
++/* not static: needed by APM */
++unsigned long get_cmos_time(void)
++{
++	unsigned long retval;
++
++	spin_lock(&rtc_lock);
++
++	if (efi_enabled)
++		retval = efi_get_time();
++	else
++		retval = mach_get_cmos_time();
++
++	spin_unlock(&rtc_lock);
++
++	return retval;
++}
++static void sync_cmos_clock(unsigned long dummy);
++
++static struct timer_list sync_cmos_timer =
++                                      TIMER_INITIALIZER(sync_cmos_clock, 0, 0);
++
++static void sync_cmos_clock(unsigned long dummy)
++{
++	struct timeval now, next;
++	int fail = 1;
++
++	/*
++	 * If we have an externally synchronized Linux clock, then update
++	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++	 * called as close as possible to 500 ms before the new second starts.
++	 * This code is run on a timer.  If the clock is set, that timer
++	 * may not expire at the correct time.  Thus, we adjust...
++	 */
++	if ((time_status & STA_UNSYNC) != 0)
++		/*
++		 * Not synced, exit, do not restart a timer (if one is
++		 * running, let it run out).
++		 */
++		return;
++
++	do_gettimeofday(&now);
++	if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
++	    now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
++		fail = set_rtc_mmss(now.tv_sec);
++
++	next.tv_usec = USEC_AFTER - now.tv_usec;
++	if (next.tv_usec <= 0)
++		next.tv_usec += USEC_PER_SEC;
++
++	if (!fail)
++		next.tv_sec = 659;
++	else
++		next.tv_sec = 0;
++
++	if (next.tv_usec >= USEC_PER_SEC) {
++		next.tv_sec++;
++		next.tv_usec -= USEC_PER_SEC;
++	}
++	mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
++}
++
++void notify_arch_cmos_timer(void)
++{
++	mod_timer(&sync_cmos_timer, jiffies + 1);
++	mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
++}
++
++static long clock_cmos_diff, sleep_start;
++
++static int timer_suspend(struct sys_device *dev, pm_message_t state)
++{
++	/*
++	 * Estimate time zone so that set_time can update the clock
++	 */
++	clock_cmos_diff = -get_cmos_time();
++	clock_cmos_diff += get_seconds();
++	sleep_start = get_cmos_time();
++	return 0;
++}
++
++static int timer_resume(struct sys_device *dev)
++{
++	unsigned long flags;
++	unsigned long sec;
++	unsigned long sleep_length;
++
++#ifdef CONFIG_HPET_TIMER
++	if (is_hpet_enabled())
++		hpet_reenable();
++#endif
++	sec = get_cmos_time() + clock_cmos_diff;
++	sleep_length = (get_cmos_time() - sleep_start) * HZ;
++	write_seqlock_irqsave(&xtime_lock, flags);
++	xtime.tv_sec = sec;
++	xtime.tv_nsec = 0;
++	write_sequnlock_irqrestore(&xtime_lock, flags);
++	jiffies += sleep_length;
++	wall_jiffies += sleep_length;
++	return 0;
++}
++
++static struct sysdev_class timer_sysclass = {
++	.resume = timer_resume,
++	.suspend = timer_suspend,
++	set_kset_name("timer"),
++};
++
++
++/* XXX this driverfs stuff should probably go elsewhere later -john */
++static struct sys_device device_timer = {
++	.id	= 0,
++	.cls	= &timer_sysclass,
++};
++
++static int time_init_device(void)
++{
++	int error = sysdev_class_register(&timer_sysclass);
++	if (!error)
++		error = sysdev_register(&device_timer);
++	return error;
++}
++
++device_initcall(time_init_device);
++
++#ifdef CONFIG_HPET_TIMER
++extern void (*late_time_init)(void);
++/* Duplicate of time_init() below, with hpet_enable part added */
++static void __init hpet_time_init(void)
++{
++	xtime.tv_sec = get_cmos_time();
++	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++	set_normalized_timespec(&wall_to_monotonic,
++		-xtime.tv_sec, -xtime.tv_nsec);
++
++	if ((hpet_enable() >= 0) && hpet_use_timer) {
++		printk("Using HPET for base-timer\n");
++	}
++
++	cur_timer = select_timer();
++	printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
++
++	time_init_hook();
++}
++#endif
++
++/* Dynamically-mapped IRQ. */
++DEFINE_PER_CPU(int, timer_irq);
++
++extern void (*late_time_init)(void);
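++/*
++ * Bind CPU 0's Xen VIRQ_TIMER virtual interrupt to timer_interrupt().
++ * Secondary CPUs are wired up later via local_setup_timer().
++ */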
++static void setup_cpu0_timer_irq(void)
++{
++	per_cpu(timer_irq, 0) =
++		bind_virq_to_irqhandler(
++			VIRQ_TIMER,
++			0,
++			timer_interrupt,
++			SA_INTERRUPT,
++			"timer0",
++			NULL);
++	BUG_ON(per_cpu(timer_irq, 0) < 0);
++}
++
++void __init time_init(void)
++{
++#ifdef CONFIG_HPET_TIMER
++	if (is_hpet_capable()) {
++		/*
++		 * HPET initialization needs to do memory-mapped io. So, let
++		 * us do a late initialization after mem_init().
++		 */
++		late_time_init = hpet_time_init;
++		return;
++	}
++#endif
++	get_time_values_from_xen();
++
++	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++	per_cpu(processed_system_time, 0) = processed_system_time;
++
++	update_wallclock();
++
++	init_cpu_khz();
++	printk(KERN_INFO "Xen reported: %lu.%03lu MHz processor.\n",
++	       cpu_khz / 1000, cpu_khz % 1000);
++
++#if defined(__x86_64__)
++	vxtime.mode = VXTIME_TSC;
++	vxtime.quot = (1000000L << 32) / vxtime_hz;
++	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
++	vxtime.hz = vxtime_hz;
++	sync_core();
++	rdtscll(vxtime.last_tsc);
++#endif
++
++	/* Cannot request_irq() until kmem is initialised. */
++	late_time_init = setup_cpu0_timer_irq;
++}
++
++/* Convert jiffies to system time. */
++static inline u64 jiffies_to_st(unsigned long j) 
++{
++	unsigned long seq;
++	long delta;
++	u64 st;
++
++	do {
++		seq = read_seqbegin(&xtime_lock);
++		delta = j - jiffies;
++		/* NB. The next check can trigger in some wrap-around cases,
++		 * but that's ok: we'll just end up with a shorter timeout. */
++		if (delta < 1) 
++			delta = 1;
++		st = processed_system_time + (delta * (u64)NS_PER_TICK);
++	} while (read_seqretry(&xtime_lock, seq));
++
++	return st;
++}
++
++/*
++ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
++ * These functions are based on implementations from arch/s390/kernel/time.c
++ */
++void stop_hz_timer(void)
++{
++	unsigned int cpu = smp_processor_id();
++	unsigned long j;
++	
++	/* s390 does this /before/ checking rcu_pending(). We do the same. */
++	cpu_set(cpu, nohz_cpu_mask);
++
++	/* Leave ourselves in 'tick mode' if rcu or softirq pending. */
++	if (rcu_pending(cpu) || local_softirq_pending()) {
++		cpu_clear(cpu, nohz_cpu_mask);
++		j = jiffies + 1;
++	} else {
++		j = next_timer_interrupt();
++	}
++
++	BUG_ON(HYPERVISOR_set_timer_op(jiffies_to_st(j)) != 0);
++}
++
++void start_hz_timer(void)
++{
++	cpu_clear(smp_processor_id(), nohz_cpu_mask);
++}
++
++/* No locking required. We are the only CPU running, and interrupts are off. */
++void time_resume(void)
++{
++	init_cpu_khz();
++
++	get_time_values_from_xen();
++
++	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++	per_cpu(processed_system_time, 0) = processed_system_time;
++
++	update_wallclock();
++}
++
++#ifdef CONFIG_SMP
++static char timer_name[NR_CPUS][15];
++
++void local_setup_timer(unsigned int cpu)
++{
++	int seq;
++
++	BUG_ON(cpu == 0);
++
++	do {
++		seq = read_seqbegin(&xtime_lock);
++		/* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
++		per_cpu(processed_system_time, cpu) = 
++			per_cpu(shadow_time, 0).system_timestamp;
++	} while (read_seqretry(&xtime_lock, seq));
++
++	sprintf(timer_name[cpu], "timer%d", cpu);
++	per_cpu(timer_irq, cpu) =
++		bind_virq_to_irqhandler(
++			VIRQ_TIMER,
++			cpu,
++			timer_interrupt,
++			SA_INTERRUPT,
++			timer_name[cpu],
++			NULL);
++	BUG_ON(per_cpu(timer_irq, cpu) < 0);
++}
++
++void local_teardown_timer(unsigned int cpu)
++{
++	BUG_ON(cpu == 0);
++	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
++}
++#endif
++
++/*
++ * /proc/sys/xen: This really belongs in another file. It can stay here for
++ * now however.
++ */
++static ctl_table xen_subtable[] = {
++	{1, "independent_wallclock", &independent_wallclock,
++	 sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
++	{0}
++};
++static ctl_table xen_table[] = {
++	{123, "xen", NULL, 0, 0555, xen_subtable},
++	{0}
++};
++static int __init xen_sysctl_init(void)
++{
++	(void)register_sysctl_table(xen_table, 0);
++	return 0;
++}
++__initcall(xen_sysctl_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/traps.c linux-2.6.12-xen/arch/xen/i386/kernel/traps.c
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/traps.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/traps.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,1029 @@
++/*
++ *  linux/arch/i386/traps.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'asm.s'.
++ */
++#include <linux/config.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/highmem.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/kprobes.h>
++
++#ifdef CONFIG_EISA
++#include <linux/ioport.h>
++#include <linux/eisa.h>
++#endif
++
++#ifdef CONFIG_MCA
++#include <linux/mca.h>
++#endif
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/nmi.h>
++
++#include <asm/smp.h>
++#include <asm/arch_hooks.h>
++#include <asm/kdebug.h>
++
++#include <linux/irq.h>
++#include <linux/module.h>
++
++#include "mach_traps.h"
++
++asmlinkage int system_call(void);
++
++/* Do we ignore FPU interrupts ? */
++char ignore_fpu_irq = 0;
++
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void alignment_check(void);
++asmlinkage void fixup_4gb_segment(void);
++asmlinkage void machine_check(void);
++
++static int kstack_depth_to_print = 24;
++struct notifier_block *i386die_chain;
++static DEFINE_SPINLOCK(die_notifier_lock);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++	int err = 0;
++	unsigned long flags;
++	spin_lock_irqsave(&die_notifier_lock, flags);
++	err = notifier_chain_register(&i386die_chain, nb);
++	spin_unlock_irqrestore(&die_notifier_lock, flags);
++	return err;
++}
++
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++{
++	return	p > (void *)tinfo &&
++		p < (void *)tinfo + THREAD_SIZE - 3;
++}
++
++static inline unsigned long print_context_stack(struct thread_info *tinfo,
++				unsigned long *stack, unsigned long ebp)
++{
++	unsigned long addr;
++
++#ifdef	CONFIG_FRAME_POINTER
++	while (valid_stack_ptr(tinfo, (void *)ebp)) {
++		addr = *(unsigned long *)(ebp + 4);
++		printk(" [<%08lx>] ", addr);
++		print_symbol("%s", addr);
++		printk("\n");
++		ebp = *(unsigned long *)ebp;
++	}
++#else
++	while (valid_stack_ptr(tinfo, stack)) {
++		addr = *stack++;
++		if (__kernel_text_address(addr)) {
++			printk(" [<%08lx>]", addr);
++			print_symbol(" %s", addr);
++			printk("\n");
++		}
++	}
++#endif
++	return ebp;
++}
++
++void show_trace(struct task_struct *task, unsigned long * stack)
++{
++	unsigned long ebp;
++
++	if (!task)
++		task = current;
++
++	if (task == current) {
++		/* Grab ebp right from our regs */
++		asm ("movl %%ebp, %0" : "=r" (ebp) : );
++	} else {
++		/* ebp is the last reg pushed by switch_to */
++		ebp = *(unsigned long *) task->thread.esp;
++	}
++
++	while (1) {
++		struct thread_info *context;
++		context = (struct thread_info *)
++			((unsigned long)stack & (~(THREAD_SIZE - 1)));
++		ebp = print_context_stack(context, stack, ebp);
++		stack = (unsigned long*)context->previous_esp;
++		if (!stack)
++			break;
++		printk(" =======================\n");
++	}
++}
++
++void show_stack(struct task_struct *task, unsigned long *esp)
++{
++	unsigned long *stack;
++	int i;
++
++	if (esp == NULL) {
++		if (task)
++			esp = (unsigned long*)task->thread.esp;
++		else
++			esp = (unsigned long *)&esp;
++	}
++
++	stack = esp;
++	for(i = 0; i < kstack_depth_to_print; i++) {
++		if (kstack_end(stack))
++			break;
++		if (i && ((i % 8) == 0))
++			printk("\n       ");
++		printk("%08lx ", *stack++);
++	}
++	printk("\nCall Trace:\n");
++	show_trace(task, esp);
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++	unsigned long stack;
++
++	show_trace(current, &stack);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++	int i;
++	int in_kernel = 1;
++	unsigned long esp;
++	unsigned short ss;
++
++	esp = (unsigned long) (&regs->esp);
++	ss = __KERNEL_DS;
++	if (regs->xcs & 2) {
++		in_kernel = 0;
++		esp = regs->esp;
++		ss = regs->xss & 0xffff;
++	}
++	print_modules();
++	printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\nEFLAGS: %08lx"
++			"   (%s) \n",
++		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
++		print_tainted(), regs->eflags, system_utsname.release);
++	print_symbol("EIP is at %s\n", regs->eip);
++	printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
++		regs->eax, regs->ebx, regs->ecx, regs->edx);
++	printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
++		regs->esi, regs->edi, regs->ebp, esp);
++	printk("ds: %04x   es: %04x   ss: %04x\n",
++		regs->xds & 0xffff, regs->xes & 0xffff, ss);
++	printk("Process %s (pid: %d, threadinfo=%p task=%p)",
++		current->comm, current->pid, current_thread_info(), current);
++	/*
++	 * When in-kernel, we also print out the stack and code at the
++	 * time of the fault..
++	 */
++	if (in_kernel) {
++		u8 *eip;
++
++		printk("\nStack: ");
++		show_stack(NULL, (unsigned long*)esp);
++
++		printk("Code: ");
++
++		eip = (u8 *)regs->eip - 43;
++		for (i = 0; i < 64; i++, eip++) {
++			unsigned char c;
++
++			if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
++				printk(" Bad EIP value.");
++				break;
++			}
++			if (eip == (u8 *)regs->eip)
++				printk("<%02x> ", c);
++			else
++				printk("%02x ", c);
++		}
++	}
++	printk("\n");
++}	
++
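++/*
++ * Decode a BUG() trap frame: the ud2 opcode (0x0f 0x0b) is expected to be
++ * followed by the line number and a pointer to the file name, which are
++ * read back here to print a useful report.
++ */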
++static void handle_BUG(struct pt_regs *regs)
++{
++	unsigned short ud2;
++	unsigned short line;
++	char *file;
++	char c;
++	unsigned long eip;
++
++	if (regs->xcs & 2)
++		goto no_bug;		/* Not in kernel */
++
++	eip = regs->eip;
++
++	if (eip < PAGE_OFFSET)
++		goto no_bug;
++	if (__get_user(ud2, (unsigned short *)eip))
++		goto no_bug;
++	if (ud2 != 0x0b0f)
++		goto no_bug;
++	if (__get_user(line, (unsigned short *)(eip + 2)))
++		goto bug;
++	if (__get_user(file, (char **)(eip + 4)) ||
++		(unsigned long)file < PAGE_OFFSET || __get_user(c, file))
++		file = "<bad filename>";
++
++	printk("------------[ cut here ]------------\n");
++	printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
++
++no_bug:
++	return;
++
++	/* Here we know it was a BUG but file-n-line is unavailable */
++bug:
++	printk("Kernel BUG\n");
++}
++
++void die(const char * str, struct pt_regs * regs, long err)
++{
++	static struct {
++		spinlock_t lock;
++		u32 lock_owner;
++		int lock_owner_depth;
++	} die = {
++		.lock =			SPIN_LOCK_UNLOCKED,
++		.lock_owner =		-1,
++		.lock_owner_depth =	0
++	};
++	static int die_counter;
++
++	if (die.lock_owner != _smp_processor_id()) {
++		console_verbose();
++		spin_lock_irq(&die.lock);
++		die.lock_owner = smp_processor_id();
++		die.lock_owner_depth = 0;
++		bust_spinlocks(1);
++	}
++
++	if (++die.lock_owner_depth < 3) {
++		int nl = 0;
++		handle_BUG(regs);
++		printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++		printk("PREEMPT ");
++		nl = 1;
++#endif
++#ifdef CONFIG_SMP
++		printk("SMP ");
++		nl = 1;
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++		printk("DEBUG_PAGEALLOC");
++		nl = 1;
++#endif
++		if (nl)
++			printk("\n");
++		notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
++		show_registers(regs);
++	} else
++		printk(KERN_ERR "Recursive die() failure, output suppressed\n");
++
++	bust_spinlocks(0);
++	die.lock_owner = -1;
++	spin_unlock_irq(&die.lock);
++	if (in_interrupt())
++		panic("Fatal exception in interrupt");
++
++	if (panic_on_oops) {
++		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
++		ssleep(5);
++		panic("Fatal exception");
++	}
++	do_exit(SIGSEGV);
++}
++
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
++{
++	if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
++		die(str, regs, err);
++}
++
++static void do_trap(int trapnr, int signr, char *str, int vm86,
++			   struct pt_regs * regs, long error_code, siginfo_t *info)
++{
++	if (regs->eflags & VM_MASK) {
++		if (vm86)
++			goto vm86_trap;
++		goto trap_signal;
++	}
++
++	if (!(regs->xcs & 2))
++		goto kernel_trap;
++
++	trap_signal: {
++		struct task_struct *tsk = current;
++		tsk->thread.error_code = error_code;
++		tsk->thread.trap_no = trapnr;
++		if (info)
++			force_sig_info(signr, info, tsk);
++		else
++			force_sig(signr, tsk);
++		return;
++	}
++
++	kernel_trap: {
++		if (!fixup_exception(regs))
++			die(str, regs, error_code);
++		return;
++	}
++
++	vm86_trap: {
++		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
++		if (ret) goto trap_signal;
++		return;
++	}
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	siginfo_t info; \
++	info.si_signo = signr; \
++	info.si_errno = 0; \
++	info.si_code = sicode; \
++	info.si_addr = (void __user *)siaddr; \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
++}
++
++#define DO_VM86_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
++}
++
++#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	siginfo_t info; \
++	info.si_signo = signr; \
++	info.si_errno = 0; \
++	info.si_code = sicode; \
++	info.si_addr = (void __user *)siaddr; \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++						== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
++}
++
++DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
++#ifndef CONFIG_KPROBES
++DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
++#endif
++DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
++DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
++DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
++DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++#ifdef CONFIG_X86_MCE
++DO_ERROR(18, SIGBUS, "machine check", machine_check)
++#endif
++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
++
++fastcall void do_general_protection(struct pt_regs * regs, long error_code)
++{
++	/*
++	 * If we trapped on an LDT access then ensure that the default_ldt is
++	 * loaded, if nothing else. We load default_ldt lazily because LDT
++	 * switching costs time and many applications don't need it.
++	 */
++	if (unlikely((error_code & 6) == 4)) {
++		unsigned long ldt;
++		__asm__ __volatile__ ("sldt %0" : "=r" (ldt));
++		if (ldt == 0) {
++			xen_set_ldt((unsigned long)&default_ldt[0], 5);
++			return;
++		}
++	}
++
++	if (regs->eflags & VM_MASK)
++		goto gp_in_vm86;
++
++	if (!(regs->xcs & 2))
++		goto gp_in_kernel;
++
++	current->thread.error_code = error_code;
++	current->thread.trap_no = 13;
++	force_sig(SIGSEGV, current);
++	return;
++
++gp_in_vm86:
++	local_irq_enable();
++	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
++	return;
++
++gp_in_kernel:
++	if (!fixup_exception(regs)) {
++		if (notify_die(DIE_GPF, "general protection fault", regs,
++				error_code, 13, SIGSEGV) == NOTIFY_STOP)
++			return;
++		die("general protection fault", regs, error_code);
++	}
++}
++
++static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
++	printk("You probably have a hardware problem with your RAM chips\n");
++
++	/* Clear and disable the memory parity error line. */
++	clear_mem_error(reason);
++}
++
++static void io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++	printk("NMI: IOCK error (debug interrupt?)\n");
++	show_registers(regs);
++
++	/* Re-enable the IOCK line, wait for a few seconds */
++	clear_io_check_error(reason);
++}
++
++static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++#ifdef CONFIG_MCA
++	/* Might actually be able to figure out what the guilty party
++	* is. */
++	if( MCA_bus ) {
++		mca_handle_nmi();
++		return;
++	}
++#endif
++	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
++		reason, smp_processor_id());
++	printk("Dazed and confused, but trying to continue\n");
++	printk("Do you have a strange power saving mode enabled?\n");
++}
++
++static DEFINE_SPINLOCK(nmi_print_lock);
++
++void die_nmi (struct pt_regs *regs, const char *msg)
++{
++	spin_lock(&nmi_print_lock);
++	/*
++	* We are in trouble anyway, let's at least try
++	* to get a message out.
++	*/
++	bust_spinlocks(1);
++	printk(msg);
++	printk(" on CPU%d, eip %08lx, registers:\n",
++		smp_processor_id(), regs->eip);
++	show_registers(regs);
++	printk("console shuts up ...\n");
++	console_silent();
++	spin_unlock(&nmi_print_lock);
++	bust_spinlocks(0);
++	do_exit(SIGSEGV);
++}
++
++static void default_do_nmi(struct pt_regs * regs)
++{
++	unsigned char reason = 0;
++
++	/* Only the BSP gets external NMIs from the system.  */
++	if (!smp_processor_id())
++		reason = get_nmi_reason();
++ 
++	if (!(reason & 0xc0)) {
++		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
++							== NOTIFY_STOP)
++			return;
++#ifdef CONFIG_X86_LOCAL_APIC
++		/*
++		 * Ok, so this is none of the documented NMI sources,
++		 * so it must be the NMI watchdog.
++		 */
++		if (nmi_watchdog) {
++			nmi_watchdog_tick(regs);
++			return;
++		}
++#endif
++		unknown_nmi_error(reason, regs);
++		return;
++	}
++	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
++		return;
++	if (reason & 0x80)
++		mem_parity_error(reason, regs);
++	if (reason & 0x40)
++		io_check_error(reason, regs);
++	/*
++	 * Reassert NMI in case it became active meanwhile
++	 * as it's edge-triggered.
++	 */
++	reassert_nmi();
++}
++
++static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
++{
++	return 0;
++}
++ 
++static nmi_callback_t nmi_callback = dummy_nmi_callback;
++ 
++fastcall void do_nmi(struct pt_regs * regs, long error_code)
++{
++	int cpu;
++
++	nmi_enter();
++
++	cpu = smp_processor_id();
++
++#ifdef CONFIG_HOTPLUG_CPU
++	if (!cpu_online(cpu)) {
++		nmi_exit();
++		return;
++	}
++#endif
++
++	++nmi_count(cpu);
++
++	if (!nmi_callback(regs, cpu))
++		default_do_nmi(regs);
++
++	nmi_exit();
++}
++
++void set_nmi_callback(nmi_callback_t callback)
++{
++	nmi_callback = callback;
++}
++
++void unset_nmi_callback(void)
++{
++	nmi_callback = dummy_nmi_callback;
++}
++
++#ifdef CONFIG_KPROBES
++fastcall void do_int3(struct pt_regs *regs, long error_code)
++{
++	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
++			== NOTIFY_STOP)
++		return;
++	/* This is an interrupt gate, because kprobes wants interrupts
++	disabled.  Normal trap handlers don't. */
++	restore_interrupts(regs);
++	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
++}
++#endif
++
++/*
++ * Our handling of the processor debug registers is non-trivial.
++ * We do not clear them on entry and exit from the kernel. Therefore
++ * it is possible to get a watchpoint trap here from inside the kernel.
++ * However, the code in ./ptrace.c has ensured that the user can
++ * only set watchpoints on userspace addresses. Therefore the in-kernel
++ * watchpoint trap can only occur in code which is reading/writing
++ * from user space. Such code must not hold kernel locks (since it
++ * can equally take a page fault), therefore it is safe to call
++ * force_sig_info even though that claims and releases locks.
++ * 
++ * Code in ./signal.c ensures that the debug control register
++ * is restored before we deliver any signal, and therefore that
++ * user code runs with the correct debug control register even though
++ * we clear it here.
++ *
++ * Being careful here means that we don't have to be as careful in a
++ * lot of more complicated places (task switching can be a bit lazy
++ * about restoring all the debug state, and ptrace doesn't have to
++ * find every occurrence of the TF bit that could be saved away even
++ * by user code)
++ */
++fastcall void do_debug(struct pt_regs * regs, long error_code)
++{
++	unsigned int condition;
++	struct task_struct *tsk = current;
++
++	condition = HYPERVISOR_get_debugreg(6);
++
++	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++					SIGTRAP) == NOTIFY_STOP)
++		return;
++	/* It's safe to allow irq's after DR6 has been saved */
++	if (regs->eflags & X86_EFLAGS_IF)
++		local_irq_enable();
++
++	/* Mask out spurious debug traps due to lazy DR7 setting */
++	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++		if (!tsk->thread.debugreg[7])
++			goto clear_dr7;
++	}
++
++	if (regs->eflags & VM_MASK)
++		goto debug_vm86;
++
++	/* Save debug status register where ptrace can see it */
++	tsk->thread.debugreg[6] = condition;
++
++	/*
++	 * Single-stepping through TF: make sure we ignore any events in
++	 * kernel space (but re-enable TF when returning to user mode).
++	 */
++	if (condition & DR_STEP) {
++		/*
++		 * We already checked v86 mode above, so we can
++		 * check for kernel mode by just checking the CPL
++		 * of CS.
++		 */
++		if ((regs->xcs & 2) == 0)
++			goto clear_TF_reenable;
++	}
++
++	/* Ok, finally something we can handle */
++	send_sigtrap(tsk, regs, error_code);
++
++	/* Disable additional traps. They'll be re-enabled when
++	 * the signal is delivered.
++	 */
++clear_dr7:
++	HYPERVISOR_set_debugreg(7, 0);
++	return;
++
++debug_vm86:
++	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
++	return;
++
++clear_TF_reenable:
++	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++	regs->eflags &= ~TF_MASK;
++	return;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++void math_error(void __user *eip)
++{
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short cwd, swd;
++
++	/*
++	 * Save the info for the exception handler and clear the error.
++	 */
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 16;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = eip;
++	/*
++	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
++	 * status.  0x3f is the exception bits in these regs, 0x200 is the
++	 * C1 reg you need in case of a stack fault, 0x040 is the stack
++	 * fault bit.  We should only be taking one exception at a time,
++	 * so if this combination doesn't produce any single exception,
++	 * then we have a bad program that isn't synchronizing its FPU usage
++	 * and it will suffer the consequences since we won't be able to
++	 * fully reproduce the context of the exception
++	 */
++	cwd = get_fpu_cwd(task);
++	swd = get_fpu_swd(task);
++	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
++		case 0x000:
++		default:
++			break;
++		case 0x001: /* Invalid Op */
++		case 0x041: /* Stack Fault */
++		case 0x241: /* Stack Fault | Direction */
++			info.si_code = FPE_FLTINV;
++			/* Should we clear the SF or let user space do it ???? */
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
++			break;
++	}
++	force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++{
++	ignore_fpu_irq = 1;
++	math_error((void __user *)regs->eip);
++}
++
++static void simd_math_error(void __user *eip)
++{
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short mxcsr;
++
++	/*
++	 * Save the info for the exception handler and clear the error.
++	 */
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 19;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = eip;
++	/*
++	 * The SIMD FPU exceptions are handled a little differently, as there
++	 * is only a single status/control register.  Thus, to determine which
++	 * unmasked exception was caught we must mask the exception mask bits
++	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++	 */
++	mxcsr = get_fpu_mxcsr(task);
++	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++		case 0x000:
++		default:
++			break;
++		case 0x001: /* Invalid Op */
++			info.si_code = FPE_FLTINV;
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
++			break;
++	}
++	force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
++					  long error_code)
++{
++	if (cpu_has_xmm) {
++		/* Handle SIMD FPU exceptions on PIII+ processors. */
++		ignore_fpu_irq = 1;
++		simd_math_error((void __user *)regs->eip);
++	} else {
++		/*
++		 * Handle strange cache flush from user space exception
++		 * in all other cases.  This is undocumented behaviour.
++		 */
++		if (regs->eflags & VM_MASK) {
++			handle_vm86_fault((struct kernel_vm86_regs *)regs,
++					  error_code);
++			return;
++		}
++		die_if_kernel("cache flush denied", regs, error_code);
++		current->thread.trap_no = 19;
++		current->thread.error_code = error_code;
++		force_sig(SIGSEGV, current);
++	}
++}
++
++#ifndef CONFIG_XEN
++fastcall void setup_x86_bogus_stack(unsigned char * stk)
++{
++	unsigned long *switch16_ptr, *switch32_ptr;
++	struct pt_regs *regs;
++	unsigned long stack_top, stack_bot;
++	unsigned short iret_frame16_off;
++	int cpu = smp_processor_id();
++	/* reserve the space on 32bit stack for the magic switch16 pointer */
++	memmove(stk, stk + 8, sizeof(struct pt_regs));
++	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
++	regs = (struct pt_regs *)stk;
++	/* now the switch32 on 16bit stack */
++	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
++	switch32_ptr = (unsigned long *)(stack_top - 8);
++	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
++	/* copy iret frame on 16bit stack */
++	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
++	/* fill in the switch pointers */
++	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
++	switch16_ptr[1] = __ESPFIX_SS;
++	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
++		8 - CPU_16BIT_STACK_SIZE;
++	switch32_ptr[1] = __KERNEL_DS;
++}
++
++fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
++{
++	unsigned long *switch32_ptr;
++	unsigned char *stack16, *stack32;
++	unsigned long stack_top, stack_bot;
++	int len;
++	int cpu = smp_processor_id();
++	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
++	switch32_ptr = (unsigned long *)(stack_top - 8);
++	/* copy the data from 16bit stack to 32bit stack */
++	len = CPU_16BIT_STACK_SIZE - 8 - sp;
++	stack16 = (unsigned char *)(stack_bot + sp);
++	stack32 = (unsigned char *)
++		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
++	memcpy(stack32, stack16, len);
++	return stack32;
++}
++#endif
++
++/*
++ *  'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ *
++ * Must be called with kernel preemption disabled (in this case,
++ * local interrupts are disabled at the call-site in entry.S).
++ */
++asmlinkage void math_state_restore(struct pt_regs regs)
++{
++	struct thread_info *thread = current_thread_info();
++	struct task_struct *tsk = thread->task;
++
++	/* NB. 'clts' is done for us by Xen during virtual trap. */
++	if (!tsk_used_math(tsk))
++		init_fpu(tsk);
++	restore_fpu(tsk);
++	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
++}
++
++#ifndef CONFIG_MATH_EMULATION
++
++asmlinkage void math_emulate(long arg)
++{
++	printk("math-emulation not enabled and no coprocessor found.\n");
++	printk("killing %s.\n",current->comm);
++	force_sig(SIGFPE,current);
++	schedule();
++}
++
++#endif /* CONFIG_MATH_EMULATION */
++
++#ifdef CONFIG_X86_F00F_BUG
++void __init trap_init_f00f_bug(void)
++{
++	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
++
++	/*
++	 * Update the IDT descriptor and reload the IDT so that
++	 * it uses the read-only mapped virtual address.
++	 */
++	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));
++}
++#endif
++
++
++/*
++ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
++ * for those that specify <dpl>|4 in the second field.
++ */
++static trap_info_t trap_table[] = {
++	{  0, 0, __KERNEL_CS, (unsigned long)divide_error		},
++	{  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
++	{  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
++	{  4, 3, __KERNEL_CS, (unsigned long)overflow			},
++	{  5, 3, __KERNEL_CS, (unsigned long)bounds			},
++	{  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
++	{  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
++	{  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
++	{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
++	{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},
++	{ 12, 0, __KERNEL_CS, (unsigned long)stack_segment		},
++	{ 13, 0, __KERNEL_CS, (unsigned long)general_protection		},
++	{ 14, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
++	{ 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment		},
++	{ 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error		},
++	{ 17, 0, __KERNEL_CS, (unsigned long)alignment_check		},
++#ifdef CONFIG_X86_MCE
++	{ 18, 0, __KERNEL_CS, (unsigned long)machine_check		},
++#endif
++	{ 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
++	{ SYSCALL_VECTOR,  3, __KERNEL_CS, (unsigned long)system_call	},
++	{  0, 0,	   0, 0						}
++};
++
++void __init trap_init(void)
++{
++	HYPERVISOR_set_trap_table(trap_table);
++
++	/*
++	 * default LDT is a single-entry callgate to lcall7 for iBCS
++	 * and a callgate to lcall27 for Solaris/x86 binaries
++	 */
++	make_lowmem_page_readonly(
++		&default_ldt[0], XENFEAT_writable_descriptor_tables);
++
++	/*
++	 * Should be a barrier for any external CPU state.
++	 */
++	cpu_init();
++}
++
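++/*
++ * Build a VCPU trap context for Xen: default every vector to
++ * FLAT_KERNEL_CS, then fill in the handlers listed in trap_table above.
++ */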
++void smp_trap_init(trap_info_t *trap_ctxt)
++{
++	trap_info_t *t = trap_table;
++	int i;
++
++	for (i = 0; i < 256; i++) {
++		trap_ctxt[i].vector = i;
++		trap_ctxt[i].cs     = FLAT_KERNEL_CS;
++	}
++
++	for (t = trap_table; t->address; t++) {
++		trap_ctxt[t->vector].flags = t->flags;
++		trap_ctxt[t->vector].cs = t->cs;
++		trap_ctxt[t->vector].address = t->address;
++	}
++}
++
++static int __init kstack_setup(char *s)
++{
++	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
++	return 0;
++}
++__setup("kstack=", kstack_setup);
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/vsyscall-note.S linux-2.6.12-xen/arch/xen/i386/kernel/vsyscall-note.S
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/vsyscall-note.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/vsyscall-note.S	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,32 @@
++/*
++ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
++ * Here we can supply some information useful to userland.
++ * First we get the vanilla i386 note that supplies the kernel version info.
++ */
++
++#include "../../../i386/kernel/vsyscall-note.S"
++
++/*
++ * Now we add a special note telling glibc's dynamic linker a fake hardware
++ * flavor that it will use to choose the search path for libraries in the
++ * same way it uses real hardware capabilities like "mmx".
++ * We supply "nosegneg" as the fake capability, to indicate that we
++ * do not like negative offsets in instructions using segment overrides,
++ * since we implement those inefficiently.  This makes it possible to
++ * install libraries optimized to avoid those access patterns in someplace
++ * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d file
++ * corresponding to the bits here is needed to make ldconfig work right.
++ * It should contain:
++ *	hwcap 0 nosegneg
++ * to match the mapping of bit to name that we give here.
++ */
++#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
++	ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
++	.long ncaps, mask
++#define NOTE_KERNELCAP(bit, name) \
++	.byte bit; .asciz name
++#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
++
++NOTE_KERNELCAP_BEGIN(1, 1)
++NOTE_KERNELCAP(1, "nosegneg")  /* Change 1 back to 0 when glibc is fixed! */
++NOTE_KERNELCAP_END
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/vsyscall.S linux-2.6.12-xen/arch/xen/i386/kernel/vsyscall.S
+--- pristine-linux-2.6.12/arch/xen/i386/kernel/vsyscall.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/kernel/vsyscall.S	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,15 @@
++#include <linux/init.h>
++
++__INITDATA
++
++	.globl vsyscall_int80_start, vsyscall_int80_end
++vsyscall_int80_start:
++	.incbin "arch/xen/i386/kernel/vsyscall-int80.so"
++vsyscall_int80_end:
++
++	.globl vsyscall_sysenter_start, vsyscall_sysenter_end
++vsyscall_sysenter_start:
++	.incbin "arch/xen/i386/kernel/vsyscall-sysenter.so"
++vsyscall_sysenter_end:
++
++__FINIT
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mach-default/Makefile linux-2.6.12-xen/arch/xen/i386/mach-default/Makefile
+--- pristine-linux-2.6.12/arch/xen/i386/mach-default/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/mach-default/Makefile	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,12 @@
++#
++# Makefile for the linux kernel.
++#
++
++c-obj-y				:= topology.o
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y)):
++	@ln -fsn $(srctree)/arch/i386/mach-default/$(notdir $@) $@
++
++obj-y	+= $(c-obj-y)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-))
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/Makefile linux-2.6.12-xen/arch/xen/i386/Makefile
+--- pristine-linux-2.6.12/arch/xen/i386/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/Makefile	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,108 @@
++#
++# i386/Makefile
++#
++# This file is included by the global makefile so that you can add your own
++# architecture-specific flags and dependencies. Remember to have actions
++# for "archclean" cleaning up for this architecture.
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License.  See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (C) 1994 by Linus Torvalds
++#
++# 19990713  Artur Skawina <skawina at geocities.com>
++#           Added '-march' and '-mpreferred-stack-boundary' support
++#
++# 20050320  Kianusch Sayah Karadji <kianusch at sk-tech.net>
++#           Added support for GEODE CPU
++
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++LDFLAGS		:= -m elf_i386
++LDFLAGS_vmlinux :=
++CHECK		:= $(CHECK) -D__i386__=1
++
++CFLAGS += -m32
++AFLAGS += -m32
++
++CFLAGS += -pipe -msoft-float
++
++# prevent gcc from keeping the stack 16 byte aligned
++CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2,)
++
++align := $(cc-option-align)
++cflags-$(CONFIG_M386)		+= -march=i386
++cflags-$(CONFIG_M486)		+= -march=i486
++cflags-$(CONFIG_M586)		+= -march=i586
++cflags-$(CONFIG_M586TSC)	+= -march=i586
++cflags-$(CONFIG_M586MMX)	+= $(call cc-option,-march=pentium-mmx,-march=i586)
++cflags-$(CONFIG_M686)		+= -march=i686
++cflags-$(CONFIG_MPENTIUMII)	+= -march=i686 $(call cc-option,-mtune=pentium2)
++cflags-$(CONFIG_MPENTIUMIII)	+= -march=i686 $(call cc-option,-mtune=pentium3)
++cflags-$(CONFIG_MPENTIUMM)	+= -march=i686 $(call cc-option,-mtune=pentium3)
++cflags-$(CONFIG_MPENTIUM4)	+= -march=i686 $(call cc-option,-mtune=pentium4)
++cflags-$(CONFIG_MK6)		+= -march=k6
++# Please note that patches that add -march=athlon-xp and friends are pointless.
++# They make zero difference whatsoever to performance at this time.
++cflags-$(CONFIG_MK7)		+= $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
++cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
++cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
++cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call cc-option,-mtune=pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
++cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
++cflags-$(CONFIG_MWINCHIP2)	+= $(call cc-option,-march=winchip2,-march=i586)
++cflags-$(CONFIG_MWINCHIP3D)	+= $(call cc-option,-march=winchip2,-march=i586)
++cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
++cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
++
++# AMD Elan support
++cflags-$(CONFIG_X86_ELAN)	+= -march=i486
++
++# Geode GX1 support
++cflags-$(CONFIG_MGEODEGX1)		+= $(call cc-option,-march=pentium-mmx,-march=i486)
++
++# -mregparm=3 works ok on gcc-3.0 and later
++#
++GCC_VERSION			:= $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
++cflags-$(CONFIG_REGPARM) 	+= $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
++
++# Disable unit-at-a-time mode, it makes gcc use a lot more stack
++# due to the lack of sharing of stacklots.
++CFLAGS += $(call cc-option,-fno-unit-at-a-time,)
++
++CFLAGS += $(cflags-y)
++
++head-y := arch/xen/i386/kernel/head.o arch/xen/i386/kernel/init_task.o
++
++libs-y 					+= arch/i386/lib/
++core-y					+= arch/xen/i386/kernel/ \
++					   arch/xen/i386/mm/ \
++					   arch/xen/i386/mach-default/ \
++					   arch/i386/crypto/
++# \
++#					   arch/xen/$(mcore-y)/
++drivers-$(CONFIG_MATH_EMULATION)	+= arch/i386/math-emu/
++drivers-$(CONFIG_PCI)			+= arch/xen/i386/pci/
++# must be linked after kernel/
++drivers-$(CONFIG_OPROFILE)		+= arch/i386/oprofile/
++drivers-$(CONFIG_PM)			+= arch/i386/power/
++
++# for clean
++obj-	+= kernel/ mm/ pci/
++#obj-	+= ../../i386/lib/ ../../i386/mm/ 
++#../../i386/$(mcore-y)/
++#obj-	+= ../../i386/pci/ ../../i386/oprofile/ ../../i386/power/
++
++xenflags-y += -Iinclude/asm-xen/asm-i386/mach-xen \
++		-Iinclude/asm-i386/mach-default
++CFLAGS += $(xenflags-y)
++AFLAGS += $(xenflags-y)
++
++prepare: include/asm-$(XENARCH)/asm_offsets.h
++CLEAN_FILES += include/asm-$(XENARCH)/asm_offsets.h
++
++arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
++	include/linux/version.h include/config/MARKER
++
++include/asm-$(XENARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
++	$(call filechk,gen-asm-offsets)
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/fault.c linux-2.6.12-xen/arch/xen/i386/mm/fault.c
+--- pristine-linux-2.6.12/arch/xen/i386/mm/fault.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/mm/fault.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,620 @@
++/*
++ *  linux/arch/i386/mm/fault.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h>		/* For unblank_screen() */
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/desc.h>
++#include <asm/kdebug.h>
++
++extern void die(const char *,struct pt_regs *,long);
++
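++/*
++ * Per-CPU pointer to the page directory currently in use; consulted by
++ * dump_fault_path() and the vmalloc fault path in do_page_fault().
++ */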
++DEFINE_PER_CPU(pgd_t *, cur_pgd);
++
++/*
++ * Unlock any spinlocks which will prevent us from getting the
++ * message out 
++ */
++void bust_spinlocks(int yes)
++{
++	int loglevel_save = console_loglevel;
++
++	if (yes) {
++		oops_in_progress = 1;
++		return;
++	}
++#ifdef CONFIG_VT
++	unblank_screen();
++#endif
++	oops_in_progress = 0;
++	/*
++	 * OK, the message is on the console.  Now we call printk()
++	 * without oops_in_progress set so that printk will give klogd
++	 * a poke.  Hold onto your hats...
++	 */
++	console_loglevel = 15;		/* NMI oopser may have shut the console up */
++	printk(" ");
++	console_loglevel = loglevel_save;
++}
++
++/*
++ * Return EIP plus the CS segment base.  The segment limit is also
++ * adjusted, clamped to the kernel/user address space (whichever is
++ * appropriate), and returned in *eip_limit.
++ *
++ * The segment is checked, because it might have been changed by another
++ * task between the original faulting instruction and here.
++ *
++ * If CS is no longer a valid code segment, or if EIP is beyond the
++ * limit, or if it is a kernel address when CS is not a kernel segment,
++ * then the returned value will be greater than *eip_limit.
++ * 
++ * This is slow, but is very rarely executed.
++ */
++static inline unsigned long get_segment_eip(struct pt_regs *regs,
++					    unsigned long *eip_limit)
++{
++	unsigned long eip = regs->eip;
++	unsigned seg = regs->xcs & 0xffff;
++	u32 seg_ar, seg_limit, base, *desc;
++
++	/* The standard kernel/user address space limit. */
++	*eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
++
++	/* Unlikely, but must come before segment checks. */
++	if (unlikely((regs->eflags & VM_MASK) != 0))
++		return eip + (seg << 4);
++	
++	/* By far the most common cases. */
++	if (likely(seg == __USER_CS || seg == __KERNEL_CS))
++		return eip;
++
++	/* Check the segment exists, is within the current LDT/GDT size,
++	   that kernel/user (ring 0..3) has the appropriate privilege,
++	   that it's a code segment, and get the limit. */
++	__asm__ ("larl %3,%0; lsll %3,%1"
++		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
++	if ((~seg_ar & 0x9800) || eip > seg_limit) {
++		*eip_limit = 0;
++		return 1;	 /* So that returned eip > *eip_limit. */
++	}
++
++	/* Get the GDT/LDT descriptor base. 
++	   When you look for races in this code remember that
++	   LDT and other horrors are only used in user space. */
++	if (seg & (1<<2)) {
++		/* Must lock the LDT while reading it. */
++		down(&current->mm->context.sem);
++		desc = current->mm->context.ldt;
++		desc = (void *)desc + (seg & ~7);
++	} else {
++		/* Must disable preemption while reading the GDT. */
++		desc = (u32 *)get_cpu_gdt_table(get_cpu());
++		desc = (void *)desc + (seg & ~7);
++	}
++
++	/* Decode the code segment base from the descriptor */
++	base = get_desc_base((unsigned long *)desc);
++
++	if (seg & (1<<2)) { 
++		up(&current->mm->context.sem);
++	} else
++		put_cpu();
++
++	/* Adjust EIP and segment limit, and clamp at the kernel limit.
++	   It's legitimate for segments to wrap at 0xffffffff. */
++	seg_limit += base;
++	if (seg_limit < *eip_limit && seg_limit >= base)
++		*eip_limit = seg_limit;
++	return eip + base;
++}
++
++/* 
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ */
++static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
++{ 
++	unsigned long limit;
++	unsigned long instr = get_segment_eip (regs, &limit);
++	int scan_more = 1;
++	int prefetch = 0; 
++	int i;
++
++	for (i = 0; scan_more && i < 15; i++) { 
++		unsigned char opcode;
++		unsigned char instr_hi;
++		unsigned char instr_lo;
++
++		if (instr > limit)
++			break;
++		if (__get_user(opcode, (unsigned char *) instr))
++			break; 
++
++		instr_hi = opcode & 0xf0; 
++		instr_lo = opcode & 0x0f; 
++		instr++;
++
++		switch (instr_hi) { 
++		case 0x20:
++		case 0x30:
++			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
++			scan_more = ((instr_lo & 7) == 0x6);
++			break;
++			
++		case 0x60:
++			/* 0x64 thru 0x67 are valid prefixes in all modes. */
++			scan_more = (instr_lo & 0xC) == 0x4;
++			break;		
++		case 0xF0:
++			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
++			scan_more = !instr_lo || (instr_lo>>1) == 1;
++			break;			
++		case 0x00:
++			/* Prefetch instruction is 0x0F0D or 0x0F18 */
++			scan_more = 0;
++			if (instr > limit)
++				break;
++			if (__get_user(opcode, (unsigned char *) instr)) 
++				break;
++			prefetch = (instr_lo == 0xF) &&
++				(opcode == 0x0D || opcode == 0x18);
++			break;			
++		default:
++			scan_more = 0;
++			break;
++		} 
++	}
++	return prefetch;
++}
++
++static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++			      unsigned long error_code)
++{
++	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++		     boot_cpu_data.x86 >= 6)) {
++		/* Catch an obscure case of prefetch inside an NX page. */
++		if (nx_enabled && (error_code & 16))
++			return 0;
++		return __is_prefetch(regs, addr);
++	}
++	return 0;
++} 
++
++fastcall void do_invalid_op(struct pt_regs *, unsigned long);
++
++#ifdef CONFIG_X86_PAE
++static void dump_fault_path(unsigned long address)
++{
++	unsigned long *p, page;
++	unsigned long mfn; 
++
++	preempt_disable();
++	page = __pa(per_cpu(cur_pgd, smp_processor_id()));
++	preempt_enable();
++
++	p  = (unsigned long *)__va(page);
++	p += (address >> 30) * 2;
++	printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
++	if (p[0] & 1) {
++		mfn  = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20); 
++		page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
++		p  = (unsigned long *)__va(page);
++		address &= 0x3fffffff;
++		p += (address >> 21) * 2;
++		printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n", 
++		       page, p[1], p[0]);
++#ifndef CONFIG_HIGHPTE
++		if (p[0] & 1) {
++			mfn  = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20); 
++			page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
++			p  = (unsigned long *) __va(page);
++			address &= 0x001fffff;
++			p += (address >> 12) * 2;
++			printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
++			       page, p[1], p[0]);
++		}
++#endif
++	}
++}
++#else
++static void dump_fault_path(unsigned long address)
++{
++	unsigned long page;
++
++	preempt_disable();
++	page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
++	    [address >> 22];
++	preempt_enable();
++
++	printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
++	       machine_to_phys(page));
++	/*
++	 * We must not directly access the pte in the highpte
++	 * case, the page table might be allocated in highmem.
++	 * And lets rather not kmap-atomic the pte, just in case
++	 * it's allocated already.
++	 */
++#ifndef CONFIG_HIGHPTE
++	if (page & 1) {
++		page &= PAGE_MASK;
++		address &= 0x003ff000;
++		page = machine_to_phys(page);
++		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
++		printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
++		       machine_to_phys(page));
++	}
++#endif
++}
++#endif
++
++
++/*
++ * This routine handles page faults.  It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ *
++ * error_code:
++ *	bit 0 == 0 means no page found, 1 means protection fault
++ *	bit 1 == 0 means read, 1 means write
++ *	bit 2 == 0 means kernel, 1 means user-mode
++ */
++fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
++{
++	struct task_struct *tsk;
++	struct mm_struct *mm;
++	struct vm_area_struct * vma;
++	unsigned long address;
++	int write;
++	siginfo_t info;
++
++	address = HYPERVISOR_shared_info->vcpu_info[
++		smp_processor_id()].arch.cr2;
++
++	/* Set the "privileged fault" bit to something sane. */
++	error_code &= ~4;
++	error_code |= (regs->xcs & 2) << 1;
++	if (regs->eflags & X86_EFLAGS_VM)
++		error_code |= 4;
++
++	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++					SIGSEGV) == NOTIFY_STOP)
++		return;
++	/* It's safe to allow irq's after cr2 has been saved */
++	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
++		local_irq_enable();
++
++	tsk = current;
++
++	info.si_code = SEGV_MAPERR;
++
++	/*
++	 * We fault-in kernel-space virtual memory on-demand. The
++	 * 'reference' page table is init_mm.pgd.
++	 *
++	 * NOTE! We MUST NOT take any locks for this case. We may
++	 * be in an interrupt or a critical region, and should
++	 * only copy the information from the master page table,
++	 * nothing more.
++	 *
++	 * This verifies that the fault happens in kernel space
++	 * (error_code & 4) == 0, and that the fault was not a
++	 * protection error (error_code & 1) == 0.
++	 */
++	if (unlikely(address >= TASK_SIZE)) { 
++		if (!(error_code & 5))
++			goto vmalloc_fault;
++		/* 
++		 * Don't take the mm semaphore here. If we fixup a prefetch
++		 * fault we could otherwise deadlock.
++		 */
++		goto bad_area_nosemaphore;
++	} 
++
++	mm = tsk->mm;
++
++	/*
++	 * If we're in an interrupt, have no user context or are running in an
++	 * atomic region then we must not take the fault..
++	 */
++	if (in_atomic() || !mm)
++		goto bad_area_nosemaphore;
++
++	/* When running in the kernel we expect faults to occur only to
++	 * addresses in user space.  All other faults represent errors in the
++	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
++	 * erroneous fault occurring in a code path which already holds mmap_sem
++	 * we will deadlock attempting to validate the fault against the
++	 * address space.  Luckily the kernel only validly references user
++	 * space from well defined areas of code, which are listed in the
++	 * exceptions table.
++	 *
++	 * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++	 * Attempt to lock the address space, if we cannot we then validate the
++	 * source.  If this is invalid we can skip the address space check,
++	 * thus avoiding the deadlock.
++	 */
++	if (!down_read_trylock(&mm->mmap_sem)) {
++		if ((error_code & 4) == 0 &&
++		    !search_exception_tables(regs->eip))
++			goto bad_area_nosemaphore;
++		down_read(&mm->mmap_sem);
++	}
++
++	vma = find_vma(mm, address);
++	if (!vma)
++		goto bad_area;
++	if (vma->vm_start <= address)
++		goto good_area;
++	if (!(vma->vm_flags & VM_GROWSDOWN))
++		goto bad_area;
++	if (error_code & 4) {
++		/*
++		 * accessing the stack below %esp is always a bug.
++		 * The "+ 32" is there due to some instructions (like
++		 * pusha) doing post-decrement on the stack and that
++		 * doesn't show up until later..
++		 */
++		if (address + 32 < regs->esp)
++			goto bad_area;
++	}
++	if (expand_stack(vma, address))
++		goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++	info.si_code = SEGV_ACCERR;
++	write = 0;
++	switch (error_code & 3) {
++		default:	/* 3: write, present */
++#ifdef TEST_VERIFY_AREA
++			if (regs->cs == KERNEL_CS)
++				printk("WP fault at %08lx\n", regs->eip);
++#endif
++			/* fall through */
++		case 2:		/* write, not present */
++			if (!(vma->vm_flags & VM_WRITE))
++				goto bad_area;
++			write++;
++			break;
++		case 1:		/* read, present */
++			goto bad_area;
++		case 0:		/* read, not present */
++			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++				goto bad_area;
++	}
++
++ survive:
++	/*
++	 * If for any reason at all we couldn't handle the fault,
++	 * make sure we exit gracefully rather than endlessly redo
++	 * the fault.
++	 */
++	switch (handle_mm_fault(mm, vma, address, write)) {
++		case VM_FAULT_MINOR:
++			tsk->min_flt++;
++			break;
++		case VM_FAULT_MAJOR:
++			tsk->maj_flt++;
++			break;
++		case VM_FAULT_SIGBUS:
++			goto do_sigbus;
++		case VM_FAULT_OOM:
++			goto out_of_memory;
++		default:
++			BUG();
++	}
++
++	/*
++	 * Did it hit the DOS screen memory VA from vm86 mode?
++	 */
++	if (regs->eflags & VM_MASK) {
++		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++		if (bit < 32)
++			tsk->thread.screen_bitmap |= 1 << bit;
++	}
++	up_read(&mm->mmap_sem);
++	return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++	up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++	/* User mode accesses just cause a SIGSEGV */
++	if (error_code & 4) {
++		/* 
++		 * Valid to do another page fault here because this one came 
++		 * from user space.
++		 */
++		if (is_prefetch(regs, address, error_code))
++			return;
++
++		tsk->thread.cr2 = address;
++		/* Kernel addresses are always protection faults */
++		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++		tsk->thread.trap_no = 14;
++		info.si_signo = SIGSEGV;
++		info.si_errno = 0;
++		/* info.si_code has been set above */
++		info.si_addr = (void __user *)address;
++		force_sig_info(SIGSEGV, &info, tsk);
++		return;
++	}
++
++#ifdef CONFIG_X86_F00F_BUG
++	/*
++	 * Pentium F0 0F C7 C8 bug workaround.
++	 */
++	if (boot_cpu_data.f00f_bug) {
++		unsigned long nr;
++		
++		nr = (address - idt_descr.address) >> 3;
++
++		if (nr == 6) {
++			do_invalid_op(regs, 0);
++			return;
++		}
++	}
++#endif
++
++no_context:
++	/* Are we prepared to handle this kernel fault?  */
++	if (fixup_exception(regs))
++		return;
++
++	/* 
++	 * Valid to do another page fault here, because if this fault
++	 * had been triggered by is_prefetch fixup_exception would have 
++	 * handled it.
++	 */
++ 	if (is_prefetch(regs, address, error_code))
++ 		return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++	bust_spinlocks(1);
++
++#ifdef CONFIG_X86_PAE
++	if (error_code & 16) {
++		pte_t *pte = lookup_address(address);
++
++		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
++			printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
++	}
++#endif
++	if (address < PAGE_SIZE)
++		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++	else
++		printk(KERN_ALERT "Unable to handle kernel paging request");
++	printk(" at virtual address %08lx\n",address);
++	printk(KERN_ALERT " printing eip:\n");
++	printk("%08lx\n", regs->eip);
++	dump_fault_path(address);
++	die("Oops", regs, error_code);
++	bust_spinlocks(0);
++	do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++	up_read(&mm->mmap_sem);
++	if (tsk->pid == 1) {
++		yield();
++		down_read(&mm->mmap_sem);
++		goto survive;
++	}
++	printk("VM: killing process %s\n", tsk->comm);
++	if (error_code & 4)
++		do_exit(SIGKILL);
++	goto no_context;
++
++do_sigbus:
++	up_read(&mm->mmap_sem);
++
++	/* Kernel mode? Handle exceptions or die */
++	if (!(error_code & 4))
++		goto no_context;
++
++	/* User space => ok to do another page fault */
++	if (is_prefetch(regs, address, error_code))
++		return;
++
++	tsk->thread.cr2 = address;
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = 14;
++	info.si_signo = SIGBUS;
++	info.si_errno = 0;
++	info.si_code = BUS_ADRERR;
++	info.si_addr = (void __user *)address;
++	force_sig_info(SIGBUS, &info, tsk);
++	return;
++
++vmalloc_fault:
++	{
++		/*
++		 * Synchronize this task's top level page-table
++		 * with the 'reference' page table.
++		 *
++		 * Do _not_ use "tsk" here. We might be inside
++		 * an interrupt in the middle of a task switch..
++		 */
++		int index = pgd_index(address);
++		pgd_t *pgd, *pgd_k;
++		pud_t *pud, *pud_k;
++		pmd_t *pmd, *pmd_k;
++		pte_t *pte_k;
++
++		preempt_disable();
++		pgd = index + per_cpu(cur_pgd, smp_processor_id());
++		preempt_enable();
++		pgd_k = init_mm.pgd + index;
++
++		if (!pgd_present(*pgd_k))
++			goto no_context;
++
++		/*
++		 * set_pgd(pgd, *pgd_k); here would be useless on PAE
++		 * and redundant with the set_pmd() on non-PAE. As would
++		 * set_pud.
++		 */
++
++		pud = pud_offset(pgd, address);
++		pud_k = pud_offset(pgd_k, address);
++		if (!pud_present(*pud_k))
++			goto no_context;
++		
++		pmd = pmd_offset(pud, address);
++		pmd_k = pmd_offset(pud_k, address);
++		if (!pmd_present(*pmd_k))
++			goto no_context;
++#ifndef CONFIG_XEN
++		set_pmd(pmd, *pmd_k);
++#else
++		/*
++		 * When running on Xen we must launder *pmd_k through
++		 * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
++		 */
++		set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
++#endif
++
++		pte_k = pte_offset_kernel(pmd_k, address);
++		if (!pte_present(*pte_k))
++			goto no_context;
++		return;
++	}
++}
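
For reference, the do_page_fault() code above relies on the trylock-then-validate idiom described in its comments to avoid self-deadlocking on mmap_sem. A minimal sketch of that idiom in isolation (the function name and the truncated body are illustrative, not part of the patch):

    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/module.h>	/* search_exception_tables() */

    static void fault_locking_sketch(struct pt_regs *regs, struct mm_struct *mm,
                                     int fault_from_user_mode)
    {
    	if (!down_read_trylock(&mm->mmap_sem)) {
    		/*
    		 * mmap_sem is contended.  Only a kernel-mode access from a
    		 * code path listed in the exception table may legitimately
    		 * fault while already holding mmap_sem, so any other fault
    		 * can safely block on the semaphore.
    		 */
    		if (!fault_from_user_mode &&
    		    !search_exception_tables(regs->eip))
    			return;	/* corresponds to bad_area_nosemaphore */
    		down_read(&mm->mmap_sem);
    	}
    	/* ... find_vma(), handle_mm_fault(), and so on ... */
    	up_read(&mm->mmap_sem);
    }
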
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/highmem.c linux-2.6.12-xen/arch/xen/i386/mm/highmem.c
+--- pristine-linux-2.6.12/arch/xen/i386/mm/highmem.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/mm/highmem.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,99 @@
++#include <linux/highmem.h>
++
++void *kmap(struct page *page)
++{
++	might_sleep();
++	if (!PageHighMem(page))
++		return page_address(page);
++	return kmap_high(page);
++}
++
++void kunmap(struct page *page)
++{
++	if (in_interrupt())
++		BUG();
++	if (!PageHighMem(page))
++		return;
++	kunmap_high(page);
++}
++
++/*
++ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
++ * no global lock is needed and because the kmap code must perform a global TLB
++ * invalidation when the kmap pool wraps.
++ *
++ * However when holding an atomic kmap it is not legal to sleep, so atomic
++ * kmaps are appropriate for short, tight code paths only.
++ */
++static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
++{
++	enum fixed_addresses idx;
++	unsigned long vaddr;
++
++	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++	inc_preempt_count();
++	if (!PageHighMem(page))
++		return page_address(page);
++
++	idx = type + KM_TYPE_NR*smp_processor_id();
++	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++#ifdef CONFIG_DEBUG_HIGHMEM
++	if (!pte_none(*(kmap_pte-idx)))
++		BUG();
++#endif
++	set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++
++	return (void*) vaddr;
++}
++
++void *kmap_atomic(struct page *page, enum km_type type)
++{
++	return __kmap_atomic(page, type, kmap_prot);
++}
++
++/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
++void *kmap_atomic_pte(struct page *page, enum km_type type)
++{
++	return __kmap_atomic(page, type, PAGE_KERNEL_RO);
++}
++
++void kunmap_atomic(void *kvaddr, enum km_type type)
++{
++#ifdef CONFIG_DEBUG_HIGHMEM
++	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
++	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
++
++	if (vaddr < FIXADDR_START) { // FIXME
++		dec_preempt_count();
++		preempt_check_resched();
++		return;
++	}
++
++	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
++		BUG();
++
++	/*
++	 * force other mappings to Oops if they'll try to access
++	 * force other mappings to Oops if they try to access
++	 * this pte without first remapping it
++	pte_clear(&init_mm, vaddr, kmap_pte-idx);
++	__flush_tlb_one(vaddr);
++#endif
++
++	dec_preempt_count();
++	preempt_check_resched();
++}
++
++struct page *kmap_atomic_to_page(void *ptr)
++{
++	unsigned long idx, vaddr = (unsigned long)ptr;
++	pte_t *pte;
++
++	if (vaddr < FIXADDR_START)
++		return virt_to_page(ptr);
++
++	idx = virt_to_fix(vaddr);
++	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
++	return pte_page(*pte);
++}
++
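
The kmap_atomic()/kunmap_atomic() pair implemented above is intended for short, non-sleeping sections, as its comment notes. A typical, purely illustrative use of the 2.6.12-era km_type interface:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy the contents of a (possibly highmem) page into a kernel buffer.
     * No sleeping is allowed between kmap_atomic() and kunmap_atomic(). */
    static void copy_page_to_buffer(struct page *page, void *buf)
    {
    	void *vaddr = kmap_atomic(page, KM_USER0);

    	memcpy(buf, vaddr, PAGE_SIZE);
    	kunmap_atomic(vaddr, KM_USER0);
    }
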
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/hypervisor.c linux-2.6.12-xen/arch/xen/i386/mm/hypervisor.c
+--- pristine-linux-2.6.12/arch/xen/i386/mm/hypervisor.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/mm/hypervisor.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,459 @@
++/******************************************************************************
++ * mm/hypervisor.c
++ * 
++ * Update page tables via the hypervisor.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/config.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/balloon.h>
++#include <asm-xen/xen-public/memory.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <asm/tlbflush.h>
++
++#ifdef CONFIG_X86_64
++#define pmd_val_ma(v) (v).pmd
++#else
++#ifdef CONFIG_X86_PAE
++# define pmd_val_ma(v) ((v).pmd)
++# define pud_val_ma(v) ((v).pgd.pgd)
++#else
++# define pmd_val_ma(v) ((v).pud.pgd.pgd)
++#endif
++#endif
++
++#ifndef CONFIG_XEN_SHADOW_MODE
++void xen_l1_entry_update(pte_t *ptr, pte_t val)
++{
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = pte_val_ma(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
++{
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = pmd_val_ma(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++#ifdef CONFIG_X86_PAE
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = pud_val_ma(val);
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif
++
++#ifdef CONFIG_X86_64
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = val.pud;
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
++{
++	mmu_update_t u;
++	u.ptr = virt_to_machine(ptr);
++	u.val = val.pgd;
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif /* CONFIG_X86_64 */
++#endif /* CONFIG_XEN_SHADOW_MODE */
++
++void xen_machphys_update(unsigned long mfn, unsigned long pfn)
++{
++	mmu_update_t u;
++	u.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++	u.val = pfn;
++	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pt_switch(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_NEW_BASEPTR;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_new_user_pt(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_NEW_USER_BASEPTR;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush(void)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_INVLPG_LOCAL;
++	op.arg1.linear_addr = ptr & PAGE_MASK;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++#ifdef CONFIG_SMP
++
++void xen_tlb_flush_all(void)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_TLB_FLUSH_ALL;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush_mask(cpumask_t *mask)
++{
++	struct mmuext_op op;
++	if ( cpus_empty(*mask) )
++		return;
++	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
++	op.arg2.vcpumask = mask->bits;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_all(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_INVLPG_ALL;
++	op.arg1.linear_addr = ptr & PAGE_MASK;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
++{
++	struct mmuext_op op;
++	if ( cpus_empty(*mask) )
++		return;
++	op.cmd = MMUEXT_INVLPG_MULTI;
++	op.arg1.linear_addr = ptr & PAGE_MASK;
++	op.arg2.vcpumask    = mask->bits;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++#endif /* CONFIG_SMP */
++
++#ifndef CONFIG_XEN_SHADOW_MODE
++void xen_pgd_pin(unsigned long ptr)
++{
++	struct mmuext_op op;
++#ifdef CONFIG_X86_64
++	op.cmd = MMUEXT_PIN_L4_TABLE;
++#elif defined(CONFIG_X86_PAE)
++	op.cmd = MMUEXT_PIN_L3_TABLE;
++#else
++	op.cmd = MMUEXT_PIN_L2_TABLE;
++#endif
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pgd_unpin(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_UNPIN_TABLE;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pte_pin(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_PIN_L1_TABLE;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pte_unpin(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_UNPIN_TABLE;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++#ifdef CONFIG_X86_64
++void xen_pud_pin(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_PIN_L3_TABLE;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pud_unpin(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_UNPIN_TABLE;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pmd_pin(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_PIN_L2_TABLE;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pmd_unpin(unsigned long ptr)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_UNPIN_TABLE;
++	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++#endif /* CONFIG_X86_64 */
++#endif /* CONFIG_XEN_SHADOW_MODE */
++
++void xen_set_ldt(unsigned long ptr, unsigned long len)
++{
++	struct mmuext_op op;
++	op.cmd = MMUEXT_SET_LDT;
++	op.arg1.linear_addr = ptr;
++	op.arg2.nr_ents     = len;
++	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++/*
++ * Bitmap is indexed by page number. If bit is set, the page is part of a
++ * xen_create_contiguous_region() area of memory.
++ */
++unsigned long *contiguous_bitmap;
++
++static void contiguous_bitmap_set(
++	unsigned long first_page, unsigned long nr_pages)
++{
++	unsigned long start_off, end_off, curr_idx, end_idx;
++
++	curr_idx  = first_page / BITS_PER_LONG;
++	start_off = first_page & (BITS_PER_LONG-1);
++	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
++	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++	if (curr_idx == end_idx) {
++		contiguous_bitmap[curr_idx] |=
++			((1UL<<end_off)-1) & -(1UL<<start_off);
++	} else {
++		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
++		while ( ++curr_idx < end_idx )
++			contiguous_bitmap[curr_idx] = ~0UL;
++		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
++	}
++}
++
++static void contiguous_bitmap_clear(
++	unsigned long first_page, unsigned long nr_pages)
++{
++	unsigned long start_off, end_off, curr_idx, end_idx;
++
++	curr_idx  = first_page / BITS_PER_LONG;
++	start_off = first_page & (BITS_PER_LONG-1);
++	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
++	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++	if (curr_idx == end_idx) {
++		contiguous_bitmap[curr_idx] &=
++			-(1UL<<end_off) | ((1UL<<start_off)-1);
++	} else {
++		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
++		while ( ++curr_idx != end_idx )
++			contiguous_bitmap[curr_idx] = 0;
++		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++	}
++}
++
++/* Ensure multi-page extents are contiguous in machine memory. */
++int xen_create_contiguous_region(
++	unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++	pgd_t         *pgd; 
++	pud_t         *pud; 
++	pmd_t         *pmd;
++	pte_t         *pte;
++	unsigned long  frame, i, flags;
++	struct xen_memory_reservation reservation = {
++		.extent_start = &frame,
++		.nr_extents   = 1,
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
++
++	scrub_pages(vstart, 1 << order);
++
++	balloon_lock(flags);
++
++	/* 1. Zap current PTEs, giving away the underlying pages. */
++	for (i = 0; i < (1<<order); i++) {
++		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
++		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
++		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
++		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
++		frame = pte_mfn(*pte);
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++			INVALID_P2M_ENTRY);
++		BUG_ON(HYPERVISOR_memory_op(
++			XENMEM_decrease_reservation, &reservation) != 1);
++	}
++
++	/* 2. Get a new contiguous memory extent. */
++	reservation.extent_order = order;
++	reservation.address_bits = address_bits;
++	frame = __pa(vstart) >> PAGE_SHIFT;
++	if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++				 &reservation) != 1)
++		goto fail;
++
++	/* 3. Map the new extent in place of old pages. */
++	for (i = 0; i < (1<<order); i++) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			vstart + (i*PAGE_SIZE),
++			pfn_pte_ma(frame+i, PAGE_KERNEL), 0));
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame+i);
++	}
++
++	flush_tlb_all();
++
++	contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
++
++	balloon_unlock(flags);
++
++	return 0;
++
++ fail:
++	reservation.extent_order = 0;
++	reservation.address_bits = 0;
++
++	for (i = 0; i < (1<<order); i++) {
++		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
++		BUG_ON(HYPERVISOR_memory_op(
++			XENMEM_populate_physmap, &reservation) != 1);
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			vstart + (i*PAGE_SIZE),
++			pfn_pte_ma(frame, PAGE_KERNEL), 0));
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++	}
++
++	flush_tlb_all();
++
++	balloon_unlock(flags);
++
++	return -ENOMEM;
++}
++
++void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++	pgd_t         *pgd; 
++	pud_t         *pud; 
++	pmd_t         *pmd;
++	pte_t         *pte;
++	unsigned long  frame, i, flags;
++	struct xen_memory_reservation reservation = {
++		.extent_start = &frame,
++		.nr_extents   = 1,
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
++
++	scrub_pages(vstart, 1 << order);
++
++	balloon_lock(flags);
++
++	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
++
++	/* 1. Zap current PTEs, giving away the underlying pages. */
++	for (i = 0; i < (1<<order); i++) {
++		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
++		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
++		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
++		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
++		frame = pte_mfn(*pte);
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++			INVALID_P2M_ENTRY);
++		BUG_ON(HYPERVISOR_memory_op(
++			XENMEM_decrease_reservation, &reservation) != 1);
++	}
++
++	/* 2. Map new pages in place of old pages. */
++	for (i = 0; i < (1<<order); i++) {
++		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
++		BUG_ON(HYPERVISOR_memory_op(
++			XENMEM_populate_physmap, &reservation) != 1);
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			vstart + (i*PAGE_SIZE),
++			pfn_pte_ma(frame, PAGE_KERNEL), 0));
++		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++	}
++
++	flush_tlb_all();
++
++	balloon_unlock(flags);
++}
++
++#ifdef __i386__
++int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
++{
++	__u32 *lp = (__u32 *)((char *)ldt + entry * 8);
++	maddr_t mach_lp = arbitrary_virt_to_machine(lp);
++	return HYPERVISOR_update_descriptor(
++		mach_lp, (u64)entry_a | ((u64)entry_b<<32));
++}
++#endif
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
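
The xen_l*_entry_update() and pinning helpers above each issue a one-element mmu_update or mmuext_op batch. The same hypercall interface accepts larger batches, which lets callers amortise the hypercall cost; the following is a hypothetical sketch of that pattern (the array size and function name are made up for illustration):

    #include <asm/hypervisor.h>
    #include <asm/pgtable.h>

    #define SKETCH_BATCH	16

    /* Apply up to SKETCH_BATCH PTE updates with a single hypercall. */
    static void batch_pte_updates_sketch(pte_t *ptep[], pte_t val[], int n)
    {
    	mmu_update_t u[SKETCH_BATCH];
    	int i;

    	BUG_ON(n > SKETCH_BATCH);
    	for (i = 0; i < n; i++) {
    		u[i].ptr = virt_to_machine(ptep[i]);	/* machine address of the PTE */
    		u[i].val = pte_val_ma(val[i]);		/* new value, machine-frame encoded */
    	}
    	BUG_ON(HYPERVISOR_mmu_update(u, n, NULL, DOMID_SELF) < 0);
    }
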
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/init.c linux-2.6.12-xen/arch/xen/i386/mm/init.c
+--- pristine-linux-2.6.12/arch/xen/i386/mm/init.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/mm/init.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,780 @@
++/*
++ *  linux/arch/i386/mm/init.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/proc_fs.h>
++#include <linux/efi.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/sections.h>
++#include <asm/hypervisor.h>
++
++extern unsigned long *contiguous_bitmap;
++
++#if defined(CONFIG_SWIOTLB)
++extern void swiotlb_init(void);
++int swiotlb;
++EXPORT_SYMBOL(swiotlb);
++#endif
++
++unsigned int __VMALLOC_RESERVE = 128 << 20;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++unsigned long highstart_pfn, highend_pfn;
++
++static int noinline do_test_wp_bit(void);
++
++/*
++ * Creates a middle page table and puts a pointer to it in the
++ * given global directory entry. This only returns the gd entry
++ * in non-PAE compilation mode, since the middle layer is folded.
++ */
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++	pud_t *pud;
++	pmd_t *pmd_table;
++		
++#ifdef CONFIG_X86_PAE
++	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++	make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
++	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
++	pud = pud_offset(pgd, 0);
++	if (pmd_table != pmd_offset(pud, 0)) 
++		BUG();
++#else
++	pud = pud_offset(pgd, 0);
++	pmd_table = pmd_offset(pud, 0);
++#endif
++
++	return pmd_table;
++}
++
++/*
++ * Create a page table and place a pointer to it in a middle page
++ * directory entry.
++ */
++static pte_t * __init one_page_table_init(pmd_t *pmd)
++{
++	if (pmd_none(*pmd)) {
++		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++		make_lowmem_page_readonly(page_table,
++					  XENFEAT_writable_page_tables);
++		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++		if (page_table != pte_offset_kernel(pmd, 0))
++			BUG();	
++
++		return page_table;
++	}
++	
++	return pte_offset_kernel(pmd, 0);
++}
++
++/*
++ * This function initializes a certain range of kernel virtual memory 
++ * with new bootmem page tables, everywhere page tables are missing in
++ * the given range.
++ */
++
++/*
++ * NOTE: The pagetables are allocated contiguously in physical memory
++ * so we can cache the place of the first one and move around without 
++ * checking the pgd every time.
++ */
++static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	int pgd_idx, pmd_idx;
++	unsigned long vaddr;
++
++	vaddr = start;
++	pgd_idx = pgd_index(vaddr);
++	pmd_idx = pmd_index(vaddr);
++	pgd = pgd_base + pgd_idx;
++
++	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
++		if (pgd_none(*pgd)) 
++			one_md_table_init(pgd);
++		pud = pud_offset(pgd, vaddr);
++		pmd = pmd_offset(pud, vaddr);
++		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
++			if (vaddr < HYPERVISOR_VIRT_START && pmd_none(*pmd)) 
++				one_page_table_init(pmd);
++
++			vaddr += PMD_SIZE;
++		}
++		pmd_idx = 0;
++	}
++}
++
++static inline int is_kernel_text(unsigned long addr)
++{
++	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
++		return 1;
++	return 0;
++}
++
++/*
++ * This maps the physical memory to kernel virtual address space, a total 
++ * of max_low_pfn pages, by creating page tables starting from address 
++ * PAGE_OFFSET.
++ */
++static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
++{
++	unsigned long pfn;
++	pgd_t *pgd;
++	pmd_t *pmd;
++	pte_t *pte;
++	int pgd_idx, pmd_idx, pte_ofs;
++
++	unsigned long max_ram_pfn = xen_start_info->nr_pages;
++	if (max_ram_pfn > max_low_pfn)
++		max_ram_pfn = max_low_pfn;
++
++	pgd_idx = pgd_index(PAGE_OFFSET);
++	pgd = pgd_base + pgd_idx;
++	pfn = 0;
++	pmd_idx = pmd_index(PAGE_OFFSET);
++	pte_ofs = pte_index(PAGE_OFFSET);
++
++	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
++#ifdef CONFIG_XEN
++		/*
++		 * Native Linux does not have PAE paging enabled yet at
++		 * this point.  When running as a Xen domain we are
++		 * already in PAE mode, thus we can't simply hook an
++		 * empty pmd.  That would kill the mappings we are
++		 * currently using ...
++		 */
++		pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
++#else
++		pmd = one_md_table_init(pgd);
++#endif
++		if (pfn >= max_low_pfn)
++			continue;
++		pmd += pmd_idx;
++		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
++			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
++			if (address >= HYPERVISOR_VIRT_START)
++				continue;
++
++			/* Map with big pages if possible, otherwise create normal page tables. */
++			if (cpu_has_pse) {
++				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
++
++				if (is_kernel_text(address) || is_kernel_text(address2))
++					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
++				else
++					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++				pfn += PTRS_PER_PTE;
++			} else {
++				pte = one_page_table_init(pmd);
++
++				pte += pte_ofs;
++				for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
++						/* XEN: Only map initial RAM allocation. */
++						if ((pfn >= max_ram_pfn) || pte_present(*pte))
++							continue;
++						if (is_kernel_text(address))
++							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
++						else
++							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
++				}
++				pte_ofs = 0;
++			}
++		}
++		pmd_idx = 0;
++	}
++}
++
++#ifndef CONFIG_XEN
++
++static inline int page_kills_ppro(unsigned long pagenr)
++{
++	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
++		return 1;
++	return 0;
++}
++
++extern int is_available_memory(efi_memory_desc_t *);
++
++static inline int page_is_ram(unsigned long pagenr)
++{
++	int i;
++	unsigned long addr, end;
++
++	if (efi_enabled) {
++		efi_memory_desc_t *md;
++
++		for (i = 0; i < memmap.nr_map; i++) {
++			md = &memmap.map[i];
++			if (!is_available_memory(md))
++				continue;
++			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
++
++			if ((pagenr >= addr) && (pagenr < end))
++				return 1;
++		}
++		return 0;
++	}
++
++	for (i = 0; i < e820.nr_map; i++) {
++
++		if (e820.map[i].type != E820_RAM)	/* not usable memory */
++			continue;
++		/*
++		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
++		 *	are not. Notably the 640->1Mb area. We need a sanity
++		 *	check here.
++		 */
++		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
++		if  ((pagenr >= addr) && (pagenr < end))
++			return 1;
++	}
++	return 0;
++}
++
++#else /* CONFIG_XEN */
++
++#define page_kills_ppro(p)	0
++#define page_is_ram(p)		1
++
++#endif
++
++#ifdef CONFIG_HIGHMEM
++pte_t *kmap_pte;
++pgprot_t kmap_prot;
++
++#define kmap_get_fixmap_pte(vaddr)					\
++	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++
++static void __init kmap_init(void)
++{
++	unsigned long kmap_vstart;
++
++	/* cache the first kmap pte */
++	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
++	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
++
++	kmap_prot = PAGE_KERNEL;
++}
++
++static void __init permanent_kmaps_init(pgd_t *pgd_base)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	unsigned long vaddr;
++
++	vaddr = PKMAP_BASE;
++	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
++
++	pgd = swapper_pg_dir + pgd_index(vaddr);
++	pud = pud_offset(pgd, vaddr);
++	pmd = pmd_offset(pud, vaddr);
++	pte = pte_offset_kernel(pmd, vaddr);
++	pkmap_page_table = pte;	
++}
++
++void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
++{
++	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
++		ClearPageReserved(page);
++		set_bit(PG_highmem, &page->flags);
++		set_page_count(page, 1);
++		if (pfn < xen_start_info->nr_pages)
++			__free_page(page);
++		totalhigh_pages++;
++	} else
++		SetPageReserved(page);
++}
++
++#ifndef CONFIG_DISCONTIGMEM
++static void __init set_highmem_pages_init(int bad_ppro)
++{
++	int pfn;
++	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
++		one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
++	totalram_pages += totalhigh_pages;
++}
++#else
++extern void set_highmem_pages_init(int);
++#endif /* !CONFIG_DISCONTIGMEM */
++
++#else
++#define kmap_init() do { } while (0)
++#define permanent_kmaps_init(pgd_base) do { } while (0)
++#define set_highmem_pages_init(bad_ppro) do { } while (0)
++#endif /* CONFIG_HIGHMEM */
++
++unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
++unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
++
++#ifndef CONFIG_DISCONTIGMEM
++#define remap_numa_kva() do {} while (0)
++#else
++extern void __init remap_numa_kva(void);
++#endif
++
++pgd_t *swapper_pg_dir;
++
++static void __init pagetable_init (void)
++{
++	unsigned long vaddr;
++	pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
++	int i;
++
++	swapper_pg_dir = pgd_base;
++	init_mm.pgd    = pgd_base;
++	for (i = 0; i < NR_CPUS; i++)
++		per_cpu(cur_pgd, i) = pgd_base;
++
++	/* Enable PSE if available */
++	if (cpu_has_pse) {
++		set_in_cr4(X86_CR4_PSE);
++	}
++
++	/* Enable PGE if available */
++	if (cpu_has_pge) {
++		set_in_cr4(X86_CR4_PGE);
++		__PAGE_KERNEL |= _PAGE_GLOBAL;
++		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
++	}
++
++	kernel_physical_mapping_init(pgd_base);
++	remap_numa_kva();
++
++	/*
++	 * Fixed mappings, only the page table structure has to be
++	 * created - mappings will be set by set_fixmap():
++	 */
++	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
++	page_table_range_init(vaddr, 0, pgd_base);
++
++	permanent_kmaps_init(pgd_base);
++}
++
++#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
++/*
++ * Swap suspend & friends need this for resume because things like the intel-agp
++ * driver might have split up a kernel 4MB mapping.
++ */
++char __nosavedata swsusp_pg_dir[PAGE_SIZE]
++	__attribute__ ((aligned (PAGE_SIZE)));
++
++static inline void save_pg_dir(void)
++{
++	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++}
++#else
++static inline void save_pg_dir(void)
++{
++}
++#endif
++
++void zap_low_mappings (void)
++{
++	int i;
++
++	save_pg_dir();
++
++	/*
++	 * Zap initial low-memory mappings.
++	 *
++	 * Note that "pgd_clear()" doesn't do it for
++	 * us, because pgd_clear() is a no-op on i386.
++	 */
++	for (i = 0; i < USER_PTRS_PER_PGD; i++)
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
++#else
++		set_pgd(swapper_pg_dir+i, __pgd(0));
++#endif
++	flush_tlb_all();
++}
++
++static int disable_nx __initdata = 0;
++u64 __supported_pte_mask = ~_PAGE_NX;
++
++/*
++ * noexec = on|off
++ *
++ * Control non executable mappings.
++ *
++ * on      Enable
++ * off     Disable
++ */
++void __init noexec_setup(const char *str)
++{
++	if (!strncmp(str, "on",2) && cpu_has_nx) {
++		__supported_pte_mask |= _PAGE_NX;
++		disable_nx = 0;
++	} else if (!strncmp(str,"off",3)) {
++		disable_nx = 1;
++		__supported_pte_mask &= ~_PAGE_NX;
++	}
++}
++
++int nx_enabled = 0;
++#ifdef CONFIG_X86_PAE
++
++static void __init set_nx(void)
++{
++	unsigned int v[4], l, h;
++
++	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
++		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++		if ((v[3] & (1 << 20)) && !disable_nx) {
++			rdmsr(MSR_EFER, l, h);
++			l |= EFER_NX;
++			wrmsr(MSR_EFER, l, h);
++			nx_enabled = 1;
++			__supported_pte_mask |= _PAGE_NX;
++		}
++	}
++}
++
++/*
++ * Enables/disables executability of a given kernel page and
++ * returns the previous setting.
++ */
++int __init set_kernel_exec(unsigned long vaddr, int enable)
++{
++	pte_t *pte;
++	int ret = 1;
++
++	if (!nx_enabled)
++		goto out;
++
++	pte = lookup_address(vaddr);
++	BUG_ON(!pte);
++
++	if (!pte_exec_kernel(*pte))
++		ret = 0;
++
++	if (enable)
++		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
++	else
++		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
++	__flush_tlb_all();
++out:
++	return ret;
++}
++
++#endif
++
++/*
++ * paging_init() sets up the page tables - note that the first 8MB are
++ * already mapped by head.S.
++ *
++ * This routine also unmaps the page at virtual kernel address 0, so
++ * that we can trap those pesky NULL-reference errors in the kernel.
++ */
++void __init paging_init(void)
++{
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++	int i;
++#endif
++
++#ifdef CONFIG_X86_PAE
++	set_nx();
++	if (nx_enabled)
++		printk("NX (Execute Disable) protection: active\n");
++#endif
++
++	pagetable_init();
++
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++	/*
++	 * We will bail out later - printk doesn't work right now so
++	 * the user would just see a hanging kernel.
++	 * when running as xen domain we are already in PAE mode at
++	 * this point.
++	 */
++	if (cpu_has_pae)
++		set_in_cr4(X86_CR4_PAE);
++#endif
++	__flush_tlb_all();
++
++	kmap_init();
++
++	/* Switch to the real shared_info page, and clear the dummy page. */
++	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++	memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++	/* Setup mapping of lower 1st MB */
++	for (i = 0; i < NR_FIX_ISAMAPS; i++)
++		if (xen_start_info->flags & SIF_PRIVILEGED)
++			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++		else
++			__set_fixmap(FIX_ISAMAP_BEGIN - i,
++				     virt_to_machine(empty_zero_page),
++				     PAGE_KERNEL_RO);
++#endif
++}
++
++/*
++ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
++ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
++ * used to involve black magic jumps to work around some nasty CPU bugs,
++ * but fortunately the switch to using exceptions got rid of all that.
++ */
++
++static void __init test_wp_bit(void)
++{
++	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
++
++	/* Any page-aligned address will do, the test is non-destructive */
++	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
++	boot_cpu_data.wp_works_ok = do_test_wp_bit();
++	clear_fixmap(FIX_WP_TEST);
++
++	if (!boot_cpu_data.wp_works_ok) {
++		printk("No.\n");
++#ifdef CONFIG_X86_WP_WORKS_OK
++		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
++#endif
++	} else {
++		printk("Ok.\n");
++	}
++}
++
++static void __init set_max_mapnr_init(void)
++{
++#ifdef CONFIG_HIGHMEM
++	num_physpages = highend_pfn;
++#else
++	num_physpages = max_low_pfn;
++#endif
++#ifndef CONFIG_DISCONTIGMEM
++	max_mapnr = num_physpages;
++#endif
++}
++
++static struct kcore_list kcore_mem, kcore_vmalloc; 
++
++void __init mem_init(void)
++{
++	extern int ppro_with_ram_bug(void);
++	int codesize, reservedpages, datasize, initsize;
++	int tmp;
++	int bad_ppro;
++	unsigned long pfn;
++
++	contiguous_bitmap = alloc_bootmem_low_pages(
++		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
++	BUG_ON(!contiguous_bitmap);
++	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++
++#if defined(CONFIG_SWIOTLB)
++	swiotlb_init();	
++#endif
++
++#ifndef CONFIG_DISCONTIGMEM
++	if (!mem_map)
++		BUG();
++#endif
++	
++	bad_ppro = ppro_with_ram_bug();
++
++#ifdef CONFIG_HIGHMEM
++	/* check that fixmap and pkmap do not overlap */
++	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
++		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
++		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
++				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
++		BUG();
++	}
++#endif
++ 
++	set_max_mapnr_init();
++
++#ifdef CONFIG_HIGHMEM
++	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
++#else
++	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
++#endif
++	printk("vmalloc area: %lx-%lx, maxmem %lx\n",
++	       VMALLOC_START,VMALLOC_END,MAXMEM);
++	BUG_ON(VMALLOC_START > VMALLOC_END);
++	
++	/* this will put all low memory onto the freelists */
++	totalram_pages += free_all_bootmem();
++	/* XEN: init and count low-mem pages outside initial allocation. */
++	for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
++		ClearPageReserved(&mem_map[pfn]);
++		set_page_count(&mem_map[pfn], 1);
++		totalram_pages++;
++	}
++
++	reservedpages = 0;
++	for (tmp = 0; tmp < max_low_pfn; tmp++)
++		/*
++		 * Only count reserved RAM pages
++		 */
++		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
++			reservedpages++;
++
++	set_highmem_pages_init(bad_ppro);
++
++	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
++	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
++	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
++	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
++		   VMALLOC_END-VMALLOC_START);
++
++	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
++		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++		num_physpages << (PAGE_SHIFT-10),
++		codesize >> 10,
++		reservedpages << (PAGE_SHIFT-10),
++		datasize >> 10,
++		initsize >> 10,
++		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
++	       );
++
++#ifdef CONFIG_X86_PAE
++	if (!cpu_has_pae)
++		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
++#endif
++	if (boot_cpu_data.wp_works_ok < 0)
++		test_wp_bit();
++
++	/*
++	 * Subtle. SMP is doing its boot stuff late (because it has to
++	 * fork idle threads) - but it also needs low mappings for the
++	 * protected-mode entry to work. We zap these entries only after
++	 * the WP-bit has been tested.
++	 */
++#ifndef CONFIG_SMP
++	zap_low_mappings();
++#endif
++
++	set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
++}
++
++kmem_cache_t *pgd_cache;
++kmem_cache_t *pmd_cache;
++
++void __init pgtable_cache_init(void)
++{
++	if (PTRS_PER_PMD > 1) {
++		pmd_cache = kmem_cache_create("pmd",
++					PTRS_PER_PMD*sizeof(pmd_t),
++					PTRS_PER_PMD*sizeof(pmd_t),
++					0,
++					pmd_ctor,
++					NULL);
++		if (!pmd_cache)
++			panic("pgtable_cache_init(): cannot create pmd cache");
++	}
++	pgd_cache = kmem_cache_create("pgd",
++#ifndef CONFIG_XEN
++				PTRS_PER_PGD*sizeof(pgd_t),
++				PTRS_PER_PGD*sizeof(pgd_t),
++#else
++				PAGE_SIZE,
++				PAGE_SIZE,
++#endif
++				0,
++				pgd_ctor,
++				pgd_dtor);
++	if (!pgd_cache)
++		panic("pgtable_cache_init(): Cannot create pgd cache");
++}
++
++/*
++ * This function cannot be __init, since exceptions don't work in that
++ * section.  Put this after the callers, so that it cannot be inlined.
++ */
++static int noinline do_test_wp_bit(void)
++{
++	char tmp_reg;
++	int flag;
++
++	__asm__ __volatile__(
++		"	movb %0,%1	\n"
++		"1:	movb %1,%0	\n"
++		"	xorl %2,%2	\n"
++		"2:			\n"
++		".section __ex_table,\"a\"\n"
++		"	.align 4	\n"
++		"	.long 1b,2b	\n"
++		".previous		\n"
++		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
++		 "=q" (tmp_reg),
++		 "=r" (flag)
++		:"2" (1)
++		:"memory");
++	
++	return flag;
++}
++
++void free_initmem(void)
++{
++	unsigned long addr;
++
++	addr = (unsigned long)(&__init_begin);
++	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
++		ClearPageReserved(virt_to_page(addr));
++		set_page_count(virt_to_page(addr), 1);
++		memset((void *)addr, 0xcc, PAGE_SIZE);
++		free_page(addr);
++		totalram_pages++;
++	}
++	printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
++}
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++	if (start < end)
++		printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
++	for (; start < end; start += PAGE_SIZE) {
++		ClearPageReserved(virt_to_page(start));
++		set_page_count(virt_to_page(start), 1);
++		free_page(start);
++		totalram_pages++;
++	}
++}
++#endif
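
Several functions in init.c above (and in hypervisor.c earlier) walk the paging hierarchy by hand. A minimal sketch of that four-level walk for a kernel virtual address, using only helpers that already appear in the patch (the function name is illustrative):

    #include <asm/pgtable.h>

    static pte_t *lookup_kernel_pte_sketch(unsigned long vaddr)
    {
    	pgd_t *pgd = pgd_offset_k(vaddr);
    	pud_t *pud;
    	pmd_t *pmd;

    	if (pgd_none(*pgd))
    		return NULL;
    	pud = pud_offset(pgd, vaddr);
    	if (pud_none(*pud))
    		return NULL;
    	pmd = pmd_offset(pud, vaddr);
    	if (pmd_none(*pmd))
    		return NULL;
    	/* Kernel PTE pages are directly reachable on i386; highmem PTE
    	 * pages would need pte_offset_map() instead. */
    	return pte_offset_kernel(pmd, vaddr);
    }
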
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/ioremap.c linux-2.6.12-xen/arch/xen/i386/mm/ioremap.c
+--- pristine-linux-2.6.12/arch/xen/i386/mm/ioremap.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/mm/ioremap.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,470 @@
++/*
++ * arch/i386/mm/ioremap.c
++ *
++ * Re-map IO memory to kernel address space so that we can access it.
++ * This is needed for high PCI addresses that aren't mapped in the
++ * 640k-1MB IO memory area on PC's
++ *
++ * (C) Copyright 1995 1996 Linus Torvalds
++ */
++
++#include <linux/vmalloc.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <asm/io.h>
++#include <asm/fixmap.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++
++#define ISA_START_ADDRESS	0x0
++#define ISA_END_ADDRESS		0x100000
++
++#if 0 /* not PAE safe */
++/* These hacky macros avoid phys->machine translations. */
++#define __direct_pte(x) ((pte_t) { (x) } )
++#define __direct_mk_pte(page_nr,pgprot) \
++  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
++#define direct_mk_pte_phys(physpage, pgprot) \
++  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
++#endif
++
++static int direct_remap_area_pte_fn(pte_t *pte, 
++				    struct page *pte_page,
++				    unsigned long address, 
++				    void *data)
++{
++	mmu_update_t **v = (mmu_update_t **)data;
++
++	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pte_page)) <<
++		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++	(*v)++;
++
++	return 0;
++}
++
++static int __direct_remap_pfn_range(struct mm_struct *mm,
++				    unsigned long address, 
++				    unsigned long mfn,
++				    unsigned long size, 
++				    pgprot_t prot,
++				    domid_t  domid)
++{
++	int rc;
++	unsigned long i, start_address;
++	mmu_update_t *u, *v, *w;
++
++	u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
++	if (u == NULL)
++		return -ENOMEM;
++
++	start_address = address;
++
++	flush_cache_all();
++
++	for (i = 0; i < size; i += PAGE_SIZE) {
++		if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
++			/* Fill in the PTE pointers. */
++			rc = generic_page_range(mm, start_address, 
++						address - start_address,
++						direct_remap_area_pte_fn, &w);
++			if (rc)
++				goto out;
++			w = u;
++			rc = -EFAULT;
++			if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
++				goto out;
++			v = u;
++			start_address = address;
++		}
++
++		/*
++		 * Fill in the machine address: PTE ptr is done later by
++		 * __direct_remap_area_pages(). 
++		 */
++		v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
++
++		mfn++;
++		address += PAGE_SIZE; 
++		v++;
++	}
++
++	if (v != u) {
++		/* get the ptep's filled in */
++		rc = generic_page_range(mm, start_address, address - start_address,
++				   direct_remap_area_pte_fn, &w);
++		if (rc)
++			goto out;
++		rc = -EFAULT;
++		if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
++			goto out;
++	}
++
++	rc = 0;
++
++ out:
++	flush_tlb_all();
++
++	free_page((unsigned long)u);
++
++	return rc;
++}
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++			   unsigned long address, 
++			   unsigned long mfn,
++			   unsigned long size, 
++			   pgprot_t prot,
++			   domid_t  domid)
++{
++	/* Same as remap_pfn_range(). */
++	vma->vm_flags |= VM_IO | VM_RESERVED;
++
++	return __direct_remap_pfn_range(
++		vma->vm_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_remap_pfn_range);
++
++int direct_kernel_remap_pfn_range(unsigned long address, 
++				  unsigned long mfn,
++				  unsigned long size, 
++				  pgprot_t prot,
++				  domid_t  domid)
++{
++	return __direct_remap_pfn_range(
++		&init_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
++
++static int lookup_pte_fn(
++	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
++{
++	uint64_t *ptep = (uint64_t *)data;
++	if (ptep)
++		*ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
++			 PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++	return 0;
++}
++
++int create_lookup_pte_addr(struct mm_struct *mm, 
++			   unsigned long address,
++			   uint64_t *ptep)
++{
++	return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
++}
++
++EXPORT_SYMBOL(create_lookup_pte_addr);
++
++static int noop_fn(
++	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
++{
++	return 0;
++}
++
++int touch_pte_range(struct mm_struct *mm,
++		    unsigned long address,
++		    unsigned long size)
++{
++	return generic_page_range(mm, address, size, noop_fn, NULL);
++} 
++
++EXPORT_SYMBOL(touch_pte_range);
++
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++
++/*
++ * Does @address reside within a non-highmem page that is local to this virtual
++ * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
++ * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
++ * why this works.
++ */
++static inline int is_local_lowmem(unsigned long address)
++{
++	extern unsigned long max_low_pfn;
++	unsigned long mfn = address >> PAGE_SHIFT;
++	unsigned long pfn = mfn_to_pfn(mfn);
++	return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
++}
++
++/*
++ * Generic mapping function (not visible outside):
++ */
++
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
++ *
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
++ */
++void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
++{
++	void __iomem * addr;
++	struct vm_struct * area;
++	unsigned long offset, last_addr;
++	domid_t domid = DOMID_IO;
++
++	/* Don't allow wraparound or zero size */
++	last_addr = phys_addr + size - 1;
++	if (!size || last_addr < phys_addr)
++		return NULL;
++
++	/*
++	 * Don't remap the low PCI/ISA area, it's always mapped..
++	 */
++	if (xen_start_info->flags & SIF_PRIVILEGED &&
++	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++		return (void __iomem *) isa_bus_to_virt(phys_addr);
++
++	/*
++	 * Don't allow anybody to remap normal RAM that we're using..
++	 */
++	if (is_local_lowmem(phys_addr)) {
++		char *t_addr, *t_end;
++		struct page *page;
++
++		t_addr = bus_to_virt(phys_addr);
++		t_end = t_addr + (size - 1);
++	   
++		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
++			if(!PageReserved(page))
++				return NULL;
++
++		domid = DOMID_SELF;
++	}
++
++	/*
++	 * Mappings have to be page-aligned
++	 */
++	offset = phys_addr & ~PAGE_MASK;
++	phys_addr &= PAGE_MASK;
++	size = PAGE_ALIGN(last_addr+1) - phys_addr;
++
++	/*
++	 * Ok, go for it..
++	 */
++	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
++	if (!area)
++		return NULL;
++	area->phys_addr = phys_addr;
++	addr = (void __iomem *) area->addr;
++	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
++#ifdef __x86_64__
++	flags |= _PAGE_USER;
++#endif
++	if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
++				     phys_addr>>PAGE_SHIFT,
++				     size, __pgprot(flags), domid)) {
++		vunmap((void __force *) addr);
++		return NULL;
++	}
++	return (void __iomem *) (offset + (char __iomem *)addr);
++}
++
++
++/**
++ * ioremap_nocache     -   map bus memory into CPU space
++ * @offset:    bus address of the memory
++ * @size:      size of the resource to map
++ *
++ * ioremap_nocache performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address. 
++ *
++ * This version of ioremap ensures that the memory is marked uncachable
++ * on the CPU as well as honouring existing caching rules from things like
++ * the PCI bus. Note that there are other caches and buffers on many 
++ * busses. In particular, driver authors should read up on PCI writes.
++ *
++ * It's useful if some control registers are in such an area and
++ * write combining or read caching is not desirable:
++ * 
++ * Must be freed with iounmap.
++ */
++
++void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
++{
++	unsigned long last_addr;
++	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
++	if (!p) 
++		return p; 
++
++	/* Guaranteed to be > phys_addr, as per __ioremap() */
++	last_addr = phys_addr + size - 1;
++
++	if (is_local_lowmem(last_addr)) { 
++		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
++		unsigned long npages;
++
++		phys_addr &= PAGE_MASK;
++
++		/* This might overflow and become zero.. */
++		last_addr = PAGE_ALIGN(last_addr);
++
++		/* .. but that's ok, because modulo-2**n arithmetic will make
++	 	* the page-aligned "last - first" come out right.
++	 	*/
++		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
++
++		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
++			iounmap(p); 
++			p = NULL;
++		}
++		global_flush_tlb();
++	}
++
++	return p;					
++}
++
++void iounmap(volatile void __iomem *addr)
++{
++	struct vm_struct *p;
++	if ((void __force *) addr <= high_memory) 
++		return;
++
++	/*
++	 * __ioremap special-cases the PCI/ISA range by not instantiating a
++	 * vm_area and by simply returning an address into the kernel mapping
++	 * of ISA space.   So handle that here.
++	 */
++	if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++		return;
++
++	write_lock(&vmlist_lock);
++	p = __remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
++	if (!p) { 
++		printk("iounmap: bad address %p\n", addr);
++		goto out_unlock;
++	}
++
++	if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
++		/* p->size includes the guard page, but cpa doesn't like that */
++		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
++				 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
++				 PAGE_KERNEL);
++		global_flush_tlb();
++	} 
++out_unlock:
++	write_unlock(&vmlist_lock);
++	kfree(p); 
++}
++
++#ifdef __i386__
++
++void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
++{
++	unsigned long offset, last_addr;
++	unsigned int nrpages;
++	enum fixed_addresses idx;
++
++	/* Don't allow wraparound or zero size */
++	last_addr = phys_addr + size - 1;
++	if (!size || last_addr < phys_addr)
++		return NULL;
++
++	/*
++	 * Don't remap the low PCI/ISA area, it's always mapped..
++	 */
++	if (xen_start_info->flags & SIF_PRIVILEGED &&
++	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++		return isa_bus_to_virt(phys_addr);
++
++	/*
++	 * Mappings have to be page-aligned
++	 */
++	offset = phys_addr & ~PAGE_MASK;
++	phys_addr &= PAGE_MASK;
++	size = PAGE_ALIGN(last_addr) - phys_addr;
++
++	/*
++	 * Mappings have to fit in the FIX_BTMAP area.
++	 */
++	nrpages = size >> PAGE_SHIFT;
++	if (nrpages > NR_FIX_BTMAPS)
++		return NULL;
++
++	/*
++	 * Ok, go for it..
++	 */
++	idx = FIX_BTMAP_BEGIN;
++	while (nrpages > 0) {
++		set_fixmap(idx, phys_addr);
++		phys_addr += PAGE_SIZE;
++		--idx;
++		--nrpages;
++	}
++	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
++}
++
++void __init bt_iounmap(void *addr, unsigned long size)
++{
++	unsigned long virt_addr;
++	unsigned long offset;
++	unsigned int nrpages;
++	enum fixed_addresses idx;
++
++	virt_addr = (unsigned long)addr;
++	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
++		return;
++	if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++		return;
++	offset = virt_addr & ~PAGE_MASK;
++	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
++
++	idx = FIX_BTMAP_BEGIN;
++	while (nrpages > 0) {
++		clear_fixmap(idx);
++		--idx;
++		--nrpages;
++	}
++}
++
++#endif /* __i386__ */
++
++#else /* CONFIG_XEN_PHYSDEV_ACCESS */
++
++void __iomem * __ioremap(unsigned long phys_addr, unsigned long size,
++			 unsigned long flags)
++{
++	return NULL;
++}
++
++void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
++{
++	return NULL;
++}
++
++void iounmap(volatile void __iomem *addr)
++{
++}
++
++#ifdef __i386__
++
++void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
++{
++	return NULL;
++}
++
++void __init bt_iounmap(void *addr, unsigned long size)
++{
++}
++
++#endif /* __i386__ */
++
++#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
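
From a driver's point of view the __ioremap()/ioremap_nocache()/iounmap() implementation above is used in the usual way; a small illustrative example (the physical base address and the register offset are hypothetical):

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/io.h>
    #include <asm/page.h>

    /* Map one page of device registers, read a status word, unmap again. */
    static int read_status_register_sketch(unsigned long phys_base, u32 *status)
    {
    	void __iomem *regs = ioremap_nocache(phys_base, PAGE_SIZE);

    	if (!regs)
    		return -ENOMEM;
    	*status = readl(regs + 0x10);	/* hypothetical status register */
    	iounmap(regs);
    	return 0;
    }
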
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/Makefile linux-2.6.12-xen/arch/xen/i386/mm/Makefile
+--- pristine-linux-2.6.12/arch/xen/i386/mm/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/mm/Makefile	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,24 @@
++#
++# Makefile for the linux i386-specific parts of the memory manager.
++#
++
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++CFLAGS	+= -Iarch/$(XENARCH)/mm
++
++obj-y	:= init.o pgtable.o fault.o ioremap.o hypervisor.o
++c-obj-y	:= extable.o mmap.o pageattr.o
++
++c-obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
++c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
++obj-$(CONFIG_HIGHMEM) += highmem.o
++c-obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
++
++c-link	:=
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
++	@ln -fsn $(srctree)/arch/i386/mm/$(notdir $@) $@
++
++obj-y	+= $(c-obj-y)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/pgtable.c linux-2.6.12-xen/arch/xen/i386/mm/pgtable.c
+--- pristine-linux-2.6.12/arch/xen/i386/mm/pgtable.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/mm/pgtable.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,622 @@
++/*
++ *  linux/arch/i386/mm/pgtable.c
++ */
++
++#include <linux/config.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/highmem.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/spinlock.h>
++
++#include <asm/system.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++
++#include <asm-xen/features.h>
++#include <asm-xen/foreign_page.h>
++#include <asm/hypervisor.h>
++
++static void pgd_test_and_unpin(pgd_t *pgd);
++
++void show_mem(void)
++{
++	int total = 0, reserved = 0;
++	int shared = 0, cached = 0;
++	int highmem = 0;
++	struct page *page;
++	pg_data_t *pgdat;
++	unsigned long i;
++
++	printk("Mem-info:\n");
++	show_free_areas();
++	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++	for_each_pgdat(pgdat) {
++		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++			page = pgdat->node_mem_map + i;
++			total++;
++			if (PageHighMem(page))
++				highmem++;
++			if (PageReserved(page))
++				reserved++;
++			else if (PageSwapCache(page))
++				cached++;
++			else if (page_count(page))
++				shared += page_count(page) - 1;
++		}
++	}
++	printk("%d pages of RAM\n", total);
++	printk("%d pages of HIGHMEM\n",highmem);
++	printk("%d reserved pages\n",reserved);
++	printk("%d pages shared\n",shared);
++	printk("%d pages swap cached\n",cached);
++}
++
++/*
++ * Associate a virtual page frame with a given physical page frame 
++ * and protection flags for that frame.
++ */ 
++static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	pgd = swapper_pg_dir + pgd_index(vaddr);
++	if (pgd_none(*pgd)) {
++		BUG();
++		return;
++	}
++	pud = pud_offset(pgd, vaddr);
++	if (pud_none(*pud)) {
++		BUG();
++		return;
++	}
++	pmd = pmd_offset(pud, vaddr);
++	if (pmd_none(*pmd)) {
++		BUG();
++		return;
++	}
++	pte = pte_offset_kernel(pmd, vaddr);
++	/* <pfn,flags> stored as-is, to permit clearing entries */
++	set_pte(pte, pfn_pte(pfn, flags));
++
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
++}
++
++/*
++ * Associate a virtual page frame with a given physical page frame 
++ * and protection flags for that frame.
++ */ 
++static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
++			   pgprot_t flags)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	pgd = swapper_pg_dir + pgd_index(vaddr);
++	if (pgd_none(*pgd)) {
++		BUG();
++		return;
++	}
++	pud = pud_offset(pgd, vaddr);
++	if (pud_none(*pud)) {
++		BUG();
++		return;
++	}
++	pmd = pmd_offset(pud, vaddr);
++	if (pmd_none(*pmd)) {
++		BUG();
++		return;
++	}
++	pte = pte_offset_kernel(pmd, vaddr);
++	/* <pfn,flags> stored as-is, to permit clearing entries */
++	set_pte(pte, pfn_pte_ma(pfn, flags));
++
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
++}
++
++/*
++ * Associate a large virtual page frame with a given physical page frame 
++ * and protection flags for that frame. pfn is for the base of the page,
++ * vaddr is what the page gets mapped to - both must be properly aligned. 
++ * The pmd must already be instantiated. Assumes PAE mode.
++ */ 
++void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++
++	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
++		printk ("set_pmd_pfn: vaddr misaligned\n");
++		return; /* BUG(); */
++	}
++	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
++		printk ("set_pmd_pfn: pfn misaligned\n");
++		return; /* BUG(); */
++	}
++	pgd = swapper_pg_dir + pgd_index(vaddr);
++	if (pgd_none(*pgd)) {
++		printk ("set_pmd_pfn: pgd_none\n");
++		return; /* BUG(); */
++	}
++	pud = pud_offset(pgd, vaddr);
++	pmd = pmd_offset(pud, vaddr);
++	set_pmd(pmd, pfn_pmd(pfn, flags));
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
++}
++
++void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
++{
++	unsigned long address = __fix_to_virt(idx);
++
++	if (idx >= __end_of_fixed_addresses) {
++		BUG();
++		return;
++	}
++	switch (idx) {
++	case FIX_WP_TEST:
++	case FIX_VSYSCALL:
++#ifdef CONFIG_X86_F00F_BUG
++	case FIX_F00F_IDT:
++#endif
++		set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
++		break;
++	default:
++		set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
++		break;
++	}
++}
++
++pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
++	if (pte)
++		make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
++	return pte;
++}
++
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++	struct page *pte;
++
++#ifdef CONFIG_HIGHPTE
++	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
++#else
++	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++	if (pte) {
++		SetPageForeign(pte, pte_free);
++		set_page_count(pte, 1);
++	}
++#endif
++
++	return pte;
++}
++
++void pte_free(struct page *pte)
++{
++	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
++
++	if (!pte_write(*virt_to_ptep(va)))
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
++
++	ClearPageForeign(pte);
++	set_page_count(pte, 1);
++
++	__free_page(pte);
++}
++
++void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
++{
++	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++}
++
++/*
++ * List of all pgd's needed for non-PAE so it can invalidate entries
++ * in both cached and uncached pgd's; not needed for PAE since the
++ * kernel pmd is shared. If PAE were not to share the pmd a similar
++ * tactic would be needed. This is essentially codepath-based locking
++ * against pageattr.c; it is the unique case in which a valid change
++ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
++ * vmalloc faults work because attached pagetables are never freed.
++ * The locking scheme was chosen on the basis of manfred's
++ * recommendations and having no core impact whatsoever.
++ * -- wli
++ */
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
++
++static inline void pgd_list_add(pgd_t *pgd)
++{
++	struct page *page = virt_to_page(pgd);
++	page->index = (unsigned long)pgd_list;
++	if (pgd_list)
++		pgd_list->private = (unsigned long)&page->index;
++	pgd_list = page;
++	page->private = (unsigned long)&pgd_list;
++}
++
++static inline void pgd_list_del(pgd_t *pgd)
++{
++	struct page *next, **pprev, *page = virt_to_page(pgd);
++	next = (struct page *)page->index;
++	pprev = (struct page **)page->private;
++	*pprev = next;
++	if (next)
++		next->private = (unsigned long)pprev;
++}
++
++void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++{
++	unsigned long flags;
++
++	if (PTRS_PER_PMD > 1) {
++		/* Ensure pgd resides below 4GB. */
++		int rc = xen_create_contiguous_region(
++			(unsigned long)pgd, 0, 32);
++		BUG_ON(rc);
++		if (HAVE_SHARED_KERNEL_PMD)
++			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
++			       swapper_pg_dir + USER_PTRS_PER_PGD,
++			       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
++	} else {
++		spin_lock_irqsave(&pgd_lock, flags);
++		memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
++		       swapper_pg_dir + USER_PTRS_PER_PGD,
++		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
++		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++		pgd_list_add(pgd);
++		spin_unlock_irqrestore(&pgd_lock, flags);
++	}
++}
++
++void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++{
++	unsigned long flags; /* can be called from interrupt context */
++
++	if (PTRS_PER_PMD > 1) {
++		xen_destroy_contiguous_region((unsigned long)pgd, 0);
++	} else {
++		spin_lock_irqsave(&pgd_lock, flags);
++		pgd_list_del(pgd);
++		spin_unlock_irqrestore(&pgd_lock, flags);
++
++		pgd_test_and_unpin(pgd);
++	}
++}
++
++pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++	int i;
++	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
++
++	pgd_test_and_unpin(pgd);
++
++	if (PTRS_PER_PMD == 1 || !pgd)
++		return pgd;
++
++	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++		if (!pmd)
++			goto out_oom;
++		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
++	}
++
++	if (!HAVE_SHARED_KERNEL_PMD) {
++		unsigned long flags;
++
++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++			if (!pmd)
++				goto out_oom;
++			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
++		}
++
++		spin_lock_irqsave(&pgd_lock, flags);
++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++			pgd_t *kpgd = pgd_offset_k(v);
++			pud_t *kpud = pud_offset(kpgd, v);
++			pmd_t *kpmd = pmd_offset(kpud, v);
++			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++			memcpy(pmd, kpmd, PAGE_SIZE);
++			make_lowmem_page_readonly(
++				pmd, XENFEAT_writable_page_tables);
++		}
++		pgd_list_add(pgd);
++		spin_unlock_irqrestore(&pgd_lock, flags);
++	}
++
++	return pgd;
++
++out_oom:
++	for (i--; i >= 0; i--)
++		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
++	kmem_cache_free(pgd_cache, pgd);
++	return NULL;
++}
++
++void pgd_free(pgd_t *pgd)
++{
++	int i;
++
++	pgd_test_and_unpin(pgd);
++
++	/* in the PAE case user pgd entries are overwritten before usage */
++	if (PTRS_PER_PMD > 1) {
++		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++			kmem_cache_free(pmd_cache, pmd);
++		}
++		if (!HAVE_SHARED_KERNEL_PMD) {
++			unsigned long flags;
++			spin_lock_irqsave(&pgd_lock, flags);
++			pgd_list_del(pgd);
++			spin_unlock_irqrestore(&pgd_lock, flags);
++			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++				make_lowmem_page_writable(
++					pmd, XENFEAT_writable_page_tables);
++				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++				kmem_cache_free(pmd_cache, pmd);
++			}
++		}
++	}
++	/* in the non-PAE case, free_pgtables() clears user pgd entries */
++	kmem_cache_free(pgd_cache, pgd);
++}
++
++#ifndef CONFIG_XEN_SHADOW_MODE
++void make_lowmem_page_readonly(void *va, unsigned int feature)
++{
++	pte_t *pte;
++	int rc;
++
++	if (xen_feature(feature))
++		return;
++
++	pte = virt_to_ptep(va);
++	rc = HYPERVISOR_update_va_mapping(
++		(unsigned long)va, pte_wrprotect(*pte), 0);
++	BUG_ON(rc);
++}
++
++void make_lowmem_page_writable(void *va, unsigned int feature)
++{
++	pte_t *pte;
++	int rc;
++
++	if (xen_feature(feature))
++		return;
++
++	pte = virt_to_ptep(va);
++	rc = HYPERVISOR_update_va_mapping(
++		(unsigned long)va, pte_mkwrite(*pte), 0);
++	BUG_ON(rc);
++}
++
++void make_page_readonly(void *va, unsigned int feature)
++{
++	pte_t *pte;
++	int rc;
++
++	if (xen_feature(feature))
++		return;
++
++	pte = virt_to_ptep(va);
++	rc = HYPERVISOR_update_va_mapping(
++		(unsigned long)va, pte_wrprotect(*pte), 0);
++	if (rc) /* fallback? */
++		xen_l1_entry_update(pte, pte_wrprotect(*pte));
++	if ((unsigned long)va >= (unsigned long)high_memory) {
++		unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++		if (pfn >= highstart_pfn)
++			kmap_flush_unused(); /* flush stale writable kmaps */
++		else
++#endif
++			make_lowmem_page_readonly(
++				phys_to_virt(pfn << PAGE_SHIFT), feature); 
++	}
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++	pte_t *pte;
++	int rc;
++
++	if (xen_feature(feature))
++		return;
++
++	pte = virt_to_ptep(va);
++	rc = HYPERVISOR_update_va_mapping(
++		(unsigned long)va, pte_mkwrite(*pte), 0);
++	if (rc) /* fallback? */
++		xen_l1_entry_update(pte, pte_mkwrite(*pte));
++	if ((unsigned long)va >= (unsigned long)high_memory) {
++		unsigned long pfn = pte_pfn(*pte); 
++#ifdef CONFIG_HIGHMEM
++		if (pfn < highstart_pfn)
++#endif
++			make_lowmem_page_writable(
++				phys_to_virt(pfn << PAGE_SHIFT), feature);
++	}
++}
++
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
++{
++	if (xen_feature(feature))
++		return;
++
++	while (nr-- != 0) {
++		make_page_readonly(va, feature);
++		va = (void *)((unsigned long)va + PAGE_SIZE);
++	}
++}
++
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
++{
++	if (xen_feature(feature))
++		return;
++
++	while (nr-- != 0) {
++		make_page_writable(va, feature);
++		va = (void *)((unsigned long)va + PAGE_SIZE);
++	}
++}
++#endif /* CONFIG_XEN_SHADOW_MODE */
++
++static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
++{
++	struct page *page = virt_to_page(pt);
++	unsigned long pfn = page_to_pfn(page);
++
++	if (PageHighMem(page))
++		return;
++	BUG_ON(HYPERVISOR_update_va_mapping(
++		(unsigned long)__va(pfn << PAGE_SHIFT),
++		pfn_pte(pfn, flags), 0));
++}
++
++static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
++{
++	pgd_t *pgd = pgd_base;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	int    g, u, m;
++
++	for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++		if (pgd_none(*pgd))
++			continue;
++		pud = pud_offset(pgd, 0);
++		if (PTRS_PER_PUD > 1) /* not folded */
++			pgd_walk_set_prot(pud,flags);
++		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++			if (pud_none(*pud))
++				continue;
++			pmd = pmd_offset(pud, 0);
++			if (PTRS_PER_PMD > 1) /* not folded */
++				pgd_walk_set_prot(pmd,flags);
++			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++				if (pmd_none(*pmd))
++					continue;
++				pte = pte_offset_kernel(pmd,0);
++				pgd_walk_set_prot(pte,flags);
++			}
++		}
++	}
++
++	BUG_ON(HYPERVISOR_update_va_mapping(
++		(unsigned long)pgd_base,
++		pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++		UVMF_TLB_FLUSH));
++}
++
++static void __pgd_pin(pgd_t *pgd)
++{
++	pgd_walk(pgd, PAGE_KERNEL_RO);
++	xen_pgd_pin(__pa(pgd));
++	set_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void __pgd_unpin(pgd_t *pgd)
++{
++	xen_pgd_unpin(__pa(pgd));
++	pgd_walk(pgd, PAGE_KERNEL);
++	clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void pgd_test_and_unpin(pgd_t *pgd)
++{
++	if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
++		__pgd_unpin(pgd);
++}
++
++void mm_pin(struct mm_struct *mm)
++{
++	spin_lock(&mm->page_table_lock);
++	__pgd_pin(mm->pgd);
++	spin_unlock(&mm->page_table_lock);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++	spin_lock(&mm->page_table_lock);
++	__pgd_unpin(mm->pgd);
++	spin_unlock(&mm->page_table_lock);
++}
++
++void mm_pin_all(void)
++{
++	struct page *page;
++	for (page = pgd_list; page; page = (struct page *)page->index) {
++		if (!test_bit(PG_pinned, &page->flags))
++			__pgd_pin((pgd_t *)page_address(page));
++	}
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++	struct task_struct *tsk = current;
++
++	task_lock(tsk);
++
++	/*
++	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
++	 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
++	 */
++	if (tsk->active_mm == mm) {
++		tsk->active_mm = &init_mm;
++		atomic_inc(&init_mm.mm_count);
++
++		switch_mm(mm, &init_mm, tsk);
++
++		atomic_dec(&mm->mm_count);
++		BUG_ON(atomic_read(&mm->mm_count) == 0);
++	}
++
++	task_unlock(tsk);
++
++	if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
++	    (atomic_read(&mm->mm_count) == 1))
++		mm_unpin(mm);
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
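
pgd_list_add()/pgd_list_del() above thread the list of non-PAE pgds through
struct page itself: page->index holds the next pointer and page->private
points back at the previous element's next field, so an entry can unlink
itself without walking the list (mm_pin_all() walks it via page->index).
A stand-alone sketch of that linkage, with an ordinary struct in place of
struct page; the names below are illustrative only and not part of the patch:

#include <stdio.h>

struct node {
	struct node *next;	/* plays the role of page->index   */
	struct node **pprev;	/* plays the role of page->private */
	int id;
};

static struct node *head;

static void node_add(struct node *n)
{
	n->next = head;
	if (head)
		head->pprev = &n->next;
	head = n;
	n->pprev = &head;
}

static void node_del(struct node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct node *p;

	node_add(&a);
	node_add(&b);
	node_add(&c);
	node_del(&b);			/* unlinks from the middle in O(1) */
	for (p = head; p; p = p->next)
		printf("node %d\n", p->id);	/* prints 3, then 1 */
	return 0;
}
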
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/pci/i386.c linux-2.6.12-xen/arch/xen/i386/pci/i386.c
+--- pristine-linux-2.6.12/arch/xen/i386/pci/i386.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/pci/i386.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,304 @@
++/*
++ *	Low-Level PCI Access for i386 machines
++ *
++ * Copyright 1993, 1994 Drew Eckhardt
++ *      Visionary Computing
++ *      (Unix and Linux consulting and custom programming)
++ *      Drew at Colorado.EDU
++ *      +1 (303) 786-7975
++ *
++ * Drew's work was sponsored by:
++ *	iX Multiuser Multitasking Magazine
++ *	Hannover, Germany
++ *	hm at ix.de
++ *
++ * Copyright 1997--2000 Martin Mares <mj at ucw.cz>
++ *
++ * For more information, please consult the following manuals (look at
++ * http://www.pcisig.com/ for how to get them):
++ *
++ * PCI BIOS Specification
++ * PCI Local Bus Specification
++ * PCI to PCI Bridge Specification
++ * PCI System Design Guide
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/ioport.h>
++#include <linux/errno.h>
++
++#include "pci.h"
++
++/*
++ * We need to avoid collisions with `mirrored' VGA ports
++ * and other strange ISA hardware, so we always want the
++ * addresses to be allocated in the 0x000-0x0ff region
++ * modulo 0x400.
++ *
++ * Why? Because some silly external IO cards only decode
++ * the low 10 bits of the IO address. The 0x00-0xff region
++ * is reserved for motherboard devices that decode all 16
++ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
++ * but we want to try to avoid allocating at 0x2900-0x2bff
++ * which might be mirrored at 0x0100-0x03ff..
++ */
++void
++pcibios_align_resource(void *data, struct resource *res,
++		       unsigned long size, unsigned long align)
++{
++	if (res->flags & IORESOURCE_IO) {
++		unsigned long start = res->start;
++
++		if (start & 0x300) {
++			start = (start + 0x3ff) & ~0x3ff;
++			res->start = start;
++		}
++	}
++}
++
++
++/*
++ *  Handle resources of PCI devices.  If the world were perfect, we could
++ *  just allocate all the resource regions and do nothing more.  It isn't.
++ *  On the other hand, we cannot just re-allocate all devices, as it would
++ *  require us to know lots of host bridge internals.  So we attempt to
++ *  keep as much of the original configuration as possible, but tweak it
++ *  when it's found to be wrong.
++ *
++ *  Known BIOS problems we have to work around:
++ *	- I/O or memory regions not configured
++ *	- regions configured, but not enabled in the command register
++ *	- bogus I/O addresses above 64K used
++ *	- expansion ROMs left enabled (this may sound harmless, but given
++ *	  the fact the PCI specs explicitly allow address decoders to be
++ *	  shared between expansion ROMs and other resource regions, it's
++ *	  at least dangerous)
++ *
++ *  Our solution:
++ *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
++ *	    This gives us fixed barriers on where we can allocate.
++ *	(2) Allocate resources for all enabled devices.  If there is
++ *	    a collision, just mark the resource as unallocated. Also
++ *	    disable expansion ROMs during this step.
++ *	(3) Try to allocate resources for disabled devices.  If the
++ *	    resources were assigned correctly, everything goes well,
++ *	    if they weren't, they won't disturb allocation of other
++ *	    resources.
++ *	(4) Assign new addresses to resources which were either
++ *	    not configured at all or misconfigured.  If explicitly
++ *	    requested by the user, configure expansion ROM address
++ *	    as well.
++ */
++
++static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
++{
++	struct pci_bus *bus;
++	struct pci_dev *dev;
++	int idx;
++	struct resource *r, *pr;
++
++	/* Depth-First Search on bus tree */
++	list_for_each_entry(bus, bus_list, node) {
++		if ((dev = bus->self)) {
++			for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
++				r = &dev->resource[idx];
++				if (!r->start)
++					continue;
++				pr = pci_find_parent_resource(dev, r);
++				if (!pr || request_resource(pr, r) < 0)
++					printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
++			}
++		}
++		pcibios_allocate_bus_resources(&bus->children);
++	}
++}
++
++static void __init pcibios_allocate_resources(int pass)
++{
++	struct pci_dev *dev = NULL;
++	int idx, disabled;
++	u16 command;
++	struct resource *r, *pr;
++
++	for_each_pci_dev(dev) {
++		pci_read_config_word(dev, PCI_COMMAND, &command);
++		for(idx = 0; idx < 6; idx++) {
++			r = &dev->resource[idx];
++			if (r->parent)		/* Already allocated */
++				continue;
++			if (!r->start)		/* Address not assigned at all */
++				continue;
++			if (r->flags & IORESOURCE_IO)
++				disabled = !(command & PCI_COMMAND_IO);
++			else
++				disabled = !(command & PCI_COMMAND_MEMORY);
++			if (pass == disabled) {
++				DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
++				    r->start, r->end, r->flags, disabled, pass);
++				pr = pci_find_parent_resource(dev, r);
++				if (!pr || request_resource(pr, r) < 0) {
++					printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
++					/* We'll assign a new address later */
++					r->end -= r->start;
++					r->start = 0;
++				}
++			}
++		}
++		if (!pass) {
++			r = &dev->resource[PCI_ROM_RESOURCE];
++			if (r->flags & IORESOURCE_ROM_ENABLE) {
++				/* Turn the ROM off, leave the resource region, but keep it unregistered. */
++				u32 reg;
++				DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
++				r->flags &= ~IORESOURCE_ROM_ENABLE;
++				pci_read_config_dword(dev, dev->rom_base_reg, &reg);
++				pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
++			}
++		}
++	}
++}
++
++static int __init pcibios_assign_resources(void)
++{
++	struct pci_dev *dev = NULL;
++	int idx;
++	struct resource *r;
++
++	for_each_pci_dev(dev) {
++		int class = dev->class >> 8;
++
++		/* Don't touch classless devices and host bridges */
++		if (!class || class == PCI_CLASS_BRIDGE_HOST)
++			continue;
++
++		for(idx=0; idx<6; idx++) {
++			r = &dev->resource[idx];
++
++			/*
++			 *  Don't touch IDE controllers and I/O ports of video cards!
++			 */
++			if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
++			    (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
++				continue;
++
++			/*
++			 *  We shall assign a new address to this resource, either because
++			 *  the BIOS forgot to do so or because we have decided the old
++			 *  address was unusable for some reason.
++			 */
++			if (!r->start && r->end)
++				pci_assign_resource(dev, idx);
++		}
++
++		if (pci_probe & PCI_ASSIGN_ROMS) {
++			r = &dev->resource[PCI_ROM_RESOURCE];
++			r->end -= r->start;
++			r->start = 0;
++			if (r->end)
++				pci_assign_resource(dev, PCI_ROM_RESOURCE);
++		}
++	}
++	return 0;
++}
++
++void __init pcibios_resource_survey(void)
++{
++	DBG("PCI: Allocating resources\n");
++	pcibios_allocate_bus_resources(&pci_root_buses);
++	pcibios_allocate_resources(0);
++	pcibios_allocate_resources(1);
++}
++
++/**
++ * called in fs_initcall (one level below subsys_initcall),
++ * giving the motherboard a chance to reserve resources
++ */
++fs_initcall(pcibios_assign_resources);
++
++int pcibios_enable_resources(struct pci_dev *dev, int mask)
++{
++	u16 cmd, old_cmd;
++	int idx;
++	struct resource *r;
++
++	pci_read_config_word(dev, PCI_COMMAND, &cmd);
++	old_cmd = cmd;
++	for(idx=0; idx<6; idx++) {
++		/* Only set up the requested stuff */
++		if (!(mask & (1<<idx)))
++			continue;
++
++		r = &dev->resource[idx];
++		if (!r->start && r->end) {
++			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
++			return -EINVAL;
++		}
++		if (r->flags & IORESOURCE_IO)
++			cmd |= PCI_COMMAND_IO;
++		if (r->flags & IORESOURCE_MEM)
++			cmd |= PCI_COMMAND_MEMORY;
++	}
++	if (dev->resource[PCI_ROM_RESOURCE].start)
++		cmd |= PCI_COMMAND_MEMORY;
++	if (cmd != old_cmd) {
++		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
++		pci_write_config_word(dev, PCI_COMMAND, cmd);
++	}
++	return 0;
++}
++
++/*
++ *  If we set up a device for bus mastering, we need to check the latency
++ *  timer as certain crappy BIOSes forget to set it properly.
++ */
++unsigned int pcibios_max_latency = 255;
++
++void pcibios_set_master(struct pci_dev *dev)
++{
++	u8 lat;
++	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
++	if (lat < 16)
++		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
++	else if (lat > pcibios_max_latency)
++		lat = pcibios_max_latency;
++	else
++		return;
++	printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
++	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
++}
++
++int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++			enum pci_mmap_state mmap_state, int write_combine)
++{
++	unsigned long prot;
++
++	/* I/O space cannot be accessed via normal processor loads and
++	 * stores on this platform.
++	 */
++	if (mmap_state == pci_mmap_io)
++		return -EINVAL;
++
++	/* Leave vm_pgoff as-is, the PCI space address is the physical
++	 * address on this platform.
++	 */
++	vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
++
++	prot = pgprot_val(vma->vm_page_prot);
++	if (boot_cpu_data.x86 > 3)
++		prot |= _PAGE_PCD | _PAGE_PWT;
++	vma->vm_page_prot = __pgprot(prot);
++
++	/* Write-combine setting is ignored, it is changed via the mtrr
++	 * interfaces on this platform.
++	 */
++	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++				   vma->vm_end - vma->vm_start,
++				   vma->vm_page_prot, DOMID_IO))
++		return -EAGAIN;
++
++	return 0;
++}
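
The rounding in pcibios_align_resource() above keeps I/O allocations out of
the 0x100-0x3ff window of each 1K block of port space, because cards that
decode only the low 10 bits would alias those addresses onto the motherboard
range.  A small user-space sketch of the same arithmetic; align_io_start()
is a made-up name used only for this illustration:

#include <stdio.h>

/* Same rule as pcibios_align_resource(): if the low ten bits fall in
 * 0x100-0x3ff, round the start up to the next 1K boundary. */
static unsigned long align_io_start(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;
	return start;
}

int main(void)
{
	/* 0x2910 lands in the aliased window -> bumped to 0x2c00 */
	printf("%#lx -> %#lx\n", 0x2910UL, align_io_start(0x2910UL));
	/* 0x2800 is already in the safe 0x00-0xff window -> unchanged */
	printf("%#lx -> %#lx\n", 0x2800UL, align_io_start(0x2800UL));
	return 0;
}
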
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/pci/irq.c linux-2.6.12-xen/arch/xen/i386/pci/irq.c
+--- pristine-linux-2.6.12/arch/xen/i386/pci/irq.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/pci/irq.c	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,1120 @@
++/*
++ *	Low-Level PCI Support for PC -- Routing of Interrupts
++ *
++ *	(c) 1999--2000 Martin Mares <mj at ucw.cz>
++ */
++
++#include <linux/config.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/dmi.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/io_apic.h>
++#include <asm/hw_irq.h>
++#include <linux/acpi.h>
++
++#include "pci.h"
++
++#define PIRQ_SIGNATURE	(('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
++#define PIRQ_VERSION 0x0100
++
++static int broken_hp_bios_irq9;
++static int acer_tm360_irqrouting;
++
++static struct irq_routing_table *pirq_table;
++
++static int pirq_enable_irq(struct pci_dev *dev);
++
++/*
++ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
++ * Avoid using: 13, 14 and 15 (FP error and IDE).
++ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
++ */
++unsigned int pcibios_irq_mask = 0xfff8;
++
++static int pirq_penalty[16] = {
++	1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
++	0, 0, 0, 0, 1000, 100000, 100000, 100000
++};
++
++struct irq_router {
++	char *name;
++	u16 vendor, device;
++	int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
++	int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
++};
++
++struct irq_router_handler {
++	u16 vendor;
++	int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
++};
++
++int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
++
++/*
++ *  Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
++ */
++
++static struct irq_routing_table * __init pirq_find_routing_table(void)
++{
++	u8 *addr;
++	struct irq_routing_table *rt;
++	int i;
++	u8 sum;
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++	for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
++		rt = (struct irq_routing_table *) addr;
++		if (rt->signature != PIRQ_SIGNATURE ||
++		    rt->version != PIRQ_VERSION ||
++		    rt->size % 16 ||
++		    rt->size < sizeof(struct irq_routing_table))
++			continue;
++		sum = 0;
++		for(i=0; i<rt->size; i++)
++			sum += addr[i];
++		if (!sum) {
++			DBG("PCI: Interrupt Routing Table found at 0x%p\n", rt);
++			return rt;
++		}
++	}
++#endif
++	
++	return NULL;
++}
++
++/*
++ *  If we have an IRQ routing table, use it to search for peer host
++ *  bridges.  It's a gross hack, but since there are no other known
++ *  ways to get a list of buses, we have to go this way.
++ */
++
++static void __init pirq_peer_trick(void)
++{
++	struct irq_routing_table *rt = pirq_table;
++	u8 busmap[256];
++	int i;
++	struct irq_info *e;
++
++	memset(busmap, 0, sizeof(busmap));
++	for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
++		e = &rt->slots[i];
++#ifdef DEBUG
++		{
++			int j;
++			DBG("%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
++			for(j=0; j<4; j++)
++				DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
++			DBG("\n");
++		}
++#endif
++		busmap[e->bus] = 1;
++	}
++	for(i = 1; i < 256; i++) {
++		if (!busmap[i] || pci_find_bus(0, i))
++			continue;
++		if (pci_scan_bus(i, &pci_root_ops, NULL))
++			printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
++	}
++	pcibios_last_bus = -1;
++}
++
++/*
++ *  Code for querying and setting of IRQ routes on various interrupt routers.
++ */
++
++void eisa_set_level_irq(unsigned int irq)
++{
++	unsigned char mask = 1 << (irq & 7);
++	unsigned int port = 0x4d0 + (irq >> 3);
++	unsigned char val;
++	static u16 eisa_irq_mask;
++
++	if (irq >= 16 || (1 << irq) & eisa_irq_mask)
++		return;
++
++	eisa_irq_mask |= (1 << irq);
++	printk("PCI: setting IRQ %u as level-triggered\n", irq);
++	val = inb(port);
++	if (!(val & mask)) {
++		DBG(" -> edge");
++		outb(val | mask, port);
++	}
++}
++
++/*
++ * Common IRQ routing practice: nybbles in config space,
++ * offset by some magic constant.
++ */
++static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
++{
++	u8 x;
++	unsigned reg = offset + (nr >> 1);
++
++	pci_read_config_byte(router, reg, &x);
++	return (nr & 1) ? (x >> 4) : (x & 0xf);
++}
++
++static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
++{
++	u8 x;
++	unsigned reg = offset + (nr >> 1);
++
++	pci_read_config_byte(router, reg, &x);
++	x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
++	pci_write_config_byte(router, reg, x);
++}
++
++/*
++ * ALI pirq entries are damn ugly, and completely undocumented.
++ * This has been figured out from pirq tables, and it's not a pretty
++ * picture.
++ */
++static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
++
++	return irqmap[read_config_nybble(router, 0x48, pirq-1)];
++}
++
++static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
++	unsigned int val = irqmap[irq];
++		
++	if (val) {
++		write_config_nybble(router, 0x48, pirq-1, val);
++		return 1;
++	}
++	return 0;
++}
++
++/*
++ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
++ * just a pointer to the config space.
++ */
++static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	u8 x;
++
++	pci_read_config_byte(router, pirq, &x);
++	return (x < 16) ? x : 0;
++}
++
++static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	pci_write_config_byte(router, pirq, irq);
++	return 1;
++}
++
++/*
++ * The VIA pirq rules are nibble-based, like ALI,
++ * but without the ugly irq number munging.
++ * However, PIRQD is in the upper instead of lower 4 bits.
++ */
++static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
++}
++
++static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
++	return 1;
++}
++
++/*
++ * ITE 8330G pirq rules are nibble-based
++ * FIXME: pirqmap may be { 1, 0, 3, 2 },
++ * 	  2+3 are both mapped to irq 9 on my system
++ */
++static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++	return read_config_nybble(router,0x43, pirqmap[pirq-1]);
++}
++
++static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++	write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
++	return 1;
++}
++
++/*
++ * OPTI: high four bits are nibble pointer..
++ * I wonder what the low bits do?
++ */
++static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	return read_config_nybble(router, 0xb8, pirq >> 4);
++}
++
++static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	write_config_nybble(router, 0xb8, pirq >> 4, irq);
++	return 1;
++}
++
++/*
++ * Cyrix: nibble offset 0x5C
++ * 0x5C bits 7:4 is INTB bits 3:0 is INTA 
++ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
++ */
++static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	return read_config_nybble(router, 0x5C, (pirq-1)^1);
++}
++
++static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
++	return 1;
++}
++
++/*
++ *	PIRQ routing for SiS 85C503 router used in several SiS chipsets.
++ *	We have to deal with the following issues here:
++ *	- vendors have different ideas about the meaning of link values
++ *	- some onboard devices (integrated in the chipset) have special
++ *	  links and are thus routed differently (i.e. not via PCI INTA-INTD)
++ *	- different revision of the router have a different layout for
++ *	  the routing registers, particularly for the onchip devices
++ *
++ *	For all routing registers the common thing is we have one byte
++ *	per routeable link which is defined as:
++ *		 bit 7      IRQ mapping enabled (0) or disabled (1)
++ *		 bits [6:4] reserved (sometimes used for onchip devices)
++ *		 bits [3:0] IRQ to map to
++ *		     allowed: 3-7, 9-12, 14-15
++ *		     reserved: 0, 1, 2, 8, 13
++ *
++ *	The config-space registers located at 0x41/0x42/0x43/0x44 are
++ *	always used to route the normal PCI INT A/B/C/D respectively.
++ *	Apparently there are systems implementing PCI routing table using
++ *	link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
++ *	We try our best to handle both link mappings.
++ *	
++ *	Currently (2003-05-21) it appears most SiS chipsets follow the
++ *	definition of routing registers from the SiS-5595 southbridge.
++ *	According to the SiS 5595 datasheets the revision id's of the
++ *	router (ISA-bridge) should be 0x01 or 0xb0.
++ *
++ *	Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
++ *	Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
++ *	They seem to work with the current routing code. However there is
++ *	some concern because of the two USB-OHCI HCs (original SiS 5595
++ *	had only one). YMMV.
++ *
++ *	Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
++ *
++ *	0x61:	IDEIRQ:
++ *		bits [6:5] must be written 01
++ *		bit 4 channel-select primary (0), secondary (1)
++ *
++ *	0x62:	USBIRQ:
++ *		bit 6 OHCI function disabled (0), enabled (1)
++ *	
++ *	0x6a:	ACPI/SCI IRQ: bits 4-6 reserved
++ *
++ *	0x7e:	Data Acq. Module IRQ - bits 4-6 reserved
++ *
++ *	We support USBIRQ (in addition to INTA-INTD) and keep the
++ *	IDE, ACPI and DAQ routing untouched as set by the BIOS.
++ *
++ *	Currently the only reported exception is the new SiS 65x chipset
++ *	which includes the SiS 69x southbridge. Here we have the 85C503
++ *	router revision 0x04 and there are changes in the register layout
++ *	mostly related to the different USB HCs with USB 2.0 support.
++ *
++ *	Onchip routing for router rev-id 0x04 (try-and-error observation)
++ *	Onchip routing for router rev-id 0x04 (trial-and-error observation)
++ *	0x60/0x61/0x62/0x63:	1xEHCI and 3xOHCI (companion) USB-HCs
++ *				bit 6-4 are probably unused, not like 5595
++ */
++
++#define PIRQ_SIS_IRQ_MASK	0x0f
++#define PIRQ_SIS_IRQ_DISABLE	0x80
++#define PIRQ_SIS_USB_ENABLE	0x40
++
++static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	u8 x;
++	int reg;
++
++	reg = pirq;
++	if (reg >= 0x01 && reg <= 0x04)
++		reg += 0x40;
++	pci_read_config_byte(router, reg, &x);
++	return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
++}
++
++static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	u8 x;
++	int reg;
++
++	reg = pirq;
++	if (reg >= 0x01 && reg <= 0x04)
++		reg += 0x40;
++	pci_read_config_byte(router, reg, &x);
++	x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
++	x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE;
++	pci_write_config_byte(router, reg, x);
++	return 1;
++}
++
++
++/*
++ * VLSI: nibble offset 0x74 - educated guess due to routing table and
++ *       config space of VLSI 82C534 PCI-bridge/router (1004:0102)
++ *       Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
++ *       devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
++ *       for the busbridge to the docking station.
++ */
++
++static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	if (pirq > 8) {
++		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
++		return 0;
++	}
++	return read_config_nybble(router, 0x74, pirq-1);
++}
++
++static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	if (pirq > 8) {
++		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
++		return 0;
++	}
++	write_config_nybble(router, 0x74, pirq-1, irq);
++	return 1;
++}
++
++/*
++ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
++ * and Redirect I/O registers (0x0c00 and 0x0c01).  The Index register
++ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a.  The Redirect
++ * register is a straight binary coding of desired PIC IRQ (low nibble).
++ *
++ * The 'link' value in the PIRQ table is already in the correct format
++ * for the Index register.  There are some special index values:
++ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
++ * and 0x03 for SMBus.
++ */
++static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	outb_p(pirq, 0xc00);
++	return inb(0xc01) & 0xf;
++}
++
++static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	outb_p(pirq, 0xc00);
++	outb_p(irq, 0xc01);
++	return 1;
++}
++
++/* Support for AMD756 PCI IRQ Routing
++ * Jhon H. Caicedo <jhcaiced at osso.org.co>
++ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
++ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
++ * The AMD756 pirq rules are nibble-based
++ * offset 0x56 0-3 PIRQA  4-7  PIRQB
++ * offset 0x57 0-3 PIRQC  4-7  PIRQD
++ */
++static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++	u8 irq;
++	irq = 0;
++	if (pirq <= 4)
++	{
++		irq = read_config_nybble(router, 0x56, pirq - 1);
++	}
++	printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
++		dev->vendor, dev->device, pirq, irq);
++	return irq;
++}
++
++static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n", 
++		dev->vendor, dev->device, pirq, irq);
++	if (pirq <= 4)
++	{
++		write_config_nybble(router, 0x56, pirq - 1, irq);
++	}
++	return 1;
++}
++
++#ifdef CONFIG_PCI_BIOS
++
++static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++	struct pci_dev *bridge;
++	int pin = pci_get_interrupt_pin(dev, &bridge);
++	return pcibios_set_irq_routing(bridge, pin, irq);
++}
++
++#endif
++
++static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	static struct pci_device_id pirq_440gx[] = {
++		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
++		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
++		{ },
++	};
++
++	/* 440GX has a proprietary PIRQ router -- don't use it */
++	if (pci_dev_present(pirq_440gx))
++		return 0;
++
++	switch(device)
++	{
++		case PCI_DEVICE_ID_INTEL_82371FB_0:
++		case PCI_DEVICE_ID_INTEL_82371SB_0:
++		case PCI_DEVICE_ID_INTEL_82371AB_0:
++		case PCI_DEVICE_ID_INTEL_82371MX:
++		case PCI_DEVICE_ID_INTEL_82443MX_0:
++		case PCI_DEVICE_ID_INTEL_82801AA_0:
++		case PCI_DEVICE_ID_INTEL_82801AB_0:
++		case PCI_DEVICE_ID_INTEL_82801BA_0:
++		case PCI_DEVICE_ID_INTEL_82801BA_10:
++		case PCI_DEVICE_ID_INTEL_82801CA_0:
++		case PCI_DEVICE_ID_INTEL_82801CA_12:
++		case PCI_DEVICE_ID_INTEL_82801DB_0:
++		case PCI_DEVICE_ID_INTEL_82801E_0:
++		case PCI_DEVICE_ID_INTEL_82801EB_0:
++		case PCI_DEVICE_ID_INTEL_ESB_1:
++		case PCI_DEVICE_ID_INTEL_ICH6_0:
++		case PCI_DEVICE_ID_INTEL_ICH6_1:
++		case PCI_DEVICE_ID_INTEL_ICH7_0:
++		case PCI_DEVICE_ID_INTEL_ICH7_1:
++		case PCI_DEVICE_ID_INTEL_ICH7_30:
++		case PCI_DEVICE_ID_INTEL_ICH7_31:
++		case PCI_DEVICE_ID_INTEL_ESB2_0:
++			r->name = "PIIX/ICH";
++			r->get = pirq_piix_get;
++			r->set = pirq_piix_set;
++			return 1;
++	}
++	return 0;
++}
++
++static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	/* FIXME: We should move some of the quirk fixup stuff here */
++	switch(device)
++	{
++		case PCI_DEVICE_ID_VIA_82C586_0:
++		case PCI_DEVICE_ID_VIA_82C596:
++		case PCI_DEVICE_ID_VIA_82C686:
++		case PCI_DEVICE_ID_VIA_8231:
++		/* FIXME: add new ones for 8233/5 */
++			r->name = "VIA";
++			r->get = pirq_via_get;
++			r->set = pirq_via_set;
++			return 1;
++	}
++	return 0;
++}
++
++static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	switch(device)
++	{
++		case PCI_DEVICE_ID_VLSI_82C534:
++			r->name = "VLSI 82C534";
++			r->get = pirq_vlsi_get;
++			r->set = pirq_vlsi_set;
++			return 1;
++	}
++	return 0;
++}
++
++
++static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	switch(device)
++	{
++		case PCI_DEVICE_ID_SERVERWORKS_OSB4:
++		case PCI_DEVICE_ID_SERVERWORKS_CSB5:
++			r->name = "ServerWorks";
++			r->get = pirq_serverworks_get;
++			r->set = pirq_serverworks_set;
++			return 1;
++	}
++	return 0;
++}
++
++static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	if (device != PCI_DEVICE_ID_SI_503)
++		return 0;
++		
++	r->name = "SIS";
++	r->get = pirq_sis_get;
++	r->set = pirq_sis_set;
++	return 1;
++}
++
++static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	switch(device)
++	{
++		case PCI_DEVICE_ID_CYRIX_5520:
++			r->name = "NatSemi";
++			r->get = pirq_cyrix_get;
++			r->set = pirq_cyrix_set;
++			return 1;
++	}
++	return 0;
++}
++
++static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	switch(device)
++	{
++		case PCI_DEVICE_ID_OPTI_82C700:
++			r->name = "OPTI";
++			r->get = pirq_opti_get;
++			r->set = pirq_opti_set;
++			return 1;
++	}
++	return 0;
++}
++
++static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	switch(device)
++	{
++		case PCI_DEVICE_ID_ITE_IT8330G_0:
++			r->name = "ITE";
++			r->get = pirq_ite_get;
++			r->set = pirq_ite_set;
++			return 1;
++	}
++	return 0;
++}
++
++static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	switch(device)
++	{
++	case PCI_DEVICE_ID_AL_M1533:
++	case PCI_DEVICE_ID_AL_M1563:
++		printk("PCI: Using ALI IRQ Router\n");
++		r->name = "ALI";
++		r->get = pirq_ali_get;
++		r->set = pirq_ali_set;
++		return 1;
++	}
++	return 0;
++}
++
++static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++	switch(device)
++	{
++		case PCI_DEVICE_ID_AMD_VIPER_740B:
++			r->name = "AMD756";
++			break;
++		case PCI_DEVICE_ID_AMD_VIPER_7413:
++			r->name = "AMD766";
++			break;
++		case PCI_DEVICE_ID_AMD_VIPER_7443:
++			r->name = "AMD768";
++			break;
++		default:
++			return 0;
++	}
++	r->get = pirq_amd756_get;
++	r->set = pirq_amd756_set;
++	return 1;
++}
++		
++static __initdata struct irq_router_handler pirq_routers[] = {
++	{ PCI_VENDOR_ID_INTEL, intel_router_probe },
++	{ PCI_VENDOR_ID_AL, ali_router_probe },
++	{ PCI_VENDOR_ID_ITE, ite_router_probe },
++	{ PCI_VENDOR_ID_VIA, via_router_probe },
++	{ PCI_VENDOR_ID_OPTI, opti_router_probe },
++	{ PCI_VENDOR_ID_SI, sis_router_probe },
++	{ PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
++	{ PCI_VENDOR_ID_VLSI, vlsi_router_probe },
++	{ PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
++	{ PCI_VENDOR_ID_AMD, amd_router_probe },
++	/* Someone with docs needs to add the ATI Radeon IGP */
++	{ 0, NULL }
++};
++static struct irq_router pirq_router;
++static struct pci_dev *pirq_router_dev;
++
++
++/*
++ *	FIXME: should we have an option to say "generic for
++ *	chipset" ?
++ */
++ 
++static void __init pirq_find_router(struct irq_router *r)
++{
++	struct irq_routing_table *rt = pirq_table;
++	struct irq_router_handler *h;
++
++#ifdef CONFIG_PCI_BIOS
++	if (!rt->signature) {
++		printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
++		r->set = pirq_bios_set;
++		r->name = "BIOS";
++		return;
++	}
++#endif
++
++	/* Default unless a driver reloads it */
++	r->name = "default";
++	r->get = NULL;
++	r->set = NULL;
++	
++	DBG("PCI: Attempting to find IRQ router for %04x:%04x\n",
++	    rt->rtr_vendor, rt->rtr_device);
++
++	pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
++	if (!pirq_router_dev) {
++		DBG("PCI: Interrupt router not found at %02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
++		return;
++	}
++
++	for( h = pirq_routers; h->vendor; h++) {
++		/* First look for a router match */
++		if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
++			break;
++		/* Fall back to a device match */
++		if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
++			break;
++	}
++	printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
++		pirq_router.name,
++		pirq_router_dev->vendor,
++		pirq_router_dev->device,
++		pci_name(pirq_router_dev));
++}
++
++static struct irq_info *pirq_get_info(struct pci_dev *dev)
++{
++	struct irq_routing_table *rt = pirq_table;
++	int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
++	struct irq_info *info;
++
++	for (info = rt->slots; entries--; info++)
++		if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
++			return info;
++	return NULL;
++}
++
++static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
++{
++	u8 pin;
++	struct irq_info *info;
++	int i, pirq, newirq;
++	int irq = 0;
++	u32 mask;
++	struct irq_router *r = &pirq_router;
++	struct pci_dev *dev2 = NULL;
++	char *msg = NULL;
++
++	/* Find IRQ pin */
++	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++	if (!pin) {
++		DBG(" -> no interrupt pin\n");
++		return 0;
++	}
++	pin = pin - 1;
++
++	/* Find IRQ routing entry */
++
++	if (!pirq_table)
++		return 0;
++	
++	DBG("IRQ for %s[%c]", pci_name(dev), 'A' + pin);
++	info = pirq_get_info(dev);
++	if (!info) {
++		DBG(" -> not found in routing table\n");
++		return 0;
++	}
++	pirq = info->irq[pin].link;
++	mask = info->irq[pin].bitmap;
++	if (!pirq) {
++		DBG(" -> not routed\n");
++		return 0;
++	}
++	DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
++	mask &= pcibios_irq_mask;
++
++	/* Work around broken HP Pavilion Notebooks which assign USB to
++	   IRQ 9 even though it is actually wired to IRQ 11 */
++
++	if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
++		dev->irq = 11;
++		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
++		r->set(pirq_router_dev, dev, pirq, 11);
++	}
++
++	/* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
++	if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
++		pirq = 0x68;
++		mask = 0x400;
++		dev->irq = r->get(pirq_router_dev, dev, pirq);
++		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
++	}
++
++	/*
++	 * Find the best IRQ to assign: use the one
++	 * reported by the device if possible.
++	 */
++	newirq = dev->irq;
++	if (!((1 << newirq) & mask)) {
++		if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
++		else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev));
++	}
++	if (!newirq && assign) {
++		for (i = 0; i < 16; i++) {
++			if (!(mask & (1 << i)))
++				continue;
++			if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, SA_SHIRQ))
++				newirq = i;
++		}
++	}
++	DBG(" -> newirq=%d", newirq);
++
++	/* Check if it is hardcoded */
++	if ((pirq & 0xf0) == 0xf0) {
++		irq = pirq & 0xf;
++		DBG(" -> hardcoded IRQ %d\n", irq);
++		msg = "Hardcoded";
++	} else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
++	((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
++		DBG(" -> got IRQ %d\n", irq);
++		msg = "Found";
++	} else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
++		DBG(" -> assigning IRQ %d", newirq);
++		if (r->set(pirq_router_dev, dev, pirq, newirq)) {
++			eisa_set_level_irq(newirq);
++			DBG(" ... OK\n");
++			msg = "Assigned";
++			irq = newirq;
++		}
++	}
++
++	if (!irq) {
++		DBG(" ... failed\n");
++		if (newirq && mask == (1 << newirq)) {
++			msg = "Guessed";
++			irq = newirq;
++		} else
++			return 0;
++	}
++	printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
++
++	/* Update IRQ for all devices with the same pirq value */
++	while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
++		pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
++		if (!pin)
++			continue;
++		pin--;
++		info = pirq_get_info(dev2);
++		if (!info)
++			continue;
++		if (info->irq[pin].link == pirq) {
++			/* We refuse to override the dev->irq information. Give a warning! */
++		    	if ( dev2->irq && dev2->irq != irq && \
++			(!(pci_probe & PCI_USE_PIRQ_MASK) || \
++			((1 << dev2->irq) & mask)) ) {
++#ifndef CONFIG_PCI_MSI
++		    		printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
++				       pci_name(dev2), dev2->irq, irq);
++#endif
++		    		continue;
++		    	}
++			dev2->irq = irq;
++			pirq_penalty[irq]++;
++			if (dev != dev2)
++				printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
++		}
++	}
++	return 1;
++}
++
++static void __init pcibios_fixup_irqs(void)
++{
++	struct pci_dev *dev = NULL;
++	u8 pin;
++
++	DBG("PCI: IRQ fixup\n");
++	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
++		/*
++		 * If the BIOS has set an out of range IRQ number, just ignore it.
++		 * Also keep track of which IRQ's are already in use.
++		 */
++		if (dev->irq >= 16) {
++			DBG("%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
++			dev->irq = 0;
++		}
++		/* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
++		if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
++			pirq_penalty[dev->irq] = 0;
++		pirq_penalty[dev->irq]++;
++	}
++
++	dev = NULL;
++	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
++		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++#ifdef CONFIG_X86_IO_APIC
++		/*
++		 * Recalculate IRQ numbers if we use the I/O APIC.
++		 */
++		if (io_apic_assign_pci_irqs)
++		{
++			int irq;
++
++			if (pin) {
++				pin--;		/* interrupt pins are numbered starting from 1 */
++				irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
++	/*
++	 * Busses behind bridges are typically not listed in the MP-table.
++	 * In this case we have to look up the IRQ based on the parent bus,
++	 * parent slot, and pin number. The SMP code detects such bridged
++	 * busses itself so we should get into this branch reliably.
++	 */
++				if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
++					struct pci_dev * bridge = dev->bus->self;
++
++					pin = (pin + PCI_SLOT(dev->devfn)) % 4;
++					irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 
++							PCI_SLOT(bridge->devfn), pin);
++					if (irq >= 0)
++						printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
++							pci_name(bridge), 'A' + pin, irq);
++				}
++				if (irq >= 0) {
++					if (use_pci_vector() &&
++						!platform_legacy_irq(irq))
++						irq = IO_APIC_VECTOR(irq);
++
++					printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
++						pci_name(dev), 'A' + pin, irq);
++					dev->irq = irq;
++				}
++			}
++		}
++#endif
++		/*
++		 * Still no IRQ? Try to lookup one...
++		 */
++		if (pin && !dev->irq)
++			pcibios_lookup_irq(dev, 0);
++	}
++}
++
++/*
++ * Work around broken HP Pavilion Notebooks which assign USB to
++ * IRQ 9 even though it is actually wired to IRQ 11
++ */
++static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
++{
++	if (!broken_hp_bios_irq9) {
++		broken_hp_bios_irq9 = 1;
++		printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
++	}
++	return 0;
++}
++
++/*
++ * Work around broken Acer TravelMate 360 Notebooks which assign
++ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
++ */
++static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
++{
++	if (!acer_tm360_irqrouting) {
++		acer_tm360_irqrouting = 1;
++		printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
++	}
++	return 0;
++}
++
++static struct dmi_system_id __initdata pciirq_dmi_table[] = {
++	{
++		.callback = fix_broken_hp_bios_irq9,
++		.ident = "HP Pavilion N5400 Series Laptop",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++			DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
++			DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
++		},
++	},
++	{
++		.callback = fix_acer_tm360_irqrouting,
++		.ident = "Acer TravelMate 36x Laptop",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
++		},
++	},
++	{ }
++};
++
++static int __init pcibios_irq_init(void)
++{
++	DBG("PCI: IRQ init\n");
++
++	if (pcibios_enable_irq || raw_pci_ops == NULL)
++		return 0;
++
++	dmi_check_system(pciirq_dmi_table);
++
++	pirq_table = pirq_find_routing_table();
++
++#ifdef CONFIG_PCI_BIOS
++	if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
++		pirq_table = pcibios_get_irq_routing_table();
++#endif
++	if (pirq_table) {
++		pirq_peer_trick();
++		pirq_find_router(&pirq_router);
++		if (pirq_table->exclusive_irqs) {
++			int i;
++			for (i=0; i<16; i++)
++				if (!(pirq_table->exclusive_irqs & (1 << i)))
++					pirq_penalty[i] += 100;
++		}
++		/* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
++		if (io_apic_assign_pci_irqs)
++			pirq_table = NULL;
++	}
++
++	pcibios_enable_irq = pirq_enable_irq;
++
++	pcibios_fixup_irqs();
++	return 0;
++}
++
++subsys_initcall(pcibios_irq_init);
++
++
++static void pirq_penalize_isa_irq(int irq)
++{
++	/*
++	 *  If any ISAPnP device reports an IRQ in its list of possible
++	 *  IRQ's, we try to avoid assigning it to PCI devices.
++	 */
++	if (irq < 16)
++		pirq_penalty[irq] += 100;
++}
++
++void pcibios_penalize_isa_irq(int irq)
++{
++#ifdef CONFIG_ACPI_PCI
++	if (!acpi_noirq)
++		acpi_penalize_isa_irq(irq);
++	else
++#endif
++		pirq_penalize_isa_irq(irq);
++}
++
++static int pirq_enable_irq(struct pci_dev *dev)
++{
++	u8 pin;
++	struct pci_dev *temp_dev;
++
++	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++	if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
++		char *msg = "";
++
++		pin--;		/* interrupt pins are numbered starting from 1 */
++
++		if (io_apic_assign_pci_irqs) {
++			int irq;
++
++			irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
++			/*
++			 * Busses behind bridges are typically not listed in the MP-table.
++			 * In this case we have to look up the IRQ based on the parent bus,
++			 * parent slot, and pin number. The SMP code detects such bridged
++			 * busses itself so we should get into this branch reliably.
++			 */
++			temp_dev = dev;
++			while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
++				struct pci_dev * bridge = dev->bus->self;
++
++				pin = (pin + PCI_SLOT(dev->devfn)) % 4;
++				irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 
++						PCI_SLOT(bridge->devfn), pin);
++				if (irq >= 0)
++					printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
++						pci_name(bridge), 'A' + pin, irq);
++				dev = bridge;
++			}
++			dev = temp_dev;
++			if (irq >= 0) {
++#ifdef CONFIG_PCI_MSI
++				if (!platform_legacy_irq(irq))
++					irq = IO_APIC_VECTOR(irq);
++#endif
++				printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
++					pci_name(dev), 'A' + pin, irq);
++				dev->irq = irq;
++				return 0;
++			} else
++				msg = " Probably buggy MP table.";
++		} else if (pci_probe & PCI_BIOS_IRQ_SCAN)
++			msg = "";
++		else
++			msg = " Please try using pci=biosirq.";
++
++		/* With IDE legacy devices the IRQ lookup failure is not a problem.. */
++		if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
++			return 0;
++
++		printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
++		       'A' + pin, pci_name(dev), msg);
++	}
++	return 0;
++}
++
++int pci_vector_resources(int last, int nr_released)
++{
++	int count = nr_released;
++
++	int next = last;
++	int offset = (last % 8);
++
++	while (next < FIRST_SYSTEM_VECTOR) {
++		next += 8;
++#ifdef CONFIG_X86_64
++		if (next == IA32_SYSCALL_VECTOR)
++			continue;
++#else
++		if (next == SYSCALL_VECTOR)
++			continue;
++#endif
++		count++;
++		if (next >= FIRST_SYSTEM_VECTOR) {
++			if (offset%8) {
++				next = FIRST_DEVICE_VECTOR + offset;
++				offset++;
++				continue;
++			}
++			count--;
++		}
++	}
++
++	return count;
++}
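(Editorial aside, not part of the patch: the bridge walk in pirq_enable_irq() above relies on the standard PCI interrupt-pin swizzle.  A minimal worked example, using hypothetical slot and pin values:)

	/* Device in slot 3 behind a PCI-PCI bridge, wired to INTB,     */
	/* so pin == 1 after the "pin--" adjustment in the code above.  */
	pin = (pin + PCI_SLOT(dev->devfn)) % 4;	/* (1 + 3) % 4 == 0     */
	/* The lookup therefore continues on the parent bus as the      */
	/* bridge's INTA ('A' + 0), one bridge level at a time, until   */
	/* IO_APIC_get_PCI_irq_vector() finds an MP-table entry.        */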
+diff -Nurp pristine-linux-2.6.12/arch/xen/i386/pci/Makefile linux-2.6.12-xen/arch/xen/i386/pci/Makefile
+--- pristine-linux-2.6.12/arch/xen/i386/pci/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/i386/pci/Makefile	2006-03-05 23:36:29.000000000 +0100
+@@ -0,0 +1,33 @@
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++CFLAGS	+= -Iarch/$(XENARCH)/pci
++
++obj-y				:= i386.o
++
++#c-obj-$(CONFIG_PCI_BIOS)		+= pcbios.o
++c-obj-$(CONFIG_PCI_MMCONFIG)	+= mmconfig.o
++c-obj-$(CONFIG_PCI_DIRECT)	+= direct.o
++
++c-pci-y				:= fixup.o
++c-pci-$(CONFIG_ACPI_PCI)	+= acpi.o
++c-pci-y				+= legacy.o
++# Make sure irq.o gets linked in after legacy.o
++l-pci-y				+= irq.o
++
++c-pci-$(CONFIG_X86_VISWS)	:= visws.o fixup.o
++pci-$(CONFIG_X86_VISWS)		:=
++c-pci-$(CONFIG_X86_NUMAQ)	:= numa.o
++l-pci-$(CONFIG_X86_NUMAQ)	:= irq.o
++
++obj-y				+= $(pci-y)
++c-obj-y				+= $(c-pci-y) common.o
++
++c-link	:=
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
++	@ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
++
++# Make sure irq.o gets linked in before common.o
++obj-y	+= $(patsubst common.o,$(l-pci-y) common.o,$(c-obj-y))
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+diff -Nurp pristine-linux-2.6.12/arch/xen/Kconfig linux-2.6.12-xen/arch/xen/Kconfig
+--- pristine-linux-2.6.12/arch/xen/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/Kconfig	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,212 @@
++#
++# For a description of the syntax of this configuration file,
++# see Documentation/kbuild/kconfig-language.txt.
++#
++
++mainmenu "Linux Kernel Configuration"
++
++config XEN
++	bool
++	default y
++	help
++	  This is the Linux Xen port.
++
++config ARCH_XEN
++	bool
++	default y
++
++
++config NO_IDLE_HZ
++	bool
++	default y
++
++
++menu "XEN"
++
++config XEN_PRIVILEGED_GUEST
++	bool "Privileged Guest (domain 0)"
++	default n
++	select XEN_PHYSDEV_ACCESS
++	help
++	  Support for privileged operation (domain 0)
++
++config XEN_PHYSDEV_ACCESS
++	bool "Physical device access"
++	default XEN_PRIVILEGED_GUEST
++	help
++	  Assume access is available to physical hardware devices
++	  (e.g., hard drives, network cards). This allows you to configure
++	  such devices and also includes some low-level support that is
++	  otherwise not compiled into the kernel.
++
++config XEN_BLKDEV_BACKEND
++	bool "Block-device backend driver"
++	depends on XEN_PHYSDEV_ACCESS
++	default y
++	help
++	  The block-device backend driver allows the kernel to export its
++	  block devices to other guests via a high-performance shared-memory
++	  interface.
++
++config XEN_BLKDEV_TAP_BE
++        bool "Block Tap support for backend driver (DANGEROUS)"
++        depends on XEN_BLKDEV_BACKEND
++        default n
++        help
++          If you intend to use the block tap driver, the backend domain will
++          not know the domain id of the real frontend, and so will not be able
++          to map its data pages.  This modifies the backend to attempt to map
++          from both the tap domain and the real frontend.  This presents a
++          security risk, and so should ONLY be used for development
++          with the blktap.  This option will be removed as the block drivers are
++          modified to use grant tables.
++
++config XEN_NETDEV_BACKEND
++	bool "Network-device backend driver"
++	depends on XEN_PHYSDEV_ACCESS
++	default y
++	help
++	  The network-device backend driver allows the kernel to export its
++	  network devices to other guests via a high-performance shared-memory
++	  interface.
++
++config XEN_NETDEV_PIPELINED_TRANSMITTER
++	bool "Pipelined transmitter (DANGEROUS)"
++	depends on XEN_NETDEV_BACKEND
++	default n
++	help
++	  If the net backend is a dumb domain, such as a transparent Ethernet
++	  bridge with no local IP interface, it is safe to say Y here to get
++	  slightly lower network overhead.
++	  If the backend has a local IP interface; or may be doing smart things
++	  like reassembling packets to perform firewall filtering; or if you
++	  are unsure; or if you experience network hangs when this option is
++	  enabled; then you must say N here.
++
++config XEN_TPMDEV_FRONTEND
++        bool "TPM-device frontend driver"
++        default n
++	select TCG_TPM
++	select TCG_XEN
++        help
++          The TPM-device frontend driver.
++
++config XEN_TPMDEV_BACKEND
++        bool "TPM-device backend driver"
++        default n
++        help
++          The TPM-device backend driver
++
++config XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
++        bool "TPM backend closes upon vTPM failure"
++        depends on XEN_TPMDEV_BACKEND
++        default n
++        help
++          The TPM backend closes the channel if the vTPM in userspace indicates
++          a failure. The corresponding domain's channel will be closed.
++          Say Y if you want this feature.
++
++config XEN_BLKDEV_FRONTEND
++	tristate "Block-device frontend driver"
++	default y
++	help
++	  The block-device frontend driver allows the kernel to access block
++	  devices mounted within another guest OS. Unless you are building a
++	  dedicated device-driver domain, or your master control domain
++	  (domain 0), then you almost certainly want to say Y here.
++	  (domain 0), you almost certainly want to say Y here.
++config XEN_NETDEV_FRONTEND
++	tristate "Network-device frontend driver"
++	default y
++	help
++	  The network-device frontend driver allows the kernel to access
++	  network interfaces within another guest OS. Unless you are building a
++	  dedicated device-driver domain, or your master control domain
++	  (domain 0), you almost certainly want to say Y here.
++
++config XEN_BLKDEV_TAP
++	bool "Block device tap driver"
++	default n
++	help
++	  This driver allows a VM to interact on block device channels
++	  to other VMs.  Block messages may be passed through or redirected
++	  to a character device, allowing device prototyping in application
++	  space.  Odds are that you want to say N here.
++
++config XEN_SHADOW_MODE
++	bool "Fake shadow mode"
++	default n
++	help
++	  Fakes out a shadow mode kernel.
++
++
++config XEN_SCRUB_PAGES
++	bool "Scrub memory before freeing it to Xen"
++	default y
++	help
++	  Erase memory contents before freeing it back to Xen's global
++	  pool. This ensures that any secrets contained within that
++	  memory (e.g., private keys) cannot be found by other guests that
++	  may be running on the machine. Most people will want to say Y here.
++	  If security is not a concern then you may increase performance by
++	  saying N.
++
++choice
++	prompt "Processor Type"
++	default XEN_X86
++
++config XEN_X86
++	bool "X86"
++	help
++	  Choose this option if your computer is a X86 architecture.
++
++config XEN_X86_64
++	bool "X86_64"
++	help
++	  Choose this option if your computer is a X86_64 architecture.
++
++endchoice
++
++endmenu
++
++config HAVE_ARCH_ALLOC_SKB
++	bool
++	default y
++
++config HAVE_ARCH_DEV_ALLOC_SKB
++	bool
++	default y
++
++source "init/Kconfig"
++
++if XEN_X86
++source "arch/xen/i386/Kconfig"
++endif
++
++if XEN_X86_64
++source "arch/xen/x86_64/Kconfig"
++endif
++
++menu "Executable file formats"
++
++source "fs/Kconfig.binfmt"
++
++endmenu
++
++source "arch/xen/Kconfig.drivers"
++
++if XEN_PRIVILEGED_GUEST
++menu "Power management options"
++source "drivers/acpi/Kconfig"
++endmenu
++endif
++
++source "fs/Kconfig"
++
++source "security/Kconfig"
++
++source "crypto/Kconfig"
++
++source "lib/Kconfig"
++
++source "arch/xen/Kconfig.debug"
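(Editorial aside, not part of the patch: for orientation, the menu above typically reduces to a .config fragment along these lines for an unprivileged domU guest; the exact values are illustrative only.)

	CONFIG_XEN=y
	CONFIG_ARCH_XEN=y
	CONFIG_NO_IDLE_HZ=y
	# CONFIG_XEN_PRIVILEGED_GUEST is not set
	# CONFIG_XEN_PHYSDEV_ACCESS is not set
	CONFIG_XEN_BLKDEV_FRONTEND=y
	CONFIG_XEN_NETDEV_FRONTEND=y
	CONFIG_XEN_SCRUB_PAGES=y
	CONFIG_XEN_X86=y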
+diff -Nurp pristine-linux-2.6.12/arch/xen/Kconfig.debug linux-2.6.12-xen/arch/xen/Kconfig.debug
+--- pristine-linux-2.6.12/arch/xen/Kconfig.debug	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/Kconfig.debug	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,129 @@
++menu "Kernel hacking"
++
++source "lib/Kconfig.debug"
++
++# X86
++config EARLY_PRINTK
++	bool "Early printk" if EMBEDDED && DEBUG_KERNEL
++	default y
++	depends on X86
++	help
++	  Write kernel log output directly into the VGA buffer or to a serial
++	  port.
++
++	  This is useful for kernel debugging when your machine crashes very
++	  early before the console code is initialized. For normal operation
++	  it is not recommended because it looks ugly and doesn't cooperate
++	  with klogd/syslogd or the X server. You should normally say N here,
++	  unless you want to debug such a crash.
++
++config DEBUG_STACKOVERFLOW
++	bool "Check for stack overflows"
++	depends on DEBUG_KERNEL && X86
++
++config KPROBES
++	bool "Kprobes"
++	depends on DEBUG_KERNEL && X86
++	help
++	  Kprobes allows you to trap at almost any kernel address and
++	  execute a callback function.  register_kprobe() establishes
++	  a probepoint and specifies the callback.  Kprobes is useful
++	  for kernel debugging, non-intrusive instrumentation and testing.
++	  If in doubt, say "N".
++
++config DEBUG_STACK_USAGE
++	bool "Stack utilization instrumentation"
++	depends on DEBUG_KERNEL && X86
++	help
++	  Enables the display of the minimum amount of free stack which each
++	  task has ever had available in the sysrq-T and sysrq-P debug output.
++
++	  This option will slow down process creation somewhat.
++
++comment "Page alloc debug is incompatible with Software Suspend on i386"
++	depends on DEBUG_KERNEL && SOFTWARE_SUSPEND && X86
++
++config DEBUG_PAGEALLOC
++	bool "Page alloc debugging"
++	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && X86
++	help
++	  Unmap pages from the kernel linear mapping after free_pages().
++	  This results in a large slowdown, but helps to find certain types
++	  of memory corruptions.
++
++config 4KSTACKS
++	bool "Use 4Kb for kernel stacks instead of 8Kb"
++	depends on DEBUG_KERNEL && X86
++	help
++	  If you say Y here the kernel will use a 4Kb stacksize for the
++	  kernel stack attached to each process/thread. This facilitates
++	  running more threads on a system and also reduces the pressure
++	  on the VM subsystem for higher order allocations. This option
++	  will also use IRQ stacks to compensate for the reduced stackspace.
++
++config X86_FIND_SMP_CONFIG
++	bool
++	depends on X86_LOCAL_APIC || X86_VOYAGER && X86
++	default y
++
++config X86_MPPARSE
++	bool
++	depends on X86_LOCAL_APIC && !X86_VISWS && X86
++	default y
++
++# X86_64
++
++# !SMP for now because the context switch early causes GPF in segment reloading
++# and the GS base checking does the wrong thing then, causing a hang.
++config CHECKING
++	bool "Additional run-time checks"
++	depends on DEBUG_KERNEL && !SMP && X86_64
++	help
++	  Enables some internal consistency checks for kernel debugging.
++	  You should normally say N.
++
++config INIT_DEBUG
++	bool "Debug __init statements"
++	depends on DEBUG_KERNEL && X86_64
++	help
++	  Fill __init and __initdata at the end of boot. This helps debugging
++	  illegal uses of __init and __initdata after initialization.
++
++config IOMMU_DEBUG
++       depends on GART_IOMMU && DEBUG_KERNEL && X86_64
++       bool "Enable IOMMU debugging"
++       help
++         Force the IOMMU to on even when you have less than 4GB of
++	 memory and add debugging code. On overflow always panic. And
++	 allow to enable IOMMU leak tracing. Can be disabled at boot
++	 allow IOMMU leak tracing to be enabled. Can be disabled at boot
++	 list merging.  Currently not recommended for production
++	 code. When you use it make sure you have a big enough
++	 IOMMU/AGP aperture.  Most of the options enabled by this can
++	 be set more finegrained using the iommu= command line
++	 options. See Documentation/x86_64/boot-options.txt for more
++	 details.
++
++config IOMMU_LEAK
++       bool "IOMMU leak tracing"
++       depends on DEBUG_KERNEL && X86_64
++       depends on IOMMU_DEBUG
++       help
++         Add a simple leak tracer to the IOMMU code. This is useful when you
++	 are debugging a buggy device driver that leaks IOMMU mappings.
++
++#config X86_REMOTE_DEBUG
++#       bool "kgdb debugging stub"
++
++# X86 & X86_64
++config KPROBES
++	bool "Kprobes"
++	depends on DEBUG_KERNEL
++	help
++	  Kprobes allows you to trap at almost any kernel address and
++	  execute a callback function.  register_kprobe() establishes
++	  a probepoint and specifies the callback.  Kprobes is useful
++	  for kernel debugging, non-intrusive instrumentation and testing.
++	  If in doubt, say "N".
++
++endmenu
+diff -Nurp pristine-linux-2.6.12/arch/xen/Kconfig.drivers linux-2.6.12-xen/arch/xen/Kconfig.drivers
+--- pristine-linux-2.6.12/arch/xen/Kconfig.drivers	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/Kconfig.drivers	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,98 @@
++# arch/xen/Kconfig.drivers
++
++menu "Device Drivers"
++
++source "drivers/base/Kconfig"
++
++if XEN_PHYSDEV_ACCESS
++source "drivers/mtd/Kconfig"
++source "drivers/parport/Kconfig"
++source "drivers/pnp/Kconfig"
++endif
++
++source "drivers/block/Kconfig"
++
++if XEN_PHYSDEV_ACCESS
++source "drivers/ide/Kconfig"
++endif
++
++source "drivers/scsi/Kconfig"
++
++if XEN_PHYSDEV_ACCESS
++source "drivers/cdrom/Kconfig"
++endif
++
++source "drivers/md/Kconfig"
++
++if XEN_PHYSDEV_ACCESS
++source "drivers/message/fusion/Kconfig"
++source "drivers/ieee1394/Kconfig"
++source "drivers/message/i2o/Kconfig"
++endif
++
++source "net/Kconfig"
++
++if XEN_PHYSDEV_ACCESS
++source "drivers/isdn/Kconfig"
++source "drivers/telephony/Kconfig"
++source "drivers/input/Kconfig"
++source "drivers/char/Kconfig"
++source "drivers/i2c/Kconfig"
++source "drivers/w1/Kconfig"
++source "drivers/misc/Kconfig"
++source "drivers/media/Kconfig"
++source "drivers/video/Kconfig"
++source "sound/Kconfig"
++source "drivers/usb/Kconfig"
++source "drivers/mmc/Kconfig"
++source "drivers/infiniband/Kconfig"
++endif
++
++if !XEN_PHYSDEV_ACCESS
++source "drivers/char/tpm/Kconfig.domU"
++endif
++
++if !XEN_PHYSDEV_ACCESS
++
++menu "Character devices"
++
++config UNIX98_PTYS
++	bool
++	default y
++
++config LEGACY_PTYS
++	bool "Legacy (BSD) PTY support"
++	default y
++	---help---
++	  A pseudo terminal (PTY) is a software device consisting of two
++	  halves: a master and a slave. The slave device behaves identically to
++	  a physical terminal; the master device is used by a process to
++	  read data from and write data to the slave, thereby emulating a
++	  terminal. Typical programs for the master side are telnet servers
++	  and xterms.
++
++	  Linux has traditionally used the BSD-like names /dev/ptyxx
++	  for masters and /dev/ttyxx for slaves of pseudo
++	  terminals. This scheme has a number of problems, including
++	  security.  This option enables these legacy devices; on most
++	  systems, it is safe to say N.
++
++
++config LEGACY_PTY_COUNT
++	int "Maximum number of legacy PTYs in use"
++	depends on LEGACY_PTYS
++	range 1 256
++	default "256"
++	---help---
++	  The maximum number of legacy PTYs that can be used at any one time.
++	  The default is 256, and should be more than enough.  Embedded
++	  systems may want to reduce this to save memory.
++
++	  When not in use, each legacy PTY occupies 12 bytes on 32-bit
++	  architectures and 24 bytes on 64-bit architectures.
++
++endmenu
++
++endif
++
++endmenu
+diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/evtchn.c linux-2.6.12-xen/arch/xen/kernel/evtchn.c
+--- pristine-linux-2.6.12/arch/xen/kernel/evtchn.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/kernel/evtchn.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,820 @@
++/******************************************************************************
++ * evtchn.c
++ * 
++ * Communication via Xen event channels.
++ * 
++ * Copyright (c) 2002-2005, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/version.h>
++#include <asm/atomic.h>
++#include <asm/system.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <asm-xen/xen-public/event_channel.h>
++#include <asm-xen/xen-public/physdev.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/evtchn.h>
++#include <linux/mc146818rtc.h> /* RTC_IRQ */
++
++/*
++ * This lock protects updates to the following mapping and reference-count
++ * arrays. The lock does not need to be acquired to read the mapping tables.
++ */
++static spinlock_t irq_mapping_update_lock;
++
++/* IRQ <-> event-channel mappings. */
++static int evtchn_to_irq[NR_EVENT_CHANNELS];
++
++/* Packed IRQ information: binding type, sub-type index, and event channel. */
++static u32 irq_info[NR_IRQS];
++/* Binding types. */
++enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
++/* Constructor for packed IRQ information. */
++#define mk_irq_info(type, index, evtchn)				\
++	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
++/* Convenient shorthand for packed representation of an unbound IRQ. */
++#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
++/* Accessor macros for packed IRQ information. */
++#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
++#define index_from_irq(irq)  ((u8)(irq_info[irq] >> 16))
++#define type_from_irq(irq)   ((u8)(irq_info[irq] >> 24))
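/* Editorial worked example, not part of the patch: with the enum above,
 * IRQT_VIRQ == 2, so an IRQ bound to VIRQ index 0 on event channel 5 packs as
 *
 *	irq_info[irq] = mk_irq_info(IRQT_VIRQ, 0, 5);	== 0x02000005
 *
 * and the accessors recover the individual fields again:
 *
 *	evtchn_from_irq(irq) == 5
 *	index_from_irq(irq)  == 0
 *	type_from_irq(irq)   == IRQT_VIRQ
 */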
++
++/* IRQ <-> VIRQ mapping. */
++DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
++
++/* IRQ <-> IPI mapping. */
++#ifndef NR_IPIS
++#define NR_IPIS 1 
++#endif
++DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++/* Reference counts for bindings to IRQs. */
++static int irq_bindcount[NR_IRQS];
++
++/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
++static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
++
++#ifdef CONFIG_SMP
++
++static u8 cpu_evtchn[NR_EVENT_CHANNELS];
++static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
++
++#define active_evtchns(cpu,sh,idx)		\
++	((sh)->evtchn_pending[idx] &		\
++	 cpu_evtchn_mask[cpu][idx] &		\
++	 ~(sh)->evtchn_mask[idx])
++
++static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
++{
++	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
++	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
++	cpu_evtchn[chn] = cpu;
++}
++
++static void init_evtchn_cpu_bindings(void)
++{
++	/* By default all event channels notify CPU#0. */
++	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
++	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
++}
++
++#define cpu_from_evtchn(evtchn)		(cpu_evtchn[evtchn])
++
++#else
++
++#define active_evtchns(cpu,sh,idx)		\
++	((sh)->evtchn_pending[idx] &		\
++	 ~(sh)->evtchn_mask[idx])
++#define bind_evtchn_to_cpu(chn,cpu)	((void)0)
++#define init_evtchn_cpu_bindings()	((void)0)
++#define cpu_from_evtchn(evtchn)		(0)
++
++#endif
++
++/* Upcall to generic IRQ layer. */
++#ifdef CONFIG_X86
++extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
++#if defined (__i386__)
++#define IRQ_REG orig_eax
++#elif defined (__x86_64__)
++#define IRQ_REG orig_rax
++#endif
++#define do_IRQ(irq, regs) do {			\
++	(regs)->IRQ_REG = (irq);		\
++	do_IRQ((regs));				\
++} while (0)
++#endif
++
++/* Xen will never allocate port zero for any purpose. */
++#define VALID_EVTCHN(chn)	((chn) != 0)
++
++/*
++ * Force a proper event-channel callback from Xen after clearing the
++ * callback mask. We do this in a very simple manner, by making a call
++ * down into Xen. The pending flag will be checked by Xen on return.
++ */
++void force_evtchn_callback(void)
++{
++	(void)HYPERVISOR_xen_version(0, NULL);
++}
++EXPORT_SYMBOL(force_evtchn_callback);
++
++/* NB. Interrupts are disabled on entry. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
++{
++	unsigned long  l1, l2;
++	unsigned int   l1i, l2i, port;
++	int            irq, cpu = smp_processor_id();
++	shared_info_t *s = HYPERVISOR_shared_info;
++	vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];
++
++	vcpu_info->evtchn_upcall_pending = 0;
++
++	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
++	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
++	while (l1 != 0) {
++		l1i = __ffs(l1);
++		l1 &= ~(1UL << l1i);
++
++		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
++			l2i = __ffs(l2);
++
++			port = (l1i * BITS_PER_LONG) + l2i;
++			if ((irq = evtchn_to_irq[port]) != -1)
++				do_IRQ(irq, regs);
++			else
++				evtchn_device_upcall(port);
++		}
++	}
++}
++EXPORT_SYMBOL(evtchn_do_upcall);
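/* Editorial note, not part of the patch: the two-level scan above derives the
 * port number as (l1i * BITS_PER_LONG) + l2i.  For example, on a 32-bit build,
 * bit 2 set in evtchn_pending_sel (l1i == 2) combined with bit 5 pending in
 * s->evtchn_pending[2] (l2i == 5) selects port 2 * 32 + 5 == 69, which is then
 * routed either to do_IRQ() or to evtchn_device_upcall(). */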
++
++static int find_unbound_irq(void)
++{
++	int irq;
++
++	for (irq = 0; irq < NR_IRQS; irq++)
++		if (irq_bindcount[irq] == 0)
++			break;
++
++	if (irq == NR_IRQS)
++		panic("No available IRQ to bind to: increase NR_IRQS!\n");
++
++	return irq;
++}
++
++static int bind_evtchn_to_irq(unsigned int evtchn)
++{
++	int irq;
++
++	spin_lock(&irq_mapping_update_lock);
++
++	if ((irq = evtchn_to_irq[evtchn]) == -1) {
++		irq = find_unbound_irq();
++		evtchn_to_irq[evtchn] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
++	}
++
++	irq_bindcount[irq]++;
++
++	spin_unlock(&irq_mapping_update_lock);
++    
++	return irq;
++}
++
++static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
++{
++	evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
++	int evtchn, irq;
++
++	spin_lock(&irq_mapping_update_lock);
++
++	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
++		op.u.bind_virq.virq = virq;
++		op.u.bind_virq.vcpu = cpu;
++		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
++		evtchn = op.u.bind_virq.port;
++
++		irq = find_unbound_irq();
++		evtchn_to_irq[evtchn] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++
++		per_cpu(virq_to_irq, cpu)[virq] = irq;
++
++		bind_evtchn_to_cpu(evtchn, cpu);
++	}
++
++	irq_bindcount[irq]++;
++
++	spin_unlock(&irq_mapping_update_lock);
++    
++	return irq;
++}
++
++static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
++{
++	evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
++	int evtchn, irq;
++
++	spin_lock(&irq_mapping_update_lock);
++
++	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
++		op.u.bind_ipi.vcpu = cpu;
++		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
++		evtchn = op.u.bind_ipi.port;
++
++		irq = find_unbound_irq();
++		evtchn_to_irq[evtchn] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
++
++		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
++
++		bind_evtchn_to_cpu(evtchn, cpu);
++	}
++
++	irq_bindcount[irq]++;
++
++	spin_unlock(&irq_mapping_update_lock);
++
++	return irq;
++}
++
++static void unbind_from_irq(unsigned int irq)
++{
++	evtchn_op_t op = { .cmd = EVTCHNOP_close };
++	int evtchn = evtchn_from_irq(irq);
++
++	spin_lock(&irq_mapping_update_lock);
++
++	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
++		op.u.close.port = evtchn;
++		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
++
++		switch (type_from_irq(irq)) {
++		case IRQT_VIRQ:
++			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
++				[index_from_irq(irq)] = -1;
++			break;
++		case IRQT_IPI:
++			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
++				[index_from_irq(irq)] = -1;
++			break;
++		default:
++			break;
++		}
++
++		/* Closed ports are implicitly re-bound to VCPU0. */
++		bind_evtchn_to_cpu(evtchn, 0);
++
++		evtchn_to_irq[evtchn] = -1;
++		irq_info[irq] = IRQ_UNBOUND;
++	}
++
++	spin_unlock(&irq_mapping_update_lock);
++}
++
++int bind_evtchn_to_irqhandler(
++	unsigned int evtchn,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
++{
++	unsigned int irq;
++	int retval;
++
++	irq = bind_evtchn_to_irq(evtchn);
++	retval = request_irq(irq, handler, irqflags, devname, dev_id);
++	if (retval != 0) {
++		unbind_from_irq(irq);
++		return retval;
++	}
++
++	return irq;
++}
++EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
++
++int bind_virq_to_irqhandler(
++	unsigned int virq,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
++{
++	unsigned int irq;
++	int retval;
++
++	irq = bind_virq_to_irq(virq, cpu);
++	retval = request_irq(irq, handler, irqflags, devname, dev_id);
++	if (retval != 0) {
++		unbind_from_irq(irq);
++		return retval;
++	}
++
++	return irq;
++}
++EXPORT_SYMBOL(bind_virq_to_irqhandler);
++
++int bind_ipi_to_irqhandler(
++	unsigned int ipi,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id)
++{
++	unsigned int irq;
++	int retval;
++
++	irq = bind_ipi_to_irq(ipi, cpu);
++	retval = request_irq(irq, handler, irqflags, devname, dev_id);
++	if (retval != 0) {
++		unbind_from_irq(irq);
++		return retval;
++	}
++
++	return irq;
++}
++EXPORT_SYMBOL(bind_ipi_to_irqhandler);
++
++void unbind_from_irqhandler(unsigned int irq, void *dev_id)
++{
++	free_irq(irq, dev_id);
++	unbind_from_irq(irq);
++}
++EXPORT_SYMBOL(unbind_from_irqhandler);
++
++#ifdef CONFIG_SMP
++static void do_nothing_function(void *ign)
++{
++}
++#endif
++
++/* Rebind an evtchn so that it gets delivered to a specific cpu */
++static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
++{
++	evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
++	int evtchn;
++
++	spin_lock(&irq_mapping_update_lock);
++
++	evtchn = evtchn_from_irq(irq);
++	if (!VALID_EVTCHN(evtchn)) {
++		spin_unlock(&irq_mapping_update_lock);
++		return;
++	}
++
++	/* Send future instances of this interrupt to other vcpu. */
++	op.u.bind_vcpu.port = evtchn;
++	op.u.bind_vcpu.vcpu = tcpu;
++
++	/*
++	 * If this fails, it usually just indicates that we're dealing with a 
++	 * virq or IPI channel, which don't actually need to be rebound. Ignore
++	 * it, but don't do the xenlinux-level rebind in that case.
++	 */
++	if (HYPERVISOR_event_channel_op(&op) >= 0)
++		bind_evtchn_to_cpu(evtchn, tcpu);
++
++	spin_unlock(&irq_mapping_update_lock);
++
++	/*
++	 * Now send the new target processor a NOP IPI. When this returns, it
++	 * will check for any pending interrupts, and so service any that got 
++	 * delivered to the wrong processor by mistake.
++	 * 
++	 * XXX: The only time this is called with interrupts disabled is from
++	 * the hotplug/hotunplug path. In that case, all cpus are stopped with 
++	 * interrupts disabled, and the missed interrupts will be picked up
++	 * when they start again. This is kind of a hack.
++	 */
++	if (!irqs_disabled())
++		smp_call_function(do_nothing_function, NULL, 0, 0);
++}
++
++
++static void set_affinity_irq(unsigned irq, cpumask_t dest)
++{
++	unsigned tcpu = first_cpu(dest);
++	rebind_irq_to_cpu(irq, tcpu);
++}
++
++/*
++ * Interface to generic handling in irq.c
++ */
++
++static unsigned int startup_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn))
++		unmask_evtchn(evtchn);
++	return 0;
++}
++
++static void shutdown_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn))
++		mask_evtchn(evtchn);
++}
++
++static void enable_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn))
++		unmask_evtchn(evtchn);
++}
++
++static void disable_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn))
++		mask_evtchn(evtchn);
++}
++
++static void ack_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn)) {
++		mask_evtchn(evtchn);
++		clear_evtchn(evtchn);
++	}
++}
++
++static void end_dynirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
++		unmask_evtchn(evtchn);
++}
++
++static struct hw_interrupt_type dynirq_type = {
++	"Dynamic-irq",
++	startup_dynirq,
++	shutdown_dynirq,
++	enable_dynirq,
++	disable_dynirq,
++	ack_dynirq,
++	end_dynirq,
++	set_affinity_irq
++};
++
++static inline void pirq_unmask_notify(int pirq)
++{
++	physdev_op_t op;
++	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
++		op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
++		(void)HYPERVISOR_physdev_op(&op);
++	}
++}
++
++static inline void pirq_query_unmask(int pirq)
++{
++	physdev_op_t op;
++	op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
++	op.u.irq_status_query.irq = pirq;
++	(void)HYPERVISOR_physdev_op(&op);
++	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
++	if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
++		set_bit(pirq, &pirq_needs_unmask_notify[0]);
++}
++
++/*
++ * On startup, if there is no action associated with the IRQ then we are
++ * probing. In this case we should not share with others as it will confuse us.
++ */
++#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
++
++static unsigned int startup_pirq(unsigned int irq)
++{
++	evtchn_op_t op = { .cmd = EVTCHNOP_bind_pirq };
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn))
++		goto out;
++
++	op.u.bind_pirq.pirq  = irq;
++	/* NB. We are happy to share unless we are probing. */
++	op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
++	if (HYPERVISOR_event_channel_op(&op) != 0) {
++		if ( !probing_irq(irq) )
++			printk(KERN_INFO "Failed to obtain physical "
++			       "IRQ %d\n", irq);
++		return 0;
++	}
++	evtchn = op.u.bind_pirq.port;
++
++	pirq_query_unmask(irq_to_pirq(irq));
++
++	bind_evtchn_to_cpu(evtchn, 0);
++	evtchn_to_irq[evtchn] = irq;
++	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
++
++ out:
++	unmask_evtchn(evtchn);
++	pirq_unmask_notify(irq_to_pirq(irq));
++
++	return 0;
++}
++
++static void shutdown_pirq(unsigned int irq)
++{
++	evtchn_op_t op = { .cmd = EVTCHNOP_close };
++	int evtchn = evtchn_from_irq(irq);
++
++	if (!VALID_EVTCHN(evtchn))
++		return;
++
++	mask_evtchn(evtchn);
++
++	op.u.close.port = evtchn;
++	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
++
++	bind_evtchn_to_cpu(evtchn, 0);
++	evtchn_to_irq[evtchn] = -1;
++	irq_info[irq] = IRQ_UNBOUND;
++}
++
++static void enable_pirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn)) {
++		unmask_evtchn(evtchn);
++		pirq_unmask_notify(irq_to_pirq(irq));
++	}
++}
++
++static void disable_pirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn))
++		mask_evtchn(evtchn);
++}
++
++static void ack_pirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn)) {
++		mask_evtchn(evtchn);
++		clear_evtchn(evtchn);
++	}
++}
++
++static void end_pirq(unsigned int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
++		unmask_evtchn(evtchn);
++		pirq_unmask_notify(irq_to_pirq(irq));
++	}
++}
++
++static struct hw_interrupt_type pirq_type = {
++	"Phys-irq",
++	startup_pirq,
++	shutdown_pirq,
++	enable_pirq,
++	disable_pirq,
++	ack_pirq,
++	end_pirq,
++	set_affinity_irq
++};
++
++void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
++{
++	int evtchn = evtchn_from_irq(i);
++	shared_info_t *s = HYPERVISOR_shared_info;
++	if (!VALID_EVTCHN(evtchn))
++		return;
++	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
++	synch_set_bit(evtchn, &s->evtchn_pending[0]);
++}
++
++void notify_remote_via_irq(int irq)
++{
++	int evtchn = evtchn_from_irq(irq);
++
++	if (VALID_EVTCHN(evtchn))
++		notify_remote_via_evtchn(evtchn);
++}
++EXPORT_SYMBOL(notify_remote_via_irq);
++
++void mask_evtchn(int port)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	synch_set_bit(port, &s->evtchn_mask[0]);
++}
++EXPORT_SYMBOL(mask_evtchn);
++
++void unmask_evtchn(int port)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	unsigned int cpu = smp_processor_id();
++	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
++
++	/* Slow path (hypercall) if this is a non-local port. */
++	if (unlikely(cpu != cpu_from_evtchn(port))) {
++		evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
++				   .u.unmask.port = port };
++		(void)HYPERVISOR_event_channel_op(&op);
++		return;
++	}
++
++	synch_clear_bit(port, &s->evtchn_mask[0]);
++
++	/*
++	 * The following is basically the equivalent of 'hw_resend_irq'. Just
++	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
++	 * masked.
++	 */
++	if (synch_test_bit(port, &s->evtchn_pending[0]) && 
++	    !synch_test_and_set_bit(port / BITS_PER_LONG,
++				    &vcpu_info->evtchn_pending_sel)) {
++		vcpu_info->evtchn_upcall_pending = 1;
++		if (!vcpu_info->evtchn_upcall_mask)
++			force_evtchn_callback();
++	}
++}
++EXPORT_SYMBOL(unmask_evtchn);
++
++void irq_resume(void)
++{
++	evtchn_op_t op;
++	int         cpu, pirq, virq, ipi, irq, evtchn;
++
++	init_evtchn_cpu_bindings();
++
++	/* New event-channel space is not 'live' yet. */
++	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++		mask_evtchn(evtchn);
++
++	/* Check that no PIRQs are still bound. */
++	for (pirq = 0; pirq < NR_PIRQS; pirq++)
++		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
++
++	/* Secondary CPUs must have no VIRQ or IPI bindings. */
++	for (cpu = 1; cpu < NR_CPUS; cpu++) {
++		for (virq = 0; virq < NR_VIRQS; virq++)
++			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
++		for (ipi = 0; ipi < NR_IPIS; ipi++)
++			BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
++	}
++
++	/* No IRQ <-> event-channel mappings. */
++	for (irq = 0; irq < NR_IRQS; irq++)
++		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
++	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++		evtchn_to_irq[evtchn] = -1;
++
++	/* Primary CPU: rebind VIRQs automatically. */
++	for (virq = 0; virq < NR_VIRQS; virq++) {
++		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
++			continue;
++
++		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
++
++		/* Get a new binding from Xen. */
++		memset(&op, 0, sizeof(op));
++		op.cmd              = EVTCHNOP_bind_virq;
++		op.u.bind_virq.virq = virq;
++		op.u.bind_virq.vcpu = 0;
++		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
++		evtchn = op.u.bind_virq.port;
++        
++		/* Record the new mapping. */
++		evtchn_to_irq[evtchn] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++
++		/* Ready for use. */
++		unmask_evtchn(evtchn);
++	}
++
++	/* Primary CPU: rebind IPIs automatically. */
++	for (ipi = 0; ipi < NR_IPIS; ipi++) {
++		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
++			continue;
++
++		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
++
++		/* Get a new binding from Xen. */
++		memset(&op, 0, sizeof(op));
++		op.cmd = EVTCHNOP_bind_ipi;
++		op.u.bind_ipi.vcpu = 0;
++		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
++		evtchn = op.u.bind_ipi.port;
++        
++		/* Record the new mapping. */
++		evtchn_to_irq[evtchn] = irq;
++		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
++
++		/* Ready for use. */
++		unmask_evtchn(evtchn);
++	}
++}
++
++void __init init_IRQ(void)
++{
++	int i;
++	int cpu;
++
++	irq_ctx_init(0);
++
++	spin_lock_init(&irq_mapping_update_lock);
++
++	init_evtchn_cpu_bindings();
++
++	/* No VIRQ or IPI bindings. */
++	for (cpu = 0; cpu < NR_CPUS; cpu++) {
++		for (i = 0; i < NR_VIRQS; i++)
++			per_cpu(virq_to_irq, cpu)[i] = -1;
++		for (i = 0; i < NR_IPIS; i++)
++			per_cpu(ipi_to_irq, cpu)[i] = -1;
++	}
++
++	/* No event-channel -> IRQ mappings. */
++	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
++		evtchn_to_irq[i] = -1;
++		mask_evtchn(i); /* No event channels are 'live' right now. */
++	}
++
++	/* No IRQ -> event-channel mappings. */
++	for (i = 0; i < NR_IRQS; i++)
++		irq_info[i] = IRQ_UNBOUND;
++
++	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
++	for (i = 0; i < NR_DYNIRQS; i++) {
++		irq_bindcount[dynirq_to_irq(i)] = 0;
++
++		irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
++		irq_desc[dynirq_to_irq(i)].action  = NULL;
++		irq_desc[dynirq_to_irq(i)].depth   = 1;
++		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
++	}
++
++	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
++	for (i = 0; i < NR_PIRQS; i++)
++	{
++		irq_bindcount[pirq_to_irq(i)] = 1;
++
++#ifdef RTC_IRQ
++		/* If not domain 0, force our RTC driver to fail its probe. */
++		if ((i == RTC_IRQ) &&
++		    !(xen_start_info->flags & SIF_INITDOMAIN))
++			continue;
++#endif
++
++		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
++		irq_desc[pirq_to_irq(i)].action  = NULL;
++		irq_desc[pirq_to_irq(i)].depth   = 1;
++		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
++	}
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
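(Editorial sketch, not part of the patch: a minimal example of how a frontend driver would typically consume the interface exported above.  The event-channel number, handler, and device pointer are hypothetical, and error handling is abbreviated.)

	static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
	{
		/* process the notification for this device */
		return IRQ_HANDLED;
	}

	/* evtchn would normally be obtained from the backend via xenbus */
	int irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0, "mydev", mydev);
	if (irq < 0)
		return irq;			/* request_irq() failed   */
	...
	notify_remote_via_irq(irq);		/* kick the remote end    */
	unbind_from_irqhandler(irq, mydev);	/* drop the binding again */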
+diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/features.c linux-2.6.12-xen/arch/xen/kernel/features.c
+--- pristine-linux-2.6.12/arch/xen/kernel/features.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/kernel/features.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,38 @@
++/******************************************************************************
++ * features.c
++ *
++ * Xen feature flags.
++ *
++ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
++ */
++#include <linux/types.h>
++#include <linux/cache.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/features.h>
++
++/* When we rebase to a more recent Linux we can use __read_mostly here. */
++unsigned long xen_features[XENFEAT_NR_SUBMAPS] __cacheline_aligned;
++
++void setup_xen_features(void)
++{
++	uint32_t *flags = (uint32_t *)&xen_features[0];
++	xen_feature_info_t fi;
++	int i;
++
++	for (i=0; i<XENFEAT_NR_SUBMAPS; i++) {
++		fi.submap_idx = i;
++		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
++			break;
++		flags[i] = fi.submap;
++	}
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
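(Editorial sketch, not part of the patch: given the submap layout that setup_xen_features() fills in above, a feature bit can be tested roughly as below.  The real accessor is expected to live in asm-xen/features.h, and the flag number here is hypothetical.)

	/* flag is a XENFEAT_* bit number */
	static inline int xen_feature_test(unsigned int flag)
	{
		uint32_t *flags = (uint32_t *)&xen_features[0];
		return (flags[flag / 32] >> (flag % 32)) & 1;
	}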
+diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/gnttab.c linux-2.6.12-xen/arch/xen/kernel/gnttab.c
+--- pristine-linux-2.6.12/arch/xen/kernel/gnttab.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/kernel/gnttab.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,411 @@
++/******************************************************************************
++ * gnttab.c
++ * 
++ * Granting foreign access to our memory reservation.
++ * 
++ * Copyright (c) 2005, Christopher Clark
++ * Copyright (c) 2004-2005, K A Fraser
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <asm/pgtable.h>
++#include <asm-xen/xen-public/xen.h>
++#include <asm/fixmap.h>
++#include <asm/uaccess.h>
++#include <asm-xen/linux-public/privcmd.h>
++#include <asm-xen/gnttab.h>
++#include <asm/synch_bitops.h>
++
++#if 1
++#define ASSERT(_p)							      \
++	if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
++	#_p , __LINE__, __FILE__); *(int*)0=0; }
++#else
++#define ASSERT(_p) ((void)0)
++#endif
++
++#define WPRINTK(fmt, args...)				\
++	printk(KERN_WARNING "xen_grant: " fmt, ##args)
++
++
++EXPORT_SYMBOL(gnttab_grant_foreign_access);
++EXPORT_SYMBOL(gnttab_end_foreign_access_ref);
++EXPORT_SYMBOL(gnttab_end_foreign_access);
++EXPORT_SYMBOL(gnttab_query_foreign_access);
++EXPORT_SYMBOL(gnttab_grant_foreign_transfer);
++EXPORT_SYMBOL(gnttab_end_foreign_transfer_ref);
++EXPORT_SYMBOL(gnttab_end_foreign_transfer);
++EXPORT_SYMBOL(gnttab_alloc_grant_references);
++EXPORT_SYMBOL(gnttab_free_grant_references);
++EXPORT_SYMBOL(gnttab_free_grant_reference);
++EXPORT_SYMBOL(gnttab_claim_grant_reference);
++EXPORT_SYMBOL(gnttab_release_grant_reference);
++EXPORT_SYMBOL(gnttab_request_free_callback);
++EXPORT_SYMBOL(gnttab_grant_foreign_access_ref);
++EXPORT_SYMBOL(gnttab_grant_foreign_transfer_ref);
++
++/* External tools reserve first few grant table entries. */
++#define NR_RESERVED_ENTRIES 8
++
++#define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
++#define GNTTAB_LIST_END (NR_GRANT_ENTRIES + 1)
++
++static grant_ref_t gnttab_list[NR_GRANT_ENTRIES];
++static int gnttab_free_count;
++static grant_ref_t gnttab_free_head;
++static spinlock_t gnttab_list_lock = SPIN_LOCK_UNLOCKED;
++
++static grant_entry_t *shared;
++
++static struct gnttab_free_callback *gnttab_free_callback_list = NULL;
++
++static int
++get_free_entries(int count)
++{
++	unsigned long flags;
++	int ref;
++	grant_ref_t head;
++	spin_lock_irqsave(&gnttab_list_lock, flags);
++	if (gnttab_free_count < count) {
++		spin_unlock_irqrestore(&gnttab_list_lock, flags);
++		return -1;
++	}
++	ref = head = gnttab_free_head;
++	gnttab_free_count -= count;
++	while (count-- > 1)
++		head = gnttab_list[head];
++	gnttab_free_head = gnttab_list[head];
++	gnttab_list[head] = GNTTAB_LIST_END;
++	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++	return ref;
++}
++
++#define get_free_entry() get_free_entries(1)
++
++static void
++do_free_callbacks(void)
++{
++	struct gnttab_free_callback *callback, *next;
++
++	callback = gnttab_free_callback_list;
++	gnttab_free_callback_list = NULL;
++
++	while (callback != NULL) {
++		next = callback->next;
++		if (gnttab_free_count >= callback->count) {
++			callback->next = NULL;
++			callback->fn(callback->arg);
++		} else {
++			callback->next = gnttab_free_callback_list;
++			gnttab_free_callback_list = callback;
++		}
++		callback = next;
++	}
++}
++
++static inline void
++check_free_callbacks(void)
++{
++	if (unlikely(gnttab_free_callback_list))
++		do_free_callbacks();
++}
++
++static void
++put_free_entry(grant_ref_t ref)
++{
++	unsigned long flags;
++	spin_lock_irqsave(&gnttab_list_lock, flags);
++	gnttab_list[ref] = gnttab_free_head;
++	gnttab_free_head = ref;
++	gnttab_free_count++;
++	check_free_callbacks();
++	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++
++/*
++ * Public grant-issuing interface functions
++ */
++
++int
++gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly)
++{
++	int ref;
++    
++	if (unlikely((ref = get_free_entry()) == -1))
++		return -ENOSPC;
++
++	shared[ref].frame = frame;
++	shared[ref].domid = domid;
++	wmb();
++	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
++
++	return ref;
++}
++
++void
++gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++				unsigned long frame, int readonly)
++{
++	shared[ref].frame = frame;
++	shared[ref].domid = domid;
++	wmb();
++	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
++}
++
++
++int
++gnttab_query_foreign_access(grant_ref_t ref)
++{
++	u16 nflags;
++
++	nflags = shared[ref].flags;
++
++	return (nflags & (GTF_reading|GTF_writing));
++}
++
++int
++gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
++{
++	u16 flags, nflags;
++
++	nflags = shared[ref].flags;
++	do {
++		if ( (flags = nflags) & (GTF_reading|GTF_writing) ) {
++			printk(KERN_ALERT "WARNING: g.e. still in use!\n");
++			return 0;
++		}
++	}
++	while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) !=
++	       flags);
++
++	return 1;
++}
++
++void
++gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page)
++{
++	if (gnttab_end_foreign_access_ref(ref, readonly)) {
++		put_free_entry(ref);
++		if (page != 0) {
++			free_page(page);
++		}
++	}
++	else {
++		/* XXX This needs to be fixed so that the ref and page are
++		   placed on a list to be freed up later. */
++		printk(KERN_WARNING
++		       "WARNING: leaking g.e. and page still in use!\n");
++	}
++}
++
++int
++gnttab_grant_foreign_transfer(domid_t domid)
++{
++	int ref;
++
++	if (unlikely((ref = get_free_entry()) == -1))
++		return -ENOSPC;
++
++	shared[ref].frame = 0;
++	shared[ref].domid = domid;
++	wmb();
++	shared[ref].flags = GTF_accept_transfer;
++
++	return ref;
++}
++
++void
++gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid)
++{
++	shared[ref].frame = 0;
++	shared[ref].domid = domid;
++	wmb();
++	shared[ref].flags = GTF_accept_transfer;
++}
++
++unsigned long
++gnttab_end_foreign_transfer_ref(grant_ref_t ref)
++{
++	unsigned long frame;
++	u16           flags;
++
++	/*
++	 * If a transfer has not even started yet, try to reclaim the grant
++	 * reference and return failure (== 0).
++	 */
++	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
++		if ( synch_cmpxchg(&shared[ref].flags, flags, 0) == flags )
++			return 0;
++		cpu_relax();
++	}
++
++	/* If a transfer is in progress then wait until it is completed. */
++	while (!(flags & GTF_transfer_completed)) {
++		flags = shared[ref].flags;
++		cpu_relax();
++	}
++
++	/* Read the frame number /after/ reading completion status. */
++	rmb();
++	frame = shared[ref].frame;
++	BUG_ON(frame == 0);
++
++	return frame;
++}
++
++unsigned long
++gnttab_end_foreign_transfer(grant_ref_t ref)
++{
++	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
++	put_free_entry(ref);
++	return frame;
++}
++
++void
++gnttab_free_grant_reference(grant_ref_t ref)
++{
++
++	put_free_entry(ref);
++}
++
++void
++gnttab_free_grant_references(grant_ref_t head)
++{
++	grant_ref_t ref;
++	unsigned long flags;
++	int count = 1;
++	if (head == GNTTAB_LIST_END)
++		return;
++	spin_lock_irqsave(&gnttab_list_lock, flags);
++	ref = head;
++	while (gnttab_list[ref] != GNTTAB_LIST_END) {
++		ref = gnttab_list[ref];
++		count++;
++	}
++	gnttab_list[ref] = gnttab_free_head;
++	gnttab_free_head = head;
++	gnttab_free_count += count;
++	check_free_callbacks();
++	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++
++int
++gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
++{
++	int h = get_free_entries(count);
++
++	if (h == -1)
++		return -ENOSPC;
++
++	*head = h;
++
++	return 0;
++}
++
++int
++gnttab_claim_grant_reference(grant_ref_t *private_head)
++{
++	grant_ref_t g = *private_head;
++	if (unlikely(g == GNTTAB_LIST_END))
++		return -ENOSPC;
++	*private_head = gnttab_list[g];
++	return g;
++}
++
++void
++gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t  release)
++{
++	gnttab_list[release] = *private_head;
++	*private_head = release;
++}
++
++void
++gnttab_request_free_callback(struct gnttab_free_callback *callback,
++			     void (*fn)(void *), void *arg, u16 count)
++{
++	unsigned long flags;
++	spin_lock_irqsave(&gnttab_list_lock, flags);
++	if (callback->next)
++		goto out;
++	callback->fn = fn;
++	callback->arg = arg;
++	callback->count = count;
++	callback->next = gnttab_free_callback_list;
++	gnttab_free_callback_list = callback;
++	check_free_callbacks();
++ out:
++	spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++
++int
++gnttab_resume(void)
++{
++	gnttab_setup_table_t setup;
++	unsigned long        frames[NR_GRANT_FRAMES];
++	int                  i;
++
++	setup.dom        = DOMID_SELF;
++	setup.nr_frames  = NR_GRANT_FRAMES;
++	setup.frame_list = frames;
++
++	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
++	BUG_ON(setup.status != 0);
++
++#ifdef __ia64__
++	shared = __va(frames[0] << PAGE_SHIFT);
++	printk("grant table at %p\n", shared);
++#else
++	for (i = 0; i < NR_GRANT_FRAMES; i++)
++		set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
++#endif
++
++	return 0;
++}
++
++int
++gnttab_suspend(void)
++{
++	int i;
++
++	for (i = 0; i < NR_GRANT_FRAMES; i++)
++		clear_fixmap(FIX_GNTTAB_END - i);
++
++	return 0;
++}
++
++static int __init
++gnttab_init(void)
++{
++	int i;
++
++	if (xen_init() < 0)
++		return -ENODEV;
++
++	BUG_ON(gnttab_resume());
++
++#ifndef __ia64__
++	shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
++#endif
++
++	for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
++		gnttab_list[i] = i + 1;
++	gnttab_free_count = NR_GRANT_ENTRIES - NR_RESERVED_ENTRIES;
++	gnttab_free_head  = NR_RESERVED_ENTRIES;
++
++	printk("Grant table initialized\n");
++	return 0;
++}
++
++core_initcall(gnttab_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
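(Editorial sketch, not part of the patch: the grant/revoke cycle a split driver would typically run on top of the functions above.  The remote_domid and page variables are hypothetical, and virt_to_mfn() is used as elsewhere in this patch.)

	/* Offer one of our frames read-write to the peer domain. */
	int ref = gnttab_grant_foreign_access(remote_domid,
					      virt_to_mfn(page), 0 /* rw */);
	if (ref < 0)
		return ref;	/* -ENOSPC: no free grant entries */

	/* ... the peer maps the frame using this grant reference ... */

	/* Revoke the grant; the page is freed, or a leak is logged if
	 * the peer still has it mapped. */
	gnttab_end_foreign_access(ref, 0, (unsigned long)page);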
+diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/Makefile linux-2.6.12-xen/arch/xen/kernel/Makefile
+--- pristine-linux-2.6.12/arch/xen/kernel/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/kernel/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,18 @@
++#
++# Makefile for the linux kernel.
++#
++
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++CPPFLAGS_vmlinux.lds += -U$(XENARCH)
++
++$(obj)/vmlinux.lds.S:
++	@ln -fsn $(srctree)/arch/$(XENARCH)/kernel/vmlinux.lds.S $@
++
++extra-y += vmlinux.lds
++
++obj-y   := evtchn.o reboot.o gnttab.o features.o
++
++obj-$(CONFIG_PROC_FS) += xen_proc.o
++obj-$(CONFIG_NET)     += skbuff.o
++obj-$(CONFIG_SMP)     += smpboot.o
+diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/reboot.c linux-2.6.12-xen/arch/xen/kernel/reboot.c
+--- pristine-linux-2.6.12/arch/xen/kernel/reboot.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/kernel/reboot.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,430 @@
++#define __KERNEL_SYSCALLS__
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/reboot.h>
++#include <linux/sysrq.h>
++#include <linux/stringify.h>
++#include <asm/irq.h>
++#include <asm/mmu_context.h>
++#include <asm-xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/xen-public/dom0_ops.h>
++#include <asm-xen/xenbus.h>
++#include <linux/cpu.h>
++#include <linux/kthread.h>
++#include <asm-xen/xencons.h>
++
++#if defined(__i386__) || defined(__x86_64__)
++/*
++ * Power off function, if any
++ */
++void (*pm_power_off)(void);
++#endif
++
++#define SHUTDOWN_INVALID  -1
++#define SHUTDOWN_POWEROFF  0
++#define SHUTDOWN_REBOOT    1
++#define SHUTDOWN_SUSPEND   2
++// Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
++// report a crash, not be instructed to crash!
++// HALT is the same as POWEROFF, as far as we're concerned.  The tools use
++// the distinction when we return the reason code to them.
++#define SHUTDOWN_HALT      4
++
++void machine_restart(char * __unused)
++{
++	/* We really want to get pending console data out before we die. */
++	xencons_force_flush();
++	HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_reboot);
++}
++
++void machine_halt(void)
++{
++	machine_power_off();
++}
++
++void machine_power_off(void)
++{
++	/* We really want to get pending console data out before we die. */
++	xencons_force_flush();
++	HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_poweroff);
++}
++
++int reboot_thru_bios = 0;	/* for dmi_scan.c */
++EXPORT_SYMBOL(machine_restart);
++EXPORT_SYMBOL(machine_halt);
++EXPORT_SYMBOL(machine_power_off);
++
++
++/******************************************************************************
++ * Stop/pickle callback handling.
++ */
++
++/* Ignore multiple shutdown requests. */
++static int shutting_down = SHUTDOWN_INVALID;
++static void __shutdown_handler(void *unused);
++static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
++
++#ifndef CONFIG_HOTPLUG_CPU
++#define cpu_down(x) (-EOPNOTSUPP)
++#define cpu_up(x) (-EOPNOTSUPP)
++#endif
++
++
++static int __do_suspend(void *ignore)
++{
++	int i, j, k, fpp;
++
++	extern int gnttab_suspend(void);
++	extern int gnttab_resume(void);
++
++	extern void time_resume(void);
++	extern unsigned long max_pfn;
++	extern unsigned long *pfn_to_mfn_frame_list_list;
++	extern unsigned long *pfn_to_mfn_frame_list[];
++
++#ifdef CONFIG_SMP
++	cpumask_t prev_online_cpus;
++	int vcpu_prepare(int vcpu);
++#endif
++
++	int err = 0;
++
++	BUG_ON(smp_processor_id() != 0);
++	BUG_ON(in_interrupt());
++
++#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
++	if (num_online_cpus() > 1) {
++		printk(KERN_WARNING "Can't suspend SMP guests "
++		       "without CONFIG_HOTPLUG_CPU\n");
++		return -EOPNOTSUPP;
++	}
++#endif
++
++	xenbus_suspend();
++
++	lock_cpu_hotplug();
++#ifdef CONFIG_SMP
++	/*
++	 * Take all other CPUs offline. We hold the hotplug semaphore to
++	 * avoid other processes bringing up CPUs under our feet.
++	 */
++	cpus_clear(prev_online_cpus);
++	while (num_online_cpus() > 1) {
++		for_each_online_cpu(i) {
++			if (i == 0)
++				continue;
++			unlock_cpu_hotplug();
++			err = cpu_down(i);
++			lock_cpu_hotplug();
++			if (err != 0) {
++				printk(KERN_CRIT "Failed to take all CPUs "
++				       "down: %d.\n", err);
++				goto out_reenable_cpus;
++			}
++			cpu_set(i, prev_online_cpus);
++		}
++	}
++#endif
++
++	preempt_disable();
++
++#ifdef __i386__
++	kmem_cache_shrink(pgd_cache);
++	mm_pin_all();
++#endif
++
++	__cli();
++	preempt_enable();
++	unlock_cpu_hotplug();
++
++	gnttab_suspend();
++
++	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++	clear_fixmap(FIX_SHARED_INFO);
++
++	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
++	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
++
++	/*
++	 * We'll stop somewhere inside this hypercall. When it returns,
++	 * we'll start resuming after the restore.
++	 */
++	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
++
++	shutting_down = SHUTDOWN_INVALID; 
++
++	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++
++	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++
++	memset(empty_zero_page, 0, PAGE_SIZE);
++	     
++	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++		virt_to_mfn(pfn_to_mfn_frame_list_list);
++  
++	fpp = PAGE_SIZE/sizeof(unsigned long);
++	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
++		if ((j % fpp) == 0) {
++			k++;
++			pfn_to_mfn_frame_list_list[k] = 
++				virt_to_mfn(pfn_to_mfn_frame_list[k]);
++			j = 0;
++		}
++		pfn_to_mfn_frame_list[k][j] = 
++			virt_to_mfn(&phys_to_machine_mapping[i]);
++	}
++	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++
++	gnttab_resume();
++
++	irq_resume();
++
++	time_resume();
++
++	__sti();
++
++	xencons_resume();
++
++#ifdef CONFIG_SMP
++	for_each_cpu(i)
++		vcpu_prepare(i);
++
++#endif
++
++	/* 
++	 * Only resume xenbus /after/ we've prepared our VCPUs; otherwise
++	 * the VCPU hotplug callback can race with our vcpu_prepare
++	 */
++	xenbus_resume();
++
++#ifdef CONFIG_SMP
++ out_reenable_cpus:
++	for_each_cpu_mask(i, prev_online_cpus) {
++		j = cpu_up(i);
++		if ((j != 0) && !cpu_online(i)) {
++			printk(KERN_CRIT "Failed to bring cpu "
++			       "%d back up (%d).\n",
++			       i, j);
++			err = j;
++		}
++	}
++#endif
++
++	return err;
++}
++
++static int shutdown_process(void *__unused)
++{
++	static char *envp[] = { "HOME=/", "TERM=linux", 
++				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
++	static char *restart_argv[]  = { "/sbin/reboot", NULL };
++	static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
++
++	extern asmlinkage long sys_reboot(int magic1, int magic2,
++					  unsigned int cmd, void *arg);
++
++	daemonize("shutdown");
++
++	switch (shutting_down) {
++	case SHUTDOWN_POWEROFF:
++	case SHUTDOWN_HALT:
++		if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
++			sys_reboot(LINUX_REBOOT_MAGIC1,
++				   LINUX_REBOOT_MAGIC2,
++				   LINUX_REBOOT_CMD_POWER_OFF,
++				   NULL);
++		}
++		break;
++
++	case SHUTDOWN_REBOOT:
++		if (execve("/sbin/reboot", restart_argv, envp) < 0) {
++			sys_reboot(LINUX_REBOOT_MAGIC1,
++				   LINUX_REBOOT_MAGIC2,
++				   LINUX_REBOOT_CMD_RESTART,
++				   NULL);
++		}
++		break;
++	}
++
++	shutting_down = SHUTDOWN_INVALID; /* could try again */
++
++	return 0;
++}
++
++static int kthread_create_on_cpu(int (*f)(void *arg),
++				 void *arg,
++				 const char *name,
++				 int cpu)
++{
++	struct task_struct *p;
++	p = kthread_create(f, arg, name);
++	if (IS_ERR(p))
++		return PTR_ERR(p);
++	kthread_bind(p, cpu);
++	wake_up_process(p);
++	return 0;
++}
++
++static void __shutdown_handler(void *unused)
++{
++	int err;
++
++	if (shutting_down != SHUTDOWN_SUSPEND)
++		err = kernel_thread(shutdown_process, NULL,
++				    CLONE_FS | CLONE_FILES);
++	else
++		err = kthread_create_on_cpu(__do_suspend, NULL, "suspend", 0);
++
++	if ( err < 0 ) {
++		printk(KERN_WARNING "Error creating shutdown process (%d): "
++		       "retrying...\n", -err);
++		schedule_delayed_work(&shutdown_work, HZ/2);
++	}
++}
++
++static void shutdown_handler(struct xenbus_watch *watch,
++			     const char **vec, unsigned int len)
++{
++	char *str;
++	xenbus_transaction_t xbt;
++	int err;
++
++	if (shutting_down != SHUTDOWN_INVALID)
++		return;
++
++ again:
++	err = xenbus_transaction_start(&xbt);
++	if (err)
++		return;
++	str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
++	/* Ignore read errors and empty reads. */
++	if (XENBUS_IS_ERR_READ(str)) {
++		xenbus_transaction_end(xbt, 1);
++		return;
++	}
++
++	xenbus_write(xbt, "control", "shutdown", "");
++
++	err = xenbus_transaction_end(xbt, 0);
++	if (err == -EAGAIN) {
++		kfree(str);
++		goto again;
++	}
++
++	if (strcmp(str, "poweroff") == 0)
++		shutting_down = SHUTDOWN_POWEROFF;
++	else if (strcmp(str, "reboot") == 0)
++		shutting_down = SHUTDOWN_REBOOT;
++	else if (strcmp(str, "suspend") == 0)
++		shutting_down = SHUTDOWN_SUSPEND;
++	else if (strcmp(str, "halt") == 0)
++		shutting_down = SHUTDOWN_HALT;
++	else {
++		printk("Ignoring shutdown request: %s\n", str);
++		shutting_down = SHUTDOWN_INVALID;
++	}
++
++	if (shutting_down != SHUTDOWN_INVALID)
++		schedule_work(&shutdown_work);
++
++	kfree(str);
++}
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
++			  unsigned int len)
++{
++	char sysrq_key = '\0';
++	xenbus_transaction_t xbt;
++	int err;
++
++ again:
++	err = xenbus_transaction_start(&xbt);
++	if (err)
++		return;
++	if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
++		printk(KERN_ERR "Unable to read sysrq code in "
++		       "control/sysrq\n");
++		xenbus_transaction_end(xbt, 1);
++		return;
++	}
++
++	if (sysrq_key != '\0')
++		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
++
++	err = xenbus_transaction_end(xbt, 0);
++	if (err == -EAGAIN)
++		goto again;
++
++	if (sysrq_key != '\0') {
++		handle_sysrq(sysrq_key, NULL, NULL);
++	}
++}
++#endif
++
++static struct xenbus_watch shutdown_watch = {
++	.node = "control/shutdown",
++	.callback = shutdown_handler
++};
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static struct xenbus_watch sysrq_watch = {
++	.node ="control/sysrq",
++	.callback = sysrq_handler
++};
++#endif
++
++static struct notifier_block xenstore_notifier;
++
++static int setup_shutdown_watcher(struct notifier_block *notifier,
++                                  unsigned long event,
++                                  void *data)
++{
++	int err1 = 0;
++#ifdef CONFIG_MAGIC_SYSRQ
++	int err2 = 0;
++#endif
++
++	err1 = register_xenbus_watch(&shutdown_watch);
++#ifdef CONFIG_MAGIC_SYSRQ
++	err2 = register_xenbus_watch(&sysrq_watch);
++#endif
++
++	if (err1) {
++		printk(KERN_ERR "Failed to set shutdown watcher\n");
++	}
++    
++#ifdef CONFIG_MAGIC_SYSRQ
++	if (err2) {
++		printk(KERN_ERR "Failed to set sysrq watcher\n");
++	}
++#endif
++
++	return NOTIFY_DONE;
++}
++
++static int __init setup_shutdown_event(void)
++{
++    
++	xenstore_notifier.notifier_call = setup_shutdown_watcher;
++
++	register_xenstore_notifier(&xenstore_notifier);
++    
++	return 0;
++}
++
++subsys_initcall(setup_shutdown_event);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/skbuff.c linux-2.6.12-xen/arch/xen/kernel/skbuff.c
+--- pristine-linux-2.6.12/arch/xen/kernel/skbuff.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/kernel/skbuff.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,142 @@
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/init.h>
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/hypervisor.h>
++
++/* Referenced in netback.c. */
++/*static*/ kmem_cache_t *skbuff_cachep;
++
++#define MAX_SKBUFF_ORDER 4
++static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
++
++static struct {
++	int size;
++	kmem_cache_t *cachep;
++} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
++
++struct sk_buff *alloc_skb(unsigned int length, int gfp_mask)
++{
++	int order, i;
++	kmem_cache_t *cachep;
++
++	length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);
++
++	if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
++		for (i = 0; skbuff_small[i].size < length; i++)
++			continue;
++		cachep = skbuff_small[i].cachep;
++	} else {
++		order = get_order(length);
++		if (order > MAX_SKBUFF_ORDER) {
++			printk(KERN_ALERT "Attempt to allocate order %d "
++			       "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
++			return NULL;
++		}
++		cachep = skbuff_order_cachep[order];
++	}
++
++	length -= sizeof(struct skb_shared_info);
++
++	return alloc_skb_from_cache(cachep, length, gfp_mask);
++}
++
++struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
++{
++	struct sk_buff *skb;
++	int order;
++
++	length = SKB_DATA_ALIGN(length + 16);
++	order = get_order(length + sizeof(struct skb_shared_info));
++	if (order > MAX_SKBUFF_ORDER) {
++		printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
++		       "Increase MAX_SKBUFF_ORDER.\n", order);
++		return NULL;
++	}
++
++	skb = alloc_skb_from_cache(
++		skbuff_order_cachep[order], length, gfp_mask);
++	if (skb != NULL)
++		skb_reserve(skb, 16);
++
++	return skb;
++}
++
++static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
++{
++	int order = 0;
++
++	while (skbuff_order_cachep[order] != cachep)
++		order++;
++
++	/* Do our best to allocate contiguous memory but fall back to IOMMU. */
++	if (order != 0)
++		(void)xen_create_contiguous_region(
++			(unsigned long)buf, order, 0);
++
++	scrub_pages(buf, 1 << order);
++}
++
++static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
++{
++	int order = 0;
++
++	while (skbuff_order_cachep[order] != cachep)
++		order++;
++
++	if (order != 0)
++		xen_destroy_contiguous_region((unsigned long)buf, order);
++}
++
++static int __init skbuff_init(void)
++{
++	static char name[MAX_SKBUFF_ORDER + 1][20];
++	static char small_name[ARRAY_SIZE(skbuff_small)][20];
++	unsigned long size;
++	int i, order;
++
++	for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
++		size = skbuff_small[i].size;
++		sprintf(small_name[i], "xen-skb-%lu", size);
++		/*
++		 * No ctor/dtor: objects do not span page boundaries, and they
++		 * are only used on transmit path so no need for scrubbing.
++		 */
++		skbuff_small[i].cachep = kmem_cache_create(
++			small_name[i], size, size, 0, NULL, NULL);
++	}
++
++	for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
++		size = PAGE_SIZE << order;
++		sprintf(name[order], "xen-skb-%lu", size);
++		skbuff_order_cachep[order] = kmem_cache_create(
++			name[order], size, size, 0, skbuff_ctor, skbuff_dtor);
++	}
++
++	skbuff_cachep = skbuff_order_cachep[0];
++
++	return 0;
++}
++core_initcall(skbuff_init);
++
++EXPORT_SYMBOL(__dev_alloc_skb);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/smpboot.c linux-2.6.12-xen/arch/xen/kernel/smpboot.c
+--- pristine-linux-2.6.12/arch/xen/kernel/smpboot.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/kernel/smpboot.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,439 @@
++/*
++ *	Xen SMP booting functions
++ *
++ *	See arch/i386/kernel/smpboot.c for copyright and credits for derived
++ *	portions of this file.
++ */
++
++#include <linux/module.h>
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/smp_lock.h>
++#include <linux/irq.h>
++#include <linux/bootmem.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/percpu.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/pgalloc.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/xen-public/vcpu.h>
++#include <asm-xen/xenbus.h>
++
++#ifdef CONFIG_SMP_ALTERNATIVES
++#include <asm/smp_alt.h>
++#endif
++
++extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
++extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
++
++extern void local_setup_timer(unsigned int cpu);
++extern void local_teardown_timer(unsigned int cpu);
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void system_call(void);
++extern void smp_trap_init(trap_info_t *);
++
++extern cpumask_t cpu_initialized;
++
++/* Number of siblings per CPU package */
++int smp_num_siblings = 1;
++int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
++EXPORT_SYMBOL(phys_proc_id);
++int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
++EXPORT_SYMBOL(cpu_core_id);
++
++cpumask_t cpu_online_map;
++EXPORT_SYMBOL(cpu_online_map);
++cpumask_t cpu_possible_map;
++EXPORT_SYMBOL(cpu_possible_map);
++
++struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_data);
++
++#ifdef CONFIG_HOTPLUG_CPU
++DEFINE_PER_CPU(int, cpu_state) = { 0 };
++#endif
++
++static DEFINE_PER_CPU(int, resched_irq);
++static DEFINE_PER_CPU(int, callfunc_irq);
++static char resched_name[NR_CPUS][15];
++static char callfunc_name[NR_CPUS][15];
++
++u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++void *xquad_portio;
++
++cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
++cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_core_map);
++
++#if defined(__i386__)
++u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++#elif !defined(CONFIG_X86_IO_APIC)
++unsigned int maxcpus = NR_CPUS;
++#endif
++
++void __init smp_alloc_memory(void)
++{
++}
++
++static void xen_smp_intr_init(unsigned int cpu)
++{
++	sprintf(resched_name[cpu], "resched%d", cpu);
++	per_cpu(resched_irq, cpu) =
++		bind_ipi_to_irqhandler(
++			RESCHEDULE_VECTOR,
++			cpu,
++			smp_reschedule_interrupt,
++			SA_INTERRUPT,
++			resched_name[cpu],
++			NULL);
++	BUG_ON(per_cpu(resched_irq, cpu) < 0);
++
++	sprintf(callfunc_name[cpu], "callfunc%d", cpu);
++	per_cpu(callfunc_irq, cpu) =
++		bind_ipi_to_irqhandler(
++			CALL_FUNCTION_VECTOR,
++			cpu,
++			smp_call_function_interrupt,
++			SA_INTERRUPT,
++			callfunc_name[cpu],
++			NULL);
++	BUG_ON(per_cpu(callfunc_irq, cpu) < 0);
++
++	if (cpu != 0)
++		local_setup_timer(cpu);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void xen_smp_intr_exit(unsigned int cpu)
++{
++	if (cpu != 0)
++		local_teardown_timer(cpu);
++
++	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
++	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
++}
++#endif
++
++static void cpu_bringup(void)
++{
++	if (!cpu_isset(smp_processor_id(), cpu_initialized))
++		cpu_init();
++	local_irq_enable();
++	cpu_idle();
++}
++
++void vcpu_prepare(int vcpu)
++{
++	vcpu_guest_context_t ctxt;
++	struct task_struct *idle = idle_task(vcpu);
++
++	if (vcpu == 0)
++		return;
++
++	memset(&ctxt, 0, sizeof(ctxt));
++
++	ctxt.flags = VGCF_IN_KERNEL;
++	ctxt.user_regs.ds = __USER_DS;
++	ctxt.user_regs.es = __USER_DS;
++	ctxt.user_regs.fs = 0;
++	ctxt.user_regs.gs = 0;
++	ctxt.user_regs.ss = __KERNEL_DS;
++	ctxt.user_regs.eip = (unsigned long)cpu_bringup;
++	ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
++
++	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
++
++	smp_trap_init(ctxt.trap_ctxt);
++
++	ctxt.ldt_ents = 0;
++
++	ctxt.gdt_frames[0] = virt_to_mfn(cpu_gdt_descr[vcpu].address);
++	ctxt.gdt_ents      = cpu_gdt_descr[vcpu].size / 8;
++
++#ifdef __i386__
++	ctxt.user_regs.cs = __KERNEL_CS;
++	ctxt.user_regs.esp = idle->thread.esp;
++
++	ctxt.kernel_ss = __KERNEL_DS;
++	ctxt.kernel_sp = idle->thread.esp0;
++
++	ctxt.event_callback_cs     = __KERNEL_CS;
++	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
++	ctxt.failsafe_callback_cs  = __KERNEL_CS;
++	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++
++	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
++#else
++	ctxt.user_regs.cs = __KERNEL_CS | 3;
++	ctxt.user_regs.esp = idle->thread.rsp;
++
++	ctxt.kernel_ss = __KERNEL_DS;
++	ctxt.kernel_sp = idle->thread.rsp0;
++
++	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
++	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++	ctxt.syscall_callback_eip  = (unsigned long)system_call;
++
++	ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
++
++	ctxt.gs_base_kernel = (unsigned long)(cpu_pda + vcpu);
++#endif
++
++	BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt));
++}
++
++void __init smp_prepare_cpus(unsigned int max_cpus)
++{
++	int cpu, rc;
++	struct task_struct *idle;
++
++	cpu_data[0] = boot_cpu_data;
++
++	cpu_2_logical_apicid[0] = 0;
++	x86_cpu_to_apicid[0] = 0;
++
++	current_thread_info()->cpu = 0;
++	cpu_sibling_map[0] = cpumask_of_cpu(0);
++	cpu_core_map[0]    = cpumask_of_cpu(0);
++
++	if (max_cpus != 0)
++		xen_smp_intr_init(0);
++
++	for (cpu = 1; cpu < max_cpus; cpu++) {
++		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
++		if (rc == -ENOENT)
++			break;
++		BUG_ON(rc != 0);
++
++		cpu_data[cpu] = boot_cpu_data;
++		cpu_2_logical_apicid[cpu] = cpu;
++		x86_cpu_to_apicid[cpu] = cpu;
++
++		idle = fork_idle(cpu);
++		if (IS_ERR(idle))
++			panic("failed fork for CPU %d", cpu);
++
++#ifdef __x86_64__
++		cpu_pda[cpu].pcurrent = idle;
++		cpu_pda[cpu].cpunumber = cpu;
++		per_cpu(init_tss,cpu).rsp0 = idle->thread.rsp;
++		clear_ti_thread_flag(idle->thread_info, TIF_FORK);
++#endif
++
++		irq_ctx_init(cpu);
++
++		cpu_gdt_descr[cpu].address =
++			__get_free_page(GFP_KERNEL|__GFP_ZERO);
++		BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
++		cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
++		memcpy((void *)cpu_gdt_descr[cpu].address,
++		       (void *)cpu_gdt_descr[0].address,
++		       cpu_gdt_descr[0].size);
++		make_page_readonly(
++			(void *)cpu_gdt_descr[cpu].address,
++			XENFEAT_writable_descriptor_tables);
++
++		cpu_set(cpu, cpu_possible_map);
++#ifdef CONFIG_HOTPLUG_CPU
++		if (xen_start_info->flags & SIF_INITDOMAIN)
++			cpu_set(cpu, cpu_present_map);
++#else
++		cpu_set(cpu, cpu_present_map);
++#endif
++
++		vcpu_prepare(cpu);
++	}
++
++	/* Currently, Xen gives no dynamic NUMA/HT info. */
++	for (cpu = 1; cpu < NR_CPUS; cpu++) {
++		cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
++		cpu_core_map[cpu]    = cpumask_of_cpu(cpu);
++	}
++
++#ifdef CONFIG_X86_IO_APIC
++	/*
++	 * Here we can be sure that there is an IO-APIC in the system. Let's
++	 * go and set it up:
++	 */
++	if (!skip_ioapic_setup && nr_ioapics)
++		setup_IO_APIC();
++#endif
++}
++
++void __devinit smp_prepare_boot_cpu(void)
++{
++	cpu_possible_map = cpumask_of_cpu(0);
++	cpu_present_map  = cpumask_of_cpu(0);
++	cpu_online_map   = cpumask_of_cpu(0);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
++ * But do it early enough to catch critical for_each_present_cpu() loops
++ * in i386-specific code.
++ */
++static int __init initialize_cpu_present_map(void)
++{
++	cpu_present_map = cpu_possible_map;
++	return 0;
++}
++core_initcall(initialize_cpu_present_map);
++
++static void vcpu_hotplug(unsigned int cpu)
++{
++	int err;
++	char dir[32], state[32];
++
++	if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
++		return;
++
++	sprintf(dir, "cpu/%d", cpu);
++	err = xenbus_scanf(XBT_NULL, dir, "availability", "%s", state);
++	if (err != 1) {
++		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
++		return;
++	}
++
++	if (strcmp(state, "online") == 0) {
++		(void)cpu_up(cpu);
++	} else if (strcmp(state, "offline") == 0) {
++		(void)cpu_down(cpu);
++	} else {
++		printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
++		       state, cpu);
++	}
++}
++
++static void handle_vcpu_hotplug_event(
++	struct xenbus_watch *watch, const char **vec, unsigned int len)
++{
++	int cpu;
++	char *cpustr;
++	const char *node = vec[XS_WATCH_PATH];
++
++	if ((cpustr = strstr(node, "cpu/")) != NULL) {
++		sscanf(cpustr, "cpu/%d", &cpu);
++		vcpu_hotplug(cpu);
++	}
++}
++
++static int setup_cpu_watcher(struct notifier_block *notifier,
++			      unsigned long event, void *data)
++{
++	int i;
++
++	static struct xenbus_watch cpu_watch = {
++		.node = "cpu",
++		.callback = handle_vcpu_hotplug_event };
++	(void)register_xenbus_watch(&cpu_watch);
++
++	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
++		for_each_cpu(i)
++			vcpu_hotplug(i);
++		printk(KERN_INFO "Brought up %ld CPUs\n",
++		       (long)num_online_cpus());
++	}
++
++	return NOTIFY_DONE;
++}
++
++static int __init setup_vcpu_hotplug_event(void)
++{
++	static struct notifier_block xsn_cpu = {
++		.notifier_call = setup_cpu_watcher };
++	register_xenstore_notifier(&xsn_cpu);
++	return 0;
++}
++
++arch_initcall(setup_vcpu_hotplug_event);
++
++int __cpu_disable(void)
++{
++	cpumask_t map = cpu_online_map;
++	int cpu = smp_processor_id();
++
++	if (cpu == 0)
++		return -EBUSY;
++
++	cpu_clear(cpu, map);
++	fixup_irqs(map);
++	cpu_clear(cpu, cpu_online_map);
++
++	return 0;
++}
++
++void __cpu_die(unsigned int cpu)
++{
++	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
++		current->state = TASK_UNINTERRUPTIBLE;
++		schedule_timeout(HZ/10);
++	}
++
++	xen_smp_intr_exit(cpu);
++
++#ifdef CONFIG_SMP_ALTERNATIVES
++	if (num_online_cpus() == 1)
++		unprepare_for_smp();
++#endif
++}
++
++#else /* !CONFIG_HOTPLUG_CPU */
++
++int __cpu_disable(void)
++{
++	return -ENOSYS;
++}
++
++void __cpu_die(unsigned int cpu)
++{
++	BUG();
++}
++
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __devinit __cpu_up(unsigned int cpu)
++{
++#ifdef CONFIG_SMP_ALTERNATIVES
++	if (num_online_cpus() == 1)
++		prepare_for_smp();
++#endif
++
++	xen_smp_intr_init(cpu);
++	cpu_set(cpu, cpu_online_map);
++	if (HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL) != 0)
++		BUG();
++
++	return 0;
++}
++
++void __init smp_cpus_done(unsigned int max_cpus)
++{
++}
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++	/* Dummy function. */
++	return 0;
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/xen_proc.c linux-2.6.12-xen/arch/xen/kernel/xen_proc.c
+--- pristine-linux-2.6.12/arch/xen/kernel/xen_proc.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/kernel/xen_proc.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,29 @@
++
++#include <linux/config.h>
++#include <linux/proc_fs.h>
++#include <asm-xen/xen_proc.h>
++
++static struct proc_dir_entry *xen_base;
++
++struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
++{
++	if ( xen_base == NULL )
++		if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
++			panic("Couldn't create /proc/xen");
++	return create_proc_entry(name, mode, xen_base);
++}
++
++void remove_xen_proc_entry(const char *name)
++{
++	remove_proc_entry(name, xen_base);
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/arch/xen/Makefile linux-2.6.12-xen/arch/xen/Makefile
+--- pristine-linux-2.6.12/arch/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,93 @@
++#
++# xen/Makefile
++#
++# This file is included by the global makefile so that you can add your own
++# architecture-specific flags and dependencies. Remember to do have actions
++# for "archclean" cleaning up for this architecture.
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License.  See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (C) 2004 by Christian Limpach
++#
++
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++# pick up headers from include/asm-xen/asm in preference over include/asm
++NOSTDINC_FLAGS  = -nostdinc -iwithprefix include/asm-xen -Iinclude/asm-xen -iwithprefix include
++ifneq ($(KBUILD_SRC),)
++NOSTDINC_FLAGS += -I$(srctree)/include/asm-xen
++endif
++
++# make uname return the processor arch
++UTS_MACHINE := $(XENARCH)
++
++core-y	+= arch/xen/kernel/
++
++.PHONY: include2/asm
++include2/asm:
++ifneq ($(KBUILD_SRC),)
++	@echo '  SYMLINK ../include/asm-$(XENARCH) -> include2/asm'
++	$(Q)ln -fsn ../include/asm-$(XENARCH) include2/asm
++endif
++
++include/.asm-ignore: include/asm
++	@rm -f include/.asm-ignore
++	@mv include/asm include/.asm-ignore
++	@echo '  SYMLINK include/asm -> include/asm-$(XENARCH)'
++	$(Q)if [ ! -d include ]; then mkdir -p include; fi;
++	@ln -fsn $(srctree)/include/asm-$(XENARCH) include/asm
++
++include/asm-xen/asm:
++	@echo '  SYMLINK $@ -> include/asm-xen/asm-$(XENARCH)'
++	@mkdir -p include/asm-xen
++	@ln -fsn $(srctree)/include/asm-xen/asm-$(XENARCH) $@
++
++arch/xen/arch:
++	@rm -f $@
++	@mkdir -p arch/xen
++	@ln -fsn $(srctree)/arch/xen/$(XENARCH) $@
++
++arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
++				   include/config/MARKER
++
++include/asm-$(ARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
++	$(call filechk,gen-asm-offsets)
++
++prepare: include/.asm-ignore include/asm-xen/asm \
++	arch/xen/arch include/asm-$(ARCH)/asm_offsets.h include2/asm ;
++
++all: vmlinuz
++
++vmlinuz: vmlinux
++	$(Q)$(MAKE) $(build)=arch/xen/boot vmlinuz
++
++bzImage: vmlinuz
++	$(Q)$(MAKE) $(build)=arch/xen/boot bzImage
++
++XINSTALL_NAME ?= $(KERNELRELEASE)
++install: vmlinuz
++install kernel_install:
++	mkdir -p $(INSTALL_PATH)/boot
++	ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
++	rm -f $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++	install -m0644 vmlinuz $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++	install -m0644 vmlinux $(INSTALL_PATH)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++	install -m0664 .config $(INSTALL_PATH)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++	install -m0664 System.map $(INSTALL_PATH)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++	ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
++
++archclean:
++	@if [ -e arch/xen/arch ]; then $(MAKE) $(clean)=arch/xen/arch; fi;
++	@rm -f arch/xen/arch include/.asm-ignore include/asm-xen/asm
++	@rm -f vmlinux-stripped vmlinuz
++
++define archhelp
++  echo  '* vmlinuz	- Compressed kernel image'
++  echo  '  install	- Install kernel image and config file'
++endef
++
++ifneq ($(XENARCH),)
++include	$(srctree)/arch/xen/$(XENARCH)/Makefile
++endif
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/ia32/ia32entry.S linux-2.6.12-xen/arch/xen/x86_64/ia32/ia32entry.S
+--- pristine-linux-2.6.12/arch/xen/x86_64/ia32/ia32entry.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/ia32/ia32entry.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,629 @@
++/*
++ * Compatibility mode system call entry point for x86-64. 
++ * 		
++ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
++ */		 
++
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/offset.h>
++#include <asm/current.h>
++#include <asm/errno.h>
++#include <asm/ia32_unistd.h>	
++#include <asm/thread_info.h>	
++#include <asm/segment.h>
++#include <asm/vsyscall32.h>
++#include <linux/linkage.h>
++
++#define __XEN_X86_64 1
++	
++	.macro IA32_ARG_FIXUP noebp=0
++	movl	%edi,%r8d
++	.if \noebp
++	.else
++	movl	%ebp,%r9d
++	.endif
++	xchg	%ecx,%esi
++	movl	%ebx,%edi
++	movl	%edx,%edx	/* zero extension */
++	.endm 
++
++	/* clobbers %eax */	
++	.macro  CLEAR_RREGS
++	xorl 	%eax,%eax
++	movq	%rax,R11(%rsp)
++	movq	%rax,R10(%rsp)
++	movq	%rax,R9(%rsp)
++	movq	%rax,R8(%rsp)
++	.endm
++
++#if defined (__XEN_X86_64)
++#include "../kernel/xen_entry.S"
++		
++#define	__swapgs
++#define __cli
++#define __sti	
++#else
++/*
++ * Use the native instructions
++ */	
++#define	__swapgs	swapgs
++#define __cli		cli
++#define __sti		sti	
++#endif			
++
++/*
++ * 32bit SYSENTER instruction entry.
++ *
++ * Arguments:
++ * %eax	System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp user stack
++ * 0(%ebp) Arg6	
++ * 	
++ * Interrupts off.
++ *	
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below.	Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.
++ */ 	
++ENTRY(ia32_sysenter_target)
++	CFI_STARTPROC
++	__swapgs 
++	movq	%gs:pda_kernelstack, %rsp
++	addq	$(PDA_STACKOFFSET),%rsp
++	XEN_UNBLOCK_EVENTS(%r11)	
++	__sti
++ 	movl	%ebp,%ebp		/* zero extension */
++	pushq	$__USER32_DS
++	pushq	%rbp
++	pushfq
++	movl	$VSYSCALL32_SYSEXIT, %r10d
++	pushq	$__USER32_CS
++	movl	%eax, %eax
++	pushq	%r10
++	pushq	%rax
++	cld
++	SAVE_ARGS 0,0,1
++ 	/* no need to do an access_ok check here because rbp has been
++ 	   32bit zero extended */ 
++1:	movl	(%rbp),%r9d
++ 	.section __ex_table,"a"
++ 	.quad 1b,ia32_badarg
++ 	.previous	
++	GET_THREAD_INFO(%r10)
++	testl  $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++	jnz  sysenter_tracesys
++sysenter_do_call:	
++	cmpl	$(IA32_NR_syscalls),%eax
++	jae	ia32_badsys
++	IA32_ARG_FIXUP 1
++	call	*ia32_sys_call_table(,%rax,8)
++	movq	%rax,RAX-ARGOFFSET(%rsp)
++	GET_THREAD_INFO(%r10)
++	XEN_BLOCK_EVENTS(%r11)	
++	__cli
++	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
++	jnz	int_ret_from_sys_call
++	/* clear IF, that popfq doesn't enable interrupts early */
++	andl  $~0x200,EFLAGS-R11(%rsp) 
++	RESTORE_ARGS 1,24,1,1,1,1
++	popfq
++	popq	%rcx				/* User %esp */
++	movl	$VSYSCALL32_SYSEXIT,%edx	/* User %eip */
++	__swapgs
++	XEN_UNBLOCK_EVENTS(%r11)		
++	__sti		/* sti only takes effect after the next instruction */
++	/* sysexit */
++	.byte	0xf, 0x35  /* TBD */
++
++sysenter_tracesys:
++	SAVE_REST
++	CLEAR_RREGS
++	movq	$-ENOSYS,RAX(%rsp)	/* really needed? */
++	movq	%rsp,%rdi        /* &pt_regs -> arg1 */
++	call	syscall_trace_enter
++	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
++	RESTORE_REST
++	movl	%ebp, %ebp
++	/* no need to do an access_ok check here because rbp has been
++	   32bit zero extended */ 
++1:	movl	(%rbp),%r9d
++	.section __ex_table,"a"
++	.quad 1b,ia32_badarg
++	.previous
++	jmp	sysenter_do_call
++	CFI_ENDPROC
++
++/*
++ * 32bit SYSCALL instruction entry.
++ *
++ * Arguments:
++ * %eax	System call number.
++ * %ebx Arg1
++ * %ecx return EIP 
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg2    [note: not saved in the stack frame, should not be touched]
++ * %esp user stack 
++ * 0(%esp) Arg6
++ * 	
++ * Interrupts off.
++ *	
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below.	Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.	
++ */ 	
++ENTRY(ia32_cstar_target)
++	CFI_STARTPROC
++	__swapgs
++	movl	%esp,%r8d
++	movq	%gs:pda_kernelstack,%rsp
++	XEN_UNBLOCK_EVENTS(%r11)	
++	__sti
++	SAVE_ARGS 8,1,1
++	movl 	%eax,%eax	/* zero extension */
++	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
++	movq	%rcx,RIP-ARGOFFSET(%rsp)
++	movq	%rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
++	movl	%ebp,%ecx
++	movq	$__USER32_CS,CS-ARGOFFSET(%rsp)
++	movq	$__USER32_DS,SS-ARGOFFSET(%rsp)
++	movq	%r11,EFLAGS-ARGOFFSET(%rsp)
++	movq	%r8,RSP-ARGOFFSET(%rsp)	
++	/* no need to do an access_ok check here because r8 has been
++	   32bit zero extended */ 
++	/* hardware stack frame is complete now */	
++1:	movl	(%r8),%r9d
++	.section __ex_table,"a"
++	.quad 1b,ia32_badarg
++	.previous	
++	GET_THREAD_INFO(%r10)
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++	jnz   cstar_tracesys
++cstar_do_call:	
++	cmpl $IA32_NR_syscalls,%eax
++	jae  ia32_badsys
++	IA32_ARG_FIXUP 1
++	call *ia32_sys_call_table(,%rax,8)
++	movq %rax,RAX-ARGOFFSET(%rsp)
++	GET_THREAD_INFO(%r10)
++	XEN_BLOCK_EVENTS(%r11)		
++	__cli
++	testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
++	jnz  int_ret_from_sys_call
++	RESTORE_ARGS 1,-ARG_SKIP,1,1,1
++	movl RIP-ARGOFFSET(%rsp),%ecx
++	movl EFLAGS-ARGOFFSET(%rsp),%r11d	
++	movl RSP-ARGOFFSET(%rsp),%esp
++	__swapgs
++	sysretl  /* TBD */
++	
++cstar_tracesys:	
++	SAVE_REST
++	CLEAR_RREGS
++	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
++	movq %rsp,%rdi        /* &pt_regs -> arg1 */
++	call syscall_trace_enter
++	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
++	RESTORE_REST
++	movl RSP-ARGOFFSET(%rsp), %r8d
++	/* no need to do an access_ok check here because r8 has been
++	   32bit zero extended */ 
++1:	movl	(%r8),%r9d
++	.section __ex_table,"a"
++	.quad 1b,ia32_badarg
++	.previous
++	jmp cstar_do_call
++				
++ia32_badarg:
++	movq $-EFAULT,%rax
++	jmp ia32_sysret
++	CFI_ENDPROC
++
++/* 
++ * Emulated IA32 system calls via int 0x80. 
++ *
++ * Arguments:	 
++ * %eax	System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg6    [note: not saved in the stack frame, should not be touched]
++ *
++ * Notes:
++ * Uses the same stack frame as the x86-64 version.	
++ * All registers except %eax must be saved (but ptrace may violate that)
++ * Arguments are zero extended. For system calls that want sign extension and
++ * take long arguments a wrapper is needed. Most calls can just be called
++ * directly.
++ * Assumes it is only called from user space and entered with interrupts off.	
++ */ 				
++
++ENTRY(ia32_syscall)
++	CFI_STARTPROC
++	__swapgs
++	XEN_UNBLOCK_EVENTS(%r11)
++	__sti
++	movq (%rsp),%rcx
++	movq 8(%rsp),%r11
++        addq $0x10,%rsp /* skip rcx and r11 */
++	movl %eax,%eax
++	pushq %rax
++	cld
++/* 1:	jmp 1b	 */
++	/* note the registers are not zero extended to the sf.
++	   this could be a problem. */
++	SAVE_ARGS 0,0,1
++	GET_THREAD_INFO(%r10)
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++	jnz ia32_tracesys
++ia32_do_syscall:	
++	cmpl $(IA32_NR_syscalls),%eax
++	jae  ia32_badsys
++	IA32_ARG_FIXUP
++	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
++ia32_sysret:
++	movq %rax,RAX-ARGOFFSET(%rsp)
++	jmp int_ret_from_sys_call 
++
++ia32_tracesys:			 
++	SAVE_REST
++	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
++	movq %rsp,%rdi        /* &pt_regs -> arg1 */
++	call syscall_trace_enter
++	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
++	RESTORE_REST
++	jmp ia32_do_syscall
++
++ia32_badsys:
++	movq $0,ORIG_RAX-ARGOFFSET(%rsp)
++	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
++	jmp int_ret_from_sys_call
++
++ni_syscall:
++	movq %rax,%rdi
++	jmp  sys32_ni_syscall			
++
++quiet_ni_syscall:
++	movq $-ENOSYS,%rax
++	ret
++	CFI_ENDPROC
++	
++	.macro PTREGSCALL label, func, arg
++	.globl \label
++\label:
++	leaq \func(%rip),%rax
++	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
++	jmp  ia32_ptregs_common	
++	.endm
++
++	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
++	PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
++	PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
++	PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
++	PTREGSCALL stub32_execve, sys32_execve, %rcx
++	PTREGSCALL stub32_fork, sys_fork, %rdi
++	PTREGSCALL stub32_clone, sys32_clone, %rdx
++	PTREGSCALL stub32_vfork, sys_vfork, %rdi
++	PTREGSCALL stub32_iopl, sys_iopl, %rsi
++	PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++
++ENTRY(ia32_ptregs_common)
++	CFI_STARTPROC
++	popq %r11
++	SAVE_REST
++	call *%rax
++	RESTORE_REST
++	jmp  ia32_sysret	/* misbalances the return cache */
++	CFI_ENDPROC
++
++	.data
++	.align 8
++	.globl ia32_sys_call_table
++ia32_sys_call_table:
++	.quad sys_restart_syscall
++	.quad sys_exit
++	.quad stub32_fork
++	.quad sys_read
++	.quad sys_write
++	.quad sys32_open		/* 5 */
++	.quad sys_close
++	.quad sys32_waitpid
++	.quad sys_creat
++	.quad sys_link
++	.quad sys_unlink		/* 10 */
++	.quad stub32_execve
++	.quad sys_chdir
++	.quad compat_sys_time
++	.quad sys_mknod
++	.quad sys_chmod		/* 15 */
++	.quad sys_lchown16
++	.quad quiet_ni_syscall			/* old break syscall holder */
++	.quad sys_stat
++	.quad sys32_lseek
++	.quad sys_getpid		/* 20 */
++	.quad compat_sys_mount	/* mount  */
++	.quad sys_oldumount	/* old_umount  */
++	.quad sys_setuid16
++	.quad sys_getuid16
++	.quad compat_sys_stime	/* stime */		/* 25 */
++	.quad sys32_ptrace	/* ptrace */
++	.quad sys_alarm
++	.quad sys_fstat	/* (old)fstat */
++	.quad sys_pause
++	.quad compat_sys_utime	/* 30 */
++	.quad quiet_ni_syscall	/* old stty syscall holder */
++	.quad quiet_ni_syscall	/* old gtty syscall holder */
++	.quad sys_access
++	.quad sys_nice	
++	.quad quiet_ni_syscall	/* 35 */	/* old ftime syscall holder */
++	.quad sys_sync
++	.quad sys32_kill
++	.quad sys_rename
++	.quad sys_mkdir
++	.quad sys_rmdir		/* 40 */
++	.quad sys_dup
++	.quad sys32_pipe
++	.quad compat_sys_times
++	.quad quiet_ni_syscall			/* old prof syscall holder */
++	.quad sys_brk		/* 45 */
++	.quad sys_setgid16
++	.quad sys_getgid16
++	.quad sys_signal
++	.quad sys_geteuid16
++	.quad sys_getegid16	/* 50 */
++	.quad sys_acct
++	.quad sys_umount			/* new_umount */
++	.quad quiet_ni_syscall			/* old lock syscall holder */
++	.quad compat_sys_ioctl
++	.quad compat_sys_fcntl64		/* 55 */
++	.quad quiet_ni_syscall			/* old mpx syscall holder */
++	.quad sys_setpgid
++	.quad quiet_ni_syscall			/* old ulimit syscall holder */
++	.quad sys32_olduname
++	.quad sys_umask		/* 60 */
++	.quad sys_chroot
++	.quad sys32_ustat
++	.quad sys_dup2
++	.quad sys_getppid
++	.quad sys_getpgrp		/* 65 */
++	.quad sys_setsid
++	.quad sys32_sigaction
++	.quad sys_sgetmask
++	.quad sys_ssetmask
++	.quad sys_setreuid16	/* 70 */
++	.quad sys_setregid16
++	.quad stub32_sigsuspend
++	.quad compat_sys_sigpending
++	.quad sys_sethostname
++	.quad compat_sys_setrlimit	/* 75 */
++	.quad compat_sys_old_getrlimit	/* old_getrlimit */
++	.quad compat_sys_getrusage
++	.quad sys32_gettimeofday
++	.quad sys32_settimeofday
++	.quad sys_getgroups16	/* 80 */
++	.quad sys_setgroups16
++	.quad sys32_old_select
++	.quad sys_symlink
++	.quad sys_lstat
++	.quad sys_readlink		/* 85 */
++#ifdef CONFIG_IA32_AOUT
++	.quad sys_uselib
++#else
++	.quad quiet_ni_syscall
++#endif
++	.quad sys_swapon
++	.quad sys_reboot
++	.quad compat_sys_old_readdir
++	.quad sys32_mmap		/* 90 */
++	.quad sys_munmap
++	.quad sys_truncate
++	.quad sys_ftruncate
++	.quad sys_fchmod
++	.quad sys_fchown16		/* 95 */
++	.quad sys_getpriority
++	.quad sys_setpriority
++	.quad quiet_ni_syscall			/* old profil syscall holder */
++	.quad compat_sys_statfs
++	.quad compat_sys_fstatfs		/* 100 */
++	.quad sys_ioperm
++	.quad compat_sys_socketcall
++	.quad sys_syslog
++	.quad compat_sys_setitimer
++	.quad compat_sys_getitimer	/* 105 */
++	.quad compat_sys_newstat
++	.quad compat_sys_newlstat
++	.quad compat_sys_newfstat
++	.quad sys32_uname
++	.quad stub32_iopl		/* 110 */
++	.quad sys_vhangup
++	.quad quiet_ni_syscall	/* old "idle" system call */
++	.quad sys32_vm86_warning	/* vm86old */ 
++	.quad compat_sys_wait4
++	.quad sys_swapoff		/* 115 */
++	.quad sys32_sysinfo
++	.quad sys32_ipc
++	.quad sys_fsync
++	.quad stub32_sigreturn
++	.quad stub32_clone		/* 120 */
++	.quad sys_setdomainname
++	.quad sys_uname
++	.quad sys_modify_ldt
++	.quad sys32_adjtimex
++	.quad sys32_mprotect		/* 125 */
++	.quad compat_sys_sigprocmask
++	.quad quiet_ni_syscall		/* create_module */
++	.quad sys_init_module
++	.quad sys_delete_module
++	.quad quiet_ni_syscall		/* 130  get_kernel_syms */
++	.quad sys_quotactl
++	.quad sys_getpgid
++	.quad sys_fchdir
++	.quad quiet_ni_syscall	/* bdflush */
++	.quad sys_sysfs		/* 135 */
++	.quad sys_personality
++	.quad quiet_ni_syscall	/* for afs_syscall */
++	.quad sys_setfsuid16
++	.quad sys_setfsgid16
++	.quad sys_llseek		/* 140 */
++	.quad compat_sys_getdents
++	.quad compat_sys_select
++	.quad sys_flock
++	.quad sys_msync
++	.quad compat_sys_readv		/* 145 */
++	.quad compat_sys_writev
++	.quad sys_getsid
++	.quad sys_fdatasync
++	.quad sys32_sysctl	/* sysctl */
++	.quad sys_mlock		/* 150 */
++	.quad sys_munlock
++	.quad sys_mlockall
++	.quad sys_munlockall
++	.quad sys_sched_setparam
++	.quad sys_sched_getparam   /* 155 */
++	.quad sys_sched_setscheduler
++	.quad sys_sched_getscheduler
++	.quad sys_sched_yield
++	.quad sys_sched_get_priority_max
++	.quad sys_sched_get_priority_min  /* 160 */
++	.quad sys_sched_rr_get_interval
++	.quad compat_sys_nanosleep
++	.quad sys_mremap
++	.quad sys_setresuid16
++	.quad sys_getresuid16	/* 165 */
++	.quad sys32_vm86_warning	/* vm86 */ 
++	.quad quiet_ni_syscall	/* query_module */
++	.quad sys_poll
++	.quad compat_sys_nfsservctl
++	.quad sys_setresgid16	/* 170 */
++	.quad sys_getresgid16
++	.quad sys_prctl
++	.quad stub32_rt_sigreturn
++	.quad sys32_rt_sigaction
++	.quad sys32_rt_sigprocmask	/* 175 */
++	.quad sys32_rt_sigpending
++	.quad compat_sys_rt_sigtimedwait
++	.quad sys32_rt_sigqueueinfo
++	.quad stub32_rt_sigsuspend
++	.quad sys32_pread		/* 180 */
++	.quad sys32_pwrite
++	.quad sys_chown16
++	.quad sys_getcwd
++	.quad sys_capget
++	.quad sys_capset
++	.quad stub32_sigaltstack
++	.quad sys32_sendfile
++	.quad quiet_ni_syscall		/* streams1 */
++	.quad quiet_ni_syscall		/* streams2 */
++	.quad stub32_vfork            /* 190 */
++	.quad compat_sys_getrlimit
++	.quad sys32_mmap2
++	.quad sys32_truncate64
++	.quad sys32_ftruncate64
++	.quad sys32_stat64		/* 195 */
++	.quad sys32_lstat64
++	.quad sys32_fstat64
++	.quad sys_lchown
++	.quad sys_getuid
++	.quad sys_getgid		/* 200 */
++	.quad sys_geteuid
++	.quad sys_getegid
++	.quad sys_setreuid
++	.quad sys_setregid
++	.quad sys_getgroups	/* 205 */
++	.quad sys_setgroups
++	.quad sys_fchown
++	.quad sys_setresuid
++	.quad sys_getresuid
++	.quad sys_setresgid	/* 210 */
++	.quad sys_getresgid
++	.quad sys_chown
++	.quad sys_setuid
++	.quad sys_setgid
++	.quad sys_setfsuid		/* 215 */
++	.quad sys_setfsgid
++	.quad sys_pivot_root
++	.quad sys_mincore
++	.quad sys_madvise
++	.quad compat_sys_getdents64	/* 220 getdents64 */
++	.quad compat_sys_fcntl64	
++	.quad quiet_ni_syscall		/* tux */
++	.quad quiet_ni_syscall    	/* security */
++	.quad sys_gettid	
++	.quad sys_readahead	/* 225 */
++	.quad sys_setxattr
++	.quad sys_lsetxattr
++	.quad sys_fsetxattr
++	.quad sys_getxattr
++	.quad sys_lgetxattr	/* 230 */
++	.quad sys_fgetxattr
++	.quad sys_listxattr
++	.quad sys_llistxattr
++	.quad sys_flistxattr
++	.quad sys_removexattr	/* 235 */
++	.quad sys_lremovexattr
++	.quad sys_fremovexattr
++	.quad sys_tkill
++	.quad sys_sendfile64 
++	.quad compat_sys_futex		/* 240 */
++	.quad compat_sys_sched_setaffinity
++	.quad compat_sys_sched_getaffinity
++	.quad sys32_set_thread_area
++	.quad sys32_get_thread_area
++	.quad compat_sys_io_setup	/* 245 */
++	.quad sys_io_destroy
++	.quad compat_sys_io_getevents
++	.quad compat_sys_io_submit
++	.quad sys_io_cancel
++	.quad sys_fadvise64		/* 250 */
++	.quad quiet_ni_syscall 	/* free_huge_pages */
++	.quad sys_exit_group
++	.quad sys32_lookup_dcookie
++	.quad sys_epoll_create
++	.quad sys_epoll_ctl		/* 255 */
++	.quad sys_epoll_wait
++	.quad sys_remap_file_pages
++	.quad sys_set_tid_address
++	.quad sys32_timer_create
++	.quad compat_sys_timer_settime	/* 260 */
++	.quad compat_sys_timer_gettime
++	.quad sys_timer_getoverrun
++	.quad sys_timer_delete
++	.quad compat_sys_clock_settime
++	.quad compat_sys_clock_gettime	/* 265 */
++	.quad compat_sys_clock_getres
++	.quad compat_sys_clock_nanosleep
++	.quad compat_sys_statfs64
++	.quad compat_sys_fstatfs64
++	.quad sys_tgkill		/* 270 */
++	.quad compat_sys_utimes
++	.quad sys32_fadvise64_64
++	.quad quiet_ni_syscall	/* sys_vserver */
++	.quad sys_mbind
++	.quad compat_sys_get_mempolicy	/* 275 */
++	.quad sys_set_mempolicy
++	.quad compat_sys_mq_open
++	.quad sys_mq_unlink
++	.quad compat_sys_mq_timedsend
++	.quad compat_sys_mq_timedreceive	/* 280 */
++	.quad compat_sys_mq_notify
++	.quad compat_sys_mq_getsetattr
++	.quad quiet_ni_syscall		/* reserved for kexec */
++	.quad compat_sys_waitid
++	.quad quiet_ni_syscall		/* sys_altroot */
++	.quad sys_add_key
++	.quad sys_request_key
++	.quad sys_keyctl
++	/* don't forget to change IA32_NR_syscalls */
++ia32_syscall_end:		
++	.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
++		.quad ni_syscall
++	.endr
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/ia32/Makefile linux-2.6.12-xen/arch/xen/x86_64/ia32/Makefile
+--- pristine-linux-2.6.12/arch/xen/x86_64/ia32/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/ia32/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,66 @@
++#
++# Makefile for the ia32 kernel emulation subsystem.
++#
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++CFLAGS	+= -Iarch/$(XENARCH)/kernel
++
++obj-$(CONFIG_IA32_EMULATION) := ia32entry.o syscall32.o
++
++c-obj-$(CONFIG_IA32_EMULATION) := sys_ia32.o ia32_ioctl.o \
++	ia32_signal.o tls32.o \
++	ia32_binfmt.o fpu32.o ptrace32.o 
++
++s-obj-y :=
++
++sysv-$(CONFIG_SYSVIPC) := ipc32.o
++c-obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
++
++c-obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
++
++$(obj)/syscall32.o: $(src)/syscall32.c \
++	$(foreach F,int80 sysenter syscall,$(obj)/vsyscall-$F.so)
++
++# syscall32.c currently contains inline asm which has .incbin directives.
++# This defeats ccache's signature checks, and also breaks distcc.
++# Make sure neither ccache nor distcc compiles this file.
++#
++$(obj)/syscall32.o: override CC := env CCACHE_DISABLE=1 DISTCC_HOSTS=localhost $(CC)
++
++# Teach kbuild about targets
++targets := $(foreach F,int80 sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
++
++# The DSO images are built using a special linker script
++quiet_cmd_syscall = SYSCALL $@
++      cmd_syscall = $(CC) -m32 -nostdlib -shared -s \
++			   -Wl,-soname=linux-gate.so.1 -o $@ \
++			   -Wl,-T,$(filter-out FORCE,$^)
++
++
++$(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
++$(obj)/vsyscall-%.so: $(obj)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
++	$(call if_changed,syscall)
++
++AFLAGS_vsyscall-int80.o = -m32 -I$(obj)
++AFLAGS_vsyscall-sysenter.o = -m32 -I$(obj)
++AFLAGS_vsyscall-syscall.o = -m32 -I$(obj)
++CFLAGS_ia32_ioctl.o += -Ifs/
++
++s-link	:= vsyscall-syscall.o vsyscall-sysenter.o vsyscall-sigreturn.o
++
++$(obj)/vsyscall.lds:
++	@ln -fsn $(srctree)/arch/x86_64/ia32/$(notdir $@) $@
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
++	@ln -fsn $(srctree)/arch/x86_64/ia32/$(notdir $@) $@
++
++$(obj)/vsyscall-int80.o $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-syscall.o: \
++	$(obj)/vsyscall-sigreturn.S $(obj)/../../i386/kernel/vsyscall-note.S
++
++$(obj)/../../i386/kernel/vsyscall-note.S:
++	@ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
++
++obj-y	+= $(c-obj-y) $(s-obj-y)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
++clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/ia32/syscall32.c linux-2.6.12-xen/arch/xen/x86_64/ia32/syscall32.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/ia32/syscall32.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/ia32/syscall32.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,153 @@
++/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
++
++/* vsyscall handling for 32bit processes. Map a stub page into it 
++   on demand because 32bit cannot reach the kernel's fixmaps */
++
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/gfp.h>
++#include <linux/init.h>
++#include <linux/stringify.h>
++#include <linux/security.h>
++#include <asm/proto.h>
++#include <asm/tlbflush.h>
++#include <asm/ia32_unistd.h>
++
++#define USE_INT80
++
++#ifdef USE_INT80
++/* 32bit VDSOs mapped into user space. */ 
++asm(".section \".init.data\",\"aw\"\n"
++    "syscall32_int80:\n"
++    ".incbin \"arch/xen/x86_64/ia32/vsyscall-int80.so\"\n"
++    "syscall32_int80_end:\n"
++    "syscall32_syscall:\n"
++    ".incbin \"arch/xen/x86_64/ia32/vsyscall-syscall.so\"\n"
++    "syscall32_syscall_end:\n"
++    "syscall32_sysenter:\n"
++    ".incbin \"arch/xen/x86_64/ia32/vsyscall-sysenter.so\"\n"
++    "syscall32_sysenter_end:\n"
++    ".previous");
++
++extern unsigned char syscall32_int80[], syscall32_int80_end[];
++#else
++/* 32bit VDSOs mapped into user space. */ 
++asm(".section \".init.data\",\"aw\"\n"
++    "syscall32_syscall:\n"
++    ".incbin \"arch/xen/x86_64/ia32/vsyscall-syscall.so\"\n"
++    "syscall32_syscall_end:\n"
++    "syscall32_sysenter:\n"
++    ".incbin \"arch/xen/x86_64/ia32/vsyscall-sysenter.so\"\n"
++    "syscall32_sysenter_end:\n"
++    ".previous");
++
++static int use_sysenter = -1;
++#endif
++
++extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
++extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
++extern int sysctl_vsyscall32;
++
++char *syscall32_page; 
++
++static struct page *
++syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
++{
++	struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
++	get_page(p);
++	return p;
++}
++
++/* Prevent VMA merging */
++static void syscall32_vma_close(struct vm_area_struct *vma)
++{
++}
++
++static struct vm_operations_struct syscall32_vm_ops = {
++	.close = syscall32_vma_close,
++	.nopage = syscall32_nopage,
++};
++
++struct linux_binprm;
++
++/* Setup a VMA at program startup for the vsyscall page */
++int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
++{
++	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
++	struct vm_area_struct *vma;
++	struct mm_struct *mm = current->mm;
++
++	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++	if (!vma)
++		return -ENOMEM;
++	if (security_vm_enough_memory(npages)) {
++		kmem_cache_free(vm_area_cachep, vma);
++		return -ENOMEM;
++	}
++
++	memset(vma, 0, sizeof(struct vm_area_struct));
++	/* Could randomize here */
++	vma->vm_start = VSYSCALL32_BASE;
++	vma->vm_end = VSYSCALL32_END;
++	/* MAYWRITE to allow gdb to COW and set breakpoints */
++	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYEXEC|VM_MAYWRITE;
++	vma->vm_flags |= mm->def_flags;
++	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
++	vma->vm_ops = &syscall32_vm_ops;
++	vma->vm_mm = mm;
++
++	down_write(&mm->mmap_sem);
++	insert_vm_struct(mm, vma);
++	mm->total_vm += npages;
++	up_write(&mm->mmap_sem);
++	return 0;
++}
++
++static int __init init_syscall32(void)
++{ 
++	syscall32_page = (void *)get_zeroed_page(GFP_KERNEL); 
++	if (!syscall32_page) 
++		panic("Cannot allocate syscall32 page"); 
++
++#ifdef USE_INT80
++	/*
++	 * At this point we use int 0x80.
++	 */
++	memcpy(syscall32_page, syscall32_int80,
++	       syscall32_int80_end - syscall32_int80);
++#else
++
++ 	if (use_sysenter > 0) {
++ 		memcpy(syscall32_page, syscall32_sysenter,
++ 		       syscall32_sysenter_end - syscall32_sysenter);
++ 	} else {
++  		memcpy(syscall32_page, syscall32_syscall,
++  		       syscall32_syscall_end - syscall32_syscall);
++  	}	
++#endif
++	return 0;
++} 
++
++/*
++ * This must be done early in case we have an initrd containing 32-bit
++ * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
++ */	
++core_initcall(init_syscall32); 
++
++/* May not be __init: called during resume */
++void syscall32_cpu_init(void)
++{
++#ifndef USE_INT80
++	if (use_sysenter < 0)
++ 		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
++
++	/* Load these always in case some future AMD CPU supports
++	   SYSENTER from compat mode too. */
++	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)(__KERNEL_CS | 3));
++	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
++	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
++
++	wrmsrl(MSR_CSTAR, ia32_cstar_target);
++#endif
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/ia32/vsyscall-int80.S linux-2.6.12-xen/arch/xen/x86_64/ia32/vsyscall-int80.S
+--- pristine-linux-2.6.12/arch/xen/x86_64/ia32/vsyscall-int80.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/ia32/vsyscall-int80.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,57 @@
++/*
++ * Code for the vsyscall page.  This version uses the old int $0x80 method.
++ *
++ * NOTE:
++ * 1) __kernel_vsyscall _must_ be first in this page.
++ * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
++ *    for details.
++ */
++#include <asm/ia32_unistd.h>
++#include <asm/offset.h>
++
++	.text
++	.section .text.vsyscall,"ax"
++	.globl __kernel_vsyscall
++	.type __kernel_vsyscall,@function
++__kernel_vsyscall:
++.LSTART_vsyscall:
++	int $0x80
++	ret
++.LEND_vsyscall:
++	.size __kernel_vsyscall,.-.LSTART_vsyscall
++	.previous
++
++	.section .eh_frame,"a", at progbits
++.LSTARTFRAME:
++	.long .LENDCIE-.LSTARTCIE
++.LSTARTCIE:
++	.long 0			/* CIE ID */
++	.byte 1			/* Version number */
++	.string "zR"		/* NUL-terminated augmentation string */
++	.uleb128 1		/* Code alignment factor */
++	.sleb128 -4		/* Data alignment factor */
++	.byte 8			/* Return address register column */
++	.uleb128 1		/* Augmentation value length */
++	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
++	.byte 0x0c		/* DW_CFA_def_cfa */
++	.uleb128 4
++	.uleb128 4
++	.byte 0x88		/* DW_CFA_offset, column 0x8 */
++	.uleb128 1
++	.align 4
++.LENDCIE:
++
++	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
++.LSTARTFDE1:
++	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
++	.long .LSTART_vsyscall-.	/* PC-relative start address */
++	.long .LEND_vsyscall-.LSTART_vsyscall
++	.uleb128 0			/* Augmentation length */
++	.align 4
++.LENDFDE1:
++		
++/*
++ * Get the common code for the sigreturn entry points.
++ */
++#define SYSCALL_ENTER_KERNEL    int $0x80
++#include "vsyscall-sigreturn.S"
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/Kconfig linux-2.6.12-xen/arch/xen/x86_64/Kconfig
+--- pristine-linux-2.6.12/arch/xen/x86_64/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/Kconfig	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,480 @@
++#
++# For a description of the syntax of this configuration file,
++# see Documentation/kbuild/kconfig-language.txt.
++#
++# Note: ISA is disabled and will hopefully never be enabled.
++# If you managed to buy an ISA x86-64 box you'll have to fix all the
++# ISA drivers you need yourself.
++#
++
++menu "X86_64 processor configuration"
++
++config XENARCH
++	string
++	default x86_64
++
++config X86_64
++	bool
++	default y
++	help
++	  Port to the x86-64 architecture. x86-64 is a 64-bit extension to the
++	  classical 32-bit x86 architecture. For details see
++	  <http://www.x86-64.org/>.
++
++config 64BIT
++	def_bool y
++
++config X86
++	bool
++	default y
++
++config MMU
++	bool
++	default y
++
++config ISA
++	bool
++
++config SBUS
++	bool
++
++config RWSEM_GENERIC_SPINLOCK
++	bool
++	default y
++
++config RWSEM_XCHGADD_ALGORITHM
++	bool
++
++config GENERIC_CALIBRATE_DELAY
++	bool
++	default y
++
++config X86_CMPXCHG
++	bool
++	default y
++
++config EARLY_PRINTK
++	bool "Early Printk"
++	default n
++	help
++	  Write kernel log output directly into the VGA buffer or to a serial
++	  port.
++
++	  This is useful for kernel debugging when your machine crashes very
++	  early before the console code is initialized. For normal operation
++	  it is not recommended because it looks ugly and doesn't cooperate
++	  with klogd/syslogd or the X server. You should normally say N here,
++	  unless you want to debug such a crash.
++
++config GENERIC_ISA_DMA
++	bool
++	default y
++
++config GENERIC_IOMAP
++	bool
++	default y
++
++#source "init/Kconfig"
++
++
++menu "Processor type and features"
++
++choice
++	prompt "Processor family"
++	default MK8
++
++#config MK8
++#	bool "AMD-Opteron/Athlon64"
++#	help
++#	  Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
++
++config MPSC
++       bool "Intel EM64T"
++       help
++	  Optimize for Intel Pentium 4 and Xeon CPUs with Intel
++	  Extended Memory 64 Technology(EM64T). For details see
++	  <http://www.intel.com/technology/64bitextensions/>.
++
++config GENERIC_CPU
++	bool "Generic-x86-64"
++	help
++	  Generic x86-64 CPU.
++
++endchoice
++
++#
++# Define implied options from the CPU selection here
++#
++config X86_L1_CACHE_BYTES
++	int
++	default "128" if GENERIC_CPU || MPSC
++	default "64" if MK8
++
++config X86_L1_CACHE_SHIFT
++	int
++	default "7" if GENERIC_CPU || MPSC
++	default "6" if MK8
++
++config X86_TSC
++	bool
++	default n
++
++config X86_GOOD_APIC
++	bool
++	default y
++
++config X86_IO_APIC
++	bool
++	default XEN_PRIVILEGED_GUEST
++
++config X86_XEN_GENAPIC
++	bool
++	default XEN_PRIVILEGED_GUEST || SMP
++
++config X86_LOCAL_APIC
++	bool
++	default XEN_PRIVILEGED_GUEST
++
++config MICROCODE
++	tristate "/dev/cpu/microcode - Intel CPU microcode support"
++	---help---
++	  If you say Y here, you will be
++	  able to update the microcode on Intel processors. You will
++	  obviously need the actual microcode binary data itself which is
++	  not shipped with the Linux kernel.
++
++	  For latest news and information on obtaining all the required
++	  ingredients for this driver, check:
++	  <http://www.urbanmyth.org/microcode/>.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called microcode.
++	  If you use modprobe or kmod you may also want to add the line
++	  'alias char-major-10-184 microcode' to your /etc/modules.conf file.
++
++config X86_MSR
++	tristate "/dev/cpu/*/msr - Model-specific register support"
++	help
++	  This device gives privileged processes access to the x86
++	  Model-Specific Registers (MSRs).  It is a character device with
++	  major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
++	  MSR accesses are directed to a specific CPU on multi-processor
++	  systems.
++
++config X86_CPUID
++	tristate "/dev/cpu/*/cpuid - CPU information support"
++	help
++	  This device gives processes access to the x86 CPUID instruction to
++	  be executed on a specific processor.  It is a character device
++	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
++	  /dev/cpu/31/cpuid.
++
++# disable it for opteron optimized builds because it pulls in ACPI_BOOT
++config X86_HT
++	bool
++	depends on SMP && !MK8
++	default y
++
++config MATH_EMULATION
++	bool
++
++config MCA
++	bool
++
++config EISA
++	bool
++
++config MTRR
++	bool "MTRR (Memory Type Range Register) support"
++	---help---
++	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
++	  the Memory Type Range Registers (MTRRs) may be used to control
++	  processor access to memory ranges. This is most useful if you have
++	  a video (VGA) card on a PCI or AGP bus. Enabling write-combining
++	  allows bus write transfers to be combined into a larger transfer
++	  before bursting over the PCI/AGP bus. This can increase performance
++	  of image write operations 2.5 times or more. Saying Y here creates a
++	  /proc/mtrr file which may be used to manipulate your processor's
++	  MTRRs. Typically the X server should use this.
++
++	  This code has a reasonably generic interface so that similar
++	  control registers on other processors can be easily supported
++	  as well.
++
++	  Saying Y here also fixes a problem with buggy SMP BIOSes which only
++	  set the MTRRs for the boot CPU and not for the secondary CPUs. This
++	  can lead to all sorts of problems, so it's good to say Y here.
++
++	  Just say Y here, all x86-64 machines support MTRRs.
++
++	  See <file:Documentation/mtrr.txt> for more information.
++
++config SMP
++	bool "Symmetric multi-processing support"
++	---help---
++	  This enables support for systems with more than one CPU. If you have
++	  a system with only one CPU, like most personal computers, say N. If
++	  you have a system with more than one CPU, say Y.
++
++	  If you say N here, the kernel will run on single and multiprocessor
++	  machines, but will use only one CPU of a multiprocessor machine. If
++	  you say Y here, the kernel will run on many, but not all,
++	  singleprocessor machines. On a singleprocessor machine, the kernel
++	  will run faster if you say N here.
++
++	  If you don't know what to do here, say N.
++
++#config PREEMPT
++#	bool "Preemptible Kernel"
++#	---help---
++#	  This option reduces the latency of the kernel when reacting to
++#	  real-time or interactive events by allowing a low priority process to
++#	  be preempted even if it is in kernel mode executing a system call.
++#	  This allows applications to run more reliably even when the system is
++#	  under load. On the contrary, it may also break your drivers and add
++#	  priority inheritance problems to your system. Don't select it if
++#	  you rely on a stable system or have slightly obscure hardware.
++#	  It's also not very well tested on x86-64 currently.
++#	  You have been warned.
++#
++#	  Say Y here if you are feeling brave and building a kernel for a
++#	  desktop, embedded or real-time system.  Say N if you are unsure.
++
++config SCHED_SMT
++	bool "SMT (Hyperthreading) scheduler support"
++	depends on SMP
++	default n
++	help
++	  SMT scheduler support improves the CPU scheduler's decision making
++	  when dealing with Intel Pentium 4 chips with HyperThreading at a
++	  cost of slightly increased overhead in some places. If unsure say
++	  N here.
++
++config K8_NUMA
++       bool "K8 NUMA support"
++       select NUMA
++       depends on SMP
++       help
++	  Enable NUMA (Non-Uniform Memory Access) support for
++	  AMD Opteron Multiprocessor systems. The kernel will try to allocate
++	  memory used by a CPU on the local memory controller of the CPU
++	  and add some more NUMA awareness to the kernel.
++	  This code is recommended on all multiprocessor Opteron systems
++	  and normally doesn't hurt on others.
++
++config NUMA_EMU
++	bool "NUMA emulation support"
++	select NUMA
++	depends on SMP
++	help
++	  Enable NUMA emulation. A flat machine will be split
++	  into virtual nodes when booted with "numa=fake=N", where N is the
++	  number of nodes. This is only useful for debugging.
++
++config DISCONTIGMEM
++       bool
++       depends on NUMA
++       default y
++
++config NUMA
++       bool
++       default n
++
++config HAVE_DEC_LOCK
++	bool
++	depends on SMP
++	default y
++
++# actually 64 maximum, but you need to fix the APIC code first
++# to use clustered mode or whatever your big iron needs
++config NR_CPUS
++	int "Maximum number of CPUs (2-255)"
++	range 2 255
++	depends on SMP
++	default "16"
++	help
++	  This allows you to specify the maximum number of CPUs which this
++	  kernel will support.  The maximum supported value is 255 and the
++	  minimum value which makes sense is 2.
++
++	  This is purely to save memory - each supported CPU requires
++	  memory in the static kernel configuration.
++
++config HPET_TIMER
++	bool
++	default n
++	help
++	  Use the IA-PC HPET (High Precision Event Timer) to manage
++	  time in preference to the PIT and RTC, if a HPET is
++	  present.  The HPET provides a stable time base on SMP
++	  systems, unlike the RTC, but it is more expensive to access,
++	  as it is off-chip.  You can find the HPET spec at
++	  <http://www.intel.com/labs/platcomp/hpet/hpetspec.htm>.
++
++	  If unsure, say Y.
++
++config HPET_EMULATE_RTC
++	bool "Provide RTC interrupt"
++	depends on HPET_TIMER && RTC=y
++
++config GART_IOMMU
++	bool "IOMMU support"
++	depends on PCI
++	help
++	  Support the K8 IOMMU. Needed to run systems with more than 4GB of memory
++	  properly with 32-bit PCI devices that do not support DAC (Double Address
++	  Cycle). The IOMMU can be turned off at runtime with the iommu=off parameter.
++	  Normally the kernel will make the right choice by itself.
++	  If unsure, say Y.
++
++# need this always enabled with GART_IOMMU for the VIA workaround
++config SWIOTLB
++       bool
++       depends on PCI
++       default y
++
++config DUMMY_IOMMU
++	bool
++	depends on !GART_IOMMU
++	default y
++	help
++	  Don't use IOMMU code. This will cause problems when you have more than 4GB
++	  of memory and any 32-bit devices. Don't turn on unless you know what you
++	  are doing.
++
++config X86_MCE
++	bool "Machine check support" if EMBEDDED
++	default n
++	help
++	   Include a machine check error handler to report hardware errors.
++	   This version will require the mcelog utility to decode some
++	   machine check error logs. See
++	   ftp://ftp.x86-64.org/pub/linux/tools/mcelog
++
++config SECCOMP
++	bool "Enable seccomp to safely compute untrusted bytecode"
++	depends on PROC_FS
++	default y
++	help
++	  This kernel feature is useful for number crunching applications
++	  that may need to compute untrusted bytecode during their
++	  execution. By using pipes or other transports made available to
++	  the process as file descriptors supporting the read/write
++	  syscalls, it's possible to isolate those applications in
++	  their own address space using seccomp. Once seccomp is
++	  enabled via /proc/<pid>/seccomp, it cannot be disabled
++	  and the task is only allowed to execute a few safe syscalls
++	  defined by each seccomp mode.
++
++	  If unsure, say Y. Only embedded systems should say N here.
++
++endmenu
++
++#
++# Use the generic interrupt handling code in kernel/irq/:
++#
++config GENERIC_HARDIRQS
++	bool
++	default y
++
++config GENERIC_IRQ_PROBE
++	bool
++	default y
++
++# we have no ISA slots, but we do have ISA-style DMA.
++config ISA_DMA_API
++	bool
++	default y
++
++menu "Power management options"
++
++source kernel/power/Kconfig
++
++source "arch/x86_64/kernel/cpufreq/Kconfig"
++
++endmenu
++
++menu "Bus options (PCI etc.)"
++
++config PCI
++	bool "PCI support"
++
++# x86-64 doesn't support PCI BIOS access from long mode so always go direct.
++config PCI_DIRECT
++	bool
++	depends on PCI
++	default y
++
++config PCI_MMCONFIG
++	bool "Support mmconfig PCI config space access"
++	depends on PCI && ACPI
++	select ACPI_BOOT
++
++config UNORDERED_IO
++       bool "Unordered IO mapping access"
++       depends on EXPERIMENTAL
++       help
++         Use unordered stores to access IO memory mappings in device drivers.
++	 Still very experimental. When a driver works on IA64/ppc64/pa-risc it should
++	 work with this option, but it makes the drivers behave differently
++	 from i386. Requires that the driver writer used memory barriers
++	 properly.
++
++#source "drivers/pci/pcie/Kconfig"
++
++#source "drivers/pci/Kconfig"
++
++#source "drivers/pcmcia/Kconfig"
++
++#source "drivers/pci/hotplug/Kconfig"
++
++endmenu
++
++
++menu "Executable file formats / Emulations"
++
++# source "fs/Kconfig.binfmt"
++
++config IA32_EMULATION
++	bool "IA32 Emulation"
++	help
++	  Include code to run 32-bit programs under a 64-bit kernel. You should likely
++	  turn this on, unless you're 100% sure that you don't have any 32-bit programs
++	  left.
++
++config IA32_AOUT
++       bool "IA32 a.out support"
++       depends on IA32_EMULATION
++       help
++         Support old a.out binaries in the 32bit emulation.
++
++config COMPAT
++	bool
++	depends on IA32_EMULATION
++	default y
++
++config SYSVIPC_COMPAT
++	bool
++	depends on COMPAT && SYSVIPC
++	default y
++
++config UID16
++	bool
++	depends on IA32_EMULATION
++	default y
++
++endmenu
++
++# source drivers/Kconfig
++
++# source "drivers/firmware/Kconfig"
++
++# source fs/Kconfig
++
++#source "arch/x86_64/oprofile/Kconfig"
++
++# source "security/Kconfig"
++
++# source "crypto/Kconfig"
++
++endmenu
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/acpi/Makefile linux-2.6.12-xen/arch/xen/x86_64/kernel/acpi/Makefile
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/acpi/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/acpi/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,20 @@
++i386-obj-$(CONFIG_ACPI_BOOT)		:= boot.o
++c-obj-$(CONFIG_X86_IO_APIC)	        := earlyquirk.o
++c-obj-$(CONFIG_ACPI_SLEEP)	        += sleep.o
++s-obj-$(CONFIG_ACPI_SLEEP)	        += wakeup.o
++
++c-link                                  :=
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
++	@ln -fsn $(srctree)/arch/i386/kernel/acpi/$(notdir $@) $@
++
++$(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
++	@ln -fsn $(srctree)/arch/x86_64/kernel/acpi/$(notdir $@) $@
++
++$(patsubst %.o,$(obj)/%.c,$(i386-obj-y)):
++	@ln -fsn $(srctree)/arch/xen/i386/kernel/acpi/$(notdir $@) $@
++
++obj-y	+= $(c-obj-y) $(s-obj-y) $(i386-obj-y)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
++clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/apic.c linux-2.6.12-xen/arch/xen/x86_64/kernel/apic.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/apic.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/apic.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,201 @@
++/*
++ *	Local APIC handling, local APIC timers
++ *
++ *	(c) 1999, 2000 Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
++ *					thanks to Eric Gilmore
++ *					and Rolf G. Tews
++ *					for testing these extensively.
++ *	Maciej W. Rozycki	:	Various updates and fixes.
++ *	Mikael Pettersson	:	Power Management for UP-APIC.
++ *	Pavel Machek and
++ *	Mikael Pettersson	:	PM converted to driver model.
++ */
++
++#include <linux/config.h>
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++
++#include "io_ports.h"
++
++/*
++ * Debug level
++ */
++int apic_verbosity;
++int disable_apic;
++
++void smp_local_timer_interrupt(struct pt_regs *regs)
++{
++
++	profile_tick(CPU_PROFILING, regs);
++#ifndef CONFIG_XEN
++	int cpu = smp_processor_id();
++
++	if (--per_cpu(prof_counter, cpu) <= 0) {
++		/*
++		 * The multiplier may have changed since the last time we got
++		 * to this point as a result of the user writing to
++		 * /proc/profile. In this case we need to adjust the APIC
++		 * timer accordingly.
++		 *
++		 * Interrupts are already masked off at this point.
++		 */
++		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
++		if (per_cpu(prof_counter, cpu) != 
++		    per_cpu(prof_old_multiplier, cpu)) {
++			__setup_APIC_LVTT(calibration_result/
++					per_cpu(prof_counter, cpu));
++			per_cpu(prof_old_multiplier, cpu) =
++				per_cpu(prof_counter, cpu);
++		}
++
++#ifdef CONFIG_SMP
++		update_process_times(user_mode(regs));
++#endif
++	}
++#endif
++
++	/*
++	 * We take the 'long' return path, and there every subsystem
++	 * grabs the appropriate locks (kernel lock/ irq lock).
++	 *
++	 * we might want to decouple profiling from the 'long path',
++	 * and do the profiling totally in assembly.
++	 *
++	 * Currently this isn't too much of an issue (performance wise),
++	 * we can take more than 100K local irqs per second on a 100 MHz P5.
++	 */
++}
++
++/*
++ * Local APIC timer interrupt. This is the most natural way for doing
++ * local interrupts, but local timer interrupts can be emulated by
++ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
++ *
++ * [ if a single-CPU system runs an SMP kernel then we call the local
++ *   interrupt as well. Thus we cannot inline the local irq ... ]
++ */
++void smp_apic_timer_interrupt(struct pt_regs *regs)
++{
++	/*
++	 * the NMI deadlock-detector uses this.
++	 */
++	add_pda(apic_timer_irqs, 1);
++
++	/*
++	 * NOTE! We'd better ACK the irq immediately,
++	 * because timer handling can be slow.
++	 */
++	ack_APIC_irq();
++	/*
++	 * update_process_times() expects us to have done irq_enter().
++	 * Besides, if we don't, timer interrupts ignore the global
++	 * interrupt lock, which is the WrongThing (tm) to do.
++	 */
++	irq_enter();
++	smp_local_timer_interrupt(regs);
++	irq_exit();
++}
++
++/*
++ * This interrupt should _never_ happen with our APIC/SMP architecture
++ */
++asmlinkage void smp_spurious_interrupt(void)
++{
++	unsigned int v;
++	irq_enter();
++	/*
++	 * Check if this really is a spurious interrupt and ACK it
++	 * if it is a vectored one.  Just in case...
++	 * Spurious interrupts should not be ACKed.
++	 */
++	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
++	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
++		ack_APIC_irq();
++
++#if 0
++	static unsigned long last_warning; 
++	static unsigned long skipped; 
++
++	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
++	if (time_before(last_warning+30*HZ,jiffies)) { 
++		printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
++		       smp_processor_id(), skipped);
++		last_warning = jiffies; 
++		skipped = 0;
++	} else { 
++		skipped++; 
++	} 
++#endif 
++	irq_exit();
++}
++
++/*
++ * This interrupt should never happen with our APIC/SMP architecture
++ */
++
++asmlinkage void smp_error_interrupt(void)
++{
++	unsigned int v, v1;
++
++	irq_enter();
++	/* First tickle the hardware, only then report what went on. -- REW */
++	v = apic_read(APIC_ESR);
++	apic_write(APIC_ESR, 0);
++	v1 = apic_read(APIC_ESR);
++	ack_APIC_irq();
++	atomic_inc(&irq_err_count);
++
++	/* Here is what the APIC error bits mean:
++	   0: Send CS error
++	   1: Receive CS error
++	   2: Send accept error
++	   3: Receive accept error
++	   4: Reserved
++	   5: Send illegal vector
++	   6: Received illegal vector
++	   7: Illegal register address
++	*/
++	printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
++	        smp_processor_id(), v , v1);
++	irq_exit();
++}
++
++int get_physical_broadcast(void)
++{
++        return 0xff;
++}
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++	if (smp_found_config)
++		if (!skip_ioapic_setup && nr_ioapics)
++			setup_IO_APIC();
++#endif
++
++	return 0;
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/e820.c linux-2.6.12-xen/arch/xen/x86_64/kernel/e820.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/e820.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/e820.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,676 @@
++/* 
++ * Handle the memory map.
++ * The functions here do the job until bootmem takes over.
++ * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
++ *
++ *  Getting sanitize_e820_map() in sync with i386 version by applying change:
++ *  -  Provisions for empty E820 memory regions (reported by certain BIOSes).
++ *     Alex Achenbach <xela at slit.de>, December 2002.
++ *  Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
++ *
++ */
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/ioport.h>
++#include <linux/string.h>
++#include <asm/page.h>
++#include <asm/e820.h>
++#include <asm/proto.h>
++#include <asm/bootsetup.h>
++#include <asm-xen/xen-public/memory.h>
++
++unsigned long pci_mem_start = 0xaeedbabe;
++
++/* 
++ * PFN of last memory page.
++ */
++unsigned long end_pfn; 
++unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;  
++unsigned long end_pfn_map; 
++
++/* 
++ * Add a memory region to the kernel e820 map.
++ */ 
++void __init add_memory_region(unsigned long start, unsigned long size, int type)
++{
++	int x = e820.nr_map;
++
++	if (x == E820MAX) {
++		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++		return;
++	}
++
++	e820.map[x].addr = start;
++	e820.map[x].size = size;
++	e820.map[x].type = type;
++	e820.nr_map++;
++}
++
++#ifndef CONFIG_XEN
++extern char _end[];
++
++/* 
++ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
++ * The direct mapping extends to end_pfn_map, so that we can directly access
++ * apertures, ACPI and other tables without having to play with fixmaps.
++ */ 
++
++/* 
++ * Last pfn which the user wants to use.
++ */
++
++extern struct resource code_resource, data_resource;
++
++/* Check for some hardcoded bad areas that early boot is not allowed to touch */ 
++static inline int bad_addr(unsigned long *addrp, unsigned long size)
++{ 
++	unsigned long addr = *addrp, last = addr + size; 
++
++	/* various gunk below that needed for SMP startup */
++	if (addr < 0x8000) { 
++		*addrp = 0x8000;
++		return 1; 
++	}
++
++	/* direct mapping tables of the kernel */
++	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { 
++		*addrp = table_end << PAGE_SHIFT; 
++		return 1;
++	} 
++
++	/* initrd */ 
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (LOADER_TYPE && INITRD_START && last >= INITRD_START && 
++	    addr < INITRD_START+INITRD_SIZE) { 
++		*addrp = INITRD_START + INITRD_SIZE; 
++		return 1;
++	} 
++#endif
++	/* kernel code + 640k memory hole (later should not be needed, but 
++	   be paranoid for now) */
++	if (last >= 640*1024 && addr < __pa_symbol(&_end)) { 
++		*addrp = __pa_symbol(&_end);
++		return 1;
++	}
++	/* XXX ramdisk image here? */ 
++	return 0;
++} 
++
++int __init e820_mapped(unsigned long start, unsigned long end, unsigned type) 
++{ 
++	int i;
++	for (i = 0; i < e820.nr_map; i++) { 
++		struct e820entry *ei = &e820.map[i]; 
++		if (type && ei->type != type) 
++			continue;
++		if (ei->addr >= end || ei->addr + ei->size < start) 
++			continue; 
++		return 1; 
++	} 
++	return 0;
++}
++
++/* 
++ * Find a free area in a specific range. 
++ */ 
++unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size) 
++{ 
++	int i; 
++	for (i = 0; i < e820.nr_map; i++) { 
++		struct e820entry *ei = &e820.map[i]; 
++		unsigned long addr = ei->addr, last; 
++		if (ei->type != E820_RAM) 
++			continue; 
++		if (addr < start) 
++			addr = start;
++		if (addr > ei->addr + ei->size) 
++			continue; 
++		while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size)
++			;
++		last = addr + size;
++		if (last > ei->addr + ei->size)
++			continue;
++		if (last > end) 
++			continue;
++		return addr; 
++	} 
++	return -1UL;		
++} 
++
++/* 
++ * Free bootmem based on the e820 table for a node.
++ */
++void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
++{
++	int i;
++	for (i = 0; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i]; 
++		unsigned long last, addr;
++
++		if (ei->type != E820_RAM || 
++		    ei->addr+ei->size <= start || 
++		    ei->addr > end)
++			continue;
++
++		addr = round_up(ei->addr, PAGE_SIZE);
++		if (addr < start) 
++			addr = start;
++
++		last = round_down(ei->addr + ei->size, PAGE_SIZE); 
++		if (last >= end)
++			last = end; 
++
++		if (last > addr && last-addr >= PAGE_SIZE)
++			free_bootmem_node(pgdat, addr, last-addr);
++	}
++}
++
++/*
++ * Find the highest page frame number we have available
++ */
++unsigned long __init e820_end_of_ram(void)
++{
++	int i;
++	unsigned long end_pfn = 0;
++	
++	for (i = 0; i < e820.nr_map; i++) {
++		struct e820entry *ei = &e820.map[i]; 
++		unsigned long start, end;
++
++		start = round_up(ei->addr, PAGE_SIZE); 
++		end = round_down(ei->addr + ei->size, PAGE_SIZE); 
++		if (start >= end)
++			continue;
++		if (ei->type == E820_RAM) { 
++		if (end > end_pfn<<PAGE_SHIFT)
++			end_pfn = end>>PAGE_SHIFT;
++		} else { 
++			if (end > end_pfn_map<<PAGE_SHIFT) 
++				end_pfn_map = end>>PAGE_SHIFT;
++		} 
++	}
++
++	if (end_pfn > end_pfn_map) 
++		end_pfn_map = end_pfn;
++	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
++		end_pfn_map = MAXMEM>>PAGE_SHIFT;
++	if (end_pfn > end_user_pfn)
++		end_pfn = end_user_pfn;
++	if (end_pfn > end_pfn_map) 
++		end_pfn = end_pfn_map; 
++
++	return end_pfn;	
++}
++
++/* 
++ * Mark e820 reserved areas as busy for the resource manager.
++ */
++void __init e820_reserve_resources(void)
++{
++	int i;
++	for (i = 0; i < e820.nr_map; i++) {
++		struct resource *res;
++		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
++			continue;
++		res = alloc_bootmem_low(sizeof(struct resource));
++		switch (e820.map[i].type) {
++		case E820_RAM:	res->name = "System RAM"; break;
++		case E820_ACPI:	res->name = "ACPI Tables"; break;
++		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
++		default:	res->name = "reserved";
++		}
++		res->start = e820.map[i].addr;
++		res->end = res->start + e820.map[i].size - 1;
++		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++		request_resource(&iomem_resource, res);
++		if (e820.map[i].type == E820_RAM) {
++			/*
++			 *  We don't know which RAM region contains kernel data,
++			 *  so we try it repeatedly and let the resource manager
++			 *  test it.
++			 */
++			request_resource(res, &code_resource);
++			request_resource(res, &data_resource);
++		}
++	}
++}
++
++void __init e820_print_map(char *who)
++{
++	int i;
++
++	for (i = 0; i < e820.nr_map; i++) {
++		printk(" %s: %016Lx - %016Lx ", who,
++			(unsigned long long) e820.map[i].addr,
++			(unsigned long long) (e820.map[i].addr + e820.map[i].size));
++		switch (e820.map[i].type) {
++		case E820_RAM:	printk("(usable)\n");
++				break;
++		case E820_RESERVED:
++				printk("(reserved)\n");
++				break;
++		case E820_ACPI:
++				printk("(ACPI data)\n");
++				break;
++		case E820_NVS:
++				printk("(ACPI NVS)\n");
++				break;
++		default:	printk("type %u\n", e820.map[i].type);
++				break;
++		}
++	}
++}
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries.  The following 
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++	struct change_member {
++		struct e820entry *pbios; /* pointer to original bios entry */
++		unsigned long long addr; /* address for this change point */
++	};
++	static struct change_member change_point_list[2*E820MAX] __initdata;
++	static struct change_member *change_point[2*E820MAX] __initdata;
++	static struct e820entry *overlap_list[E820MAX] __initdata;
++	static struct e820entry new_bios[E820MAX] __initdata;
++	struct change_member *change_tmp;
++	unsigned long current_type, last_type;
++	unsigned long long last_addr;
++	int chgidx, still_changing;
++	int overlap_entries;
++	int new_bios_entry;
++	int old_nr, new_nr, chg_nr;
++	int i;
++
++	/*
++		Visually we're performing the following (1,2,3,4 = memory types)...
++
++		Sample memory map (w/overlaps):
++		   ____22__________________
++		   ______________________4_
++		   ____1111________________
++		   _44_____________________
++		   11111111________________
++		   ____________________33__
++		   ___________44___________
++		   __________33333_________
++		   ______________22________
++		   ___________________2222_
++		   _________111111111______
++		   _____________________11_
++		   _________________4______
++
++		Sanitized equivalent (no overlap):
++		   1_______________________
++		   _44_____________________
++		   ___1____________________
++		   ____22__________________
++		   ______11________________
++		   _________1______________
++		   __________3_____________
++		   ___________44___________
++		   _____________33_________
++		   _______________2________
++		   ________________1_______
++		   _________________4______
++		   ___________________2____
++		   ____________________33__
++		   ______________________4_
++	*/
++
++	/* if there's only one memory region, don't bother */
++	if (*pnr_map < 2)
++		return -1;
++
++	old_nr = *pnr_map;
++
++	/* bail out if we find any unreasonable addresses in bios map */
++	for (i=0; i<old_nr; i++)
++		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++			return -1;
++
++	/* create pointers for initial change-point information (for sorting) */
++	for (i=0; i < 2*old_nr; i++)
++		change_point[i] = &change_point_list[i];
++
++	/* record all known change-points (starting and ending addresses),
++	   omitting those that are for empty memory regions */
++	chgidx = 0;
++	for (i=0; i < old_nr; i++)	{
++		if (biosmap[i].size != 0) {
++			change_point[chgidx]->addr = biosmap[i].addr;
++			change_point[chgidx++]->pbios = &biosmap[i];
++			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++			change_point[chgidx++]->pbios = &biosmap[i];
++		}
++	}
++	chg_nr = chgidx;
++
++	/* sort change-point list by memory addresses (low -> high) */
++	still_changing = 1;
++	while (still_changing)	{
++		still_changing = 0;
++		for (i=1; i < chg_nr; i++)  {
++			/* if <current_addr> > <last_addr>, swap */
++			/* or, if current=<start_addr> & last=<end_addr>, swap */
++			if ((change_point[i]->addr < change_point[i-1]->addr) ||
++				((change_point[i]->addr == change_point[i-1]->addr) &&
++				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
++				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++			   )
++			{
++				change_tmp = change_point[i];
++				change_point[i] = change_point[i-1];
++				change_point[i-1] = change_tmp;
++				still_changing=1;
++			}
++		}
++	}
++
++	/* create a new bios memory map, removing overlaps */
++	overlap_entries=0;	 /* number of entries in the overlap table */
++	new_bios_entry=0;	 /* index for creating new bios map entries */
++	last_type = 0;		 /* start with undefined memory type */
++	last_addr = 0;		 /* start with 0 as last starting address */
++	/* loop through change-points, determining effect on the new bios map */
++	for (chgidx=0; chgidx < chg_nr; chgidx++)
++	{
++		/* keep track of all overlapping bios entries */
++		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++		{
++			/* add map entry to overlap list (> 1 entry implies an overlap) */
++			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++		}
++		else
++		{
++			/* remove entry from list (order independent, so swap with last) */
++			for (i=0; i<overlap_entries; i++)
++			{
++				if (overlap_list[i] == change_point[chgidx]->pbios)
++					overlap_list[i] = overlap_list[overlap_entries-1];
++			}
++			overlap_entries--;
++		}
++		/* if there are overlapping entries, decide which "type" to use */
++		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++		current_type = 0;
++		for (i=0; i<overlap_entries; i++)
++			if (overlap_list[i]->type > current_type)
++				current_type = overlap_list[i]->type;
++		/* continue building up new bios map based on this information */
++		if (current_type != last_type)	{
++			if (last_type != 0)	 {
++				new_bios[new_bios_entry].size =
++					change_point[chgidx]->addr - last_addr;
++				/* move forward only if the new size was non-zero */
++				if (new_bios[new_bios_entry].size != 0)
++					if (++new_bios_entry >= E820MAX)
++						break; 	/* no more space left for new bios entries */
++			}
++			if (current_type != 0)	{
++				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++				new_bios[new_bios_entry].type = current_type;
++				last_addr=change_point[chgidx]->addr;
++			}
++			last_type = current_type;
++		}
++	}
++	new_nr = new_bios_entry;   /* retain count for new bios entries */
++
++	/* copy new bios mapping into original location */
++	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++	*pnr_map = new_nr;
++
++	return 0;
++}
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it..
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory.  If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and most every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++	/* Only one memory region (or negative)? Ignore it */
++	if (nr_map < 2)
++		return -1;
++
++	do {
++		unsigned long start = biosmap->addr;
++		unsigned long size = biosmap->size;
++		unsigned long end = start + size;
++		unsigned long type = biosmap->type;
++
++		/* Overflow in 64 bits? Ignore the memory map. */
++		if (start > end)
++			return -1;
++
++		/*
++		 * Some BIOSes claim RAM in the 640k - 1M region.
++		 * Not right. Fix it up.
++		 * 
++		 * This should be removed on Hammer which is supposed to not
++		 * have non e820 covered ISA mappings there, but I had some strange
++		 * problems so it stays for now.  -AK
++		 */
++		if (type == E820_RAM) {
++			if (start < 0x100000ULL && end > 0xA0000ULL) {
++				if (start < 0xA0000ULL)
++					add_memory_region(start, 0xA0000ULL-start, type);
++				if (end <= 0x100000ULL)
++					continue;
++				start = 0x100000ULL;
++				size = end - start;
++			}
++		}
++
++		add_memory_region(start, size, type);
++	} while (biosmap++,--nr_map);
++	return 0;
++}
++
++void __init setup_memory_region(void)
++{
++	char *who = "BIOS-e820";
++
++	/*
++	 * Try to copy the BIOS-supplied E820-map.
++	 *
++	 * Otherwise fake a memory map; one section from 0k->640k,
++	 * the next section from 1mb->appropriate_mem_k
++	 */
++	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
++	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
++		unsigned long mem_size;
++
++		/* compare results from other methods and take the greater */
++		if (ALT_MEM_K < EXT_MEM_K) {
++			mem_size = EXT_MEM_K;
++			who = "BIOS-88";
++		} else {
++			mem_size = ALT_MEM_K;
++			who = "BIOS-e801";
++		}
++
++		e820.nr_map = 0;
++		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
++		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
++  	}
++	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++	e820_print_map(who);
++}
++
++#else  /* CONFIG_XEN */
++
++extern unsigned long xen_override_max_pfn;
++extern union xen_start_info_union xen_start_info_union;
++
++unsigned long __init e820_end_of_ram(void)
++{
++	unsigned long max_end_pfn;
++
++	if (xen_override_max_pfn == 0) {
++		max_end_pfn = xen_start_info->nr_pages;
++		/* Default 8MB slack (to balance backend allocations). */
++		max_end_pfn += 8 << (20 - PAGE_SHIFT);
++	} else if (xen_override_max_pfn > xen_start_info->nr_pages) {
++		max_end_pfn = xen_override_max_pfn;
++	} else {
++		max_end_pfn = xen_start_info->nr_pages;
++	}
++
++	return max_end_pfn;
++}
++
++void __init e820_reserve_resources(void) 
++{
++	dom0_op_t op;
++	struct dom0_memory_map_entry *map;
++	unsigned long gapstart, gapsize, last;
++	int i, found = 0;
++
++	if (!(xen_start_info->flags & SIF_INITDOMAIN))
++		return;
++
++	map = alloc_bootmem_low_pages(PAGE_SIZE);
++	op.cmd = DOM0_PHYSICAL_MEMORY_MAP;
++	op.u.physical_memory_map.memory_map = map;
++	op.u.physical_memory_map.max_map_entries =
++		PAGE_SIZE / sizeof(struct dom0_memory_map_entry);
++	BUG_ON(HYPERVISOR_dom0_op(&op));
++
++	last = 0x100000000ULL;
++	gapstart = 0x10000000;
++	gapsize = 0x400000;
++
++	for (i = op.u.physical_memory_map.nr_map_entries - 1; i >= 0; i--) {
++		struct resource *res;
++
++		if ((last > map[i].end) && ((last - map[i].end) > gapsize)) {
++			gapsize = last - map[i].end;
++			gapstart = map[i].end;
++			found = 1;
++		}
++		if (map[i].start < last)
++			last = map[i].start;
++
++		if (map[i].end > 0x100000000ULL)
++			continue;
++		res = alloc_bootmem_low(sizeof(struct resource));
++		res->name = map[i].is_ram ? "System RAM" : "reserved";
++		res->start = map[i].start;
++		res->end = map[i].end - 1;
++		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++		request_resource(&iomem_resource, res);
++	}
++
++	free_bootmem(__pa(map), PAGE_SIZE);
++
++	if (!found) {
++		gapstart = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++		gapstart = (gapstart << PAGE_SHIFT) + 1024*1024;
++		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
++		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
++	}
++
++	/*
++	 * Start allocating dynamic PCI memory a bit into the gap,
++	 * aligned up to the nearest megabyte.
++	 *
++	 * Question: should we try to pad it up a bit (do something
++	 * like " + (gapsize >> 3)" in there too?). We now have the
++	 * technology.
++	 */
++	pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
++
++	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
++		pci_mem_start, gapstart, gapsize);
++}
++
++#endif
++
++void __init parse_memopt(char *p, char **from) 
++{ 
++	end_user_pfn = memparse(p, from);
++	end_user_pfn >>= PAGE_SHIFT;	
++	xen_override_max_pfn = (unsigned long) end_user_pfn;
++} 
++
++/*
++ * Search for the biggest gap in the low 32 bits of the e820
++ * memory space.  We pass this space to PCI to assign MMIO resources
++ * for hotplug or unconfigured devices in.
++ * Hopefully the BIOS left enough space.
++ */
++__init void e820_setup_gap(void)
++{
++#ifndef CONFIG_XEN
++	unsigned long gapstart, gapsize;
++	unsigned long last;
++	int i;
++	int found = 0;
++
++	last = 0x100000000ull;
++	gapstart = 0x10000000;
++	gapsize = 0x400000;
++	i = e820.nr_map;
++	while (--i >= 0) {
++		unsigned long long start = e820.map[i].addr;
++		unsigned long long end = start + e820.map[i].size;
++
++		/*
++		 * Since "last" is at most 4GB, we know we'll
++		 * fit in 32 bits if this condition is true
++		 */
++		if (last > end) {
++			unsigned long gap = last - end;
++
++			if (gap > gapsize) {
++				gapsize = gap;
++				gapstart = end;
++				found = 1;
++			}
++		}
++		if (start < last)
++			last = start;
++	}
++
++	if (!found) {
++		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
++		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
++		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
++	}
++
++	/*
++	 * Start allocating dynamic PCI memory a bit into the gap,
++	 * aligned up to the nearest megabyte.
++	 *
++	 * Question: should we try to pad it up a bit (do something
++	 * like " + (gapsize >> 3)" in there too?). We now have the
++	 * technology.
++	 */
++	pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
++
++	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
++		pci_mem_start, gapstart, gapsize);
++#endif
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/early_printk.c linux-2.6.12-xen/arch/xen/x86_64/kernel/early_printk.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/early_printk.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/early_printk.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,251 @@
++#include <linux/config.h>
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/string.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++
++#ifndef CONFIG_XEN
++
++/* Simple VGA output */
++
++#ifdef __i386__
++#define VGABASE		(__ISA_IO_base + 0xb8000)
++#else
++#define VGABASE		((void __iomem *)0xffffffff800b8000UL)
++#endif
++
++#define MAX_YPOS	25
++#define MAX_XPOS	80
++
++static int current_ypos = 1, current_xpos = 0; 
++
++static void early_vga_write(struct console *con, const char *str, unsigned n)
++{
++	char c;
++	int  i, k, j;
++
++	while ((c = *str++) != '\0' && n-- > 0) {
++		if (current_ypos >= MAX_YPOS) {
++			/* scroll 1 line up */
++			for (k = 1, j = 0; k < MAX_YPOS; k++, j++) {
++				for (i = 0; i < MAX_XPOS; i++) {
++					writew(readw(VGABASE + 2*(MAX_XPOS*k + i)),
++					       VGABASE + 2*(MAX_XPOS*j + i));
++				}
++			}
++			for (i = 0; i < MAX_XPOS; i++)
++				writew(0x720, VGABASE + 2*(MAX_XPOS*j + i));
++			current_ypos = MAX_YPOS-1;
++		}
++		if (c == '\n') {
++			current_xpos = 0;
++			current_ypos++;
++		} else if (c != '\r')  {
++			writew(((0x7 << 8) | (unsigned short) c),
++			       VGABASE + 2*(MAX_XPOS*current_ypos +
++						current_xpos++));
++			if (current_xpos >= MAX_XPOS) {
++				current_xpos = 0;
++				current_ypos++;
++			}
++		}
++	}
++}
++
++static struct console early_vga_console = {
++	.name =		"earlyvga",
++	.write =	early_vga_write,
++	.flags =	CON_PRINTBUFFER,
++	.index =	-1,
++};
++
++/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */ 
++
++static int early_serial_base = 0x3f8;  /* ttyS0 */
++
++#define XMTRDY          0x20
++
++#define DLAB		0x80
++
++#define TXR             0       /*  Transmit register (WRITE) */
++#define RXR             0       /*  Receive register  (READ)  */
++#define IER             1       /*  Interrupt Enable          */
++#define IIR             2       /*  Interrupt ID              */
++#define FCR             2       /*  FIFO control              */
++#define LCR             3       /*  Line control              */
++#define MCR             4       /*  Modem control             */
++#define LSR             5       /*  Line Status               */
++#define MSR             6       /*  Modem Status              */
++#define DLL             0       /*  Divisor Latch Low         */
++#define DLH             1       /*  Divisor latch High        */
++
++static int early_serial_putc(unsigned char ch) 
++{ 
++	unsigned timeout = 0xffff; 
++	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout) 
++		cpu_relax();
++	outb(ch, early_serial_base + TXR);
++	return timeout ? 0 : -1;
++} 
++
++static void early_serial_write(struct console *con, const char *s, unsigned n)
++{
++	while (*s && n-- > 0) { 
++		early_serial_putc(*s); 
++		if (*s == '\n') 
++			early_serial_putc('\r'); 
++		s++; 
++	} 
++} 
++
++#define DEFAULT_BAUD 9600
++
++static __init void early_serial_init(char *s)
++{
++	unsigned char c; 
++	unsigned divisor;
++	unsigned baud = DEFAULT_BAUD;
++	char *e;
++
++	if (*s == ',')
++		++s;
++
++	if (*s) {
++		unsigned port; 
++		if (!strncmp(s,"0x",2)) {
++			early_serial_base = simple_strtoul(s, &e, 16);
++		} else {
++			static int bases[] = { 0x3f8, 0x2f8 };
++
++			if (!strncmp(s,"ttyS",4))
++				s += 4;
++			port = simple_strtoul(s, &e, 10);
++			if (port > 1 || s == e)
++				port = 0;
++			early_serial_base = bases[port];
++		}
++		s += strcspn(s, ",");
++		if (*s == ',')
++			s++;
++	}
++
++	outb(0x3, early_serial_base + LCR);	/* 8n1 */
++	outb(0, early_serial_base + IER);	/* no interrupt */
++	outb(0, early_serial_base + FCR);	/* no fifo */
++	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */
++
++	if (*s) {
++		baud = simple_strtoul(s, &e, 0); 
++		if (baud == 0 || s == e) 
++			baud = DEFAULT_BAUD;
++	} 
++	
++	divisor = 115200 / baud; 
++	c = inb(early_serial_base + LCR); 
++	outb(c | DLAB, early_serial_base + LCR); 
++	outb(divisor & 0xff, early_serial_base + DLL); 
++	outb((divisor >> 8) & 0xff, early_serial_base + DLH); 
++	outb(c & ~DLAB, early_serial_base + LCR);
++}
++
++#else /* CONFIG_XEN */
++
++static void
++early_serial_write(struct console *con, const char *s, unsigned count)
++{
++	int n;
++
++	while (count > 0) {
++		n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
++		if (n <= 0)
++			break;
++		count -= n;
++		s += n;
++	}
++} 
++
++static __init void early_serial_init(char *s)
++{
++}
++
++/*
++ * No early VGA console on Xen, as we do not have convenient ISA-space
++ * mappings. Someone should fix this for domain 0. For now, use fake serial.
++ */
++#define early_vga_console early_serial_console
++
++#endif
++
++static struct console early_serial_console = {
++	.name =		"earlyser",
++	.write =	early_serial_write,
++	.flags =	CON_PRINTBUFFER,
++	.index =	-1,
++};
++
++/* Direct interface for emergencies */
++struct console *early_console = &early_vga_console;
++static int early_console_initialized = 0;
++
++void early_printk(const char *fmt, ...)
++{ 
++	char buf[512]; 
++	int n; 
++	va_list ap;
++
++	va_start(ap,fmt); 
++	n = vscnprintf(buf,512,fmt,ap);
++	early_console->write(early_console,buf,n);
++	va_end(ap); 
++} 
++
++static int keep_early; 
++
++int __init setup_early_printk(char *opt) 
++{  
++	char *space;
++	char buf[256]; 
++
++	if (early_console_initialized)
++		return -1;
++
++	opt = strchr(opt, '=') + 1;
++
++	strlcpy(buf,opt,sizeof(buf)); 
++	space = strchr(buf, ' '); 
++	if (space)
++		*space = 0; 
++
++	if (strstr(buf,"keep"))
++		keep_early = 1; 
++
++	if (!strncmp(buf, "serial", 6)) { 
++		early_serial_init(buf + 6);
++		early_console = &early_serial_console;
++	} else if (!strncmp(buf, "ttyS", 4)) { 
++		early_serial_init(buf);
++		early_console = &early_serial_console;		
++	} else if (!strncmp(buf, "vga", 3)) {
++		early_console = &early_vga_console; 
++	}
++	early_console_initialized = 1;
++	register_console(early_console);       
++	return 0;
++}
++
++void __init disable_early_printk(void)
++{ 
++	if (!early_console_initialized || !early_console)
++		return;
++	if (!keep_early) {
++		printk("disabling early console\n");
++		unregister_console(early_console);
++		early_console_initialized = 0;
++	} else { 
++		printk("keeping early console\n");
++	}
++} 
++
++__setup("earlyprintk=", setup_early_printk);
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/entry.S linux-2.6.12-xen/arch/xen/x86_64/kernel/entry.S
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/entry.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/entry.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,1019 @@
++/*
++ *  linux/arch/x86_64/entry.S
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
++ *  Copyright (C) 2000  Pavel Machek <pavel at suse.cz>
++ * 
++ *  $Id$
++ *
++ *  Jun Nakajima <jun.nakajima at intel.com>
++ *  Asit Mallick <asit.k.mallick at intel.com>
++ *      Modified for Xen
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after an interrupt and after each system call.
++ * 
++ * Normal syscalls and interrupts don't save a full stack frame; this is
++ * only done for syscall tracing, signals, or fork/exec et al.
++ * 
++ * A note on terminology:	 
++ * - top of stack: Architecture defined interrupt frame from SS to RIP 
++ * at the top of the kernel process stack.	
++ * - partial stack frame: partially saved registers up to R11.
++ * - full stack frame: like the partial stack frame, but with all registers saved.
++ *	
++ * TODO:	 
++ * - schedule it carefully for the final hardware.
++ */
++
++#define ASSEMBLY 1
++#include <linux/config.h>
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/cache.h>
++#include <asm/errno.h>
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/asm_offset.h>
++#include <asm/msr.h>
++#include <asm/unistd.h>
++#include <asm/thread_info.h>
++#include <asm/hw_irq.h>
++#include <asm/errno.h>
++#include <asm-xen/xen-public/arch-x86_64.h>
++
++#include "irq_vectors.h"
++
++#include "xen_entry.S"
++	
++	.code64
++
++#ifndef CONFIG_PREEMPT
++#define retint_kernel retint_restore_args
++#endif	
++	
++/*
++ * C code is not supposed to know about undefined top of stack. Every time 
++ * a C function with an pt_regs argument is called from the SYSCALL based 
++ * fast path FIXUP_TOP_OF_STACK is needed.
++ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
++ * manipulation.
++ */        	
++		
++	/* %rsp:at FRAMEEND */ 
++	.macro FIXUP_TOP_OF_STACK tmp
++	movq    $__USER_CS,CS(%rsp)
++	movq 	$-1,RCX(%rsp)
++	.endm
++
++	.macro RESTORE_TOP_OF_STACK tmp,offset=0
++	.endm
++
++	.macro FAKE_STACK_FRAME child_rip
++	/* push in order ss, rsp, eflags, cs, rip */
++	xorq %rax, %rax
++	pushq %rax /* ss */
++	CFI_ADJUST_CFA_OFFSET	8
++	pushq %rax /* rsp */
++	CFI_ADJUST_CFA_OFFSET	8
++	CFI_OFFSET	rip,0
++	pushq $(1<<9) /* eflags - interrupts on */
++	CFI_ADJUST_CFA_OFFSET	8
++	pushq $__KERNEL_CS /* cs */
++	CFI_ADJUST_CFA_OFFSET	8
++	pushq \child_rip /* rip */
++	CFI_ADJUST_CFA_OFFSET	8
++	CFI_OFFSET	rip,0
++	pushq	%rax /* orig rax */
++	CFI_ADJUST_CFA_OFFSET	8
++	.endm
++
++	.macro UNFAKE_STACK_FRAME
++	addq $8*6, %rsp
++	CFI_ADJUST_CFA_OFFSET	-(6*8)
++	.endm
++
++	.macro	CFI_DEFAULT_STACK
++	CFI_ADJUST_CFA_OFFSET  (SS)
++	CFI_OFFSET	r15,R15-SS
++	CFI_OFFSET	r14,R14-SS
++	CFI_OFFSET	r13,R13-SS
++	CFI_OFFSET	r12,R12-SS
++	CFI_OFFSET	rbp,RBP-SS
++	CFI_OFFSET	rbx,RBX-SS
++	CFI_OFFSET	r11,R11-SS
++	CFI_OFFSET	r10,R10-SS
++	CFI_OFFSET	r9,R9-SS
++	CFI_OFFSET	r8,R8-SS
++	CFI_OFFSET	rax,RAX-SS
++	CFI_OFFSET	rcx,RCX-SS
++	CFI_OFFSET	rdx,RDX-SS
++	CFI_OFFSET	rsi,RSI-SS
++	CFI_OFFSET	rdi,RDI-SS
++	CFI_OFFSET	rsp,RSP-SS
++	CFI_OFFSET	rip,RIP-SS
++	.endm
++
++        /*
++         * Must be consistent with the definition in arch-x86_64.h:    
++         *     struct iret_context {
++         *        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++         *     };
++         * #define VGCF_IN_SYSCALL (1<<8) 
++         */
++	.macro HYPERVISOR_IRET flag
++	pushq $\flag
++	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
++	.endm
++
++        .macro SWITCH_TO_KERNEL ssoff,adjust=0
++	jc  1f
++	orb  $1,\ssoff-\adjust+4(%rsp)
++1:
++        .endm
++
++/*
++ * A newly forked process directly context switches into this.
++ */ 	
++/* rdi:	prev */	
++ENTRY(ret_from_fork)
++	CFI_STARTPROC
++	CFI_DEFAULT_STACK
++	call schedule_tail
++	GET_THREAD_INFO(%rcx)
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
++	jnz rff_trace
++rff_action:	
++	RESTORE_REST
++	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
++	je   int_ret_from_sys_call
++	testl $_TIF_IA32,threadinfo_flags(%rcx)
++	jnz  int_ret_from_sys_call
++	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
++	jmp ret_from_sys_call
++rff_trace:
++	movq %rsp,%rdi
++	call syscall_trace_leave
++	GET_THREAD_INFO(%rcx)	
++	jmp rff_action
++	CFI_ENDPROC
++
++/*
++ * System call entry. Up to 6 arguments in registers are supported.
++ *
++ * SYSCALL does not save anything on the stack and does not change the
++ * stack pointer.
++ */
++		
++/*
++ * Register setup:	
++ * rax  system call number
++ * rdi  arg0
++ * rcx  return address for syscall/sysret, C arg3 
++ * rsi  arg1
++ * rdx  arg2	
++ * r10  arg3 	(--> moved to rcx for C)
++ * r8   arg4
++ * r9   arg5
++ * r11  eflags for syscall/sysret, temporary for C
++ * r12-r15,rbp,rbx saved by C code, not touched. 		
++ * 
++ * Interrupts are off on entry.
++ * Only called from user space.
++ *
++ * XXX	if we had a free scratch register we could save the RSP into the stack frame
++ *      and report it properly in ps. Unfortunately we don't have one.
++ */ 			 		
++
++ENTRY(system_call)
++	CFI_STARTPROC
++	SAVE_ARGS -8,0
++	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
++        XEN_UNBLOCK_EVENTS(%r11)        
++	GET_THREAD_INFO(%rcx)
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
++	jnz tracesys
++	cmpq $__NR_syscall_max,%rax
++	ja badsys
++	movq %r10,%rcx
++	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
++	movq %rax,RAX-ARGOFFSET(%rsp)
++/*
++ * Syscall return path ending with SYSRET (fast path)
++ * Has incomplete stack frame and undefined top of stack. 
++ */		
++	.globl ret_from_sys_call
++ret_from_sys_call:
++	movl $_TIF_ALLWORK_MASK,%edi
++	/* edi:	flagmask */
++sysret_check:		
++	GET_THREAD_INFO(%rcx)
++        XEN_BLOCK_EVENTS(%rsi)        
++	movl threadinfo_flags(%rcx),%edx
++	andl %edi,%edx
++	jnz  sysret_careful 
++        XEN_UNBLOCK_EVENTS(%rsi)                
++	RESTORE_ARGS 0,8,0
++        HYPERVISOR_IRET VGCF_IN_SYSCALL
++
++	/* Handle reschedules */
++	/* edx:	work, edi: workmask */	
++sysret_careful:
++	bt $TIF_NEED_RESCHED,%edx
++	jnc sysret_signal
++        XEN_BLOCK_EVENTS(%rsi)        
++	pushq %rdi
++	call schedule
++	popq  %rdi
++	jmp sysret_check
++
++	/* Handle a signal */ 
++sysret_signal:
++/*	sti */
++        XEN_UNBLOCK_EVENTS(%rsi)        
++	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++	jz    1f
++
++	/* Really a signal */
++	/* edx:	work flags (arg3) */
++	leaq do_notify_resume(%rip),%rax
++	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
++	xorl %esi,%esi # oldset -> arg2
++	call ptregscall_common
++1:	movl $_TIF_NEED_RESCHED,%edi
++	jmp sysret_check
++	
++	/* Do syscall tracing */
++tracesys:			 
++	SAVE_REST
++	movq $-ENOSYS,RAX(%rsp)
++	FIXUP_TOP_OF_STACK %rdi
++	movq %rsp,%rdi
++	call syscall_trace_enter
++	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
++	RESTORE_REST
++	cmpq $__NR_syscall_max,%rax
++	ja  1f
++	movq %r10,%rcx	/* fixup for C */
++	call *sys_call_table(,%rax,8)
++	movq %rax,RAX-ARGOFFSET(%rsp)
++1:	SAVE_REST
++	movq %rsp,%rdi
++	call syscall_trace_leave
++	RESTORE_TOP_OF_STACK %rbx
++	RESTORE_REST
++	jmp ret_from_sys_call
++		
++badsys:
++	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)	
++	jmp ret_from_sys_call
++
++/* 
++ * Syscall return path ending with IRET.
++ * Has correct top of stack, but partial stack frame.
++ */ 	
++ENTRY(int_ret_from_sys_call)	
++        XEN_BLOCK_EVENTS(%rsi)
++	testb $3,CS-ARGOFFSET(%rsp)
++        jnz 1f
++        /* Need to set the proper %ss (not NULL) for ring 3 iretq */
++        movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
++        jmp retint_restore_args   # return from ring3 kernel
++1:              
++	movl $_TIF_ALLWORK_MASK,%edi
++	/* edi:	mask to check */
++int_with_check:
++	GET_THREAD_INFO(%rcx)
++	movl threadinfo_flags(%rcx),%edx
++	andl %edi,%edx
++	jnz   int_careful
++	jmp   retint_restore_args
++
++	/* Either reschedule or signal or syscall exit tracking needed. */
++	/* First do a reschedule test. */
++	/* edx:	work, edi: workmask */
++int_careful:
++	bt $TIF_NEED_RESCHED,%edx
++	jnc  int_very_careful
++/*	sti */
++        XEN_UNBLOCK_EVENTS(%rsi)
++	pushq %rdi
++	call schedule
++	popq %rdi
++	cli
++	jmp int_with_check
++
++	/* handle signals and tracing -- both require a full stack frame */
++int_very_careful:
++/*	sti */
++        XEN_UNBLOCK_EVENTS(%rsi)
++	SAVE_REST
++	/* Check for syscall exit trace */	
++	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
++	jz int_signal
++	pushq %rdi
++	leaq 8(%rsp),%rdi	# &ptregs -> arg1	
++	call syscall_trace_leave
++	popq %rdi
++	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
++	cli
++	jmp int_restore_rest
++	
++int_signal:
++	testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
++	jz 1f
++	movq %rsp,%rdi		# &ptregs -> arg1
++	xorl %esi,%esi		# oldset -> arg2
++	call do_notify_resume
++1:	movl $_TIF_NEED_RESCHED,%edi	
++int_restore_rest:
++	RESTORE_REST
++	cli
++	jmp int_with_check
++	CFI_ENDPROC
++		
++/* 
++ * Certain special system calls that need to save a complete full stack frame.
++ */ 								
++	
++	.macro PTREGSCALL label,func,arg
++	.globl \label
++\label:
++	leaq	\func(%rip),%rax
++	leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
++	jmp	ptregscall_common
++	.endm
++
++	PTREGSCALL stub_clone, sys_clone, %r8
++	PTREGSCALL stub_fork, sys_fork, %rdi
++	PTREGSCALL stub_vfork, sys_vfork, %rdi
++	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
++	PTREGSCALL stub_iopl, sys_iopl, %rsi
++
++ENTRY(ptregscall_common)
++	CFI_STARTPROC
++	popq %r11
++	CFI_ADJUST_CFA_OFFSET	-8
++	SAVE_REST
++	movq %r11, %r15
++	FIXUP_TOP_OF_STACK %r11
++	call *%rax
++	RESTORE_TOP_OF_STACK %r11
++	movq %r15, %r11
++	RESTORE_REST
++	pushq %r11
++	CFI_ADJUST_CFA_OFFSET	8
++	ret
++	CFI_ENDPROC
++	
++ENTRY(stub_execve)
++	CFI_STARTPROC
++	popq %r11
++	CFI_ADJUST_CFA_OFFSET	-8
++	SAVE_REST
++	movq %r11, %r15
++	FIXUP_TOP_OF_STACK %r11
++	call sys_execve
++	GET_THREAD_INFO(%rcx)
++	bt $TIF_IA32,threadinfo_flags(%rcx)
++	jc exec_32bit
++	RESTORE_TOP_OF_STACK %r11
++	movq %r15, %r11
++	RESTORE_REST
++	push %r11
++	ret
++
++exec_32bit:
++	CFI_ADJUST_CFA_OFFSET	REST_SKIP
++	movq %rax,RAX(%rsp)
++	RESTORE_REST
++	jmp int_ret_from_sys_call
++	CFI_ENDPROC
++	
++/*
++ * sigreturn is special because it needs to restore all registers on return.
++ * This cannot be done with SYSRET, so use the IRET return path instead.
++ */                
++ENTRY(stub_rt_sigreturn)
++	CFI_STARTPROC
++	addq $8, %rsp		
++	SAVE_REST
++	movq %rsp,%rdi
++	FIXUP_TOP_OF_STACK %r11
++	call sys_rt_sigreturn
++	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
++	RESTORE_REST
++	jmp int_ret_from_sys_call
++	CFI_ENDPROC
++
++/* 
++ * Interrupt entry/exit.
++ *
++ * Interrupt entry points save only callee clobbered registers in fast path.
++ *	
++ * Entry runs with interrupts off.	
++ */ 
++
++/* 0(%rsp): interrupt number */ 
++	.macro interrupt func
++	CFI_STARTPROC	simple
++	CFI_DEF_CFA	rsp,(SS-RDI)
++	CFI_REL_OFFSET	rsp,(RSP-ORIG_RAX)
++	CFI_REL_OFFSET	rip,(RIP-ORIG_RAX)
++	cld
++#ifdef CONFIG_DEBUG_INFO
++	SAVE_ALL	
++	movq %rsp,%rdi
++	/*
++	 * Setup a stack frame pointer.  This allows gdb to trace
++	 * back to the original stack.
++	 */
++	movq %rsp,%rbp
++	CFI_DEF_CFA_REGISTER	rbp
++#else		
++	SAVE_ARGS
++	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
++#endif	
++#if 0 /* For Xen we don't need to do this */       
++	testl $3,CS(%rdi)
++	je 1f
++	swapgs	
++#endif        
++1:	addl $1,%gs:pda_irqcount	# RED-PEN should check preempt count
++	movq %gs:pda_irqstackptr,%rax
++	cmoveq %rax,%rsp							
++	pushq %rdi			# save old stack	
++	call \func
++	.endm
++
++retint_check:			
++	movl threadinfo_flags(%rcx),%edx
++	andl %edi,%edx
++	jnz  retint_careful
++retint_restore_args:
++        movb EVENT_MASK-REST_SKIP(%rsp), %al
++        notb %al			# %al == ~saved_mask
++        XEN_GET_VCPU_INFO(%rsi)
++        andb evtchn_upcall_mask(%rsi),%al
++	andb $1,%al			# %al == mask & ~saved_mask
++	jnz restore_all_enable_events	# != 0 => reenable event delivery      
++        XEN_PUT_VCPU_INFO(%rsi)
++		
++	RESTORE_ARGS 0,8,0						
++	testb $3,8(%rsp)                # check CS
++	jnz  user_mode
++kernel_mode:
++        orb   $3,1*8(%rsp)
++	iretq
++user_mode:
++	HYPERVISOR_IRET 0
++	
++	/* edi: workmask, edx: work */	
++retint_careful:
++	bt    $TIF_NEED_RESCHED,%edx
++	jnc   retint_signal
++	XEN_UNBLOCK_EVENTS(%rsi)
++/*	sti */        
++	pushq %rdi
++	call  schedule
++	popq %rdi		
++	XEN_BLOCK_EVENTS(%rsi)		
++	GET_THREAD_INFO(%rcx)
++/*	cli */
++	jmp retint_check
++	
++retint_signal:
++	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++	jz    retint_restore_args
++        XEN_UNBLOCK_EVENTS(%rsi)
++	SAVE_REST
++	movq $-1,ORIG_RAX(%rsp) 			
++	xorq %rsi,%rsi		# oldset
++	movq %rsp,%rdi		# &pt_regs
++	call do_notify_resume
++	RESTORE_REST
++        XEN_BLOCK_EVENTS(%rsi)		
++	movl $_TIF_NEED_RESCHED,%edi
++	GET_THREAD_INFO(%rcx)
++	jmp retint_check
++
++#ifdef CONFIG_PREEMPT
++	/* Returning to kernel space. Check if we need preemption */
++	/* rcx:	 threadinfo. interrupts off. */
++	.p2align
++retint_kernel:	
++	cmpl $0,threadinfo_preempt_count(%rcx)
++	jnz  retint_restore_args
++	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
++	jnc  retint_restore_args
++	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
++	jnc  retint_restore_args
++	call preempt_schedule_irq
++	jmp retint_kernel       /* check again */
++#endif	
++	CFI_ENDPROC
++	
++/*
++ * APIC interrupts.
++ */		
++	.macro apicinterrupt num,func
++	pushq $\num-256
++	interrupt \func
++	jmp error_entry
++	CFI_ENDPROC
++	.endm
++
++#if 0
++ENTRY(reschedule_interrupt)
++	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
++
++ENTRY(invalidate_interrupt)
++	apicinterrupt INVALIDATE_TLB_VECTOR,smp_invalidate_interrupt
++
++ENTRY(call_function_interrupt)
++	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC	
++ENTRY(apic_timer_interrupt)
++	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
++
++ENTRY(error_interrupt)
++	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
++
++ENTRY(spurious_interrupt)
++	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
++#endif
++				
++/*
++ * Exception entry points.
++ */ 		
++	.macro zeroentry sym
++        movq (%rsp),%rcx
++        movq 8(%rsp),%r11
++        addq $0x10,%rsp /* skip rcx and r11 */
++	pushq $0	/* push error code/oldrax */ 
++	pushq %rax	/* push real oldrax to the rdi slot */ 
++	leaq  \sym(%rip),%rax
++	jmp error_entry
++	.endm	
++
++	.macro errorentry sym
++        movq (%rsp),%rcx
++        movq 8(%rsp),%r11
++        addq $0x10,%rsp /* rsp points to the error code */
++	pushq %rax
++	leaq  \sym(%rip),%rax
++	jmp error_entry
++	.endm
++
++#if 0
++	/* error code is on the stack already */
++	/* handle NMI like exceptions that can happen everywhere */
++	.macro paranoidentry sym
++        movq (%rsp),%rcx
++        movq 8(%rsp),%r11
++        addq $0x10,%rsp /* skip rcx and r11 */        
++	SAVE_ALL
++	cld
++	movl $1,%ebx
++	movl  $MSR_GS_BASE,%ecx
++	rdmsr
++	testl %edx,%edx
++	js    1f
++/*	swapgs */
++	xorl  %ebx,%ebx
++1:	movq %rsp,%rdi
++	movq ORIG_RAX(%rsp),%rsi
++	movq $-1,ORIG_RAX(%rsp)
++	call \sym
++	cli
++	.endm
++#endif
++	
++/*
++ * Exception entry point. This expects an error code/orig_rax on the stack
++ * and the exception handler in %rax.	
++ */ 		  				
++ENTRY(error_entry)
++	CFI_STARTPROC	simple
++	CFI_DEF_CFA	rsp,(SS-RDI)
++	CFI_REL_OFFSET	rsp,(RSP-RDI)
++	CFI_REL_OFFSET	rip,(RIP-RDI)
++	/* rdi slot contains rax, oldrax contains error code */
++	cld	
++	subq  $14*8,%rsp
++	CFI_ADJUST_CFA_OFFSET	(14*8)
++	movq %rsi,13*8(%rsp)
++	CFI_REL_OFFSET	rsi,RSI
++	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
++	movq %rdx,12*8(%rsp)
++	CFI_REL_OFFSET	rdx,RDX
++	movq %rcx,11*8(%rsp)
++	CFI_REL_OFFSET	rcx,RCX
++	movq %rsi,10*8(%rsp)	/* store rax */ 
++	CFI_REL_OFFSET	rax,RAX
++	movq %r8, 9*8(%rsp)
++	CFI_REL_OFFSET	r8,R8
++	movq %r9, 8*8(%rsp)
++	CFI_REL_OFFSET	r9,R9
++	movq %r10,7*8(%rsp)
++	CFI_REL_OFFSET	r10,R10
++	movq %r11,6*8(%rsp)
++	CFI_REL_OFFSET	r11,R11
++	movq %rbx,5*8(%rsp) 
++	CFI_REL_OFFSET	rbx,RBX
++	movq %rbp,4*8(%rsp) 
++	CFI_REL_OFFSET	rbp,RBP
++	movq %r12,3*8(%rsp) 
++	CFI_REL_OFFSET	r12,R12
++	movq %r13,2*8(%rsp) 
++	CFI_REL_OFFSET	r13,R13
++	movq %r14,1*8(%rsp) 
++	CFI_REL_OFFSET	r14,R14
++	movq %r15,(%rsp) 
++	CFI_REL_OFFSET	r15,R15
++#if 0        
++	cmpl $__KERNEL_CS,CS(%rsp)
++	je  error_kernelspace
++#endif        
++error_call_handler:
++	movq %rdi, RDI(%rsp)            
++	movq %rsp,%rdi
++	movq ORIG_RAX(%rsp),%rsi	# get error code 
++	movq $-1,ORIG_RAX(%rsp)
++	call *%rax
++error_exit:		
++	RESTORE_REST
++/*	cli */
++	XEN_BLOCK_EVENTS(%rsi)		
++	GET_THREAD_INFO(%rcx)	
++	testb $3,CS-ARGOFFSET(%rsp)
++	jz retint_kernel
++	movl  threadinfo_flags(%rcx),%edx
++	movl  $_TIF_WORK_MASK,%edi	
++	andl  %edi,%edx
++	jnz   retint_careful
++	jmp   retint_restore_args
++
++error_kernelspace:
++         /*
++         * We need to rewrite the logic here because we don't do iretq to
++         * return to user mode. It's still possible to get a trap/fault
++         * in the kernel (when accessing buffers pointed to by system calls,
++         * for example).
++         *
++         */           
++#if 0
++	incl %ebx
++       /* There are two places in the kernel that can potentially fault with
++          usergs. Handle them here. The exception handlers after
++	   iret run with kernel gs again, so don't set the user space flag.
++	   B stepping K8s sometimes report a truncated RIP for IRET
++	   exceptions returning to compat mode. Check for these here too. */
++	leaq iret_label(%rip),%rbp
++	cmpq %rbp,RIP(%rsp) 
++	je   error_swapgs
++	movl %ebp,%ebp	/* zero extend */
++	cmpq %rbp,RIP(%rsp) 
++	je   error_swapgs
++	cmpq $gs_change,RIP(%rsp)
++        je   error_swapgs
++	jmp  error_sti
++#endif        
++	
++ENTRY(hypervisor_callback)
++	zeroentry do_hypervisor_callback
++        
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */               
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++ENTRY(do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
++# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
++# see the correct pointer to the pt_regs
++        addq $8, %rsp            # we don't return, adjust the stack frame
++11:	movb $0, EVENT_MASK(%rsp)         
++	call evtchn_do_upcall
++        jmp  error_exit
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ENTRY(nmi)
++	zeroentry do_nmi_callback
++ENTRY(do_nmi_callback)
++        addq $8, %rsp
++        call do_nmi
++        RESTORE_REST
++        XEN_BLOCK_EVENTS(%rsi)
++        GET_THREAD_INFO(%rcx)
++        jmp  retint_restore_args
++#endif
++
++        ALIGN
++restore_all_enable_events:  
++	XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
++
++scrit:	/**** START OF CRITICAL REGION ****/
++	XEN_TEST_PENDING(%rsi)
++	jnz  14f			# process more events if necessary...
++	XEN_PUT_VCPU_INFO(%rsi)
++        RESTORE_ARGS 0,8,0
++        testb $3,8(%rsp)                # check CS
++        jnz  crit_user_mode
++        orb   $3,1*8(%rsp)
++        iretq
++crit_user_mode:
++        HYPERVISOR_IRET 0
++        
++14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
++	XEN_PUT_VCPU_INFO(%rsi)
++	SAVE_REST
++        movq %rsp,%rdi                  # set the argument again
++	jmp  11b
++ecrit:  /**** END OF CRITICAL REGION ****/
++# At this point, unlike on x86-32, we don't do the critical-region fixup,
++# both to keep the code simple and because the stack frame is more complex
++# on x86-64.
++# When the kernel is interrupted inside the critical section it simply does
++# IRET, and everything is restored at that point, i.e. execution just resumes
++# at the interrupted instruction with the same context.
++	
++# Hypervisor uses this for application faults while it executes.
++ENTRY(failsafe_callback)
++	addq $0x10,%rsp /* skip rcx and r11 */	
++1:	mov  (%rsp),%ds
++2:	mov  8(%rsp),%es
++3:	mov  16(%rsp),%fs
++4:	mov  24(%rsp),%gs
++	addq $0x20,%rsp /* skip the above selectors */		
++	SAVE_ALL
++	jmp  error_exit
++.section .fixup,"ax";	\
++6:	movq $0,(%rsp);	\
++	jmp 1b;		\
++7:	movq $0,8(%rsp);	\
++	jmp 2b;		\
++8:	movq $0,16(%rsp);	\
++	jmp 3b;		\
++9:	movq $0,24(%rsp);	\
++	jmp 4b;		\
++.previous;		\
++.section __ex_table,"a";\
++	.align 16;	\
++	.quad 1b,6b;	\
++	.quad 2b,7b;	\
++	.quad 3b,8b;	\
++	.quad 4b,9b;	\
++.previous
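++
++# How the fixup above plays out (a short illustration): if reloading one of the
++# saved selectors at labels 1:-4: faults, the matching __ex_table entry sends
++# the fault to labels 6:-9:, which overwrite the offending stack slot with a
++# null selector and jump back to retry, so a bad saved segment cannot keep
++# faulting forever.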
++ 
++#if 0	      
++        .section __ex_table,"a"
++        .align 8
++        .quad gs_change,bad_gs
++        .previous
++        .section .fixup,"ax"
++	/* running with kernelgs */
++bad_gs: 
++/*	swapgs		*/	/* switch back to user gs */
++	xorl %eax,%eax
++        movl %eax,%gs
++        jmp  2b
++        .previous       
++#endif
++	
++/*
++ * Create a kernel thread.
++ *
++ * C extern interface:
++ *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++ *
++ * asm input arguments:
++ *	rdi: fn, rsi: arg, rdx: flags
++ */
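++/*
++ * Illustrative usage (my_fn here is just a placeholder, not from this file):
++ *	kernel_thread(my_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
++ * The child starts in child_rip below, which calls my_fn(NULL) and then
++ * do_exit(0).
++ */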
++ENTRY(kernel_thread)
++	CFI_STARTPROC
++	FAKE_STACK_FRAME $child_rip
++	SAVE_ALL
++
++	# rdi: flags, rsi: usp, rdx: will be &pt_regs
++	movq %rdx,%rdi
++	orq  kernel_thread_flags(%rip),%rdi
++	movq $-1, %rsi
++	movq %rsp, %rdx
++
++	xorl %r8d,%r8d
++	xorl %r9d,%r9d
++	
++	# clone now
++	call do_fork
++	movq %rax,RAX(%rsp)
++	xorl %edi,%edi
++
++	/*
++	 * It isn't worth checking for a reschedule here,
++	 * so internally to the x86_64 port you can rely on kernel_thread()
++	 * not rescheduling the child before returning; this avoids the need
++	 * for hacks, for example to fork off the per-CPU idle tasks.
++         * [Hopefully no generic code relies on the reschedule -AK]	
++	 */
++	RESTORE_ALL
++	UNFAKE_STACK_FRAME
++	ret
++	CFI_ENDPROC
++
++	
++child_rip:
++	/*
++	 * Here we are in the child and the registers are set as they were
++	 * at kernel_thread() invocation in the parent.
++	 */
++	movq %rdi, %rax
++	movq %rsi, %rdi
++	call *%rax
++	# exit
++	xorq %rdi, %rdi
++	call do_exit
++
++/*
++ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
++ *
++ * C extern interface:
++ *	 extern long execve(char *name, char **argv, char **envp)
++ *
++ * asm input arguments:
++ *	rdi: name, rsi: argv, rdx: envp
++ *
++ * We want to fallback into:
++ *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
++ *
++ * do_sys_execve asm fallback arguments:
++ *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
++ */
++ENTRY(execve)
++	CFI_STARTPROC
++	FAKE_STACK_FRAME $0
++	SAVE_ALL	
++	call sys_execve
++	movq %rax, RAX(%rsp)	
++	RESTORE_REST
++	testq %rax,%rax
++	jne 1f
++        jmp int_ret_from_sys_call
++1:      RESTORE_ARGS
++	UNFAKE_STACK_FRAME
++	ret
++	CFI_ENDPROC
++
++ENTRY(page_fault)
++	errorentry do_page_fault
++
++ENTRY(coprocessor_error)
++	zeroentry do_coprocessor_error
++
++ENTRY(simd_coprocessor_error)
++	zeroentry do_simd_coprocessor_error	
++
++ENTRY(device_not_available)
++	zeroentry math_state_restore
++
++	/* runs on exception stack */
++ENTRY(debug)
++	CFI_STARTPROC
++/*	pushq $0
++	CFI_ADJUST_CFA_OFFSET 8	*/
++	zeroentry do_debug
++/*	jmp paranoid_exit */
++	CFI_ENDPROC
++
++#if 0
++	/* runs on exception stack */	
++ENTRY(nmi)
++	CFI_STARTPROC
++	pushq $-1
++	CFI_ADJUST_CFA_OFFSET 8		
++	paranoidentry do_nmi
++	/*
++ 	 * "Paranoid" exit path from exception stack.
++  	 * Paranoid because this is used by NMIs and cannot take
++	 * any kernel state for granted.
++	 * We don't do kernel preemption checks here, because only
++	 * NMI should be common and it does not enable IRQs and
++	 * cannot get reschedule ticks.
++	 */
++	/* ebx:	no swapgs flag */
++paranoid_exit:
++	testl %ebx,%ebx				/* swapgs needed? */
++	jnz paranoid_restore
++	testl $3,CS(%rsp)
++	jnz   paranoid_userspace
++paranoid_swapgs:	
++	swapgs
++paranoid_restore:	
++	RESTORE_ALL 8
++	iretq
++paranoid_userspace:	
++	GET_THREAD_INFO(%rcx)
++	movl threadinfo_flags(%rcx),%ebx
++	andl $_TIF_WORK_MASK,%ebx
++	jz paranoid_swapgs
++	movq %rsp,%rdi			/* &pt_regs */
++	call sync_regs
++	movq %rax,%rsp			/* switch stack for scheduling */
++	testl $_TIF_NEED_RESCHED,%ebx
++	jnz paranoid_schedule
++	movl %ebx,%edx			/* arg3: thread flags */
++	sti
++	xorl %esi,%esi 			/* arg2: oldset */
++	movq %rsp,%rdi 			/* arg1: &pt_regs */
++	call do_notify_resume
++	cli
++	jmp paranoid_userspace
++paranoid_schedule:
++	sti
++	call schedule
++	cli
++	jmp paranoid_userspace
++	CFI_ENDPROC
++#endif        
++
++ENTRY(int3)
++	zeroentry do_int3	
++
++ENTRY(overflow)
++	zeroentry do_overflow
++
++ENTRY(bounds)
++	zeroentry do_bounds
++
++ENTRY(invalid_op)
++	zeroentry do_invalid_op	
++
++ENTRY(coprocessor_segment_overrun)
++	zeroentry do_coprocessor_segment_overrun
++
++ENTRY(reserved)
++	zeroentry do_reserved
++
++#if 0
++	/* runs on exception stack */
++ENTRY(double_fault)
++	CFI_STARTPROC
++	paranoidentry do_double_fault
++	jmp paranoid_exit
++	CFI_ENDPROC
++#endif
++
++ENTRY(invalid_TSS)
++	errorentry do_invalid_TSS
++
++ENTRY(segment_not_present)
++	errorentry do_segment_not_present
++
++	/* runs on exception stack */
++ENTRY(stack_segment)
++	CFI_STARTPROC
++	errorentry do_stack_segment
++	CFI_ENDPROC
++
++ENTRY(general_protection)
++	errorentry do_general_protection
++
++ENTRY(alignment_check)
++	errorentry do_alignment_check
++
++ENTRY(divide_error)
++	zeroentry do_divide_error
++
++ENTRY(spurious_interrupt_bug)
++	zeroentry do_spurious_interrupt_bug
++
++#ifdef CONFIG_X86_MCE
++	/* runs on exception stack */
++ENTRY(machine_check)
++	CFI_STARTPROC
++	pushq $0
++	CFI_ADJUST_CFA_OFFSET 8	
++	paranoidentry do_machine_check
++	jmp paranoid_exit
++	CFI_ENDPROC
++#endif
++
++ENTRY(call_debug)
++       zeroentry do_call_debug
++
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/genapic.c linux-2.6.12-xen/arch/xen/x86_64/kernel/genapic.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/genapic.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/genapic.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,123 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Generic APIC sub-arch probe layer.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ */
++#include <linux/config.h>
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/module.h>
++
++#include <asm/smp.h>
++#include <asm/ipi.h>
++
++#if defined(CONFIG_ACPI_BUS)
++#include <acpi/acpi_bus.h>
++#endif
++
++/* which logical CPU number maps to which CPU (physical APIC ID) */
++u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++extern struct genapic apic_cluster;
++extern struct genapic apic_flat;
++
++#ifndef CONFIG_XEN
++struct genapic *genapic = &apic_flat;
++#else
++extern struct genapic apic_xen;
++struct genapic *genapic = &apic_xen;
++#endif
++
++
++/*
++ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
++ */
++void __init clustered_apic_check(void)
++{
++#ifndef CONFIG_XEN
++	long i;
++	u8 clusters, max_cluster;
++	u8 id;
++	u8 cluster_cnt[NUM_APIC_CLUSTERS];
++
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
++		/* AMD always uses flat mode right now */
++		genapic = &apic_flat;
++		goto print;
++	}
++
++#if defined(CONFIG_ACPI_BUS)
++	/*
++	 * Some x86_64 machines use physical APIC mode regardless of how many
++	 * procs/clusters are present (x86_64 ES7000 is an example).
++	 */
++	if (acpi_fadt.revision > FADT2_REVISION_ID)
++		if (acpi_fadt.force_apic_physical_destination_mode) {
++			genapic = &apic_cluster;
++			goto print;
++		}
++#endif
++
++	memset(cluster_cnt, 0, sizeof(cluster_cnt));
++
++	for (i = 0; i < NR_CPUS; i++) {
++		id = bios_cpu_apicid[i];
++		if (id != BAD_APICID)
++			cluster_cnt[APIC_CLUSTERID(id)]++;
++	}
++
++	clusters = 0;
++	max_cluster = 0;
++	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
++		if (cluster_cnt[i] > 0) {
++			++clusters;
++			if (cluster_cnt[i] > max_cluster)
++				max_cluster = cluster_cnt[i];
++		}
++	}
++
++	/*
++	 * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
++	 * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
++	 * else physical mode.
++	 * (We don't use lowest priority delivery + HW APIC IRQ steering, so
++	 * can ignore the clustered logical case and go straight to physical.)
++	 */
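++	/*
++	 * A worked example: a 4-way box whose BIOS reports APIC IDs 0-3
++	 * (all in cluster 0) ends up with clusters == 1, max_cluster == 4 and
++	 * cluster_cnt[0] == 4, so it keeps flat mode; a bigger box spread over
++	 * several clusters falls through to apic_cluster.
++	 */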
++	if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster)
++		genapic = &apic_flat;
++	else
++		genapic = &apic_cluster;
++
++print:
++#else
++	/* hardcode to xen apic functions */
++	genapic = &apic_xen;
++#endif
++	printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
++}
++
++/* Same for both flat and clustered. */
++
++#ifdef CONFIG_XEN
++extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
++#endif
++
++void send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++	__send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#else
++	xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#endif
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/genapic_xen.c linux-2.6.12-xen/arch/xen/x86_64/kernel/genapic_xen.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/genapic_xen.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/genapic_xen.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,162 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Xen APIC subarch code.  Maximum 8 CPUs, logical delivery.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ *
++ * Hacked to pieces for Xen by Chris Wright.
++ */
++#include <linux/config.h>
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#include <asm/smp.h>
++#include <asm/ipi.h>
++#else
++#include <asm/apic.h>
++#include <asm/apicdef.h>
++#include <asm/genapic.h>
++#endif
++#include <asm-xen/evtchn.h>
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++	int irq = per_cpu(ipi_to_irq, cpu)[vector];
++	BUG_ON(irq < 0);
++	notify_remote_via_irq(irq);
++}
++
++void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
++{
++	int cpu;
++
++	switch (shortcut) {
++	case APIC_DEST_SELF:
++		__send_IPI_one(smp_processor_id(), vector);
++		break;
++	case APIC_DEST_ALLBUT:
++		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++			if (cpu == smp_processor_id())
++				continue;
++			if (cpu_isset(cpu, cpu_online_map)) {
++				__send_IPI_one(cpu, vector);
++			}
++		}
++		break;
++	case APIC_DEST_ALLINC:
++		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++			if (cpu_isset(cpu, cpu_online_map)) {
++				__send_IPI_one(cpu, vector);
++			}
++		}
++		break;
++	default:
++		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++		       vector);
++		break;
++	}
++}
++
++static cpumask_t xen_target_cpus(void)
++{
++	return cpu_online_map;
++}
++
++/*
++ * Set up the logical destination ID.
++ * Does nothing; not called at the moment.
++ */
++static void xen_init_apic_ldr(void)
++{
++	Dprintk("%s\n", __FUNCTION__);
++	return;
++}
++
++static void xen_send_IPI_allbutself(int vector)
++{
++	/*
++	 * If there are no other CPUs in the system then
++	 * we get an APIC send error if we try to broadcast,
++	 * so we have to avoid sending IPIs in this case.
++	 */
++	Dprintk("%s\n", __FUNCTION__);
++	if (num_online_cpus() > 1)
++		xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_all(int vector)
++{
++	Dprintk("%s\n", __FUNCTION__);
++	xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
++{
++	unsigned long mask = cpus_addr(cpumask)[0];
++	unsigned int cpu;
++	unsigned long flags;
++
++	Dprintk("%s\n", __FUNCTION__);
++	local_irq_save(flags);
++	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
++
++	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++		if (cpu_isset(cpu, cpumask)) {
++			__send_IPI_one(cpu, vector);
++		}
++	}
++	local_irq_restore(flags);
++}
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++static int xen_apic_id_registered(void)
++{
++	/* better be set */
++	Dprintk("%s\n", __FUNCTION__);
++	return physid_isset(smp_processor_id(), phys_cpu_present_map);
++}
++#endif
++
++static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
++{
++	Dprintk("%s\n", __FUNCTION__);
++	return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
++}
++
++static unsigned int phys_pkg_id(int index_msb)
++{
++	u32 ebx;
++
++	Dprintk("%s\n", __FUNCTION__);
++	ebx = cpuid_ebx(1);
++	return ((ebx >> 24) & 0xFF) >> index_msb;
++}
++
++struct genapic apic_xen =  {
++	.name = "xen",
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++	.int_delivery_mode = dest_LowestPrio,
++#endif
++	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
++	.int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
++	.target_cpus = xen_target_cpus,
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++	.apic_id_registered = xen_apic_id_registered,
++#endif
++	.init_apic_ldr = xen_init_apic_ldr,
++	.send_IPI_all = xen_send_IPI_all,
++	.send_IPI_allbutself = xen_send_IPI_allbutself,
++	.send_IPI_mask = xen_send_IPI_mask,
++	.cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
++	.phys_pkg_id = phys_pkg_id,
++};
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/head64.c linux-2.6.12-xen/arch/xen/x86_64/kernel/head64.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/head64.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/head64.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,127 @@
++/*
++ *  linux/arch/x86_64/kernel/head64.c -- prepare to run common code
++ *
++ *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
++ *
++ *  $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
++ *
++ *  Jun Nakajima <jun.nakajima at intel.com>
++ *	Modified for Xen.
++ */
++
++#include <linux/init.h>
++#include <linux/linkage.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/percpu.h>
++
++#include <asm/processor.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++#include <asm/bootsetup.h>
++#include <asm/setup.h>
++#include <asm/desc.h>
++
++unsigned long start_pfn;
++
++/* Don't add a printk in there. printk relies on the PDA which is not initialized 
++   yet. */
++#if 0
++static void __init clear_bss(void)
++{
++	extern char __bss_start[], __bss_end[];
++	memset(__bss_start, 0,
++	       (unsigned long) __bss_end - (unsigned long) __bss_start);
++}
++#endif
++
++#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
++#define OLD_CL_MAGIC_ADDR	0x90020
++#define OLD_CL_MAGIC            0xA33F
++#define OLD_CL_BASE_ADDR        0x90000
++#define OLD_CL_OFFSET           0x90022
++
++extern char saved_command_line[];
++
++#if 0
++static void __init copy_bootdata(char *real_mode_data)
++{
++	int new_data;
++	char * command_line;
++
++	memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
++	new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
++	if (!new_data) {
++		if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
++			printk("so old bootloader that it does not support commandline?!\n");
++			return;
++		}
++		new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
++		printk("old bootloader convention, maybe loadlin?\n");
++	}
++	command_line = (char *) ((u64)(new_data));
++	memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
++	printk("Bootdata ok (command line is %s)\n", saved_command_line);	
++}
++#endif
++
++static void __init setup_boot_cpu_data(void)
++{
++	unsigned int dummy, eax;
++
++	/* get vendor info */
++	cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
++	      (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
++	      (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
++	      (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
++
++	/* get cpu type */
++	cpuid(1, &eax, &dummy, &dummy,
++		(unsigned int *) &boot_cpu_data.x86_capability);
++	boot_cpu_data.x86 = (eax >> 8) & 0xf;
++	boot_cpu_data.x86_model = (eax >> 4) & 0xf;
++	boot_cpu_data.x86_mask = eax & 0xf;
++}
++
++extern char _end[];
++
++void __init x86_64_start_kernel(char * real_mode_data)
++{
++	int i;
++
++        phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;
++        start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) + 
++		xen_start_info->nr_pt_frames;
++
++	for (i = 0; i < 256; i++)
++		set_intr_gate(i, early_idt_handler);
++#if 0
++	asm volatile("lidt %0" :: "m" (idt_descr));
++#endif
++	pda_init(0);
++        /*	copy_bootdata(real_mode_data); */
++#ifdef CONFIG_SMP
++	cpu_set(0, cpu_online_map);
++#endif
++#if 0
++	s = strstr(saved_command_line, "earlyprintk=");
++	if (s != NULL)
++		setup_early_printk(s);
++#endif
++#ifdef CONFIG_DISCONTIGMEM
++	s = strstr(saved_command_line, "numa=");
++	if (s != NULL)
++		numa_setup(s+5);
++#endif
++#ifdef CONFIG_X86_IO_APIC
++	if (strstr(saved_command_line, "disableapic"))
++		disable_apic = 1;
++#endif
++	/* You need early console to see that */
++	if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
++		panic("Kernel too big for kernel mapping\n");
++
++	setup_boot_cpu_data();
++	start_kernel();
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/head.S linux-2.6.12-xen/arch/xen/x86_64/kernel/head.S
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/head.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/head.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,275 @@
++/*
++ *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
++ *
++ *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
++ *  Copyright (C) 2000 Pavel Machek <pavel at suse.cz>
++ *  Copyright (C) 2000 Karsten Keil <kkeil at suse.de>
++ *  Copyright (C) 2001,2002 Andi Kleen <ak at suse.de>
++ *
++ *  $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
++ *
++ *  Jun Nakajima <jun.nakajima at intel.com>
++ *    Modified for Xen                                
++ */
++
++
++#include <linux/linkage.h>
++
++.section __xen_guest
++	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
++	.ascii	",XEN_VER=xen-3.0"
++	.ascii	",VIRT_BASE=0xffffffff80000000"
++	.ascii	",HYPERCALL_PAGE=0x10d" /* __pa(hypercall_page) >> 12 */
++	.ascii	",LOADER=generic"
++	.byte	0
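++
++	/* Where the 0x10d above comes from (an illustration): the image starts
++	 * at physical 0x100000 and hypercall_page is placed at .org 0xd000
++	 * below, so __pa(hypercall_page) >> 12 = (0x100000 + 0xd000) >> 12
++	 * = 0x10d. */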
++                
++      
++#include <linux/threads.h>
++#include <asm/desc.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/msr.h>
++#include <asm/cache.h>
++	
++/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
++ * because we need identity-mapped pages during setup, so define __START_KERNEL
++ * as 0x100000 for this stage.
++ * 
++ */
++
++	.text
++	.code64
++	.globl startup_64
++startup_64:
++ENTRY(_start)
++	movq %rsi,xen_start_info(%rip)
++
++#ifdef CONFIG_SMP
++ENTRY(startup_64_smp)
++#endif /* CONFIG_SMP */
++
++	cld
++
++	movq init_rsp(%rip),%rsp
++	/* zero EFLAGS after setting rsp */
++	pushq $0
++	popfq
++	movq	initial_code(%rip),%rax
++	jmp	*%rax
++
++	/* SMP bootup changes these two */	
++	.globl	initial_code
++initial_code:
++	.quad	x86_64_start_kernel
++	.globl init_rsp
++init_rsp:
++	.quad  init_thread_union+THREAD_SIZE-8
++
++ENTRY(early_idt_handler)
++	xorl %eax,%eax
++	movq 8(%rsp),%rsi	# get rip
++	movq (%rsp),%rdx
++	leaq early_idt_msg(%rip),%rdi
++1:	hlt                     # generate #GP
++	jmp 1b
++
++early_idt_msg:
++	.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
++
++#if 0
++ENTRY(lgdt_finish)
++        movl $(__USER_DS),%eax          # DS/ES contains default USER segment
++        movw %ax,%ds
++        movw %ax,%es
++        movl $(__KERNEL_DS),%eax        
++        movw %ax,%ss                    # after changing gdt.
++        popq %rax                       # get the return address
++        pushq $(__KERNEL_CS)
++        pushq %rax
++        lretq
++#endif 
++
++ENTRY(stext)
++ENTRY(_stext)
++
++	/*
++	 * This default setting generates an identity mapping at address 0x100000
++	 * and a mapping for the kernel that precisely maps virtual address
++	 * 0xffffffff80000000 to physical address 0x000000 (always using
++	 * 2Mbyte large pages provided by PAE mode).
++	 */
++.org 0x1000
++ENTRY(init_level4_pgt)
++	.fill	512,8,0
++
++        /*
++         * We update two pgd entries to keep the kernel and user pgds consistent
++         * at pgd_populate(). This can be used for kernel modules, so we place
++         * this page here for those cases to avoid memory corruption.
++         * We also use this page to establish the initial mapping for
++         * the vsyscall area.
++         */
++.org 0x2000
++ENTRY(init_level4_user_pgt)
++	.fill	512,8,0
++
++	/*
++	 * In Xen the following pre-initialized pgt entries are re-initialized.
++	 */
++.org 0x3000
++ENTRY(level3_kernel_pgt)
++	.fill	510,8,0
++	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
++	.quad	0x0000000000105007		/* -> level2_kernel_pgt */
++	.fill	1,8,0
++
++.org 0x4000
++ENTRY(level2_ident_pgt)
++	/* 40MB for bootup. 	*/
++	.quad	0x0000000000000283
++	.quad	0x0000000000200183
++	.quad	0x0000000000400183
++	.quad	0x0000000000600183
++	.quad	0x0000000000800183
++	.quad	0x0000000000A00183
++	.quad	0x0000000000C00183
++	.quad	0x0000000000E00183
++	.quad	0x0000000001000183
++	.quad	0x0000000001200183
++	.quad	0x0000000001400183
++	.quad	0x0000000001600183
++	.quad	0x0000000001800183
++	.quad	0x0000000001A00183
++	.quad	0x0000000001C00183
++	.quad	0x0000000001E00183
++	.quad	0x0000000002000183
++	.quad	0x0000000002200183
++	.quad	0x0000000002400183
++	.quad	0x0000000002600183
++	/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
++	.globl temp_boot_pmds
++temp_boot_pmds:
++	.fill	492,8,0
++
++.org 0x5000
++ENTRY(level2_kernel_pgt)
++	/* 40MB kernel mapping. The kernel code cannot be bigger than that.
++	   When you change this change KERNEL_TEXT_SIZE in page.h too. */
++	/* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
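++	/* Each .quad below is one 2MB PSE entry; as an illustration, the 0x183
++	   flags decode to Present | Writable | PS (2MB page) | Global, and the
++	   twenty entries step by 0x200000 to give the 40MB described above. */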
++	.quad	0x0000000000000183
++	.quad	0x0000000000200183
++	.quad	0x0000000000400183
++	.quad	0x0000000000600183
++	.quad	0x0000000000800183
++	.quad	0x0000000000A00183
++	.quad	0x0000000000C00183
++	.quad	0x0000000000E00183
++	.quad	0x0000000001000183
++	.quad	0x0000000001200183
++	.quad	0x0000000001400183
++	.quad	0x0000000001600183
++	.quad	0x0000000001800183
++	.quad	0x0000000001A00183
++	.quad	0x0000000001C00183
++	.quad	0x0000000001E00183
++	.quad	0x0000000002000183
++	.quad	0x0000000002200183
++	.quad	0x0000000002400183
++	.quad	0x0000000002600183
++	/* Module mapping starts here */
++	.fill	492,8,0
++	
++        /*
++         * This is used for vsyscall area mapping as we have a different
++         * level4 page table for user.
++         */
++.org 0x6000
++ENTRY(level3_user_pgt)
++        .fill	512,8,0
++
++.org 0x7000
++ENTRY(cpu_gdt_table)
++/* The TLS descriptors are currently at a different place compared to i386.
++   Hopefully nobody expects them at a fixed place (Wine?) */
++	.quad	0x0000000000000000	/* NULL descriptor */
++	.quad	0x008ffa000000ffff	/* __KERNEL_COMPAT32_CS */	
++	.quad	0x00affa000000ffff	/* __KERNEL_CS */
++	.quad	0x00cff2000000ffff	/* __KERNEL_DS */
++	
++       	.quad	0x00cffa000000ffff	/* __USER32_CS */
++	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS  */		
++	.quad	0x00affa000000ffff	/* __USER_CS */
++	.quad	0x00cffa000000ffff	/* __KERNEL32_CS */        
++	.quad	0,0			/* TSS */
++	.quad	0,0			/* LDT */
++	.quad   0,0,0			/* three TLS descriptors */ 
++	.quad	0			/* unused now?   __KERNEL16_CS - 16bit PM for S3 wakeup. */
++
++gdt_end:
++#if 0
++	/* asm/segment.h:GDT_ENTRIES must match this */	
++	/* This should be a multiple of the cache line size */
++	/* GDTs of other CPUs: */	
++	.fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
++#endif
++
++.org 0x8000
++ENTRY(empty_zero_page)
++
++.org 0x9000
++ENTRY(empty_bad_page)
++
++.org 0xa000
++ENTRY(empty_bad_pte_table)
++
++.org 0xb000
++ENTRY(empty_bad_pmd_table)
++
++.org 0xc000
++ENTRY(level3_physmem_pgt)
++	.quad	0x0000000000105007		/* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
++
++.org 0xd000
++ENTRY(hypercall_page)
++
++.org 0xe000
++#ifdef CONFIG_ACPI_SLEEP
++ENTRY(wakeup_level4_pgt)
++	.quad	0x0000000000102007		/* -> level3_ident_pgt */
++	.fill	255,8,0
++	.quad	0x000000000010a007
++	.fill	254,8,0
++	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
++	.quad	0x0000000000103007		/* -> level3_kernel_pgt */
++#endif
++
++	.data
++
++	.align 16
++	.globl cpu_gdt_descr
++cpu_gdt_descr:
++	.word	gdt_end-cpu_gdt_table
++gdt:
++	.quad	cpu_gdt_table
++#ifdef CONFIG_SMP
++	.rept	NR_CPUS-1
++	.word	0
++	.quad	0
++	.endr
++#endif
++
++/* We need valid kernel segments for data and code in long mode too;
++ * IRET will check the segment types.  kkeil 2000/10/28
++ * Also, sysret mandates a special GDT layout.
++ */
++		 		
++#if 0        		 		
++.align L1_CACHE_BYTES
++#endif
++	.align  L1_CACHE_BYTES
++ENTRY(idt_table)	
++	.rept   256
++	.quad   0
++	.quad 	0
++	.endr
++
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/io_apic.c linux-2.6.12-xen/arch/xen/x86_64/kernel/io_apic.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/io_apic.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/io_apic.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,1992 @@
++/*
++ *	Intel IO-APIC support for multi-Pentium hosts.
++ *
++ *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ *	Many thanks to Stig Venaas for trying out countless experimental
++ *	patches and reporting/debugging problems patiently!
++ *
++ *	(c) 1999, Multiple IO-APIC support, developed by
++ *	Ken-ichi Yaku <yaku at css1.kbnes.nec.co.jp> and
++ *      Hidemi Kishimoto <kisimoto at css1.kbnes.nec.co.jp>,
++ *	further tested and cleaned up by Zach Brown <zab at redhat.com>
++ *	and Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
++ *					thanks to Eric Gilmore
++ *					and Rolf G. Tews
++ *					for testing these extensively
++ *	Paul Diefenbaugh	:	Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/config.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++#include <linux/sysdev.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/mach_apic.h>
++#include <asm/acpi.h>
++
++#define __apicdebuginit  __init
++
++int sis_apic_bug; /* not actually supported, dummy for compile */
++
++static int no_timer_check;
++
++static DEFINE_SPINLOCK(ioapic_lock);
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++/*
++ * Rough estimation of how many shared IRQs there are, can
++ * be changed anytime.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical; we want to do it in O(1).
++ *
++ * The indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++	short apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) 	\
++	(platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector)	(vector)
++#endif
++
++#ifdef CONFIG_XEN
++
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/physdev.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq)  ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++	physdev_op_t op;
++	int ret;
++
++	op.cmd = PHYSDEVOP_APIC_READ;
++	op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
++	op.u.apic_op.offset = reg;
++	ret = HYPERVISOR_physdev_op(&op);
++	if (ret)
++		return ret;
++	return op.u.apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++	physdev_op_t op;
++
++	op.cmd = PHYSDEVOP_APIC_WRITE;
++	op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
++	op.u.apic_op.offset = reg;
++	op.u.apic_op.value = value;
++	HYPERVISOR_physdev_op(&op);
++}
++
++#define io_apic_read(a,r)    xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#define clear_IO_APIC() ((void)0)
++
++#endif /* !CONFIG_XEN */
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
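++/*
++ * For instance, when an IRQ shows up on more than one I/O APIC pin,
++ * add_pin_to_irq() chains the extra pins onto irq_2_pin[irq] through spare
++ * entries taken from the first_free_entry pool (the ->next index), keeping
++ * the common 1:1 lookup O(1).
++ */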
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++	static int first_free_entry = NR_IRQS;
++	struct irq_pin_list *entry = irq_2_pin + irq;
++
++	while (entry->next)
++		entry = irq_2_pin + entry->next;
++
++	if (entry->pin != -1) {
++		entry->next = first_free_entry;
++		entry = irq_2_pin + entry->next;
++		if (++first_free_entry >= PIN_MAP_SIZE)
++			panic("io_apic.c: whoops");
++	}
++	entry->apic = apic;
++	entry->pin = pin;
++}
++
++#ifndef CONFIG_XEN
++#define __DO_ACTION(R, ACTION, FINAL)					\
++									\
++{									\
++	int pin;							\
++	struct irq_pin_list *entry = irq_2_pin + irq;			\
++									\
++	for (;;) {							\
++		unsigned int reg;					\
++		pin = entry->pin;					\
++		if (pin == -1)						\
++			break;						\
++		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
++		reg ACTION;						\
++		io_apic_modify(entry->apic, reg);			\
++		if (!entry->next)					\
++			break;						\
++		entry = irq_2_pin + entry->next;			\
++	}								\
++	FINAL;								\
++}
++
++#define DO_ACTION(name,R,ACTION, FINAL)					\
++									\
++	static void name##_IO_APIC_irq (unsigned int irq)		\
++	__DO_ACTION(R, ACTION, FINAL)
++
++DO_ACTION( __mask,             0, |= 0x00010000, io_apic_sync(entry->apic) )
++						/* mask = 1 */
++DO_ACTION( __unmask,           0, &= 0xfffeffff, )
++						/* mask = 0 */
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__mask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__unmask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
++
++	/* Check delivery_mode to be sure we're not clearing an SMI pin */
++	spin_lock_irqsave(&ioapic_lock, flags);
++	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++	if (entry.delivery_mode == dest_SMI)
++		return;
++	/*
++	 * Disable it in the IO-APIC irq-routing table:
++	 */
++	memset(&entry, 0, sizeof(entry));
++	entry.mask = 1;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++	int apic, pin;
++
++	for (apic = 0; apic < nr_ioapics; apic++)
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++			clear_IO_APIC_pin(apic, pin);
++}
++
++#endif /* !CONFIG_XEN */
++
++/*
++ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++int ioapic_force;
++
++/* dummy parsing: see setup.c */
++
++static int __init disable_ioapic_setup(char *str)
++{
++	skip_ioapic_setup = 1;
++	return 1;
++}
++
++static int __init enable_ioapic_setup(char *str)
++{
++	ioapic_force = 1;
++	skip_ioapic_setup = 0;
++	return 1;
++}
++
++__setup("noapic", disable_ioapic_setup);
++__setup("apic", enable_ioapic_setup);
++
++#include <asm/pci-direct.h>
++#include <linux/pci_ids.h>
++#include <linux/pci.h>
++
++/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
++   off. Check for an Nvidia or VIA PCI bridge and turn it off.
++   Use pci direct infrastructure because this runs before the PCI subsystem. 
++
++   Can be overridden with "apic"
++
++   And another hack to disable the IOMMU on VIA chipsets.
++
++   Kludge-O-Rama. */
++void __init check_ioapic(void) 
++{ 
++	int num,slot,func; 
++	if (ioapic_force) 
++		return; 
++
++	/* Poor man's PCI discovery */
++	for (num = 0; num < 32; num++) { 
++		for (slot = 0; slot < 32; slot++) { 
++			for (func = 0; func < 8; func++) { 
++				u32 class;
++				u32 vendor;
++				u8 type;
++				class = read_pci_config(num,slot,func,
++							PCI_CLASS_REVISION);
++				if (class == 0xffffffff)
++					break; 
++
++		       		if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
++					continue; 
++
++				vendor = read_pci_config(num, slot, func, 
++							 PCI_VENDOR_ID);
++				vendor &= 0xffff;
++				switch (vendor) { 
++				case PCI_VENDOR_ID_VIA:
++#ifdef CONFIG_GART_IOMMU
++					if ((end_pfn >= (0xffffffff>>PAGE_SHIFT) ||
++					     force_iommu) &&
++					    !iommu_aperture_allowed) {
++						printk(KERN_INFO
++    "Looks like a VIA chipset. Disabling IOMMU. Overwrite with \"iommu=allowed\"\n");
++						iommu_aperture_disabled = 1;
++					}
++#endif
++					return;
++				case PCI_VENDOR_ID_NVIDIA:
++#ifdef CONFIG_ACPI
++					/* All timer overrides on Nvidia
++				           seem to be wrong. Skip them. */
++					acpi_skip_timer_override = 1;
++					printk(KERN_INFO 
++	     "Nvidia board detected. Ignoring ACPI timer override.\n");
++#endif
++					/* RED-PEN skip them on mptables too? */
++					return;
++				} 
++
++				/* No multi-function device? */
++				type = read_pci_config_byte(num,slot,func,
++							    PCI_HEADER_TYPE);
++				if (!(type & 0x80))
++					break;
++			} 
++		}
++	}
++} 
++
++static int __init ioapic_pirq_setup(char *str)
++{
++	int i, max;
++	int ints[MAX_PIRQS+1];
++
++	get_options(str, ARRAY_SIZE(ints), ints);
++
++	for (i = 0; i < MAX_PIRQS; i++)
++		pirq_entries[i] = -1;
++
++	pirqs_enabled = 1;
++	apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
++	max = MAX_PIRQS;
++	if (ints[0] < MAX_PIRQS)
++		max = ints[0];
++
++	for (i = 0; i < max; i++) {
++		apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++		/*
++		 * PIRQs are mapped upside down, usually.
++		 */
++		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++	}
++	return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++	int i;
++
++	for (i = 0; i < mp_irq_entries; i++)
++		if (mp_irqs[i].mpc_irqtype == type &&
++		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++		    mp_irqs[i].mpc_dstirq == pin)
++			return i;
++
++	return -1;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++	int i;
++
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
++
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++		    (mp_irqs[i].mpc_irqtype == type) &&
++		    (mp_irqs[i].mpc_srcbusirq == irq))
++
++			return mp_irqs[i].mpc_dstirq;
++	}
++	return -1;
++}
++#endif
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++	int apic, i, best_guess = -1;
++
++	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
++		bus, slot, pin);
++	if (mp_bus_id_to_pci_bus[bus] == -1) {
++		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++		return -1;
++	}
++	for (i = 0; i < mp_irq_entries; i++) {
++		int lbus = mp_irqs[i].mpc_srcbus;
++
++		for (apic = 0; apic < nr_ioapics; apic++)
++			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++				break;
++
++		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++		    !mp_irqs[i].mpc_irqtype &&
++		    (bus == lbus) &&
++		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++			if (!(apic || IO_APIC_IRQ(irq)))
++				continue;
++
++			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++				return irq;
++			/*
++			 * Use the first all-but-pin matching entry as a
++			 * best-guess fuzzy result for broken mptables.
++			 */
++			if (best_guess < 0)
++				best_guess = irq;
++		}
++	}
++	return best_guess;
++}
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++	if (irq < 16) {
++		unsigned int port = 0x4d0 + (irq >> 3);
++		return (inb(port) >> (irq & 7)) & 1;
++	}
++	apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
++	return 0;
++}
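++
++/*
++ * Worked example: ISA IRQ 9 lives in bit 1 of ELCR register 0x4d1, since
++ * port = 0x4d0 + (9 >> 3) = 0x4d1 and bit = 9 & 7 = 1.
++ */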
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * triggered depending on the ELCR value.  If an interrupt is listed as
++ * EISA conforming in the MP table, its trigger type must
++ * be read in from the ELCR. */
++
++#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx)	(0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx)	(0)
++#define default_ISA_polarity(idx)	(0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx)	(1)
++#define default_PCI_polarity(idx)	(1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx)	(1)
++#define default_MCA_polarity(idx)	(0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++	int bus = mp_irqs[idx].mpc_srcbus;
++	int polarity;
++
++	/*
++	 * Determine IRQ line polarity (high active or low active):
++	 */
++	switch (mp_irqs[idx].mpc_irqflag & 3)
++	{
++		case 0: /* conforms, ie. bus-type dependent polarity */
++		{
++			switch (mp_bus_id_to_type[bus])
++			{
++				case MP_BUS_ISA: /* ISA pin */
++				{
++					polarity = default_ISA_polarity(idx);
++					break;
++				}
++				case MP_BUS_EISA: /* EISA pin */
++				{
++					polarity = default_EISA_polarity(idx);
++					break;
++				}
++				case MP_BUS_PCI: /* PCI pin */
++				{
++					polarity = default_PCI_polarity(idx);
++					break;
++				}
++				case MP_BUS_MCA: /* MCA pin */
++				{
++					polarity = default_MCA_polarity(idx);
++					break;
++				}
++				default:
++				{
++					printk(KERN_WARNING "broken BIOS!!\n");
++					polarity = 1;
++					break;
++				}
++			}
++			break;
++		}
++		case 1: /* high active */
++		{
++			polarity = 0;
++			break;
++		}
++		case 2: /* reserved */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			polarity = 1;
++			break;
++		}
++		case 3: /* low active */
++		{
++			polarity = 1;
++			break;
++		}
++		default: /* invalid */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			polarity = 1;
++			break;
++		}
++	}
++	return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++	int bus = mp_irqs[idx].mpc_srcbus;
++	int trigger;
++
++	/*
++	 * Determine IRQ trigger mode (edge or level sensitive):
++	 */
++	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++	{
++		case 0: /* conforms, ie. bus-type dependent */
++		{
++			switch (mp_bus_id_to_type[bus])
++			{
++				case MP_BUS_ISA: /* ISA pin */
++				{
++					trigger = default_ISA_trigger(idx);
++					break;
++				}
++				case MP_BUS_EISA: /* EISA pin */
++				{
++					trigger = default_EISA_trigger(idx);
++					break;
++				}
++				case MP_BUS_PCI: /* PCI pin */
++				{
++					trigger = default_PCI_trigger(idx);
++					break;
++				}
++				case MP_BUS_MCA: /* MCA pin */
++				{
++					trigger = default_MCA_trigger(idx);
++					break;
++				}
++				default:
++				{
++					printk(KERN_WARNING "broken BIOS!!\n");
++					trigger = 1;
++					break;
++				}
++			}
++			break;
++		}
++		case 1: /* edge */
++		{
++			trigger = 0;
++			break;
++		}
++		case 2: /* reserved */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			trigger = 1;
++			break;
++		}
++		case 3: /* level */
++		{
++			trigger = 1;
++			break;
++		}
++		default: /* invalid */
++		{
++			printk(KERN_WARNING "broken BIOS!!\n");
++			trigger = 0;
++			break;
++		}
++	}
++	return trigger;
++}
++
++static inline int irq_polarity(int idx)
++{
++	return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++	return MPBIOS_trigger(idx);
++}
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++	int irq, i;
++	int bus = mp_irqs[idx].mpc_srcbus;
++
++	/*
++	 * Debugging check, we are in big trouble if this message pops up!
++	 */
++	if (mp_irqs[idx].mpc_dstirq != pin)
++		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++	switch (mp_bus_id_to_type[bus])
++	{
++		case MP_BUS_ISA: /* ISA pin */
++		case MP_BUS_EISA:
++		case MP_BUS_MCA:
++		{
++			irq = mp_irqs[idx].mpc_srcbusirq;
++			break;
++		}
++		case MP_BUS_PCI: /* PCI pin */
++		{
++			/*
++			 * PCI IRQs are mapped in order
++			 */
++			i = irq = 0;
++			while (i < apic)
++				irq += nr_ioapic_registers[i++];
++			irq += pin;
++			break;
++		}
++		default:
++		{
++			printk(KERN_ERR "unknown bus type %d.\n",bus); 
++			irq = 0;
++			break;
++		}
++	}
++
++	/*
++	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
++	 */
++	if ((pin >= 16) && (pin <= 23)) {
++		if (pirq_entries[pin-16] != -1) {
++			if (!pirq_entries[pin-16]) {
++				apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
++			} else {
++				irq = pirq_entries[pin-16];
++				apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
++						pin-16, irq);
++			}
++		}
++	}
++	return irq;
++}
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++	int apic, idx, pin;
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++			idx = find_irq_entry(apic,pin,mp_INT);
++			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++				return irq_trigger(idx);
++		}
++	}
++	/*
++	 * nonexistent IRQs are edge default
++	 */
++	return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS];
++
++int assign_irq_vector(int irq)
++{
++	static int current_vector = FIRST_DEVICE_VECTOR;
++	physdev_op_t op;
++  
++  	BUG_ON(irq >= NR_IRQ_VECTORS);
++  	if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
++  		return IO_APIC_VECTOR(irq);
++
++	op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
++	op.u.irq_op.irq = irq;
++	if (HYPERVISOR_physdev_op(&op))
++		return -ENOSPC;
++	current_vector = op.u.irq_op.vector;
++
++	vector_irq[current_vector] = irq;
++	if (irq != AUTO_ASSIGN)
++		IO_APIC_VECTOR(irq) = current_vector;
++
++	return current_vector;
++}
++
++extern void (*interrupt[NR_IRQS])(void);
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO	-1
++#define IOAPIC_EDGE	0
++#define IOAPIC_LEVEL	1
++
++static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++	if (use_pci_vector() && !platform_legacy_irq(irq)) {
++		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++				trigger == IOAPIC_LEVEL)
++			irq_desc[vector].handler = &ioapic_level_type;
++		else
++			irq_desc[vector].handler = &ioapic_edge_type;
++		set_intr_gate(vector, interrupt[vector]);
++	} else	{
++		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++				trigger == IOAPIC_LEVEL)
++			irq_desc[irq].handler = &ioapic_level_type;
++		else
++			irq_desc[irq].handler = &ioapic_edge_type;
++		set_intr_gate(vector, interrupt[irq]);
++	}
++}
++#else
++#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#endif /* !CONFIG_XEN */
++
++static void __init setup_IO_APIC_irqs(void)
++{
++	struct IO_APIC_route_entry entry;
++	int apic, pin, idx, irq, first_notcon = 1, vector;
++	unsigned long flags;
++
++	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++		/*
++		 * add it to the IO-APIC irq-routing table:
++		 */
++		memset(&entry,0,sizeof(entry));
++
++		entry.delivery_mode = INT_DELIVERY_MODE;
++		entry.dest_mode = INT_DEST_MODE;
++		entry.mask = 0;				/* enable IRQ */
++		entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++
++		idx = find_irq_entry(apic,pin,mp_INT);
++		if (idx == -1) {
++			if (first_notcon) {
++				apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++				first_notcon = 0;
++			} else
++				apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++			continue;
++		}
++
++		entry.trigger = irq_trigger(idx);
++		entry.polarity = irq_polarity(idx);
++
++		if (irq_trigger(idx)) {
++			entry.trigger = 1;
++			entry.mask = 1;
++			entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++		}
++
++		irq = pin_2_irq(idx, apic, pin);
++		add_pin_to_irq(irq, apic, pin);
++
++		if (/* !apic && */ !IO_APIC_IRQ(irq))
++			continue;
++
++		if (IO_APIC_IRQ(irq)) {
++			vector = assign_irq_vector(irq);
++			entry.vector = vector;
++
++			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++			if (!apic && (irq < 16))
++				disable_8259A_irq(irq);
++		}
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++	}
++	}
++
++	if (!first_notcon)
++		apic_printk(APIC_VERBOSE," not connected.\n");
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Set up the 8259A-master output pin as broadcast to all
++ * CPUs.
++ */
++static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
++
++	memset(&entry,0,sizeof(entry));
++
++	disable_8259A_irq(0);
++
++	/* mask LVT0 */
++	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++	/*
++	 * We use logical delivery to get the timer IRQ
++	 * to the first CPU.
++	 */
++	entry.dest_mode = INT_DEST_MODE;
++	entry.mask = 0;					/* unmask IRQ now */
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++	entry.delivery_mode = INT_DELIVERY_MODE;
++	entry.polarity = 0;
++	entry.trigger = 0;
++	entry.vector = vector;
++
++	/*
++	 * The timer IRQ doesn't have to know that behind the
++	 * scenes we have an 8259A-master in AEOI mode ...
++	 */
++	irq_desc[0].handler = &ioapic_edge_type;
++
++	/*
++	 * Add it to the IO-APIC irq-routing table:
++	 */
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
++	io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	enable_8259A_irq(0);
++}
++
++void __init UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __apicdebuginit print_IO_APIC(void)
++{
++	int apic, i;
++	union IO_APIC_reg_00 reg_00;
++	union IO_APIC_reg_01 reg_01;
++	union IO_APIC_reg_02 reg_02;
++	unsigned long flags;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++	for (i = 0; i < nr_ioapics; i++)
++		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++	/*
++	 * We are a bit conservative about what we expect.  We have to
++	 * know about every hardware change ASAP.
++	 */
++	printk(KERN_INFO "testing the IO APIC.......................\n");
++
++	for (apic = 0; apic < nr_ioapics; apic++) {
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(apic, 0);
++	reg_01.raw = io_apic_read(apic, 1);
++	if (reg_01.bits.version >= 0x10)
++		reg_02.raw = io_apic_read(apic, 2);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	printk("\n");
++	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
++	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++		UNEXPECTED_IO_APIC();
++
++	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
++	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
++	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++		(reg_01.bits.entries != 0x2E) &&
++		(reg_01.bits.entries != 0x3F) &&
++		(reg_01.bits.entries != 0x03) 
++	)
++		UNEXPECTED_IO_APIC();
++
++	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
++	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
++	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++		(reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
++		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
++	)
++		UNEXPECTED_IO_APIC();
++	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++		UNEXPECTED_IO_APIC();
++
++	if (reg_01.bits.version >= 0x10) {
++		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
++		if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++			UNEXPECTED_IO_APIC();
++	}
++
++	printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++			  " Stat Dest Deli Vect:   \n");
++
++	for (i = 0; i <= reg_01.bits.entries; i++) {
++		struct IO_APIC_route_entry entry;
++
++		spin_lock_irqsave(&ioapic_lock, flags);
++		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++
++		printk(KERN_DEBUG " %02x %03X %02X  ",
++			i,
++			entry.dest.logical.logical_dest,
++			entry.dest.physical.physical_dest
++		);
++
++		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
++			entry.mask,
++			entry.trigger,
++			entry.irr,
++			entry.polarity,
++			entry.delivery_status,
++			entry.dest_mode,
++			entry.delivery_mode,
++			entry.vector
++		);
++	}
++	}
++	if (use_pci_vector())
++		printk(KERN_INFO "Using vector-based indexing\n");
++	printk(KERN_DEBUG "IRQ to pin mappings:\n");
++	for (i = 0; i < NR_IRQS; i++) {
++		struct irq_pin_list *entry = irq_2_pin + i;
++		if (entry->pin < 0)
++			continue;
++ 		if (use_pci_vector() && !platform_legacy_irq(i))
++			printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++		else
++			printk(KERN_DEBUG "IRQ%d ", i);
++		for (;;) {
++			printk("-> %d:%d", entry->apic, entry->pin);
++			if (!entry->next)
++				break;
++			entry = irq_2_pin + entry->next;
++		}
++		printk("\n");
++	}
++
++	printk(KERN_INFO ".................................... done.\n");
++
++	return;
++}
++
++#if 0
++
++static __apicdebuginit void print_APIC_bitfield (int base)
++{
++	unsigned int v;
++	int i, j;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++	for (i = 0; i < 8; i++) {
++		v = apic_read(base + i*0x10);
++		for (j = 0; j < 32; j++) {
++			if (v & (1<<j))
++				printk("1");
++			else
++				printk("0");
++		}
++		printk("\n");
++	}
++}
++
++void __apicdebuginit print_local_APIC(void * dummy)
++{
++	unsigned int v, ver, maxlvt;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++		smp_processor_id(), hard_smp_processor_id());
++	v = apic_read(APIC_ID);
++	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
++	v = apic_read(APIC_LVR);
++	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++	ver = GET_APIC_VERSION(v);
++	maxlvt = get_maxlvt();
++
++	v = apic_read(APIC_TASKPRI);
++	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
++		v = apic_read(APIC_ARBPRI);
++		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++			v & APIC_ARBPRI_MASK);
++		v = apic_read(APIC_PROCPRI);
++		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++	}
++
++	v = apic_read(APIC_EOI);
++	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++	v = apic_read(APIC_RRR);
++	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++	v = apic_read(APIC_LDR);
++	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++	v = apic_read(APIC_DFR);
++	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++	v = apic_read(APIC_SPIV);
++	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++	printk(KERN_DEBUG "... APIC ISR field:\n");
++	print_APIC_bitfield(APIC_ISR);
++	printk(KERN_DEBUG "... APIC TMR field:\n");
++	print_APIC_bitfield(APIC_TMR);
++	printk(KERN_DEBUG "... APIC IRR field:\n");
++	print_APIC_bitfield(APIC_IRR);
++
++	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
++		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
++			apic_write(APIC_ESR, 0);
++		v = apic_read(APIC_ESR);
++		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++	}
++
++	v = apic_read(APIC_ICR);
++	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++	v = apic_read(APIC_ICR2);
++	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++	v = apic_read(APIC_LVTT);
++	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++	if (maxlvt > 3) {                       /* PC is LVT#4. */
++		v = apic_read(APIC_LVTPC);
++		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++	}
++	v = apic_read(APIC_LVT0);
++	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++	v = apic_read(APIC_LVT1);
++	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++	if (maxlvt > 2) {			/* ERR is LVT#3. */
++		v = apic_read(APIC_LVTERR);
++		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++	}
++
++	v = apic_read(APIC_TMICT);
++	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++	v = apic_read(APIC_TMCCT);
++	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++	v = apic_read(APIC_TDCR);
++	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++	printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++	on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void __apicdebuginit print_PIC(void)
++{
++	extern spinlock_t i8259A_lock;
++	unsigned int v;
++	unsigned long flags;
++
++	if (apic_verbosity == APIC_QUIET)
++		return;
++
++	printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++	spin_lock_irqsave(&i8259A_lock, flags);
++
++	v = inb(0xa1) << 8 | inb(0x21);
++	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
++
++	v = inb(0xa0) << 8 | inb(0x20);
++	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
++
++	outb(0x0b,0xa0);
++	outb(0x0b,0x20);
++	v = inb(0xa0) << 8 | inb(0x20);
++	outb(0x0a,0xa0);
++	outb(0x0a,0x20);
++
++	spin_unlock_irqrestore(&i8259A_lock, flags);
++
++	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
++
++	v = inb(0x4d1) << 8 | inb(0x4d0);
++	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++
++#endif  /*  0  */
++
++#else
++void __init print_IO_APIC(void) { }
++#endif /* !CONFIG_XEN */
++
++static void __init enable_IO_APIC(void)
++{
++	union IO_APIC_reg_01 reg_01;
++	int i;
++	unsigned long flags;
++
++	for (i = 0; i < PIN_MAP_SIZE; i++) {
++		irq_2_pin[i].pin = -1;
++		irq_2_pin[i].next = 0;
++	}
++	if (!pirqs_enabled)
++		for (i = 0; i < MAX_PIRQS; i++)
++			pirq_entries[i] = -1;
++
++	/*
++	 * The number of IO-APIC IRQ registers (== #pins):
++	 */
++	for (i = 0; i < nr_ioapics; i++) {
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_01.raw = io_apic_read(i, 1);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		nr_ioapic_registers[i] = reg_01.bits.entries+1;
++	}
++
++	/*
++	 * Do not trust the IO-APIC being empty at bootup
++	 */
++	clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++	/*
++	 * Clear the IO-APIC before rebooting:
++	 */
++	clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++	disconnect_bsp_APIC();
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch at dell.com>  Tue Dec 21 12:25:05 CST 1999
++ */
++
++#ifndef CONFIG_XEN
++static void __init setup_ioapic_ids_from_mpc (void)
++{
++	union IO_APIC_reg_00 reg_00;
++	int apic;
++	int i;
++	unsigned char old_id;
++	unsigned long flags;
++
++	/*
++	 * Set the IOAPIC ID to the value stored in the MPC table.
++	 */
++	for (apic = 0; apic < nr_ioapics; apic++) {
++
++		/* Read the register 0 value */
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_00.raw = io_apic_read(apic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		
++		old_id = mp_ioapics[apic].mpc_apicid;
++
++
++		printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
++
++
++		/*
++		 * We need to adjust the IRQ routing table
++		 * if the ID changed.
++		 */
++		if (old_id != mp_ioapics[apic].mpc_apicid)
++			for (i = 0; i < mp_irq_entries; i++)
++				if (mp_irqs[i].mpc_dstapic == old_id)
++					mp_irqs[i].mpc_dstapic
++						= mp_ioapics[apic].mpc_apicid;
++
++		/*
++		 * Read the right value from the MPC table and
++		 * write it into the ID register.
++	 	 */
++		apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
++				mp_ioapics[apic].mpc_apicid);
++
++		reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++		spin_lock_irqsave(&ioapic_lock, flags);
++		io_apic_write(apic, 0, reg_00.raw);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++
++		/*
++		 * Sanity check
++		 */
++		spin_lock_irqsave(&ioapic_lock, flags);
++		reg_00.raw = io_apic_read(apic, 0);
++		spin_unlock_irqrestore(&ioapic_lock, flags);
++		if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++			printk("could not set ID!\n");
++		else
++			apic_printk(APIC_VERBOSE," ok.\n");
++	}
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++/*
++ * There is a nasty bug in some older SMP boards, their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ *	- timer IRQ defaults to IO-APIC IRQ
++ *	- if this function detects that timer IRQs are defunct, then we fall
++ *	  back to ISA timer IRQs
++ */
++#ifndef CONFIG_XEN
++static int __init timer_irq_works(void)
++{
++	unsigned long t1 = jiffies;
++
++	local_irq_enable();
++	/* Let ten ticks pass... */
++	mdelay((10 * 1000) / HZ);
++
++	/*
++	 * Expect a few ticks at least, to be sure some possible
++	 * glue logic does not lock up after one or two first
++	 * ticks in a non-ExtINT mode.  Also the local APIC
++	 * might have cached one ExtINT interrupt.  Finally, at
++	 * least one tick may be lost due to delays.
++	 */
++
++	/* jiffies wrap? */
++	if (jiffies - t1 > 4)
++		return 1;
++	return 0;
++}
++
++/*
++ * In the SMP+IOAPIC case it might happen that there are an unspecified
++ * number of pending IRQ events unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way as thus we do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++	int was_pending = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	if (irq < 16) {
++		disable_8259A_irq(irq);
++		if (i8259A_irq_pending(irq))
++			was_pending = 1;
++	}
++	__unmask_IO_APIC_irq(irq);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++					== (IRQ_PENDING | IRQ_DISABLED))
++		mask_IO_APIC_irq(irq);
++	ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * with a startup need to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++	unmask_IO_APIC_irq(irq);
++
++	return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++	ack_APIC_irq();
++}
++
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
++{
++	unsigned long flags;
++	unsigned int dest;
++
++	dest = cpu_mask_to_apicid(mask);
++
++	/*
++	 * Only the high 8 bits are valid.
++	 */
++	dest = SET_APIC_LOGICAL_ID(dest);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	__DO_ACTION(1, = dest, )
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++	int irq = vector_to_irq(vector);
++
++	unmask_IO_APIC_irq(irq);
++}
++
++static void set_ioapic_affinity_vector (unsigned int vector,
++					cpumask_t cpu_mask)
++{
++	int irq = vector_to_irq(vector);
++
++	set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++
++static struct hw_interrupt_type ioapic_edge_type = {
++	.typename = "IO-APIC-edge",
++	.startup 	= startup_edge_ioapic,
++	.shutdown 	= shutdown_edge_ioapic,
++	.enable 	= enable_edge_ioapic,
++	.disable 	= disable_edge_ioapic,
++	.ack 		= ack_edge_ioapic,
++	.end 		= end_edge_ioapic,
++	.set_affinity = set_ioapic_affinity,
++};
++
++static struct hw_interrupt_type ioapic_level_type = {
++	.typename = "IO-APIC-level",
++	.startup 	= startup_level_ioapic,
++	.shutdown 	= shutdown_level_ioapic,
++	.enable 	= enable_level_ioapic,
++	.disable 	= disable_level_ioapic,
++	.ack 		= mask_and_ack_level_ioapic,
++	.end 		= end_level_ioapic,
++	.set_affinity = set_ioapic_affinity,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++	int irq;
++
++	/*
++	 * NOTE! The local APIC isn't very good at handling
++	 * multiple interrupts at the same interrupt level.
++	 * As the interrupt level is determined by taking the
++	 * vector number and shifting that right by 4, we
++	 * want to spread these out a bit so that they don't
++	 * all fall in the same interrupt level.
++	 *
++	 * Also, we've got to be careful not to trash gate
++	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
++	 */
++	for (irq = 0; irq < NR_IRQS ; irq++) {
++		int tmp = irq;
++		if (use_pci_vector()) {
++			if (!platform_legacy_irq(tmp))
++				if ((tmp = vector_to_irq(tmp)) == -1)
++					continue;
++		}
++		if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++			/*
++			 * Hmm.. We don't have an entry for this,
++			 * so default to an old-fashioned 8259
++			 * interrupt if we can..
++			 */
++			if (irq < 16)
++				make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++			else
++				/* Strange. Oh, well.. */
++				irq_desc[irq].handler = &no_irq_type;
++#endif
++		}
++	}
++}
++
++#ifndef CONFIG_XEN
++static void enable_lapic_irq (unsigned int irq)
++{
++	unsigned long v;
++
++	v = apic_read(APIC_LVT0);
++	apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++	unsigned long v;
++
++	v = apic_read(APIC_LVT0);
++	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++	ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type = {
++	.typename = "local-APIC-edge",
++	.startup = NULL, /* startup_irq() not used for IRQ0 */
++	.shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
++	.enable = enable_lapic_irq,
++	.disable = disable_lapic_irq,
++	.ack = ack_lapic_irq,
++	.end = end_lapic_irq,
++};
++
++static void setup_nmi (void)
++{
++	/*
++ 	 * Dirty trick to enable the NMI watchdog ...
++	 * We put the 8259A master into AEOI mode and
++	 * unmask on all local APICs LVT0 as NMI.
++	 *
++	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++	 * is from Maciej W. Rozycki - so we do not have to EOI from
++	 * the NMI handler or the timer interrupt.
++	 */ 
++	printk(KERN_INFO "activating NMI Watchdog ...");
++
++	enable_NMI_through_LVT0(NULL);
++
++	printk(" done.\n");
++}
++
++/*
++ * This looks a bit hackish but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic.  ICR does
++ * not support the ExtINT mode, unfortunately.  We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA.  --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++	int pin, i;
++	struct IO_APIC_route_entry entry0, entry1;
++	unsigned char save_control, save_freq_select;
++	unsigned long flags;
++
++	pin = find_isa_irq_pin(8, mp_INT);
++	if (pin == -1)
++		return;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	*(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
++	*(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++	clear_IO_APIC_pin(0, pin);
++
++	memset(&entry1, 0, sizeof(entry1));
++
++	entry1.dest_mode = 0;			/* physical delivery */
++	entry1.mask = 0;			/* unmask IRQ now */
++	entry1.dest.physical.physical_dest = hard_smp_processor_id();
++	entry1.delivery_mode = dest_ExtINT;
++	entry1.polarity = entry0.polarity;
++	entry1.trigger = 0;
++	entry1.vector = 0;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	save_control = CMOS_READ(RTC_CONTROL);
++	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++		   RTC_FREQ_SELECT);
++	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
++	i = 100;
++	while (i-- > 0) {
++		mdelay(10);
++		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++			i -= 10;
++	}
++
++	CMOS_WRITE(save_control, RTC_CONTROL);
++	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++	clear_IO_APIC_pin(0, pin);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
++ * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ */
++static inline void check_timer(void)
++{
++	int pin1, pin2;
++	int vector;
++
++	/*
++	 * get/set the timer IRQ vector:
++	 */
++	disable_8259A_irq(0);
++	vector = assign_irq_vector(0);
++	set_intr_gate(vector, interrupt[0]);
++
++	/*
++	 * Subtle, code in do_timer_interrupt() expects an AEOI
++	 * mode for the 8259A whenever interrupts are routed
++	 * through I/O APICs.  Also IRQ0 has to be enabled in
++	 * the 8259A which implies the virtual wire has to be
++	 * disabled in the local APIC.
++	 */
++	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++	init_8259A(1);
++	enable_8259A_irq(0);
++
++	pin1 = find_isa_irq_pin(0, mp_INT);
++	pin2 = find_isa_irq_pin(0, mp_ExtINT);
++
++	apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
++
++	if (pin1 != -1) {
++		/*
++		 * Ok, does IRQ0 through the IOAPIC work?
++		 */
++		unmask_IO_APIC_irq(0);
++		if (!no_timer_check && timer_irq_works()) {
++			nmi_watchdog_default();
++			if (nmi_watchdog == NMI_IO_APIC) {
++				disable_8259A_irq(0);
++				setup_nmi();
++				enable_8259A_irq(0);
++			}
++			return;
++		}
++		clear_IO_APIC_pin(0, pin1);
++		apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
++	}
++
++	apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
++	if (pin2 != -1) {
++		apic_printk(APIC_VERBOSE,"\n..... (found pin %d) ...", pin2);
++		/*
++		 * legacy devices should be connected to IO APIC #0
++		 */
++		setup_ExtINT_IRQ0_pin(pin2, vector);
++		if (timer_irq_works()) {
++			printk("works.\n");
++			nmi_watchdog_default();
++			if (nmi_watchdog == NMI_IO_APIC) {
++				setup_nmi();
++			}
++			return;
++		}
++		/*
++		 * Cleanup, just in case ...
++		 */
++		clear_IO_APIC_pin(0, pin2);
++	}
++	printk(" failed.\n");
++
++	if (nmi_watchdog) {
++		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++		nmi_watchdog = 0;
++	}
++
++	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++	disable_8259A_irq(0);
++	irq_desc[0].handler = &lapic_irq_type;
++	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
++	enable_8259A_irq(0);
++
++	if (timer_irq_works()) {
++		apic_printk(APIC_QUIET, " works.\n");
++		return;
++	}
++	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++	apic_printk(APIC_VERBOSE," failed.\n");
++
++	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++	init_8259A(0);
++	make_8259A_irq(0);
++	apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
++
++	unlock_ExtINT_logic();
++
++	if (timer_irq_works()) {
++		apic_printk(APIC_VERBOSE," works.\n");
++		return;
++	}
++	apic_printk(APIC_VERBOSE," failed :(.\n");
++	panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
++}
++#else
++#define check_timer() ((void)0)
++#endif /* !CONFIG_XEN */
++
++static int __init notimercheck(char *s)
++{
++	no_timer_check = 1;
++	return 1;
++}
++__setup("no_timer_check", notimercheck);
++
++/*
++ *
++ * IRQ's that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ *   Linux doesn't really care, as it's not actually used
++ *   for any interrupt handling anyway.
++ */
++#define PIC_IRQS	(1<<2)
++
++void __init setup_IO_APIC(void)
++{
++	enable_IO_APIC();
++
++	if (acpi_ioapic)
++		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
++	else
++		io_apic_irqs = ~PIC_IRQS;
++
++	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
++
++	/*
++	 * Set up the IO-APIC IRQ routing table.
++	 */
++	if (!acpi_ioapic)
++		setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++	sync_Arb_IDs();
++#endif /* !CONFIG_XEN */
++	setup_IO_APIC_irqs();
++	init_IO_APIC_traps();
++	check_timer();
++	if (!acpi_ioapic)
++		print_IO_APIC();
++}
++
++struct sysfs_ioapic_data {
++	struct sys_device dev;
++	struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++	struct IO_APIC_route_entry *entry;
++	struct sysfs_ioapic_data *data;
++	unsigned long flags;
++	int i;
++
++	data = container_of(dev, struct sysfs_ioapic_data, dev);
++	entry = data->entry;
++	spin_lock_irqsave(&ioapic_lock, flags);
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++		*(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++		*(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++	}
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++	struct IO_APIC_route_entry *entry;
++	struct sysfs_ioapic_data *data;
++	unsigned long flags;
++	union IO_APIC_reg_00 reg_00;
++	int i;
++
++	data = container_of(dev, struct sysfs_ioapic_data, dev);
++	entry = data->entry;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_00.raw = io_apic_read(dev->id, 0);
++	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++		io_apic_write(dev->id, 0, reg_00.raw);
++	}
++	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++		io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++		io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++	}
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++	set_kset_name("ioapic"),
++	.suspend = ioapic_suspend,
++	.resume = ioapic_resume,
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++	struct sys_device * dev;
++	int i, size, error = 0;
++
++	error = sysdev_class_register(&ioapic_sysdev_class);
++	if (error)
++		return error;
++
++	for (i = 0; i < nr_ioapics; i++ ) {
++		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++			* sizeof(struct IO_APIC_route_entry);
++		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++		if (!mp_ioapic_data[i]) {
++			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++			continue;
++		}
++		memset(mp_ioapic_data[i], 0, size);
++		dev = &mp_ioapic_data[i]->dev;
++		dev->id = i;
++		dev->cls = &ioapic_sysdev_class;
++		error = sysdev_register(dev);
++		if (error) {
++			kfree(mp_ioapic_data[i]);
++			mp_ioapic_data[i] = NULL;
++			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++			continue;
++		}
++	}
++
++	return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++                          ACPI-based IOAPIC Configuration
++   -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI_BOOT
++
++#define IO_APIC_MAX_ID		0xFE
++
++int __init io_apic_get_version (int ioapic)
++{
++	union IO_APIC_reg_01	reg_01;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_01.raw = io_apic_read(ioapic, 1);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++	union IO_APIC_reg_01	reg_01;
++	unsigned long flags;
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	reg_01.raw = io_apic_read(ioapic, 1);
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++	struct IO_APIC_route_entry entry;
++	unsigned long flags;
++
++	if (!IO_APIC_IRQ(irq)) {
++		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++			ioapic);
++		return -EINVAL;
++	}
++
++	/*
++	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++	 * Note that we mask (disable) IRQs now -- these get enabled when the
++	 * corresponding device driver registers for this IRQ.
++	 */
++
++	memset(&entry,0,sizeof(entry));
++
++	entry.delivery_mode = INT_DELIVERY_MODE;
++	entry.dest_mode = INT_DEST_MODE;
++	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++	entry.trigger = edge_level;
++	entry.polarity = active_high_low;
++	entry.mask = 1;					 /* Disabled (masked) */
++
++	/*
++	 * IRQs < 16 are already in the irq_2_pin[] map
++	 */
++	if (irq >= 16)
++		add_pin_to_irq(irq, ioapic, pin);
++
++	entry.vector = assign_irq_vector(irq);
++
++	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
++		"IRQ %d Mode:%i Active:%i)\n", ioapic, 
++	       mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++	       edge_level, active_high_low);
++
++	ioapic_register_intr(irq, entry.vector, edge_level);
++
++	if (!ioapic && (irq < 16))
++		disable_8259A_irq(irq);
++
++	spin_lock_irqsave(&ioapic_lock, flags);
++	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++	spin_unlock_irqrestore(&ioapic_lock, flags);
++
++	return 0;
++}
++
++#endif /*CONFIG_ACPI_BOOT*/
++
++
++#ifndef CONFIG_XEN
++/*
++ * This function currently is only a helper for the i386 smp boot process, where
++ * we need to reprogram the ioredtbls to cater for the cpus which have come online,
++ * so the mask in all cases should simply be TARGET_CPUS.
++ */
++void __init setup_ioapic_dest(void)
++{
++	int pin, ioapic, irq, irq_entry;
++
++	if (skip_ioapic_setup == 1)
++		return;
++
++	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++			if (irq_entry == -1)
++				continue;
++			irq = pin_2_irq(irq_entry, ioapic, pin);
++			set_ioapic_affinity_irq(irq, TARGET_CPUS);
++		}
++
++	}
++}
++#endif /* !CONFIG_XEN */
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/ioport.c linux-2.6.12-xen/arch/xen/x86_64/kernel/ioport.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/ioport.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/ioport.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,57 @@
++/*
++ *	linux/arch/x86_64/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <asm-xen/xen-public/physdev.h>
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ */
++
++asmlinkage long sys_iopl(unsigned int new_io_pl, struct pt_regs *regs)
++{
++        unsigned int old_io_pl = current->thread.io_pl;
++        physdev_op_t op;
++
++	if (new_io_pl > 3)
++		return -EINVAL;
++
++	/* Need "raw I/O" privileges for direct port access. */
++	if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
++		return -EPERM;
++
++	/* Change our version of the privilege levels. */
++	current->thread.io_pl = new_io_pl;
++
++	/* Force the change at ring 0. */
++	op.cmd             = PHYSDEVOP_SET_IOPL;
++	op.u.set_iopl.iopl = (new_io_pl == 0) ? 1 : new_io_pl;
++	HYPERVISOR_physdev_op(&op);
++
++	return 0;
++}
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++  return turn_on ? sys_iopl(3, NULL) : 0;
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/irq.c linux-2.6.12-xen/arch/xen/x86_64/kernel/irq.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/irq.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/irq.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,107 @@
++/*
++ *	linux/arch/x86_64/kernel/irq.c
++ *
++ *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86_64-specific interrupt
++ * entry and irq statistics code. All the remaining irq logic is
++ * done by the generic kernel/irq/ code and in the
++ * x86_64-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <linux/kernel_stat.h>
++#include <linux/interrupt.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++#include <asm/uaccess.h>
++#include <asm/io_apic.h>
++
++atomic_t irq_err_count;
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++atomic_t irq_mis_count;
++#endif
++#endif
++
++/*
++ * Generic, controller-independent functions:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++	int i = *(loff_t *) v, j;
++	struct irqaction * action;
++	unsigned long flags;
++
++	if (i == 0) {
++		seq_printf(p, "           ");
++		for (j=0; j<NR_CPUS; j++)
++			if (cpu_online(j))
++				seq_printf(p, "CPU%d       ",j);
++		seq_putc(p, '\n');
++	}
++
++	if (i < NR_IRQS) {
++		spin_lock_irqsave(&irq_desc[i].lock, flags);
++		action = irq_desc[i].action;
++		if (!action) 
++			goto skip;
++		seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++		seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++		for (j=0; j<NR_CPUS; j++)
++			if (cpu_online(j))
++			seq_printf(p, "%10u ",
++				kstat_cpu(j).irqs[i]);
++#endif
++		seq_printf(p, " %14s", irq_desc[i].handler->typename);
++
++		seq_printf(p, "  %s", action->name);
++		for (action=action->next; action; action = action->next)
++			seq_printf(p, ", %s", action->name);
++		seq_putc(p, '\n');
++skip:
++		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++	} else if (i == NR_IRQS) {
++		seq_printf(p, "NMI: ");
++		for (j = 0; j < NR_CPUS; j++)
++			if (cpu_online(j))
++				seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
++		seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++		seq_printf(p, "LOC: ");
++		for (j = 0; j < NR_CPUS; j++)
++			if (cpu_online(j))
++				seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
++		seq_putc(p, '\n');
++#endif
++		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++#endif
++	}
++	return 0;
++}
++
++/*
++ * do_IRQ handles all normal device IRQ's (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
++{	
++	/* high bits used in ret_from_ code  */
++        int irq = regs->orig_rax & __IRQ_MASK(HARDIRQ_BITS);
++
++	irq_enter();
++
++	__do_IRQ(irq, regs);
++	irq_exit();
++
++	return 1;
++}
++
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/ldt.c linux-2.6.12-xen/arch/xen/x86_64/kernel/ldt.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/ldt.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/ldt.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,282 @@
++/*
++ * linux/arch/x86_64/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
++ * Copyright (C) 2002 Andi Kleen
++ * 
++ * This handles calls from both 32bit and 64bit mode.
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/pgalloc.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++	if (current->active_mm)
++               load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
++{
++	void *oldldt;
++	void *newldt;
++	unsigned oldsize;
++
++	if (mincount <= (unsigned)pc->size)
++		return 0;
++	oldsize = pc->size;
++	mincount = (mincount+511)&(~511);
++	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++	else
++		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++	if (!newldt)
++		return -ENOMEM;
++
++	if (oldsize)
++		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++	oldldt = pc->ldt;
++	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++	wmb();
++	pc->ldt = newldt;
++	wmb();
++	pc->size = mincount;
++	wmb();
++	if (reload) {
++#ifdef CONFIG_SMP
++		cpumask_t mask;
++
++		preempt_disable();
++#endif
++		make_pages_readonly(
++			pc->ldt,
++			(pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		load_LDT(pc);
++#ifdef CONFIG_SMP
++		mask = cpumask_of_cpu(smp_processor_id());
++		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++			smp_call_function(flush_ldt, NULL, 1, 1);
++		preempt_enable();
++#endif
++	}
++	if (oldsize) {
++		make_pages_writable(
++			oldldt,
++			(oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(oldldt);
++		else
++			kfree(oldldt);
++	}
++	return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++	int err = alloc_ldt(new, old->size, 0);
++	if (err < 0)
++		return err;
++	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++	make_pages_readonly(
++		new->ldt,
++		(new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++		XENFEAT_writable_descriptor_tables);
++	return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++	struct mm_struct * old_mm;
++	int retval = 0;
++
++	memset(&mm->context, 0, sizeof(mm->context));
++	init_MUTEX(&mm->context.sem);
++	old_mm = current->mm;
++	if (old_mm && old_mm->context.size > 0) {
++		down(&old_mm->context.sem);
++		retval = copy_ldt(&mm->context, &old_mm->context);
++		up(&old_mm->context.sem);
++	}
++	if (retval == 0) {
++		spin_lock(&mm_unpinned_lock);
++		list_add(&mm->context.unpinned, &mm_unpinned);
++		spin_unlock(&mm_unpinned_lock);
++	}
++	return retval;
++}
++
++/*
++ * 
++ * Don't touch the LDT register - we're already in the next thread.
++ */
++void destroy_context(struct mm_struct *mm)
++{
++	if (mm->context.size) {
++		if (mm == current->active_mm)
++			clear_LDT();
++		make_pages_writable(
++			mm->context.ldt,
++			(mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++			XENFEAT_writable_descriptor_tables);
++		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++			vfree(mm->context.ldt);
++		else
++			kfree(mm->context.ldt);
++		mm->context.size = 0;
++	}
++	if (!mm->context.pinned) {
++		spin_lock(&mm_unpinned_lock);
++		list_del(&mm->context.unpinned);
++		spin_unlock(&mm_unpinned_lock);
++	}
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++	int err;
++	unsigned long size;
++	struct mm_struct * mm = current->mm;
++
++	if (!mm->context.size)
++		return 0;
++	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++	down(&mm->context.sem);
++	size = mm->context.size*LDT_ENTRY_SIZE;
++	if (size > bytecount)
++		size = bytecount;
++
++	err = 0;
++	if (copy_to_user(ptr, mm->context.ldt, size))
++		err = -EFAULT;
++	up(&mm->context.sem);
++	if (err < 0)
++		goto error_return;
++	if (size != bytecount) {
++		/* zero-fill the rest */
++		if (clear_user(ptr+size, bytecount-size) != 0) {
++			err = -EFAULT;
++			goto error_return;
++		}
++	}
++	return bytecount;
++error_return:
++	return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++	/* Arbitrary number */ 
++	/* x86-64 default LDT is all zeros */
++	if (bytecount > 128) 
++		bytecount = 128; 	
++	if (clear_user(ptr, bytecount))
++		return -EFAULT;
++	return bytecount; 
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++	struct task_struct *me = current;
++	struct mm_struct * mm = me->mm;
++	__u32 entry_1, entry_2, *lp;
++	unsigned long mach_lp;
++	int error;
++	struct user_desc ldt_info;
++
++	error = -EINVAL;
++
++	if (bytecount != sizeof(ldt_info))
++		goto out;
++	error = -EFAULT; 	
++	if (copy_from_user(&ldt_info, ptr, bytecount))
++		goto out;
++
++	error = -EINVAL;
++	if (ldt_info.entry_number >= LDT_ENTRIES)
++		goto out;
++	if (ldt_info.contents == 3) {
++		if (oldmode)
++			goto out;
++		if (ldt_info.seg_not_present == 0)
++			goto out;
++	}
++
++	down(&mm->context.sem);
++	if (ldt_info.entry_number >= (unsigned)mm->context.size) {
++		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++		if (error < 0)
++			goto out_unlock;
++	}
++
++	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
++ 	mach_lp = arbitrary_virt_to_machine(lp);
++
++   	/* Allow LDTs to be cleared by the user. */
++   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++		if (oldmode || LDT_empty(&ldt_info)) {
++			entry_1 = 0;
++			entry_2 = 0;
++			goto install;
++		}
++	}
++
++	entry_1 = LDT_entry_a(&ldt_info);
++	entry_2 = LDT_entry_b(&ldt_info);
++	if (oldmode)
++		entry_2 &= ~(1 << 20);
++
++	/* Install the new entry ...  */
++install:
++	error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
++
++out_unlock:
++	up(&mm->context.sem);
++out:
++	return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++	int ret = -ENOSYS;
++
++	switch (func) {
++	case 0:
++		ret = read_ldt(ptr, bytecount);
++		break;
++	case 1:
++		ret = write_ldt(ptr, bytecount, 1);
++		break;
++	case 2:
++		ret = read_default_ldt(ptr, bytecount);
++		break;
++	case 0x11:
++		ret = write_ldt(ptr, bytecount, 0);
++		break;
++	}
++	return ret;
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/Makefile linux-2.6.12-xen/arch/xen/x86_64/kernel/Makefile
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,71 @@
++#
++# Makefile for the linux kernel.
++#
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++CFLAGS	+= -Iarch/$(XENARCH)/kernel
++
++extra-y 	:= head.o head64.o init_task.o
++
++obj-y	:= process.o signal.o entry.o traps.o  \
++		ioport.o ldt.o setup.o \
++		x8664_ksyms.o vsyscall.o \
++		setup64.o e820.o irq.o early_printk.o
++c-obj-y	:= semaphore.o i387.o sys_x86_64.o \
++		ptrace.o quirks.o syscall.o bootflag.o
++
++i386-obj-y			:= time.o
++#obj-y				+= ../../i386/kernel/timers/
++
++s-obj-y	:=
++
++#obj-$(CONFIG_X86_MCE)         += mce.o
++#obj-$(CONFIG_MTRR)		+= ../../i386/kernel/cpu/mtrr/
++obj-$(CONFIG_ACPI_BOOT)		+= acpi/
++c-obj-$(CONFIG_X86_MSR)		+= msr.o
++obj-$(CONFIG_MICROCODE)		+= microcode.o
++obj-$(CONFIG_X86_CPUID)		+= cpuid.o
++obj-$(CONFIG_SMP)		+= smp.o
++obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o
++c-obj-$(CONFIG_X86_LOCAL_APIC)	+= nmi.o
++obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o mpparse.o
++obj-$(CONFIG_X86_XEN_GENAPIC)	+= genapic.o genapic_xen.o
++c-obj-$(CONFIG_X86_IO_APIC)	+= genapic_cluster.o genapic_flat.o
++#obj-$(CONFIG_PM)		+= suspend.o
++#obj-$(CONFIG_SOFTWARE_SUSPEND)	+= suspend_asm.o
++#obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
++#obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
++#obj-$(CONFIG_GART_IOMMU)	+= pci-gart.o aperture.o
++obj-$(CONFIG_DUMMY_IOMMU)	+= pci-nommu.o
++i386-obj-$(CONFIG_DUMMY_IOMMU)	+= pci-dma.o
++i386-obj-$(CONFIG_SWIOTLB)	+= swiotlb.o
++obj-$(CONFIG_KPROBES)		+= kprobes.o
++#obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer.o
++
++c-obj-$(CONFIG_MODULES)		+= module.o
++
++obj-y				+= topology.o
++c-obj-y				+= intel_cacheinfo.o
++
++bootflag-y			+= ../../../i386/kernel/bootflag.o
++cpuid-$(subst m,y,$(CONFIG_X86_CPUID))  += ../../../i386/kernel/cpuid.o
++topology-y                     += ../../../i386/mach-default/topology.o
++#swiotlb-$(CONFIG_SWIOTLB)      += ../../../ia64/lib/swiotlb.o
++microcode-$(subst m,y,$(CONFIG_MICROCODE))  += ../../i386/kernel/microcode.o
++intel_cacheinfo-y		+= ../../../i386/kernel/cpu/intel_cacheinfo.o
++quirks-y			+= ../../i386/kernel/quirks.o
++
++c-link	:= init_task.o
++s-link	:= vsyscall.o 
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-obj-m) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
++	@ln -fsn $(srctree)/arch/x86_64/kernel/$(notdir $@) $@
++
++$(patsubst %.o,$(obj)/%.c,$(i386-obj-y)):
++	@ln -fsn $(srctree)/arch/xen/i386/kernel/$(notdir $@) $@
++
++obj-y	+= $(c-obj-y) $(s-obj-y) $(i386-obj-y)
++obj-m	+= $(c-obj-m)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-m) $(c-obj-) $(c-link) $(i386-obj-y))
++clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/mpparse.c linux-2.6.12-xen/arch/xen/x86_64/kernel/mpparse.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/mpparse.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/mpparse.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,963 @@
++/*
++ *	Intel Multiprocessor Specification 1.1 and 1.4
++ *	compliant MP-table parsing routines.
++ *
++ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
++ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
++ *
++ *	Fixes
++ *		Erich Boleyn	:	MP v1.4 and additional changes.
++ *		Alan Cox	:	Added EBDA scanning
++ *		Ingo Molnar	:	various cleanups and rewrites
++ *		Maciej W. Rozycki:	Bits for default MP configurations
++ *		Paul Diefenbaugh:	Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/config.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/pgalloc.h>
++#include <asm/io_apic.h>
++#include <asm/proto.h>
++#include <asm/acpi.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++int acpi_found_madt;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++int apic_version [MAX_APICS];
++unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++cpumask_t pci_bus_to_cpumask [256] = { [0 ... 255] = CPU_MASK_ALL };
++
++static int mp_current_pci_id = 0;
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++int pic_mode;
++unsigned long mp_lapic_addr = 0;
++
++
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_id = -1U;
++/* Internal processor count */
++static unsigned int num_processors = 0;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
++
++/* ACPI MADT entry parsing functions */
++#ifdef CONFIG_ACPI_BOOT
++extern struct acpi_boot_flags acpi_boot;
++#ifdef CONFIG_X86_LOCAL_APIC
++extern int acpi_parse_lapic (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_LOCAL_APIC*/
++#ifdef CONFIG_X86_IO_APIC
++extern int acpi_parse_ioapic (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI_BOOT*/
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++/*
++ * Checksum an MP configuration block.
++ */
++
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++	int sum = 0;
++
++	while (len--)
++		sum += *mp++;
++
++	return sum & 0xFF;
++}
++
++#ifndef CONFIG_XEN
++static void __init MP_processor_info (struct mpc_config_processor *m)
++{
++	int ver;
++	static int found_bsp=0;
++
++	if (!(m->mpc_cpuflag & CPU_ENABLED))
++		return;
++
++	printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
++		m->mpc_apicid,
++	       (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
++	       (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
++		m->mpc_apicver);
++
++	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++		Dprintk("    Bootup CPU\n");
++		boot_cpu_id = m->mpc_apicid;
++	}
++	if (num_processors >= NR_CPUS) {
++		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++			" Processor ignored.\n", NR_CPUS);
++		return;
++	}
++
++	num_processors++;
++
++	if (m->mpc_apicid > MAX_APICS) {
++		printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
++			m->mpc_apicid, MAX_APICS);
++		return;
++	}
++	ver = m->mpc_apicver;
++
++	physid_set(m->mpc_apicid, phys_cpu_present_map);
++	/*
++	 * Validate version
++	 */
++	if (ver == 0x0) {
++		printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
++		ver = 0x10;
++	}
++	apic_version[m->mpc_apicid] = ver;
++ 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ 		/*
++ 		 * bios_cpu_apicid is required to have processors listed
++ 		 * in same order as logical cpu numbers. Hence the first
++ 		 * entry is BSP, and so on.
++ 		 */
++ 		bios_cpu_apicid[0] = m->mpc_apicid;
++ 		x86_cpu_to_apicid[0] = m->mpc_apicid;
++ 		found_bsp = 1;
++ 	} else {
++ 		bios_cpu_apicid[num_processors - found_bsp] = m->mpc_apicid;
++ 		x86_cpu_to_apicid[num_processors - found_bsp] = m->mpc_apicid;
++ 	}
++}
++#else
++void __init MP_processor_info (struct mpc_config_processor *m)
++{
++	num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++	char str[7];
++
++	memcpy(str, m->mpc_bustype, 6);
++	str[6] = 0;
++	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
++
++	if (strncmp(str, "ISA", 3) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++	} else if (strncmp(str, "EISA", 4) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++	} else if (strncmp(str, "PCI", 3) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++		mp_current_pci_id++;
++	} else if (strncmp(str, "MCA", 3) == 0) {
++		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++	} else {
++		printk(KERN_ERR "Unknown bustype %s\n", str);
++	}
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++	if (!(m->mpc_flags & MPC_APIC_USABLE))
++		return;
++
++	printk("I/O APIC #%d Version %d at 0x%X.\n",
++		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++	if (nr_ioapics >= MAX_IO_APICS) {
++		printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
++			MAX_IO_APICS, nr_ioapics);
++		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
++	}
++	if (!m->mpc_apicaddr) {
++		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++			" found in MP table, skipping!\n");
++		return;
++	}
++	mp_ioapics[nr_ioapics] = *m;
++	nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++	mp_irqs [mp_irq_entries] = *m;
++	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
++			m->mpc_irqtype, m->mpc_irqflag & 3,
++			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++	if (++mp_irq_entries == MAX_IRQ_SOURCES)
++		panic("Max # of irq sources exceeded!!\n");
++}
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++			m->mpc_irqtype, m->mpc_irqflag & 3,
++			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++	/*
++	 * Well it seems all SMP boards in existence
++	 * use ExtINT/LVT1 == LINT0 and
++	 * NMI/LVT2 == LINT1 - the following check
++	 * will show us if this assumption is false.
++	 * Until then we do not have to add baggage.
++	 */
++	if ((m->mpc_irqtype == mp_ExtINT) &&
++		(m->mpc_destapiclint != 0))
++			BUG();
++	if ((m->mpc_irqtype == mp_NMI) &&
++		(m->mpc_destapiclint != 1))
++			BUG();
++}
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++	char str[16];
++	int count=sizeof(*mpc);
++	unsigned char *mpt=((unsigned char *)mpc)+count;
++
++	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++		printk("SMP mptable: bad signature [%c%c%c%c]!\n",
++			mpc->mpc_signature[0],
++			mpc->mpc_signature[1],
++			mpc->mpc_signature[2],
++			mpc->mpc_signature[3]);
++		return 0;
++	}
++	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++		printk("SMP mptable: checksum error!\n");
++		return 0;
++	}
++	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++			mpc->mpc_spec);
++		return 0;
++	}
++	if (!mpc->mpc_lapic) {
++		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++		return 0;
++	}
++	memcpy(str,mpc->mpc_oem,8);
++	str[8]=0;
++	printk(KERN_INFO "OEM ID: %s ",str);
++
++	memcpy(str,mpc->mpc_productid,12);
++	str[12]=0;
++	printk(KERN_INFO "Product ID: %s ",str);
++
++	printk(KERN_INFO "APIC at: 0x%X\n",mpc->mpc_lapic);
++
++	/* save the local APIC address, it might be non-default */
++	if (!acpi_lapic)
++	mp_lapic_addr = mpc->mpc_lapic;
++
++	/*
++	 *	Now process the configuration blocks.
++	 */
++	while (count < mpc->mpc_length) {
++		switch(*mpt) {
++			case MP_PROCESSOR:
++			{
++				struct mpc_config_processor *m=
++					(struct mpc_config_processor *)mpt;
++				if (!acpi_lapic)
++				MP_processor_info(m);
++				mpt += sizeof(*m);
++				count += sizeof(*m);
++				break;
++			}
++			case MP_BUS:
++			{
++				struct mpc_config_bus *m=
++					(struct mpc_config_bus *)mpt;
++				MP_bus_info(m);
++				mpt += sizeof(*m);
++				count += sizeof(*m);
++				break;
++			}
++			case MP_IOAPIC:
++			{
++				struct mpc_config_ioapic *m=
++					(struct mpc_config_ioapic *)mpt;
++				MP_ioapic_info(m);
++				mpt+=sizeof(*m);
++				count+=sizeof(*m);
++				break;
++			}
++			case MP_INTSRC:
++			{
++				struct mpc_config_intsrc *m=
++					(struct mpc_config_intsrc *)mpt;
++
++				MP_intsrc_info(m);
++				mpt+=sizeof(*m);
++				count+=sizeof(*m);
++				break;
++			}
++			case MP_LINTSRC:
++			{
++				struct mpc_config_lintsrc *m=
++					(struct mpc_config_lintsrc *)mpt;
++				MP_lintsrc_info(m);
++				mpt+=sizeof(*m);
++				count+=sizeof(*m);
++				break;
++			}
++		}
++	}
++	clustered_apic_check();
++	if (!num_processors)
++		printk(KERN_ERR "SMP mptable: no processors registered!\n");
++	return num_processors;
++}
++
++static int __init ELCR_trigger(unsigned int irq)
++{
++	unsigned int port;
++
++	port = 0x4d0 + (irq >> 3);
++	return (inb(port) >> (irq & 7)) & 1;
++}
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++	struct mpc_config_intsrc intsrc;
++	int i;
++	int ELCR_fallback = 0;
++
++	intsrc.mpc_type = MP_INTSRC;
++	intsrc.mpc_irqflag = 0;			/* conforming */
++	intsrc.mpc_srcbus = 0;
++	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++	intsrc.mpc_irqtype = mp_INT;
++
++	/*
++	 *  If true, we have an ISA/PCI system with no IRQ entries
++	 *  in the MP table. To prevent the PCI interrupts from being set up
++	 *  incorrectly, we try to use the ELCR. The sanity check to see if
++	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++	 *  never be level sensitive, so we simply see if the ELCR agrees.
++	 *  If it does, we assume it's valid.
++	 */
++	if (mpc_default_type == 5) {
++		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++			printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
++		else {
++			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++			ELCR_fallback = 1;
++		}
++	}
++
++	for (i = 0; i < 16; i++) {
++		switch (mpc_default_type) {
++		case 2:
++			if (i == 0 || i == 13)
++				continue;	/* IRQ0 & IRQ13 not connected */
++			/* fall through */
++		default:
++			if (i == 2)
++				continue;	/* IRQ2 is never connected */
++		}
++
++		if (ELCR_fallback) {
++			/*
++			 *  If the ELCR indicates a level-sensitive interrupt, we
++			 *  copy that information over to the MP table in the
++			 *  irqflag field (level sensitive, active high polarity).
++			 */
++			if (ELCR_trigger(i))
++				intsrc.mpc_irqflag = 13;
++			else
++				intsrc.mpc_irqflag = 0;
++		}
++
++		intsrc.mpc_srcbusirq = i;
++		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
++		MP_intsrc_info(&intsrc);
++	}
++
++	intsrc.mpc_irqtype = mp_ExtINT;
++	intsrc.mpc_srcbusirq = 0;
++	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
++	MP_intsrc_info(&intsrc);
++}
++
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++	struct mpc_config_processor processor;
++	struct mpc_config_bus bus;
++	struct mpc_config_ioapic ioapic;
++	struct mpc_config_lintsrc lintsrc;
++	int linttypes[2] = { mp_ExtINT, mp_NMI };
++	int i;
++
++	/*
++	 * local APIC has default address
++	 */
++	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++	/*
++	 * 2 CPUs, numbered 0 & 1.
++	 */
++	processor.mpc_type = MP_PROCESSOR;
++	/* Either an integrated APIC or a discrete 82489DX. */
++	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++	processor.mpc_cpuflag = CPU_ENABLED;
++	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++				   (boot_cpu_data.x86_model << 4) |
++				   boot_cpu_data.x86_mask;
++	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++	processor.mpc_reserved[0] = 0;
++	processor.mpc_reserved[1] = 0;
++	for (i = 0; i < 2; i++) {
++		processor.mpc_apicid = i;
++		MP_processor_info(&processor);
++	}
++
++	bus.mpc_type = MP_BUS;
++	bus.mpc_busid = 0;
++	switch (mpc_default_type) {
++		default:
++			printk(KERN_ERR "???\nUnknown standard configuration %d\n",
++				mpc_default_type);
++			/* fall through */
++		case 1:
++		case 5:
++			memcpy(bus.mpc_bustype, "ISA   ", 6);
++			break;
++		case 2:
++		case 6:
++		case 3:
++			memcpy(bus.mpc_bustype, "EISA  ", 6);
++			break;
++		case 4:
++		case 7:
++			memcpy(bus.mpc_bustype, "MCA   ", 6);
++	}
++	MP_bus_info(&bus);
++	if (mpc_default_type > 4) {
++		bus.mpc_busid = 1;
++		memcpy(bus.mpc_bustype, "PCI   ", 6);
++		MP_bus_info(&bus);
++	}
++
++	ioapic.mpc_type = MP_IOAPIC;
++	ioapic.mpc_apicid = 2;
++	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++	ioapic.mpc_flags = MPC_APIC_USABLE;
++	ioapic.mpc_apicaddr = 0xFEC00000;
++	MP_ioapic_info(&ioapic);
++
++	/*
++	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
++	 */
++	construct_default_ioirq_mptable(mpc_default_type);
++
++	lintsrc.mpc_type = MP_LINTSRC;
++	lintsrc.mpc_irqflag = 0;		/* conforming */
++	lintsrc.mpc_srcbusid = 0;
++	lintsrc.mpc_srcbusirq = 0;
++	lintsrc.mpc_destapic = MP_APIC_ALL;
++	for (i = 0; i < 2; i++) {
++		lintsrc.mpc_irqtype = linttypes[i];
++		lintsrc.mpc_destapiclint = i;
++		MP_lintsrc_info(&lintsrc);
++	}
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++	struct intel_mp_floating *mpf = mpf_found;
++
++	/*
++ 	 * ACPI may be used to obtain the entire SMP configuration or just to 
++ 	 * enumerate/configure processors (CONFIG_ACPI_BOOT).  Note that 
++ 	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
++ 	 * processors, where MPS only supports physical.
++ 	 */
++ 	if (acpi_lapic && acpi_ioapic) {
++ 		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++ 		return;
++	}
++ 	else if (acpi_lapic)
++ 		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++	printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++	if (mpf->mpf_feature2 & (1<<7)) {
++		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
++		pic_mode = 1;
++	} else {
++		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
++		pic_mode = 0;
++	}
++
++	/*
++	 * Now see if we need to read further.
++	 */
++	if (mpf->mpf_feature1 != 0) {
++
++		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++		construct_default_ISA_mptable(mpf->mpf_feature1);
++
++	} else if (mpf->mpf_physptr) {
++
++		/*
++		 * Read the physical hardware table.  Anything here will
++		 * override the defaults.
++		 */
++ 		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++			smp_found_config = 0;
++			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++			return;
++		}
++		/*
++		 * If there are no explicit MP IRQ entries, then we are
++		 * broken.  We set up most of the low 16 IO-APIC pins to
++		 * ISA defaults and hope it will work.
++		 */
++		if (!mp_irq_entries) {
++			struct mpc_config_bus bus;
++
++			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++			bus.mpc_type = MP_BUS;
++			bus.mpc_busid = 0;
++			memcpy(bus.mpc_bustype, "ISA   ", 6);
++			MP_bus_info(&bus);
++
++			construct_default_ioirq_mptable(0);
++		}
++
++	} else
++		BUG();
++
++	printk(KERN_INFO "Processors: %d\n", num_processors);
++	/*
++	 * Only use the first configuration found.
++	 */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++	extern void __bad_mpf_size(void); 
++	unsigned int *bp = isa_bus_to_virt(base);
++	struct intel_mp_floating *mpf;
++
++	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++	if (sizeof(*mpf) != 16)
++		__bad_mpf_size();
++
++	while (length > 0) {
++		mpf = (struct intel_mp_floating *)bp;
++		if ((*bp == SMP_MAGIC_IDENT) &&
++			(mpf->mpf_length == 1) &&
++			!mpf_checksum((unsigned char *)bp, 16) &&
++			((mpf->mpf_specification == 1)
++				|| (mpf->mpf_specification == 4)) ) {
++
++			smp_found_config = 1;
++			mpf_found = mpf;
++			return 1;
++		}
++		bp += 4;
++		length -= 16;
++	}
++	return 0;
++}
++
++void __init find_intel_smp (void)
++{
++	unsigned int address;
++
++	/*
++	 * FIXME: Linux assumes you have 640K of base ram..
++	 * this continues the error...
++	 *
++	 * 1) Scan the bottom 1K for a signature
++	 * 2) Scan the top 1K of base RAM
++	 * 3) Scan the 64K of bios
++	 */
++	if (smp_scan_config(0x0,0x400) ||
++		smp_scan_config(639*0x400,0x400) ||
++			smp_scan_config(0xF0000,0x10000))
++		return;
++	/*
++	 * If it is an SMP machine we should know now, unless the
++	 * configuration is in an EISA/MCA bus machine with an
++	 * extended bios data area.
++	 *
++	 * there is a real-mode segmented pointer pointing to the
++	 * 4K EBDA area at 0x40E, calculate and scan it here.
++	 *
++	 * NOTE! There are Linux loaders that will corrupt the EBDA
++	 * area, and as such this kind of SMP config may be less
++	 * trustworthy, simply because the SMP table may have been
++	 * stomped on during early boot. These loaders are buggy and
++	 * should be fixed.
++	 */
++
++	address = *(unsigned short *)phys_to_virt(0x40E);
++	address <<= 4;
++	if (smp_scan_config(address, 0x1000))
++		return;
++
++	/* If we have come this far, we did not find an MP table  */
++	 printk(KERN_INFO "No mptable found.\n");
++}
++
++/*
++ * - Intel MP Configuration Table
++ */
++void __init find_smp_config (void)
++{
++#ifdef CONFIG_X86_LOCAL_APIC
++	find_intel_smp();
++#endif
++}
++
++
++/* --------------------------------------------------------------------------
++                            ACPI-based MP Configuration
++   -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI_BOOT
++
++void __init mp_register_lapic_address (
++	u64			address)
++{
++#ifndef CONFIG_XEN
++	mp_lapic_addr = (unsigned long) address;
++
++	if (boot_cpu_id == -1U)
++		boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
++
++	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __init mp_register_lapic (
++	u8			id, 
++	u8			enabled)
++{
++	struct mpc_config_processor processor;
++	int			boot_cpu = 0;
++	
++	if (id >= MAX_APICS) {
++		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++			id, MAX_APICS);
++		return;
++	}
++
++	if (id == boot_cpu_physical_apicid)
++		boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++	processor.mpc_type = MP_PROCESSOR;
++	processor.mpc_apicid = id;
++	processor.mpc_apicver = 0x10; /* TBD: lapic version */
++	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
++		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++	processor.mpc_reserved[0] = 0;
++	processor.mpc_reserved[1] = 0;
++#endif
++
++	MP_processor_info(&processor);
++}
++
++#ifdef CONFIG_X86_IO_APIC
++
++#define MP_ISA_BUS		0
++#define MP_MAX_IOAPIC_PIN	127
++
++static struct mp_ioapic_routing {
++	int			apic_id;
++	int			gsi_start;
++	int			gsi_end;
++	u32			pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++	int			gsi)
++{
++	int			i = 0;
++
++	/* Find the IOAPIC that manages this GSI. */
++	for (i = 0; i < nr_ioapics; i++) {
++		if ((gsi >= mp_ioapic_routing[i].gsi_start)
++			&& (gsi <= mp_ioapic_routing[i].gsi_end))
++			return i;
++	}
++
++	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++	return -1;
++}
++	
++
++void __init mp_register_ioapic (
++	u8			id, 
++	u32			address,
++	u32			gsi_base)
++{
++	int			idx = 0;
++
++	if (nr_ioapics >= MAX_IO_APICS) {
++		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
++		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++	}
++	if (!address) {
++		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++			" found in MADT table, skipping!\n");
++		return;
++	}
++
++	idx = nr_ioapics++;
++
++	mp_ioapics[idx].mpc_type = MP_IOAPIC;
++	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++	mp_ioapics[idx].mpc_apicaddr = address;
++
++	mp_ioapics[idx].mpc_apicid = id;
++	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++	
++	/* 
++	 * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
++	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
++	 */
++	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++	mp_ioapic_routing[idx].gsi_start = gsi_base;
++	mp_ioapic_routing[idx].gsi_end = gsi_base + 
++		io_apic_get_redir_entries(idx);
++
++	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
++		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
++		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++		mp_ioapic_routing[idx].gsi_start,
++		mp_ioapic_routing[idx].gsi_end);
++
++	return;
++}
++
++
++void __init mp_override_legacy_irq (
++	u8			bus_irq,
++	u8			polarity, 
++	u8			trigger, 
++	u32			gsi)
++{
++	struct mpc_config_intsrc intsrc;
++	int			ioapic = -1;
++	int			pin = -1;
++
++	/* 
++	 * Convert 'gsi' to 'ioapic.pin'.
++	 */
++	ioapic = mp_find_ioapic(gsi);
++	if (ioapic < 0)
++		return;
++	pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++	/*
++	 * TBD: This check is for faulty timer entries, where the override
++	 *      erroneously sets the trigger to level, resulting in a HUGE 
++	 *      increase of timer interrupts!
++	 */
++	if ((bus_irq == 0) && (trigger == 3))
++		trigger = 1;
++
++	intsrc.mpc_type = MP_INTSRC;
++	intsrc.mpc_irqtype = mp_INT;
++	intsrc.mpc_irqflag = (trigger << 2) | polarity;
++	intsrc.mpc_srcbus = MP_ISA_BUS;
++	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
++	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
++	intsrc.mpc_dstirq = pin;				    /* INTIN# */
++
++	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", 
++		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
++		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
++		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++	mp_irqs[mp_irq_entries] = intsrc;
++	if (++mp_irq_entries == MAX_IRQ_SOURCES)
++		panic("Max # of irq sources exceeded!\n");
++
++	return;
++}
++
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++	struct mpc_config_intsrc intsrc;
++	int			i = 0;
++	int			ioapic = -1;
++
++	/* 
++	 * Fabricate the legacy ISA bus (bus #31).
++	 */
++	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++	/* 
++	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
++	 */
++	ioapic = mp_find_ioapic(0);
++	if (ioapic < 0)
++		return;
++
++	intsrc.mpc_type = MP_INTSRC;
++	intsrc.mpc_irqflag = 0;					/* Conforming */
++	intsrc.mpc_srcbus = MP_ISA_BUS;
++	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++	/* 
++	 * Use the default configuration for the IRQs 0-15.  Unless
++	 * overridden by (MADT) interrupt source override entries.
++	 */
++	for (i = 0; i < 16; i++) {
++		int idx;
++
++		for (idx = 0; idx < mp_irq_entries; idx++) {
++			struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++			/* Do we already have a mapping for this ISA IRQ? */
++			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++				break;
++
++			/* Do we already have a mapping for this IOAPIC pin */
++			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++				(irq->mpc_dstirq == i))
++				break;
++		}
++
++		if (idx != mp_irq_entries) {
++			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++			continue;			/* IRQ already used */
++		}
++
++		intsrc.mpc_irqtype = mp_INT;
++		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
++		intsrc.mpc_dstirq = i;
++
++		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
++			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
++			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
++			intsrc.mpc_dstirq);
++
++		mp_irqs[mp_irq_entries] = intsrc;
++		if (++mp_irq_entries == MAX_IRQ_SOURCES)
++			panic("Max # of irq sources exceeded!\n");
++	}
++
++	return;
++}
++
++int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
++{
++	int			ioapic = -1;
++	int			ioapic_pin = 0;
++	int			idx, bit = 0;
++
++	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
++		return gsi;
++
++#ifdef CONFIG_ACPI_BUS
++	/* Don't set up the ACPI SCI because it's already set up */
++	if (acpi_fadt.sci_int == gsi)
++		return gsi;
++#endif
++
++	ioapic = mp_find_ioapic(gsi);
++	if (ioapic < 0) {
++		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++		return gsi;
++	}
++
++	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++	/* 
++	 * Avoid pin reprogramming.  PRTs typically include entries  
++	 * with redundant pin->gsi mappings (but unique PCI devices);
++	 * we only program the IOAPIC on the first.
++	 */
++	bit = ioapic_pin % 32;
++	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++	if (idx > 3) {
++		printk(KERN_ERR "Invalid reference to IOAPIC pin "
++			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
++			ioapic_pin);
++		return gsi;
++	}
++	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++		return gsi;
++	}
++
++	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
++		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
++	return gsi;
++}
++
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI_BOOT*/
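
The mp_register_gsi() hunk above avoids reprogramming an IOAPIC pin that an earlier PCI routing entry already set up: each IOAPIC carries a 128-bit pin_programmed bitmap, indexed by bit = pin % 32 and idx = pin / 32 (written as a ternary in the patch), and io_apic_set_pci_routing() is only called when the bit was still clear. Below is a minimal standalone sketch of that bookkeeping; the struct and function names are illustrative, not part of the patch.

#include <stdio.h>

/* Illustrative stand-in; the real bookkeeping lives in mp_ioapic_routing[]. */
struct pin_tracker {
	unsigned int pin_programmed[4];		/* 4 x 32 bits = 128 pins */
};

/* Returns 1 if the pin was already claimed, 0 if this call claimed it. */
static int test_and_mark_pin(struct pin_tracker *t, int pin)
{
	int idx = pin / 32;
	int bit = pin % 32;

	if (idx > 3)
		return -1;			/* mirrors the MP_MAX_IOAPIC_PIN guard */
	if (t->pin_programmed[idx] & (1u << bit))
		return 1;
	t->pin_programmed[idx] |= 1u << bit;
	return 0;
}

int main(void)
{
	struct pin_tracker t = { { 0 } };

	printf("pin 19, first PRT entry:  %d\n", test_and_mark_pin(&t, 19));	/* claims it */
	printf("pin 19, second PRT entry: %d\n", test_and_mark_pin(&t, 19));	/* already done */
	return 0;
}
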
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/pci-nommu.c linux-2.6.12-xen/arch/xen/x86_64/kernel/pci-nommu.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/pci-nommu.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/pci-nommu.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,98 @@
++/* Fallback functions when the main IOMMU code is not compiled in. This
++   code is roughly equivalent to i386. */
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/string.h>
++#include <asm/proto.h>
++#include <asm/processor.h>
++
++int iommu_merge = 0;
++EXPORT_SYMBOL(iommu_merge);
++
++dma_addr_t bad_dma_address;
++EXPORT_SYMBOL(bad_dma_address);
++
++int iommu_bio_merge = 0;
++EXPORT_SYMBOL(iommu_bio_merge);
++
++int iommu_sac_force = 0;
++EXPORT_SYMBOL(iommu_sac_force);
++
++#if 0
++/* 
++ * Dummy IO MMU functions
++ */
++
++void *dma_alloc_coherent(struct device *hwdev, size_t size,
++			 dma_addr_t *dma_handle, unsigned gfp)
++{
++	void *ret;
++	u64 mask;
++	int order = get_order(size);
++
++	if (hwdev)
++		mask = hwdev->coherent_dma_mask & *hwdev->dma_mask;
++	else
++		mask = 0xffffffff;
++	for (;;) {
++		ret = (void *)__get_free_pages(gfp, order);
++		if (ret == NULL)
++			return NULL;
++		*dma_handle = virt_to_bus(ret);
++		if ((*dma_handle & ~mask) == 0)
++			break;
++		free_pages((unsigned long)ret, order);
++		if (gfp & GFP_DMA)
++			return NULL;
++		gfp |= GFP_DMA;
++	}
++
++	memset(ret, 0, size);
++	return ret;
++}
++EXPORT_SYMBOL(dma_alloc_coherent);
++
++void dma_free_coherent(struct device *hwdev, size_t size,
++			 void *vaddr, dma_addr_t dma_handle)
++{
++	free_pages((unsigned long)vaddr, get_order(size));
++}
++EXPORT_SYMBOL(dma_free_coherent);
++#endif
++
++#if 0
++int dma_supported(struct device *hwdev, u64 mask)
++{
++        /*
++         * we fall back to GFP_DMA when the mask isn't all 1s,
++         * so we can't guarantee allocations that must be
++         * within a tighter range than GFP_DMA..
++	 * RED-PEN this won't work for pci_map_single. Caller has to
++	 * use GFP_DMA in the first place.
++         */
++        if (mask < 0x00ffffff)
++                return 0;
++
++	return 1;
++} 
++EXPORT_SYMBOL(dma_supported);
++#endif
++
++int dma_get_cache_alignment(void)
++{
++	return boot_cpu_data.x86_clflush_size;
++}
++EXPORT_SYMBOL(dma_get_cache_alignment);
++
++static int __init check_ram(void) 
++{ 
++	if (end_pfn >= 0xffffffff>>PAGE_SHIFT) { 
++		printk(
++		KERN_ERR "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
++		KERN_ERR "WARNING 32bit PCI may malfunction.\n");
++	} 
++	return 0;
++} 
++__initcall(check_ram);
++
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/process.c linux-2.6.12-xen/arch/xen/x86_64/kernel/process.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/process.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/process.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,752 @@
++/*
++ *  linux/arch/x86-64/kernel/process.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ * 
++ *  X86-64 port
++ *	Andi Kleen.
++ * 
++ *  $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
++ * 
++ *  Jun Nakajima <jun.nakajima at intel.com> 
++ *     Modified for Xen
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/module.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/irq.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/random.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/mmu_context.h>
++#include <asm/pda.h>
++#include <asm/prctl.h>
++#include <asm/kdebug.h>
++#include <asm-xen/xen-public/dom0_ops.h>
++#include <asm-xen/xen-public/physdev.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/hardirq.h>
++#include <asm/ia32.h>
++
++asmlinkage extern void ret_from_fork(void);
++
++unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
++
++static atomic_t hlt_counter = ATOMIC_INIT(0);
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Powermanagement idle function, if any..
++ */
++void (*pm_idle)(void);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++void disable_hlt(void)
++{
++	atomic_inc(&hlt_counter);
++}
++
++EXPORT_SYMBOL(disable_hlt);
++
++void enable_hlt(void)
++{
++	atomic_dec(&hlt_counter);
++}
++
++EXPORT_SYMBOL(enable_hlt);
++
++/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
++extern void stop_hz_timer(void);
++extern void start_hz_timer(void);
++void xen_idle(void)
++{
++	local_irq_disable();
++
++	if (need_resched()) {
++		local_irq_enable();
++	} else {
++		stop_hz_timer();
++		/* Blocking includes an implicit local_irq_enable(). */
++		HYPERVISOR_sched_op(SCHEDOP_block, 0);
++		start_hz_timer();
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++#include <asm/nmi.h>
++/* We don't actually take CPU down, just spin without interrupts. */
++static inline void play_dead(void)
++{
++	/* Ack it */
++	__get_cpu_var(cpu_state) = CPU_DEAD;
++
++	/* We shouldn't have to disable interrupts while dead, but
++	 * some interrupts just don't seem to go away, and this makes
++	 * it "work" for testing purposes. */
++	/* Death loop */
++	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
++		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
++
++	local_irq_disable();
++	__flush_tlb_all();
++	cpu_set(smp_processor_id(), cpu_online_map);
++	local_irq_enable();
++}
++#else
++static inline void play_dead(void)
++{
++	BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle (void)
++{
++	int cpu = smp_processor_id();
++
++	/* endless idle loop with no priority at all */
++	while (1) {
++		while (!need_resched()) {
++			if (__get_cpu_var(cpu_idle_state))
++				__get_cpu_var(cpu_idle_state) = 0;
++			rmb();
++			
++			if (cpu_is_offline(cpu))
++				play_dead();
++
++			xen_idle();
++		}
++
++		schedule();
++	}
++}
++
++void cpu_idle_wait(void)
++{
++	unsigned int cpu, this_cpu = get_cpu();
++	cpumask_t map;
++
++	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++	put_cpu();
++
++ 	cpus_clear(map);
++	for_each_online_cpu(cpu) {
++		per_cpu(cpu_idle_state, cpu) = 1;
++		cpu_set(cpu, map);
++	}
++
++	__get_cpu_var(cpu_idle_state) = 0;
++
++	wmb();
++	do {
++		ssleep(1);
++		for_each_online_cpu(cpu) {
++			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
++				cpu_clear(cpu, map);
++		}
++		cpus_and(map, map, cpu_online_map);
++	} while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
++/* Always use xen_idle() instead. */
++void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
++
++/* Prints also some state that isn't saved in the pt_regs */ 
++void __show_regs(struct pt_regs * regs)
++{
++	unsigned long fs, gs, shadowgs;
++	unsigned int fsindex,gsindex;
++	unsigned int ds,cs,es; 
++
++	printk("\n");
++	print_modules();
++	printk("Pid: %d, comm: %.20s %s %s\n", 
++	       current->pid, current->comm, print_tainted(), system_utsname.release);
++	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
++	printk_address(regs->rip); 
++	printk("\nRSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
++	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
++	       regs->rax, regs->rbx, regs->rcx);
++	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
++	       regs->rdx, regs->rsi, regs->rdi); 
++	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
++	       regs->rbp, regs->r8, regs->r9); 
++	printk("R10: %016lx R11: %016lx R12: %016lx\n",
++	       regs->r10, regs->r11, regs->r12); 
++	printk("R13: %016lx R14: %016lx R15: %016lx\n",
++	       regs->r13, regs->r14, regs->r15); 
++
++	asm("mov %%ds,%0" : "=r" (ds)); 
++	asm("mov %%cs,%0" : "=r" (cs)); 
++	asm("mov %%es,%0" : "=r" (es)); 
++	asm("mov %%fs,%0" : "=r" (fsindex));
++	asm("mov %%gs,%0" : "=r" (gsindex));
++
++	rdmsrl(MSR_FS_BASE, fs);
++	rdmsrl(MSR_GS_BASE, gs); 
++	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 
++
++	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 
++	       fs,fsindex,gs,gsindex,shadowgs); 
++	printk("CS:  %04x DS: %04x ES: %04x\n", cs, ds, es); 
++
++}
++
++void show_regs(struct pt_regs *regs)
++{
++	__show_regs(regs);
++	show_trace(&regs->rsp);
++}
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++	struct task_struct *me = current;
++	struct thread_struct *t = &me->thread;
++	if (me->thread.io_bitmap_ptr) { 
++		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++
++		kfree(t->io_bitmap_ptr);
++		t->io_bitmap_ptr = NULL;
++		/*
++		 * Careful, clear this in the TSS too:
++		 */
++		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
++		t->io_bitmap_max = 0;
++		put_cpu();
++	}
++}
++
++void load_gs_index(unsigned gs)
++{
++	HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
++}
++
++void flush_thread(void)
++{
++	struct task_struct *tsk = current;
++	struct thread_info *t = current_thread_info();
++
++	if (t->flags & _TIF_ABI_PENDING)
++		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
++
++	tsk->thread.debugreg0 = 0;
++	tsk->thread.debugreg1 = 0;
++	tsk->thread.debugreg2 = 0;
++	tsk->thread.debugreg3 = 0;
++	tsk->thread.debugreg6 = 0;
++	tsk->thread.debugreg7 = 0;
++	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
++	/*
++	 * Forget coprocessor state..
++	 */
++	clear_fpu(tsk);
++	clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++	if (dead_task->mm) {
++		if (dead_task->mm->context.size) {
++			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
++					dead_task->comm,
++					dead_task->mm->context.ldt,
++					dead_task->mm->context.size);
++			BUG();
++		}
++	}
++}
++
++static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
++{
++	struct user_desc ud = { 
++		.base_addr = addr,
++		.limit = 0xfffff,
++		.contents = (3 << 3), /* user */
++		.seg_32bit = 1,
++		.limit_in_pages = 1,
++		.useable = 1,
++	};
++	struct n_desc_struct *desc = (void *)t->thread.tls_array;
++	desc += tls;
++	desc->a = LDT_entry_a(&ud); 
++	desc->b = LDT_entry_b(&ud); 
++}
++
++static inline u32 read_32bit_tls(struct task_struct *t, int tls)
++{
++	struct desc_struct *desc = (void *)t->thread.tls_array;
++	desc += tls;
++	return desc->base0 | 
++		(((u32)desc->base1) << 16) | 
++		(((u32)desc->base2) << 24);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++	unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, 
++		unsigned long unused,
++	struct task_struct * p, struct pt_regs * regs)
++{
++	int err;
++	struct pt_regs * childregs;
++	struct task_struct *me = current;
++
++	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
++
++	*childregs = *regs;
++
++	childregs->rax = 0;
++	childregs->rsp = rsp;
++	if (rsp == ~0UL) {
++		childregs->rsp = (unsigned long)childregs;
++	}
++
++	p->thread.rsp = (unsigned long) childregs;
++	p->thread.rsp0 = (unsigned long) (childregs+1);
++	p->thread.userrsp = me->thread.userrsp; 
++
++	set_ti_thread_flag(p->thread_info, TIF_FORK);
++
++	p->thread.fs = me->thread.fs;
++	p->thread.gs = me->thread.gs;
++
++	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
++	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
++	asm("mov %%es,%0" : "=m" (p->thread.es));
++	asm("mov %%ds,%0" : "=m" (p->thread.ds));
++
++	if (unlikely(me->thread.io_bitmap_ptr != NULL)) { 
++		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++		if (!p->thread.io_bitmap_ptr) {
++			p->thread.io_bitmap_max = 0;
++			return -ENOMEM;
++		}
++		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
++	} 
++
++	/*
++	 * Set a new TLS for the child thread?
++	 */
++	if (clone_flags & CLONE_SETTLS) {
++#ifdef CONFIG_IA32_EMULATION
++		if (test_thread_flag(TIF_IA32))
++			err = ia32_child_tls(p, childregs); 
++		else 			
++#endif	 
++			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 
++		if (err) 
++			goto out;
++	}
++        p->thread.io_pl = current->thread.io_pl;
++
++	err = 0;
++out:
++	if (err && p->thread.io_bitmap_ptr) {
++		kfree(p->thread.io_bitmap_ptr);
++		p->thread.io_bitmap_max = 0;
++	}
++	return err;
++}
++
++/*
++ * This special macro can be used to load a debugging register
++ */
++#define loaddebug(thread,register) \
++		HYPERVISOR_set_debugreg((register),	\
++			(thread->debugreg ## register))
++
++
++static inline void __save_init_fpu( struct task_struct *tsk )
++{
++	asm volatile( "rex64 ; fxsave %0 ; fnclex"
++		      : "=m" (tsk->thread.i387.fxsave));
++	tsk->thread_info->status &= ~TS_USEDFPU;
++}
++
++/*
++ *	switch_to(x,y) should switch tasks from x to y.
++ *
++ * This could still be optimized: 
++ * - fold all the options into a flag word and test it with a single test.
++ * - could test fs/gs bitsliced
++ */
++struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++	struct thread_struct *prev = &prev_p->thread,
++				 *next = &next_p->thread;
++	int cpu = smp_processor_id();  
++	struct tss_struct *tss = &per_cpu(init_tss, cpu);
++	physdev_op_t iopl_op, iobmp_op;
++	multicall_entry_t _mcl[8], *mcl = _mcl;
++
++	/*
++	 * This is basically '__unlazy_fpu', except that we queue a
++	 * multicall to indicate FPU task switch, rather than
++	 * synchronously trapping to Xen.
++	 */
++	if (prev_p->thread_info->status & TS_USEDFPU) {
++		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++		mcl->op      = __HYPERVISOR_fpu_taskswitch;
++		mcl->args[0] = 1;
++		mcl++;
++	}
++
++	/*
++	 * Reload esp0, LDT and the page table pointer:
++	 */
++	tss->rsp0 = next->rsp0;
++	mcl->op      = __HYPERVISOR_stack_switch;
++	mcl->args[0] = __KERNEL_DS;
++	mcl->args[1] = tss->rsp0;
++	mcl++;
++
++	/*
++	 * Load the per-thread Thread-Local Storage descriptor.
++	 * This is load_TLS(next, cpu) with multicalls.
++	 */
++#define C(i) do {							\
++	if (unlikely(next->tls_array[i] != prev->tls_array[i])) {	\
++		mcl->op      = __HYPERVISOR_update_descriptor;		\
++		mcl->args[0] = virt_to_machine(				\
++			&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
++		mcl->args[1] = next->tls_array[i];			\
++		mcl++;							\
++	}								\
++} while (0)
++	C(0); C(1); C(2);
++#undef C
++
++	if (unlikely(prev->io_pl != next->io_pl)) {
++		iopl_op.cmd             = PHYSDEVOP_SET_IOPL;
++		iopl_op.u.set_iopl.iopl = (next->io_pl == 0) ? 1 : next->io_pl;
++		mcl->op      = __HYPERVISOR_physdev_op;
++		mcl->args[0] = (unsigned long)&iopl_op;
++		mcl++;
++	}
++
++	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++		iobmp_op.cmd                     =
++			PHYSDEVOP_SET_IOBITMAP;
++		iobmp_op.u.set_iobitmap.bitmap   =
++			(char *)next->io_bitmap_ptr;
++		iobmp_op.u.set_iobitmap.nr_ports =
++			next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++		mcl->op      = __HYPERVISOR_physdev_op;
++		mcl->args[0] = (unsigned long)&iobmp_op;
++		mcl++;
++	}
++
++	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
++	/* 
++	 * Switch DS and ES.
++	 * This won't pick up thread selector changes, but I guess that is ok.
++	 */
++	if (unlikely(next->es))
++		loadsegment(es, next->es); 
++	
++	if (unlikely(next->ds))
++		loadsegment(ds, next->ds);
++
++	/* 
++	 * Switch FS and GS.
++	 */
++	if (unlikely(next->fsindex))
++		loadsegment(fs, next->fsindex);
++
++	if (next->fs)
++		HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs); 
++	
++	if (unlikely(next->gsindex))
++		load_gs_index(next->gsindex);
++
++	if (next->gs)
++		HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs); 
++
++	/* 
++	 * Switch the PDA context.
++	 */
++	prev->userrsp = read_pda(oldrsp); 
++	write_pda(oldrsp, next->userrsp); 
++	write_pda(pcurrent, next_p); 
++	write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
++
++	/*
++	 * Now maybe reload the debug registers
++	 */
++	if (unlikely(next->debugreg7)) {
++		loaddebug(next, 0);
++		loaddebug(next, 1);
++		loaddebug(next, 2);
++		loaddebug(next, 3);
++		/* no 4 and 5 */
++		loaddebug(next, 6);
++		loaddebug(next, 7);
++	}
++
++	return prev_p;
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage 
++long sys_execve(char __user *name, char __user * __user *argv,
++		char __user * __user *envp, struct pt_regs regs)
++{
++	long error;
++	char * filename;
++
++	filename = getname(name);
++	error = PTR_ERR(filename);
++	if (IS_ERR(filename)) 
++		return error;
++	error = do_execve(filename, argv, envp, &regs); 
++	if (error == 0) {
++		task_lock(current);
++		current->ptrace &= ~PT_DTRACE;
++		task_unlock(current);
++	}
++	putname(filename);
++	return error;
++}
++
++void set_personality_64bit(void)
++{
++	/* inherit personality from parent */
++
++	/* Make sure to be in 64bit mode */
++	clear_thread_flag(TIF_IA32); 
++
++	/* TBD: overwrites user setup. Should have two bits.
++	   But 64bit processes have always behaved this way,
++	   so it's not too bad. The main problem is just that
++   	   32bit children are affected again. */
++	current->personality &= ~READ_IMPLIES_EXEC;
++}
++
++asmlinkage long sys_fork(struct pt_regs *regs)
++{
++	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
++}
++
++asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
++{
++	if (!newsp)
++		newsp = regs->rsp;
++	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage long sys_vfork(struct pt_regs *regs)
++{
++	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
++		    NULL, NULL);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long stack;
++	u64 fp,rip;
++	int count = 0;
++
++	if (!p || p == current || p->state==TASK_RUNNING)
++		return 0; 
++	stack = (unsigned long)p->thread_info; 
++	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
++		return 0;
++	fp = *(u64 *)(p->thread.rsp);
++	do { 
++		if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
++			return 0; 
++		rip = *(u64 *)(fp+8); 
++		if (!in_sched_functions(rip))
++			return rip; 
++		fp = *(u64 *)fp; 
++	} while (count++ < 16); 
++	return 0;
++}
++
++long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
++{ 
++	int ret = 0; 
++	int doit = task == current;
++	int cpu;
++
++	switch (code) { 
++	case ARCH_SET_GS:
++		if (addr >= TASK_SIZE) 
++			return -EPERM; 
++		cpu = get_cpu();
++		/* handle small bases via the GDT because that's faster to 
++		   switch. */
++		if (addr <= 0xffffffff) {  
++			set_32bit_tls(task, GS_TLS, addr); 
++			if (doit) { 
++				load_TLS(&task->thread, cpu);
++				load_gs_index(GS_TLS_SEL); 
++			}
++			task->thread.gsindex = GS_TLS_SEL; 
++			task->thread.gs = 0;
++		} else { 
++			task->thread.gsindex = 0;
++			task->thread.gs = addr;
++			if (doit) {
++				load_gs_index(0);
++				ret = HYPERVISOR_set_segment_base(SEGBASE_GS_USER, addr);
++			} 
++		}
++		put_cpu();
++		break;
++	case ARCH_SET_FS:
++		/* Not strictly needed for fs, but do it for symmetry
++		   with gs */
++		if (addr >= TASK_SIZE)
++			return -EPERM; 
++		cpu = get_cpu();
++		/* handle small bases via the GDT because that's faster to 
++		   switch. */
++		if (addr <= 0xffffffff) { 
++			set_32bit_tls(task, FS_TLS, addr);
++			if (doit) { 
++				load_TLS(&task->thread, cpu); 
++				asm volatile("mov %0,%%fs" :: "r" (FS_TLS_SEL));
++			}
++			task->thread.fsindex = FS_TLS_SEL;
++			task->thread.fs = 0;
++		} else { 
++			task->thread.fsindex = 0;
++			task->thread.fs = addr;
++			if (doit) {
++				/* set the selector to 0 to not confuse
++				   __switch_to */
++				asm volatile("mov %0,%%fs" :: "r" (0));
++				ret = HYPERVISOR_set_segment_base(SEGBASE_FS, addr);
++
++			}
++		}
++		put_cpu();
++		break;
++	case ARCH_GET_FS: { 
++		unsigned long base; 
++		if (task->thread.fsindex == FS_TLS_SEL)
++			base = read_32bit_tls(task, FS_TLS);
++		else if (doit) {
++			rdmsrl(MSR_FS_BASE, base);
++		} else
++			base = task->thread.fs;
++		ret = put_user(base, (unsigned long __user *)addr); 
++		break; 
++	}
++	case ARCH_GET_GS: { 
++		unsigned long base;
++		if (task->thread.gsindex == GS_TLS_SEL)
++			base = read_32bit_tls(task, GS_TLS);
++		else if (doit) {
++			rdmsrl(MSR_KERNEL_GS_BASE, base);
++		} else
++			base = task->thread.gs;
++		ret = put_user(base, (unsigned long __user *)addr); 
++		break;
++	}
++
++	default:
++		ret = -EINVAL;
++		break;
++	} 
++
++	return ret;	
++} 
++
++long sys_arch_prctl(int code, unsigned long addr)
++{
++	return do_arch_prctl(current, code, addr);
++} 
++
++/* 
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++	struct pt_regs *pp, ptregs;
++
++	pp = (struct pt_regs *)(tsk->thread.rsp0);
++	--pp; 
++
++	ptregs = *pp; 
++	ptregs.cs &= 0xffff;
++	ptregs.ss &= 0xffff;
++
++	elf_core_copy_regs(regs, &ptregs);
++ 
++        boot_option_idle_override = 1;
++	return 1;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++	if (randomize_va_space)
++		sp -= get_random_int() % 8192;
++	return sp & ~0xf;
++}
++
++#ifndef CONFIG_SMP
++void _restore_vcpu(void)
++{
++}
++#endif
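
The __switch_to() hunk above does not issue the FPU, stack, TLS and I/O-permission updates as separate hypercalls; it queues them into a local multicall_entry_t array and crosses into Xen once via HYPERVISOR_multicall(). The sketch below reproduces that accumulate-then-flush pattern as ordinary user-space C with stand-in types and a printing "flush"; the opcode numbers and the flush routine are placeholders, not the real hypervisor interface.

#include <stdio.h>

/* Stand-in for multicall_entry_t: one queued operation and its arguments. */
struct mc_entry {
	unsigned long op;
	unsigned long args[2];
};

/* Placeholder for HYPERVISOR_multicall(): issue the whole batch at once. */
static void flush_batch(const struct mc_entry *batch, int n)
{
	for (int i = 0; i < n; i++)
		printf("op %lu(%lu, %lu)\n", batch[i].op, batch[i].args[0], batch[i].args[1]);
}

int main(void)
{
	struct mc_entry _mcl[8], *mcl = _mcl;
	int outgoing_task_used_fpu = 1;

	if (outgoing_task_used_fpu) {		/* queued only when TS_USEDFPU was set */
		mcl->op = 0;			/* placeholder for __HYPERVISOR_fpu_taskswitch */
		mcl->args[0] = 1;
		mcl->args[1] = 0;
		mcl++;
	}

	mcl->op = 1;				/* placeholder for __HYPERVISOR_stack_switch */
	mcl->args[0] = 0x10;			/* made-up stack segment */
	mcl->args[1] = 0xffff880000000000ul;	/* made-up rsp0 */
	mcl++;

	flush_batch(_mcl, (int)(mcl - _mcl));	/* one boundary crossing for everything queued */
	return 0;
}
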
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/setup64.c linux-2.6.12-xen/arch/xen/x86_64/kernel/setup64.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/setup64.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/setup64.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,359 @@
++/* 
++ * X86-64 specific CPU setup.
++ * Copyright (C) 1995  Linus Torvalds
++ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
++ * See setup.c for older changelog.
++ * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
++ *
++ * Jun Nakajima <jun.nakajima at intel.com> 
++ *   Modified for Xen
++ *
++ */ 
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/bootmem.h>
++#include <linux/bitops.h>
++#include <asm/bootsetup.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/processor.h>
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/mmu_context.h>
++#include <asm/smp.h>
++#include <asm/i387.h>
++#include <asm/percpu.h>
++#include <asm/mtrr.h>
++#include <asm/proto.h>
++#include <asm/mman.h>
++#include <asm/numa.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#endif
++
++char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned; 
++
++extern struct task_struct init_task;
++
++extern unsigned char __per_cpu_start[], __per_cpu_end[]; 
++
++extern struct desc_ptr cpu_gdt_descr[];
++struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table }; 
++
++char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
++
++unsigned long __supported_pte_mask = ~0UL;
++static int do_not_nx __initdata = 0;
++
++/* noexec=on|off
++Control non executable mappings for 64bit processes.
++
++on	Enable(default)
++off	Disable
++*/ 
++int __init nonx_setup(char *str)
++{
++	if (!strncmp(str, "on", 2)) {
++                __supported_pte_mask |= _PAGE_NX; 
++ 		do_not_nx = 0; 
++	} else if (!strncmp(str, "off", 3)) {
++		do_not_nx = 1;
++		__supported_pte_mask &= ~_PAGE_NX;
++        }
++	return 0;
++} 
++__setup("noexec=", nonx_setup);	/* parsed early actually */
++
++int force_personality32 = READ_IMPLIES_EXEC;
++
++/* noexec32=on|off
++Control non executable heap for 32bit processes.
++To control the stack too use noexec=off
++
++on	PROT_READ does not imply PROT_EXEC for 32bit processes
++off	PROT_READ implies PROT_EXEC (default)
++*/
++static int __init nonx32_setup(char *str)
++{
++	if (!strcmp(str, "on"))
++		force_personality32 &= ~READ_IMPLIES_EXEC;
++	else if (!strcmp(str, "off"))
++		force_personality32 |= READ_IMPLIES_EXEC;
++	return 0;
++}
++__setup("noexec32=", nonx32_setup);
++
++/*
++ * Great future plan:
++ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
++ * Always point %gs to its beginning
++ */
++void __init setup_per_cpu_areas(void)
++{ 
++	int i;
++	unsigned long size;
++
++	/* Copy section for each CPU (we discard the original) */
++	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
++#ifdef CONFIG_MODULES
++	if (size < PERCPU_ENOUGH_ROOM)
++		size = PERCPU_ENOUGH_ROOM;
++#endif
++
++	for (i = 0; i < NR_CPUS; i++) { 
++		unsigned char *ptr;
++
++		if (!NODE_DATA(cpu_to_node(i))) {
++			printk("cpu with no node %d, num_online_nodes %d\n",
++			       i, num_online_nodes());
++			ptr = alloc_bootmem(size);
++		} else { 
++			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
++		}
++		if (!ptr)
++			panic("Cannot allocate cpu data for CPU %d\n", i);
++		cpu_pda[i].data_offset = ptr - __per_cpu_start;
++		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
++	}
++} 
++
++#ifdef CONFIG_XEN
++static void switch_pt(void)
++{
++	xen_pt_switch(__pa(init_level4_pgt));
++        xen_new_user_pt(__pa(init_level4_user_pgt));
++}
++
++void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
++{
++	unsigned long frames[16];
++	unsigned long va;
++	int f;
++
++	for (va = gdt_descr->address, f = 0;
++	     va < gdt_descr->address + gdt_descr->size;
++	     va += PAGE_SIZE, f++) {
++		frames[f] = virt_to_mfn(va);
++		make_page_readonly(
++			(void *)va, XENFEAT_writable_descriptor_tables);
++	}
++	if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
++                               sizeof (struct desc_struct)))
++		BUG();
++}
++#else
++static void switch_pt(void)
++{
++	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
++}
++
++void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
++{
++#ifdef CONFIG_SMP
++	int cpu = stack_smp_processor_id();
++#else
++	int cpu = smp_processor_id();
++#endif
++
++	asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
++	asm volatile("lidt %0" :: "m" (idt_descr));
++}
++#endif
++
++void pda_init(int cpu)
++{ 
++	struct x8664_pda *pda = &cpu_pda[cpu];
++
++	/* Set up data that may be needed in __get_free_pages early */
++	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); 
++#ifndef CONFIG_XEN
++	wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
++#else
++	HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, 
++				    (unsigned long)(cpu_pda + cpu));
++#endif
++
++	pda->me = pda;
++	pda->cpunumber = cpu; 
++	pda->irqcount = -1;
++	pda->kernelstack = 
++		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; 
++	pda->active_mm = &init_mm;
++	pda->mmu_state = 0;
++
++	if (cpu == 0) {
++#ifdef CONFIG_XEN
++		xen_init_pt();
++#endif
++		/* others are initialized in smpboot.c */
++		pda->pcurrent = &init_task;
++		pda->irqstackptr = boot_cpu_stack; 
++	} else {
++		pda->irqstackptr = (char *)
++			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
++		if (!pda->irqstackptr)
++			panic("cannot allocate irqstack for cpu %d", cpu); 
++	}
++
++	switch_pt();
++
++	pda->irqstackptr += IRQSTACKSIZE-64;
++} 
++
++char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ] 
++__attribute__((section(".bss.page_aligned")));
++
++/* May not be marked __init: used by software suspend */
++void syscall_init(void)
++{
++#ifndef CONFIG_XEN
++	/* 
++	 * LSTAR and STAR live in a somewhat strange symbiosis.
++	 * They both write to the same internal register. STAR allows setting CS/DS
++	 * but only a 32bit target. LSTAR sets the 64bit rip.
++	 */ 
++	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32); 
++	wrmsrl(MSR_LSTAR, system_call); 
++
++	/* Flags to clear on syscall */
++	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 
++#endif
++#ifdef CONFIG_IA32_EMULATION   		
++	syscall32_cpu_init ();
++#endif
++}
++
++void __cpuinit check_efer(void)
++{
++	unsigned long efer;
++
++	rdmsrl(MSR_EFER, efer); 
++        if (!(efer & EFER_NX) || do_not_nx) { 
++                __supported_pte_mask &= ~_PAGE_NX; 
++        }       
++}
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ * A lot of state is already set up in PDA init.
++ */
++void __cpuinit cpu_init (void)
++{
++#ifdef CONFIG_SMP
++	int cpu = stack_smp_processor_id();
++#else
++	int cpu = smp_processor_id();
++#endif
++	struct tss_struct *t = &per_cpu(init_tss, cpu);
++	unsigned long v; 
++	char *estacks = NULL; 
++	struct task_struct *me;
++	int i;
++
++	/* CPU 0 is initialised in head64.c */
++	if (cpu != 0) {
++		pda_init(cpu);
++	} else 
++		estacks = boot_exception_stacks; 
++
++	me = current;
++
++	if (cpu_test_and_set(cpu, cpu_initialized))
++		panic("CPU#%d already initialized!\n", cpu);
++
++	printk("Initializing CPU#%d\n", cpu);
++
++		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++
++	/*
++	 * Initialize the per-CPU GDT with the boot GDT,
++	 * and set up the GDT descriptor:
++	 */
++#ifndef CONFIG_XEN 
++	if (cpu) {
++		memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
++	}	
++
++	cpu_gdt_descr[cpu].size = GDT_SIZE;
++	cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
++
++	memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
++#else
++	memcpy(me->thread.tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
++	    GDT_ENTRY_TLS_ENTRIES * 8);
++
++	cpu_gdt_init(&cpu_gdt_descr[cpu]);
++#endif
++       
++	/*
++	 * Delete NT
++	 */
++
++	asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
++
++	syscall_init();
++
++	wrmsrl(MSR_FS_BASE, 0);
++	wrmsrl(MSR_KERNEL_GS_BASE, 0);
++	barrier(); 
++
++	check_efer();
++
++	/*
++	 * set up and load the per-CPU TSS
++	 */
++	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
++		if (cpu) {
++			estacks = (char *)__get_free_pages(GFP_ATOMIC, 
++						   EXCEPTION_STACK_ORDER);
++			if (!estacks)
++				panic("Cannot allocate exception stack %ld %d\n",
++				      v, cpu); 
++		}
++		estacks += EXCEPTION_STKSZ;
++		t->ist[v] = (unsigned long)estacks;
++	}
++
++	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
++	/*
++	 * <= is required because the CPU will access up to
++	 * 8 bits beyond the end of the IO permission bitmap.
++	 */
++	for (i = 0; i <= IO_BITMAP_LONGS; i++)
++		t->io_bitmap[i] = ~0UL;
++
++	atomic_inc(&init_mm.mm_count);
++	me->active_mm = &init_mm;
++	if (me->mm)
++		BUG();
++	enter_lazy_tlb(&init_mm, me);
++
++#ifndef CONFIG_XEN
++	set_tss_desc(cpu, t);
++	load_TR_desc();
++#endif
++	load_LDT(&init_mm.context);
++
++	/*
++	 * Clear all 6 debug registers:
++	 */
++
++	set_debug(0UL, 0);
++	set_debug(0UL, 1);
++	set_debug(0UL, 2);
++	set_debug(0UL, 3);
++	set_debug(0UL, 6);
++	set_debug(0UL, 7);
++
++	fpu_init(); 
++}
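
The setup_per_cpu_areas() hunk above hands every CPU a private copy of the per-CPU data template and stores the copy's distance from __per_cpu_start in cpu_pda[i].data_offset; a per-CPU access is then just the template address plus that CPU's offset. Below is a compile-and-run sketch of the same offset scheme with made-up names; it illustrates the idea only and is not the kernel's per_cpu implementation.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define NCPUS 4

/* Pretend per-CPU template section holding one per-CPU counter. */
static char template_start[64] __attribute__((aligned(sizeof(long))));
static long *tmpl_counter = (long *)template_start;

/* Plays the role of cpu_pda[i].data_offset. */
static intptr_t data_offset[NCPUS];

static void setup_per_cpu_areas_sketch(void)
{
	for (int i = 0; i < NCPUS; i++) {
		char *copy = malloc(sizeof(template_start));
		if (!copy)
			exit(1);
		memcpy(copy, template_start, sizeof(template_start));
		data_offset[i] = (intptr_t)copy - (intptr_t)template_start;
	}
}

/* per_cpu(counter, cpu): template address shifted by the CPU's offset. */
static long *per_cpu_counter(int cpu)
{
	return (long *)((intptr_t)tmpl_counter + data_offset[cpu]);
}

int main(void)
{
	setup_per_cpu_areas_sketch();
	*per_cpu_counter(2) += 5;			/* touches only CPU 2's copy */
	for (int i = 0; i < NCPUS; i++)
		printf("cpu%d counter = %ld\n", i, *per_cpu_counter(i));
	return 0;
}
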
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/setup.c linux-2.6.12-xen/arch/xen/x86_64/kernel/setup.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/setup.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/setup.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,1472 @@
++/*
++ *  linux/arch/x86-64/kernel/setup.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *
++ *  Nov 2001 Dave Jones <davej at suse.de>
++ *  Forked from i386 setup code.
++ *
++ *  $Id$
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/stddef.h>
++#include <linux/unistd.h>
++#include <linux/ptrace.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/tty.h>
++#include <linux/ioport.h>
++#include <linux/delay.h>
++#include <linux/config.h>
++#include <linux/init.h>
++#include <linux/initrd.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <asm/processor.h>
++#include <linux/console.h>
++#include <linux/seq_file.h>
++#include <linux/root_dev.h>
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <linux/kallsyms.h>
++#include <linux/edd.h>
++#include <asm/mtrr.h>
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/msr.h>
++#include <asm/desc.h>
++#include <video/edid.h>
++#include <asm/e820.h>
++#include <asm/dma.h>
++#include <asm/mpspec.h>
++#include <asm/mmu_context.h>
++#include <asm/bootsetup.h>
++#include <asm/proto.h>
++#include <asm/setup.h>
++#include <asm/mach_apic.h>
++#include <asm/numa.h>
++#ifdef CONFIG_XEN
++#include <linux/percpu.h>
++#include <asm-xen/xen-public/physdev.h>
++#include "setup_arch_pre.h"
++#include <asm/hypervisor.h>
++#include <asm-xen/xen-public/nmi.h>
++#include <asm-xen/features.h>
++#define PFN_UP(x)       (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
++#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)
++#define end_pfn_map end_pfn
++#include <asm/mach-xen/setup_arch_post.h>
++
++extern unsigned long start_pfn;
++extern struct edid_info edid_info;
++
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++/* Allows setting of maximum possible memory size  */
++unsigned long xen_override_max_pfn;
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
++
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
++DEFINE_PER_CPU(int, nr_multicall_ents);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++#endif
++
++/*
++ * Machine setup..
++ */
++
++struct cpuinfo_x86 boot_cpu_data;
++
++unsigned long mmu_cr4_features;
++
++int acpi_disabled;
++EXPORT_SYMBOL(acpi_disabled);
++#ifdef	CONFIG_ACPI_BOOT
++extern int __initdata acpi_ht;
++extern acpi_interrupt_flags	acpi_sci_flags;
++int __initdata acpi_force = 0;
++#endif
++
++int acpi_numa __initdata;
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++unsigned long saved_video_mode;
++
++#ifdef CONFIG_SWIOTLB
++int swiotlb;
++EXPORT_SYMBOL(swiotlb);
++#endif
++
++/*
++ * Setup options
++ */
++struct drive_info_struct { char dummy[32]; } drive_info;
++struct screen_info screen_info;
++struct sys_desc_table_struct {
++	unsigned short length;
++	unsigned char table[0];
++};
++
++struct edid_info edid_info;
++struct e820map e820;
++
++extern int root_mountflags;
++extern char _text, _etext, _edata, _end;
++
++char command_line[COMMAND_LINE_SIZE];
++
++struct resource standard_io_resources[] = {
++	{ .name = "dma1", .start = 0x00, .end = 0x1f,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "pic1", .start = 0x20, .end = 0x21,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "timer0", .start = 0x40, .end = 0x43,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "timer1", .start = 0x50, .end = 0x53,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
++	{ .name = "fpu", .start = 0xf0, .end = 0xff,
++		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
++};
++
++#define STANDARD_IO_RESOURCES \
++	(sizeof standard_io_resources / sizeof standard_io_resources[0])
++
++#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
++
++struct resource data_resource = {
++	.name = "Kernel data",
++	.start = 0,
++	.end = 0,
++	.flags = IORESOURCE_RAM,
++};
++struct resource code_resource = {
++	.name = "Kernel code",
++	.start = 0,
++	.end = 0,
++	.flags = IORESOURCE_RAM,
++};
++
++#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
++
++#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
++static struct resource system_rom_resource = {
++	.name = "System ROM",
++	.start = 0xf0000,
++	.end = 0xfffff,
++	.flags = IORESOURCE_ROM,
++};
++
++static struct resource extension_rom_resource = {
++	.name = "Extension ROM",
++	.start = 0xe0000,
++	.end = 0xeffff,
++	.flags = IORESOURCE_ROM,
++};
++
++static struct resource adapter_rom_resources[] = {
++	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM },
++	{ .name = "Adapter ROM", .start = 0, .end = 0,
++		.flags = IORESOURCE_ROM }
++};
++#endif
++
++#define ADAPTER_ROM_RESOURCES \
++	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
++static struct resource video_rom_resource = {
++	.name = "Video ROM",
++	.start = 0xc0000,
++	.end = 0xc7fff,
++	.flags = IORESOURCE_ROM,
++};
++#endif
++
++static struct resource video_ram_resource = {
++	.name = "Video RAM area",
++	.start = 0xa0000,
++	.end = 0xbffff,
++	.flags = IORESOURCE_RAM,
++};
++
++#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++	unsigned char *p, sum = 0;
++
++	for (p = rom; p < rom + length; p++)
++		sum += *p;
++	return sum == 0;
++}
++
++static void __init probe_roms(void)
++{
++	unsigned long start, length, upper;
++	unsigned char *rom;
++	int	      i;
++
++	/* video rom */
++	upper = adapter_rom_resources[0].start;
++	for (start = video_rom_resource.start; start < upper; start += 2048) {
++		rom = isa_bus_to_virt(start);
++		if (!romsignature(rom))
++			continue;
++
++		video_rom_resource.start = start;
++
++		/* 0 < length <= 0x7f * 512, historically */
++		length = rom[2] * 512;
++
++		/* if checksum okay, trust length byte */
++		if (length && romchecksum(rom, length))
++			video_rom_resource.end = start + length - 1;
++
++		request_resource(&iomem_resource, &video_rom_resource);
++		break;
++			}
++
++	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++	if (start < upper)
++		start = upper;
++
++	/* system rom */
++	request_resource(&iomem_resource, &system_rom_resource);
++	upper = system_rom_resource.start;
++
++	/* check for extension rom (ignore length byte!) */
++	rom = isa_bus_to_virt(extension_rom_resource.start);
++	if (romsignature(rom)) {
++		length = extension_rom_resource.end - extension_rom_resource.start + 1;
++		if (romchecksum(rom, length)) {
++			request_resource(&iomem_resource, &extension_rom_resource);
++			upper = extension_rom_resource.start;
++		}
++	}
++
++	/* check for adapter roms on 2k boundaries */
++	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++		rom = isa_bus_to_virt(start);
++		if (!romsignature(rom))
++			continue;
++
++		/* 0 < length <= 0x7f * 512, historically */
++		length = rom[2] * 512;
++
++		/* but accept any length that fits if checksum okay */
++		if (!length || start + length > upper || !romchecksum(rom, length))
++			continue;
++
++		adapter_rom_resources[i].start = start;
++		adapter_rom_resources[i].end = start + length - 1;
++		request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++		start = adapter_rom_resources[i++].end & ~2047UL;
++	}
++}
++#endif
++
++static __init void parse_cmdline_early (char ** cmdline_p)
++{
++	char c = ' ', *to = command_line, *from = COMMAND_LINE;
++	int len = 0;
++
++	/* Save unparsed command line copy for /proc/cmdline */
++#ifdef CONFIG_XEN
++	int max_cmdline;
++	
++	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++		max_cmdline = COMMAND_LINE_SIZE;
++	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++	saved_command_line[max_cmdline-1] = '\0';
++#else
++	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
++	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
++#endif
++
++	for (;;) {
++		if (c != ' ') 
++			goto next_char; 
++
++#ifdef  CONFIG_SMP
++		/*
++		 * If the BIOS enumerates physical processors before logical,
++		 * maxcpus=N at enumeration-time can be used to disable HT.
++		 */
++		else if (!memcmp(from, "maxcpus=", 8)) {
++			extern unsigned int maxcpus;
++
++			maxcpus = simple_strtoul(from + 8, NULL, 0);
++		}
++#endif
++#ifdef CONFIG_ACPI_BOOT
++		/* "acpi=off" disables both ACPI table parsing and interpreter init */
++		if (!memcmp(from, "acpi=off", 8))
++			disable_acpi();
++
++		if (!memcmp(from, "acpi=force", 10)) { 
++			/* add later when we do DMI horrors: */
++			acpi_force = 1;
++			acpi_disabled = 0;
++		}
++
++		/* acpi=ht just means: do ACPI MADT parsing 
++		   at bootup, but don't enable the full ACPI interpreter */
++		if (!memcmp(from, "acpi=ht", 7)) { 
++			if (!acpi_force)
++				disable_acpi();
++			acpi_ht = 1; 
++		}
++                else if (!memcmp(from, "pci=noacpi", 10)) 
++			acpi_disable_pci();
++		else if (!memcmp(from, "acpi=noirq", 10))
++			acpi_noirq_set();
++
++		else if (!memcmp(from, "acpi_sci=edge", 13))
++			acpi_sci_flags.trigger =  1;
++		else if (!memcmp(from, "acpi_sci=level", 14))
++			acpi_sci_flags.trigger = 3;
++		else if (!memcmp(from, "acpi_sci=high", 13))
++			acpi_sci_flags.polarity = 1;
++		else if (!memcmp(from, "acpi_sci=low", 12))
++			acpi_sci_flags.polarity = 3;
++
++		/* acpi=strict disables out-of-spec workarounds */
++		else if (!memcmp(from, "acpi=strict", 11)) {
++			acpi_strict = 1;
++		}
++#ifdef CONFIG_X86_IO_APIC
++		else if (!memcmp(from, "acpi_skip_timer_override", 24))
++			acpi_skip_timer_override = 1;
++#endif
++#endif
++
++#ifndef CONFIG_XEN
++		if (!memcmp(from, "nolapic", 7) ||
++		    !memcmp(from, "disableapic", 11))
++			disable_apic = 1;
++
++		if (!memcmp(from, "noapic", 6)) 
++			skip_ioapic_setup = 1;
++
++		if (!memcmp(from, "apic", 4)) { 
++			skip_ioapic_setup = 0;
++			ioapic_force = 1;
++		}
++#endif
++			
++		if (!memcmp(from, "mem=", 4))
++			parse_memopt(from+4, &from); 
++
++#ifdef CONFIG_DISCONTIGMEM
++		if (!memcmp(from, "numa=", 5))
++			numa_setup(from+5); 
++#endif
++
++#ifdef CONFIG_GART_IOMMU 
++		if (!memcmp(from,"iommu=",6)) { 
++			iommu_setup(from+6); 
++		}
++#endif
++
++		if (!memcmp(from,"oops=panic", 10))
++			panic_on_oops = 1;
++
++		if (!memcmp(from, "noexec=", 7))
++			nonx_setup(from + 7);
++
++	next_char:
++		c = *(from++);
++		if (!c)
++			break;
++		if (COMMAND_LINE_SIZE <= ++len)
++			break;
++		*(to++) = c;
++	}
++	*to = '\0';
++	*cmdline_p = command_line;
++}
++
++#ifndef CONFIG_DISCONTIGMEM
++#ifdef CONFIG_XEN
++static void __init contig_initmem_init(void)
++{
++        unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
++        free_bootmem(0, xen_start_info->nr_pages << PAGE_SHIFT);
++        reserve_bootmem(HIGH_MEMORY,
++                        (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1)
++                        - HIGH_MEMORY);
++}
++#else
++static void __init contig_initmem_init(void)
++{
++        unsigned long bootmap_size, bootmap; 
++        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
++        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
++        if (bootmap == -1L) 
++                panic("Cannot find bootmem map of size %ld\n",bootmap_size);
++        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
++        e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT); 
++        reserve_bootmem(bootmap, bootmap_size);
++} 
++#endif	/* !CONFIG_XEN */
++#endif
++
++/* Use inline assembly to define this because the nops are defined 
++   as inline assembly strings in the include files and we cannot 
++   get them easily into strings. */
++asm("\t.data\nk8nops: " 
++    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
++    K8_NOP7 K8_NOP8); 
++    
++extern unsigned char k8nops[];
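++/* k8_nops[k] points at the k-byte NOP sequence inside the k8nops blob above
++   (entry 0 is unused). */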
++static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 
++     NULL,
++     k8nops,
++     k8nops + 1,
++     k8nops + 1 + 2,
++     k8nops + 1 + 2 + 3,
++     k8nops + 1 + 2 + 3 + 4,
++     k8nops + 1 + 2 + 3 + 4 + 5,
++     k8nops + 1 + 2 + 3 + 4 + 5 + 6,
++     k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
++}; 
++
++/* Replace instructions with better alternatives for this CPU type.
++
++   This runs before SMP is initialized to avoid SMP problems with
++   self-modifying code. This implies that asymmetric systems where
++   APs have fewer capabilities than the boot processor are not handled.
++   In this case boot with "noreplacement". */
++void apply_alternatives(void *start, void *end) 
++{ 
++	struct alt_instr *a; 
++	int diff, i, k;
++	for (a = start; (void *)a < end; a++) { 
++		if (!boot_cpu_has(a->cpuid))
++			continue;
++
++		BUG_ON(a->replacementlen > a->instrlen); 
++		__inline_memcpy(a->instr, a->replacement, a->replacementlen); 
++		diff = a->instrlen - a->replacementlen; 
++
++		/* Pad the rest with nops */
++		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
++			k = diff;
++			if (k > ASM_NOP_MAX)
++				k = ASM_NOP_MAX;
++			__inline_memcpy(a->instr + i, k8_nops[k], k); 
++		} 
++	}
++} 
++
++static int no_replacement __initdata = 0; 
++ 
++void __init alternative_instructions(void)
++{
++	extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
++	if (no_replacement) 
++		return;
++	apply_alternatives(__alt_instructions, __alt_instructions_end);
++}
++
++static int __init noreplacement_setup(char *s)
++{ 
++     no_replacement = 1; 
++     return 0; 
++} 
++
++__setup("noreplacement", noreplacement_setup); 
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ *              from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++     edd.edd_info_nr = EDD_NR;
++}
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++#ifndef CONFIG_XEN
++#define EBDA_ADDR_POINTER 0x40E
++static void __init reserve_ebda_region(void)
++{
++	unsigned int addr;
++	/** 
++	 * there is a real-mode segmented pointer pointing to the 
++	 * 4K EBDA area at 0x40E
++	 */
++	addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
++	addr <<= 4;
++	if (addr)
++		reserve_bootmem_generic(addr, PAGE_SIZE);
++}
++#endif
++
++void __init setup_arch(char **cmdline_p)
++{
++	unsigned long kernel_end;
++
++#ifdef CONFIG_XEN
++ 	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
++ 	drive_info = DRIVE_INFO;
++	kernel_end = 0;		/* dummy */
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++ 	screen_info = SCREEN_INFO;
++
++	/* This is drawn from a dump from vgacon:startup in standard Linux. */
++	screen_info.orig_video_mode = 3; 
++	screen_info.orig_video_isVGA = 1;
++	screen_info.orig_video_lines = 25;
++	screen_info.orig_video_cols = 80;
++	screen_info.orig_video_ega_bx = 3;
++	screen_info.orig_video_points = 16;
++#endif
++	edid_info = EDID_INFO;
++	saved_video_mode = SAVED_VIDEO_MODE;
++	bootloader_type = LOADER_TYPE;
++
++#ifdef CONFIG_BLK_DEV_RAM
++	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++
++
++#endif
++
++	setup_xen_features();
++
++	HYPERVISOR_vm_assist(VMASST_CMD_enable,
++			     VMASST_TYPE_writable_pagetables);
++
++	ARCH_SETUP
++#else
++ 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
++ 	drive_info = DRIVE_INFO;
++ 	screen_info = SCREEN_INFO;
++	edid_info = EDID_INFO;
++	saved_video_mode = SAVED_VIDEO_MODE;
++	bootloader_type = LOADER_TYPE;
++
++#ifdef CONFIG_BLK_DEV_RAM
++	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++	setup_memory_region();
++	copy_edd();
++#endif	/* !CONFIG_XEN */
++
++	if (!MOUNT_ROOT_RDONLY)
++		root_mountflags &= ~MS_RDONLY;
++	init_mm.start_code = (unsigned long) &_text;
++	init_mm.end_code = (unsigned long) &_etext;
++	init_mm.end_data = (unsigned long) &_edata;
++#ifdef CONFIG_XEN
++	init_mm.brk = start_pfn << PAGE_SHIFT;
++#else
++	init_mm.brk = (unsigned long) &_end;	
++
++	code_resource.start = virt_to_phys(&_text);
++	code_resource.end = virt_to_phys(&_etext)-1;
++	data_resource.start = virt_to_phys(&_etext);
++	data_resource.end = virt_to_phys(&_edata)-1;
++#endif
++
++	parse_cmdline_early(cmdline_p);
++
++	early_identify_cpu(&boot_cpu_data);
++
++	/*
++	 * partially used pages are not usable - thus
++	 * we are rounding upwards:
++	 */
++	end_pfn = e820_end_of_ram();
++
++	check_efer();
++
++	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
++
++#ifdef CONFIG_ACPI_NUMA
++	/*
++	 * Parse SRAT to discover nodes.
++	 */
++	acpi_numa_init();
++#endif
++
++#ifdef CONFIG_DISCONTIGMEM
++	numa_initmem_init(0, end_pfn); 
++#else
++	contig_initmem_init(); 
++#endif
++
++#ifndef CONFIG_XEN
++	/* Reserve direct mapping */
++	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
++				(table_end - table_start) << PAGE_SHIFT);
++
++	/* reserve kernel */
++	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
++	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
++
++	/*
++	 * reserve physical page 0 - it's a special BIOS page on many boxes,
++	 * enabling clean reboots, SMP operation, laptop functions.
++	 */
++	reserve_bootmem_generic(0, PAGE_SIZE);
++
++	/* reserve ebda region */
++	reserve_ebda_region();
++#endif
++
++#ifdef CONFIG_SMP
++	/*
++	 * But first pinch a few for the stack/trampoline stuff
++	 * FIXME: Don't need the extra page at 4K, but need to fix
++	 * trampoline before removing it. (see the GDT stuff)
++	 */
++	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
++
++	/* Reserve SMP trampoline */
++	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
++#endif
++
++#ifdef CONFIG_ACPI_SLEEP
++       /*
++        * Reserve low memory region for sleep support.
++        */
++       acpi_reserve_bootmem();
++#endif
++#ifdef CONFIG_XEN
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (xen_start_info->mod_start) {
++		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
++			initrd_start = INITRD_START + PAGE_OFFSET;
++			initrd_end = initrd_start+INITRD_SIZE;
++			initrd_below_start_ok = 1;
++		} else {
++			printk(KERN_ERR "initrd extends beyond end of memory "
++				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++				(unsigned long)(INITRD_START + INITRD_SIZE),
++				(unsigned long)(end_pfn << PAGE_SHIFT));
++			initrd_start = 0;
++		}
++	}
++#endif
++#else	/* CONFIG_XEN */
++#ifdef CONFIG_BLK_DEV_INITRD
++	if (LOADER_TYPE && INITRD_START) {
++		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
++			initrd_start =
++				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
++			initrd_end = initrd_start+INITRD_SIZE;
++		}
++		else {
++			printk(KERN_ERR "initrd extends beyond end of memory "
++			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++			    (unsigned long)(INITRD_START + INITRD_SIZE),
++			    (unsigned long)(end_pfn << PAGE_SHIFT));
++			initrd_start = 0;
++		}
++	}
++#endif
++#endif	/* !CONFIG_XEN */
++	paging_init();
++#ifdef CONFIG_X86_LOCAL_APIC
++	/*
++	 * Find and reserve possible boot-time SMP configuration:
++	 */
++	find_smp_config();
++#endif
++#ifdef CONFIG_XEN
++	{
++		int i, j, k, fpp;
++		unsigned long va;
++
++		/* Make sure we have a large enough P->M table. */
++		phys_to_machine_mapping = alloc_bootmem(
++			end_pfn * sizeof(unsigned long));
++		memset(phys_to_machine_mapping, ~0,
++		       end_pfn * sizeof(unsigned long));
++		memcpy(phys_to_machine_mapping,
++		       (unsigned long *)xen_start_info->mfn_list,
++		       xen_start_info->nr_pages * sizeof(unsigned long));
++		free_bootmem(
++			__pa(xen_start_info->mfn_list), 
++			PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++					sizeof(unsigned long))));
++
++		/* 'Initial mapping' of old p2m table must be destroyed. */
++		for (va = xen_start_info->mfn_list;
++		     va < (xen_start_info->mfn_list +
++			   (xen_start_info->nr_pages*sizeof(unsigned long)));
++		     va += PAGE_SIZE) {
++			HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
++		}
++
++		/* 'Initial mapping' of initrd must be destroyed. */
++		for (va = xen_start_info->mod_start;
++		     va < (xen_start_info->mod_start+xen_start_info->mod_len);
++		     va += PAGE_SIZE) {
++			HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
++		}
++
++		/*
++		 * Initialise the list of frames holding the list of frames
++		 * that make up the p2m table (used by save/restore).
++		 */
++		pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
++		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++		  virt_to_mfn(pfn_to_mfn_frame_list_list);
++
++		fpp = PAGE_SIZE/sizeof(unsigned long);
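++		/*
++		 * Two-level structure: each pfn_to_mfn_frame_list page holds
++		 * the MFNs of fpp p2m-table pages, and
++		 * pfn_to_mfn_frame_list_list holds the MFNs of those list
++		 * pages (fpp is 512 with 4K pages and 8-byte longs).
++		 */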
++		for ( i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++ )
++		{
++			if ( (j % fpp) == 0 )
++			{
++				k++;
++				BUG_ON(k>=fpp);
++				pfn_to_mfn_frame_list[k] = alloc_bootmem(PAGE_SIZE);
++				pfn_to_mfn_frame_list_list[k] = 
++					virt_to_mfn(pfn_to_mfn_frame_list[k]);
++				j=0;
++			}
++			pfn_to_mfn_frame_list[k][j] = 
++				virt_to_mfn(&phys_to_machine_mapping[i]);
++		}
++		HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
++
++	}
++
++	if ( ! (xen_start_info->flags & SIF_INITDOMAIN))
++	{
++		acpi_disabled = 1;
++#ifdef  CONFIG_ACPI_BOOT
++		acpi_ht = 0;
++#endif
++	}
++#endif
++
++#ifndef CONFIG_XEN
++	check_ioapic();
++#endif
++
++#ifdef CONFIG_ACPI_BOOT
++	/*
++	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
++	 * Call this early for SRAT node setup.
++	 */
++	acpi_boot_table_init();
++
++	/*
++	 * Read APIC and some other early information from ACPI tables.
++	 */
++	acpi_boot_init();
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++	/*
++	 * get boot-time SMP configuration:
++	 */
++	if (smp_found_config)
++		get_smp_config();
++#ifndef CONFIG_XEN
++	init_apic_mappings();
++#endif
++#endif
++
++#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
++	/*
++	 * Request address space for all standard RAM and ROM resources
++	 * and also for regions reported as reserved by the e820.
++	 */
++	probe_roms();
++	e820_reserve_resources(); 
++#endif
++
++	request_resource(&iomem_resource, &video_ram_resource);
++
++	{
++	unsigned i;
++	/* request I/O space for devices used on all i[345]86 PCs */
++	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++		request_resource(&ioport_resource, &standard_io_resources[i]);
++	}
++
++	e820_setup_gap();
++
++#ifdef CONFIG_GART_IOMMU
++       iommu_hole_init();
++#endif
++
++#ifdef CONFIG_XEN
++       {
++	       physdev_op_t op;
++
++	       op.cmd             = PHYSDEVOP_SET_IOPL;
++	       op.u.set_iopl.iopl = 1;
++	       HYPERVISOR_physdev_op(&op);
++
++	       if (xen_start_info->flags & SIF_INITDOMAIN) {
++		       if (!(xen_start_info->flags & SIF_PRIVILEGED))
++			       panic("Xen granted us console access "
++				     "but not privileged status");
++		       
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++	       conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++	       conswitchp = &dummy_con;
++#endif
++#endif
++	       } else {
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++		       extern const struct consw xennull_con;
++		       extern int console_use_vt;
++#if defined(CONFIG_VGA_CONSOLE)
++		/* disable VGA driver */
++		       ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
++#endif
++		       conswitchp = &xennull_con;
++		       console_use_vt = 0;
++#endif
++	       }
++       }
++#else	/* CONFIG_XEN */
++
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++	conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++	conswitchp = &dummy_con;
++#endif
++#endif
++
++#endif /* !CONFIG_XEN */
++}
++
++static int __init get_model_name(struct cpuinfo_x86 *c)
++{
++	unsigned int *v;
++
++	if (c->extended_cpuid_level < 0x80000004)
++		return 0;
++
++	v = (unsigned int *) c->x86_model_id;
++	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++	c->x86_model_id[48] = 0;
++	return 1;
++}
++
++
++static void __init display_cacheinfo(struct cpuinfo_x86 *c)
++{
++	unsigned int n, dummy, eax, ebx, ecx, edx;
++
++	n = c->extended_cpuid_level;
++
++	if (n >= 0x80000005) {
++		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
++		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++		c->x86_cache_size=(ecx>>24)+(edx>>24);
++		/* On K8 L1 TLB is inclusive, so don't count it */
++		c->x86_tlbsize = 0;
++	}
++
++	if (n >= 0x80000006) {
++		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
++		ecx = cpuid_ecx(0x80000006);
++		c->x86_cache_size = ecx >> 16;
++		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
++
++		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++		c->x86_cache_size, ecx & 0xFF);
++	}
++
++	if (n >= 0x80000007)
++		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
++	if (n >= 0x80000008) {
++		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
++		c->x86_virt_bits = (eax >> 8) & 0xff;
++		c->x86_phys_bits = eax & 0xff;
++	}
++}
++
++/*
++ * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
++ * Assumes the number of cores is a power of two.
++ */
++static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++	int cpu = smp_processor_id();
++	int node = 0;
++	unsigned bits;
++	if (c->x86_num_cores == 1)
++		return;
++
++	bits = 0;
++	while ((1 << bits) < c->x86_num_cores)
++		bits++;
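++	/* e.g. x86_num_cores == 2 gives bits == 1: one APIC-ID bit selects the core */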
++
++	/* Low order bits define the core id (index of core in socket) */
++	cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
++	/* Convert the APIC ID into the socket ID */
++	phys_proc_id[cpu] >>= bits;
++
++#ifdef CONFIG_NUMA
++	/* When an ACPI SRAT table is available use the mappings from SRAT
++ 	   instead. */
++	if (acpi_numa <= 0) {
++		node = phys_proc_id[cpu];
++		if (!node_online(node))
++			node = first_node(node_online_map);
++		cpu_to_node[cpu] = node;
++	} else {
++		node = cpu_to_node[cpu];
++	}
++#endif
++
++	printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
++			cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
++#endif
++}
++
++static int __init init_amd(struct cpuinfo_x86 *c)
++{
++	int r;
++	int level;
++
++	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
++	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
++	clear_bit(0*32+31, &c->x86_capability);
++	
++	/* C-stepping K8? */
++	level = cpuid_eax(1);
++	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
++		set_bit(X86_FEATURE_K8_C, &c->x86_capability);
++
++	r = get_model_name(c);
++	if (!r) { 
++		switch (c->x86) { 
++		case 15:
++			/* Should distinguish models here, but this is only
++			   a fallback anyway. */
++			strcpy(c->x86_model_id, "Hammer");
++			break; 
++		} 
++	} 
++	display_cacheinfo(c);
++
++	if (c->extended_cpuid_level >= 0x80000008) {
++		c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
++		if (c->x86_num_cores & (c->x86_num_cores - 1))
++			c->x86_num_cores = 1;
++
++		amd_detect_cmp(c);
++	}
++
++	return r;
++}
++
++static void __init detect_ht(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++	u32 	eax, ebx, ecx, edx;
++	int 	index_msb, tmp;
++	int 	cpu = smp_processor_id();
++	
++	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
++		return;
++
++	cpuid(1, &eax, &ebx, &ecx, &edx);
++	smp_num_siblings = (ebx & 0xff0000) >> 16;
++	
++	if (smp_num_siblings == 1) {
++		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
++	} else if (smp_num_siblings > 1) {
++		index_msb = 31;
++		/*
++		 * At this point we only support two siblings per
++		 * processor package.
++		 */
++		if (smp_num_siblings > NR_CPUS) {
++			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
++			smp_num_siblings = 1;
++			return;
++		}
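++		/*
++		 * index_msb ends up as ceil(log2(smp_num_siblings)): the
++		 * number of low APIC-ID bits that distinguish siblings
++		 * within one physical package.
++		 */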
++		tmp = smp_num_siblings;
++		while ((tmp & 0x80000000 ) == 0) {
++			tmp <<=1 ;
++			index_msb--;
++		}
++		if (smp_num_siblings & (smp_num_siblings - 1))
++			index_msb++;
++		phys_proc_id[cpu] = phys_pkg_id(index_msb);
++		
++		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
++		       phys_proc_id[cpu]);
++
++		smp_num_siblings = smp_num_siblings / c->x86_num_cores;
++
++		tmp = smp_num_siblings;
++		index_msb = 31;
++		while ((tmp & 0x80000000) == 0) {
++			tmp <<=1 ;
++			index_msb--;
++		}
++		if (smp_num_siblings & (smp_num_siblings - 1))
++			index_msb++;
++
++		cpu_core_id[cpu] = phys_pkg_id(index_msb);
++
++		if (c->x86_num_cores > 1)
++			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
++			       cpu_core_id[cpu]);
++	}
++#endif
++}
++
++/*
++ * find out the number of processor cores on the die
++ */
++static int __init intel_num_cpu_cores(struct cpuinfo_x86 *c)
++{
++	unsigned int eax;
++
++	if (c->cpuid_level < 4)
++		return 1;
++
++	__asm__("cpuid"
++		: "=a" (eax)
++		: "0" (4), "c" (0)
++		: "bx", "dx");
++
++	if (eax & 0x1f)
++		return ((eax >> 26) + 1);
++	else
++		return 1;
++}
++
++static void __init init_intel(struct cpuinfo_x86 *c)
++{
++	/* Cache sizes */
++	unsigned n;
++
++	init_intel_cacheinfo(c);
++	n = c->extended_cpuid_level;
++	if (n >= 0x80000008) {
++		unsigned eax = cpuid_eax(0x80000008);
++		c->x86_virt_bits = (eax >> 8) & 0xff;
++		c->x86_phys_bits = eax & 0xff;
++	}
++
++	if (c->x86 == 15)
++		c->x86_cache_alignment = c->x86_clflush_size * 2;
++	if (c->x86 >= 15)
++		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++ 	c->x86_num_cores = intel_num_cpu_cores(c);
++}
++
++void __init get_cpu_vendor(struct cpuinfo_x86 *c)
++{
++	char *v = c->x86_vendor_id;
++
++	if (!strcmp(v, "AuthenticAMD"))
++		c->x86_vendor = X86_VENDOR_AMD;
++	else if (!strcmp(v, "GenuineIntel"))
++		c->x86_vendor = X86_VENDOR_INTEL;
++	else
++		c->x86_vendor = X86_VENDOR_UNKNOWN;
++}
++
++struct cpu_model_info {
++	int vendor;
++	int family;
++	char *model_names[16];
++};
++
++/* Do some early cpuid on the boot CPU to get some parameters that are
++   needed before check_bugs. Everything advanced is in identify_cpu
++   below. */
++void __init early_identify_cpu(struct cpuinfo_x86 *c)
++{
++	u32 tfms;
++
++	c->loops_per_jiffy = loops_per_jiffy;
++	c->x86_cache_size = -1;
++	c->x86_vendor = X86_VENDOR_UNKNOWN;
++	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
++	c->x86_vendor_id[0] = '\0'; /* Unset */
++	c->x86_model_id[0] = '\0';  /* Unset */
++	c->x86_clflush_size = 64;
++	c->x86_cache_alignment = c->x86_clflush_size;
++	c->x86_num_cores = 1;
++	c->extended_cpuid_level = 0;
++	memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++	/* Get vendor name */
++	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
++	      (unsigned int *)&c->x86_vendor_id[0],
++	      (unsigned int *)&c->x86_vendor_id[8],
++	      (unsigned int *)&c->x86_vendor_id[4]);
++		
++	get_cpu_vendor(c);
++
++	/* Initialize the standard set of capabilities */
++	/* Note that the vendor-specific code below might override */
++
++	/* Intel-defined flags: level 0x00000001 */
++	if (c->cpuid_level >= 0x00000001) {
++		__u32 misc;
++		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
++		      &c->x86_capability[0]);
++		c->x86 = (tfms >> 8) & 0xf;
++		c->x86_model = (tfms >> 4) & 0xf;
++		c->x86_mask = tfms & 0xf;
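++		/* family 0xf encodes extra family/model bits in the extended fields */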
++		if (c->x86 == 0xf) {
++			c->x86 += (tfms >> 20) & 0xff;
++			c->x86_model += ((tfms >> 16) & 0xF) << 4;
++		} 
++		if (c->x86_capability[0] & (1<<19)) 
++			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
++	} else {
++		/* Have CPUID level 0 only - unheard of */
++		c->x86 = 4;
++	}
++
++#ifdef CONFIG_SMP
++	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __init identify_cpu(struct cpuinfo_x86 *c)
++{
++	int i;
++	u32 xlvl;
++
++	early_identify_cpu(c);
++
++	/* AMD-defined flags: level 0x80000001 */
++	xlvl = cpuid_eax(0x80000000);
++	c->extended_cpuid_level = xlvl;
++	if ((xlvl & 0xffff0000) == 0x80000000) {
++		if (xlvl >= 0x80000001) {
++			c->x86_capability[1] = cpuid_edx(0x80000001);
++			c->x86_capability[6] = cpuid_ecx(0x80000001);
++		}
++		if (xlvl >= 0x80000004)
++			get_model_name(c); /* Default name */
++	}
++
++	/* Transmeta-defined flags: level 0x80860001 */
++	xlvl = cpuid_eax(0x80860000);
++	if ((xlvl & 0xffff0000) == 0x80860000) {
++		/* Don't set x86_cpuid_level here for now, to avoid confusion. */
++		if (xlvl >= 0x80860001)
++			c->x86_capability[2] = cpuid_edx(0x80860001);
++	}
++
++	/*
++	 * Vendor-specific initialization.  In this section we
++	 * canonicalize the feature flags, meaning if there are
++	 * features a certain CPU supports which CPUID doesn't
++	 * tell us, CPUID claiming incorrect flags, or other bugs,
++	 * we handle them here.
++	 *
++	 * At the end of this section, c->x86_capability better
++	 * indicate the features this CPU genuinely supports!
++	 */
++	switch (c->x86_vendor) {
++	case X86_VENDOR_AMD:
++		init_amd(c);
++		break;
++
++	case X86_VENDOR_INTEL:
++		init_intel(c);
++		break;
++
++	case X86_VENDOR_UNKNOWN:
++	default:
++		display_cacheinfo(c);
++		break;
++	}
++
++	select_idle_routine(c);
++	detect_ht(c); 
++
++	/*
++	 * On SMP, boot_cpu_data holds the common feature set between
++	 * all CPUs; so make sure that we indicate which features are
++	 * common between the CPUs.  The first time this routine gets
++	 * executed, c == &boot_cpu_data.
++	 */
++	if (c != &boot_cpu_data) {
++		/* AND the already accumulated flags with these */
++		for (i = 0 ; i < NCAPINTS ; i++)
++			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++	}
++
++#ifdef CONFIG_X86_MCE
++	mcheck_init(c);
++#endif
++#ifdef CONFIG_NUMA
++	if (c != &boot_cpu_data)
++		numa_add_cpu(c - cpu_data);
++#endif
++}
++ 
++
++void __init print_cpu_info(struct cpuinfo_x86 *c)
++{
++	if (c->x86_model_id[0])
++		printk("%s", c->x86_model_id);
++
++	if (c->x86_mask || c->cpuid_level >= 0) 
++		printk(" stepping %02x\n", c->x86_mask);
++	else
++		printk("\n");
++}
++
++/*
++ *	Get CPU information for use by the procfs.
++ */
++
++static int show_cpuinfo(struct seq_file *m, void *v)
++{
++	struct cpuinfo_x86 *c = v;
++
++	/* 
++	 * These flag bits must match the definitions in <asm/cpufeature.h>.
++	 * NULL means this bit is undefined or reserved; either way it doesn't
++	 * have meaning as far as Linux is concerned.  Note that it's important
++	 * to realize there is a difference between this table and CPUID -- if
++	 * applications want to get the raw CPUID data, they should access
++	 * /dev/cpu/<cpu_nr>/cpuid instead.
++	 */
++	static char *x86_cap_flags[] = {
++		/* Intel-defined */
++	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
++	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
++	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
++	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
++
++		/* AMD-defined */
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
++		NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
++
++		/* Transmeta-defined */
++		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* Other (Linux-defined) */
++		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
++		"constant_tsc", NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* Intel-defined (#2) */
++		"pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
++		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* VIA/Cyrix/Centaur-defined */
++		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++		/* AMD-defined (#2) */
++		"lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++	};
++	static char *x86_power_flags[] = { 
++		"ts",	/* temperature sensor */
++		"fid",  /* frequency id control */
++		"vid",  /* voltage id control */
++		"ttp",  /* thermal trip */
++		"tm",
++		"stc"
++	};
++
++
++#ifdef CONFIG_SMP
++	if (!cpu_online(c-cpu_data))
++		return 0;
++#endif
++
++	seq_printf(m,"processor\t: %u\n"
++		     "vendor_id\t: %s\n"
++		     "cpu family\t: %d\n"
++		     "model\t\t: %d\n"
++		     "model name\t: %s\n",
++		     (unsigned)(c-cpu_data),
++		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
++		     c->x86,
++		     (int)c->x86_model,
++		     c->x86_model_id[0] ? c->x86_model_id : "unknown");
++	
++	if (c->x86_mask || c->cpuid_level >= 0)
++		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
++	else
++		seq_printf(m, "stepping\t: unknown\n");
++	
++	if (cpu_has(c,X86_FEATURE_TSC)) {
++		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
++			     cpu_khz / 1000, (cpu_khz % 1000));
++	}
++
++	/* Cache size */
++	if (c->x86_cache_size >= 0) 
++		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
++	
++#ifdef CONFIG_SMP
++	if (smp_num_siblings * c->x86_num_cores > 1) {
++		int cpu = c - cpu_data;
++		seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
++		seq_printf(m, "siblings\t: %d\n",
++				c->x86_num_cores * smp_num_siblings);
++		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
++		seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
++	}
++#endif	
++
++	seq_printf(m,
++	        "fpu\t\t: yes\n"
++	        "fpu_exception\t: yes\n"
++	        "cpuid level\t: %d\n"
++	        "wp\t\t: yes\n"
++	        "flags\t\t:",
++		   c->cpuid_level);
++
++	{ 
++		int i; 
++		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
++			if ( test_bit(i, &c->x86_capability) &&
++			     x86_cap_flags[i] != NULL )
++				seq_printf(m, " %s", x86_cap_flags[i]);
++	}
++		
++	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
++		   c->loops_per_jiffy/(500000/HZ),
++		   (c->loops_per_jiffy/(5000/HZ)) % 100);
++
++	if (c->x86_tlbsize > 0) 
++		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
++	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
++	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
++
++	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
++		   c->x86_phys_bits, c->x86_virt_bits);
++
++	seq_printf(m, "power management:");
++	{
++		unsigned i;
++		for (i = 0; i < 32; i++) 
++			if (c->x86_power & (1 << i)) {
++				if (i < ARRAY_SIZE(x86_power_flags))
++					seq_printf(m, " %s", x86_power_flags[i]);
++				else
++					seq_printf(m, " [%d]", i);
++			}
++	}
++
++	seq_printf(m, "\n\n");
++
++	return 0;
++}
++
++static void *c_start(struct seq_file *m, loff_t *pos)
++{
++	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
++}
++
++static void *c_next(struct seq_file *m, void *v, loff_t *pos)
++{
++	++*pos;
++	return c_start(m, pos);
++}
++
++static void c_stop(struct seq_file *m, void *v)
++{
++}
++
++struct seq_operations cpuinfo_op = {
++	.start =c_start,
++	.next =	c_next,
++	.stop =	c_stop,
++	.show =	show_cpuinfo,
++};
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/signal.c linux-2.6.12-xen/arch/xen/x86_64/kernel/signal.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/signal.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/signal.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,493 @@
++/*
++ *  linux/arch/x86_64/kernel/signal.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
++ *
++ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
++ *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
++ *  2000-2002   x86-64 support by Andi Kleen
++ * 
++ *  $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel.h>
++#include <linux/signal.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/ptrace.h>
++#include <linux/unistd.h>
++#include <linux/stddef.h>
++#include <linux/personality.h>
++#include <linux/compiler.h>
++#include <asm/ucontext.h>
++#include <asm/uaccess.h>
++#include <asm/i387.h>
++#include <asm/proto.h>
++#include <asm/ia32_unistd.h>
++
++/* #define DEBUG_SIG 1 */
++
++#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
++
++void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
++               sigset_t *set, struct pt_regs * regs); 
++void ia32_setup_frame(int sig, struct k_sigaction *ka,
++            sigset_t *set, struct pt_regs * regs); 
++
++asmlinkage long
++sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
++{
++	sigset_t saveset, newset;
++
++	/* XXX: Don't preclude handling different sized sigset_t's.  */
++	if (sigsetsize != sizeof(sigset_t))
++		return -EINVAL;
++
++	if (copy_from_user(&newset, unewset, sizeof(newset)))
++		return -EFAULT;
++	sigdelsetmask(&newset, ~_BLOCKABLE);
++
++	spin_lock_irq(&current->sighand->siglock);
++	saveset = current->blocked;
++	current->blocked = newset;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
++#ifdef DEBUG_SIG
++	printk("rt_sigsuspend savset(%lx) newset(%lx) regs(%p) rip(%lx)\n",
++		saveset, newset, regs, regs->rip);
++#endif 
++	regs->rax = -EINTR;
++	while (1) {
++		current->state = TASK_INTERRUPTIBLE;
++		schedule();
++		if (do_signal(regs, &saveset))
++			return -EINTR;
++	}
++}
++
++asmlinkage long
++sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
++		struct pt_regs *regs)
++{
++	return do_sigaltstack(uss, uoss, regs->rsp);
++}
++
++
++/*
++ * Do a signal return; undo the signal stack.
++ */
++
++struct rt_sigframe
++{
++	char __user *pretcode;
++	struct ucontext uc;
++	struct siginfo info;
++};
++
++static int
++restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *prax)
++{
++	unsigned int err = 0;
++
++	/* Always make any pending restarted system calls return -EINTR */
++	current_thread_info()->restart_block.fn = do_no_restart_syscall;
++
++#define COPY(x)		err |= __get_user(regs->x, &sc->x)
++
++	COPY(rdi); COPY(rsi); COPY(rbp); COPY(rsp); COPY(rbx);
++	COPY(rdx); COPY(rcx); COPY(rip);
++	COPY(r8);
++	COPY(r9);
++	COPY(r10);
++	COPY(r11);
++	COPY(r12);
++	COPY(r13);
++	COPY(r14);
++	COPY(r15);
++
++	{
++		unsigned int tmpflags;
++		err |= __get_user(tmpflags, &sc->eflags);
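++		/*
++		 * 0x40DD5 covers CF, PF, AF, ZF, SF, TF, DF, OF and AC: only
++		 * these bits may be restored from the signal frame; IF, IOPL
++		 * and the other system flags keep their kernel values.
++		 */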
++		regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
++		regs->orig_rax = -1;		/* disable syscall checks */
++	}
++
++	{
++		struct _fpstate __user * buf;
++		err |= __get_user(buf, &sc->fpstate);
++
++		if (buf) {
++			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
++				goto badframe;
++			err |= restore_i387(buf);
++		} else {
++			struct task_struct *me = current;
++			if (used_math()) {
++				clear_fpu(me);
++				clear_used_math();
++			}
++		}
++	}
++
++	err |= __get_user(*prax, &sc->rax);
++	return err;
++
++badframe:
++	return 1;
++}
++
++asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
++{
++	struct rt_sigframe __user *frame;
++	sigset_t set;
++	unsigned long eax;
++
++	frame = (struct rt_sigframe __user *)(regs->rsp - 8);
++	if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) {
++		goto badframe;
++	} 
++	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) { 
++		goto badframe;
++	} 
++
++	sigdelsetmask(&set, ~_BLOCKABLE);
++	spin_lock_irq(&current->sighand->siglock);
++	current->blocked = set;
++	recalc_sigpending();
++	spin_unlock_irq(&current->sighand->siglock);
++	
++	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
++		goto badframe;
++
++#ifdef DEBUG_SIG
++	printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",current->pid,regs->rip,regs->rsp,frame,eax);
++#endif
++
++	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->rsp) == -EFAULT)
++		goto badframe;
++
++	return eax;
++
++badframe:
++	signal_fault(regs,frame,"sigreturn");
++	return 0;
++}	
++
++/*
++ * Set up a signal frame.
++ */
++
++static inline int
++setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me)
++{
++	int err = 0;
++
++	err |= __put_user(0, &sc->gs);
++	err |= __put_user(0, &sc->fs);
++
++	err |= __put_user(regs->rdi, &sc->rdi);
++	err |= __put_user(regs->rsi, &sc->rsi);
++	err |= __put_user(regs->rbp, &sc->rbp);
++	err |= __put_user(regs->rsp, &sc->rsp);
++	err |= __put_user(regs->rbx, &sc->rbx);
++	err |= __put_user(regs->rdx, &sc->rdx);
++	err |= __put_user(regs->rcx, &sc->rcx);
++	err |= __put_user(regs->rax, &sc->rax);
++	err |= __put_user(regs->r8, &sc->r8);
++	err |= __put_user(regs->r9, &sc->r9);
++	err |= __put_user(regs->r10, &sc->r10);
++	err |= __put_user(regs->r11, &sc->r11);
++	err |= __put_user(regs->r12, &sc->r12);
++	err |= __put_user(regs->r13, &sc->r13);
++	err |= __put_user(regs->r14, &sc->r14);
++	err |= __put_user(regs->r15, &sc->r15);
++	err |= __put_user(me->thread.trap_no, &sc->trapno);
++	err |= __put_user(me->thread.error_code, &sc->err);
++	err |= __put_user(regs->rip, &sc->rip);
++	err |= __put_user(regs->eflags, &sc->eflags);
++	err |= __put_user(mask, &sc->oldmask);
++	err |= __put_user(me->thread.cr2, &sc->cr2);
++
++	return err;
++}
++
++/*
++ * Determine which stack to use..
++ */
++
++static void __user *
++get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
++{
++	unsigned long rsp;
++
++	/* Default to using the normal stack, leaving the 128-byte red zone intact */
++	rsp = regs->rsp - 128;
++
++	/* This is the X/Open sanctioned signal stack switching.  */
++	/* RED-PEN: redzone on that stack? */
++	if (ka->sa.sa_flags & SA_ONSTACK) {
++		if (sas_ss_flags(rsp) == 0)
++			rsp = current->sas_ss_sp + current->sas_ss_size;
++	}
++
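++	/* keep the frame 16-byte aligned, as the ABI expects */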
++	return (void __user *)round_down(rsp - size, 16); 
++}
++
++static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
++			   sigset_t *set, struct pt_regs * regs)
++{
++	struct rt_sigframe __user *frame;
++	struct _fpstate __user *fp = NULL; 
++	int err = 0;
++	struct task_struct *me = current;
++
++	if (used_math()) {
++		fp = get_stack(ka, regs, sizeof(struct _fpstate)); 
++		frame = (void __user *)round_down(
++			(unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
++
++		if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
++			goto give_sigsegv;
++
++		if (save_i387(fp) < 0) 
++			err |= -1; 
++	} else
++		frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
++
++	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
++		goto give_sigsegv;
++
++	if (ka->sa.sa_flags & SA_SIGINFO) { 
++		err |= copy_siginfo_to_user(&frame->info, info);
++		if (err)
++			goto give_sigsegv;
++	}
++		
++	/* Create the ucontext.  */
++	err |= __put_user(0, &frame->uc.uc_flags);
++	err |= __put_user(0, &frame->uc.uc_link);
++	err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
++	err |= __put_user(sas_ss_flags(regs->rsp),
++			  &frame->uc.uc_stack.ss_flags);
++	err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
++	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
++	err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
++	if (sizeof(*set) == 16) { 
++		__put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
++		__put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 
++	} else
++		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
++
++	/* Set up to return from userspace.  If provided, use a stub
++	   already in userspace.  */
++	/* x86-64 should always use SA_RESTORER. */
++	if (ka->sa.sa_flags & SA_RESTORER) {
++		err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
++	} else {
++		/* could use a vstub here */
++		goto give_sigsegv; 
++	}
++
++	if (err)
++		goto give_sigsegv;
++
++#ifdef DEBUG_SIG
++	printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax);
++#endif
++
++	/* Set up registers for signal handler */
++	{ 
++		struct exec_domain *ed = current_thread_info()->exec_domain;
++		if (unlikely(ed && ed->signal_invmap && sig < 32))
++			sig = ed->signal_invmap[sig];
++	} 
++	regs->rdi = sig;
++	/* In case the signal handler was declared without prototypes */ 
++	regs->rax = 0;	
++
++	/* This also works for non SA_SIGINFO handlers because they expect the
++	   next argument after the signal number on the stack. */
++	regs->rsi = (unsigned long)&frame->info; 
++	regs->rdx = (unsigned long)&frame->uc; 
++	regs->rip = (unsigned long) ka->sa.sa_handler;
++
++	regs->rsp = (unsigned long)frame;
++
++	set_fs(USER_DS);
++	regs->eflags &= ~TF_MASK;
++	if (test_thread_flag(TIF_SINGLESTEP))
++		ptrace_notify(SIGTRAP);
++#ifdef DEBUG_SIG
++	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
++		current->comm, current->pid, frame, regs->rip, frame->pretcode);
++#endif
++
++	return;
++
++give_sigsegv:
++	force_sigsegv(sig, current);
++}
++
++/*
++ * OK, we're invoking a handler
++ */	
++
++static void
++handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
++		sigset_t *oldset, struct pt_regs *regs)
++{
++#ifdef DEBUG_SIG
++	printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n",
++		current->pid, sig,
++		regs->rip, regs->rsp, regs);
++#endif
++
++	/* Are we from a system call? */
++	if ((long)regs->orig_rax >= 0) {
++		/* If so, check system call restarting.. */
++		switch (regs->rax) {
++		        case -ERESTART_RESTARTBLOCK:
++			case -ERESTARTNOHAND:
++				regs->rax = -EINTR;
++				break;
++
++			case -ERESTARTSYS:
++				if (!(ka->sa.sa_flags & SA_RESTART)) {
++					regs->rax = -EINTR;
++					break;
++				}
++				/* fallthrough */
++			case -ERESTARTNOINTR:
++				regs->rax = regs->orig_rax;
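++				/* back rip up over the 2-byte syscall insn so it is re-executed */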
++				regs->rip -= 2;
++				break;
++		}
++	}
++
++	/*
++	 * If TF is set due to a debugger (PT_DTRACE), clear the TF
++	 * flag so that register information in the sigcontext is
++	 * correct.
++	 */
++	if (unlikely(regs->eflags & TF_MASK)) {
++		if (likely(current->ptrace & PT_DTRACE)) {
++			current->ptrace &= ~PT_DTRACE;
++			regs->eflags &= ~TF_MASK;
++		}
++	}
++
++#ifdef CONFIG_IA32_EMULATION
++	if (test_thread_flag(TIF_IA32)) {
++		if (ka->sa.sa_flags & SA_SIGINFO)
++			ia32_setup_rt_frame(sig, ka, info, oldset, regs);
++		else
++			ia32_setup_frame(sig, ka, oldset, regs);
++	} else 
++#endif
++	setup_rt_frame(sig, ka, info, oldset, regs);
++
++	if (!(ka->sa.sa_flags & SA_NODEFER)) {
++		spin_lock_irq(&current->sighand->siglock);
++		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
++		sigaddset(&current->blocked,sig);
++		recalc_sigpending();
++		spin_unlock_irq(&current->sighand->siglock);
++	}
++}
++
++/*
++ * Note that 'init' is a special process: it doesn't get signals it doesn't
++ * want to handle. Thus you cannot kill init even with a SIGKILL even by
++ * mistake.
++ */
++int do_signal(struct pt_regs *regs, sigset_t *oldset)
++{
++	struct k_sigaction ka;
++	siginfo_t info;
++	int signr;
++
++	/*
++	 * We want the common case to go fast, which
++	 * is why we may in certain cases get here from
++	 * kernel mode. Just return without doing anything
++	 * if so.
++	 */
++	if ((regs->cs & 2) != 2)
++		return 1;
++
++	if (try_to_freeze(0))
++		goto no_signal;
++
++	if (!oldset)
++		oldset = &current->blocked;
++
++	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
++	if (signr > 0) {
++		/* Reenable any watchpoints before delivering the
++		 * signal to user space. The processor register will
++		 * have been cleared if the watchpoint triggered
++		 * inside the kernel.
++		 */
++                if (current->thread.debugreg7) {
++                        HYPERVISOR_set_debugreg(7,
++						current->thread.debugreg7);
++		}
++
++		/* Whee!  Actually deliver the signal.  */
++		handle_signal(signr, &info, &ka, oldset, regs);
++		return 1;
++	}
++
++ no_signal:
++	/* Did we come from a system call? */
++	if ((long)regs->orig_rax >= 0) {
++		/* Restart the system call - no handlers present */
++		long res = regs->rax;
++		if (res == -ERESTARTNOHAND ||
++		    res == -ERESTARTSYS ||
++		    res == -ERESTARTNOINTR) {
++			regs->rax = regs->orig_rax;
++			regs->rip -= 2;
++		}
++		if (regs->rax == (unsigned long)-ERESTART_RESTARTBLOCK) {
++			regs->rax = test_thread_flag(TIF_IA32) ?
++					__NR_ia32_restart_syscall :
++					__NR_restart_syscall;
++			regs->rip -= 2;
++		}
++	}
++	return 0;
++}
++
++void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, __u32 thread_info_flags)
++{
++#ifdef DEBUG_SIG
++	printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%lx pending:%lx\n",
++	       thread_info_flags, regs->rip, regs->rsp, __builtin_return_address(0),signal_pending(current)); 
++#endif
++	       
++	/* Pending single-step? */
++	if (thread_info_flags & _TIF_SINGLESTEP) {
++		regs->eflags |= TF_MASK;
++		clear_thread_flag(TIF_SINGLESTEP);
++	}
++
++	/* deal with pending signal delivery */
++	if (thread_info_flags & _TIF_SIGPENDING)
++		do_signal(regs,oldset);
++}
++
++void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
++{ 
++	struct task_struct *me = current; 
++	if (exception_trace)
++		printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n",
++	       me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax); 
++
++	force_sig(SIGSEGV, me); 
++} 
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/smp.c linux-2.6.12-xen/arch/xen/x86_64/kernel/smp.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/smp.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/smp.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,493 @@
++/*
++ *	Intel SMP support routines.
++ *
++ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
++ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
++ *      (c) 2002,2003 Andi Kleen, SuSE Labs.
++ *
++ *	This code is released under the GNU General Public License version 2 or
++ *	later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/smp.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/interrupt.h>
++
++#include <asm/mtrr.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++#include <asm/mach_apic.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/apicdef.h>
++#ifdef CONFIG_XEN
++#include <asm-xen/evtchn.h>
++#endif
++
++#ifndef CONFIG_XEN
++/*
++ *	Smarter SMP flushing macros. 
++ *		c/o Linus Torvalds.
++ *
++ *	These mean you can really definitely utterly forget about
++ *	writing to user space from interrupts. (It's not allowed anyway).
++ *
++ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
++ */
++
++static cpumask_t flush_cpumask;
++static struct mm_struct * flush_mm;
++static unsigned long flush_va;
++static DEFINE_SPINLOCK(tlbstate_lock);
++#define FLUSH_ALL	-1ULL
++#endif
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context;
++ * instead update mm->cpu_vm_mask.
++ */
++static inline void leave_mm (unsigned long cpu)
++{
++	if (read_pda(mmu_state) == TLBSTATE_OK)
++		BUG();
++	clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
++	load_cr3(swapper_pg_dir);
++}
++
++#ifndef CONFIG_XEN
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
++ * 	Stop ipi delivery for the old mm. This is not synchronized with
++ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * 	for the wrong mm, and in the worst case we perform a superfluous
++ * 	tlb flush.
++ * 1a2) set cpu mmu_state to TLBSTATE_OK
++ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ *	was in lazy tlb mode.
++ * 1a3) update cpu active_mm
++ * 	Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
++ * 	Now the other cpus will send tlb flush ipis.
++ * 1a5) change cr3.
++ * 1b) thread switch without mm change
++ *	cpu active_mm is correct, cpu0 already handles
++ *	flush ipis.
++ * 1b1) set cpu mmu_state to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * 	Atomically set the bit [other cpus will start sending flush ipis],
++ * 	and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ *   runs in kernel space, the cpu could load tlb entries for user space
++ *   pages.
++ *
++ * The good news is that cpu mmu_state is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ */
++
++asmlinkage void smp_invalidate_interrupt (void)
++{
++	unsigned long cpu;
++
++	cpu = get_cpu();
++
++	if (!cpu_isset(cpu, flush_cpumask))
++		goto out;
++		/* 
++		 * This was a BUG() but until someone can quote me the
++		 * line from the intel manual that guarantees an IPI to
++		 * multiple CPUs is retried _only_ on the erroring CPUs
++		 * its staying as a return
++		 * it's staying as a return
++		 * BUG();
++		 */
++		 
++	if (flush_mm == read_pda(active_mm)) {
++		if (read_pda(mmu_state) == TLBSTATE_OK) {
++			if (flush_va == FLUSH_ALL)
++				local_flush_tlb();
++			else
++				__flush_tlb_one(flush_va);
++		} else
++			leave_mm(cpu);
++	}
++	ack_APIC_irq();
++	cpu_clear(cpu, flush_cpumask);
++
++out:
++	put_cpu_no_resched();
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++						unsigned long va)
++{
++	cpumask_t tmp;
++	/*
++	 * A couple of (to be removed) sanity checks:
++	 *
++	 * - we do not send IPIs to not-yet booted CPUs.
++	 * - current CPU must not be in mask
++	 * - mask must exist :)
++	 */
++	BUG_ON(cpus_empty(cpumask));
++	cpus_and(tmp, cpumask, cpu_online_map);
++	BUG_ON(!cpus_equal(tmp, cpumask));
++	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
++	if (!mm)
++		BUG();
++
++	/*
++	 * I'm not happy about this global shared spinlock in the
++	 * MM hot path, but we'll see how contended it is.
++	 * Temporarily this turns IRQs off, so that lockups are
++	 * detected by the NMI watchdog.
++	 */
++	spin_lock(&tlbstate_lock);
++	
++	flush_mm = mm;
++	flush_va = va;
++	cpus_or(flush_cpumask, cpumask, flush_cpumask);
++
++	/*
++	 * We have to send the IPI only to
++	 * CPUs affected.
++	 */
++	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
++
++	while (!cpus_empty(flush_cpumask))
++		mb();	/* nothing. lockup detection does not belong here */;
++
++	flush_mm = NULL;
++	flush_va = 0;
++	spin_unlock(&tlbstate_lock);
++}
++	
++void flush_tlb_current_task(void)
++{
++	struct mm_struct *mm = current->mm;
++	cpumask_t cpu_mask;
++
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	local_flush_tlb();
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++	preempt_enable();
++}
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++	cpumask_t cpu_mask;
++
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	if (current->active_mm == mm) {
++		if (current->mm)
++			local_flush_tlb();
++		else
++			leave_mm(smp_processor_id());
++	}
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++	preempt_enable();
++}
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++	struct mm_struct *mm = vma->vm_mm;
++	cpumask_t cpu_mask;
++
++	preempt_disable();
++	cpu_mask = mm->cpu_vm_mask;
++	cpu_clear(smp_processor_id(), cpu_mask);
++
++	if (current->active_mm == mm) {
++		if(current->mm)
++			__flush_tlb_one(va);
++		 else
++		 	leave_mm(smp_processor_id());
++	}
++
++	if (!cpus_empty(cpu_mask))
++		flush_tlb_others(cpu_mask, mm, va);
++
++	preempt_enable();
++}
++
++static void do_flush_tlb_all(void* info)
++{
++	unsigned long cpu = smp_processor_id();
++
++	__flush_tlb_all();
++	if (read_pda(mmu_state) == TLBSTATE_LAZY)
++		leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++#else
++asmlinkage void smp_invalidate_interrupt (void)
++{ return; }
++void flush_tlb_current_task(void)
++{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
++void flush_tlb_mm (struct mm_struct * mm)
++{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
++void flush_tlb_all(void)
++{ xen_tlb_flush_all(); }
++#endif /* Xen */
++
++void smp_kdb_stop(void)
++{
++	send_IPI_allbutself(KDB_VECTOR);
++}
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++
++void smp_send_reschedule(int cpu)
++{
++	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++	void (*func) (void *info);
++	void *info;
++	atomic_t started;
++	atomic_t finished;
++	int wait;
++};
++
++static struct call_data_struct * call_data;
++
++/*
++ * this function sends a 'generic call function' IPI to all other CPUs
++ * in the system.
++ */
++static void __smp_call_function (void (*func) (void *info), void *info,
++				int nonatomic, int wait)
++{
++	struct call_data_struct data;
++	int cpus = num_online_cpus()-1;
++
++	if (!cpus)
++		return;
++
++	data.func = func;
++	data.info = info;
++	atomic_set(&data.started, 0);
++	data.wait = wait;
++	if (wait)
++		atomic_set(&data.finished, 0);
++
++	call_data = &data;
++	wmb();
++	/* Send a message to all other CPUs and wait for them to respond */
++	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++	/* Wait for response */
++	while (atomic_read(&data.started) != cpus)
++#ifndef CONFIG_XEN
++		cpu_relax();
++#else
++		barrier();
++#endif
++
++	if (!wait)
++		return;
++
++	while (atomic_read(&data.finished) != cpus)
++#ifndef CONFIG_XEN
++		cpu_relax();
++#else
++		barrier();
++#endif
++}
++
++/*
++ * smp_call_function - run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other
++ *        CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute func, or have already executed it.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ * Actually there are a few legal cases, like panic.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++			int wait)
++{
++	spin_lock(&call_lock);
++	__smp_call_function(func,info,nonatomic,wait);
++	spin_unlock(&call_lock);
++	return 0;
++}
++
++void smp_stop_cpu(void)
++{
++	/*
++	 * Remove this CPU:
++	 */
++	cpu_clear(smp_processor_id(), cpu_online_map);
++	local_irq_disable();
++#ifndef CONFIG_XEN
++	disable_local_APIC();
++#endif
++	local_irq_enable(); 
++}
++
++static void smp_really_stop_cpu(void *dummy)
++{
++	smp_stop_cpu(); 
++	for (;;) 
++		asm("hlt"); 
++} 
++
++void smp_send_stop(void)
++{
++	int nolock = 0;
++#ifndef CONFIG_XEN
++	if (reboot_force)
++		return;
++#endif
++	/* Don't deadlock on the call lock in panic */
++	if (!spin_trylock(&call_lock)) {
++		/* ignore locking because we have paniced anyways */
++		/* ignore locking because we have panicked anyway */
++	}
++	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
++	if (!nolock)
++		spin_unlock(&call_lock);
++
++	local_irq_disable();
++#ifndef CONFIG_XEN
++	disable_local_APIC();
++#endif
++	local_irq_enable();
++}
++
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++#ifndef CONFIG_XEN
++asmlinkage void smp_reschedule_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_reschedule_interrupt(void)
++#endif
++{
++#ifndef CONFIG_XEN
++	ack_APIC_irq();
++#else
++	return IRQ_HANDLED;
++#endif
++}
++
++#ifndef CONFIG_XEN
++asmlinkage void smp_call_function_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_call_function_interrupt(void)
++#endif
++{
++	void (*func) (void *info) = call_data->func;
++	void *info = call_data->info;
++	int wait = call_data->wait;
++
++#ifndef CONFIG_XEN
++	ack_APIC_irq();
++#endif
++	/*
++	 * Notify initiating CPU that I've grabbed the data and am
++	 * about to execute the function
++	 */
++	mb();
++	atomic_inc(&call_data->started);
++	/*
++	 * At this point the info structure may be out of scope unless wait==1
++	 */
++	irq_enter();
++	(*func)(info);
++	irq_exit();
++	if (wait) {
++		mb();
++		atomic_inc(&call_data->finished);
++	}
++#ifdef CONFIG_XEN
++	return IRQ_HANDLED;
++#endif
++}
++
++int safe_smp_processor_id(void)
++{
++#ifdef CONFIG_XEN
++	return smp_processor_id();
++#else
++	int apicid, i;
++
++	if (disable_apic)
++		return 0;
++
++	apicid = hard_smp_processor_id();
++	if (x86_cpu_to_apicid[apicid] == apicid)
++		return apicid;
++
++	for (i = 0; i < NR_CPUS; ++i) {
++		if (x86_cpu_to_apicid[i] == apicid)
++			return i;
++	}
++
++	/* No entries in x86_cpu_to_apicid?  Either no MPS|ACPI,
++	 * or called too early.  Either way, we must be CPU 0. */
++	if (x86_cpu_to_apicid[0] == BAD_APICID)
++		return 0;
++
++	return 0; /* Should not happen */
++#endif
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/traps.c linux-2.6.12-xen/arch/xen/x86_64/kernel/traps.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/traps.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/traps.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,994 @@
++/*
++ *  linux/arch/x86-64/traps.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
++ *
++ *  Pentium III FXSR, SSE support
++ *	Gareth Hughes <gareth at valinux.com>, May 2000
++ *
++ *  $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'entry.S'.
++ */
++#include <linux/config.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/ptrace.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/nmi.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/kdebug.h>
++#include <asm/processor.h>
++
++#include <asm/smp.h>
++#include <asm/pgalloc.h>
++#include <asm/pda.h>
++#include <asm/proto.h>
++#include <asm/nmi.h>
++
++#include <linux/irq.h>
++
++
++extern struct gate_struct idt_table[256]; 
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void double_fault(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void reserved(void);
++asmlinkage void alignment_check(void);
++asmlinkage void machine_check(void);
++asmlinkage void spurious_interrupt_bug(void);
++asmlinkage void call_debug(void);
++
++struct notifier_block *die_chain;
++static DEFINE_SPINLOCK(die_notifier_lock);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++	int err = 0;
++	unsigned long flags;
++	spin_lock_irqsave(&die_notifier_lock, flags);
++	err = notifier_chain_register(&die_chain, nb);
++	spin_unlock_irqrestore(&die_notifier_lock, flags);
++	return err;
++}
++
++static inline void conditional_sti(struct pt_regs *regs)
++{
++	if (regs->eflags & X86_EFLAGS_IF)
++		local_irq_enable();
++}
++
++static int kstack_depth_to_print = 10;
++
++#ifdef CONFIG_KALLSYMS
++#include <linux/kallsyms.h> 
++int printk_address(unsigned long address)
++{ 
++	unsigned long offset = 0, symsize;
++	const char *symname;
++	char *modname;
++	char *delim = ":"; 
++	char namebuf[128];
++
++	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); 
++	if (!symname) 
++		return printk("[<%016lx>]", address);
++	if (!modname) 
++		modname = delim = ""; 		
++        return printk("<%016lx>{%s%s%s%s%+ld}",
++		      address,delim,modname,delim,symname,offset); 
++} 
++#else
++int printk_address(unsigned long address)
++{ 
++	return printk("[<%016lx>]", address);
++} 
++#endif
++
++static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
++					unsigned *usedp, const char **idp)
++{
++	static const char ids[N_EXCEPTION_STACKS][8] = {
++		[DEBUG_STACK - 1] = "#DB",
++		[NMI_STACK - 1] = "NMI",
++		[DOUBLEFAULT_STACK - 1] = "#DF",
++		[STACKFAULT_STACK - 1] = "#SS",
++		[MCE_STACK - 1] = "#MC",
++	};
++	unsigned k;
++
++	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
++		unsigned long end;
++
++		end = per_cpu(init_tss, cpu).ist[k];
++		if (stack >= end)
++			continue;
++		if (stack >= end - EXCEPTION_STKSZ) {
++			if (*usedp & (1U << k))
++				break;
++			*usedp |= 1U << k;
++			*idp = ids[k];
++			return (unsigned long *)end;
++		}
++	}
++	return NULL;
++}
++
++/*
++ * x86-64 can have up to three kernel stacks:
++ * process stack
++ * interrupt stack
++ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
++ */
++
++void show_trace(unsigned long *stack)
++{
++	unsigned long addr;
++	const unsigned cpu = safe_smp_processor_id();
++	unsigned long *irqstack_end = (unsigned long *)cpu_pda[cpu].irqstackptr;
++	int i;
++	unsigned used = 0;
++
++	printk("\nCall Trace:");
++
++#define HANDLE_STACK(cond) \
++	do while (cond) { \
++		addr = *stack++; \
++		if (kernel_text_address(addr)) { \
++			/* \
++			 * If the address is either in the text segment of the \
++			 * kernel, or in the region which contains vmalloc'ed \
++			 * memory, it *may* be the address of a calling \
++			 * routine; if so, print it so that someone tracing \
++			 * down the cause of the crash will be able to figure \
++			 * out the call path that was taken. \
++			 */ \
++			i += printk_address(addr); \
++			if (i > 50) { \
++				printk("\n       "); \
++				i = 0; \
++			} \
++			else \
++				i += printk(" "); \
++		} \
++	} while (0)
++
++	for(i = 0; ; ) {
++		const char *id;
++		unsigned long *estack_end;
++		estack_end = in_exception_stack(cpu, (unsigned long)stack,
++						&used, &id);
++
++		if (estack_end) {
++			i += printk(" <%s> ", id);
++			HANDLE_STACK (stack < estack_end);
++			i += printk(" <EOE> ");
++			stack = (unsigned long *) estack_end[-2];
++			continue;
++		}
++		if (irqstack_end) {
++			unsigned long *irqstack;
++			irqstack = irqstack_end -
++				(IRQSTACKSIZE - 64) / sizeof(*irqstack);
++
++			if (stack >= irqstack && stack < irqstack_end) {
++				i += printk(" <IRQ> ");
++				HANDLE_STACK (stack < irqstack_end);
++				stack = (unsigned long *) (irqstack_end[-1]);
++				irqstack_end = NULL;
++				i += printk(" <EOI> ");
++				continue;
++			}
++		}
++		break;
++	}
++
++	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
++#undef HANDLE_STACK
++	printk("\n");
++}
++
++void show_stack(struct task_struct *tsk, unsigned long * rsp)
++{
++	unsigned long *stack;
++	int i;
++	const int cpu = safe_smp_processor_id();
++	unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
++	unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);    
++
++	// debugging aid: "show_stack(NULL, NULL);" prints the
++	// back trace for this cpu.
++
++	if (rsp == NULL) {
++		if (tsk)
++			rsp = (unsigned long *)tsk->thread.rsp;
++		else
++			rsp = (unsigned long *)&rsp;
++	}
++
++	stack = rsp;
++	for(i=0; i < kstack_depth_to_print; i++) {
++		if (stack >= irqstack && stack <= irqstack_end) {
++			if (stack == irqstack_end) {
++				stack = (unsigned long *) (irqstack_end[-1]);
++				printk(" <EOI> ");
++			}
++		} else {
++		if (((long) stack & (THREAD_SIZE-1)) == 0)
++			break;
++		}
++		if (i && ((i % 4) == 0))
++			printk("\n       ");
++		printk("%016lx ", *stack++);
++		touch_nmi_watchdog();
++	}
++	show_trace((unsigned long *)rsp);
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++	unsigned long dummy;
++	show_trace(&dummy);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++	int i;
++	int in_kernel = (regs->cs & 3) == 0;
++	unsigned long rsp;
++	const int cpu = safe_smp_processor_id(); 
++	struct task_struct *cur = cpu_pda[cpu].pcurrent; 
++
++		rsp = regs->rsp;
++
++	printk("CPU %d ", cpu);
++	__show_regs(regs);
++	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
++		cur->comm, cur->pid, cur->thread_info, cur);
++
++	/*
++	 * When in-kernel, we also print out the stack and code at the
++	 * time of the fault..
++	 */
++	if (in_kernel) {
++
++		printk("Stack: ");
++		show_stack(NULL, (unsigned long*)rsp);
++
++		printk("\nCode: ");
++		if(regs->rip < PAGE_OFFSET)
++			goto bad;
++
++		for(i=0;i<20;i++)
++		{
++			unsigned char c;
++			if(__get_user(c, &((unsigned char*)regs->rip)[i])) {
++bad:
++				printk(" Bad RIP value.");
++				break;
++			}
++			printk("%02x ", c);
++		}
++	}
++	printk("\n");
++}	
++
++void handle_BUG(struct pt_regs *regs)
++{ 
++	struct bug_frame f;
++	char tmp;
++
++	if (regs->cs & 3)
++		return; 
++	if (__copy_from_user(&f, (struct bug_frame *) regs->rip, 
++			     sizeof(struct bug_frame)))
++		return; 
++	if ((unsigned long)f.filename < __PAGE_OFFSET || 
++	    f.ud2[0] != 0x0f || f.ud2[1] != 0x0b) 
++		return;
++	if (__get_user(tmp, f.filename))
++		f.filename = "unmapped filename"; 
++	printk("----------- [cut here ] --------- [please bite here ] ---------\n");
++	printk(KERN_ALERT "Kernel BUG at %.50s:%d\n", f.filename, f.line);
++} 
++
++#ifdef CONFIG_BUG
++void out_of_line_bug(void)
++{ 
++	BUG(); 
++} 
++#endif
++
++static DEFINE_SPINLOCK(die_lock);
++static int die_owner = -1;
++
++void oops_begin(void)
++{
++	int cpu = safe_smp_processor_id(); 
++	/* racy, but better than risking deadlock. */ 
++	local_irq_disable();
++	if (!spin_trylock(&die_lock)) { 
++		if (cpu == die_owner) 
++			/* nested oops. should stop eventually */;
++		else
++			spin_lock(&die_lock); 
++	}
++	die_owner = cpu; 
++	console_verbose();
++	bust_spinlocks(1); 
++}
++
++void oops_end(void)
++{ 
++	die_owner = -1;
++	bust_spinlocks(0); 
++	spin_unlock(&die_lock); 
++	if (panic_on_oops)
++		panic("Oops"); 
++} 
++
++void __die(const char * str, struct pt_regs * regs, long err)
++{
++	static int die_counter;
++	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
++#ifdef CONFIG_PREEMPT
++	printk("PREEMPT ");
++#endif
++#ifdef CONFIG_SMP
++	printk("SMP ");
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++	printk("DEBUG_PAGEALLOC");
++#endif
++	printk("\n");
++	notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
++	show_registers(regs);
++	/* Executive summary in case the oops scrolled away */
++	printk(KERN_ALERT "RIP ");
++	printk_address(regs->rip); 
++	printk(" RSP <%016lx>\n", regs->rsp); 
++}
++
++void die(const char * str, struct pt_regs * regs, long err)
++{
++	oops_begin();
++	handle_BUG(regs);
++	__die(str, regs, err);
++	oops_end();
++	do_exit(SIGSEGV); 
++}
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
++{
++	if (!(regs->eflags & VM_MASK) && (regs->cs == __KERNEL_CS))
++		die(str, regs, err);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++void die_nmi(char *str, struct pt_regs *regs)
++{
++	oops_begin();
++	/*
++	 * We are in trouble anyway, lets at least try
++	 * to get a message out.
++	 */
++	printk(str, safe_smp_processor_id());
++	show_registers(regs);
++	if (panic_on_timeout || panic_on_oops)
++		panic("nmi watchdog");
++	printk("console shuts up ...\n");
++	oops_end();
++	do_exit(SIGSEGV);
++}
++#endif
++
++static void do_trap(int trapnr, int signr, char *str, 
++			   struct pt_regs * regs, long error_code, siginfo_t *info)
++{
++	conditional_sti(regs);
++
++#ifdef CONFIG_CHECKING
++       { 
++               unsigned long gs; 
++               struct x8664_pda *pda = cpu_pda + safe_smp_processor_id(); 
++               rdmsrl(MSR_GS_BASE, gs); 
++               if (gs != (unsigned long)pda) { 
++                       wrmsrl(MSR_GS_BASE, pda); 
++                       printk("%s: wrong gs %lx expected %p rip %lx\n", str, gs, pda,
++			      regs->rip);
++               }
++       }
++#endif
++
++	if ((regs->cs & 3)  != 0) { 
++		struct task_struct *tsk = current;
++
++		if (exception_trace && unhandled_signal(tsk, signr))
++			printk(KERN_INFO
++			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
++			       tsk->comm, tsk->pid, str,
++			       regs->rip,regs->rsp,error_code); 
++
++		tsk->thread.error_code = error_code;
++		tsk->thread.trap_no = trapnr;
++		if (info)
++			force_sig_info(signr, info, tsk);
++		else
++			force_sig(signr, tsk);
++		return;
++	}
++
++
++	/* kernel trap */ 
++	{	     
++		const struct exception_table_entry *fixup;
++		fixup = search_exception_tables(regs->rip);
++		if (fixup) {
++			regs->rip = fixup->fixup;
++		} else	
++			die(str, regs, error_code);
++		return;
++	}
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++							== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++	siginfo_t info; \
++	info.si_signo = signr; \
++	info.si_errno = 0; \
++	info.si_code = sicode; \
++	info.si_addr = (void __user *)siaddr; \
++	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++							== NOTIFY_STOP) \
++		return; \
++	do_trap(trapnr, signr, str, regs, error_code, &info); \
++}
++
++DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
++DO_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->rip)
++DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
++DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR(18, SIGSEGV, "reserved", reserved)
++DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
++DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
++
++asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
++{
++	conditional_sti(regs);
++
++#ifdef CONFIG_CHECKING
++       { 
++               unsigned long gs; 
++               struct x8664_pda *pda = cpu_pda + safe_smp_processor_id(); 
++               rdmsrl(MSR_GS_BASE, gs); 
++               if (gs != (unsigned long)pda) { 
++                       wrmsrl(MSR_GS_BASE, pda); 
++		       oops_in_progress++;
++                       printk("general protection handler: wrong gs %lx expected %p\n", gs, pda);
++		       oops_in_progress--;
++               }
++       }
++#endif
++
++	if ((regs->cs & 3)!=0) { 
++		struct task_struct *tsk = current;
++
++		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
++			printk(KERN_INFO
++		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
++			       tsk->comm, tsk->pid,
++			       regs->rip,regs->rsp,error_code); 
++
++		tsk->thread.error_code = error_code;
++		tsk->thread.trap_no = 13;
++		force_sig(SIGSEGV, tsk);
++		return;
++	} 
++
++	/* kernel gp */
++	{
++		const struct exception_table_entry *fixup;
++		fixup = search_exception_tables(regs->rip);
++		if (fixup) {
++			regs->rip = fixup->fixup;
++			return;
++		}
++		if (notify_die(DIE_GPF, "general protection fault", regs,
++					error_code, 13, SIGSEGV) == NOTIFY_STOP)
++			return;
++		die("general protection fault", regs, error_code);
++	}
++}
++
++static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
++	printk("You probably have a hardware problem with your RAM chips\n");
++
++#if 0 /* XEN */
++	/* Clear and disable the memory parity error line. */
++	reason = (reason & 0xf) | 4;
++	outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static void io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++	printk("NMI: IOCK error (debug interrupt?)\n");
++	show_registers(regs);
++
++#if 0 /* XEN */
++	/* Re-enable the IOCK line, wait for a few seconds */
++	reason = (reason & 0xf) | 8;
++	outb(reason, 0x61);
++	mdelay(2000);
++	reason &= ~8;
++	outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
++	printk("Dazed and confused, but trying to continue\n");
++	printk("Do you have a strange power saving mode enabled?\n");
++}
++
++/* Runs on IST stack. This code must keep interrupts off all the time.
++   Nested NMIs are prevented by the CPU. */
++asmlinkage void default_do_nmi(struct pt_regs *regs)
++{
++	unsigned char reason = 0;
++
++	/* Only the BSP gets external NMIs from the system.  */
++	if (!smp_processor_id())
++		reason = get_nmi_reason();
++
++	if (!(reason & 0xc0)) {
++		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
++								== NOTIFY_STOP)
++			return;
++#ifdef CONFIG_X86_LOCAL_APIC
++		/*
++		 * Ok, so this is none of the documented NMI sources,
++		 * so it must be the NMI watchdog.
++		 */
++		if (nmi_watchdog > 0) {
++			nmi_watchdog_tick(regs,reason);
++			return;
++		}
++#endif
++		unknown_nmi_error(reason, regs);
++		return;
++	}
++	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
++		return; 
++
++	/* AK: following checks seem to be broken on modern chipsets. FIXME */
++
++	if (reason & 0x80)
++		mem_parity_error(reason, regs);
++	if (reason & 0x40)
++		io_check_error(reason, regs);
++}
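
For context, the reason byte tested above comes from get_nmi_reason(), which conventionally reads the NMI status bits from I/O port 0x61; bit 7 (0x80) reports a memory parity/ECC error and bit 6 (0x40) an I/O check error. A tiny stand-alone decoder (illustrative only, not part of the patch, names invented) looks like this:

#include <stdio.h>

/* Mirror of the checks at the end of default_do_nmi(); a real handler
 * may of course see both bits set at once and act on each. */
static const char *nmi_reason_name(unsigned char reason)
{
	if (reason & 0x80)
		return "memory parity error";
	if (reason & 0x40)
		return "I/O check error";
	return "unknown reason";
}

int main(void)
{
	static const unsigned char samples[] = { 0x80, 0x40, 0x00 };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		printf("reason 0x%02x -> %s\n", samples[i],
		       nmi_reason_name(samples[i]));
	return 0;
}
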
++
++asmlinkage void do_int3(struct pt_regs * regs, long error_code)
++{
++	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
++		return;
++	}
++	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
++	return;
++}
++
++/* Help handler running on IST stack to switch back to user stack
++   for scheduling or signal handling. The actual stack switch is done in
++   entry.S */
++asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
++{
++	struct pt_regs *regs = eregs;
++	/* Did already sync */
++	if (eregs == (struct pt_regs *)eregs->rsp)
++		;
++	/* Exception from user space */
++	else if (eregs->cs & 3)
++		regs = ((struct pt_regs *)current->thread.rsp0) - 1;
++	/* Exception from kernel and interrupts are enabled. Move to
++ 	   kernel process stack. */
++	else if (eregs->eflags & X86_EFLAGS_IF)
++		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
++	if (eregs != regs)
++		*regs = *eregs;
++	return regs;
++}
++
++/* runs on IST stack. */
++asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
++{
++	unsigned long condition;
++	struct task_struct *tsk = current;
++	siginfo_t info;
++
++#ifdef CONFIG_CHECKING
++       { 
++	       /* RED-PEN interaction with debugger - could destroy gs */
++               unsigned long gs; 
++               struct x8664_pda *pda = cpu_pda + safe_smp_processor_id(); 
++               rdmsrl(MSR_GS_BASE, gs); 
++               if (gs != (unsigned long)pda) { 
++                       wrmsrl(MSR_GS_BASE, pda); 
++                       printk("debug handler: wrong gs %lx expected %p\n", gs, pda);
++               }
++       }
++#endif
++
++	asm("movq %%db6,%0" : "=r" (condition));
++
++	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++						SIGTRAP) == NOTIFY_STOP)
++		return;
++
++	conditional_sti(regs);
++
++	/* Mask out spurious debug traps due to lazy DR7 setting */
++	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++		if (!tsk->thread.debugreg7) { 
++			goto clear_dr7;
++		}
++	}
++
++	tsk->thread.debugreg6 = condition;
++
++	/* Mask out spurious TF errors due to lazy TF clearing */
++	if (condition & DR_STEP) {
++		/*
++		 * The TF error should be masked out only if the current
++		 * process is not traced and if the TRAP flag has been set
++		 * previously by a tracing process (condition detected by
++		 * the PT_DTRACE flag); remember that the i386 TRAP flag
++		 * can be modified by the process itself in user mode,
++		 * allowing programs to debug themselves without the ptrace()
++		 * interface.
++		 */
++		if ((regs->cs & 3) == 0)
++			goto clear_TF_reenable;
++		/*
++		 * Was the TF flag set by a debugger? If so, clear it now,
++		 * so that register information is correct.
++		 */
++		if (tsk->ptrace & PT_DTRACE) {
++			regs->eflags &= ~TF_MASK;
++			tsk->ptrace &= ~PT_DTRACE;
++		}
++	}
++
++	/* Ok, finally something we can handle */
++	tsk->thread.trap_no = 1;
++	tsk->thread.error_code = error_code;
++	info.si_signo = SIGTRAP;
++	info.si_errno = 0;
++	info.si_code = TRAP_BRKPT;
++	if ((regs->cs & 3) == 0) 
++		goto clear_dr7; 
++
++	info.si_addr = (void __user *)regs->rip;
++	force_sig_info(SIGTRAP, &info, tsk);	
++clear_dr7:
++	asm volatile("movq %0,%%db7"::"r"(0UL));
++	return;
++
++clear_TF_reenable:
++	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++	regs->eflags &= ~TF_MASK;
++}
++
++static int kernel_math_error(struct pt_regs *regs, char *str)
++{
++	const struct exception_table_entry *fixup;
++	fixup = search_exception_tables(regs->rip);
++	if (fixup) {
++		regs->rip = fixup->fixup;
++		return 1;
++	}
++	notify_die(DIE_GPF, str, regs, 0, 16, SIGFPE);
++	/* Illegal floating point operation in the kernel */
++	die(str, regs, 0);
++	return 0;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++asmlinkage void do_coprocessor_error(struct pt_regs *regs)
++{
++	void __user *rip = (void __user *)(regs->rip);
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short cwd, swd;
++
++	conditional_sti(regs);
++	if ((regs->cs & 3) == 0 &&
++	    kernel_math_error(regs, "kernel x87 math error"))
++		return;
++
++	/*
++	 * Save the info for the exception handler and clear the error.
++	 */
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 16;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = rip;
++	/*
++	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
++	 * status.  0x3f is the exception bits in these regs, 0x200 is the
++	 * C1 reg you need in case of a stack fault, 0x040 is the stack
++	 * fault bit.  We should only be taking one exception at a time,
++	 * so if this combination doesn't produce any single exception,
++	 * then we have a bad program that isn't synchronizing its FPU usage
++	 * and it will suffer the consequences since we won't be able to
++	 * fully reproduce the context of the exception
++	 */
++	cwd = get_fpu_cwd(task);
++	swd = get_fpu_swd(task);
++	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
++		case 0x000:
++		default:
++			break;
++		case 0x001: /* Invalid Op */
++		case 0x041: /* Stack Fault */
++		case 0x241: /* Stack Fault | Direction */
++			info.si_code = FPE_FLTINV;
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
++			break;
++	}
++	force_sig_info(SIGFPE, &info, task);
++}
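
The cwd/swd decoding above is compact enough to be worth a worked example. The stand-alone sketch below (illustrative only, not part of the patch) applies the same mask — unmasked exception bits are (~cwd & swd) & 0x3f, plus the stack-fault (0x040) and C1 (0x200) bits — to one sample register pair:

#include <stdio.h>

/* Same switch as do_coprocessor_error(), returning the si_code name. */
static const char *fpu_sicode(unsigned short cwd, unsigned short swd)
{
	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
	case 0x001:			/* Invalid Op */
	case 0x041:			/* Stack Fault */
	case 0x241:			/* Stack Fault | Direction */
		return "FPE_FLTINV";
	case 0x002:			/* Denormalize */
	case 0x010:			/* Underflow */
		return "FPE_FLTUND";
	case 0x004:			/* Zero Divide */
		return "FPE_FLTDIV";
	case 0x008:			/* Overflow */
		return "FPE_FLTOVF";
	case 0x020:			/* Precision */
		return "FPE_FLTRES";
	default:
		return "__SI_FAULT";
	}
}

int main(void)
{
	/* cwd 0x037b leaves only the zero-divide exception unmasked;
	 * swd 0x0004 reports that a zero divide just occurred. */
	printf("%s\n", fpu_sicode(0x037b, 0x0004));	/* prints FPE_FLTDIV */
	return 0;
}
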
++
++asmlinkage void bad_intr(void)
++{
++	printk("bad interrupt"); 
++}
++
++asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
++{
++	void __user *rip = (void __user *)(regs->rip);
++	struct task_struct * task;
++	siginfo_t info;
++	unsigned short mxcsr;
++
++	conditional_sti(regs);
++	if ((regs->cs & 3) == 0 &&
++        	kernel_math_error(regs, "kernel simd math error"))
++		return;
++
++	/*
++	 * Save the info for the exception handler and clear the error.
++	 */
++	task = current;
++	save_init_fpu(task);
++	task->thread.trap_no = 19;
++	task->thread.error_code = 0;
++	info.si_signo = SIGFPE;
++	info.si_errno = 0;
++	info.si_code = __SI_FAULT;
++	info.si_addr = rip;
++	/*
++	 * The SIMD FPU exceptions are handled a little differently, as there
++	 * is only a single status/control register.  Thus, to determine which
++	 * unmasked exception was caught we must mask the exception mask bits
++	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++	 */
++	mxcsr = get_fpu_mxcsr(task);
++	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++		case 0x000:
++		default:
++			break;
++		case 0x001: /* Invalid Op */
++			info.si_code = FPE_FLTINV;
++			break;
++		case 0x002: /* Denormalize */
++		case 0x010: /* Underflow */
++			info.si_code = FPE_FLTUND;
++			break;
++		case 0x004: /* Zero Divide */
++			info.si_code = FPE_FLTDIV;
++			break;
++		case 0x008: /* Overflow */
++			info.si_code = FPE_FLTOVF;
++			break;
++		case 0x020: /* Precision */
++			info.si_code = FPE_FLTRES;
++			break;
++	}
++	force_sig_info(SIGFPE, &info, task);
++}
++
++asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
++{
++}
++
++#if 0
++asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
++{
++}
++#endif
++
++/*
++ *  'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ */
++asmlinkage void math_state_restore(void)
++{
++	struct task_struct *me = current;
++        /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
++
++	if (!used_math())
++		init_fpu(me);
++	restore_fpu_checking(&me->thread.i387.fxsave);
++	me->thread_info->status |= TS_USEDFPU;
++}
++
++void do_call_debug(struct pt_regs *regs) 
++{ 
++	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT); 
++}
++
++
++/*
++ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
++ * specify <dpl>|4 in the second field.
++ */
++static trap_info_t trap_table[] = {
++        {  0, 0|4, (__KERNEL_CS|0x3), (unsigned long)divide_error               },
++        {  1, 0|4, (__KERNEL_CS|0x3), (unsigned long)debug                      },
++        {  3, 3|4, (__KERNEL_CS|0x3), (unsigned long)int3                       },
++        {  4, 3|4, (__KERNEL_CS|0x3), (unsigned long)overflow                   },
++        {  5, 3|4, (__KERNEL_CS|0x3), (unsigned long)bounds                     },
++        {  6, 0|4, (__KERNEL_CS|0x3), (unsigned long)invalid_op                 },
++        {  7, 0|4, (__KERNEL_CS|0x3), (unsigned long)device_not_available       },
++        {  9, 0|4, (__KERNEL_CS|0x3), (unsigned long)coprocessor_segment_overrun},
++        { 10, 0|4, (__KERNEL_CS|0x3), (unsigned long)invalid_TSS                },
++        { 11, 0|4, (__KERNEL_CS|0x3), (unsigned long)segment_not_present        },
++        { 12, 0|4, (__KERNEL_CS|0x3), (unsigned long)stack_segment              },
++        { 13, 0|4, (__KERNEL_CS|0x3), (unsigned long)general_protection         },
++        { 14, 0|4, (__KERNEL_CS|0x3), (unsigned long)page_fault                 },
++        { 15, 0|4, (__KERNEL_CS|0x3), (unsigned long)spurious_interrupt_bug     },
++        { 16, 0|4, (__KERNEL_CS|0x3), (unsigned long)coprocessor_error          },
++        { 17, 0|4, (__KERNEL_CS|0x3), (unsigned long)alignment_check            },
++#ifdef CONFIG_X86_MCE
++        { 18, 0|4, (__KERNEL_CS|0x3), (unsigned long)machine_check              },
++#endif
++        { 19, 0|4, (__KERNEL_CS|0x3), (unsigned long)simd_coprocessor_error     },
++#ifdef CONFIG_IA32_EMULATION
++	{ IA32_SYSCALL_VECTOR, 3|4, (__KERNEL_CS|0x3), (unsigned long)ia32_syscall},
++#endif
++        {  0, 0,           0, 0                                              }
++};
++
++void __init trap_init(void)
++{
++        int ret;
++
++        ret = HYPERVISOR_set_trap_table(trap_table);
++        
++        if (ret) 
++                printk("HYPERVISOR_set_trap_table failed: error %d\n",
++                       ret);
++
++#ifdef CONFIG_IA32_EMULATION
++	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
++#endif
++       
++	/*
++	 * Should be a barrier for any external CPU state.
++	 */
++	cpu_init();
++}
++
++void smp_trap_init(trap_info_t *trap_ctxt)
++{
++	trap_info_t *t = trap_table;
++	int i;
++
++	for (i = 0; i < 256; i++) {
++		trap_ctxt[i].vector = i;
++		trap_ctxt[i].cs     = FLAT_KERNEL_CS;
++	}
++
++	for (t = trap_table; t->address; t++) {
++		trap_ctxt[t->vector].flags = t->flags;
++		trap_ctxt[t->vector].cs = t->cs;
++		trap_ctxt[t->vector].address = t->address;
++	}
++}
++
++
++/* Actual parsing is done early in setup.c. */
++static int __init oops_dummy(char *s)
++{ 
++	panic_on_oops = 1;
++	return -1; 
++} 
++__setup("oops=", oops_dummy); 
++
++static int __init kstack_setup(char *s)
++{
++	kstack_depth_to_print = simple_strtoul(s,NULL,0);
++	return 0;
++}
++__setup("kstack=", kstack_setup);
++
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/vsyscall.c linux-2.6.12-xen/arch/xen/x86_64/kernel/vsyscall.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/vsyscall.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/vsyscall.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,241 @@
++/*
++ *  linux/arch/x86_64/kernel/vsyscall.c
++ *
++ *  Copyright (C) 2001 Andrea Arcangeli <andrea at suse.de> SuSE
++ *  Copyright 2003 Andi Kleen, SuSE Labs.
++ *
++ *  Thanks to hpa at transmeta.com for some useful hint.
++ *  Special thanks to Ingo Molnar for his early experience with
++ *  a different vsyscall implementation for Linux/IA32 and for the name.
++ *
++ *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
++ *  at virtual address -10Mbyte+1024bytes etc... There are at most 4
++ *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
++ *  jumping out of line if necessary. We cannot add more with this
++ *  mechanism because older kernels won't return -ENOSYS.
++ *  If we want more than four we need a vDSO.
++ *
++ *  Note: the concept clashes with user mode linux. If you use UML and
++ *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
++ */
++
++#include <linux/time.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/seqlock.h>
++#include <linux/jiffies.h>
++#include <linux/sysctl.h>
++
++#include <asm/vsyscall.h>
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/fixmap.h>
++#include <asm/errno.h>
++#include <asm/io.h>
++
++#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
++#define force_inline __attribute__((always_inline)) inline
++
++int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
++seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
++
++#include <asm/unistd.h>
++
++static force_inline void timeval_normalize(struct timeval * tv)
++{
++	time_t __sec;
++
++	__sec = tv->tv_usec / 1000000;
++	if (__sec) {
++		tv->tv_usec %= 1000000;
++		tv->tv_sec += __sec;
++	}
++}
++
++static force_inline void do_vgettimeofday(struct timeval * tv)
++{
++	long sequence, t;
++	unsigned long sec, usec;
++
++	do {
++		sequence = read_seqbegin(&__xtime_lock);
++		
++		sec = __xtime.tv_sec;
++		usec = (__xtime.tv_nsec / 1000) +
++			(__jiffies - __wall_jiffies) * (1000000 / HZ);
++
++		if (__vxtime.mode != VXTIME_HPET) {
++			sync_core();
++			rdtscll(t);
++			if (t < __vxtime.last_tsc)
++				t = __vxtime.last_tsc;
++			usec += ((t - __vxtime.last_tsc) *
++				 __vxtime.tsc_quot) >> 32;
++			/* See comment in x86_64 do_gettimeofday. */
++		} else {
++			usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
++				  __vxtime.last) * __vxtime.quot) >> 32;
++		}
++	} while (read_seqretry(&__xtime_lock, sequence));
++
++	tv->tv_sec = sec + usec / 1000000;
++	tv->tv_usec = usec % 1000000;
++}
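
The loop above is the classic seqlock read side: the reader snapshots the time variables and retries whenever the writer's sequence count changed underneath it. A minimal stand-alone illustration of the same pattern (not part of the patch; it uses C11 atomics instead of the kernel's seqlock_t, and the names are invented) is:

#include <stdatomic.h>
#include <stdio.h>

static struct {
	atomic_uint seq;	/* even = stable, odd = update in progress */
	long sec, usec;
} clk;				/* static storage, so zero-initialised */

static void writer_update(long sec, long usec)
{
	atomic_fetch_add(&clk.seq, 1);	/* mark update in progress */
	clk.sec = sec;
	clk.usec = usec;
	atomic_fetch_add(&clk.seq, 1);	/* mark update complete */
}

static void reader_snapshot(long *sec, long *usec)
{
	unsigned int start;

	do {
		start = atomic_load(&clk.seq);
		*sec = clk.sec;
		*usec = clk.usec;
	} while ((start & 1) || start != atomic_load(&clk.seq));
}

int main(void)
{
	long sec, usec;

	writer_update(1141601790L, 123456L);	/* arbitrary sample time */
	reader_snapshot(&sec, &usec);
	printf("%ld.%06ld\n", sec, usec);
	return 0;
}
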
++
++/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
++static force_inline void do_get_tz(struct timezone * tz)
++{
++	*tz = __sys_tz;
++}
++
++static force_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
++{
++	int ret;
++	asm volatile("vsysc2: syscall"
++		: "=a" (ret)
++		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
++	return ret;
++}
++
++static force_inline long time_syscall(long *t)
++{
++	long secs;
++	asm volatile("vsysc1: syscall"
++		: "=a" (secs)
++		: "0" (__NR_time),"D" (t) : __syscall_clobber);
++	return secs;
++}
++
++static int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
++{
++	if (unlikely(!__sysctl_vsyscall))
++		return gettimeofday(tv,tz);
++	if (tv)
++		do_vgettimeofday(tv);
++	if (tz)
++		do_get_tz(tz);
++	return 0;
++}
++
++/* This will break when the xtime seconds get inaccurate, but that is
++ * unlikely */
++static time_t __vsyscall(1) vtime(time_t *t)
++{
++	if (unlikely(!__sysctl_vsyscall))
++		return time_syscall(t);
++	else if (t)
++		*t = __xtime.tv_sec;		
++	return __xtime.tv_sec;
++}
++
++static long __vsyscall(2) venosys_0(void)
++{
++	return -ENOSYS;
++}
++
++static long __vsyscall(3) venosys_1(void)
++{
++	return -ENOSYS;
++}
++
++#ifdef CONFIG_SYSCTL
++
++#define SYSCALL 0x050f
++#define NOP2    0x9090
++
++/*
++ * NOP out syscall in vsyscall page when not needed.
++ */
++static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
++                        void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++	extern u16 vsysc1, vsysc2;
++	u16 *map1, *map2;
++	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
++	if (!write)
++		return ret;
++	/* gcc has some trouble with __va(__pa()), so just do it this
++	   way. */
++	map1 = ioremap(__pa_symbol(&vsysc1), 2);
++	if (!map1)
++		return -ENOMEM;
++	map2 = ioremap(__pa_symbol(&vsysc2), 2);
++	if (!map2) {
++		ret = -ENOMEM;
++		goto out;
++	}
++	if (!sysctl_vsyscall) {
++		*map1 = SYSCALL;
++		*map2 = SYSCALL;
++	} else {
++		*map1 = NOP2;
++		*map2 = NOP2;
++	}
++	iounmap(map2);
++out:
++	iounmap(map1);
++	return ret;
++}
++
++static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
++				void __user *oldval, size_t __user *oldlenp,
++				void __user *newval, size_t newlen,
++				void **context)
++{
++	return -ENOSYS;
++}
++
++static ctl_table kernel_table2[] = {
++	{ .ctl_name = 99, .procname = "vsyscall64",
++	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
++	  .strategy = vsyscall_sysctl_nostrat,
++	  .proc_handler = vsyscall_sysctl_change },
++	{ 0, }
++};
++
++static ctl_table kernel_root_table2[] = {
++	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
++	  .child = kernel_table2 },
++	{ 0 },
++};
++
++#endif
++
++static void __init map_vsyscall(void)
++{
++	extern char __vsyscall_0;
++	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
++
++	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
++}
++
++#ifdef CONFIG_XEN
++static void __init map_vsyscall_user(void)
++{
++	extern void __set_fixmap_user(enum fixed_addresses, unsigned long, pgprot_t);
++	extern char __vsyscall_0;
++	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
++
++	__set_fixmap_user(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
++}
++#endif
++
++static int __init vsyscall_init(void)
++{
++	BUG_ON(((unsigned long) &vgettimeofday !=
++			VSYSCALL_ADDR(__NR_vgettimeofday)));
++	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
++	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
++	map_vsyscall();
++#ifdef CONFIG_XEN
++	map_vsyscall_user();
++	sysctl_vsyscall = 0; /* disable vgettimeofday() */
++#endif
++#ifdef CONFIG_SYSCTL
++	register_sysctl_table(kernel_root_table2, 0);
++#endif
++	return 0;
++}
++
++__initcall(vsyscall_init);
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/x8664_ksyms.c linux-2.6.12-xen/arch/xen/x86_64/kernel/x8664_ksyms.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/x8664_ksyms.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/x8664_ksyms.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,208 @@
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/smp.h>
++#include <linux/user.h>
++#include <linux/sched.h>
++#include <linux/in6.h>
++#include <linux/interrupt.h>
++#include <linux/smp_lock.h>
++#include <linux/pm.h>
++#include <linux/pci.h>
++#include <linux/apm_bios.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/syscalls.h>
++#include <linux/tty.h>
++#include <linux/ioctl32.h>
++
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/uaccess.h>
++#include <asm/checksum.h>
++#include <asm/io.h>
++#include <asm/delay.h>
++#include <asm/irq.h>
++#include <asm/mmx.h>
++#include <asm/desc.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/nmi.h>
++#include <asm/kdebug.h>
++#include <asm/unistd.h>
++#include <asm/tlbflush.h>
++#include <asm/kdebug.h>
++
++extern spinlock_t rtc_lock;
++
++#ifdef CONFIG_SMP
++extern void __write_lock_failed(rwlock_t *rw);
++extern void __read_lock_failed(rwlock_t *rw);
++#endif
++
++#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
++extern struct drive_info_struct drive_info;
++EXPORT_SYMBOL(drive_info);
++#endif
++
++extern unsigned long get_cmos_time(void);
++
++/* platform dependent support */
++EXPORT_SYMBOL(boot_cpu_data);
++//EXPORT_SYMBOL(dump_fpu);
++EXPORT_SYMBOL(__ioremap);
++EXPORT_SYMBOL(ioremap_nocache);
++EXPORT_SYMBOL(iounmap);
++EXPORT_SYMBOL(enable_irq);
++EXPORT_SYMBOL(disable_irq);
++EXPORT_SYMBOL(disable_irq_nosync);
++EXPORT_SYMBOL(probe_irq_mask);
++EXPORT_SYMBOL(kernel_thread);
++EXPORT_SYMBOL(pm_idle);
++EXPORT_SYMBOL(pm_power_off);
++EXPORT_SYMBOL(get_cmos_time);
++
++EXPORT_SYMBOL(__down_failed);
++EXPORT_SYMBOL(__down_failed_interruptible);
++EXPORT_SYMBOL(__down_failed_trylock);
++EXPORT_SYMBOL(__up_wakeup);
++/* Networking helper routines. */
++EXPORT_SYMBOL(csum_partial_copy_nocheck);
++EXPORT_SYMBOL(ip_compute_csum);
++/* Delay loops */
++EXPORT_SYMBOL(__udelay);
++EXPORT_SYMBOL(__ndelay);
++EXPORT_SYMBOL(__delay);
++EXPORT_SYMBOL(__const_udelay);
++
++EXPORT_SYMBOL(__get_user_1);
++EXPORT_SYMBOL(__get_user_2);
++EXPORT_SYMBOL(__get_user_4);
++EXPORT_SYMBOL(__get_user_8);
++EXPORT_SYMBOL(__put_user_1);
++EXPORT_SYMBOL(__put_user_2);
++EXPORT_SYMBOL(__put_user_4);
++EXPORT_SYMBOL(__put_user_8);
++
++EXPORT_SYMBOL(strpbrk);
++EXPORT_SYMBOL(strstr);
++
++EXPORT_SYMBOL(strncpy_from_user);
++EXPORT_SYMBOL(__strncpy_from_user);
++EXPORT_SYMBOL(clear_user);
++EXPORT_SYMBOL(__clear_user);
++EXPORT_SYMBOL(copy_user_generic);
++EXPORT_SYMBOL(copy_from_user);
++EXPORT_SYMBOL(copy_to_user);
++EXPORT_SYMBOL(copy_in_user);
++EXPORT_SYMBOL(strnlen_user);
++
++#ifdef CONFIG_PCI
++EXPORT_SYMBOL(pci_alloc_consistent);
++EXPORT_SYMBOL(pci_free_consistent);
++#endif
++
++#ifdef CONFIG_PCI
++EXPORT_SYMBOL(pci_mem_start);
++#endif
++
++EXPORT_SYMBOL(copy_page);
++EXPORT_SYMBOL(clear_page);
++
++EXPORT_SYMBOL(cpu_pda);
++#ifdef CONFIG_SMP
++EXPORT_SYMBOL(__write_lock_failed);
++EXPORT_SYMBOL(__read_lock_failed);
++
++EXPORT_SYMBOL(synchronize_irq);
++EXPORT_SYMBOL(smp_call_function);
++#endif
++
++#ifdef CONFIG_VT
++EXPORT_SYMBOL(screen_info);
++#endif
++
++EXPORT_SYMBOL(get_wchan);
++
++EXPORT_SYMBOL(rtc_lock);
++
++#ifdef CONFIG_X86_LOCAL_APIC
++EXPORT_SYMBOL_GPL(set_nmi_callback);
++EXPORT_SYMBOL_GPL(unset_nmi_callback);
++#endif
++
++/* Export string functions. We normally rely on gcc builtin for most of these,
++   but gcc sometimes decides not to inline them. */    
++#undef memcpy
++#undef memset
++#undef memmove
++#undef memchr
++#undef strlen
++#undef strncmp
++#undef strncpy
++#undef strchr	
++
++extern void * memset(void *,int,__kernel_size_t);
++extern size_t strlen(const char *);
++extern void * memmove(void * dest,const void *src,size_t count);
++extern void *memchr(const void *s, int c, size_t n);
++extern void * memcpy(void *,const void *,__kernel_size_t);
++extern void * __memcpy(void *,const void *,__kernel_size_t);
++
++EXPORT_SYMBOL(memset);
++EXPORT_SYMBOL(strlen);
++EXPORT_SYMBOL(memmove);
++EXPORT_SYMBOL(strncmp);
++EXPORT_SYMBOL(strncpy);
++EXPORT_SYMBOL(strchr);
++EXPORT_SYMBOL(strncat);
++EXPORT_SYMBOL(memchr);
++EXPORT_SYMBOL(strrchr);
++EXPORT_SYMBOL(strnlen);
++EXPORT_SYMBOL(memscan);
++EXPORT_SYMBOL(memcpy);
++EXPORT_SYMBOL(__memcpy);
++
++#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
++/* prototypes are wrong, these are assembly with custom calling conventions */
++extern void rwsem_down_read_failed_thunk(void);
++extern void rwsem_wake_thunk(void);
++extern void rwsem_downgrade_thunk(void);
++extern void rwsem_down_write_failed_thunk(void);
++EXPORT_SYMBOL(rwsem_down_read_failed_thunk);
++EXPORT_SYMBOL(rwsem_wake_thunk);
++EXPORT_SYMBOL(rwsem_downgrade_thunk);
++EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
++#endif
++
++EXPORT_SYMBOL(empty_zero_page);
++
++#ifdef CONFIG_HAVE_DEC_LOCK
++EXPORT_SYMBOL(_atomic_dec_and_lock);
++#endif
++
++EXPORT_SYMBOL(die_chain);
++EXPORT_SYMBOL(register_die_notifier);
++
++#ifdef CONFIG_SMP
++EXPORT_SYMBOL(cpu_sibling_map);
++EXPORT_SYMBOL(smp_num_siblings);
++#endif
++
++extern void do_softirq_thunk(void);
++EXPORT_SYMBOL(do_softirq_thunk);
++
++#ifdef CONFIG_BUG
++EXPORT_SYMBOL(out_of_line_bug);
++#endif
++
++EXPORT_SYMBOL(init_level4_pgt);
++
++extern unsigned long __supported_pte_mask;
++EXPORT_SYMBOL(__supported_pte_mask);
++
++#ifdef CONFIG_SMP
++EXPORT_SYMBOL(flush_tlb_page);
++#endif
++
++EXPORT_SYMBOL(cpu_khz);
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/xen_entry.S linux-2.6.12-xen/arch/xen/x86_64/kernel/xen_entry.S
+--- pristine-linux-2.6.12/arch/xen/x86_64/kernel/xen_entry.S	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/kernel/xen_entry.S	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,41 @@
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */                        
++/* Offsets into shared_info_t. */                
++#define evtchn_upcall_pending		0
++#define evtchn_upcall_mask		1
++
++#define sizeof_vcpu_shift		6
++
++#ifdef CONFIG_SMP
++//#define preempt_disable(reg)	incl threadinfo_preempt_count(reg)
++//#define preempt_enable(reg)	decl threadinfo_preempt_count(reg)
++#define preempt_disable(reg)
++#define preempt_enable(reg)
++#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%rbp)			; \
++				movq %gs:pda_cpunumber,reg		; \
++				shl  $32, reg				; \
++				shr  $32-sizeof_vcpu_shift,reg		; \
++				addq HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg)	preempt_enable(%rbp)			; \
++#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
++#else
++#define XEN_GET_VCPU_INFO(reg)	movq HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg)
++#define XEN_PUT_VCPU_INFO_fixup
++#endif
++
++#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
++#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
++#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
++				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
++    				XEN_PUT_VCPU_INFO(reg)
++#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
++				XEN_LOCKED_UNBLOCK_EVENTS(reg)		; \
++    				XEN_PUT_VCPU_INFO(reg)
++#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
++
++EVENT_MASK      = (CS+4)
++VGCF_IN_SYSCALL = (1<<8)
++        
++	
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/Makefile linux-2.6.12-xen/arch/xen/x86_64/Makefile
+--- pristine-linux-2.6.12/arch/xen/x86_64/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,93 @@
++#
++# x86_64/Makefile
++#
++# This file is included by the global makefile so that you can add your own
++# architecture-specific flags and dependencies. Remember to do have actions
++# for "archclean" and "archdep" for cleaning up and making dependencies for
++# this architecture
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License.  See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (C) 1994 by Linus Torvalds
++#
++# 19990713  Artur Skawina <skawina at geocities.com>
++#           Added '-march' and '-mpreferred-stack-boundary' support
++# 20000913  Pavel Machek <pavel at suse.cz>
++#	    Converted for x86_64 architecture
++# 20010105  Andi Kleen, add IA32 compiler.
++#           ....and later removed it again....
++# 20050205  Jun Nakajima <jun.nakajima at intel.com> 
++#           Modified for Xen
++#
++# $Id: Makefile,v 1.31 2002/03/22 15:56:07 ak Exp $
++
++#
++# early bootup linking needs 32bit. You can either use real 32bit tools
++# here or 64bit tools in 32bit mode.
++#
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++IA32_CC := $(CC) $(CPPFLAGS) -m32 -O2 -fomit-frame-pointer
++IA32_LD := $(LD) -m elf_i386
++IA32_AS := $(CC) $(AFLAGS) -m32 -Wa,--32 -traditional -c
++IA32_OBJCOPY := $(CROSS_COMPILE)objcopy
++IA32_CPP := $(CROSS_COMPILE)gcc -m32 -E
++export IA32_CC IA32_LD IA32_AS IA32_OBJCOPY IA32_CPP
++
++
++LDFLAGS		:= -m elf_x86_64
++#LDFLAGS_vmlinux := -e stext
++
++CHECKFLAGS      += -D__x86_64__ -m64
++
++cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
++CFLAGS += $(cflags-y)
++
++CFLAGS += -mno-red-zone
++CFLAGS += -mcmodel=kernel
++CFLAGS += -pipe
++# this makes reading assembly source easier, but produces worse code
++# actually it makes the kernel smaller too.
++CFLAGS += -fno-reorder-blocks	
++CFLAGS += -Wno-sign-compare
++ifneq ($(CONFIG_DEBUG_INFO),y)
++CFLAGS += -fno-asynchronous-unwind-tables
++# -fweb shrinks the kernel a bit, but the difference is very small
++# it also messes up debugging, so don't use it for now.
++#CFLAGS += $(call cc-option,-fweb)
++endif
++# -funit-at-a-time shrinks the kernel .text considerably
++# unfortunately it makes reading oopses harder.
++CFLAGS += $(call cc-option,-funit-at-a-time,)
++
++head-y := arch/xen/x86_64/kernel/head.o arch/xen/x86_64/kernel/head64.o arch/xen/x86_64/kernel/init_task.o
++
++libs-y 					+= arch/x86_64/lib/
++core-y					+= arch/xen/x86_64/kernel/ arch/xen/x86_64/mm/
++core-$(CONFIG_IA32_EMULATION)		+= arch/xen/x86_64/ia32/
++drivers-$(CONFIG_PCI)			+= arch/xen/x86_64/pci/
++drivers-$(CONFIG_OPROFILE)		+= arch/x86_64/oprofile/
++
++# for clean
++obj-	+= kernel/ mm/ pci/
++
++xenflags-y += -Iinclude/asm-xen/asm-x86_64/mach-xen
++
++CFLAGS += $(xenflags-y)
++AFLAGS += $(xenflags-y)
++
++prepare: include/asm-$(XENARCH)/asm_offsets.h
++CLEAN_FILES += include/asm-$(XENARCH)/asm_offsets.h
++
++arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
++	include/linux/version.h include/config/MARKER
++
++include/asm-$(XENARCH)/offset.h: arch/$(XENARCH)/kernel/asm-offsets.s
++	$(call filechk,gen-asm-offsets)
++
++include/asm-$(XENARCH)/asm_offsets.h: include/asm-$(XENARCH)/offset.h
++	ln -fsn offset.h include/asm-$(XENARCH)/asm_offsets.h
++	ln -fsn offset.h include/asm-$(XENARCH)/asm_offset.h
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/mm/fault.c linux-2.6.12-xen/arch/xen/x86_64/mm/fault.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/mm/fault.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/mm/fault.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,596 @@
++/*
++ *  linux/arch/x86-64/mm/fault.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <linux/config.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h>		/* For unblank_screen() */
++#include <linux/compiler.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <linux/kprobes.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgalloc.h>
++#include <asm/smp.h>
++#include <asm/tlbflush.h>
++#include <asm/proto.h>
++#include <asm/kdebug.h>
++#include <asm-generic/sections.h>
++#include <asm/kdebug.h>
++
++DEFINE_PER_CPU(pgd_t *, cur_pgd);
++
++void bust_spinlocks(int yes)
++{
++	int loglevel_save = console_loglevel;
++	if (yes) {
++		oops_in_progress = 1;
++	} else {
++#ifdef CONFIG_VT
++		unblank_screen();
++#endif
++		oops_in_progress = 0;
++		/*
++		 * OK, the message is on the console.  Now we call printk()
++		 * without oops_in_progress set so that printk will give klogd
++		 * a poke.  Hold onto your hats...
++		 */
++		console_loglevel = 15;		/* NMI oopser may have shut the console up */
++		printk(" ");
++		console_loglevel = loglevel_save;
++	}
++}
++
++/* Sometimes the CPU reports invalid exceptions on prefetch.
++   Check that here and ignore.
++   Opcode checker based on code by Richard Brunner */
++static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++				unsigned long error_code)
++{ 
++	unsigned char *instr;
++	int scan_more = 1;
++	int prefetch = 0; 
++	unsigned char *max_instr;
++
++	/* If it was an exec fault, ignore it */
++	if (error_code & (1<<4))
++		return 0;
++	
++	instr = (unsigned char *)convert_rip_to_linear(current, regs);
++	max_instr = instr + 15;
++
++	if ((regs->cs & 3) != 0 && instr >= (unsigned char *)TASK_SIZE)
++		return 0;
++
++	while (scan_more && instr < max_instr) { 
++		unsigned char opcode;
++		unsigned char instr_hi;
++		unsigned char instr_lo;
++
++		if (__get_user(opcode, instr))
++			break; 
++
++		instr_hi = opcode & 0xf0; 
++		instr_lo = opcode & 0x0f; 
++		instr++;
++
++		switch (instr_hi) { 
++		case 0x20:
++		case 0x30:
++			/* Values 0x26,0x2E,0x36,0x3E are valid x86
++			   prefixes.  In long mode, the CPU will signal
++			   invalid opcode if some of these prefixes are
++			   present so we will never get here anyway */
++			scan_more = ((instr_lo & 7) == 0x6);
++			break;
++			
++		case 0x40:
++			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
++			   Need to figure out under what instruction mode the
++			   instruction was issued ... */
++			/* Could check the LDT for lm, but for now it's good
++			   enough to assume that long mode only uses well known
++			   segments or kernel. */
++			scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS);
++			break;
++			
++		case 0x60:
++			/* 0x64 thru 0x67 are valid prefixes in all modes. */
++			scan_more = (instr_lo & 0xC) == 0x4;
++			break;		
++		case 0xF0:
++			/* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
++			scan_more = !instr_lo || (instr_lo>>1) == 1;
++			break;			
++		case 0x00:
++			/* Prefetch instruction is 0x0F0D or 0x0F18 */
++			scan_more = 0;
++			if (__get_user(opcode, instr)) 
++				break;
++			prefetch = (instr_lo == 0xF) &&
++				(opcode == 0x0D || opcode == 0x18);
++			break;			
++		default:
++			scan_more = 0;
++			break;
++		} 
++	}
++	return prefetch;
++}
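
The scanner above ultimately looks for AMD's prefetch opcodes once any prefixes have been skipped: PREFETCH/PREFETCHW is encoded as 0x0F 0x0D and the SSE prefetch family as 0x0F 0x18. A trivial stand-alone check of just that final step (illustrative only, not part of the patch) would be:

#include <stdio.h>

/* Does a byte sequence start with one of the prefetch opcodes that
 * is_prefetch() accepts after its prefix handling? */
static int looks_like_prefetch(const unsigned char *insn)
{
	return insn[0] == 0x0f && (insn[1] == 0x0d || insn[1] == 0x18);
}

int main(void)
{
	static const unsigned char prefetchnta[] = { 0x0f, 0x18, 0x06 }; /* prefetchnta (%rsi) */
	static const unsigned char mov[]         = { 0x48, 0x89, 0xc7 }; /* mov %rax,%rdi */

	printf("prefetchnta: %d, mov: %d\n",
	       looks_like_prefetch(prefetchnta), looks_like_prefetch(mov));
	return 0;
}
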
++
++static int bad_address(void *p) 
++{ 
++	unsigned long dummy;
++	return __get_user(dummy, (unsigned long *)p);
++} 
++
++void dump_pagetable(unsigned long address)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	preempt_disable();
++	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
++	preempt_enable();
++	pgd += pgd_index(address);
++	printk("PGD %lx ", pgd_val(*pgd));
++	if (bad_address(pgd)) goto bad;
++	if (!pgd_present(*pgd)) goto ret; 
++
++	pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
++	if (bad_address(pud)) goto bad;
++	printk("PUD %lx ", pud_val(*pud));
++	if (!pud_present(*pud))	goto ret;
++
++	pmd = pmd_offset(pud, address);
++	if (bad_address(pmd)) goto bad;
++	printk("PMD %lx ", pmd_val(*pmd));
++	if (!pmd_present(*pmd))	goto ret;	 
++
++	pte = pte_offset_kernel(pmd, address);
++	if (bad_address(pte)) goto bad;
++	printk("PTE %lx", pte_val(*pte)); 
++ret:
++	printk("\n");
++	return;
++bad:
++	printk("BAD\n");
++}
++
++static const char errata93_warning[] = 
++KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
++KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
++KERN_ERR "******* Please consider a BIOS update.\n"
++KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
++
++/* Workaround for K8 erratum #93 & buggy BIOS.
++   BIOS SMM functions are required to use a specific workaround
++   to avoid corruption of the 64bit RIP register on C stepping K8. 
++   A lot of BIOS that didn't get tested properly miss this. 
++   The OS sees this as a page fault with the upper 32bits of RIP cleared.
++   Try to work around it here.
++   Note we only handle faults in kernel here. */
++
++static int is_errata93(struct pt_regs *regs, unsigned long address) 
++{
++	static int warned;
++	if (address != regs->rip)
++		return 0;
++	if ((address >> 32) != 0) 
++		return 0;
++	address |= 0xffffffffUL << 32;
++	if ((address >= (u64)_stext && address <= (u64)_etext) || 
++	    (address >= MODULES_VADDR && address <= MODULES_END)) { 
++		if (!warned) {
++			printk(errata93_warning); 		
++			warned = 1;
++		}
++		regs->rip = address;
++		return 1;
++	}
++	return 0;
++} 
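
Numerically, the workaround above repairs a faulting RIP whose upper 32 bits were cleared by OR-ing those bits back in and then checking that the result really lies in the kernel text or module range. A tiny demonstration of the arithmetic (not part of the patch; the sample address is hypothetical and this assumes a 64-bit unsigned long, as on x86_64):

#include <stdio.h>

int main(void)
{
	unsigned long good_rip = 0xffffffff80110abcUL;	  /* hypothetical kernel-text address */
	unsigned long faulting = good_rip & 0xffffffffUL; /* upper half lost, as per erratum #93 */

	faulting |= 0xffffffffUL << 32;			/* same fix-up as is_errata93() */
	printf("recovered %#lx (%s)\n", faulting,
	       faulting == good_rip ? "matches the original" : "mismatch");
	return 0;
}
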
++
++int unhandled_signal(struct task_struct *tsk, int sig)
++{
++	if (tsk->pid == 1)
++		return 1;
++	/* Warn for strace, but not for gdb */
++	if (!test_ti_thread_flag(tsk->thread_info, TIF_SYSCALL_TRACE) &&
++	    (tsk->ptrace & PT_PTRACED))
++		return 0;
++	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
++		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
++}
++
++static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
++				 unsigned long error_code)
++{
++	oops_begin();
++	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
++	       current->comm, address);
++	dump_pagetable(address);
++	__die("Bad pagetable", regs, error_code);
++	oops_end();
++	do_exit(SIGKILL);
++}
++
++/*
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * This assumes no large pages in there.
++ */
++static int vmalloc_fault(unsigned long address)
++{
++	pgd_t *pgd, *pgd_ref;
++	pud_t *pud, *pud_ref;
++	pmd_t *pmd, *pmd_ref;
++	pte_t *pte, *pte_ref;
++
++	/* Copy kernel mappings over when needed. This can also
++	   happen within a race in page table update. In the later
++	   happen within a race in page table update. In the latter
++
++	/* On Xen the line below does not always work. Needs investigating! */
++	/*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
++	preempt_disable();
++	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
++	preempt_enable();
++	pgd += pgd_index(address);
++	pgd_ref = pgd_offset_k(address);
++	if (pgd_none(*pgd_ref))
++		return -1;
++	if (pgd_none(*pgd))
++		set_pgd(pgd, *pgd_ref);
++
++	/* Below here mismatches are bugs because these lower tables
++	   are shared */
++
++	pud = pud_offset(pgd, address);
++	pud_ref = pud_offset(pgd_ref, address);
++	if (pud_none(*pud_ref))
++		return -1;
++	if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
++		BUG();
++	pmd = pmd_offset(pud, address);
++	pmd_ref = pmd_offset(pud_ref, address);
++	if (pmd_none(*pmd_ref))
++		return -1;
++	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
++		BUG();
++	pte_ref = pte_offset_kernel(pmd_ref, address);
++	if (!pte_present(*pte_ref))
++		return -1;
++	pte = pte_offset_kernel(pmd, address);
++	/* Don't use pte_page here, because the mappings can point
++	   outside mem_map, and the NUMA hash lookup cannot handle
++	   that. */
++	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
++		BUG();
++	__flush_tlb_all();
++	return 0;
++}
++
++int page_fault_trace = 0;
++int exception_trace = 1;
++
++
++#define MEM_VERBOSE 1
++
++#ifdef MEM_VERBOSE
++#define MEM_LOG(_f, _a...)			\
++	printk("fault.c:[%d]-> " _f "\n",	\
++	__LINE__ , ## _a )
++#else
++#define MEM_LOG(_f, _a...) ((void)0)
++#endif
++
++/*
++ * This routine handles page faults.  It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ *
++ * error_code:
++ *	bit 0 == 0 means no page found, 1 means protection fault
++ *	bit 1 == 0 means read, 1 means write
++ *	bit 2 == 0 means kernel, 1 means user-mode
++ *      bit 3 == 1 means fault was an instruction fetch
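++ *
++ * For example, error_code 0x6 (bits 1 and 2 set) is a user-mode write to a
++ * not-present page, while 0x0 is a kernel-mode read of a not-present page.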
++ */
++asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
++{
++	struct task_struct *tsk;
++	struct mm_struct *mm;
++	struct vm_area_struct * vma;
++	unsigned long address;
++	const struct exception_table_entry *fixup;
++	int write;
++	siginfo_t info;
++
++	if (!user_mode(regs))
++		error_code &= ~4; /* means kernel */
++
++#ifdef CONFIG_CHECKING
++	{ 
++		unsigned long gs; 
++		struct x8664_pda *pda = cpu_pda + stack_smp_processor_id(); 
++		rdmsrl(MSR_GS_BASE, gs); 
++		if (gs != (unsigned long)pda) { 
++			wrmsrl(MSR_GS_BASE, pda); 
++			printk("page_fault: wrong gs %lx expected %p\n", gs, pda);
++		}
++	}
++#endif
++
++	/* get the address */
++	address = HYPERVISOR_shared_info->vcpu_info[
++		smp_processor_id()].arch.cr2;
++
++	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++					SIGSEGV) == NOTIFY_STOP)
++		return;
++
++	if (likely(regs->eflags & X86_EFLAGS_IF))
++		local_irq_enable();
++
++	if (unlikely(page_fault_trace))
++		printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
++		       regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code); 
++
++	tsk = current;
++	mm = tsk->mm;
++	info.si_code = SEGV_MAPERR;
++
++
++	/*
++	 * We fault-in kernel-space virtual memory on-demand. The
++	 * 'reference' page table is init_mm.pgd.
++	 *
++	 * NOTE! We MUST NOT take any locks for this case. We may
++	 * be in an interrupt or a critical region, and should
++	 * only copy the information from the master page table,
++	 * nothing more.
++	 *
++	 * This verifies that the fault happens in kernel space
++	 * (error_code & 4) == 0, and that the fault was not a
++	 * protection error (error_code & 1) == 0.
++	 */
++	if (unlikely(address >= TASK_SIZE)) {
++		if (!(error_code & 5) &&
++		      ((address >= VMALLOC_START && address < VMALLOC_END) ||
++		       (address >= MODULES_VADDR && address < MODULES_END))) {
++			if (vmalloc_fault(address) < 0)
++				goto bad_area_nosemaphore;
++			return;
++		}
++		/*
++		 * Don't take the mm semaphore here. If we fixup a prefetch
++		 * fault we could otherwise deadlock.
++		 */
++		goto bad_area_nosemaphore;
++	}
++
++	if (unlikely(error_code & (1 << 3)))
++		pgtable_bad(address, regs, error_code);
++
++	/*
++	 * If we're in an interrupt or have no user
++	 * context, we must not take the fault..
++	 */
++	if (unlikely(in_atomic() || !mm))
++		goto bad_area_nosemaphore;
++
++ again:
++	/* When running in the kernel we expect faults to occur only to
++	 * addresses in user space.  All other faults represent errors in the
++	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
++	 * erroneous fault occurring in a code path which already holds mmap_sem
++	 * we will deadlock attempting to validate the fault against the
++	 * address space.  Luckily the kernel only validly references user
++	 * space from well defined areas of code, which are listed in the
++	 * exceptions table.
++	 *
++	 * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++	 * Attempt to lock the address space, if we cannot we then validate the
++	 * source.  If this is invalid we can skip the address space check,
++	 * thus avoiding the deadlock.
++	 */
++	if (!down_read_trylock(&mm->mmap_sem)) {
++		if ((error_code & 4) == 0 &&
++		    !search_exception_tables(regs->rip))
++			goto bad_area_nosemaphore;
++		down_read(&mm->mmap_sem);
++	}
++
++	vma = find_vma(mm, address);
++	if (!vma)
++		goto bad_area;
++	if (likely(vma->vm_start <= address))
++		goto good_area;
++	if (!(vma->vm_flags & VM_GROWSDOWN))
++		goto bad_area;
++	if (error_code & 4) {
++		// XXX: align red zone size with ABI 
++		if (address + 128 < regs->rsp)
++			goto bad_area;
++	}
++	if (expand_stack(vma, address))
++		goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++	info.si_code = SEGV_ACCERR;
++	write = 0;
++	switch (error_code & 3) {
++		default:	/* 3: write, present */
++			/* fall through */
++		case 2:		/* write, not present */
++			if (!(vma->vm_flags & VM_WRITE))
++				goto bad_area;
++			write++;
++			break;
++		case 1:		/* read, present */
++			goto bad_area;
++		case 0:		/* read, not present */
++			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++				goto bad_area;
++	}
++
++	/*
++	 * If for any reason at all we couldn't handle the fault,
++	 * make sure we exit gracefully rather than endlessly redo
++	 * the fault.
++	 */
++	switch (handle_mm_fault(mm, vma, address, write)) {
++	case 1:
++		tsk->min_flt++;
++		break;
++	case 2:
++		tsk->maj_flt++;
++		break;
++	case 0:
++		goto do_sigbus;
++	default:
++		goto out_of_memory;
++	}
++
++	up_read(&mm->mmap_sem);
++	return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++	up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++	/* User mode accesses just cause a SIGSEGV */
++	if (error_code & 4) {
++		if (is_prefetch(regs, address, error_code))
++			return;
++
++		/* Work around K8 erratum #100: K8 in compat mode
++		   occasionally jumps to illegal addresses >4GB.  We
++		   catch this here in the page fault handler because
++		   these addresses are not reachable. Just detect this
++		   case and return.  Any code segment in LDT is
++		   compatibility mode. */
++		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
++		    (address >> 32))
++			return;
++
++		if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
++			printk(
++		       "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
++					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
++					tsk->comm, tsk->pid, address, regs->rip,
++					regs->rsp, error_code);
++		}
++       
++		tsk->thread.cr2 = address;
++		/* Kernel addresses are always protection faults */
++		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++		tsk->thread.trap_no = 14;
++		info.si_signo = SIGSEGV;
++		info.si_errno = 0;
++		/* info.si_code has been set above */
++		info.si_addr = (void __user *)address;
++		force_sig_info(SIGSEGV, &info, tsk);
++		return;
++	}
++
++no_context:
++	
++	/* Are we prepared to handle this kernel fault?  */
++	fixup = search_exception_tables(regs->rip);
++	if (fixup) {
++		regs->rip = fixup->fixup;
++		return;
++	}
++
++	/* 
++	 * Hall of shame of CPU/BIOS bugs.
++	 */
++
++ 	if (is_prefetch(regs, address, error_code))
++ 		return;
++
++	if (is_errata93(regs, address))
++		return; 
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++	oops_begin(); 
++
++	if (address < PAGE_SIZE)
++		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++	else
++		printk(KERN_ALERT "Unable to handle kernel paging request");
++	printk(" at %016lx RIP: \n" KERN_ALERT,address);
++	printk_address(regs->rip);
++	printk("\n");
++	dump_pagetable(address);
++	__die("Oops", regs, error_code);
++	/* Executive summary in case the body of the oops scrolled away */
++	printk(KERN_EMERG "CR2: %016lx\n", address);
++	oops_end(); 
++	do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++	up_read(&mm->mmap_sem);
++	if (current->pid == 1) { 
++		yield();
++		goto again;
++	}
++	printk("VM: killing process %s\n", tsk->comm);
++	if (error_code & 4)
++		do_exit(SIGKILL);
++	goto no_context;
++
++do_sigbus:
++	up_read(&mm->mmap_sem);
++
++	/* Kernel mode? Handle exceptions or die */
++	if (!(error_code & 4))
++		goto no_context;
++
++	tsk->thread.cr2 = address;
++	tsk->thread.error_code = error_code;
++	tsk->thread.trap_no = 14;
++	info.si_signo = SIGBUS;
++	info.si_errno = 0;
++	info.si_code = BUS_ADRERR;
++	info.si_addr = (void __user *)address;
++	force_sig_info(SIGBUS, &info, tsk);
++	return;
++}
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/mm/init.c linux-2.6.12-xen/arch/xen/x86_64/mm/init.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/mm/init.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/mm/init.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,964 @@
++/*
++ *  linux/arch/x86_64/mm/init.c
++ *
++ *  Copyright (C) 1995  Linus Torvalds
++ *  Copyright (C) 2000  Pavel Machek <pavel at suse.cz>
++ *  Copyright (C) 2002,2003 Andi Kleen <ak at suse.de>
++ *
++ *  Jun Nakajima <jun.nakajima at intel.com>
++ *	Modified for Xen.
++ */
++
++#include <linux/config.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/proc_fs.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++
++#include <asm-xen/features.h>
++
++#ifndef Dprintk
++#define Dprintk(x...)
++#endif
++
++extern unsigned long *contiguous_bitmap;
++
++#if defined(CONFIG_SWIOTLB)
++extern void swiotlb_init(void);
++#endif
++
++extern char _stext[];
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++extern unsigned long start_pfn;
++
++/*
++ * Use this until direct mapping is established, i.e. before __va() is 
++ * available in init_memory_mapping().
++ */
++
++#define addr_to_page(addr, page)				\
++	(addr) &= PHYSICAL_PAGE_MASK;				\
++	(page) = ((unsigned long *) ((unsigned long)		\
++	(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) +	\
++	__START_KERNEL_map)))
++
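++/*
++ * Walk the boot-time page tables by hand (via addr_to_page, since __va()
++ * is not usable this early) and clear _PAGE_RW on the PTE that maps 'va',
++ * unless the hypervisor already provides writable page tables.
++ */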
++static void early_make_page_readonly(void *va, unsigned int feature)
++{
++	unsigned long addr, _va = (unsigned long)va;
++	pte_t pte, *ptep;
++	unsigned long *page = (unsigned long *) init_level4_pgt;
++
++	if (xen_feature(feature))
++		return;
++
++	addr = (unsigned long) page[pgd_index(_va)];
++	addr_to_page(addr, page);
++
++	addr = page[pud_index(_va)];
++	addr_to_page(addr, page);
++
++	addr = page[pmd_index(_va)];
++	addr_to_page(addr, page);
++
++	ptep = (pte_t *) &page[pte_index(_va)];
++
++	pte.pte = ptep->pte & ~_PAGE_RW;
++	if (HYPERVISOR_update_va_mapping(_va, pte, 0))
++		BUG();
++}
++
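++/*
++ * Clear _PAGE_RW on the PTE mapping 'va'. For addresses in the vmalloc
++ * area the same page is also reachable through the direct mapping, so that
++ * alias is made read-only as well (the recursive call below).
++ */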
++void make_page_readonly(void *va, unsigned int feature)
++{
++	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++	unsigned long addr = (unsigned long) va;
++
++	if (xen_feature(feature))
++		return;
++
++	pgd = pgd_offset_k(addr);
++	pud = pud_offset(pgd, addr);
++	pmd = pmd_offset(pud, addr);
++	ptep = pte_offset_kernel(pmd, addr);
++
++	pte.pte = ptep->pte & ~_PAGE_RW;
++	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++		xen_l1_entry_update(ptep, pte); /* fallback */
++
++	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++		make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT), feature);
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++	unsigned long addr = (unsigned long) va;
++
++	if (xen_feature(feature))
++		return;
++
++	pgd = pgd_offset_k(addr);
++	pud = pud_offset(pgd, addr);
++	pmd = pmd_offset(pud, addr);
++	ptep = pte_offset_kernel(pmd, addr);
++
++	pte.pte = ptep->pte | _PAGE_RW;
++	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++		xen_l1_entry_update(ptep, pte); /* fallback */
++
++	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++		make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT), feature);
++}
++
++void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
++{
++	if (xen_feature(feature))
++		return;
++
++	while (nr-- != 0) {
++		make_page_readonly(va, feature);
++		va = (void*)((unsigned long)va + PAGE_SIZE);
++	}
++}
++
++void make_pages_writable(void *va, unsigned nr, unsigned int feature)
++{
++	if (xen_feature(feature))
++		return;
++
++	while (nr-- != 0) {
++		make_page_writable(va, feature);
++		va = (void*)((unsigned long)va + PAGE_SIZE);
++	}
++}
++
++/*
++ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
++ * physical space, so we can cache the place of the first one and move
++ * around without checking the pgd every time.
++ */
++
++void show_mem(void)
++{
++	int i, total = 0, reserved = 0;
++	int shared = 0, cached = 0;
++	pg_data_t *pgdat;
++	struct page *page;
++
++	printk("Mem-info:\n");
++	show_free_areas();
++	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++
++	for_each_pgdat(pgdat) {
++		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++			page = pfn_to_page(pgdat->node_start_pfn + i);
++			total++;
++			if (PageReserved(page))
++				reserved++;
++			else if (PageSwapCache(page))
++				cached++;
++			else if (page_count(page))
++				shared += page_count(page) - 1;
++		}
++	}
++	printk("%d pages of RAM\n", total);
++	printk("%d reserved pages\n",reserved);
++	printk("%d pages shared\n",shared);
++	printk("%d pages swap cached\n",cached);
++}
++
++/* References to section boundaries */
++
++extern char _text, _etext, _edata, __bss_start, _end[];
++extern char __init_begin, __init_end;
++
++int after_bootmem;
++
++static void *spp_getpage(void)
++{ 
++	void *ptr;
++	if (after_bootmem)
++		ptr = (void *) get_zeroed_page(GFP_ATOMIC); 
++	else
++		ptr = alloc_bootmem_pages(PAGE_SIZE);
++	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
++		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
++
++	Dprintk("spp_getpage %p\n", ptr);
++	return ptr;
++} 
++
++#define pgd_offset_u(address) (pgd_t *)(init_level4_user_pgt + pgd_index(address))
++
++static inline pud_t *pud_offset_u(unsigned long address)
++{
++	pud_t *pud = level3_user_pgt;
++
++	return pud + pud_index(address);
++}
++
++static void set_pte_phys(unsigned long vaddr,
++			 unsigned long phys, pgprot_t prot, int user_mode)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte, new_pte;
++
++	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++	pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
++	if (pgd_none(*pgd)) {
++		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++		return;
++	}
++	pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
++	if (pud_none(*pud)) {
++		pmd = (pmd_t *) spp_getpage(); 
++		make_page_readonly(pmd, XENFEAT_writable_page_tables);
++		xen_pmd_pin(__pa(pmd));
++		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++		if (pmd != pmd_offset(pud, 0)) {
++			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++			return;
++		}
++	}
++	pmd = pmd_offset(pud, vaddr);
++	if (pmd_none(*pmd)) {
++		pte = (pte_t *) spp_getpage();
++		make_page_readonly(pte, XENFEAT_writable_page_tables);
++		xen_pte_pin(__pa(pte));
++		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++		if (pte != pte_offset_kernel(pmd, 0)) {
++			printk("PAGETABLE BUG #02!\n");
++			return;
++		}
++	}
++	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
++
++	pte = pte_offset_kernel(pmd, vaddr);
++	if (!pte_none(*pte) &&
++	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
++		pte_ERROR(*pte);
++	set_pte(pte, new_pte);
++
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
++}
++
++static void set_pte_phys_ma(unsigned long vaddr,
++			 unsigned long phys, pgprot_t prot)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte, new_pte;
++
++	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++	pgd = pgd_offset_k(vaddr);
++	if (pgd_none(*pgd)) {
++		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++		return;
++	}
++	pud = pud_offset(pgd, vaddr);
++	if (pud_none(*pud)) {
++
++		pmd = (pmd_t *) spp_getpage(); 
++		make_page_readonly(pmd, XENFEAT_writable_page_tables);
++		xen_pmd_pin(__pa(pmd));
++
++		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++
++		if (pmd != pmd_offset(pud, 0)) {
++			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++			return;
++		}
++	}
++	pmd = pmd_offset(pud, vaddr);
++
++	if (pmd_none(*pmd)) {
++		pte = (pte_t *) spp_getpage();
++		make_page_readonly(pte, XENFEAT_writable_page_tables);
++		xen_pte_pin(__pa(pte));
++
++		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++		if (pte != pte_offset_kernel(pmd, 0)) {
++			printk("PAGETABLE BUG #02!\n");
++			return;
++		}
++	}
++
++	new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
++	pte = pte_offset_kernel(pmd, vaddr);
++
++	/* 
++	 * Note that the pte page is already RO, thus we want to use
++	 * xen_l1_entry_update(), not set_pte().
++	 */
++	xen_l1_entry_update(pte, 
++			    pfn_pte_ma(phys >> PAGE_SHIFT, prot));
++
++	/*
++	 * It's enough to flush this one mapping.
++	 * (PGE mappings get flushed as well)
++	 */
++	__flush_tlb_one(vaddr);
++}
++
++#define SET_FIXMAP_KERNEL 0
++#define SET_FIXMAP_USER   1
++
++/* NOTE: this is meant to be run only at boot */
++void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++{
++	unsigned long address = __fix_to_virt(idx);
++
++	if (idx >= __end_of_fixed_addresses) {
++		printk("Invalid __set_fixmap\n");
++		return;
++	}
++	switch (idx) {
++	case VSYSCALL_FIRST_PAGE:
++		set_pte_phys(address, phys, prot, SET_FIXMAP_KERNEL);
++		break;
++	default:
++		set_pte_phys_ma(address, phys, prot);
++		break;
++	}
++}
++
++/*
++ * At this point it only supports vsyscall area.
++ */
++void __set_fixmap_user (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++{
++	unsigned long address = __fix_to_virt(idx);
++
++	if (idx >= __end_of_fixed_addresses) {
++		printk("Invalid __set_fixmap\n");
++		return;
++	}
++
++	set_pte_phys(address, phys, prot, SET_FIXMAP_USER); 
++}
++
++unsigned long __initdata table_start, tables_space; 
++
++unsigned long get_machine_pfn(unsigned long addr)
++{
++	pud_t* pud = pud_offset_k(addr);
++	pmd_t* pmd = pmd_offset(pud, addr);
++	pte_t *pte = pte_offset_kernel(pmd, addr);
++
++	return pte_mfn(*pte);
++} 
++
++static __init void *alloc_static_page(unsigned long *phys)
++{
++	unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
++	*phys = start_pfn << PAGE_SHIFT;
++	start_pfn++;
++	memset((void *)va, 0, PAGE_SIZE);
++	return (void *)va;
++} 
++
++#define PTE_SIZE PAGE_SIZE
++
++static inline void __set_pte(pte_t *dst, pte_t val)
++{
++	*dst = val;
++}
++
++static inline int make_readonly(unsigned long paddr)
++{
++	int readonly = 0;
++
++	/* Make old and new page tables read-only. */
++	if (!xen_feature(XENFEAT_writable_page_tables)
++	    && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
++	    && (paddr < ((table_start << PAGE_SHIFT) + tables_space)))
++		readonly = 1;
++	/*
++	 * No need for writable mapping of kernel image. This also ensures that
++	 * page and descriptor tables embedded inside don't have writable
++	 * mappings. 
++	 */
++	if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end)))
++		readonly = 1;
++
++	return readonly;
++}
++
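++/*
++ * Fill one PUD's worth of the direct mapping: allocate PMD and PTE pages
++ * from the static boot allocator, make them read-only and pin them for Xen,
++ * and map all physical memory up to 'end' (kernel text and the boot page
++ * tables stay read-only, as decided by make_readonly()).
++ */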
++static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
++{ 
++	long i, j, k; 
++	unsigned long paddr;
++
++	i = pud_index(address);
++	pud = pud + i;
++	for (; i < PTRS_PER_PUD; pud++, i++) {
++		unsigned long pmd_phys;
++		pmd_t *pmd;
++
++		paddr = address + i*PUD_SIZE;
++		if (paddr >= end) { 
++			for (; i < PTRS_PER_PUD; i++, pud++) 
++				set_pud(pud, __pud(0)); 
++			break;
++		} 
++
++		pmd = alloc_static_page(&pmd_phys);
++		early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
++		xen_pmd_pin(pmd_phys);
++		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
++      		for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
++			unsigned long pte_phys;
++			pte_t *pte, *pte_save;
++
++			if (paddr >= end) { 
++				for (; j < PTRS_PER_PMD; j++, pmd++)
++					set_pmd(pmd,  __pmd(0)); 
++				break;
++			}
++			pte = alloc_static_page(&pte_phys);
++			pte_save = pte;
++			for (k = 0; k < PTRS_PER_PTE; pte++, k++, paddr += PTE_SIZE) {
++				if ((paddr >= end) ||
++				    ((paddr >> PAGE_SHIFT) >=
++				     xen_start_info->nr_pages)) { 
++					__set_pte(pte, __pte(0)); 
++					continue;
++				}
++				if (make_readonly(paddr)) {
++					__set_pte(pte, 
++						__pte(paddr | (_KERNPG_TABLE & ~_PAGE_RW)));
++					continue;
++				}
++				__set_pte(pte, __pte(paddr | _KERNPG_TABLE));
++			}
++			pte = pte_save;
++			early_make_page_readonly(
++				pte, XENFEAT_writable_page_tables);
++			xen_pte_pin(pte_phys);
++			set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
++		}
++	}
++	__flush_tlb();
++} 
++
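++/*
++ * Estimate how much memory the direct-mapping page tables will need: one
++ * 8-byte entry per PUD, PMD and PTE required to reach 'end', with each
++ * level rounded up to whole pages.
++ */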
++static void __init find_early_table_space(unsigned long end)
++{
++	unsigned long puds, pmds, ptes; 
++
++	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
++	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
++	ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
++
++	tables_space =
++		round_up(puds * 8, PAGE_SIZE) + 
++		round_up(pmds * 8, PAGE_SIZE) + 
++		round_up(ptes * 8, PAGE_SIZE); 
++}
++
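++/*
++ * Construct the kernel's own page-table hierarchy from the one the domain
++ * builder handed over: copy the kernel-image mappings into init_level4_pgt/
++ * level3_kernel_pgt/level2_kernel_pgt, make every level read-only and pin
++ * it with Xen, and install level3_user_pgt in the top slot of
++ * init_level4_user_pgt.
++ */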
++void __init xen_init_pt(void)
++{
++	unsigned long addr, *page;
++	int i;
++
++	for (i = 0; i < NR_CPUS; i++)
++		per_cpu(cur_pgd, i) = init_mm.pgd;
++
++	memset((void *)init_level4_pgt,   0, PAGE_SIZE);
++	memset((void *)level3_kernel_pgt, 0, PAGE_SIZE);
++	memset((void *)level2_kernel_pgt, 0, PAGE_SIZE);
++
++	/* Find the initial pte page that was built for us. */
++	page = (unsigned long *)xen_start_info->pt_base;
++	addr = page[pgd_index(__START_KERNEL_map)];
++	addr_to_page(addr, page);
++	addr = page[pud_index(__START_KERNEL_map)];
++	addr_to_page(addr, page);
++
++	/* Construct mapping of initial pte page in our own directories. */
++	init_level4_pgt[pgd_index(__START_KERNEL_map)] = 
++		mk_kernel_pgd(__pa_symbol(level3_kernel_pgt));
++	level3_kernel_pgt[pud_index(__START_KERNEL_map)] = 
++		__pud(__pa_symbol(level2_kernel_pgt) |
++		      _KERNPG_TABLE | _PAGE_USER);
++	memcpy((void *)level2_kernel_pgt, page, PAGE_SIZE);
++
++	early_make_page_readonly(init_level4_pgt,
++				 XENFEAT_writable_page_tables);
++	early_make_page_readonly(init_level4_user_pgt,
++				 XENFEAT_writable_page_tables);
++	early_make_page_readonly(level3_kernel_pgt,
++				 XENFEAT_writable_page_tables);
++	early_make_page_readonly(level3_user_pgt,
++				 XENFEAT_writable_page_tables);
++	early_make_page_readonly(level2_kernel_pgt,
++				 XENFEAT_writable_page_tables);
++
++	xen_pgd_pin(__pa_symbol(init_level4_pgt));
++	xen_pgd_pin(__pa_symbol(init_level4_user_pgt));
++	xen_pud_pin(__pa_symbol(level3_kernel_pgt));
++	xen_pud_pin(__pa_symbol(level3_user_pgt));
++	xen_pmd_pin(__pa_symbol(level2_kernel_pgt));
++
++	set_pgd((pgd_t *)(init_level4_user_pgt + 511), 
++		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
++}
++
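++/*
++ * Fix up the initial mapping left by the domain builder: unmap the low 1MB,
++ * make sure the mapping covers the kernel image plus the page tables we are
++ * about to allocate, and blow away anything mapped beyond that.
++ */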
++void __init extend_init_mapping(void) 
++{
++	unsigned long va = __START_KERNEL_map;
++	unsigned long phys, addr, *pte_page;
++	pmd_t *pmd;
++	pte_t *pte, new_pte;
++	unsigned long *page = (unsigned long *)init_level4_pgt;
++
++	addr = page[pgd_index(va)];
++	addr_to_page(addr, page);
++	addr = page[pud_index(va)];
++	addr_to_page(addr, page);
++
++	/* Kill mapping of low 1MB. */
++	while (va < (unsigned long)&_text) {
++		HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
++		va += PAGE_SIZE;
++	}
++
++	/* Ensure init mappings cover kernel text/data and initial tables. */
++	while (va < (__START_KERNEL_map
++		     + (start_pfn << PAGE_SHIFT)
++		     + tables_space)) {
++		pmd = (pmd_t *)&page[pmd_index(va)];
++		if (pmd_none(*pmd)) {
++			pte_page = alloc_static_page(&phys);
++			early_make_page_readonly(
++				pte_page, XENFEAT_writable_page_tables);
++			xen_pte_pin(phys);
++			set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
++		} else {
++			addr = page[pmd_index(va)];
++			addr_to_page(addr, pte_page);
++		}
++		pte = (pte_t *)&pte_page[pte_index(va)];
++		if (pte_none(*pte)) {
++			new_pte = pfn_pte(
++				(va - __START_KERNEL_map) >> PAGE_SHIFT, 
++				__pgprot(_KERNPG_TABLE | _PAGE_USER));
++			xen_l1_entry_update(pte, new_pte);
++		}
++		va += PAGE_SIZE;
++	}
++
++	/* Finally, blow away any spurious initial mappings. */
++	while (1) {
++		pmd = (pmd_t *)&page[pmd_index(va)];
++		if (pmd_none(*pmd))
++			break;
++		HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
++		va += PAGE_SIZE;
++	}
++}
++
++/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
++   This runs before bootmem is initialized and gets pages directly from the 
++   physical memory. To access them they are temporarily mapped. */
++void __init init_memory_mapping(unsigned long start, unsigned long end)
++{ 
++	unsigned long next; 
++
++	Dprintk("init_memory_mapping\n");
++
++	find_early_table_space(end);
++	extend_init_mapping();
++
++	table_start = start_pfn;
++
++	start = (unsigned long)__va(start);
++	end = (unsigned long)__va(end);
++
++	for (; start < end; start = next) {
++		unsigned long pud_phys; 
++		pud_t *pud = alloc_static_page(&pud_phys);
++		early_make_page_readonly(pud, XENFEAT_writable_page_tables);
++		xen_pud_pin(pud_phys);
++		next = start + PGDIR_SIZE;
++		if (next > end) 
++			next = end; 
++		phys_pud_init(pud, __pa(start), __pa(next));
++		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
++	}
++
++	printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
++	       __pa(end), table_start<<PAGE_SHIFT, start_pfn<<PAGE_SHIFT);
++
++	BUG_ON(start_pfn != (table_start + (tables_space >> PAGE_SHIFT)));
++
++	__flush_tlb_all();
++}
++
++extern struct x8664_pda cpu_pda[NR_CPUS];
++
++void zap_low_mappings(void)
++{
++	/* this is not required for Xen */
++#if 0
++	swap_low_mappings();
++#endif
++}
++
++#ifndef CONFIG_DISCONTIGMEM
++void __init paging_init(void)
++{
++	{
++		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
++		/*	unsigned int max_dma; */
++		/* max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; */
++		/* if (end_pfn < max_dma) */
++			zones_size[ZONE_DMA] = end_pfn;
++#if 0
++		else {
++			zones_size[ZONE_DMA] = max_dma;
++			zones_size[ZONE_NORMAL] = end_pfn - max_dma;
++		}
++#endif
++		free_area_init(zones_size);
++	}
++
++	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++
++	memset(empty_zero_page, 0, sizeof(empty_zero_page));
++	init_mm.context.pinned = 1;
++
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++	{
++		int i;
++		/* Setup mapping of lower 1st MB */
++		for (i = 0; i < NR_FIX_ISAMAPS; i++)
++			if (xen_start_info->flags & SIF_PRIVILEGED)
++				set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++			else
++				__set_fixmap(FIX_ISAMAP_BEGIN - i,
++					     virt_to_mfn(empty_zero_page) << PAGE_SHIFT,
++					     PAGE_KERNEL_RO);
++	}
++#endif
++
++}
++#endif
++
++/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
++   from the CPU leading to inconsistent cache lines. address and size
++   must be aligned to 2MB boundaries. 
++   Does nothing when the mapping doesn't exist. */
++void __init clear_kernel_mapping(unsigned long address, unsigned long size) 
++{
++	unsigned long end = address + size;
++
++	BUG_ON(address & ~LARGE_PAGE_MASK);
++	BUG_ON(size & ~LARGE_PAGE_MASK); 
++	
++	for (; address < end; address += LARGE_PAGE_SIZE) { 
++		pgd_t *pgd = pgd_offset_k(address);
++		pud_t *pud;
++		pmd_t *pmd;
++		if (pgd_none(*pgd))
++			continue;
++		pud = pud_offset(pgd, address);
++		if (pud_none(*pud))
++			continue; 
++		pmd = pmd_offset(pud, address);
++		if (!pmd || pmd_none(*pmd))
++			continue; 
++		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) { 
++			/* Could handle this, but it should not happen currently. */
++			printk(KERN_ERR 
++	       "clear_kernel_mapping: mapping has been split. will leak memory\n"); 
++			pmd_ERROR(*pmd); 
++		}
++		set_pmd(pmd, __pmd(0)); 		
++	}
++	__flush_tlb_all();
++} 
++
++static inline int page_is_ram (unsigned long pagenr)
++{
++	return 1;
++}
++
++static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
++			 kcore_vsyscall;
++
++void __init mem_init(void)
++{
++	int codesize, reservedpages, datasize, initsize;
++	int tmp;
++
++	contiguous_bitmap = alloc_bootmem_low_pages(
++		(end_pfn + 2*BITS_PER_LONG) >> 3);
++	BUG_ON(!contiguous_bitmap);
++	memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
++
++#if defined(CONFIG_SWIOTLB)
++	swiotlb_init();	
++#endif
++
++	/* How many end-of-memory variables you have, grandma! */
++	max_low_pfn = end_pfn;
++	max_pfn = end_pfn;
++	num_physpages = end_pfn;
++	high_memory = (void *) __va(end_pfn * PAGE_SIZE);
++
++	/* clear the zero-page */
++	memset(empty_zero_page, 0, PAGE_SIZE);
++
++	reservedpages = 0;
++
++	/* this will put all low memory onto the freelists */
++#ifdef CONFIG_DISCONTIGMEM
++	totalram_pages += numa_free_all_bootmem();
++	tmp = 0;
++	/* should count reserved pages here for all nodes */ 
++#else
++	max_mapnr = end_pfn;
++	if (!mem_map) BUG();
++
++	totalram_pages += free_all_bootmem();
++
++	for (tmp = 0; tmp < end_pfn; tmp++)
++		/*
++		 * Only count reserved RAM pages
++		 */
++		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
++			reservedpages++;
++#endif
++
++	after_bootmem = 1;
++
++	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
++	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
++	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++	/* Register memory areas for /proc/kcore */
++	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
++	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
++		   VMALLOC_END-VMALLOC_START);
++	kclist_add(&kcore_kernel, &_stext, _end - _stext);
++	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
++	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, 
++				 VSYSCALL_END - VSYSCALL_START);
++
++	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
++		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++		end_pfn << (PAGE_SHIFT-10),
++		codesize >> 10,
++		reservedpages << (PAGE_SHIFT-10),
++		datasize >> 10,
++		initsize >> 10);
++
++	/*
++	 * Subtle. SMP is doing its boot stuff late (because it has to
++	 * fork idle threads) - but it also needs low mappings for the
++	 * protected-mode entry to work. We zap these entries only after
++	 * the WP-bit has been tested.
++	 */
++#ifndef CONFIG_SMP
++	zap_low_mappings();
++#endif
++}
++
++extern char __initdata_begin[], __initdata_end[];
++
++void free_initmem(void)
++{
++#ifdef __DO_LATER__
++	/*
++	 * Some pages can be pinned, but some are not. Unpinning such pages 
++	 * triggers BUG(). 
++	 */
++	unsigned long addr;
++
++	addr = (unsigned long)(&__init_begin);
++	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
++		ClearPageReserved(virt_to_page(addr));
++		set_page_count(virt_to_page(addr), 1);
++		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE); 
++		xen_pte_unpin(__pa(addr));
++		make_page_writable(
++			__va(__pa(addr)), XENFEAT_writable_page_tables);
++		/*
++		 * Make pages from __PAGE_OFFSET address as well
++		 */
++		make_page_writable(
++			(void *)addr, XENFEAT_writable_page_tables);
++		free_page(addr);
++		totalram_pages++;
++	}
++	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
++	printk ("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
++#endif
++}
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++	if (start < (unsigned long)&_end)
++		return;
++	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
++	for (; start < end; start += PAGE_SIZE) {
++		ClearPageReserved(virt_to_page(start));
++		set_page_count(virt_to_page(start), 1);
++		free_page(start);
++		totalram_pages++;
++	}
++}
++#endif
++
++void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 
++{ 
++	/* Should check here against the e820 map to avoid double free */ 
++#ifdef CONFIG_DISCONTIGMEM
++	int nid = phys_to_nid(phys);
++  	reserve_bootmem_node(NODE_DATA(nid), phys, len);
++#else       		
++	reserve_bootmem(phys, len);    
++#endif
++}
++
++int kern_addr_valid(unsigned long addr) 
++{ 
++	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	if (above != 0 && above != -1UL)
++		return 0; 
++	
++	pgd = pgd_offset_k(addr);
++	if (pgd_none(*pgd))
++		return 0;
++
++	pud = pud_offset_k(addr);
++	if (pud_none(*pud))
++		return 0; 
++
++	pmd = pmd_offset(pud, addr);
++	if (pmd_none(*pmd))
++		return 0;
++	if (pmd_large(*pmd))
++		return pfn_valid(pmd_pfn(*pmd));
++
++	pte = pte_offset_kernel(pmd, addr);
++	if (pte_none(*pte))
++		return 0;
++	return pfn_valid(pte_pfn(*pte));
++}
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++
++extern int exception_trace, page_fault_trace;
++
++static ctl_table debug_table2[] = {
++	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
++	  proc_dointvec },
++#ifdef CONFIG_CHECKING
++	{ 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
++	  proc_dointvec },
++#endif
++	{ 0, }
++}; 
++
++static ctl_table debug_root_table2[] = { 
++	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555, 
++	   .child = debug_table2 }, 
++	{ 0 }, 
++}; 
++
++static __init int x8664_sysctl_init(void)
++{ 
++	register_sysctl_table(debug_root_table2, 1);
++	return 0;
++}
++__initcall(x8664_sysctl_init);
++#endif
++
++/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
++   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
++   not need special handling anymore. */
++
++static struct vm_area_struct gate_vma = {
++	.vm_start = VSYSCALL_START,
++	.vm_end = VSYSCALL_END,
++	.vm_page_prot = PAGE_READONLY
++};
++
++struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++{
++#ifdef CONFIG_IA32_EMULATION
++	if (test_tsk_thread_flag(tsk, TIF_IA32))
++		return NULL;
++#endif
++	return &gate_vma;
++}
++
++int in_gate_area(struct task_struct *task, unsigned long addr)
++{
++	struct vm_area_struct *vma = get_gate_vma(task);
++	if (!vma)
++		return 0;
++	return (addr >= vma->vm_start) && (addr < vma->vm_end);
++}
++
++/* Use this when you have no reliable task/vma, typically from interrupt
++ * context.  It is less reliable than using the task's vma and may give
++ * false positives.
++ */
++int in_gate_area_no_task(unsigned long addr)
++{
++	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/mm/Makefile linux-2.6.12-xen/arch/xen/x86_64/mm/Makefile
+--- pristine-linux-2.6.12/arch/xen/x86_64/mm/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/mm/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,31 @@
++#
++# Makefile for the linux x86_64-specific parts of the memory manager.
++#
++
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++
++CFLAGS	+= -Iarch/$(XENARCH)/mm
++
++obj-y	:= init.o fault.o pageattr.o
++c-obj-y	:= extable.o
++
++i386-obj-y := hypervisor.o ioremap.o
++
++#obj-y	 := init.o fault.o ioremap.o extable.o pageattr.o
++#c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
++c-obj-$(CONFIG_DISCONTIGMEM) += numa.o
++c-obj-$(CONFIG_K8_NUMA) += k8topology.o
++
++hugetlbpage-y = ../../../i386/mm/hugetlbpage.o
++
++c-link	:=
++
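++# The sources for c-obj-y and i386-obj-y are not kept in this directory;
++# the rules below symlink them in from arch/x86_64/mm and arch/xen/i386/mm
++# at build time.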
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
++	@ln -fsn $(srctree)/arch/x86_64/mm/$(notdir $@) $@
++
++$(patsubst %.o,$(obj)/%.c,$(i386-obj-y)):
++	ln -fsn $(srctree)/arch/xen/i386/mm/$(notdir $@) $@
++
++obj-y	+= $(c-obj-y) $(i386-obj-y)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link) $(i386-obj-y))
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/mm/pageattr.c linux-2.6.12-xen/arch/xen/x86_64/mm/pageattr.c
+--- pristine-linux-2.6.12/arch/xen/x86_64/mm/pageattr.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/mm/pageattr.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,382 @@
++/* 
++ * Copyright 2002 Andi Kleen, SuSE Labs. 
++ * Thanks to Ben LaHaise for precious feedback.
++ */ 
++
++#include <linux/config.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++
++#ifdef CONFIG_XEN
++#include <asm/pgalloc.h>
++#include <asm/mmu_context.h>
++
++LIST_HEAD(mm_unpinned);
++DEFINE_SPINLOCK(mm_unpinned_lock);
++
++static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
++{
++	struct page *page = virt_to_page(pt);
++	unsigned long pfn = page_to_pfn(page);
++
++	BUG_ON(HYPERVISOR_update_va_mapping(
++		       (unsigned long)__va(pfn << PAGE_SHIFT),
++		       pfn_pte(pfn, flags), 0));
++}
++
++static void mm_walk(struct mm_struct *mm, pgprot_t flags)
++{
++	pgd_t       *pgd;
++	pud_t       *pud;
++	pmd_t       *pmd;
++	pte_t       *pte;
++	int          g,u,m;
++
++	pgd = mm->pgd;
++	for (g = 0; g <= USER_PTRS_PER_PGD; g++, pgd++) {
++		if (pgd_none(*pgd))
++			continue;
++		pud = pud_offset(pgd, 0);
++		if (PTRS_PER_PUD > 1) /* not folded */ 
++			mm_walk_set_prot(pud,flags);
++		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++			if (pud_none(*pud))
++				continue;
++			pmd = pmd_offset(pud, 0);
++			if (PTRS_PER_PMD > 1) /* not folded */ 
++				mm_walk_set_prot(pmd,flags);
++			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++				if (pmd_none(*pmd))
++					continue;
++				pte = pte_offset_kernel(pmd,0);
++				mm_walk_set_prot(pte,flags);
++			}
++		}
++	}
++}
++
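++/*
++ * Pin a process's page tables: make every level (and both PGD roots)
++ * read-only, ask Xen to pin the kernel and user PGDs so the hypervisor
++ * validates all further updates, and drop the mm from the unpinned list.
++ */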
++void mm_pin(struct mm_struct *mm)
++{
++	spin_lock(&mm->page_table_lock);
++
++	mm_walk(mm, PAGE_KERNEL_RO);
++	BUG_ON(HYPERVISOR_update_va_mapping(
++		       (unsigned long)mm->pgd,
++		       pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
++		       UVMF_TLB_FLUSH));
++	BUG_ON(HYPERVISOR_update_va_mapping(
++		       (unsigned long)__user_pgd(mm->pgd),
++		       pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL_RO),
++		       UVMF_TLB_FLUSH));
++	xen_pgd_pin(__pa(mm->pgd)); /* kernel */
++	xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
++	mm->context.pinned = 1;
++	spin_lock(&mm_unpinned_lock);
++	list_del(&mm->context.unpinned);
++	spin_unlock(&mm_unpinned_lock);
++
++	spin_unlock(&mm->page_table_lock);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++	spin_lock(&mm->page_table_lock);
++
++	xen_pgd_unpin(__pa(mm->pgd));
++	xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
++	BUG_ON(HYPERVISOR_update_va_mapping(
++		       (unsigned long)mm->pgd,
++		       pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
++	BUG_ON(HYPERVISOR_update_va_mapping(
++		       (unsigned long)__user_pgd(mm->pgd),
++		       pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL), 0));
++	mm_walk(mm, PAGE_KERNEL);
++	xen_tlb_flush();
++	mm->context.pinned = 0;
++	spin_lock(&mm_unpinned_lock);
++	list_add(&mm->context.unpinned, &mm_unpinned);
++	spin_unlock(&mm_unpinned_lock);
++
++	spin_unlock(&mm->page_table_lock);
++}
++
++void mm_pin_all(void)
++{
++	while (!list_empty(&mm_unpinned))	
++		mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
++				  context.unpinned));
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++    struct task_struct *tsk = current;
++
++    task_lock(tsk);
++
++    /*
++     * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
++     * *much* faster this way, as no tlb flushes means bigger wrpt batches.
++     */
++    if ( tsk->active_mm == mm )
++    {
++        tsk->active_mm = &init_mm;
++        atomic_inc(&init_mm.mm_count);
++
++        switch_mm(mm, &init_mm, tsk);
++
++        atomic_dec(&mm->mm_count);
++        BUG_ON(atomic_read(&mm->mm_count) == 0);
++    }
++
++    task_unlock(tsk);
++
++    if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) )
++        mm_unpin(mm);
++}
++
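++/*
++ * Page-table pages are mapped read-only while in use; restore a writable
++ * mapping (if needed) before handing the page back to the allocator.
++ */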
++void pte_free(struct page *pte)
++{
++	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
++
++	if (!pte_write(*virt_to_ptep(va)))
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
++	__free_page(pte);
++}
++#endif	/* CONFIG_XEN */
++
++static inline pte_t *lookup_address(unsigned long address) 
++{ 
++	pgd_t *pgd = pgd_offset_k(address);
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++	if (pgd_none(*pgd))
++		return NULL;
++	pud = pud_offset(pgd, address);
++	if (!pud_present(*pud))
++		return NULL; 
++	pmd = pmd_offset(pud, address);
++	if (!pmd_present(*pmd))
++		return NULL; 
++	if (pmd_large(*pmd))
++		return (pte_t *)pmd;
++	pte = pte_offset_kernel(pmd, address);
++	if (pte && !pte_present(*pte))
++		pte = NULL; 
++	return pte;
++} 
++
++static struct page *split_large_page(unsigned long address, pgprot_t prot,
++				     pgprot_t ref_prot)
++{ 
++	int i; 
++	unsigned long addr;
++	struct page *base = alloc_pages(GFP_KERNEL, 0);
++	pte_t *pbase;
++	if (!base) 
++		return NULL;
++	address = __pa(address);
++	addr = address & LARGE_PAGE_MASK; 
++	pbase = (pte_t *)page_address(base);
++	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
++		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
++				   addr == address ? prot : ref_prot);
++	}
++	return base;
++} 
++
++
++static void flush_kernel_map(void *address) 
++{
++	if (0 && address && cpu_has_clflush) {
++		/* is this worth it? */ 
++		int i;
++		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 
++			asm volatile("clflush (%0)" :: "r" (address + i)); 
++	} else
++		asm volatile("wbinvd":::"memory"); 
++	if (address)
++		__flush_tlb_one(address);
++	else
++		__flush_tlb_all();
++}
++
++
++static inline void flush_map(unsigned long address)
++{	
++	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
++}
++
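++/*
++ * Pages freed by __change_page_attr() cannot be returned to the allocator
++ * until the old large-page mapping has been flushed on every CPU; they are
++ * queued on df_list and released later by global_flush_tlb().
++ */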
++struct deferred_page { 
++	struct deferred_page *next; 
++	struct page *fpage;
++	unsigned long address;
++}; 
++static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */
++
++static inline void save_page(unsigned long address, struct page *fpage)
++{
++	struct deferred_page *df;
++	df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL); 
++	if (!df) {
++		flush_map(address);
++		__free_page(fpage);
++	} else { 
++		df->next = df_list;
++		df->fpage = fpage;
++		df->address = address;
++		df_list = df;
++	} 			
++}
++
++/* 
++ * No more special protections in this 2/4MB area - revert to a
++ * large page again. 
++ */
++static void revert_page(unsigned long address, pgprot_t ref_prot)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t large_pte;
++
++	pgd = pgd_offset_k(address);
++	BUG_ON(pgd_none(*pgd));
++	pud = pud_offset(pgd,address);
++	BUG_ON(pud_none(*pud));
++	pmd = pmd_offset(pud, address);
++	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
++	pgprot_val(ref_prot) |= _PAGE_PSE;
++	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
++	set_pte((pte_t *)pmd, large_pte);
++}      
++
++static int
++__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
++				   pgprot_t ref_prot)
++{ 
++	pte_t *kpte; 
++	struct page *kpte_page;
++	unsigned kpte_flags;
++	kpte = lookup_address(address);
++	if (!kpte) return 0;
++	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
++	kpte_flags = pte_val(*kpte); 
++	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
++		if ((kpte_flags & _PAGE_PSE) == 0) { 
++			set_pte(kpte, pfn_pte(pfn, prot));
++		} else {
++ 			/*
++ 			 * split_large_page will take the reference for this change_page_attr
++ 			 * on the split page.
++ 			 */
++			struct page *split = split_large_page(address, prot, ref_prot); 
++			if (!split)
++				return -ENOMEM;
++			set_pte(kpte,mk_pte(split, ref_prot));
++			kpte_page = split;
++		}	
++		get_page(kpte_page);
++	} else if ((kpte_flags & _PAGE_PSE) == 0) { 
++		set_pte(kpte, pfn_pte(pfn, ref_prot));
++		__put_page(kpte_page);
++	} else
++		BUG();
++
++	/* on x86-64 the direct mapping set at boot is not using 4k pages */
++	/*
++	 * ..., but the XEN guest kernels (currently) do:
++	 * If the pte was reserved, it means it was created at boot
++	 * time (not via split_large_page) and in turn we must not
++	 * replace it with a large page.
++	 */
++#ifndef CONFIG_XEN
++ 	BUG_ON(PageReserved(kpte_page));
++#else
++	if (!PageReserved(kpte_page))
++#endif
++		switch (page_count(kpte_page)) {
++		case 1:
++			save_page(address, kpte_page); 		     
++			revert_page(address, ref_prot);
++			break;
++		case 0:
++			BUG(); /* memleak and failed 2M page regeneration */
++	 	}
++	return 0;
++} 
++
++/*
++ * Change the page attributes of a page in the linear mapping.
++ *
++ * This should be used when a page is mapped with a different caching policy
++ * than write-back somewhere - some CPUs do not like it when mappings with
++ * different caching policies exist. This changes the page attributes of the
++ * in kernel linear mapping too.
++ * 
++ * The caller needs to ensure that there are no conflicting mappings elsewhere.
++ * This function only deals with the kernel linear map.
++ * 
++ * Caller must call global_flush_tlb() after this.
++ */
++int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
++{
++	int err = 0; 
++	int i; 
++
++	down_write(&init_mm.mmap_sem);
++	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
++		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
++
++		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
++		if (err) 
++			break; 
++		/* Handle kernel mapping too which aliases part of the
++		 * lowmem */
++		if (__pa(address) < KERNEL_TEXT_SIZE) {
++			unsigned long addr2;
++			pgprot_t prot2 = prot;
++			addr2 = __START_KERNEL_map + __pa(address);
++ 			pgprot_val(prot2) &= ~_PAGE_NX;
++			err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
++		} 
++	} 	
++	up_write(&init_mm.mmap_sem); 
++	return err;
++}
++
++/* Don't call this for MMIO areas that may not have a mem_map entry */
++int change_page_attr(struct page *page, int numpages, pgprot_t prot)
++{
++	unsigned long addr = (unsigned long)page_address(page);
++	return change_page_attr_addr(addr, numpages, prot);
++}
++
++void global_flush_tlb(void)
++{ 
++	struct deferred_page *df, *next_df;
++
++	down_read(&init_mm.mmap_sem);
++	df = xchg(&df_list, NULL);
++	up_read(&init_mm.mmap_sem);
++	if (!df)
++		return;
++	flush_map((df && !df->next) ? df->address : 0);
++	for (; df; df = next_df) { 
++		next_df = df->next;
++		if (df->fpage) 
++			__free_page(df->fpage);
++		kfree(df);
++	} 
++} 
++
++EXPORT_SYMBOL(change_page_attr);
++EXPORT_SYMBOL(global_flush_tlb);
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/pci/Makefile linux-2.6.12-xen/arch/xen/x86_64/pci/Makefile
+--- pristine-linux-2.6.12/arch/xen/x86_64/pci/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/pci/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,39 @@
++#
++# Makefile for X86_64 specific PCI routines
++#
++# Reuse the i386 PCI subsystem
++#
++XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
++CFLAGS	+= -Iarch/$(XENARCH)/pci
++
++CFLAGS += -Iarch/i386/pci
++
++c-xen-obj-y		:= i386.o
++c-i386-obj-y		+= fixup.o
++c-i386-obj-$(CONFIG_ACPI_PCI)	+= acpi.o
++c-i386-obj-y			+= legacy.o common.o
++c-i386-obj-$(CONFIG_PCI_DIRECT)+= direct.o
++c-xen-obj-y		+= irq.o
++# mmconfig has a 64bit special
++c-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
++
++c-obj-$(CONFIG_NUMA)	+= k8-bus.o
++
++c-link	:=
++
++$(patsubst %.o,$(obj)/%.c,$(c-xen-obj-y)):
++	@ln -fsn $(srctree)/arch/xen/i386/pci/$(notdir $@) $@
++
++$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
++	@ln -fsn $(srctree)/arch/x86_64/pci/$(notdir $@) $@
++
++$(patsubst %.o,$(obj)/%.c,$(c-i386-obj-y)):
++	@ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
++
++# Make sure irq.o gets linked in before common.o
++obj-y	+= $(patsubst common.o,$(c-xen-obj-y) common.o,$(c-i386-obj-y))
++obj-y	+= $(c-obj-y)
++
++clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
++clean-files += $(patsubst %.o,%.c,$(c-i386-obj-y) $(c-i386-obj-))
++clean-files += $(patsubst %.o,%.c,$(c-xen-obj-y) $(c-xen-obj-))
+diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/pci/Makefile-BUS linux-2.6.12-xen/arch/xen/x86_64/pci/Makefile-BUS
+--- pristine-linux-2.6.12/arch/xen/x86_64/pci/Makefile-BUS	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/arch/xen/x86_64/pci/Makefile-BUS	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,22 @@
++#
++# Makefile for X86_64 specific PCI routines
++#
++# Reuse the i386 PCI subsystem
++#
++CFLAGS += -I arch/i386/pci
++
++obj-y		:= i386.o
++obj-$(CONFIG_PCI_DIRECT)+= direct.o
++obj-y		+= fixup.o
++obj-$(CONFIG_ACPI_PCI)	+= acpi.o
++obj-y			+= legacy.o irq.o common.o
++# mmconfig has a 64bit special
++obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
++
++direct-y += ../../i386/pci/direct.o
++acpi-y   += ../../i386/pci/acpi.o
++legacy-y += ../../i386/pci/legacy.o
++irq-y    += ../../i386/pci/irq.o
++common-y += ../../i386/pci/common.o
++fixup-y  += ../../i386/pci/fixup.o
++i386-y  += ../../i386/pci/i386.o
+diff -Nurp pristine-linux-2.6.12/.config linux-2.6.12-xen/.config
+--- pristine-linux-2.6.12/.config	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/.config	2006-03-05 23:55:06.826653101 +0100
+@@ -0,0 +1,2966 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12.6-xen
++# Sun Mar  5 23:55:06 2006
++#
++CONFIG_XEN=y
++CONFIG_ARCH_XEN=y
++CONFIG_NO_IDLE_HZ=y
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_PHYSDEV_ACCESS=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++# CONFIG_XEN_BLKDEV_TAP_BE is not set
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_TPMDEV_FRONTEND is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SHADOW_MODE is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_X86=y
++# CONFIG_XEN_X86_64 is not set
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++# CONFIG_CLEAN_COMPILE is not set
++CONFIG_BROKEN=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++# CONFIG_CPUSETS is not set
++CONFIG_EMBEDDED=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_OBSOLETE_MODPARM=y
++CONFIG_MODVERSIONS=y
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# X86 Processor Configuration
++#
++CONFIG_XENARCH="i386"
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_UID16=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++# CONFIG_X86_GENERIC is not set
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=5
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++# CONFIG_HPET_TIMER is not set
++# CONFIG_HPET_EMULATE_RTC is not set
++CONFIG_SMP=y
++CONFIG_SMP_ALTERNATIVES=y
++CONFIG_NR_CPUS=8
++# CONFIG_SCHED_SMT is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++CONFIG_MICROCODE=y
++CONFIG_X86_CPUID=m
++CONFIG_SWIOTLB=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=m
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_HIGHMEM=y
++CONFIG_MTRR=y
++CONFIG_HAVE_DEC_LOCK=y
++# CONFIG_REGPARM is not set
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_HOTPLUG_CPU=y
++
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++CONFIG_PCI=y
++# CONFIG_PCI_GOMMCONFIG is not set
++# CONFIG_PCI_GODIRECT is not set
++CONFIG_PCI_GOANY=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_MSI is not set
++# CONFIG_PCI_LEGACY_PROC is not set
++CONFIG_PCI_NAMES=y
++# CONFIG_PCI_DEBUG is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_ISA=y
++# CONFIG_EISA is not set
++# CONFIG_MCA is not set
++CONFIG_SCx200=m
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++CONFIG_PCCARD=m
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_PCMCIA=m
++CONFIG_CARDBUS=y
++
++#
++# PC-card bridges
++#
++CONFIG_YENTA=m
++CONFIG_PD6729=m
++CONFIG_I82092=m
++CONFIG_I82365=m
++CONFIG_TCIC=m
++CONFIG_PCMCIA_PROBE=y
++CONFIG_PCCARD_NONSTATIC=m
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=m
++CONFIG_HOTPLUG_PCI_FAKE=m
++# CONFIG_HOTPLUG_PCI_ACPI is not set
++CONFIG_HOTPLUG_PCI_CPCI=y
++CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
++CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
++CONFIG_HOTPLUG_PCI_SHPC=m
++# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_SECCOMP=y
++# CONFIG_EARLY_PRINTK is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_AOUT=m
++CONFIG_BINFMT_MISC=m
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=m
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++CONFIG_MTD=m
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=m
++CONFIG_MTD_PARTITIONS=y
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_AMDSTD_RETRY=0
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++CONFIG_MTD_ROM=m
++CONFIG_MTD_ABSENT=m
++# CONFIG_MTD_OBSOLETE_CHIPS is not set
++
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_PHYSMAP=m
++CONFIG_MTD_PHYSMAP_START=0x8000000
++CONFIG_MTD_PHYSMAP_LEN=0x4000000
++CONFIG_MTD_PHYSMAP_BANKWIDTH=2
++CONFIG_MTD_PNC2000=m
++CONFIG_MTD_SC520CDP=m
++CONFIG_MTD_NETSC520=m
++CONFIG_MTD_TS5500=m
++CONFIG_MTD_SBC_GXX=m
++CONFIG_MTD_ELAN_104NC=m
++CONFIG_MTD_SCx200_DOCFLASH=m
++# CONFIG_MTD_AMD76XROM is not set
++# CONFIG_MTD_ICHXROM is not set
++# CONFIG_MTD_SCB2_FLASH is not set
++CONFIG_MTD_NETtel=m
++CONFIG_MTD_DILNETPC=m
++CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
++# CONFIG_MTD_L440GX is not set
++CONFIG_MTD_PCI=m
++CONFIG_MTD_PCMCIA=m
++
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++CONFIG_MTD_SLRAM=m
++CONFIG_MTD_PHRAM=m
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++CONFIG_MTD_BLKMTD=m
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOC2000=m
++CONFIG_MTD_DOC2001=m
++CONFIG_MTD_DOC2001PLUS=m
++CONFIG_MTD_DOCPROBE=m
++CONFIG_MTD_DOCECC=m
++# CONFIG_MTD_DOCPROBE_ADVANCED is not set
++CONFIG_MTD_DOCPROBE_ADDRESS=0
++
++#
++# NAND Flash Device Drivers
++#
++CONFIG_MTD_NAND=m
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++CONFIG_MTD_NAND_IDS=m
++CONFIG_MTD_NAND_DISKONCHIP=m
++# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
++CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
++# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
++# CONFIG_MTD_NAND_NANDSIM is not set
++
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++CONFIG_PARPORT_SERIAL=m
++CONFIG_PARPORT_PC_FIFO=y
++# CONFIG_PARPORT_PC_SUPERIO is not set
++CONFIG_PARPORT_PC_PCMCIA=m
++CONFIG_PARPORT_NOT_PC=y
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_1284=y
++
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++# CONFIG_PNP_DEBUG is not set
++
++#
++# Protocols
++#
++CONFIG_ISAPNP=y
++# CONFIG_PNPBIOS is not set
++# CONFIG_PNPACPI is not set
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=m
++CONFIG_BLK_DEV_XD=m
++CONFIG_PARIDE=m
++CONFIG_PARIDE_PARPORT=m
++
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
++
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_BPCK6=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++# CONFIG_PARIDE_EPATC8 is not set
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SX8=m
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_LBD=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_ATA_OVER_ETH=m
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++CONFIG_IDEDISK_MULTI_MODE=y
++CONFIG_BLK_DEV_IDECS=m
++CONFIG_BLK_DEV_IDECD=y
++CONFIG_BLK_DEV_IDETAPE=m
++CONFIG_BLK_DEV_IDEFLOPPY=y
++CONFIG_BLK_DEV_IDESCSI=m
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++CONFIG_BLK_DEV_CMD640=y
++CONFIG_BLK_DEV_CMD640_ENHANCED=y
++CONFIG_BLK_DEV_IDEPNP=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++CONFIG_BLK_DEV_OPTI621=m
++CONFIG_BLK_DEV_RZ1000=y
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++CONFIG_BLK_DEV_AEC62XX=y
++CONFIG_BLK_DEV_ALI15X3=y
++# CONFIG_WDC_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=y
++CONFIG_BLK_DEV_ATIIXP=y
++CONFIG_BLK_DEV_CMD64X=y
++CONFIG_BLK_DEV_TRIFLEX=y
++CONFIG_BLK_DEV_CY82C693=y
++CONFIG_BLK_DEV_CS5520=y
++CONFIG_BLK_DEV_CS5530=y
++CONFIG_BLK_DEV_HPT34X=y
++# CONFIG_HPT34X_AUTODMA is not set
++CONFIG_BLK_DEV_HPT366=y
++CONFIG_BLK_DEV_SC1200=m
++CONFIG_BLK_DEV_PIIX=y
++CONFIG_BLK_DEV_NS87415=m
++CONFIG_BLK_DEV_PDC202XX_OLD=y
++CONFIG_PDC202XX_BURST=y
++CONFIG_BLK_DEV_PDC202XX_NEW=y
++CONFIG_PDC202XX_FORCE=y
++CONFIG_BLK_DEV_SVWKS=y
++CONFIG_BLK_DEV_SIIMAGE=y
++CONFIG_BLK_DEV_SIS5513=y
++CONFIG_BLK_DEV_SLC90E66=y
++CONFIG_BLK_DEV_TRM290=m
++CONFIG_BLK_DEV_VIA82CXXX=y
++# CONFIG_IDE_ARM is not set
++# CONFIG_IDE_CHIPSETS is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=m
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++# CONFIG_SCSI_7000FASST is not set
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AHA152X=m
++# CONFIG_SCSI_AHA1542 is not set
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_AIC7XXX_OLD=m
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++CONFIG_AIC79XX_ENABLE_RD_STRM=y
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_DPT_I2O=m
++CONFIG_SCSI_ADVANSYS=m
++CONFIG_SCSI_IN2000=m
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_SCSI_SATA=y
++CONFIG_SCSI_SATA_AHCI=m
++CONFIG_SCSI_SATA_SVW=m
++CONFIG_SCSI_ATA_PIIX=m
++CONFIG_SCSI_SATA_NV=m
++CONFIG_SCSI_SATA_PROMISE=m
++# CONFIG_SCSI_SATA_QSTOR is not set
++CONFIG_SCSI_SATA_SX4=m
++CONFIG_SCSI_SATA_SIL=m
++CONFIG_SCSI_SATA_SIS=m
++CONFIG_SCSI_SATA_ULI=m
++CONFIG_SCSI_SATA_VIA=m
++CONFIG_SCSI_SATA_VITESSE=m
++CONFIG_SCSI_BUSLOGIC=m
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++# CONFIG_SCSI_CPQFCTS is not set
++CONFIG_SCSI_DMX3191D=m
++CONFIG_SCSI_DTC3280=m
++CONFIG_SCSI_EATA=m
++CONFIG_SCSI_EATA_TAGGED_QUEUE=y
++CONFIG_SCSI_EATA_LINKED_COMMANDS=y
++CONFIG_SCSI_EATA_MAX_TAGS=16
++CONFIG_SCSI_EATA_PIO=m
++CONFIG_SCSI_FUTURE_DOMAIN=m
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_GENERIC_NCR5380=m
++CONFIG_SCSI_GENERIC_NCR5380_MMIO=m
++CONFIG_SCSI_GENERIC_NCR53C400=y
++CONFIG_SCSI_IPS=m
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_NCR53C406A=m
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
++CONFIG_SCSI_IPR=m
++# CONFIG_SCSI_IPR_TRACE is not set
++# CONFIG_SCSI_IPR_DUMP is not set
++CONFIG_SCSI_PAS16=m
++# CONFIG_SCSI_PCI2000 is not set
++# CONFIG_SCSI_PCI2220I is not set
++CONFIG_SCSI_PSI240I=m
++CONFIG_SCSI_QLOGIC_FAS=m
++CONFIG_SCSI_QLOGIC_ISP=m
++CONFIG_SCSI_QLOGIC_FC=m
++CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLOGIC_1280_1040=y
++CONFIG_SCSI_QLA2XXX=m
++CONFIG_SCSI_QLA21XX=m
++CONFIG_SCSI_QLA22XX=m
++CONFIG_SCSI_QLA2300=m
++CONFIG_SCSI_QLA2322=m
++CONFIG_SCSI_QLA6312=m
++CONFIG_SCSI_LPFC=m
++# CONFIG_SCSI_SEAGATE is not set
++CONFIG_SCSI_SYM53C416=m
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_DC390T=m
++CONFIG_SCSI_T128=m
++CONFIG_SCSI_U14_34F=m
++CONFIG_SCSI_U14_34F_TAGGED_QUEUE=y
++CONFIG_SCSI_U14_34F_LINKED_COMMANDS=y
++CONFIG_SCSI_U14_34F_MAX_TAGS=8
++# CONFIG_SCSI_ULTRASTOR is not set
++CONFIG_SCSI_NSP32=m
++CONFIG_SCSI_DEBUG=m
++
++#
++# PCMCIA SCSI adapter support
++#
++CONFIG_PCMCIA_AHA152X=m
++CONFIG_PCMCIA_FDOMAIN=m
++CONFIG_PCMCIA_NINJA_SCSI=m
++CONFIG_PCMCIA_QLOGIC=m
++CONFIG_PCMCIA_SYM53C500=m
++
++#
++# Old CD-ROM drivers (not SCSI, not IDE)
++#
++CONFIG_CD_NO_IDESCSI=y
++CONFIG_AZTCD=m
++CONFIG_GSCD=m
++# CONFIG_SBPCD is not set
++CONFIG_MCDX=m
++CONFIG_OPTCD=m
++# CONFIG_CM206 is not set
++CONFIG_SJCD=m
++CONFIG_ISP16_CDI=m
++CONFIG_CDU31A=m
++CONFIG_CDU535=m
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=m
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID5=m
++CONFIG_MD_RAID6=m
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_BLK_DEV_DM=m
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=m
++CONFIG_FUSION_MAX_SGE=40
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_IEEE1394=m
++
++#
++# Subsystem Options
++#
++# CONFIG_IEEE1394_VERBOSEDEBUG is not set
++# CONFIG_IEEE1394_OUI_DB is not set
++CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
++CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
++
++#
++# Device Drivers
++#
++CONFIG_IEEE1394_PCILYNX=m
++CONFIG_IEEE1394_OHCI1394=m
++
++#
++# Protocol Drivers
++#
++CONFIG_IEEE1394_VIDEO1394=m
++CONFIG_IEEE1394_SBP2=m
++# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
++CONFIG_IEEE1394_ETH1394=m
++CONFIG_IEEE1394_DV1394=m
++CONFIG_IEEE1394_RAWIO=m
++CONFIG_IEEE1394_CMP=m
++CONFIG_IEEE1394_AMDTP=m
++
++#
++# I2O device support
++#
++CONFIG_I2O=m
++CONFIG_I2O_CONFIG=m
++CONFIG_I2O_BLOCK=m
++CONFIG_I2O_SCSI=m
++CONFIG_I2O_PROC=m
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=m
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=m
++CONFIG_NET_KEY=m
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++CONFIG_IP_ROUTE_VERBOSE=y
++# CONFIG_IP_PNP is not set
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_TUNNEL=m
++CONFIG_IP_TCPDIAG=m
++CONFIG_IP_TCPDIAG_IPV6=y
++
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IPV6=m
++CONFIG_IPV6_PRIVACY=y
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CT_PROTO_SCTP=m
++CONFIG_IP_NF_FTP=m
++CONFIG_IP_NF_IRC=m
++CONFIG_IP_NF_TFTP=m
++CONFIG_IP_NF_AMANDA=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_LIMIT=m
++CONFIG_IP_NF_MATCH_IPRANGE=m
++CONFIG_IP_NF_MATCH_MAC=m
++CONFIG_IP_NF_MATCH_PKTTYPE=m
++CONFIG_IP_NF_MATCH_MARK=m
++CONFIG_IP_NF_MATCH_MULTIPORT=m
++CONFIG_IP_NF_MATCH_TOS=m
++CONFIG_IP_NF_MATCH_RECENT=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_DSCP=m
++CONFIG_IP_NF_MATCH_AH_ESP=m
++CONFIG_IP_NF_MATCH_LENGTH=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_MATCH_TCPMSS=m
++CONFIG_IP_NF_MATCH_HELPER=m
++CONFIG_IP_NF_MATCH_STATE=m
++CONFIG_IP_NF_MATCH_CONNTRACK=m
++CONFIG_IP_NF_MATCH_OWNER=m
++CONFIG_IP_NF_MATCH_PHYSDEV=m
++CONFIG_IP_NF_MATCH_ADDRTYPE=m
++CONFIG_IP_NF_MATCH_REALM=m
++CONFIG_IP_NF_MATCH_SCTP=m
++CONFIG_IP_NF_MATCH_COMMENT=m
++CONFIG_IP_NF_MATCH_CONNMARK=m
++CONFIG_IP_NF_MATCH_HASHLIMIT=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_TCPMSS=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_SAME=m
++CONFIG_IP_NF_NAT_SNMP_BASIC=m
++CONFIG_IP_NF_NAT_IRC=m
++CONFIG_IP_NF_NAT_FTP=m
++CONFIG_IP_NF_NAT_TFTP=m
++CONFIG_IP_NF_NAT_AMANDA=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_TOS=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_DSCP=m
++CONFIG_IP_NF_TARGET_MARK=m
++CONFIG_IP_NF_TARGET_CLASSIFY=m
++CONFIG_IP_NF_TARGET_CONNMARK=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_TARGET_NOTRACK=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_LIMIT=m
++CONFIG_IP6_NF_MATCH_MAC=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_MULTIPORT=m
++CONFIG_IP6_NF_MATCH_OWNER=m
++CONFIG_IP6_NF_MATCH_MARK=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_AHESP=m
++CONFIG_IP6_NF_MATCH_LENGTH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_MATCH_PHYSDEV=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_TARGET_MARK=m
++CONFIG_IP6_NF_RAW=m
++
++#
++# DECnet: Netfilter Configuration
++#
++CONFIG_DECNET_NF_GRABULATOR=m
++
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++# CONFIG_BRIDGE_EBT_ULOG is not set
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
++CONFIG_ATM=y
++CONFIG_ATM_CLIP=y
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++CONFIG_ATM_LANE=m
++CONFIG_ATM_MPOA=m
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++CONFIG_DECNET=m
++# CONFIG_DECNET_ROUTER is not set
++CONFIG_LLC=y
++CONFIG_LLC2=m
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=y
++CONFIG_LTPC=m
++CONFIG_COPS=m
++CONFIG_COPS_DAYNA=y
++CONFIG_COPS_TANGENT=y
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++CONFIG_X25=m
++CONFIG_LAPB=m
++# CONFIG_NET_DIVERT is not set
++CONFIG_ECONET=m
++CONFIG_ECONET_AUNUDP=y
++CONFIG_ECONET_NATIVE=y
++CONFIG_WAN_ROUTER=m
++
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CLK_JIFFIES=y
++# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
++# CONFIG_NET_SCH_CLK_CPU is not set
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_INGRESS=m
++CONFIG_NET_QOS=y
++CONFIG_NET_ESTIMATOR=y
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++# CONFIG_CLS_U32_PERF is not set
++# CONFIG_NET_CLS_IND is not set
++# CONFIG_CLS_U32_MARK is not set
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_CLS_POLICE=y
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++# CONFIG_NETPOLL_TRAP is not set
++CONFIG_NET_POLL_CONTROLLER=y
++CONFIG_HAMRADIO=y
++
++#
++# Packet Radio protocols
++#
++CONFIG_AX25=m
++# CONFIG_AX25_DAMA_SLAVE is not set
++CONFIG_NETROM=m
++CONFIG_ROSE=m
++
++#
++# AX.25 network device drivers
++#
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++# CONFIG_DMASCC is not set
++CONFIG_SCC=m
++# CONFIG_SCC_DELAY is not set
++# CONFIG_SCC_TRXECHO is not set
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_BAYCOM_EPP=m
++CONFIG_YAM=m
++CONFIG_IRDA=m
++
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++# CONFIG_IRDA_ULTRA is not set
++
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++CONFIG_IRDA_DEBUG=y
++
++#
++# Infrared-port device drivers
++#
++
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
++
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++
++#
++# Old SIR device drivers
++#
++CONFIG_IRPORT_SIR=m
++
++#
++# Old Serial dongle support
++#
++# CONFIG_DONGLE_OLD is not set
++
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++# CONFIG_TOSHIBA_FIR is not set
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUSB=m
++CONFIG_BT_HCIUSB_SCO=y
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++# CONFIG_BT_HCIUART_BCSP_TXCRC is not set
++CONFIG_BT_HCIBCM203X=m
++# CONFIG_BT_HCIBPA10X is not set
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIDTL1=m
++CONFIG_BT_HCIBT3C=m
++CONFIG_BT_HCIBLUECARD=m
++CONFIG_BT_HCIBTUART=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++CONFIG_NET_SB1000=m
++
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=m
++CONFIG_ARCNET_1201=m
++CONFIG_ARCNET_1051=m
++CONFIG_ARCNET_RAW=m
++# CONFIG_ARCNET_CAP is not set
++CONFIG_ARCNET_COM90xx=m
++CONFIG_ARCNET_COM90xxIO=m
++CONFIG_ARCNET_RIM_I=m
++CONFIG_ARCNET_COM20020=m
++CONFIG_ARCNET_COM20020_ISA=m
++CONFIG_ARCNET_COM20020_PCI=m
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=m
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_EL1=m
++CONFIG_EL2=m
++# CONFIG_ELPLUS is not set
++CONFIG_EL16=m
++CONFIG_EL3=m
++# CONFIG_3C515 is not set
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++# CONFIG_LANCE is not set
++CONFIG_NET_VENDOR_SMC=y
++CONFIG_WD80x3=m
++CONFIG_ULTRA=m
++CONFIG_SMC9194=m
++CONFIG_NET_VENDOR_RACAL=y
++CONFIG_NI5010=m
++CONFIG_NI52=m
++# CONFIG_NI65 is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_PCMCIA_XIRCOM=m
++# CONFIG_PCMCIA_XIRTULIP is not set
++CONFIG_AT1700=m
++CONFIG_DEPCA=m
++CONFIG_HP100=m
++CONFIG_NET_ISA=y
++CONFIG_E2100=m
++CONFIG_EWRK3=m
++CONFIG_EEXPRESS=m
++CONFIG_EEXPRESS_PRO=m
++CONFIG_HPLAN_PLUS=m
++CONFIG_HPLAN=m
++CONFIG_LP486E=m
++CONFIG_ETH16I=m
++CONFIG_NE2000=m
++CONFIG_ZNET=m
++CONFIG_SEEQ8005=m
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++# CONFIG_AMD8111E_NAPI is not set
++CONFIG_ADAPTEC_STARFIRE=m
++# CONFIG_ADAPTEC_STARFIRE_NAPI is not set
++CONFIG_AC3200=m
++CONFIG_APRICOT=m
++CONFIG_B44=m
++CONFIG_FORCEDETH=m
++CONFIG_CS89x0=m
++# CONFIG_DGRS is not set
++CONFIG_EEPRO100=m
++CONFIG_E100=m
++CONFIG_FEALNX=m
++CONFIG_NATSEMI=m
++CONFIG_NE2K_PCI=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++CONFIG_8139TOO_PIO=y
++CONFIG_8139TOO_TUNE_TWISTER=y
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_SIS900=m
++CONFIG_EPIC100=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_TLAN=m
++CONFIG_VIA_RHINE=m
++# CONFIG_VIA_RHINE_MMIO is not set
++CONFIG_NET_POCKET=y
++CONFIG_ATP=m
++CONFIG_DE600=m
++CONFIG_DE620=m
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++CONFIG_DL2K=m
++CONFIG_E1000=m
++# CONFIG_E1000_NAPI is not set
++CONFIG_NS83820=m
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_R8169=m
++# CONFIG_R8169_NAPI is not set
++# CONFIG_R8169_VLAN is not set
++CONFIG_SK98LIN=m
++CONFIG_VIA_VELOCITY=m
++CONFIG_TIGON3=m
++CONFIG_BNX2=m
++
++#
++# Ethernet (10000 Mbit)
++#
++CONFIG_IXGB=m
++# CONFIG_IXGB_NAPI is not set
++CONFIG_S2IO=m
++# CONFIG_S2IO_NAPI is not set
++# CONFIG_2BUFF_MODE is not set
++
++#
++# Token Ring devices
++#
++CONFIG_TR=y
++CONFIG_IBMTR=m
++CONFIG_IBMOL=m
++CONFIG_IBMLS=m
++CONFIG_3C359=m
++CONFIG_TMS380TR=m
++CONFIG_TMSPCI=m
++CONFIG_SKISA=m
++CONFIG_PROTEON=m
++CONFIG_ABYSS=m
++# CONFIG_SMCTR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++CONFIG_NET_RADIO=y
++
++#
++# Obsolete Wireless cards support (pre-802.11)
++#
++CONFIG_STRIP=m
++CONFIG_ARLAN=m
++CONFIG_WAVELAN=m
++CONFIG_PCMCIA_WAVELAN=m
++CONFIG_PCMCIA_NETWAVE=m
++
++#
++# Wireless 802.11 Frequency Hopping cards support
++#
++CONFIG_PCMCIA_RAYCS=m
++
++#
++# Wireless 802.11b ISA/PCI cards support
++#
++CONFIG_AIRO=m
++CONFIG_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
++
++#
++# Wireless 802.11b Pcmcia/Cardbus cards support
++#
++CONFIG_PCMCIA_HERMES=m
++CONFIG_AIRO_CS=m
++CONFIG_PCMCIA_ATMEL=m
++CONFIG_PCMCIA_WL3501=m
++
++#
++# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
++#
++CONFIG_PRISM54=m
++CONFIG_NET_WIRELESS=y
++
++#
++# PCMCIA network device support
++#
++CONFIG_NET_PCMCIA=y
++CONFIG_PCMCIA_3C589=m
++CONFIG_PCMCIA_3C574=m
++CONFIG_PCMCIA_FMVJ18X=m
++CONFIG_PCMCIA_PCNET=m
++CONFIG_PCMCIA_NMCLAN=m
++CONFIG_PCMCIA_SMC91C92=m
++CONFIG_PCMCIA_XIRC2PS=m
++CONFIG_PCMCIA_AXNET=m
++CONFIG_ARCNET_COM20020_CS=m
++CONFIG_PCMCIA_IBMTR=m
++
++#
++# Wan interfaces
++#
++CONFIG_WAN=y
++CONFIG_HOSTESS_SV11=m
++CONFIG_COSA=m
++CONFIG_DSCC4=m
++CONFIG_DSCC4_PCISYNC=y
++CONFIG_DSCC4_PCI_RST=y
++CONFIG_LANMEDIA=m
++CONFIG_SEALEVEL_4021=m
++CONFIG_SYNCLINK_SYNCPPP=m
++CONFIG_HDLC=m
++CONFIG_HDLC_RAW=y
++CONFIG_HDLC_RAW_ETH=y
++CONFIG_HDLC_CISCO=y
++CONFIG_HDLC_FR=y
++CONFIG_HDLC_PPP=y
++CONFIG_HDLC_X25=y
++CONFIG_PCI200SYN=m
++CONFIG_WANXL=m
++CONFIG_PC300=m
++CONFIG_PC300_MLPPP=y
++CONFIG_N2=m
++CONFIG_C101=m
++CONFIG_FARSYNC=m
++CONFIG_DLCI=m
++CONFIG_DLCI_COUNT=24
++CONFIG_DLCI_MAX=8
++CONFIG_SDLA=m
++CONFIG_WAN_ROUTER_DRIVERS=y
++# CONFIG_VENDOR_SANGOMA is not set
++CONFIG_CYCLADES_SYNC=m
++CONFIG_CYCLOMX_X25=y
++CONFIG_LAPBETHER=m
++CONFIG_X25_ASY=m
++CONFIG_SBNI=m
++# CONFIG_SBNI_MULTILINE is not set
++
++#
++# ATM drivers
++#
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_FIRESTREAM=m
++CONFIG_ATM_ZATM=m
++# CONFIG_ATM_ZATM_DEBUG is not set
++CONFIG_ATM_NICSTAR=m
++# CONFIG_ATM_NICSTAR_USE_SUNI is not set
++# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_AMBASSADOR=m
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++CONFIG_ATM_HORIZON=m
++# CONFIG_ATM_HORIZON_DEBUG is not set
++CONFIG_ATM_IA=m
++# CONFIG_ATM_IA_DEBUG is not set
++CONFIG_ATM_FORE200E_MAYBE=m
++CONFIG_ATM_FORE200E_PCA=y
++CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
++# CONFIG_ATM_FORE200E_USE_TASKLET is not set
++CONFIG_ATM_FORE200E_TX_RETRY=16
++CONFIG_ATM_FORE200E_DEBUG=0
++CONFIG_ATM_FORE200E=m
++CONFIG_ATM_HE=m
++CONFIG_ATM_HE_USE_SUNI=y
++CONFIG_FDDI=y
++CONFIG_DEFXX=m
++CONFIG_SKFP=m
++CONFIG_HIPPI=y
++CONFIG_ROADRUNNER=m
++# CONFIG_ROADRUNNER_LARGE_RINGS is not set
++CONFIG_PLIP=m
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_PPP_BSDCOMP=m
++CONFIG_PPPOE=m
++CONFIG_PPPOATM=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++CONFIG_SLIP_MODE_SLIP6=y
++CONFIG_NET_FC=y
++CONFIG_SHAPER=m
++CONFIG_NETCONSOLE=m
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++CONFIG_ISDN_PPP_BSDCOMP=m
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_ISDN_X25=y
++
++#
++# ISDN feature submodules
++#
++# CONFIG_ISDN_DRV_LOOP is not set
++# CONFIG_ISDN_DIVERSION is not set
++
++#
++# ISDN4Linux hardware drivers
++#
++
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
++
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++# CONFIG_HISAX_NO_SENDCOMPLETE is not set
++# CONFIG_HISAX_NO_LLC is not set
++# CONFIG_HISAX_NO_KEYPAD is not set
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_0=y
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_AVM_A1=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_IX1MICROR2=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_ASUSCOM=y
++CONFIG_HISAX_TELEINT=y
++CONFIG_HISAX_HFCS=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_SPORTSTER=y
++CONFIG_HISAX_MIC=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_ISURF=y
++CONFIG_HISAX_HSTSAPHIR=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
++
++#
++# HiSax PCMCIA card service modules
++#
++CONFIG_HISAX_SEDLBAUER_CS=m
++CONFIG_HISAX_ELSA_CS=m
++CONFIG_HISAX_AVM_A1_CS=m
++CONFIG_HISAX_TELES_CS=m
++
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_HDLC=y
++
++#
++# Active cards
++#
++CONFIG_ISDN_DRV_ICN=m
++CONFIG_ISDN_DRV_PCBIT=m
++CONFIG_ISDN_DRV_SC=m
++CONFIG_ISDN_DRV_ACT2000=m
++# CONFIG_HYSDN is not set
++
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
++CONFIG_ISDN_CAPI_CAPIFS=m
++CONFIG_ISDN_CAPI_CAPIDRV=m
++
++#
++# CAPI hardware drivers
++#
++
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1ISA=m
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_T1ISA=m
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++
++#
++# Active Eicon DIVA Server cards
++#
++CONFIG_CAPI_EICON=y
++CONFIG_ISDN_DIVAS=m
++CONFIG_ISDN_DIVAS_BRIPCI=y
++CONFIG_ISDN_DIVAS_PRIPCI=y
++CONFIG_ISDN_DIVAS_DIVACAPI=m
++CONFIG_ISDN_DIVAS_USERIDI=m
++CONFIG_ISDN_DIVAS_MAINT=m
++
++#
++# Telephony Support
++#
++CONFIG_PHONE=m
++CONFIG_PHONE_IXJ=m
++CONFIG_PHONE_IXJ_PCMCIA=m
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_TSDEV=m
++CONFIG_INPUT_TSDEV_SCREEN_X=240
++CONFIG_INPUT_TSDEV_SCREEN_Y=320
++CONFIG_INPUT_EVDEV=m
++CONFIG_INPUT_EVBUG=m
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++CONFIG_KEYBOARD_SUNKBD=m
++CONFIG_KEYBOARD_LKKBD=m
++CONFIG_KEYBOARD_XTKBD=m
++CONFIG_KEYBOARD_NEWTON=m
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_INPORT=m
++# CONFIG_MOUSE_ATIXL is not set
++CONFIG_MOUSE_LOGIBM=m
++CONFIG_MOUSE_PC110PAD=m
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_GUNZE=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_UINPUT=m
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_SERIO_CT82C710=m
++CONFIG_SERIO_PARKBD=m
++CONFIG_SERIO_PCIPS2=m
++CONFIG_SERIO_LIBPS2=y
++CONFIG_SERIO_RAW=m
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_VORTEX=m
++CONFIG_GAMEPORT_FM801=m
++# CONFIG_GAMEPORT_CS461X is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=m
++# CONFIG_SERIAL_8250_CS is not set
++# CONFIG_SERIAL_8250_ACPI is not set
++CONFIG_SERIAL_8250_NR_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=m
++CONFIG_SERIAL_JSM=m
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++CONFIG_PRINTER=m
++# CONFIG_LP_CONSOLE is not set
++CONFIG_PPDEV=m
++CONFIG_TIPAR=m
++
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
++
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_ACQUIRE_WDT=m
++CONFIG_ADVANTECH_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_SC520_WDT=m
++CONFIG_EUROTECH_WDT=m
++CONFIG_IB700_WDT=m
++CONFIG_WAFER_WDT=m
++CONFIG_I8XX_TCO=m
++CONFIG_SC1200_WDT=m
++CONFIG_SCx200_WDT=m
++CONFIG_60XX_WDT=m
++CONFIG_CPU5_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_W83877F_WDT=m
++CONFIG_MACHZ_WDT=m
++
++#
++# ISA-based Watchdog Cards
++#
++CONFIG_PCWATCHDOG=m
++CONFIG_MIXCOMWD=m
++CONFIG_WDT=m
++CONFIG_WDT_501=y
++
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++CONFIG_WDT_501_PCI=y
++
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++CONFIG_HW_RANDOM=m
++CONFIG_NVRAM=m
++CONFIG_RTC=m
++CONFIG_GEN_RTC=m
++CONFIG_GEN_RTC_X=y
++CONFIG_DTLK=m
++CONFIG_R3964=m
++CONFIG_APPLICOM=m
++CONFIG_SONYPI=m
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_FTAPE is not set
++CONFIG_AGP=m
++CONFIG_AGP_ALI=m
++CONFIG_AGP_ATI=m
++CONFIG_AGP_AMD=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_NVIDIA=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_SWORKS=m
++CONFIG_AGP_VIA=m
++CONFIG_AGP_EFFICEON=m
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++# CONFIG_DRM_GAMMA is not set
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++
++#
++# PCMCIA character devices
++#
++CONFIG_SYNCLINK_CS=m
++CONFIG_MWAVE=m
++CONFIG_SCx200_GPIO=m
++CONFIG_RAW_DRIVER=m
++# CONFIG_HPET is not set
++CONFIG_MAX_RAW_DEVS=256
++CONFIG_HANGCHECK_TIMER=m
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=m
++CONFIG_I2C_CHARDEV=m
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_ALI1535=m
++CONFIG_I2C_ALI1563=m
++CONFIG_I2C_ALI15X3=m
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD756_S4882=m
++CONFIG_I2C_AMD8111=m
++CONFIG_I2C_ELEKTOR=m
++CONFIG_I2C_I801=m
++CONFIG_I2C_I810=m
++CONFIG_I2C_PIIX4=m
++CONFIG_I2C_ISA=m
++CONFIG_I2C_NFORCE2=m
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++CONFIG_I2C_PROSAVAGE=m
++CONFIG_I2C_SAVAGE4=m
++CONFIG_SCx200_I2C=m
++CONFIG_SCx200_I2C_SCL=12
++CONFIG_SCx200_I2C_SDA=13
++CONFIG_SCx200_ACB=m
++CONFIG_I2C_SIS5595=m
++CONFIG_I2C_SIS630=m
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++CONFIG_I2C_VOODOO3=m
++CONFIG_I2C_PCA_ISA=m
++
++#
++# Hardware Sensors Chip support
++#
++CONFIG_I2C_SENSOR=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_FSCHER=m
++CONFIG_SENSORS_FSCPOS=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_PC87360=m
++# CONFIG_SENSORS_SMSC47B397 is not set
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83627HF=m
++
++#
++# Other I2C Chip support
++#
++CONFIG_SENSORS_DS1337=m
++CONFIG_SENSORS_EEPROM=m
++CONFIG_SENSORS_PCF8574=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_RTC8564=m
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# Dallas's 1-wire bus
++#
++CONFIG_W1=m
++CONFIG_W1_MATROX=m
++CONFIG_W1_DS9490=m
++CONFIG_W1_DS9490_BRIDGE=m
++CONFIG_W1_THERM=m
++CONFIG_W1_SMEM=m
++
++#
++# Misc devices
++#
++CONFIG_IBM_ASM=m
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=m
++
++#
++# Video For Linux
++#
++
++#
++# Video Adapters
++#
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_PMS=m
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_CPIA=m
++CONFIG_VIDEO_CPIA_PP=m
++CONFIG_VIDEO_CPIA_USB=m
++CONFIG_VIDEO_SAA5246A=m
++CONFIG_VIDEO_SAA5249=m
++CONFIG_TUNER_3036=m
++CONFIG_VIDEO_STRADIS=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++# CONFIG_VIDEO_ZR36120 is not set
++CONFIG_VIDEO_MEYE=m
++# CONFIG_VIDEO_SAA7134 is not set
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DPC=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_CX88=m
++# CONFIG_VIDEO_CX88_DVB is not set
++CONFIG_VIDEO_OVCAMCHIP=m
++
++#
++# Radio Adapters
++#
++CONFIG_RADIO_CADET=m
++CONFIG_RADIO_RTRACK=m
++CONFIG_RADIO_RTRACK2=m
++CONFIG_RADIO_AZTECH=m
++CONFIG_RADIO_GEMTEK=m
++CONFIG_RADIO_GEMTEK_PCI=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_MAESTRO=m
++CONFIG_RADIO_MIROPCM20=m
++CONFIG_RADIO_MIROPCM20_RDS=m
++CONFIG_RADIO_SF16FMI=m
++CONFIG_RADIO_SF16FMR2=m
++CONFIG_RADIO_TERRATEC=m
++CONFIG_RADIO_TRUST=m
++CONFIG_RADIO_TYPHOON=m
++CONFIG_RADIO_TYPHOON_PROC_FS=y
++CONFIG_RADIO_ZOLTRIX=m
++
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB=y
++CONFIG_DVB_CORE=m
++
++#
++# Supported SAA7146 based PCI Adapters
++#
++CONFIG_DVB_AV7110=m
++# CONFIG_DVB_AV7110_OSD is not set
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++
++#
++# Supported USB Adapters
++#
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_DIBUSB=m
++CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES=y
++# CONFIG_DVB_DIBCOM_DEBUG is not set
++CONFIG_DVB_CINERGYT2=m
++# CONFIG_DVB_CINERGYT2_TUNING is not set
++
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
++CONFIG_DVB_B2C2_SKYSTAR=m
++
++#
++# Supported BT878 Adapters
++#
++CONFIG_DVB_BT8XX=m
++
++#
++# Supported DVB Frontends
++#
++
++#
++# Customise DVB Frontends
++#
++
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_TDA80XX=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_VES1X93=m
++
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
++
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_ATMEL_AT76C651=m
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_STV0297=m
++
++#
++# ATSC (North American/Korean Terrestrial DTV) frontends
++#
++CONFIG_DVB_NXT2002=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_VIDEOBUF=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BUF=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_IR=m
++CONFIG_VIDEO_TVEEPROM=m
++
++#
++# Graphics support
++#
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=m
++CONFIG_FB_CFB_COPYAREA=m
++CONFIG_FB_CFB_IMAGEBLIT=m
++CONFIG_FB_SOFT_CURSOR=m
++# CONFIG_FB_MACMODES is not set
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_FB_CIRRUS=m
++CONFIG_FB_PM2=m
++CONFIG_FB_PM2_FIFO_DISCONNECT=y
++CONFIG_FB_CYBER2000=m
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++CONFIG_FB_VGA16=m
++# CONFIG_FB_VESA is not set
++CONFIG_VIDEO_SELECT=y
++CONFIG_FB_HGA=m
++# CONFIG_FB_HGA_ACCEL is not set
++CONFIG_FB_NVIDIA=m
++CONFIG_FB_NVIDIA_I2C=y
++CONFIG_FB_RIVA=m
++CONFIG_FB_RIVA_I2C=y
++CONFIG_FB_RIVA_DEBUG=y
++CONFIG_FB_I810=m
++# CONFIG_FB_I810_GTF is not set
++CONFIG_FB_INTEL=m
++# CONFIG_FB_INTEL_DEBUG is not set
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++# CONFIG_FB_MATROX_G is not set
++CONFIG_FB_MATROX_I2C=m
++CONFIG_FB_MATROX_MULTIHEAD=y
++CONFIG_FB_RADEON_OLD=m
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++CONFIG_FB_ATY_GENERIC_LCD=y
++CONFIG_FB_ATY_XL_INIT=y
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++CONFIG_FB_SAVAGE_ACCEL=y
++CONFIG_FB_SIS=m
++CONFIG_FB_SIS_300=y
++CONFIG_FB_SIS_315=y
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++# CONFIG_FB_3DFX_ACCEL is not set
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_TRIDENT=m
++# CONFIG_FB_TRIDENT_ACCEL is not set
++# CONFIG_FB_PM3 is not set
++CONFIG_FB_GEODE=y
++CONFIG_FB_GEODE_GX1=m
++CONFIG_FB_S1D13XXX=m
++CONFIG_FB_VIRTUAL=m
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++CONFIG_MDA_CONSOLE=m
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=m
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++# CONFIG_LOGO is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=m
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_RTCTIMER=m
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++CONFIG_SND_GENERIC_PM=y
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_OPL4_LIB=m
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_DUMMY=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
++
++#
++# ISA devices
++#
++CONFIG_SND_AD1848_LIB=m
++CONFIG_SND_CS4231_LIB=m
++CONFIG_SND_AD1816A=m
++CONFIG_SND_AD1848=m
++CONFIG_SND_CS4231=m
++CONFIG_SND_CS4232=m
++CONFIG_SND_CS4236=m
++CONFIG_SND_ES968=m
++CONFIG_SND_ES1688=m
++CONFIG_SND_ES18XX=m
++CONFIG_SND_GUS_SYNTH=m
++CONFIG_SND_GUSCLASSIC=m
++CONFIG_SND_GUSEXTREME=m
++CONFIG_SND_GUSMAX=m
++CONFIG_SND_INTERWAVE=m
++CONFIG_SND_INTERWAVE_STB=m
++CONFIG_SND_OPTI92X_AD1848=m
++CONFIG_SND_OPTI92X_CS4231=m
++CONFIG_SND_OPTI93X=m
++CONFIG_SND_SB8=m
++CONFIG_SND_SB16=m
++CONFIG_SND_SBAWE=m
++CONFIG_SND_SB16_CSP=y
++CONFIG_SND_WAVEFRONT=m
++CONFIG_SND_ALS100=m
++CONFIG_SND_AZT2320=m
++CONFIG_SND_CMI8330=m
++CONFIG_SND_DT019X=m
++CONFIG_SND_OPL3SA2=m
++CONFIG_SND_SGALAXY=m
++CONFIG_SND_SSCAPE=m
++
++#
++# PCI devices
++#
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CS4281=m
++CONFIG_SND_EMU10K1=m
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_CA0106 is not set
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_YMFPCI=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_VIA82XX=m
++# CONFIG_SND_VIA82XX_MODEM is not set
++CONFIG_SND_VX222=m
++CONFIG_SND_HDA_INTEL=m
++
++#
++# USB devices
++#
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_USX2Y=m
++
++#
++# PCMCIA devices
++#
++CONFIG_SND_VXPOCKET=m
++CONFIG_SND_VXP440=m
++CONFIG_SND_PDAUDIOCF=m
++
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=m
++CONFIG_SOUND_BT878=m
++CONFIG_SOUND_CMPCI=m
++# CONFIG_SOUND_CMPCI_FM is not set
++# CONFIG_SOUND_CMPCI_MIDI is not set
++CONFIG_SOUND_CMPCI_JOYSTICK=y
++CONFIG_SOUND_EMU10K1=m
++CONFIG_MIDI_EMU10K1=y
++CONFIG_SOUND_FUSION=m
++CONFIG_SOUND_CS4281=m
++CONFIG_SOUND_ES1370=m
++CONFIG_SOUND_ES1371=m
++CONFIG_SOUND_ESSSOLO1=m
++CONFIG_SOUND_MAESTRO=m
++CONFIG_SOUND_MAESTRO3=m
++CONFIG_SOUND_ICH=m
++CONFIG_SOUND_SONICVIBES=m
++CONFIG_SOUND_TRIDENT=m
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++CONFIG_SOUND_VIA82CXXX=m
++CONFIG_MIDI_VIA82CXXX=y
++CONFIG_SOUND_OSS=m
++# CONFIG_SOUND_TRACEINIT is not set
++# CONFIG_SOUND_DMAP is not set
++# CONFIG_SOUND_AD1816 is not set
++CONFIG_SOUND_AD1889=m
++CONFIG_SOUND_SGALAXY=m
++CONFIG_SOUND_ADLIB=m
++CONFIG_SOUND_ACI_MIXER=m
++CONFIG_SOUND_CS4232=m
++CONFIG_SOUND_SSCAPE=m
++CONFIG_SOUND_GUS=m
++CONFIG_SOUND_GUS16=y
++CONFIG_SOUND_GUSMAX=y
++CONFIG_SOUND_VMIDI=m
++CONFIG_SOUND_TRIX=m
++CONFIG_SOUND_MSS=m
++CONFIG_SOUND_MPU401=m
++CONFIG_SOUND_NM256=m
++CONFIG_SOUND_MAD16=m
++CONFIG_MAD16_OLDCARD=y
++CONFIG_SOUND_PAS=m
++CONFIG_SOUND_PSS=m
++CONFIG_PSS_MIXER=y
++CONFIG_SOUND_SB=m
++# CONFIG_SOUND_AWE32_SYNTH is not set
++CONFIG_SOUND_WAVEFRONT=m
++CONFIG_SOUND_MAUI=m
++CONFIG_SOUND_YM3812=m
++CONFIG_SOUND_OPL3SA1=m
++CONFIG_SOUND_OPL3SA2=m
++CONFIG_SOUND_YMFPCI=m
++# CONFIG_SOUND_YMFPCI_LEGACY is not set
++CONFIG_SOUND_UART6850=m
++CONFIG_SOUND_AEDSP16=m
++CONFIG_SC6600=y
++CONFIG_SC6600_JOY=y
++CONFIG_SC6600_CDROM=4
++CONFIG_SC6600_CDROMBASE=0x0
++# CONFIG_AEDSP16_MSS is not set
++# CONFIG_AEDSP16_SBPRO is not set
++# CONFIG_AEDSP16_MPU401 is not set
++CONFIG_SOUND_TVMIXER=m
++CONFIG_SOUND_KAHLUA=m
++CONFIG_SOUND_ALI5455=m
++CONFIG_SOUND_FORTE=m
++CONFIG_SOUND_RME96XX=m
++CONFIG_SOUND_AD1980=m
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_SPLIT_ISO=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_OHCI_HCD=m
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=m
++CONFIG_USB_SL811_HCD=m
++CONFIG_USB_SL811_CS=m
++
++#
++# USB Device Class drivers
++#
++CONFIG_USB_AUDIO=m
++
++#
++# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
++#
++CONFIG_USB_MIDI=m
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_DATAFAB=y
++CONFIG_USB_STORAGE_FREECOM=y
++CONFIG_USB_STORAGE_ISD200=y
++CONFIG_USB_STORAGE_DPCM=y
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_SDDR09=y
++CONFIG_USB_STORAGE_SDDR55=y
++CONFIG_USB_STORAGE_JUMPSHOT=y
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=m
++CONFIG_USB_HIDINPUT=y
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++
++#
++# USB HID Boot Protocol drivers
++#
++CONFIG_USB_KBD=m
++CONFIG_USB_MOUSE=m
++CONFIG_USB_AIPTEK=m
++CONFIG_USB_WACOM=m
++CONFIG_USB_KBTAB=m
++CONFIG_USB_POWERMATE=m
++CONFIG_USB_MTOUCH=m
++CONFIG_USB_EGALAX=m
++CONFIG_USB_XPAD=m
++CONFIG_USB_ATI_REMOTE=m
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++
++#
++# USB Multimedia devices
++#
++# CONFIG_USB_DABUSB is not set
++CONFIG_USB_VICAM=m
++CONFIG_USB_DSBR=m
++CONFIG_USB_IBMCAM=m
++CONFIG_USB_KONICAWC=m
++CONFIG_USB_OV511=m
++CONFIG_USB_SE401=m
++CONFIG_USB_SN9C102=m
++CONFIG_USB_STV680=m
++CONFIG_USB_W9968CF=m
++CONFIG_USB_PWC=m
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_USBNET=m
++
++#
++# USB Host-to-Host Cables
++#
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_GENESYS=y
++CONFIG_USB_NET1080=y
++CONFIG_USB_PL2301=y
++CONFIG_USB_KC2190=y
++
++#
++# Intelligent USB Devices/Gadgets
++#
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_ZAURUS=y
++CONFIG_USB_CDCETHER=y
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_AX8817X=y
++CONFIG_USB_ZD1201=m
++CONFIG_USB_MON=m
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_AIRPRIME=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP2101=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++# CONFIG_USB_SERIAL_GARMIN is not set
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_SAFE=m
++# CONFIG_USB_SERIAL_SAFE_PADDED is not set
++# CONFIG_USB_SERIAL_TI is not set
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_OPTION=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_EZUSB=y
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++CONFIG_USB_AUERSWALD=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LED=m
++CONFIG_USB_CYTHERM=m
++CONFIG_USB_PHIDGETKIT=m
++CONFIG_USB_PHIDGETSERVO=m
++# CONFIG_USB_IDMOUSE is not set
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_TEST=m
++
++#
++# USB ATM/DSL drivers
++#
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++
++#
++# USB Gadget Support
++#
++CONFIG_USB_GADGET=m
++# CONFIG_USB_GADGET_DEBUG_FILES is not set
++CONFIG_USB_GADGET_NET2280=y
++CONFIG_USB_NET2280=m
++# CONFIG_USB_GADGET_PXA2XX is not set
++# CONFIG_USB_GADGET_GOKU is not set
++# CONFIG_USB_GADGET_LH7A40X is not set
++# CONFIG_USB_GADGET_OMAP is not set
++# CONFIG_USB_GADGET_DUMMY_HCD is not set
++CONFIG_USB_GADGET_DUALSPEED=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_ETH_RNDIS=y
++CONFIG_USB_GADGETFS=m
++CONFIG_USB_FILE_STORAGE=m
++# CONFIG_USB_FILE_STORAGE_TEST is not set
++CONFIG_USB_G_SERIAL=m
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# Power management options
++#
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BOOT=y
++CONFIG_ACPI_INTERPRETER=y
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_PROCESSOR=m
++# CONFIG_ACPI_HOTPLUG_CPU is not set
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_BUS=y
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_PCI=y
++CONFIG_ACPI_SYSTEM=y
++# CONFIG_X86_PM_TIMER is not set
++# CONFIG_ACPI_CONTAINER is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=m
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=m
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++# CONFIG_JFS_SECURITY is not set
++# CONFIG_JFS_DEBUG is not set
++CONFIG_JFS_STATISTICS=y
++CONFIG_FS_POSIX_ACL=y
++
++#
++# XFS support
++#
++CONFIG_XFS_FS=m
++CONFIG_XFS_EXPORT=y
++CONFIG_XFS_RT=y
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_SECURITY=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++CONFIG_QUOTA=y
++CONFIG_QFMT_V1=m
++CONFIG_QFMT_V2=m
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=m
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=m
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=m
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++CONFIG_DEVPTS_FS_XATTR=y
++CONFIG_DEVPTS_FS_SECURITY=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_TMPFS_SECURITY=y
++# CONFIG_HUGETLBFS is not set
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_JFFS_FS=m
++CONFIG_JFFS_FS_VERBOSE=0
++CONFIG_JFFS_PROC_FS=y
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++# CONFIG_JFFS2_FS_NAND is not set
++# CONFIG_JFFS2_FS_NOR_ECC is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++CONFIG_VXFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++# CONFIG_QNX4FS_RW is not set
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_RPCSEC_GSS_SPKM3=m
++CONFIG_SMB_FS=m
++# CONFIG_SMB_NLS_DEFAULT is not set
++CONFIG_CIFS=m
++# CONFIG_CIFS_STATS is not set
++# CONFIG_CIFS_XATTR is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++# CONFIG_NCPFS_SMALLDOS is not set
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++# CONFIG_CODA_FS_OLD_API is not set
++CONFIG_AFS_FS=m
++CONFIG_RXRPC=m
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++CONFIG_ACORN_PARTITION=y
++CONFIG_ACORN_PARTITION_CUMANA=y
++# CONFIG_ACORN_PARTITION_EESOX is not set
++CONFIG_ACORN_PARTITION_ICS=y
++# CONFIG_ACORN_PARTITION_ADFS is not set
++# CONFIG_ACORN_PARTITION_POWERTEC is not set
++CONFIG_ACORN_PARTITION_RISCIX=y
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++CONFIG_ATARI_PARTITION=y
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_SGI_PARTITION=y
++CONFIG_ULTRIX_PARTITION=y
++CONFIG_SUN_PARTITION=y
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=m
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_UTF8=m
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
++CONFIG_SECURITY=y
++# CONFIG_SECURITY_NETWORK is not set
++CONFIG_SECURITY_CAPABILITIES=y
++CONFIG_SECURITY_ROOTPLUG=m
++CONFIG_SECURITY_SECLVL=m
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=m
++CONFIG_CRYPTO_SHA256=m
++CONFIG_CRYPTO_SHA512=m
++CONFIG_CRYPTO_WP512=m
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_AES_586=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_CRC32C=m
++CONFIG_CRYPTO_TEST=m
++
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=m
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=m
++CONFIG_REED_SOLOMON=m
++CONFIG_REED_SOLOMON_DEC16=y
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_KPROBES is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
+diff -Nurp pristine-linux-2.6.12/.config.cmd linux-2.6.12-xen/.config.cmd
+--- pristine-linux-2.6.12/.config.cmd	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/.config.cmd	2006-03-05 23:55:06.826653101 +0100
+@@ -0,0 +1,191 @@
++deps_config := \
++	lib/Kconfig.debug \
++	arch/xen/Kconfig.debug \
++	lib/Kconfig \
++	drivers/crypto/Kconfig \
++	crypto/Kconfig \
++	security/selinux/Kconfig \
++	security/Kconfig \
++	fs/nls/Kconfig \
++	fs/partitions/Kconfig \
++	fs/ncpfs/Kconfig \
++	fs/xfs/Kconfig \
++	fs/Kconfig \
++	drivers/acpi/Kconfig \
++	drivers/char/tpm/Kconfig.domU \
++	drivers/infiniband/ulp/ipoib/Kconfig \
++	drivers/infiniband/hw/mthca/Kconfig \
++	drivers/infiniband/Kconfig \
++	drivers/mmc/Kconfig \
++	drivers/usb/gadget/Kconfig \
++	drivers/usb/atm/Kconfig \
++	drivers/usb/misc/sisusbvga/Kconfig \
++	drivers/usb/misc/Kconfig \
++	drivers/usb/serial/Kconfig \
++	drivers/usb/mon/Kconfig \
++	drivers/usb/net/Kconfig \
++	drivers/usb/media/Kconfig \
++	drivers/usb/image/Kconfig \
++	drivers/usb/input/Kconfig \
++	drivers/usb/storage/Kconfig \
++	drivers/usb/class/Kconfig \
++	drivers/usb/host/Kconfig \
++	drivers/usb/core/Kconfig \
++	drivers/usb/Kconfig \
++	sound/oss/Kconfig \
++	sound/parisc/Kconfig \
++	sound/sparc/Kconfig \
++	sound/pcmcia/Kconfig \
++	sound/usb/Kconfig \
++	sound/mips/Kconfig \
++	sound/arm/Kconfig \
++	sound/ppc/Kconfig \
++	sound/pci/Kconfig \
++	sound/isa/Kconfig \
++	sound/drivers/Kconfig \
++	sound/core/Kconfig \
++	sound/oss/dmasound/Kconfig \
++	sound/Kconfig \
++	drivers/video/backlight/Kconfig \
++	drivers/video/logo/Kconfig \
++	drivers/video/console/Kconfig \
++	drivers/video/geode/Kconfig \
++	drivers/video/Kconfig \
++	drivers/media/common/Kconfig \
++	drivers/media/dvb/frontends/Kconfig \
++	drivers/media/dvb/bt8xx/Kconfig \
++	drivers/media/dvb/b2c2/Kconfig \
++	drivers/media/dvb/cinergyT2/Kconfig \
++	drivers/media/dvb/dibusb/Kconfig \
++	drivers/media/dvb/ttusb-dec/Kconfig \
++	drivers/media/dvb/ttusb-budget/Kconfig \
++	drivers/media/dvb/ttpci/Kconfig \
++	drivers/media/dvb/dvb-core/Kconfig \
++	drivers/media/dvb/Kconfig \
++	drivers/media/radio/Kconfig \
++	drivers/media/video/Kconfig \
++	drivers/media/Kconfig \
++	drivers/misc/Kconfig \
++	drivers/w1/Kconfig \
++	drivers/i2c/chips/Kconfig \
++	drivers/i2c/busses/Kconfig \
++	drivers/i2c/algos/Kconfig \
++	drivers/i2c/Kconfig \
++	drivers/char/tpm/Kconfig \
++	drivers/char/pcmcia/Kconfig \
++	drivers/char/drm/Kconfig \
++	drivers/char/agp/Kconfig \
++	drivers/char/ftape/Kconfig \
++	drivers/char/watchdog/Kconfig \
++	drivers/char/ipmi/Kconfig \
++	drivers/serial/Kconfig \
++	drivers/char/Kconfig \
++	drivers/input/gameport/Kconfig \
++	drivers/input/serio/Kconfig \
++	drivers/input/misc/Kconfig \
++	drivers/input/touchscreen/Kconfig \
++	drivers/input/joystick/iforce/Kconfig \
++	drivers/input/joystick/Kconfig \
++	drivers/input/mouse/Kconfig \
++	drivers/input/keyboard/Kconfig \
++	drivers/input/Kconfig \
++	drivers/telephony/Kconfig \
++	drivers/isdn/hardware/eicon/Kconfig \
++	drivers/isdn/hardware/avm/Kconfig \
++	drivers/isdn/hardware/Kconfig \
++	drivers/isdn/capi/Kconfig \
++	drivers/isdn/hysdn/Kconfig \
++	drivers/isdn/act2000/Kconfig \
++	drivers/isdn/sc/Kconfig \
++	drivers/isdn/pcbit/Kconfig \
++	drivers/isdn/icn/Kconfig \
++	drivers/isdn/hisax/Kconfig \
++	drivers/isdn/i4l/Kconfig \
++	drivers/isdn/Kconfig \
++	drivers/s390/net/Kconfig \
++	drivers/atm/Kconfig \
++	drivers/net/wan/Kconfig \
++	drivers/net/pcmcia/Kconfig \
++	drivers/net/wireless/Kconfig \
++	drivers/net/tokenring/Kconfig \
++	drivers/net/fec_8xx/Kconfig \
++	drivers/net/tulip/Kconfig \
++	drivers/net/arm/Kconfig \
++	drivers/net/arcnet/Kconfig \
++	drivers/net/Kconfig \
++	drivers/bluetooth/Kconfig \
++	net/bluetooth/hidp/Kconfig \
++	net/bluetooth/cmtp/Kconfig \
++	net/bluetooth/bnep/Kconfig \
++	net/bluetooth/rfcomm/Kconfig \
++	net/bluetooth/Kconfig \
++	drivers/net/irda/Kconfig \
++	net/irda/ircomm/Kconfig \
++	net/irda/irnet/Kconfig \
++	net/irda/irlan/Kconfig \
++	net/irda/Kconfig \
++	drivers/net/hamradio/Kconfig \
++	net/ax25/Kconfig \
++	net/sched/Kconfig \
++	drivers/net/appletalk/Kconfig \
++	net/ipx/Kconfig \
++	net/llc/Kconfig \
++	net/decnet/Kconfig \
++	net/sctp/Kconfig \
++	net/xfrm/Kconfig \
++	net/bridge/netfilter/Kconfig \
++	net/decnet/netfilter/Kconfig \
++	net/ipv6/netfilter/Kconfig \
++	net/ipv4/netfilter/Kconfig \
++	net/ipv6/Kconfig \
++	net/ipv4/ipvs/Kconfig \
++	net/ipv4/Kconfig \
++	net/Kconfig \
++	drivers/message/i2o/Kconfig \
++	drivers/ieee1394/Kconfig \
++	drivers/message/fusion/Kconfig \
++	drivers/md/Kconfig \
++	drivers/cdrom/Kconfig \
++	drivers/scsi/pcmcia/Kconfig \
++	drivers/scsi/arm/Kconfig \
++	drivers/scsi/qla2xxx/Kconfig \
++	drivers/scsi/megaraid/Kconfig.megaraid \
++	drivers/scsi/aic7xxx/Kconfig.aic79xx \
++	drivers/scsi/aic7xxx/Kconfig.aic7xxx \
++	drivers/scsi/Kconfig \
++	drivers/ide/Kconfig \
++	drivers/block/Kconfig.iosched \
++	drivers/s390/block/Kconfig \
++	drivers/block/paride/Kconfig \
++	drivers/block/Kconfig \
++	drivers/pnp/pnpacpi/Kconfig \
++	drivers/pnp/pnpbios/Kconfig \
++	drivers/pnp/isapnp/Kconfig \
++	drivers/pnp/Kconfig \
++	drivers/parport/Kconfig \
++	drivers/mtd/nand/Kconfig \
++	drivers/mtd/devices/Kconfig \
++	drivers/mtd/maps/Kconfig \
++	drivers/mtd/chips/Kconfig \
++	drivers/mtd/Kconfig \
++	drivers/base/Kconfig \
++	arch/xen/Kconfig.drivers \
++	fs/Kconfig.binfmt \
++	drivers/cpufreq/Kconfig \
++	arch/x86_64/kernel/cpufreq/Kconfig \
++	kernel/power/Kconfig \
++	arch/xen/x86_64/Kconfig \
++	drivers/pci/hotplug/Kconfig \
++	drivers/pcmcia/Kconfig \
++	drivers/mca/Kconfig \
++	drivers/eisa/Kconfig \
++	drivers/pci/Kconfig \
++	drivers/pci/pcie/Kconfig \
++	drivers/firmware/Kconfig \
++	arch/xen/i386/Kconfig \
++	init/Kconfig \
++	arch/xen/Kconfig
++
++.config include/linux/autoconf.h: $(deps_config)
++
++$(deps_config):
+diff -Nurp pristine-linux-2.6.12/.config.old linux-2.6.12-xen/.config.old
+--- pristine-linux-2.6.12/.config.old	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/.config.old	2006-03-05 23:55:00.603571330 +0100
+@@ -0,0 +1,2966 @@
++#
++# Automatically generated make config: don't edit
++# Linux kernel version: 2.6.12.6-xen
++# Sun Mar  5 23:55:00 2006
++#
++CONFIG_XEN=y
++CONFIG_ARCH_XEN=y
++CONFIG_NO_IDLE_HZ=y
++
++#
++# XEN
++#
++CONFIG_XEN_PRIVILEGED_GUEST=y
++CONFIG_XEN_PHYSDEV_ACCESS=y
++CONFIG_XEN_BLKDEV_BACKEND=y
++# CONFIG_XEN_BLKDEV_TAP_BE is not set
++CONFIG_XEN_NETDEV_BACKEND=y
++# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_TPMDEV_FRONTEND is not set
++# CONFIG_XEN_TPMDEV_BACKEND is not set
++CONFIG_XEN_BLKDEV_FRONTEND=y
++CONFIG_XEN_NETDEV_FRONTEND=y
++# CONFIG_XEN_BLKDEV_TAP is not set
++# CONFIG_XEN_SHADOW_MODE is not set
++CONFIG_XEN_SCRUB_PAGES=y
++CONFIG_XEN_X86=y
++# CONFIG_XEN_X86_64 is not set
++CONFIG_HAVE_ARCH_ALLOC_SKB=y
++CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
++
++#
++# Code maturity level options
++#
++CONFIG_EXPERIMENTAL=y
++# CONFIG_CLEAN_COMPILE is not set
++CONFIG_BROKEN=y
++CONFIG_BROKEN_ON_SMP=y
++CONFIG_LOCK_KERNEL=y
++CONFIG_INIT_ENV_ARG_LIMIT=32
++
++#
++# General setup
++#
++CONFIG_LOCALVERSION=""
++CONFIG_SWAP=y
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_BSD_PROCESS_ACCT=y
++# CONFIG_BSD_PROCESS_ACCT_V3 is not set
++CONFIG_SYSCTL=y
++# CONFIG_AUDIT is not set
++CONFIG_HOTPLUG=y
++CONFIG_KOBJECT_UEVENT=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++# CONFIG_CPUSETS is not set
++CONFIG_EMBEDDED=y
++CONFIG_KALLSYMS=y
++# CONFIG_KALLSYMS_ALL is not set
++# CONFIG_KALLSYMS_EXTRA_PASS is not set
++CONFIG_PRINTK=y
++CONFIG_BUG=y
++CONFIG_BASE_FULL=y
++CONFIG_FUTEX=y
++CONFIG_EPOLL=y
++# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
++CONFIG_SHMEM=y
++CONFIG_CC_ALIGN_FUNCTIONS=0
++CONFIG_CC_ALIGN_LABELS=0
++CONFIG_CC_ALIGN_LOOPS=0
++CONFIG_CC_ALIGN_JUMPS=0
++# CONFIG_TINY_SHMEM is not set
++CONFIG_BASE_SMALL=0
++
++#
++# Loadable module support
++#
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++CONFIG_OBSOLETE_MODPARM=y
++CONFIG_MODVERSIONS=y
++# CONFIG_MODULE_SRCVERSION_ALL is not set
++CONFIG_KMOD=y
++CONFIG_STOP_MACHINE=y
++
++#
++# X86 Processor Configuration
++#
++CONFIG_XENARCH="i386"
++CONFIG_X86=y
++CONFIG_MMU=y
++CONFIG_UID16=y
++CONFIG_GENERIC_ISA_DMA=y
++CONFIG_GENERIC_IOMAP=y
++# CONFIG_M386 is not set
++# CONFIG_M486 is not set
++# CONFIG_M586 is not set
++# CONFIG_M586TSC is not set
++# CONFIG_M586MMX is not set
++CONFIG_M686=y
++# CONFIG_MPENTIUMII is not set
++# CONFIG_MPENTIUMIII is not set
++# CONFIG_MPENTIUMM is not set
++# CONFIG_MPENTIUM4 is not set
++# CONFIG_MK6 is not set
++# CONFIG_MK7 is not set
++# CONFIG_MK8 is not set
++# CONFIG_MCRUSOE is not set
++# CONFIG_MEFFICEON is not set
++# CONFIG_MWINCHIPC6 is not set
++# CONFIG_MWINCHIP2 is not set
++# CONFIG_MWINCHIP3D is not set
++# CONFIG_MGEODEGX1 is not set
++# CONFIG_MCYRIXIII is not set
++# CONFIG_MVIAC3_2 is not set
++# CONFIG_X86_GENERIC is not set
++CONFIG_X86_CMPXCHG=y
++CONFIG_X86_XADD=y
++CONFIG_X86_L1_CACHE_SHIFT=5
++CONFIG_RWSEM_XCHGADD_ALGORITHM=y
++CONFIG_GENERIC_CALIBRATE_DELAY=y
++CONFIG_X86_PPRO_FENCE=y
++CONFIG_X86_WP_WORKS_OK=y
++CONFIG_X86_INVLPG=y
++CONFIG_X86_BSWAP=y
++CONFIG_X86_POPAD_OK=y
++CONFIG_X86_GOOD_APIC=y
++CONFIG_X86_USE_PPRO_CHECKSUM=y
++# CONFIG_HPET_TIMER is not set
++# CONFIG_HPET_EMULATE_RTC is not set
++CONFIG_SMP=y
++CONFIG_SMP_ALTERNATIVES=y
++CONFIG_NR_CPUS=8
++# CONFIG_SCHED_SMT is not set
++# CONFIG_X86_REBOOTFIXUPS is not set
++CONFIG_MICROCODE=y
++CONFIG_X86_CPUID=m
++CONFIG_SWIOTLB=y
++
++#
++# Firmware Drivers
++#
++CONFIG_EDD=m
++# CONFIG_NOHIGHMEM is not set
++CONFIG_HIGHMEM4G=y
++# CONFIG_HIGHMEM64G is not set
++CONFIG_HIGHMEM=y
++CONFIG_MTRR=y
++CONFIG_HAVE_DEC_LOCK=y
++# CONFIG_REGPARM is not set
++CONFIG_X86_LOCAL_APIC=y
++CONFIG_X86_IO_APIC=y
++CONFIG_HOTPLUG_CPU=y
++
++#
++# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++#
++CONFIG_PCI=y
++# CONFIG_PCI_GOMMCONFIG is not set
++# CONFIG_PCI_GODIRECT is not set
++CONFIG_PCI_GOANY=y
++CONFIG_PCI_DIRECT=y
++CONFIG_PCI_MMCONFIG=y
++# CONFIG_PCIEPORTBUS is not set
++# CONFIG_PCI_MSI is not set
++# CONFIG_PCI_LEGACY_PROC is not set
++CONFIG_PCI_NAMES=y
++# CONFIG_PCI_DEBUG is not set
++CONFIG_ISA_DMA_API=y
++CONFIG_ISA=y
++# CONFIG_EISA is not set
++# CONFIG_MCA is not set
++CONFIG_SCx200=m
++
++#
++# PCCARD (PCMCIA/CardBus) support
++#
++CONFIG_PCCARD=m
++# CONFIG_PCMCIA_DEBUG is not set
++CONFIG_PCMCIA=m
++CONFIG_CARDBUS=y
++
++#
++# PC-card bridges
++#
++CONFIG_YENTA=m
++CONFIG_PD6729=m
++CONFIG_I82092=m
++CONFIG_I82365=m
++CONFIG_TCIC=m
++CONFIG_PCMCIA_PROBE=y
++CONFIG_PCCARD_NONSTATIC=m
++
++#
++# PCI Hotplug Support
++#
++CONFIG_HOTPLUG_PCI=m
++CONFIG_HOTPLUG_PCI_FAKE=m
++# CONFIG_HOTPLUG_PCI_ACPI is not set
++CONFIG_HOTPLUG_PCI_CPCI=y
++CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
++CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
++CONFIG_HOTPLUG_PCI_SHPC=m
++# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
++CONFIG_GENERIC_HARDIRQS=y
++CONFIG_GENERIC_IRQ_PROBE=y
++CONFIG_X86_SMP=y
++CONFIG_X86_BIOS_REBOOT=y
++CONFIG_X86_TRAMPOLINE=y
++CONFIG_SECCOMP=y
++# CONFIG_EARLY_PRINTK is not set
++
++#
++# Executable file formats
++#
++CONFIG_BINFMT_ELF=y
++CONFIG_BINFMT_AOUT=m
++CONFIG_BINFMT_MISC=m
++
++#
++# Device Drivers
++#
++
++#
++# Generic Driver Options
++#
++CONFIG_STANDALONE=y
++CONFIG_PREVENT_FIRMWARE_BUILD=y
++CONFIG_FW_LOADER=m
++# CONFIG_DEBUG_DRIVER is not set
++
++#
++# Memory Technology Devices (MTD)
++#
++CONFIG_MTD=m
++# CONFIG_MTD_DEBUG is not set
++CONFIG_MTD_CONCAT=m
++CONFIG_MTD_PARTITIONS=y
++CONFIG_MTD_REDBOOT_PARTS=m
++CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
++# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
++# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
++# CONFIG_MTD_CMDLINE_PARTS is not set
++
++#
++# User Modules And Translation Layers
++#
++CONFIG_MTD_CHAR=m
++CONFIG_MTD_BLOCK=m
++CONFIG_MTD_BLOCK_RO=m
++CONFIG_FTL=m
++CONFIG_NFTL=m
++CONFIG_NFTL_RW=y
++CONFIG_INFTL=m
++
++#
++# RAM/ROM/Flash chip drivers
++#
++CONFIG_MTD_CFI=m
++CONFIG_MTD_JEDECPROBE=m
++CONFIG_MTD_GEN_PROBE=m
++# CONFIG_MTD_CFI_ADV_OPTIONS is not set
++CONFIG_MTD_MAP_BANK_WIDTH_1=y
++CONFIG_MTD_MAP_BANK_WIDTH_2=y
++CONFIG_MTD_MAP_BANK_WIDTH_4=y
++# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
++# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
++CONFIG_MTD_CFI_I1=y
++CONFIG_MTD_CFI_I2=y
++# CONFIG_MTD_CFI_I4 is not set
++# CONFIG_MTD_CFI_I8 is not set
++CONFIG_MTD_CFI_INTELEXT=m
++CONFIG_MTD_CFI_AMDSTD=m
++CONFIG_MTD_CFI_AMDSTD_RETRY=0
++CONFIG_MTD_CFI_STAA=m
++CONFIG_MTD_CFI_UTIL=m
++CONFIG_MTD_RAM=m
++CONFIG_MTD_ROM=m
++CONFIG_MTD_ABSENT=m
++# CONFIG_MTD_OBSOLETE_CHIPS is not set
++
++#
++# Mapping drivers for chip access
++#
++CONFIG_MTD_COMPLEX_MAPPINGS=y
++CONFIG_MTD_PHYSMAP=m
++CONFIG_MTD_PHYSMAP_START=0x8000000
++CONFIG_MTD_PHYSMAP_LEN=0x4000000
++CONFIG_MTD_PHYSMAP_BANKWIDTH=2
++CONFIG_MTD_PNC2000=m
++CONFIG_MTD_SC520CDP=m
++CONFIG_MTD_NETSC520=m
++CONFIG_MTD_TS5500=m
++CONFIG_MTD_SBC_GXX=m
++CONFIG_MTD_ELAN_104NC=m
++CONFIG_MTD_SCx200_DOCFLASH=m
++# CONFIG_MTD_AMD76XROM is not set
++# CONFIG_MTD_ICHXROM is not set
++# CONFIG_MTD_SCB2_FLASH is not set
++CONFIG_MTD_NETtel=m
++CONFIG_MTD_DILNETPC=m
++CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
++# CONFIG_MTD_L440GX is not set
++CONFIG_MTD_PCI=m
++CONFIG_MTD_PCMCIA=m
++
++#
++# Self-contained MTD device drivers
++#
++CONFIG_MTD_PMC551=m
++# CONFIG_MTD_PMC551_BUGFIX is not set
++# CONFIG_MTD_PMC551_DEBUG is not set
++CONFIG_MTD_SLRAM=m
++CONFIG_MTD_PHRAM=m
++CONFIG_MTD_MTDRAM=m
++CONFIG_MTDRAM_TOTAL_SIZE=4096
++CONFIG_MTDRAM_ERASE_SIZE=128
++CONFIG_MTD_BLKMTD=m
++# CONFIG_MTD_BLOCK2MTD is not set
++
++#
++# Disk-On-Chip Device Drivers
++#
++CONFIG_MTD_DOC2000=m
++CONFIG_MTD_DOC2001=m
++CONFIG_MTD_DOC2001PLUS=m
++CONFIG_MTD_DOCPROBE=m
++CONFIG_MTD_DOCECC=m
++# CONFIG_MTD_DOCPROBE_ADVANCED is not set
++CONFIG_MTD_DOCPROBE_ADDRESS=0
++
++#
++# NAND Flash Device Drivers
++#
++CONFIG_MTD_NAND=m
++# CONFIG_MTD_NAND_VERIFY_WRITE is not set
++CONFIG_MTD_NAND_IDS=m
++CONFIG_MTD_NAND_DISKONCHIP=m
++# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
++CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
++# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
++# CONFIG_MTD_NAND_NANDSIM is not set
++
++#
++# Parallel port support
++#
++CONFIG_PARPORT=m
++CONFIG_PARPORT_PC=m
++CONFIG_PARPORT_SERIAL=m
++CONFIG_PARPORT_PC_FIFO=y
++# CONFIG_PARPORT_PC_SUPERIO is not set
++CONFIG_PARPORT_PC_PCMCIA=m
++CONFIG_PARPORT_NOT_PC=y
++# CONFIG_PARPORT_GSC is not set
++CONFIG_PARPORT_1284=y
++
++#
++# Plug and Play support
++#
++CONFIG_PNP=y
++# CONFIG_PNP_DEBUG is not set
++
++#
++# Protocols
++#
++CONFIG_ISAPNP=y
++# CONFIG_PNPBIOS is not set
++# CONFIG_PNPACPI is not set
++
++#
++# Block devices
++#
++CONFIG_BLK_DEV_FD=m
++CONFIG_BLK_DEV_XD=m
++CONFIG_PARIDE=m
++CONFIG_PARIDE_PARPORT=m
++
++#
++# Parallel IDE high-level drivers
++#
++CONFIG_PARIDE_PD=m
++CONFIG_PARIDE_PCD=m
++CONFIG_PARIDE_PF=m
++CONFIG_PARIDE_PT=m
++CONFIG_PARIDE_PG=m
++
++#
++# Parallel IDE protocol modules
++#
++CONFIG_PARIDE_ATEN=m
++CONFIG_PARIDE_BPCK=m
++CONFIG_PARIDE_BPCK6=m
++CONFIG_PARIDE_COMM=m
++CONFIG_PARIDE_DSTR=m
++CONFIG_PARIDE_FIT2=m
++CONFIG_PARIDE_FIT3=m
++CONFIG_PARIDE_EPAT=m
++# CONFIG_PARIDE_EPATC8 is not set
++CONFIG_PARIDE_EPIA=m
++CONFIG_PARIDE_FRIQ=m
++CONFIG_PARIDE_FRPW=m
++CONFIG_PARIDE_KBIC=m
++CONFIG_PARIDE_KTTI=m
++CONFIG_PARIDE_ON20=m
++CONFIG_PARIDE_ON26=m
++CONFIG_BLK_CPQ_DA=m
++CONFIG_BLK_CPQ_CISS_DA=m
++CONFIG_CISS_SCSI_TAPE=y
++CONFIG_BLK_DEV_DAC960=m
++CONFIG_BLK_DEV_UMEM=m
++# CONFIG_BLK_DEV_COW_COMMON is not set
++CONFIG_BLK_DEV_LOOP=m
++CONFIG_BLK_DEV_CRYPTOLOOP=m
++CONFIG_BLK_DEV_NBD=m
++CONFIG_BLK_DEV_SX8=m
++# CONFIG_BLK_DEV_UB is not set
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_COUNT=16
++CONFIG_BLK_DEV_RAM_SIZE=16384
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_INITRAMFS_SOURCE=""
++CONFIG_LBD=y
++CONFIG_CDROM_PKTCDVD=m
++CONFIG_CDROM_PKTCDVD_BUFFERS=8
++# CONFIG_CDROM_PKTCDVD_WCACHE is not set
++
++#
++# IO Schedulers
++#
++CONFIG_IOSCHED_NOOP=y
++CONFIG_IOSCHED_AS=y
++CONFIG_IOSCHED_DEADLINE=y
++CONFIG_IOSCHED_CFQ=y
++CONFIG_ATA_OVER_ETH=m
++
++#
++# ATA/ATAPI/MFM/RLL support
++#
++CONFIG_IDE=y
++CONFIG_BLK_DEV_IDE=y
++
++#
++# Please see Documentation/ide.txt for help/info on IDE drives
++#
++# CONFIG_BLK_DEV_IDE_SATA is not set
++# CONFIG_BLK_DEV_HD_IDE is not set
++CONFIG_BLK_DEV_IDEDISK=y
++CONFIG_IDEDISK_MULTI_MODE=y
++CONFIG_BLK_DEV_IDECS=m
++CONFIG_BLK_DEV_IDECD=y
++CONFIG_BLK_DEV_IDETAPE=m
++CONFIG_BLK_DEV_IDEFLOPPY=y
++CONFIG_BLK_DEV_IDESCSI=m
++# CONFIG_IDE_TASK_IOCTL is not set
++
++#
++# IDE chipset support/bugfixes
++#
++CONFIG_IDE_GENERIC=y
++CONFIG_BLK_DEV_CMD640=y
++CONFIG_BLK_DEV_CMD640_ENHANCED=y
++CONFIG_BLK_DEV_IDEPNP=y
++CONFIG_BLK_DEV_IDEPCI=y
++CONFIG_IDEPCI_SHARE_IRQ=y
++# CONFIG_BLK_DEV_OFFBOARD is not set
++CONFIG_BLK_DEV_GENERIC=y
++CONFIG_BLK_DEV_OPTI621=m
++CONFIG_BLK_DEV_RZ1000=y
++CONFIG_BLK_DEV_IDEDMA_PCI=y
++# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
++CONFIG_IDEDMA_PCI_AUTO=y
++# CONFIG_IDEDMA_ONLYDISK is not set
++CONFIG_BLK_DEV_AEC62XX=y
++CONFIG_BLK_DEV_ALI15X3=y
++# CONFIG_WDC_ALI15X3 is not set
++CONFIG_BLK_DEV_AMD74XX=y
++CONFIG_BLK_DEV_ATIIXP=y
++CONFIG_BLK_DEV_CMD64X=y
++CONFIG_BLK_DEV_TRIFLEX=y
++CONFIG_BLK_DEV_CY82C693=y
++CONFIG_BLK_DEV_CS5520=y
++CONFIG_BLK_DEV_CS5530=y
++CONFIG_BLK_DEV_HPT34X=y
++# CONFIG_HPT34X_AUTODMA is not set
++CONFIG_BLK_DEV_HPT366=y
++CONFIG_BLK_DEV_SC1200=m
++CONFIG_BLK_DEV_PIIX=y
++CONFIG_BLK_DEV_NS87415=m
++CONFIG_BLK_DEV_PDC202XX_OLD=y
++CONFIG_PDC202XX_BURST=y
++CONFIG_BLK_DEV_PDC202XX_NEW=y
++CONFIG_PDC202XX_FORCE=y
++CONFIG_BLK_DEV_SVWKS=y
++CONFIG_BLK_DEV_SIIMAGE=y
++CONFIG_BLK_DEV_SIS5513=y
++CONFIG_BLK_DEV_SLC90E66=y
++CONFIG_BLK_DEV_TRM290=m
++CONFIG_BLK_DEV_VIA82CXXX=y
++# CONFIG_IDE_ARM is not set
++# CONFIG_IDE_CHIPSETS is not set
++CONFIG_BLK_DEV_IDEDMA=y
++# CONFIG_IDEDMA_IVB is not set
++CONFIG_IDEDMA_AUTO=y
++# CONFIG_BLK_DEV_HD is not set
++
++#
++# SCSI device support
++#
++CONFIG_SCSI=m
++CONFIG_SCSI_PROC_FS=y
++
++#
++# SCSI support type (disk, tape, CD-ROM)
++#
++CONFIG_BLK_DEV_SD=m
++CONFIG_CHR_DEV_ST=m
++CONFIG_CHR_DEV_OSST=m
++CONFIG_BLK_DEV_SR=m
++# CONFIG_BLK_DEV_SR_VENDOR is not set
++CONFIG_CHR_DEV_SG=m
++
++#
++# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++#
++CONFIG_SCSI_MULTI_LUN=y
++CONFIG_SCSI_CONSTANTS=y
++CONFIG_SCSI_LOGGING=y
++
++#
++# SCSI Transport Attributes
++#
++CONFIG_SCSI_SPI_ATTRS=m
++CONFIG_SCSI_FC_ATTRS=m
++# CONFIG_SCSI_ISCSI_ATTRS is not set
++
++#
++# SCSI low-level drivers
++#
++CONFIG_BLK_DEV_3W_XXXX_RAID=m
++CONFIG_SCSI_3W_9XXX=m
++# CONFIG_SCSI_7000FASST is not set
++CONFIG_SCSI_ACARD=m
++CONFIG_SCSI_AHA152X=m
++# CONFIG_SCSI_AHA1542 is not set
++CONFIG_SCSI_AACRAID=m
++CONFIG_SCSI_AIC7XXX=m
++CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
++CONFIG_AIC7XXX_RESET_DELAY_MS=15000
++CONFIG_AIC7XXX_DEBUG_ENABLE=y
++CONFIG_AIC7XXX_DEBUG_MASK=0
++CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_AIC7XXX_OLD=m
++CONFIG_SCSI_AIC79XX=m
++CONFIG_AIC79XX_CMDS_PER_DEVICE=32
++CONFIG_AIC79XX_RESET_DELAY_MS=15000
++CONFIG_AIC79XX_ENABLE_RD_STRM=y
++CONFIG_AIC79XX_DEBUG_ENABLE=y
++CONFIG_AIC79XX_DEBUG_MASK=0
++CONFIG_AIC79XX_REG_PRETTY_PRINT=y
++CONFIG_SCSI_DPT_I2O=m
++CONFIG_SCSI_ADVANSYS=m
++CONFIG_SCSI_IN2000=m
++CONFIG_MEGARAID_NEWGEN=y
++CONFIG_MEGARAID_MM=m
++CONFIG_MEGARAID_MAILBOX=m
++CONFIG_SCSI_SATA=y
++CONFIG_SCSI_SATA_AHCI=m
++CONFIG_SCSI_SATA_SVW=m
++CONFIG_SCSI_ATA_PIIX=m
++CONFIG_SCSI_SATA_NV=m
++CONFIG_SCSI_SATA_PROMISE=m
++# CONFIG_SCSI_SATA_QSTOR is not set
++CONFIG_SCSI_SATA_SX4=m
++CONFIG_SCSI_SATA_SIL=m
++CONFIG_SCSI_SATA_SIS=m
++CONFIG_SCSI_SATA_ULI=m
++CONFIG_SCSI_SATA_VIA=m
++CONFIG_SCSI_SATA_VITESSE=m
++CONFIG_SCSI_BUSLOGIC=m
++# CONFIG_SCSI_OMIT_FLASHPOINT is not set
++# CONFIG_SCSI_CPQFCTS is not set
++CONFIG_SCSI_DMX3191D=m
++CONFIG_SCSI_DTC3280=m
++CONFIG_SCSI_EATA=m
++CONFIG_SCSI_EATA_TAGGED_QUEUE=y
++CONFIG_SCSI_EATA_LINKED_COMMANDS=y
++CONFIG_SCSI_EATA_MAX_TAGS=16
++CONFIG_SCSI_EATA_PIO=m
++CONFIG_SCSI_FUTURE_DOMAIN=m
++CONFIG_SCSI_GDTH=m
++CONFIG_SCSI_GENERIC_NCR5380=m
++CONFIG_SCSI_GENERIC_NCR5380_MMIO=m
++CONFIG_SCSI_GENERIC_NCR53C400=y
++CONFIG_SCSI_IPS=m
++# CONFIG_SCSI_INITIO is not set
++# CONFIG_SCSI_INIA100 is not set
++CONFIG_SCSI_PPA=m
++CONFIG_SCSI_IMM=m
++# CONFIG_SCSI_IZIP_EPP16 is not set
++# CONFIG_SCSI_IZIP_SLOW_CTR is not set
++CONFIG_SCSI_NCR53C406A=m
++CONFIG_SCSI_SYM53C8XX_2=m
++CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
++CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
++CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
++# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
++CONFIG_SCSI_IPR=m
++# CONFIG_SCSI_IPR_TRACE is not set
++# CONFIG_SCSI_IPR_DUMP is not set
++CONFIG_SCSI_PAS16=m
++# CONFIG_SCSI_PCI2000 is not set
++# CONFIG_SCSI_PCI2220I is not set
++CONFIG_SCSI_PSI240I=m
++CONFIG_SCSI_QLOGIC_FAS=m
++CONFIG_SCSI_QLOGIC_ISP=m
++CONFIG_SCSI_QLOGIC_FC=m
++CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
++CONFIG_SCSI_QLOGIC_1280=m
++CONFIG_SCSI_QLOGIC_1280_1040=y
++CONFIG_SCSI_QLA2XXX=m
++CONFIG_SCSI_QLA21XX=m
++CONFIG_SCSI_QLA22XX=m
++CONFIG_SCSI_QLA2300=m
++CONFIG_SCSI_QLA2322=m
++CONFIG_SCSI_QLA6312=m
++CONFIG_SCSI_LPFC=m
++# CONFIG_SCSI_SEAGATE is not set
++CONFIG_SCSI_SYM53C416=m
++CONFIG_SCSI_DC395x=m
++CONFIG_SCSI_DC390T=m
++CONFIG_SCSI_T128=m
++CONFIG_SCSI_U14_34F=m
++CONFIG_SCSI_U14_34F_TAGGED_QUEUE=y
++CONFIG_SCSI_U14_34F_LINKED_COMMANDS=y
++CONFIG_SCSI_U14_34F_MAX_TAGS=8
++# CONFIG_SCSI_ULTRASTOR is not set
++CONFIG_SCSI_NSP32=m
++CONFIG_SCSI_DEBUG=m
++
++#
++# PCMCIA SCSI adapter support
++#
++CONFIG_PCMCIA_AHA152X=m
++CONFIG_PCMCIA_FDOMAIN=m
++CONFIG_PCMCIA_NINJA_SCSI=m
++CONFIG_PCMCIA_QLOGIC=m
++CONFIG_PCMCIA_SYM53C500=m
++
++#
++# Old CD-ROM drivers (not SCSI, not IDE)
++#
++CONFIG_CD_NO_IDESCSI=y
++CONFIG_AZTCD=m
++CONFIG_GSCD=m
++# CONFIG_SBPCD is not set
++CONFIG_MCDX=m
++CONFIG_OPTCD=m
++# CONFIG_CM206 is not set
++CONFIG_SJCD=m
++CONFIG_ISP16_CDI=m
++CONFIG_CDU31A=m
++CONFIG_CDU535=m
++
++#
++# Multi-device support (RAID and LVM)
++#
++CONFIG_MD=y
++CONFIG_BLK_DEV_MD=m
++CONFIG_MD_LINEAR=m
++CONFIG_MD_RAID0=m
++CONFIG_MD_RAID1=m
++CONFIG_MD_RAID10=m
++CONFIG_MD_RAID5=m
++CONFIG_MD_RAID6=m
++CONFIG_MD_MULTIPATH=m
++CONFIG_MD_FAULTY=m
++CONFIG_BLK_DEV_DM=m
++CONFIG_DM_CRYPT=m
++CONFIG_DM_SNAPSHOT=m
++CONFIG_DM_MIRROR=m
++CONFIG_DM_ZERO=m
++CONFIG_DM_MULTIPATH=m
++CONFIG_DM_MULTIPATH_EMC=m
++
++#
++# Fusion MPT device support
++#
++CONFIG_FUSION=m
++CONFIG_FUSION_MAX_SGE=40
++CONFIG_FUSION_CTL=m
++CONFIG_FUSION_LAN=m
++
++#
++# IEEE 1394 (FireWire) support
++#
++CONFIG_IEEE1394=m
++
++#
++# Subsystem Options
++#
++# CONFIG_IEEE1394_VERBOSEDEBUG is not set
++# CONFIG_IEEE1394_OUI_DB is not set
++CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
++CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
++
++#
++# Device Drivers
++#
++CONFIG_IEEE1394_PCILYNX=m
++CONFIG_IEEE1394_OHCI1394=m
++
++#
++# Protocol Drivers
++#
++CONFIG_IEEE1394_VIDEO1394=m
++CONFIG_IEEE1394_SBP2=m
++# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
++CONFIG_IEEE1394_ETH1394=m
++CONFIG_IEEE1394_DV1394=m
++CONFIG_IEEE1394_RAWIO=m
++CONFIG_IEEE1394_CMP=m
++CONFIG_IEEE1394_AMDTP=m
++
++#
++# I2O device support
++#
++CONFIG_I2O=m
++CONFIG_I2O_CONFIG=m
++CONFIG_I2O_BLOCK=m
++CONFIG_I2O_SCSI=m
++CONFIG_I2O_PROC=m
++
++#
++# Networking support
++#
++CONFIG_NET=y
++
++#
++# Networking options
++#
++CONFIG_PACKET=m
++CONFIG_PACKET_MMAP=y
++CONFIG_UNIX=m
++CONFIG_NET_KEY=m
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_MULTIPLE_TABLES=y
++CONFIG_IP_ROUTE_FWMARK=y
++CONFIG_IP_ROUTE_MULTIPATH=y
++# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
++CONFIG_IP_ROUTE_VERBOSE=y
++# CONFIG_IP_PNP is not set
++CONFIG_NET_IPIP=m
++CONFIG_NET_IPGRE=m
++CONFIG_NET_IPGRE_BROADCAST=y
++CONFIG_IP_MROUTE=y
++CONFIG_IP_PIMSM_V1=y
++CONFIG_IP_PIMSM_V2=y
++# CONFIG_ARPD is not set
++CONFIG_SYN_COOKIES=y
++CONFIG_INET_AH=m
++CONFIG_INET_ESP=m
++CONFIG_INET_IPCOMP=m
++CONFIG_INET_TUNNEL=m
++CONFIG_IP_TCPDIAG=m
++CONFIG_IP_TCPDIAG_IPV6=y
++
++#
++# IP: Virtual Server Configuration
++#
++CONFIG_IP_VS=m
++# CONFIG_IP_VS_DEBUG is not set
++CONFIG_IP_VS_TAB_BITS=12
++
++#
++# IPVS transport protocol load balancing support
++#
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_PROTO_ESP=y
++CONFIG_IP_VS_PROTO_AH=y
++
++#
++# IPVS scheduler
++#
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_WRR=m
++CONFIG_IP_VS_LC=m
++CONFIG_IP_VS_WLC=m
++CONFIG_IP_VS_LBLC=m
++CONFIG_IP_VS_LBLCR=m
++CONFIG_IP_VS_DH=m
++CONFIG_IP_VS_SH=m
++CONFIG_IP_VS_SED=m
++CONFIG_IP_VS_NQ=m
++
++#
++# IPVS application helper
++#
++CONFIG_IP_VS_FTP=m
++CONFIG_IPV6=m
++CONFIG_IPV6_PRIVACY=y
++CONFIG_INET6_AH=m
++CONFIG_INET6_ESP=m
++CONFIG_INET6_IPCOMP=m
++CONFIG_INET6_TUNNEL=m
++CONFIG_IPV6_TUNNEL=m
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_DEBUG is not set
++CONFIG_BRIDGE_NETFILTER=y
++
++#
++# IP: Netfilter Configuration
++#
++CONFIG_IP_NF_CONNTRACK=m
++CONFIG_IP_NF_CT_ACCT=y
++CONFIG_IP_NF_CONNTRACK_MARK=y
++CONFIG_IP_NF_CT_PROTO_SCTP=m
++CONFIG_IP_NF_FTP=m
++CONFIG_IP_NF_IRC=m
++CONFIG_IP_NF_TFTP=m
++CONFIG_IP_NF_AMANDA=m
++CONFIG_IP_NF_QUEUE=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_MATCH_LIMIT=m
++CONFIG_IP_NF_MATCH_IPRANGE=m
++CONFIG_IP_NF_MATCH_MAC=m
++CONFIG_IP_NF_MATCH_PKTTYPE=m
++CONFIG_IP_NF_MATCH_MARK=m
++CONFIG_IP_NF_MATCH_MULTIPORT=m
++CONFIG_IP_NF_MATCH_TOS=m
++CONFIG_IP_NF_MATCH_RECENT=m
++CONFIG_IP_NF_MATCH_ECN=m
++CONFIG_IP_NF_MATCH_DSCP=m
++CONFIG_IP_NF_MATCH_AH_ESP=m
++CONFIG_IP_NF_MATCH_LENGTH=m
++CONFIG_IP_NF_MATCH_TTL=m
++CONFIG_IP_NF_MATCH_TCPMSS=m
++CONFIG_IP_NF_MATCH_HELPER=m
++CONFIG_IP_NF_MATCH_STATE=m
++CONFIG_IP_NF_MATCH_CONNTRACK=m
++CONFIG_IP_NF_MATCH_OWNER=m
++CONFIG_IP_NF_MATCH_PHYSDEV=m
++CONFIG_IP_NF_MATCH_ADDRTYPE=m
++CONFIG_IP_NF_MATCH_REALM=m
++CONFIG_IP_NF_MATCH_SCTP=m
++CONFIG_IP_NF_MATCH_COMMENT=m
++CONFIG_IP_NF_MATCH_CONNMARK=m
++CONFIG_IP_NF_MATCH_HASHLIMIT=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_TARGET_LOG=m
++CONFIG_IP_NF_TARGET_ULOG=m
++CONFIG_IP_NF_TARGET_TCPMSS=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_NAT_NEEDED=y
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_TARGET_NETMAP=m
++CONFIG_IP_NF_TARGET_SAME=m
++CONFIG_IP_NF_NAT_SNMP_BASIC=m
++CONFIG_IP_NF_NAT_IRC=m
++CONFIG_IP_NF_NAT_FTP=m
++CONFIG_IP_NF_NAT_TFTP=m
++CONFIG_IP_NF_NAT_AMANDA=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_IP_NF_TARGET_TOS=m
++CONFIG_IP_NF_TARGET_ECN=m
++CONFIG_IP_NF_TARGET_DSCP=m
++CONFIG_IP_NF_TARGET_MARK=m
++CONFIG_IP_NF_TARGET_CLASSIFY=m
++CONFIG_IP_NF_TARGET_CONNMARK=m
++CONFIG_IP_NF_TARGET_CLUSTERIP=m
++CONFIG_IP_NF_RAW=m
++CONFIG_IP_NF_TARGET_NOTRACK=m
++CONFIG_IP_NF_ARPTABLES=m
++CONFIG_IP_NF_ARPFILTER=m
++CONFIG_IP_NF_ARP_MANGLE=m
++
++#
++# IPv6: Netfilter Configuration (EXPERIMENTAL)
++#
++CONFIG_IP6_NF_QUEUE=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_LIMIT=m
++CONFIG_IP6_NF_MATCH_MAC=m
++CONFIG_IP6_NF_MATCH_RT=m
++CONFIG_IP6_NF_MATCH_OPTS=m
++CONFIG_IP6_NF_MATCH_FRAG=m
++CONFIG_IP6_NF_MATCH_HL=m
++CONFIG_IP6_NF_MATCH_MULTIPORT=m
++CONFIG_IP6_NF_MATCH_OWNER=m
++CONFIG_IP6_NF_MATCH_MARK=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_MATCH_AHESP=m
++CONFIG_IP6_NF_MATCH_LENGTH=m
++CONFIG_IP6_NF_MATCH_EUI64=m
++CONFIG_IP6_NF_MATCH_PHYSDEV=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_LOG=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_IP6_NF_TARGET_MARK=m
++CONFIG_IP6_NF_RAW=m
++
++#
++# DECnet: Netfilter Configuration
++#
++CONFIG_DECNET_NF_GRABULATOR=m
++
++#
++# Bridge: Netfilter Configuration
++#
++CONFIG_BRIDGE_NF_EBTABLES=m
++CONFIG_BRIDGE_EBT_BROUTE=m
++CONFIG_BRIDGE_EBT_T_FILTER=m
++CONFIG_BRIDGE_EBT_T_NAT=m
++CONFIG_BRIDGE_EBT_802_3=m
++CONFIG_BRIDGE_EBT_AMONG=m
++CONFIG_BRIDGE_EBT_ARP=m
++CONFIG_BRIDGE_EBT_IP=m
++CONFIG_BRIDGE_EBT_LIMIT=m
++CONFIG_BRIDGE_EBT_MARK=m
++CONFIG_BRIDGE_EBT_PKTTYPE=m
++CONFIG_BRIDGE_EBT_STP=m
++CONFIG_BRIDGE_EBT_VLAN=m
++CONFIG_BRIDGE_EBT_ARPREPLY=m
++CONFIG_BRIDGE_EBT_DNAT=m
++CONFIG_BRIDGE_EBT_MARK_T=m
++CONFIG_BRIDGE_EBT_REDIRECT=m
++CONFIG_BRIDGE_EBT_SNAT=m
++CONFIG_BRIDGE_EBT_LOG=m
++# CONFIG_BRIDGE_EBT_ULOG is not set
++CONFIG_XFRM=y
++CONFIG_XFRM_USER=m
++
++#
++# SCTP Configuration (EXPERIMENTAL)
++#
++CONFIG_IP_SCTP=m
++# CONFIG_SCTP_DBG_MSG is not set
++# CONFIG_SCTP_DBG_OBJCNT is not set
++# CONFIG_SCTP_HMAC_NONE is not set
++# CONFIG_SCTP_HMAC_SHA1 is not set
++CONFIG_SCTP_HMAC_MD5=y
++CONFIG_ATM=y
++CONFIG_ATM_CLIP=y
++# CONFIG_ATM_CLIP_NO_ICMP is not set
++CONFIG_ATM_LANE=m
++CONFIG_ATM_MPOA=m
++CONFIG_ATM_BR2684=m
++# CONFIG_ATM_BR2684_IPFILTER is not set
++CONFIG_BRIDGE=m
++CONFIG_VLAN_8021Q=m
++CONFIG_DECNET=m
++# CONFIG_DECNET_ROUTER is not set
++CONFIG_LLC=y
++CONFIG_LLC2=m
++CONFIG_IPX=m
++# CONFIG_IPX_INTERN is not set
++CONFIG_ATALK=m
++CONFIG_DEV_APPLETALK=y
++CONFIG_LTPC=m
++CONFIG_COPS=m
++CONFIG_COPS_DAYNA=y
++CONFIG_COPS_TANGENT=y
++CONFIG_IPDDP=m
++CONFIG_IPDDP_ENCAP=y
++CONFIG_IPDDP_DECAP=y
++CONFIG_X25=m
++CONFIG_LAPB=m
++# CONFIG_NET_DIVERT is not set
++CONFIG_ECONET=m
++CONFIG_ECONET_AUNUDP=y
++CONFIG_ECONET_NATIVE=y
++CONFIG_WAN_ROUTER=m
++
++#
++# QoS and/or fair queueing
++#
++CONFIG_NET_SCHED=y
++CONFIG_NET_SCH_CLK_JIFFIES=y
++# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
++# CONFIG_NET_SCH_CLK_CPU is not set
++CONFIG_NET_SCH_CBQ=m
++CONFIG_NET_SCH_HTB=m
++CONFIG_NET_SCH_HFSC=m
++CONFIG_NET_SCH_ATM=m
++CONFIG_NET_SCH_PRIO=m
++CONFIG_NET_SCH_RED=m
++CONFIG_NET_SCH_SFQ=m
++CONFIG_NET_SCH_TEQL=m
++CONFIG_NET_SCH_TBF=m
++CONFIG_NET_SCH_GRED=m
++CONFIG_NET_SCH_DSMARK=m
++CONFIG_NET_SCH_NETEM=m
++CONFIG_NET_SCH_INGRESS=m
++CONFIG_NET_QOS=y
++CONFIG_NET_ESTIMATOR=y
++CONFIG_NET_CLS=y
++CONFIG_NET_CLS_BASIC=m
++CONFIG_NET_CLS_TCINDEX=m
++CONFIG_NET_CLS_ROUTE4=m
++CONFIG_NET_CLS_ROUTE=y
++CONFIG_NET_CLS_FW=m
++CONFIG_NET_CLS_U32=m
++# CONFIG_CLS_U32_PERF is not set
++# CONFIG_NET_CLS_IND is not set
++# CONFIG_CLS_U32_MARK is not set
++CONFIG_NET_CLS_RSVP=m
++CONFIG_NET_CLS_RSVP6=m
++CONFIG_NET_EMATCH=y
++CONFIG_NET_EMATCH_STACK=32
++CONFIG_NET_EMATCH_CMP=m
++CONFIG_NET_EMATCH_NBYTE=m
++CONFIG_NET_EMATCH_U32=m
++CONFIG_NET_EMATCH_META=m
++# CONFIG_NET_CLS_ACT is not set
++CONFIG_NET_CLS_POLICE=y
++
++#
++# Network testing
++#
++CONFIG_NET_PKTGEN=m
++CONFIG_NETPOLL=y
++# CONFIG_NETPOLL_RX is not set
++# CONFIG_NETPOLL_TRAP is not set
++CONFIG_NET_POLL_CONTROLLER=y
++CONFIG_HAMRADIO=y
++
++#
++# Packet Radio protocols
++#
++CONFIG_AX25=m
++# CONFIG_AX25_DAMA_SLAVE is not set
++CONFIG_NETROM=m
++CONFIG_ROSE=m
++
++#
++# AX.25 network device drivers
++#
++CONFIG_MKISS=m
++CONFIG_6PACK=m
++CONFIG_BPQETHER=m
++# CONFIG_DMASCC is not set
++CONFIG_SCC=m
++# CONFIG_SCC_DELAY is not set
++# CONFIG_SCC_TRXECHO is not set
++CONFIG_BAYCOM_SER_FDX=m
++CONFIG_BAYCOM_SER_HDX=m
++CONFIG_BAYCOM_PAR=m
++CONFIG_BAYCOM_EPP=m
++CONFIG_YAM=m
++CONFIG_IRDA=m
++
++#
++# IrDA protocols
++#
++CONFIG_IRLAN=m
++CONFIG_IRNET=m
++CONFIG_IRCOMM=m
++# CONFIG_IRDA_ULTRA is not set
++
++#
++# IrDA options
++#
++CONFIG_IRDA_CACHE_LAST_LSAP=y
++CONFIG_IRDA_FAST_RR=y
++CONFIG_IRDA_DEBUG=y
++
++#
++# Infrared-port device drivers
++#
++
++#
++# SIR device drivers
++#
++CONFIG_IRTTY_SIR=m
++
++#
++# Dongle support
++#
++CONFIG_DONGLE=y
++CONFIG_ESI_DONGLE=m
++CONFIG_ACTISYS_DONGLE=m
++CONFIG_TEKRAM_DONGLE=m
++CONFIG_LITELINK_DONGLE=m
++CONFIG_MA600_DONGLE=m
++CONFIG_GIRBIL_DONGLE=m
++CONFIG_MCP2120_DONGLE=m
++CONFIG_OLD_BELKIN_DONGLE=m
++CONFIG_ACT200L_DONGLE=m
++
++#
++# Old SIR device drivers
++#
++CONFIG_IRPORT_SIR=m
++
++#
++# Old Serial dongle support
++#
++# CONFIG_DONGLE_OLD is not set
++
++#
++# FIR device drivers
++#
++CONFIG_USB_IRDA=m
++CONFIG_SIGMATEL_FIR=m
++CONFIG_NSC_FIR=m
++CONFIG_WINBOND_FIR=m
++# CONFIG_TOSHIBA_FIR is not set
++CONFIG_SMC_IRCC_FIR=m
++CONFIG_ALI_FIR=m
++CONFIG_VLSI_FIR=m
++CONFIG_VIA_FIR=m
++CONFIG_BT=m
++CONFIG_BT_L2CAP=m
++CONFIG_BT_SCO=m
++CONFIG_BT_RFCOMM=m
++CONFIG_BT_RFCOMM_TTY=y
++CONFIG_BT_BNEP=m
++CONFIG_BT_BNEP_MC_FILTER=y
++CONFIG_BT_BNEP_PROTO_FILTER=y
++CONFIG_BT_CMTP=m
++CONFIG_BT_HIDP=m
++
++#
++# Bluetooth device drivers
++#
++CONFIG_BT_HCIUSB=m
++CONFIG_BT_HCIUSB_SCO=y
++CONFIG_BT_HCIUART=m
++CONFIG_BT_HCIUART_H4=y
++CONFIG_BT_HCIUART_BCSP=y
++# CONFIG_BT_HCIUART_BCSP_TXCRC is not set
++CONFIG_BT_HCIBCM203X=m
++# CONFIG_BT_HCIBPA10X is not set
++CONFIG_BT_HCIBFUSB=m
++CONFIG_BT_HCIDTL1=m
++CONFIG_BT_HCIBT3C=m
++CONFIG_BT_HCIBLUECARD=m
++CONFIG_BT_HCIBTUART=m
++CONFIG_BT_HCIVHCI=m
++CONFIG_NETDEVICES=y
++CONFIG_DUMMY=m
++CONFIG_BONDING=m
++CONFIG_EQUALIZER=m
++CONFIG_TUN=m
++CONFIG_NET_SB1000=m
++
++#
++# ARCnet devices
++#
++CONFIG_ARCNET=m
++CONFIG_ARCNET_1201=m
++CONFIG_ARCNET_1051=m
++CONFIG_ARCNET_RAW=m
++# CONFIG_ARCNET_CAP is not set
++CONFIG_ARCNET_COM90xx=m
++CONFIG_ARCNET_COM90xxIO=m
++CONFIG_ARCNET_RIM_I=m
++CONFIG_ARCNET_COM20020=m
++CONFIG_ARCNET_COM20020_ISA=m
++CONFIG_ARCNET_COM20020_PCI=m
++
++#
++# Ethernet (10 or 100Mbit)
++#
++CONFIG_NET_ETHERNET=y
++CONFIG_MII=m
++CONFIG_HAPPYMEAL=m
++CONFIG_SUNGEM=m
++CONFIG_NET_VENDOR_3COM=y
++CONFIG_EL1=m
++CONFIG_EL2=m
++# CONFIG_ELPLUS is not set
++CONFIG_EL16=m
++CONFIG_EL3=m
++# CONFIG_3C515 is not set
++CONFIG_VORTEX=m
++CONFIG_TYPHOON=m
++# CONFIG_LANCE is not set
++CONFIG_NET_VENDOR_SMC=y
++CONFIG_WD80x3=m
++CONFIG_ULTRA=m
++CONFIG_SMC9194=m
++CONFIG_NET_VENDOR_RACAL=y
++CONFIG_NI5010=m
++CONFIG_NI52=m
++# CONFIG_NI65 is not set
++
++#
++# Tulip family network device support
++#
++CONFIG_NET_TULIP=y
++CONFIG_DE2104X=m
++CONFIG_TULIP=m
++# CONFIG_TULIP_MWI is not set
++# CONFIG_TULIP_MMIO is not set
++# CONFIG_TULIP_NAPI is not set
++CONFIG_DE4X5=m
++CONFIG_WINBOND_840=m
++CONFIG_DM9102=m
++CONFIG_PCMCIA_XIRCOM=m
++# CONFIG_PCMCIA_XIRTULIP is not set
++CONFIG_AT1700=m
++CONFIG_DEPCA=m
++CONFIG_HP100=m
++CONFIG_NET_ISA=y
++CONFIG_E2100=m
++CONFIG_EWRK3=m
++CONFIG_EEXPRESS=m
++CONFIG_EEXPRESS_PRO=m
++CONFIG_HPLAN_PLUS=m
++CONFIG_HPLAN=m
++CONFIG_LP486E=m
++CONFIG_ETH16I=m
++CONFIG_NE2000=m
++CONFIG_ZNET=m
++CONFIG_SEEQ8005=m
++CONFIG_NET_PCI=y
++CONFIG_PCNET32=m
++CONFIG_AMD8111_ETH=m
++# CONFIG_AMD8111E_NAPI is not set
++CONFIG_ADAPTEC_STARFIRE=m
++# CONFIG_ADAPTEC_STARFIRE_NAPI is not set
++CONFIG_AC3200=m
++CONFIG_APRICOT=m
++CONFIG_B44=m
++CONFIG_FORCEDETH=m
++CONFIG_CS89x0=m
++# CONFIG_DGRS is not set
++CONFIG_EEPRO100=m
++CONFIG_E100=m
++CONFIG_FEALNX=m
++CONFIG_NATSEMI=m
++CONFIG_NE2K_PCI=m
++CONFIG_8139CP=m
++CONFIG_8139TOO=m
++CONFIG_8139TOO_PIO=y
++CONFIG_8139TOO_TUNE_TWISTER=y
++CONFIG_8139TOO_8129=y
++# CONFIG_8139_OLD_RX_RESET is not set
++CONFIG_SIS900=m
++CONFIG_EPIC100=m
++CONFIG_SUNDANCE=m
++# CONFIG_SUNDANCE_MMIO is not set
++CONFIG_TLAN=m
++CONFIG_VIA_RHINE=m
++# CONFIG_VIA_RHINE_MMIO is not set
++CONFIG_NET_POCKET=y
++CONFIG_ATP=m
++CONFIG_DE600=m
++CONFIG_DE620=m
++
++#
++# Ethernet (1000 Mbit)
++#
++# CONFIG_ACENIC is not set
++CONFIG_DL2K=m
++CONFIG_E1000=m
++# CONFIG_E1000_NAPI is not set
++CONFIG_NS83820=m
++CONFIG_HAMACHI=m
++CONFIG_YELLOWFIN=m
++CONFIG_R8169=m
++# CONFIG_R8169_NAPI is not set
++# CONFIG_R8169_VLAN is not set
++CONFIG_SK98LIN=m
++CONFIG_VIA_VELOCITY=m
++CONFIG_TIGON3=m
++CONFIG_BNX2=m
++
++#
++# Ethernet (10000 Mbit)
++#
++CONFIG_IXGB=m
++# CONFIG_IXGB_NAPI is not set
++CONFIG_S2IO=m
++# CONFIG_S2IO_NAPI is not set
++# CONFIG_2BUFF_MODE is not set
++
++#
++# Token Ring devices
++#
++CONFIG_TR=y
++CONFIG_IBMTR=m
++CONFIG_IBMOL=m
++CONFIG_IBMLS=m
++CONFIG_3C359=m
++CONFIG_TMS380TR=m
++CONFIG_TMSPCI=m
++CONFIG_SKISA=m
++CONFIG_PROTEON=m
++CONFIG_ABYSS=m
++# CONFIG_SMCTR is not set
++
++#
++# Wireless LAN (non-hamradio)
++#
++CONFIG_NET_RADIO=y
++
++#
++# Obsolete Wireless cards support (pre-802.11)
++#
++CONFIG_STRIP=m
++CONFIG_ARLAN=m
++CONFIG_WAVELAN=m
++CONFIG_PCMCIA_WAVELAN=m
++CONFIG_PCMCIA_NETWAVE=m
++
++#
++# Wireless 802.11 Frequency Hopping cards support
++#
++CONFIG_PCMCIA_RAYCS=m
++
++#
++# Wireless 802.11b ISA/PCI cards support
++#
++CONFIG_AIRO=m
++CONFIG_HERMES=m
++CONFIG_PLX_HERMES=m
++CONFIG_TMD_HERMES=m
++CONFIG_PCI_HERMES=m
++CONFIG_ATMEL=m
++CONFIG_PCI_ATMEL=m
++
++#
++# Wireless 802.11b Pcmcia/Cardbus cards support
++#
++CONFIG_PCMCIA_HERMES=m
++CONFIG_AIRO_CS=m
++CONFIG_PCMCIA_ATMEL=m
++CONFIG_PCMCIA_WL3501=m
++
++#
++# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
++#
++CONFIG_PRISM54=m
++CONFIG_NET_WIRELESS=y
++
++#
++# PCMCIA network device support
++#
++CONFIG_NET_PCMCIA=y
++CONFIG_PCMCIA_3C589=m
++CONFIG_PCMCIA_3C574=m
++CONFIG_PCMCIA_FMVJ18X=m
++CONFIG_PCMCIA_PCNET=m
++CONFIG_PCMCIA_NMCLAN=m
++CONFIG_PCMCIA_SMC91C92=m
++CONFIG_PCMCIA_XIRC2PS=m
++CONFIG_PCMCIA_AXNET=m
++CONFIG_ARCNET_COM20020_CS=m
++CONFIG_PCMCIA_IBMTR=m
++
++#
++# Wan interfaces
++#
++CONFIG_WAN=y
++CONFIG_HOSTESS_SV11=m
++CONFIG_COSA=m
++CONFIG_DSCC4=m
++CONFIG_DSCC4_PCISYNC=y
++CONFIG_DSCC4_PCI_RST=y
++CONFIG_LANMEDIA=m
++CONFIG_SEALEVEL_4021=m
++CONFIG_SYNCLINK_SYNCPPP=m
++CONFIG_HDLC=m
++CONFIG_HDLC_RAW=y
++CONFIG_HDLC_RAW_ETH=y
++CONFIG_HDLC_CISCO=y
++CONFIG_HDLC_FR=y
++CONFIG_HDLC_PPP=y
++CONFIG_HDLC_X25=y
++CONFIG_PCI200SYN=m
++CONFIG_WANXL=m
++CONFIG_PC300=m
++CONFIG_PC300_MLPPP=y
++CONFIG_N2=m
++CONFIG_C101=m
++CONFIG_FARSYNC=m
++CONFIG_DLCI=m
++CONFIG_DLCI_COUNT=24
++CONFIG_DLCI_MAX=8
++CONFIG_SDLA=m
++CONFIG_WAN_ROUTER_DRIVERS=y
++# CONFIG_VENDOR_SANGOMA is not set
++CONFIG_CYCLADES_SYNC=m
++CONFIG_CYCLOMX_X25=y
++CONFIG_LAPBETHER=m
++CONFIG_X25_ASY=m
++CONFIG_SBNI=m
++# CONFIG_SBNI_MULTILINE is not set
++
++#
++# ATM drivers
++#
++CONFIG_ATM_TCP=m
++CONFIG_ATM_LANAI=m
++CONFIG_ATM_ENI=m
++# CONFIG_ATM_ENI_DEBUG is not set
++# CONFIG_ATM_ENI_TUNE_BURST is not set
++CONFIG_ATM_FIRESTREAM=m
++CONFIG_ATM_ZATM=m
++# CONFIG_ATM_ZATM_DEBUG is not set
++CONFIG_ATM_NICSTAR=m
++# CONFIG_ATM_NICSTAR_USE_SUNI is not set
++# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
++CONFIG_ATM_IDT77252=m
++# CONFIG_ATM_IDT77252_DEBUG is not set
++# CONFIG_ATM_IDT77252_RCV_ALL is not set
++CONFIG_ATM_IDT77252_USE_SUNI=y
++CONFIG_ATM_AMBASSADOR=m
++# CONFIG_ATM_AMBASSADOR_DEBUG is not set
++CONFIG_ATM_HORIZON=m
++# CONFIG_ATM_HORIZON_DEBUG is not set
++CONFIG_ATM_IA=m
++# CONFIG_ATM_IA_DEBUG is not set
++CONFIG_ATM_FORE200E_MAYBE=m
++CONFIG_ATM_FORE200E_PCA=y
++CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
++# CONFIG_ATM_FORE200E_USE_TASKLET is not set
++CONFIG_ATM_FORE200E_TX_RETRY=16
++CONFIG_ATM_FORE200E_DEBUG=0
++CONFIG_ATM_FORE200E=m
++CONFIG_ATM_HE=m
++CONFIG_ATM_HE_USE_SUNI=y
++CONFIG_FDDI=y
++CONFIG_DEFXX=m
++CONFIG_SKFP=m
++CONFIG_HIPPI=y
++CONFIG_ROADRUNNER=m
++# CONFIG_ROADRUNNER_LARGE_RINGS is not set
++CONFIG_PLIP=m
++CONFIG_PPP=m
++CONFIG_PPP_MULTILINK=y
++CONFIG_PPP_FILTER=y
++CONFIG_PPP_ASYNC=m
++CONFIG_PPP_SYNC_TTY=m
++CONFIG_PPP_DEFLATE=m
++CONFIG_PPP_BSDCOMP=m
++CONFIG_PPPOE=m
++CONFIG_PPPOATM=m
++CONFIG_SLIP=m
++CONFIG_SLIP_COMPRESSED=y
++CONFIG_SLIP_SMART=y
++CONFIG_SLIP_MODE_SLIP6=y
++CONFIG_NET_FC=y
++CONFIG_SHAPER=m
++CONFIG_NETCONSOLE=m
++
++#
++# ISDN subsystem
++#
++CONFIG_ISDN=m
++
++#
++# Old ISDN4Linux
++#
++CONFIG_ISDN_I4L=m
++CONFIG_ISDN_PPP=y
++CONFIG_ISDN_PPP_VJ=y
++CONFIG_ISDN_MPP=y
++CONFIG_IPPP_FILTER=y
++CONFIG_ISDN_PPP_BSDCOMP=m
++CONFIG_ISDN_AUDIO=y
++CONFIG_ISDN_TTY_FAX=y
++CONFIG_ISDN_X25=y
++
++#
++# ISDN feature submodules
++#
++# CONFIG_ISDN_DRV_LOOP is not set
++# CONFIG_ISDN_DIVERSION is not set
++
++#
++# ISDN4Linux hardware drivers
++#
++
++#
++# Passive cards
++#
++CONFIG_ISDN_DRV_HISAX=m
++
++#
++# D-channel protocol features
++#
++CONFIG_HISAX_EURO=y
++CONFIG_DE_AOC=y
++# CONFIG_HISAX_NO_SENDCOMPLETE is not set
++# CONFIG_HISAX_NO_LLC is not set
++# CONFIG_HISAX_NO_KEYPAD is not set
++CONFIG_HISAX_1TR6=y
++CONFIG_HISAX_NI1=y
++CONFIG_HISAX_MAX_CARDS=8
++
++#
++# HiSax supported cards
++#
++CONFIG_HISAX_16_0=y
++CONFIG_HISAX_16_3=y
++CONFIG_HISAX_TELESPCI=y
++CONFIG_HISAX_S0BOX=y
++CONFIG_HISAX_AVM_A1=y
++CONFIG_HISAX_FRITZPCI=y
++CONFIG_HISAX_AVM_A1_PCMCIA=y
++CONFIG_HISAX_ELSA=y
++CONFIG_HISAX_IX1MICROR2=y
++CONFIG_HISAX_DIEHLDIVA=y
++CONFIG_HISAX_ASUSCOM=y
++CONFIG_HISAX_TELEINT=y
++CONFIG_HISAX_HFCS=y
++CONFIG_HISAX_SEDLBAUER=y
++CONFIG_HISAX_SPORTSTER=y
++CONFIG_HISAX_MIC=y
++CONFIG_HISAX_NETJET=y
++CONFIG_HISAX_NETJET_U=y
++CONFIG_HISAX_NICCY=y
++CONFIG_HISAX_ISURF=y
++CONFIG_HISAX_HSTSAPHIR=y
++CONFIG_HISAX_BKM_A4T=y
++CONFIG_HISAX_SCT_QUADRO=y
++CONFIG_HISAX_GAZEL=y
++CONFIG_HISAX_HFC_PCI=y
++CONFIG_HISAX_W6692=y
++CONFIG_HISAX_HFC_SX=y
++CONFIG_HISAX_ENTERNOW_PCI=y
++# CONFIG_HISAX_DEBUG is not set
++
++#
++# HiSax PCMCIA card service modules
++#
++CONFIG_HISAX_SEDLBAUER_CS=m
++CONFIG_HISAX_ELSA_CS=m
++CONFIG_HISAX_AVM_A1_CS=m
++CONFIG_HISAX_TELES_CS=m
++
++#
++# HiSax sub driver modules
++#
++CONFIG_HISAX_ST5481=m
++CONFIG_HISAX_HFCUSB=m
++CONFIG_HISAX_HFC4S8S=m
++CONFIG_HISAX_FRITZ_PCIPNP=m
++CONFIG_HISAX_HDLC=y
++
++#
++# Active cards
++#
++CONFIG_ISDN_DRV_ICN=m
++CONFIG_ISDN_DRV_PCBIT=m
++CONFIG_ISDN_DRV_SC=m
++CONFIG_ISDN_DRV_ACT2000=m
++# CONFIG_HYSDN is not set
++
++#
++# CAPI subsystem
++#
++CONFIG_ISDN_CAPI=m
++CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
++CONFIG_ISDN_CAPI_MIDDLEWARE=y
++CONFIG_ISDN_CAPI_CAPI20=m
++CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
++CONFIG_ISDN_CAPI_CAPIFS=m
++CONFIG_ISDN_CAPI_CAPIDRV=m
++
++#
++# CAPI hardware drivers
++#
++
++#
++# Active AVM cards
++#
++CONFIG_CAPI_AVM=y
++CONFIG_ISDN_DRV_AVMB1_B1ISA=m
++CONFIG_ISDN_DRV_AVMB1_B1PCI=m
++CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
++CONFIG_ISDN_DRV_AVMB1_T1ISA=m
++CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
++CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
++CONFIG_ISDN_DRV_AVMB1_T1PCI=m
++CONFIG_ISDN_DRV_AVMB1_C4=m
++
++#
++# Active Eicon DIVA Server cards
++#
++CONFIG_CAPI_EICON=y
++CONFIG_ISDN_DIVAS=m
++CONFIG_ISDN_DIVAS_BRIPCI=y
++CONFIG_ISDN_DIVAS_PRIPCI=y
++CONFIG_ISDN_DIVAS_DIVACAPI=m
++CONFIG_ISDN_DIVAS_USERIDI=m
++CONFIG_ISDN_DIVAS_MAINT=m
++
++#
++# Telephony Support
++#
++CONFIG_PHONE=m
++CONFIG_PHONE_IXJ=m
++CONFIG_PHONE_IXJ_PCMCIA=m
++
++#
++# Input device support
++#
++CONFIG_INPUT=y
++
++#
++# Userland interfaces
++#
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_INPUT_MOUSEDEV_PSAUX=y
++CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
++CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
++CONFIG_INPUT_JOYDEV=m
++CONFIG_INPUT_TSDEV=m
++CONFIG_INPUT_TSDEV_SCREEN_X=240
++CONFIG_INPUT_TSDEV_SCREEN_Y=320
++CONFIG_INPUT_EVDEV=m
++CONFIG_INPUT_EVBUG=m
++
++#
++# Input Device Drivers
++#
++CONFIG_INPUT_KEYBOARD=y
++CONFIG_KEYBOARD_ATKBD=y
++CONFIG_KEYBOARD_SUNKBD=m
++CONFIG_KEYBOARD_LKKBD=m
++CONFIG_KEYBOARD_XTKBD=m
++CONFIG_KEYBOARD_NEWTON=m
++CONFIG_INPUT_MOUSE=y
++CONFIG_MOUSE_PS2=y
++CONFIG_MOUSE_SERIAL=m
++CONFIG_MOUSE_INPORT=m
++# CONFIG_MOUSE_ATIXL is not set
++CONFIG_MOUSE_LOGIBM=m
++CONFIG_MOUSE_PC110PAD=m
++CONFIG_MOUSE_VSXXXAA=m
++CONFIG_INPUT_JOYSTICK=y
++CONFIG_JOYSTICK_ANALOG=m
++CONFIG_JOYSTICK_A3D=m
++CONFIG_JOYSTICK_ADI=m
++CONFIG_JOYSTICK_COBRA=m
++CONFIG_JOYSTICK_GF2K=m
++CONFIG_JOYSTICK_GRIP=m
++CONFIG_JOYSTICK_GRIP_MP=m
++CONFIG_JOYSTICK_GUILLEMOT=m
++CONFIG_JOYSTICK_INTERACT=m
++CONFIG_JOYSTICK_SIDEWINDER=m
++CONFIG_JOYSTICK_TMDC=m
++CONFIG_JOYSTICK_IFORCE=m
++CONFIG_JOYSTICK_IFORCE_USB=y
++CONFIG_JOYSTICK_IFORCE_232=y
++CONFIG_JOYSTICK_WARRIOR=m
++CONFIG_JOYSTICK_MAGELLAN=m
++CONFIG_JOYSTICK_SPACEORB=m
++CONFIG_JOYSTICK_SPACEBALL=m
++CONFIG_JOYSTICK_STINGER=m
++CONFIG_JOYSTICK_TWIDJOY=m
++CONFIG_JOYSTICK_DB9=m
++CONFIG_JOYSTICK_GAMECON=m
++CONFIG_JOYSTICK_TURBOGRAFX=m
++CONFIG_JOYSTICK_JOYDUMP=m
++CONFIG_INPUT_TOUCHSCREEN=y
++CONFIG_TOUCHSCREEN_GUNZE=m
++CONFIG_TOUCHSCREEN_ELO=m
++CONFIG_TOUCHSCREEN_MTOUCH=m
++CONFIG_TOUCHSCREEN_MK712=m
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_PCSPKR=m
++CONFIG_INPUT_UINPUT=m
++
++#
++# Hardware I/O ports
++#
++CONFIG_SERIO=y
++CONFIG_SERIO_I8042=y
++CONFIG_SERIO_SERPORT=m
++CONFIG_SERIO_CT82C710=m
++CONFIG_SERIO_PARKBD=m
++CONFIG_SERIO_PCIPS2=m
++CONFIG_SERIO_LIBPS2=y
++CONFIG_SERIO_RAW=m
++CONFIG_GAMEPORT=m
++CONFIG_GAMEPORT_NS558=m
++CONFIG_GAMEPORT_L4=m
++CONFIG_GAMEPORT_EMU10K1=m
++CONFIG_GAMEPORT_VORTEX=m
++CONFIG_GAMEPORT_FM801=m
++# CONFIG_GAMEPORT_CS461X is not set
++
++#
++# Character devices
++#
++CONFIG_VT=y
++CONFIG_VT_CONSOLE=y
++CONFIG_HW_CONSOLE=y
++# CONFIG_SERIAL_NONSTANDARD is not set
++
++#
++# Serial drivers
++#
++CONFIG_SERIAL_8250=m
++# CONFIG_SERIAL_8250_CS is not set
++# CONFIG_SERIAL_8250_ACPI is not set
++CONFIG_SERIAL_8250_NR_UARTS=4
++# CONFIG_SERIAL_8250_EXTENDED is not set
++
++#
++# Non-8250 serial port support
++#
++CONFIG_SERIAL_CORE=m
++CONFIG_SERIAL_JSM=m
++CONFIG_UNIX98_PTYS=y
++CONFIG_LEGACY_PTYS=y
++CONFIG_LEGACY_PTY_COUNT=256
++CONFIG_PRINTER=m
++# CONFIG_LP_CONSOLE is not set
++CONFIG_PPDEV=m
++CONFIG_TIPAR=m
++
++#
++# IPMI
++#
++CONFIG_IPMI_HANDLER=m
++# CONFIG_IPMI_PANIC_EVENT is not set
++CONFIG_IPMI_DEVICE_INTERFACE=m
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_WATCHDOG=m
++CONFIG_IPMI_POWEROFF=m
++
++#
++# Watchdog Cards
++#
++CONFIG_WATCHDOG=y
++# CONFIG_WATCHDOG_NOWAYOUT is not set
++
++#
++# Watchdog Device Drivers
++#
++CONFIG_SOFT_WATCHDOG=m
++CONFIG_ACQUIRE_WDT=m
++CONFIG_ADVANTECH_WDT=m
++CONFIG_ALIM1535_WDT=m
++CONFIG_ALIM7101_WDT=m
++CONFIG_SC520_WDT=m
++CONFIG_EUROTECH_WDT=m
++CONFIG_IB700_WDT=m
++CONFIG_WAFER_WDT=m
++CONFIG_I8XX_TCO=m
++CONFIG_SC1200_WDT=m
++CONFIG_SCx200_WDT=m
++CONFIG_60XX_WDT=m
++CONFIG_CPU5_WDT=m
++CONFIG_W83627HF_WDT=m
++CONFIG_W83877F_WDT=m
++CONFIG_MACHZ_WDT=m
++
++#
++# ISA-based Watchdog Cards
++#
++CONFIG_PCWATCHDOG=m
++CONFIG_MIXCOMWD=m
++CONFIG_WDT=m
++CONFIG_WDT_501=y
++
++#
++# PCI-based Watchdog Cards
++#
++CONFIG_PCIPCWATCHDOG=m
++CONFIG_WDTPCI=m
++CONFIG_WDT_501_PCI=y
++
++#
++# USB-based Watchdog Cards
++#
++CONFIG_USBPCWATCHDOG=m
++CONFIG_HW_RANDOM=m
++CONFIG_NVRAM=m
++CONFIG_RTC=m
++CONFIG_GEN_RTC=m
++CONFIG_GEN_RTC_X=y
++CONFIG_DTLK=m
++CONFIG_R3964=m
++CONFIG_APPLICOM=m
++CONFIG_SONYPI=m
++
++#
++# Ftape, the floppy tape device driver
++#
++# CONFIG_FTAPE is not set
++CONFIG_AGP=m
++CONFIG_AGP_ALI=m
++CONFIG_AGP_ATI=m
++CONFIG_AGP_AMD=m
++CONFIG_AGP_AMD64=m
++CONFIG_AGP_INTEL=m
++CONFIG_AGP_NVIDIA=m
++CONFIG_AGP_SIS=m
++CONFIG_AGP_SWORKS=m
++CONFIG_AGP_VIA=m
++CONFIG_AGP_EFFICEON=m
++CONFIG_DRM=m
++CONFIG_DRM_TDFX=m
++# CONFIG_DRM_GAMMA is not set
++CONFIG_DRM_R128=m
++CONFIG_DRM_RADEON=m
++CONFIG_DRM_I810=m
++CONFIG_DRM_I830=m
++CONFIG_DRM_I915=m
++CONFIG_DRM_MGA=m
++CONFIG_DRM_SIS=m
++
++#
++# PCMCIA character devices
++#
++CONFIG_SYNCLINK_CS=m
++CONFIG_MWAVE=m
++CONFIG_SCx200_GPIO=m
++CONFIG_RAW_DRIVER=m
++# CONFIG_HPET is not set
++CONFIG_MAX_RAW_DEVS=256
++CONFIG_HANGCHECK_TIMER=m
++
++#
++# TPM devices
++#
++# CONFIG_TCG_TPM is not set
++
++#
++# I2C support
++#
++CONFIG_I2C=m
++CONFIG_I2C_CHARDEV=m
++
++#
++# I2C Algorithms
++#
++CONFIG_I2C_ALGOBIT=m
++CONFIG_I2C_ALGOPCF=m
++CONFIG_I2C_ALGOPCA=m
++
++#
++# I2C Hardware Bus support
++#
++CONFIG_I2C_ALI1535=m
++CONFIG_I2C_ALI1563=m
++CONFIG_I2C_ALI15X3=m
++CONFIG_I2C_AMD756=m
++CONFIG_I2C_AMD756_S4882=m
++CONFIG_I2C_AMD8111=m
++CONFIG_I2C_ELEKTOR=m
++CONFIG_I2C_I801=m
++CONFIG_I2C_I810=m
++CONFIG_I2C_PIIX4=m
++CONFIG_I2C_ISA=m
++CONFIG_I2C_NFORCE2=m
++CONFIG_I2C_PARPORT=m
++CONFIG_I2C_PARPORT_LIGHT=m
++CONFIG_I2C_PROSAVAGE=m
++CONFIG_I2C_SAVAGE4=m
++CONFIG_SCx200_I2C=m
++CONFIG_SCx200_I2C_SCL=12
++CONFIG_SCx200_I2C_SDA=13
++CONFIG_SCx200_ACB=m
++CONFIG_I2C_SIS5595=m
++CONFIG_I2C_SIS630=m
++CONFIG_I2C_SIS96X=m
++CONFIG_I2C_STUB=m
++CONFIG_I2C_VIA=m
++CONFIG_I2C_VIAPRO=m
++CONFIG_I2C_VOODOO3=m
++CONFIG_I2C_PCA_ISA=m
++
++#
++# Hardware Sensors Chip support
++#
++CONFIG_I2C_SENSOR=m
++CONFIG_SENSORS_ADM1021=m
++CONFIG_SENSORS_ADM1025=m
++CONFIG_SENSORS_ADM1026=m
++CONFIG_SENSORS_ADM1031=m
++CONFIG_SENSORS_ASB100=m
++CONFIG_SENSORS_DS1621=m
++CONFIG_SENSORS_FSCHER=m
++CONFIG_SENSORS_FSCPOS=m
++CONFIG_SENSORS_GL518SM=m
++CONFIG_SENSORS_GL520SM=m
++CONFIG_SENSORS_IT87=m
++CONFIG_SENSORS_LM63=m
++CONFIG_SENSORS_LM75=m
++CONFIG_SENSORS_LM77=m
++CONFIG_SENSORS_LM78=m
++CONFIG_SENSORS_LM80=m
++CONFIG_SENSORS_LM83=m
++CONFIG_SENSORS_LM85=m
++CONFIG_SENSORS_LM87=m
++CONFIG_SENSORS_LM90=m
++CONFIG_SENSORS_LM92=m
++CONFIG_SENSORS_MAX1619=m
++CONFIG_SENSORS_PC87360=m
++# CONFIG_SENSORS_SMSC47B397 is not set
++CONFIG_SENSORS_SIS5595=m
++CONFIG_SENSORS_SMSC47M1=m
++CONFIG_SENSORS_VIA686A=m
++CONFIG_SENSORS_W83781D=m
++CONFIG_SENSORS_W83L785TS=m
++CONFIG_SENSORS_W83627HF=m
++
++#
++# Other I2C Chip support
++#
++CONFIG_SENSORS_DS1337=m
++CONFIG_SENSORS_EEPROM=m
++CONFIG_SENSORS_PCF8574=m
++CONFIG_SENSORS_PCF8591=m
++CONFIG_SENSORS_RTC8564=m
++# CONFIG_I2C_DEBUG_CORE is not set
++# CONFIG_I2C_DEBUG_ALGO is not set
++# CONFIG_I2C_DEBUG_BUS is not set
++# CONFIG_I2C_DEBUG_CHIP is not set
++
++#
++# Dallas's 1-wire bus
++#
++CONFIG_W1=m
++CONFIG_W1_MATROX=m
++CONFIG_W1_DS9490=m
++CONFIG_W1_DS9490_BRIDGE=m
++CONFIG_W1_THERM=m
++CONFIG_W1_SMEM=m
++
++#
++# Misc devices
++#
++CONFIG_IBM_ASM=m
++
++#
++# Multimedia devices
++#
++CONFIG_VIDEO_DEV=m
++
++#
++# Video For Linux
++#
++
++#
++# Video Adapters
++#
++CONFIG_VIDEO_BT848=m
++CONFIG_VIDEO_PMS=m
++CONFIG_VIDEO_BWQCAM=m
++CONFIG_VIDEO_CQCAM=m
++CONFIG_VIDEO_W9966=m
++CONFIG_VIDEO_CPIA=m
++CONFIG_VIDEO_CPIA_PP=m
++CONFIG_VIDEO_CPIA_USB=m
++CONFIG_VIDEO_SAA5246A=m
++CONFIG_VIDEO_SAA5249=m
++CONFIG_TUNER_3036=m
++CONFIG_VIDEO_STRADIS=m
++CONFIG_VIDEO_ZORAN=m
++CONFIG_VIDEO_ZORAN_BUZ=m
++CONFIG_VIDEO_ZORAN_DC10=m
++CONFIG_VIDEO_ZORAN_DC30=m
++CONFIG_VIDEO_ZORAN_LML33=m
++CONFIG_VIDEO_ZORAN_LML33R10=m
++# CONFIG_VIDEO_ZR36120 is not set
++CONFIG_VIDEO_MEYE=m
++# CONFIG_VIDEO_SAA7134 is not set
++CONFIG_VIDEO_MXB=m
++CONFIG_VIDEO_DPC=m
++CONFIG_VIDEO_HEXIUM_ORION=m
++CONFIG_VIDEO_HEXIUM_GEMINI=m
++CONFIG_VIDEO_CX88=m
++# CONFIG_VIDEO_CX88_DVB is not set
++CONFIG_VIDEO_OVCAMCHIP=m
++
++#
++# Radio Adapters
++#
++CONFIG_RADIO_CADET=m
++CONFIG_RADIO_RTRACK=m
++CONFIG_RADIO_RTRACK2=m
++CONFIG_RADIO_AZTECH=m
++CONFIG_RADIO_GEMTEK=m
++CONFIG_RADIO_GEMTEK_PCI=m
++CONFIG_RADIO_MAXIRADIO=m
++CONFIG_RADIO_MAESTRO=m
++CONFIG_RADIO_MIROPCM20=m
++CONFIG_RADIO_MIROPCM20_RDS=m
++CONFIG_RADIO_SF16FMI=m
++CONFIG_RADIO_SF16FMR2=m
++CONFIG_RADIO_TERRATEC=m
++CONFIG_RADIO_TRUST=m
++CONFIG_RADIO_TYPHOON=m
++CONFIG_RADIO_TYPHOON_PROC_FS=y
++CONFIG_RADIO_ZOLTRIX=m
++
++#
++# Digital Video Broadcasting Devices
++#
++CONFIG_DVB=y
++CONFIG_DVB_CORE=m
++
++#
++# Supported SAA7146 based PCI Adapters
++#
++CONFIG_DVB_AV7110=m
++# CONFIG_DVB_AV7110_OSD is not set
++CONFIG_DVB_BUDGET=m
++CONFIG_DVB_BUDGET_CI=m
++CONFIG_DVB_BUDGET_AV=m
++CONFIG_DVB_BUDGET_PATCH=m
++
++#
++# Supported USB Adapters
++#
++CONFIG_DVB_TTUSB_BUDGET=m
++CONFIG_DVB_TTUSB_DEC=m
++CONFIG_DVB_DIBUSB=m
++CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES=y
++# CONFIG_DVB_DIBCOM_DEBUG is not set
++CONFIG_DVB_CINERGYT2=m
++# CONFIG_DVB_CINERGYT2_TUNING is not set
++
++#
++# Supported FlexCopII (B2C2) Adapters
++#
++CONFIG_DVB_B2C2_FLEXCOP=m
++CONFIG_DVB_B2C2_FLEXCOP_PCI=m
++CONFIG_DVB_B2C2_FLEXCOP_USB=m
++# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
++CONFIG_DVB_B2C2_SKYSTAR=m
++
++#
++# Supported BT878 Adapters
++#
++CONFIG_DVB_BT8XX=m
++
++#
++# Supported DVB Frontends
++#
++
++#
++# Customise DVB Frontends
++#
++
++#
++# DVB-S (satellite) frontends
++#
++CONFIG_DVB_STV0299=m
++CONFIG_DVB_CX24110=m
++CONFIG_DVB_TDA8083=m
++CONFIG_DVB_TDA80XX=m
++CONFIG_DVB_MT312=m
++CONFIG_DVB_VES1X93=m
++
++#
++# DVB-T (terrestrial) frontends
++#
++CONFIG_DVB_SP8870=m
++CONFIG_DVB_SP887X=m
++CONFIG_DVB_CX22700=m
++CONFIG_DVB_CX22702=m
++CONFIG_DVB_L64781=m
++CONFIG_DVB_TDA1004X=m
++CONFIG_DVB_NXT6000=m
++CONFIG_DVB_MT352=m
++CONFIG_DVB_DIB3000MB=m
++CONFIG_DVB_DIB3000MC=m
++
++#
++# DVB-C (cable) frontends
++#
++CONFIG_DVB_ATMEL_AT76C651=m
++CONFIG_DVB_VES1820=m
++CONFIG_DVB_TDA10021=m
++CONFIG_DVB_STV0297=m
++
++#
++# ATSC (North American/Korean Terresterial DTV) frontends
++#
++CONFIG_DVB_NXT2002=m
++CONFIG_DVB_OR51211=m
++CONFIG_DVB_OR51132=m
++CONFIG_VIDEO_SAA7146=m
++CONFIG_VIDEO_SAA7146_VV=m
++CONFIG_VIDEO_VIDEOBUF=m
++CONFIG_VIDEO_TUNER=m
++CONFIG_VIDEO_BUF=m
++CONFIG_VIDEO_BTCX=m
++CONFIG_VIDEO_IR=m
++CONFIG_VIDEO_TVEEPROM=m
++
++#
++# Graphics support
++#
++CONFIG_FB=y
++CONFIG_FB_CFB_FILLRECT=m
++CONFIG_FB_CFB_COPYAREA=m
++CONFIG_FB_CFB_IMAGEBLIT=m
++CONFIG_FB_SOFT_CURSOR=m
++# CONFIG_FB_MACMODES is not set
++CONFIG_FB_MODE_HELPERS=y
++CONFIG_FB_TILEBLITTING=y
++CONFIG_FB_CIRRUS=m
++CONFIG_FB_PM2=m
++CONFIG_FB_PM2_FIFO_DISCONNECT=y
++CONFIG_FB_CYBER2000=m
++# CONFIG_FB_ASILIANT is not set
++# CONFIG_FB_IMSTT is not set
++CONFIG_FB_VGA16=m
++# CONFIG_FB_VESA is not set
++CONFIG_VIDEO_SELECT=y
++CONFIG_FB_HGA=m
++# CONFIG_FB_HGA_ACCEL is not set
++CONFIG_FB_NVIDIA=m
++CONFIG_FB_NVIDIA_I2C=y
++CONFIG_FB_RIVA=m
++CONFIG_FB_RIVA_I2C=y
++CONFIG_FB_RIVA_DEBUG=y
++CONFIG_FB_I810=m
++# CONFIG_FB_I810_GTF is not set
++CONFIG_FB_INTEL=m
++# CONFIG_FB_INTEL_DEBUG is not set
++CONFIG_FB_MATROX=m
++CONFIG_FB_MATROX_MILLENIUM=y
++CONFIG_FB_MATROX_MYSTIQUE=y
++# CONFIG_FB_MATROX_G is not set
++CONFIG_FB_MATROX_I2C=m
++CONFIG_FB_MATROX_MULTIHEAD=y
++CONFIG_FB_RADEON_OLD=m
++CONFIG_FB_RADEON=m
++CONFIG_FB_RADEON_I2C=y
++# CONFIG_FB_RADEON_DEBUG is not set
++CONFIG_FB_ATY128=m
++CONFIG_FB_ATY=m
++CONFIG_FB_ATY_CT=y
++CONFIG_FB_ATY_GENERIC_LCD=y
++CONFIG_FB_ATY_XL_INIT=y
++CONFIG_FB_ATY_GX=y
++CONFIG_FB_SAVAGE=m
++CONFIG_FB_SAVAGE_I2C=y
++CONFIG_FB_SAVAGE_ACCEL=y
++CONFIG_FB_SIS=m
++CONFIG_FB_SIS_300=y
++CONFIG_FB_SIS_315=y
++CONFIG_FB_NEOMAGIC=m
++CONFIG_FB_KYRO=m
++CONFIG_FB_3DFX=m
++# CONFIG_FB_3DFX_ACCEL is not set
++CONFIG_FB_VOODOO1=m
++CONFIG_FB_TRIDENT=m
++# CONFIG_FB_TRIDENT_ACCEL is not set
++# CONFIG_FB_PM3 is not set
++CONFIG_FB_GEODE=y
++CONFIG_FB_GEODE_GX1=m
++CONFIG_FB_S1D13XXX=m
++CONFIG_FB_VIRTUAL=m
++
++#
++# Console display driver support
++#
++CONFIG_VGA_CONSOLE=y
++CONFIG_MDA_CONSOLE=m
++CONFIG_DUMMY_CONSOLE=y
++CONFIG_FRAMEBUFFER_CONSOLE=m
++# CONFIG_FONTS is not set
++CONFIG_FONT_8x8=y
++CONFIG_FONT_8x16=y
++
++#
++# Logo configuration
++#
++# CONFIG_LOGO is not set
++# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
++
++#
++# Sound
++#
++CONFIG_SOUND=m
++
++#
++# Advanced Linux Sound Architecture
++#
++CONFIG_SND=m
++CONFIG_SND_TIMER=m
++CONFIG_SND_PCM=m
++CONFIG_SND_HWDEP=m
++CONFIG_SND_RAWMIDI=m
++CONFIG_SND_SEQUENCER=m
++CONFIG_SND_SEQ_DUMMY=m
++CONFIG_SND_OSSEMUL=y
++CONFIG_SND_MIXER_OSS=m
++CONFIG_SND_PCM_OSS=m
++CONFIG_SND_SEQUENCER_OSS=y
++CONFIG_SND_RTCTIMER=m
++# CONFIG_SND_VERBOSE_PRINTK is not set
++# CONFIG_SND_DEBUG is not set
++CONFIG_SND_GENERIC_PM=y
++
++#
++# Generic devices
++#
++CONFIG_SND_MPU401_UART=m
++CONFIG_SND_OPL3_LIB=m
++CONFIG_SND_OPL4_LIB=m
++CONFIG_SND_VX_LIB=m
++CONFIG_SND_DUMMY=m
++CONFIG_SND_VIRMIDI=m
++CONFIG_SND_MTPAV=m
++CONFIG_SND_SERIAL_U16550=m
++CONFIG_SND_MPU401=m
++
++#
++# ISA devices
++#
++CONFIG_SND_AD1848_LIB=m
++CONFIG_SND_CS4231_LIB=m
++CONFIG_SND_AD1816A=m
++CONFIG_SND_AD1848=m
++CONFIG_SND_CS4231=m
++CONFIG_SND_CS4232=m
++CONFIG_SND_CS4236=m
++CONFIG_SND_ES968=m
++CONFIG_SND_ES1688=m
++CONFIG_SND_ES18XX=m
++CONFIG_SND_GUS_SYNTH=m
++CONFIG_SND_GUSCLASSIC=m
++CONFIG_SND_GUSEXTREME=m
++CONFIG_SND_GUSMAX=m
++CONFIG_SND_INTERWAVE=m
++CONFIG_SND_INTERWAVE_STB=m
++CONFIG_SND_OPTI92X_AD1848=m
++CONFIG_SND_OPTI92X_CS4231=m
++CONFIG_SND_OPTI93X=m
++CONFIG_SND_SB8=m
++CONFIG_SND_SB16=m
++CONFIG_SND_SBAWE=m
++CONFIG_SND_SB16_CSP=y
++CONFIG_SND_WAVEFRONT=m
++CONFIG_SND_ALS100=m
++CONFIG_SND_AZT2320=m
++CONFIG_SND_CMI8330=m
++CONFIG_SND_DT019X=m
++CONFIG_SND_OPL3SA2=m
++CONFIG_SND_SGALAXY=m
++CONFIG_SND_SSCAPE=m
++
++#
++# PCI devices
++#
++CONFIG_SND_AC97_CODEC=m
++CONFIG_SND_ALI5451=m
++CONFIG_SND_ATIIXP=m
++CONFIG_SND_ATIIXP_MODEM=m
++CONFIG_SND_AU8810=m
++CONFIG_SND_AU8820=m
++CONFIG_SND_AU8830=m
++CONFIG_SND_AZT3328=m
++CONFIG_SND_BT87X=m
++# CONFIG_SND_BT87X_OVERCLOCK is not set
++CONFIG_SND_CS46XX=m
++CONFIG_SND_CS46XX_NEW_DSP=y
++CONFIG_SND_CS4281=m
++CONFIG_SND_EMU10K1=m
++# CONFIG_SND_EMU10K1X is not set
++# CONFIG_SND_CA0106 is not set
++CONFIG_SND_KORG1212=m
++CONFIG_SND_MIXART=m
++CONFIG_SND_NM256=m
++CONFIG_SND_RME32=m
++CONFIG_SND_RME96=m
++CONFIG_SND_RME9652=m
++CONFIG_SND_HDSP=m
++CONFIG_SND_TRIDENT=m
++CONFIG_SND_YMFPCI=m
++CONFIG_SND_ALS4000=m
++CONFIG_SND_CMIPCI=m
++CONFIG_SND_ENS1370=m
++CONFIG_SND_ENS1371=m
++CONFIG_SND_ES1938=m
++CONFIG_SND_ES1968=m
++CONFIG_SND_MAESTRO3=m
++CONFIG_SND_FM801=m
++CONFIG_SND_FM801_TEA575X=m
++CONFIG_SND_ICE1712=m
++CONFIG_SND_ICE1724=m
++CONFIG_SND_INTEL8X0=m
++CONFIG_SND_INTEL8X0M=m
++CONFIG_SND_SONICVIBES=m
++CONFIG_SND_VIA82XX=m
++# CONFIG_SND_VIA82XX_MODEM is not set
++CONFIG_SND_VX222=m
++CONFIG_SND_HDA_INTEL=m
++
++#
++# USB devices
++#
++CONFIG_SND_USB_AUDIO=m
++CONFIG_SND_USB_USX2Y=m
++
++#
++# PCMCIA devices
++#
++CONFIG_SND_VXPOCKET=m
++CONFIG_SND_VXP440=m
++CONFIG_SND_PDAUDIOCF=m
++
++#
++# Open Sound System
++#
++CONFIG_SOUND_PRIME=m
++CONFIG_SOUND_BT878=m
++CONFIG_SOUND_CMPCI=m
++# CONFIG_SOUND_CMPCI_FM is not set
++# CONFIG_SOUND_CMPCI_MIDI is not set
++CONFIG_SOUND_CMPCI_JOYSTICK=y
++CONFIG_SOUND_EMU10K1=m
++CONFIG_MIDI_EMU10K1=y
++CONFIG_SOUND_FUSION=m
++CONFIG_SOUND_CS4281=m
++CONFIG_SOUND_ES1370=m
++CONFIG_SOUND_ES1371=m
++CONFIG_SOUND_ESSSOLO1=m
++CONFIG_SOUND_MAESTRO=m
++CONFIG_SOUND_MAESTRO3=m
++CONFIG_SOUND_ICH=m
++CONFIG_SOUND_SONICVIBES=m
++CONFIG_SOUND_TRIDENT=m
++# CONFIG_SOUND_MSNDCLAS is not set
++# CONFIG_SOUND_MSNDPIN is not set
++CONFIG_SOUND_VIA82CXXX=m
++CONFIG_MIDI_VIA82CXXX=y
++CONFIG_SOUND_OSS=m
++# CONFIG_SOUND_TRACEINIT is not set
++# CONFIG_SOUND_DMAP is not set
++# CONFIG_SOUND_AD1816 is not set
++CONFIG_SOUND_AD1889=m
++CONFIG_SOUND_SGALAXY=m
++CONFIG_SOUND_ADLIB=m
++CONFIG_SOUND_ACI_MIXER=m
++CONFIG_SOUND_CS4232=m
++CONFIG_SOUND_SSCAPE=m
++CONFIG_SOUND_GUS=m
++CONFIG_SOUND_GUS16=y
++CONFIG_SOUND_GUSMAX=y
++CONFIG_SOUND_VMIDI=m
++CONFIG_SOUND_TRIX=m
++CONFIG_SOUND_MSS=m
++CONFIG_SOUND_MPU401=m
++CONFIG_SOUND_NM256=m
++CONFIG_SOUND_MAD16=m
++CONFIG_MAD16_OLDCARD=y
++CONFIG_SOUND_PAS=m
++CONFIG_SOUND_PSS=m
++CONFIG_PSS_MIXER=y
++CONFIG_SOUND_SB=m
++# CONFIG_SOUND_AWE32_SYNTH is not set
++CONFIG_SOUND_WAVEFRONT=m
++CONFIG_SOUND_MAUI=m
++CONFIG_SOUND_YM3812=m
++CONFIG_SOUND_OPL3SA1=m
++CONFIG_SOUND_OPL3SA2=m
++CONFIG_SOUND_YMFPCI=m
++# CONFIG_SOUND_YMFPCI_LEGACY is not set
++CONFIG_SOUND_UART6850=m
++CONFIG_SOUND_AEDSP16=m
++CONFIG_SC6600=y
++CONFIG_SC6600_JOY=y
++CONFIG_SC6600_CDROM=4
++CONFIG_SC6600_CDROMBASE=0x0
++# CONFIG_AEDSP16_MSS is not set
++# CONFIG_AEDSP16_SBPRO is not set
++# CONFIG_AEDSP16_MPU401 is not set
++CONFIG_SOUND_TVMIXER=m
++CONFIG_SOUND_KAHLUA=m
++CONFIG_SOUND_ALI5455=m
++CONFIG_SOUND_FORTE=m
++CONFIG_SOUND_RME96XX=m
++CONFIG_SOUND_AD1980=m
++
++#
++# USB support
++#
++CONFIG_USB_ARCH_HAS_HCD=y
++CONFIG_USB_ARCH_HAS_OHCI=y
++CONFIG_USB=y
++# CONFIG_USB_DEBUG is not set
++
++#
++# Miscellaneous USB options
++#
++CONFIG_USB_DEVICEFS=y
++CONFIG_USB_BANDWIDTH=y
++# CONFIG_USB_DYNAMIC_MINORS is not set
++# CONFIG_USB_OTG is not set
++
++#
++# USB Host Controller Drivers
++#
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_SPLIT_ISO=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_OHCI_HCD=m
++# CONFIG_USB_OHCI_BIG_ENDIAN is not set
++CONFIG_USB_OHCI_LITTLE_ENDIAN=y
++CONFIG_USB_UHCI_HCD=m
++CONFIG_USB_SL811_HCD=m
++CONFIG_USB_SL811_CS=m
++
++#
++# USB Device Class drivers
++#
++CONFIG_USB_AUDIO=m
++
++#
++# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
++#
++CONFIG_USB_MIDI=m
++CONFIG_USB_ACM=m
++CONFIG_USB_PRINTER=m
++
++#
++# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
++#
++CONFIG_USB_STORAGE=m
++# CONFIG_USB_STORAGE_DEBUG is not set
++CONFIG_USB_STORAGE_DATAFAB=y
++CONFIG_USB_STORAGE_FREECOM=y
++CONFIG_USB_STORAGE_ISD200=y
++CONFIG_USB_STORAGE_DPCM=y
++CONFIG_USB_STORAGE_USBAT=y
++CONFIG_USB_STORAGE_SDDR09=y
++CONFIG_USB_STORAGE_SDDR55=y
++CONFIG_USB_STORAGE_JUMPSHOT=y
++
++#
++# USB Input Devices
++#
++CONFIG_USB_HID=m
++CONFIG_USB_HIDINPUT=y
++# CONFIG_HID_FF is not set
++CONFIG_USB_HIDDEV=y
++
++#
++# USB HID Boot Protocol drivers
++#
++CONFIG_USB_KBD=m
++CONFIG_USB_MOUSE=m
++CONFIG_USB_AIPTEK=m
++CONFIG_USB_WACOM=m
++CONFIG_USB_KBTAB=m
++CONFIG_USB_POWERMATE=m
++CONFIG_USB_MTOUCH=m
++CONFIG_USB_EGALAX=m
++CONFIG_USB_XPAD=m
++CONFIG_USB_ATI_REMOTE=m
++
++#
++# USB Imaging devices
++#
++CONFIG_USB_MDC800=m
++CONFIG_USB_MICROTEK=m
++
++#
++# USB Multimedia devices
++#
++# CONFIG_USB_DABUSB is not set
++CONFIG_USB_VICAM=m
++CONFIG_USB_DSBR=m
++CONFIG_USB_IBMCAM=m
++CONFIG_USB_KONICAWC=m
++CONFIG_USB_OV511=m
++CONFIG_USB_SE401=m
++CONFIG_USB_SN9C102=m
++CONFIG_USB_STV680=m
++CONFIG_USB_W9968CF=m
++CONFIG_USB_PWC=m
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_CATC=m
++CONFIG_USB_KAWETH=m
++CONFIG_USB_PEGASUS=m
++CONFIG_USB_RTL8150=m
++CONFIG_USB_USBNET=m
++
++#
++# USB Host-to-Host Cables
++#
++CONFIG_USB_ALI_M5632=y
++CONFIG_USB_AN2720=y
++CONFIG_USB_BELKIN=y
++CONFIG_USB_GENESYS=y
++CONFIG_USB_NET1080=y
++CONFIG_USB_PL2301=y
++CONFIG_USB_KC2190=y
++
++#
++# Intelligent USB Devices/Gadgets
++#
++CONFIG_USB_ARMLINUX=y
++CONFIG_USB_EPSON2888=y
++CONFIG_USB_ZAURUS=y
++CONFIG_USB_CDCETHER=y
++
++#
++# USB Network Adapters
++#
++CONFIG_USB_AX8817X=y
++CONFIG_USB_ZD1201=m
++CONFIG_USB_MON=m
++
++#
++# USB port drivers
++#
++CONFIG_USB_USS720=m
++
++#
++# USB Serial Converter support
++#
++CONFIG_USB_SERIAL=m
++CONFIG_USB_SERIAL_GENERIC=y
++CONFIG_USB_SERIAL_AIRPRIME=m
++CONFIG_USB_SERIAL_BELKIN=m
++CONFIG_USB_SERIAL_WHITEHEAT=m
++CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
++CONFIG_USB_SERIAL_CP2101=m
++CONFIG_USB_SERIAL_CYPRESS_M8=m
++CONFIG_USB_SERIAL_EMPEG=m
++CONFIG_USB_SERIAL_FTDI_SIO=m
++CONFIG_USB_SERIAL_VISOR=m
++CONFIG_USB_SERIAL_IPAQ=m
++CONFIG_USB_SERIAL_IR=m
++CONFIG_USB_SERIAL_EDGEPORT=m
++CONFIG_USB_SERIAL_EDGEPORT_TI=m
++# CONFIG_USB_SERIAL_GARMIN is not set
++CONFIG_USB_SERIAL_IPW=m
++CONFIG_USB_SERIAL_KEYSPAN_PDA=m
++CONFIG_USB_SERIAL_KEYSPAN=m
++# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
++# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set
++CONFIG_USB_SERIAL_KLSI=m
++CONFIG_USB_SERIAL_KOBIL_SCT=m
++CONFIG_USB_SERIAL_MCT_U232=m
++CONFIG_USB_SERIAL_PL2303=m
++CONFIG_USB_SERIAL_HP4X=m
++CONFIG_USB_SERIAL_SAFE=m
++# CONFIG_USB_SERIAL_SAFE_PADDED is not set
++# CONFIG_USB_SERIAL_TI is not set
++CONFIG_USB_SERIAL_CYBERJACK=m
++CONFIG_USB_SERIAL_XIRCOM=m
++CONFIG_USB_SERIAL_OPTION=m
++CONFIG_USB_SERIAL_OMNINET=m
++CONFIG_USB_EZUSB=y
++
++#
++# USB Miscellaneous drivers
++#
++# CONFIG_USB_EMI62 is not set
++# CONFIG_USB_EMI26 is not set
++CONFIG_USB_AUERSWALD=m
++CONFIG_USB_RIO500=m
++CONFIG_USB_LEGOTOWER=m
++CONFIG_USB_LCD=m
++CONFIG_USB_LED=m
++CONFIG_USB_CYTHERM=m
++CONFIG_USB_PHIDGETKIT=m
++CONFIG_USB_PHIDGETSERVO=m
++# CONFIG_USB_IDMOUSE is not set
++CONFIG_USB_SISUSBVGA=m
++CONFIG_USB_TEST=m
++
++#
++# USB ATM/DSL drivers
++#
++CONFIG_USB_ATM=m
++CONFIG_USB_SPEEDTOUCH=m
++
++#
++# USB Gadget Support
++#
++CONFIG_USB_GADGET=m
++# CONFIG_USB_GADGET_DEBUG_FILES is not set
++CONFIG_USB_GADGET_NET2280=y
++CONFIG_USB_NET2280=m
++# CONFIG_USB_GADGET_PXA2XX is not set
++# CONFIG_USB_GADGET_GOKU is not set
++# CONFIG_USB_GADGET_LH7A40X is not set
++# CONFIG_USB_GADGET_OMAP is not set
++# CONFIG_USB_GADGET_DUMMY_HCD is not set
++CONFIG_USB_GADGET_DUALSPEED=y
++CONFIG_USB_ZERO=m
++CONFIG_USB_ETH=m
++CONFIG_USB_ETH_RNDIS=y
++CONFIG_USB_GADGETFS=m
++CONFIG_USB_FILE_STORAGE=m
++# CONFIG_USB_FILE_STORAGE_TEST is not set
++CONFIG_USB_G_SERIAL=m
++
++#
++# MMC/SD Card support
++#
++# CONFIG_MMC is not set
++
++#
++# InfiniBand support
++#
++# CONFIG_INFINIBAND is not set
++
++#
++# Power management options
++#
++
++#
++# ACPI (Advanced Configuration and Power Interface) Support
++#
++CONFIG_ACPI=y
++CONFIG_ACPI_BOOT=y
++CONFIG_ACPI_INTERPRETER=y
++CONFIG_ACPI_AC=m
++CONFIG_ACPI_BATTERY=m
++CONFIG_ACPI_BUTTON=m
++CONFIG_ACPI_VIDEO=m
++CONFIG_ACPI_FAN=m
++CONFIG_ACPI_PROCESSOR=m
++# CONFIG_ACPI_HOTPLUG_CPU is not set
++CONFIG_ACPI_THERMAL=m
++CONFIG_ACPI_ASUS=m
++CONFIG_ACPI_IBM=m
++CONFIG_ACPI_TOSHIBA=m
++CONFIG_ACPI_BLACKLIST_YEAR=0
++# CONFIG_ACPI_DEBUG is not set
++CONFIG_ACPI_BUS=y
++CONFIG_ACPI_EC=y
++CONFIG_ACPI_POWER=y
++CONFIG_ACPI_PCI=y
++CONFIG_ACPI_SYSTEM=y
++# CONFIG_X86_PM_TIMER is not set
++# CONFIG_ACPI_CONTAINER is not set
++
++#
++# File systems
++#
++CONFIG_EXT2_FS=y
++CONFIG_EXT2_FS_XATTR=y
++CONFIG_EXT2_FS_POSIX_ACL=y
++CONFIG_EXT2_FS_SECURITY=y
++CONFIG_EXT3_FS=m
++CONFIG_EXT3_FS_XATTR=y
++CONFIG_EXT3_FS_POSIX_ACL=y
++CONFIG_EXT3_FS_SECURITY=y
++CONFIG_JBD=m
++# CONFIG_JBD_DEBUG is not set
++CONFIG_FS_MBCACHE=y
++CONFIG_REISERFS_FS=m
++# CONFIG_REISERFS_CHECK is not set
++# CONFIG_REISERFS_PROC_INFO is not set
++# CONFIG_REISERFS_FS_XATTR is not set
++CONFIG_JFS_FS=m
++CONFIG_JFS_POSIX_ACL=y
++# CONFIG_JFS_SECURITY is not set
++# CONFIG_JFS_DEBUG is not set
++CONFIG_JFS_STATISTICS=y
++CONFIG_FS_POSIX_ACL=y
++
++#
++# XFS support
++#
++CONFIG_XFS_FS=m
++CONFIG_XFS_EXPORT=y
++CONFIG_XFS_RT=y
++CONFIG_XFS_QUOTA=y
++CONFIG_XFS_SECURITY=y
++CONFIG_XFS_POSIX_ACL=y
++CONFIG_MINIX_FS=m
++CONFIG_ROMFS_FS=m
++CONFIG_QUOTA=y
++CONFIG_QFMT_V1=m
++CONFIG_QFMT_V2=m
++CONFIG_QUOTACTL=y
++CONFIG_DNOTIFY=y
++CONFIG_AUTOFS_FS=m
++CONFIG_AUTOFS4_FS=m
++
++#
++# CD-ROM/DVD Filesystems
++#
++CONFIG_ISO9660_FS=m
++CONFIG_JOLIET=y
++CONFIG_ZISOFS=y
++CONFIG_ZISOFS_FS=m
++CONFIG_UDF_FS=m
++CONFIG_UDF_NLS=y
++
++#
++# DOS/FAT/NT Filesystems
++#
++CONFIG_FAT_FS=m
++CONFIG_MSDOS_FS=m
++CONFIG_VFAT_FS=m
++CONFIG_FAT_DEFAULT_CODEPAGE=437
++CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
++CONFIG_NTFS_FS=m
++# CONFIG_NTFS_DEBUG is not set
++# CONFIG_NTFS_RW is not set
++
++#
++# Pseudo filesystems
++#
++CONFIG_PROC_FS=y
++CONFIG_PROC_KCORE=y
++CONFIG_SYSFS=y
++# CONFIG_DEVFS_FS is not set
++CONFIG_DEVPTS_FS_XATTR=y
++CONFIG_DEVPTS_FS_SECURITY=y
++CONFIG_TMPFS=y
++CONFIG_TMPFS_XATTR=y
++CONFIG_TMPFS_SECURITY=y
++# CONFIG_HUGETLBFS is not set
++# CONFIG_HUGETLB_PAGE is not set
++CONFIG_RAMFS=y
++
++#
++# Miscellaneous filesystems
++#
++CONFIG_ADFS_FS=m
++# CONFIG_ADFS_FS_RW is not set
++CONFIG_AFFS_FS=m
++CONFIG_HFS_FS=m
++CONFIG_HFSPLUS_FS=m
++CONFIG_BEFS_FS=m
++# CONFIG_BEFS_DEBUG is not set
++CONFIG_BFS_FS=m
++CONFIG_EFS_FS=m
++CONFIG_JFFS_FS=m
++CONFIG_JFFS_FS_VERBOSE=0
++CONFIG_JFFS_PROC_FS=y
++CONFIG_JFFS2_FS=m
++CONFIG_JFFS2_FS_DEBUG=0
++# CONFIG_JFFS2_FS_NAND is not set
++# CONFIG_JFFS2_FS_NOR_ECC is not set
++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
++CONFIG_JFFS2_ZLIB=y
++CONFIG_JFFS2_RTIME=y
++# CONFIG_JFFS2_RUBIN is not set
++CONFIG_CRAMFS=y
++CONFIG_VXFS_FS=m
++CONFIG_HPFS_FS=m
++CONFIG_QNX4FS_FS=m
++# CONFIG_QNX4FS_RW is not set
++CONFIG_SYSV_FS=m
++CONFIG_UFS_FS=m
++# CONFIG_UFS_FS_WRITE is not set
++
++#
++# Network File Systems
++#
++CONFIG_NFS_FS=m
++CONFIG_NFS_V3=y
++CONFIG_NFS_V4=y
++CONFIG_NFS_DIRECTIO=y
++CONFIG_NFSD=m
++CONFIG_NFSD_V3=y
++CONFIG_NFSD_V4=y
++CONFIG_NFSD_TCP=y
++CONFIG_LOCKD=m
++CONFIG_LOCKD_V4=y
++CONFIG_EXPORTFS=m
++CONFIG_SUNRPC=m
++CONFIG_SUNRPC_GSS=m
++CONFIG_RPCSEC_GSS_KRB5=m
++CONFIG_RPCSEC_GSS_SPKM3=m
++CONFIG_SMB_FS=m
++# CONFIG_SMB_NLS_DEFAULT is not set
++CONFIG_CIFS=m
++# CONFIG_CIFS_STATS is not set
++# CONFIG_CIFS_XATTR is not set
++# CONFIG_CIFS_EXPERIMENTAL is not set
++CONFIG_NCP_FS=m
++CONFIG_NCPFS_PACKET_SIGNING=y
++CONFIG_NCPFS_IOCTL_LOCKING=y
++CONFIG_NCPFS_STRONG=y
++CONFIG_NCPFS_NFS_NS=y
++CONFIG_NCPFS_OS2_NS=y
++# CONFIG_NCPFS_SMALLDOS is not set
++CONFIG_NCPFS_NLS=y
++CONFIG_NCPFS_EXTRAS=y
++CONFIG_CODA_FS=m
++# CONFIG_CODA_FS_OLD_API is not set
++CONFIG_AFS_FS=m
++CONFIG_RXRPC=m
++
++#
++# Partition Types
++#
++CONFIG_PARTITION_ADVANCED=y
++CONFIG_ACORN_PARTITION=y
++CONFIG_ACORN_PARTITION_CUMANA=y
++# CONFIG_ACORN_PARTITION_EESOX is not set
++CONFIG_ACORN_PARTITION_ICS=y
++# CONFIG_ACORN_PARTITION_ADFS is not set
++# CONFIG_ACORN_PARTITION_POWERTEC is not set
++CONFIG_ACORN_PARTITION_RISCIX=y
++CONFIG_OSF_PARTITION=y
++CONFIG_AMIGA_PARTITION=y
++CONFIG_ATARI_PARTITION=y
++CONFIG_MAC_PARTITION=y
++CONFIG_MSDOS_PARTITION=y
++CONFIG_BSD_DISKLABEL=y
++CONFIG_MINIX_SUBPARTITION=y
++CONFIG_SOLARIS_X86_PARTITION=y
++CONFIG_UNIXWARE_DISKLABEL=y
++CONFIG_LDM_PARTITION=y
++# CONFIG_LDM_DEBUG is not set
++CONFIG_SGI_PARTITION=y
++CONFIG_ULTRIX_PARTITION=y
++CONFIG_SUN_PARTITION=y
++CONFIG_EFI_PARTITION=y
++
++#
++# Native Language Support
++#
++CONFIG_NLS=y
++CONFIG_NLS_DEFAULT="cp437"
++CONFIG_NLS_CODEPAGE_437=m
++CONFIG_NLS_CODEPAGE_737=m
++CONFIG_NLS_CODEPAGE_775=m
++CONFIG_NLS_CODEPAGE_850=m
++CONFIG_NLS_CODEPAGE_852=m
++CONFIG_NLS_CODEPAGE_855=m
++CONFIG_NLS_CODEPAGE_857=m
++CONFIG_NLS_CODEPAGE_860=m
++CONFIG_NLS_CODEPAGE_861=m
++CONFIG_NLS_CODEPAGE_862=m
++CONFIG_NLS_CODEPAGE_863=m
++CONFIG_NLS_CODEPAGE_864=m
++CONFIG_NLS_CODEPAGE_865=m
++CONFIG_NLS_CODEPAGE_866=m
++CONFIG_NLS_CODEPAGE_869=m
++CONFIG_NLS_CODEPAGE_936=m
++CONFIG_NLS_CODEPAGE_950=m
++CONFIG_NLS_CODEPAGE_932=m
++CONFIG_NLS_CODEPAGE_949=m
++CONFIG_NLS_CODEPAGE_874=m
++CONFIG_NLS_ISO8859_8=m
++CONFIG_NLS_CODEPAGE_1250=m
++CONFIG_NLS_CODEPAGE_1251=m
++CONFIG_NLS_ASCII=m
++CONFIG_NLS_ISO8859_1=m
++CONFIG_NLS_ISO8859_2=m
++CONFIG_NLS_ISO8859_3=m
++CONFIG_NLS_ISO8859_4=m
++CONFIG_NLS_ISO8859_5=m
++CONFIG_NLS_ISO8859_6=m
++CONFIG_NLS_ISO8859_7=m
++CONFIG_NLS_ISO8859_9=m
++CONFIG_NLS_ISO8859_13=m
++CONFIG_NLS_ISO8859_14=m
++CONFIG_NLS_ISO8859_15=m
++CONFIG_NLS_KOI8_R=m
++CONFIG_NLS_KOI8_U=m
++CONFIG_NLS_UTF8=m
++
++#
++# Security options
++#
++CONFIG_KEYS=y
++# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
++CONFIG_SECURITY=y
++# CONFIG_SECURITY_NETWORK is not set
++CONFIG_SECURITY_CAPABILITIES=y
++CONFIG_SECURITY_ROOTPLUG=m
++CONFIG_SECURITY_SECLVL=m
++CONFIG_SECURITY_SELINUX=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM=y
++CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
++CONFIG_SECURITY_SELINUX_DISABLE=y
++CONFIG_SECURITY_SELINUX_DEVELOP=y
++CONFIG_SECURITY_SELINUX_AVC_STATS=y
++CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
++
++#
++# Cryptographic options
++#
++CONFIG_CRYPTO=y
++CONFIG_CRYPTO_HMAC=y
++CONFIG_CRYPTO_NULL=m
++CONFIG_CRYPTO_MD4=m
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_SHA1=m
++CONFIG_CRYPTO_SHA256=m
++CONFIG_CRYPTO_SHA512=m
++CONFIG_CRYPTO_WP512=m
++CONFIG_CRYPTO_TGR192=m
++CONFIG_CRYPTO_DES=m
++CONFIG_CRYPTO_BLOWFISH=m
++CONFIG_CRYPTO_TWOFISH=m
++CONFIG_CRYPTO_SERPENT=m
++CONFIG_CRYPTO_AES_586=m
++CONFIG_CRYPTO_CAST5=m
++CONFIG_CRYPTO_CAST6=m
++CONFIG_CRYPTO_TEA=m
++CONFIG_CRYPTO_ARC4=m
++CONFIG_CRYPTO_KHAZAD=m
++CONFIG_CRYPTO_ANUBIS=m
++CONFIG_CRYPTO_DEFLATE=m
++CONFIG_CRYPTO_MICHAEL_MIC=m
++CONFIG_CRYPTO_CRC32C=m
++CONFIG_CRYPTO_TEST=m
++
++#
++# Hardware crypto devices
++#
++# CONFIG_CRYPTO_DEV_PADLOCK is not set
++
++#
++# Library routines
++#
++CONFIG_CRC_CCITT=m
++CONFIG_CRC32=y
++CONFIG_LIBCRC32C=m
++CONFIG_ZLIB_INFLATE=y
++CONFIG_ZLIB_DEFLATE=m
++CONFIG_REED_SOLOMON=m
++CONFIG_REED_SOLOMON_DEC16=y
++
++#
++# Kernel hacking
++#
++# CONFIG_PRINTK_TIME is not set
++CONFIG_DEBUG_KERNEL=y
++CONFIG_MAGIC_SYSRQ=y
++CONFIG_LOG_BUF_SHIFT=14
++# CONFIG_SCHEDSTATS is not set
++# CONFIG_DEBUG_SLAB is not set
++# CONFIG_DEBUG_SPINLOCK is not set
++# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
++# CONFIG_DEBUG_KOBJECT is not set
++# CONFIG_DEBUG_HIGHMEM is not set
++# CONFIG_DEBUG_BUGVERBOSE is not set
++# CONFIG_DEBUG_INFO is not set
++# CONFIG_DEBUG_FS is not set
++# CONFIG_FRAME_POINTER is not set
++# CONFIG_DEBUG_STACKOVERFLOW is not set
++# CONFIG_KPROBES is not set
++# CONFIG_DEBUG_STACK_USAGE is not set
++# CONFIG_DEBUG_PAGEALLOC is not set
++# CONFIG_4KSTACKS is not set
++CONFIG_X86_FIND_SMP_CONFIG=y
++CONFIG_X86_MPPARSE=y
+diff -Nurp pristine-linux-2.6.12/drivers/acpi/pci_irq.c linux-2.6.12-xen/drivers/acpi/pci_irq.c
+--- pristine-linux-2.6.12/drivers/acpi/pci_irq.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/acpi/pci_irq.c	2006-03-05 23:54:35.696243735 +0100
+@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
+ 		printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
+ 			pci_name(dev), ('A' + pin));
+ 		/* Interrupt Line values above 0xF are forbidden */
+-		if (dev->irq >= 0 && (dev->irq <= 0xF)) {
++		if (dev->irq > 0 && (dev->irq <= 0xF)) {
+ 			printk(" - using IRQ %d\n", dev->irq);
++			acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+ 			return_VALUE(0);
+ 		}
+ 		else {
+diff -Nurp pristine-linux-2.6.12/drivers/acpi/tables.c linux-2.6.12-xen/drivers/acpi/tables.c
+--- pristine-linux-2.6.12/drivers/acpi/tables.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/acpi/tables.c	2006-03-05 23:36:31.000000000 +0100
+@@ -565,6 +565,11 @@ acpi_table_get_sdt (
+  * 
+  * result: sdt_entry[] is initialized
+  */
++#if defined(CONFIG_XEN_X86) || defined(CONFIG_XEN_X86_64)
++#define acpi_rsdp_phys_to_va(rsdp_phys) isa_bus_to_virt(rsdp_phys)
++#else
++#define acpi_rsdp_phys_to_va(rsdp_phys) __va(rsdp_phys)
++#endif
+ 
+ int __init
+ acpi_table_init (void)
+@@ -581,7 +586,7 @@ acpi_table_init (void)
+ 		return -ENODEV;
+ 	}
+ 
+-	rsdp = (struct acpi_table_rsdp *) __va(rsdp_phys);
++	rsdp = (struct acpi_table_rsdp *) acpi_rsdp_phys_to_va(rsdp_phys);
+ 	if (!rsdp) {
+ 		printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
+ 		return -ENODEV;
+diff -Nurp pristine-linux-2.6.12/drivers/char/mem.c linux-2.6.12-xen/drivers/char/mem.c
+--- pristine-linux-2.6.12/drivers/char/mem.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/mem.c	2006-03-05 23:36:31.000000000 +0100
+@@ -104,6 +104,7 @@ static inline int valid_phys_addr_range(
+ }
+ #endif
+ 
++#ifndef ARCH_HAS_DEV_MEM
+ /*
+  * This funcion reads the *physical* memory. The f_pos points directly to the 
+  * memory location. 
+@@ -228,6 +229,7 @@ static ssize_t write_mem(struct file * f
+ 	*ppos += written;
+ 	return written;
+ }
++#endif
+ 
+ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
+ {
+@@ -722,6 +724,7 @@ static int open_port(struct inode * inod
+ #define open_mem	open_port
+ #define open_kmem	open_mem
+ 
++#ifndef ARCH_HAS_DEV_MEM
+ static struct file_operations mem_fops = {
+ 	.llseek		= memory_lseek,
+ 	.read		= read_mem,
+@@ -729,6 +732,9 @@ static struct file_operations mem_fops =
+ 	.mmap		= mmap_mem,
+ 	.open		= open_mem,
+ };
++#else
++extern struct file_operations mem_fops;
++#endif
+ 
+ static struct file_operations kmem_fops = {
+ 	.llseek		= memory_lseek,
+diff -Nurp pristine-linux-2.6.12/drivers/char/rocket.c linux-2.6.12-xen/drivers/char/rocket.c
+--- pristine-linux-2.6.12/drivers/char/rocket.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/rocket.c	2006-03-05 23:54:36.690097286 +0100
+@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
+ 		ToRecv = space;
+ 
+ 	if (ToRecv <= 0)
+-		return;
++		goto done;
+ 
+ 	/*
+ 	 * if status indicates there are errored characters in the
+@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
+ 	}
+ 	/*  Push the data up to the tty layer */
+ 	ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
++done:
+ 	tty_ldisc_deref(ld);
+ }
+ 
+diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/Kconfig linux-2.6.12-xen/drivers/char/tpm/Kconfig
+--- pristine-linux-2.6.12/drivers/char/tpm/Kconfig	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/tpm/Kconfig	2006-03-05 23:36:31.000000000 +0100
+@@ -35,5 +35,15 @@ config TCG_ATMEL
+ 	  will be accessible from within Linux.  To compile this driver 
+ 	  as a module, choose M here; the module will be called tpm_atmel.
+ 
++config TCG_XEN
++	tristate "XEN TPM Interface"
++	depends on TCG_TPM && ARCH_XEN && XEN_TPMDEV_FRONTEND
++	---help---
++	  If you want to make TPM support available to a Xen
++	  user domain, say Yes and it will
++          be accessible from within Linux. To compile this driver
++          as a module, choose M here; the module will be called
++          tpm_xen.
++
+ endmenu
+ 
+diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/Kconfig.domU linux-2.6.12-xen/drivers/char/tpm/Kconfig.domU
+--- pristine-linux-2.6.12/drivers/char/tpm/Kconfig.domU	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/char/tpm/Kconfig.domU	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,30 @@
++#
++# TPM device configuration
++#
++
++menu "TPM devices"
++
++config TCG_TPM
++	tristate "TPM Support for XEN"
++	depends on ARCH_XEN && !XEN_PHYSDEV_ACCESS
++	---help---
++	  If you want to make TPM security available in your system,
++	  say Yes and it will be accessible from within a user domain.  For
++	  more information see <http://www.trustedcomputinggroup.org>.
++	  An implementation of the Trusted Software Stack (TSS), the
++	  userspace enablement piece of the specification, can be
++	  obtained at: <http://sourceforge.net/projects/trousers>.  To
++	  compile this driver as a module, choose M here; the module
++	  will be called tpm. If unsure, say N.
++
++config TCG_XEN
++	tristate "XEN TPM Interface"
++	depends on TCG_TPM && ARCH_XEN && XEN_TPMDEV_FRONTEND
++	---help---
++	  If you want to make TPM support available to a Xen
++	  user domain, say Yes and it will
++          be accessible from within Linux. To compile this driver
++          as a module, choose M here; the module will be called
++          tpm_xen.
++
++endmenu
+diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/Makefile linux-2.6.12-xen/drivers/char/tpm/Makefile
+--- pristine-linux-2.6.12/drivers/char/tpm/Makefile	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/tpm/Makefile	2006-03-05 23:36:31.000000000 +0100
+@@ -1,7 +1,13 @@
+ #
+ # Makefile for the kernel tpm device drivers.
+ #
++ifeq ($(CONFIG_XEN_PHYSDEV_ACCESS),y)
+ obj-$(CONFIG_TCG_TPM) += tpm.o
+ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+ obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+-
++obj-$(CONFIG_TCG_TIS) += tpm_tis.o
++obj-$(CONFIG_TCG_XEN) += tpm_xen.o
++else
++obj-$(CONFIG_TCG_TPM) += tpm.o
++obj-$(CONFIG_TCG_XEN) += tpm_xen.o
++endif
+diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm_atmel.c linux-2.6.12-xen/drivers/char/tpm/tpm_atmel.c
+--- pristine-linux-2.6.12/drivers/char/tpm/tpm_atmel.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/tpm/tpm_atmel.c	2006-03-05 23:36:31.000000000 +0100
+@@ -22,17 +22,23 @@
+ #include "tpm.h"
+ 
+ /* Atmel definitions */
+-#define	TPM_ATML_BASE			0x400
++enum tpm_atmel_addr {
++	TPM_ATMEL_BASE_ADDR_LO = 0x08,
++	TPM_ATMEL_BASE_ADDR_HI = 0x09
++};
+ 
+ /* write status bits */
+-#define	ATML_STATUS_ABORT		0x01
+-#define	ATML_STATUS_LASTBYTE		0x04
+-
++enum tpm_atmel_write_status {
++	ATML_STATUS_ABORT = 0x01,
++	ATML_STATUS_LASTBYTE = 0x04
++};
+ /* read status bits */
+-#define	ATML_STATUS_BUSY		0x01
+-#define	ATML_STATUS_DATA_AVAIL		0x02
+-#define	ATML_STATUS_REWRITE		0x04
+-
++enum tpm_atmel_read_status {
++	ATML_STATUS_BUSY = 0x01,
++	ATML_STATUS_DATA_AVAIL = 0x02,
++	ATML_STATUS_REWRITE = 0x04,
++	ATML_STATUS_READY = 0x08
++};
+ 
+ static int tpm_atml_recv(struct tpm_chip *chip, u8 * buf, size_t count)
+ {
+@@ -48,7 +54,7 @@ static int tpm_atml_recv(struct tpm_chip
+ 	for (i = 0; i < 6; i++) {
+ 		status = inb(chip->vendor->base + 1);
+ 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+-			dev_err(&chip->pci_dev->dev,
++			dev_err(chip->dev,
+ 				"error reading header\n");
+ 			return -EIO;
+ 		}
+@@ -60,12 +66,12 @@ static int tpm_atml_recv(struct tpm_chip
+ 	size = be32_to_cpu(*native_size);
+ 
+ 	if (count < size) {
+-		dev_err(&chip->pci_dev->dev,
++		dev_err(chip->dev,
+ 			"Recv size(%d) less than available space\n", size);
+ 		for (; i < size; i++) {	/* clear the waiting data anyway */
+ 			status = inb(chip->vendor->base + 1);
+ 			if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+-				dev_err(&chip->pci_dev->dev,
++				dev_err(chip->dev,
+ 					"error reading data\n");
+ 				return -EIO;
+ 			}
+@@ -77,7 +83,7 @@ static int tpm_atml_recv(struct tpm_chip
+ 	for (; i < size; i++) {
+ 		status = inb(chip->vendor->base + 1);
+ 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
+-			dev_err(&chip->pci_dev->dev,
++			dev_err(chip->dev,
+ 				"error reading data\n");
+ 			return -EIO;
+ 		}
+@@ -87,7 +93,7 @@ static int tpm_atml_recv(struct tpm_chip
+ 	/* make sure data available is gone */
+ 	status = inb(chip->vendor->base + 1);
+ 	if (status & ATML_STATUS_DATA_AVAIL) {
+-		dev_err(&chip->pci_dev->dev, "data available is stuck\n");
++		dev_err(chip->dev, "data available is stuck\n");
+ 		return -EIO;
+ 	}
+ 
+@@ -98,9 +104,9 @@ static int tpm_atml_send(struct tpm_chip
+ {
+ 	int i;
+ 
+-	dev_dbg(&chip->pci_dev->dev, "tpm_atml_send: ");
++	dev_dbg(chip->dev, "tpm_atml_send:\n");
+ 	for (i = 0; i < count; i++) {
+-		dev_dbg(&chip->pci_dev->dev, "0x%x(%d) ", buf[i], buf[i]);
++		dev_dbg(chip->dev, "%d 0x%x(%d)\n",  i, buf[i], buf[i]);
+ 		outb(buf[i], chip->vendor->base);
+ 	}
+ 
+@@ -112,6 +118,11 @@ static void tpm_atml_cancel(struct tpm_c
+ 	outb(ATML_STATUS_ABORT, chip->vendor->base + 1);
+ }
+ 
++static u8 tpm_atml_status(struct tpm_chip *chip)
++{
++	return inb(chip->vendor->base + 1);
++}
++
+ static struct file_operations atmel_ops = {
+ 	.owner = THIS_MODULE,
+ 	.llseek = no_llseek,
+@@ -121,13 +132,30 @@ static struct file_operations atmel_ops 
+ 	.release = tpm_release,
+ };
+ 
++static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
++static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
++static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
++static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
++
++static struct attribute* atmel_attrs[] = {
++	&dev_attr_pubek.attr,
++	&dev_attr_pcrs.attr,
++	&dev_attr_caps.attr,
++	&dev_attr_cancel.attr,
++	0,
++};
++
++static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs };
++
+ static struct tpm_vendor_specific tpm_atmel = {
+ 	.recv = tpm_atml_recv,
+ 	.send = tpm_atml_send,
+ 	.cancel = tpm_atml_cancel,
++	.status = tpm_atml_status,
+ 	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
+ 	.req_complete_val = ATML_STATUS_DATA_AVAIL,
+-	.base = TPM_ATML_BASE,
++	.req_canceled = ATML_STATUS_READY,
++	.attr_group = &atmel_attr_grp,
+ 	.miscdev = { .fops = &atmel_ops, },
+ };
+ 
+@@ -136,34 +164,36 @@ static int __devinit tpm_atml_init(struc
+ {
+ 	u8 version[4];
+ 	int rc = 0;
++	int lo, hi;
+ 
+ 	if (pci_enable_device(pci_dev))
+ 		return -EIO;
+ 
+-	if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
+-		rc = -ENODEV;
+-		goto out_err;
+-	}
++	lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
++	hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
++
++	tpm_atmel.base = (hi<<8)|lo;
++	dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
+ 
+ 	/* verify that it is an Atmel part */
+-	if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
+-	    || tpm_read_index(6) != 'M' || tpm_read_index(7) != 'L') {
++	if (tpm_read_index(TPM_ADDR, 4) != 'A' || tpm_read_index(TPM_ADDR, 5) != 'T'
++	    || tpm_read_index(TPM_ADDR, 6) != 'M' || tpm_read_index(TPM_ADDR, 7) != 'L') {
+ 		rc = -ENODEV;
+ 		goto out_err;
+ 	}
+ 
+ 	/* query chip for its version number */
+-	if ((version[0] = tpm_read_index(0x00)) != 0xFF) {
+-		version[1] = tpm_read_index(0x01);
+-		version[2] = tpm_read_index(0x02);
+-		version[3] = tpm_read_index(0x03);
++	if ((version[0] = tpm_read_index(TPM_ADDR, 0x00)) != 0xFF) {
++		version[1] = tpm_read_index(TPM_ADDR, 0x01);
++		version[2] = tpm_read_index(TPM_ADDR, 0x02);
++		version[3] = tpm_read_index(TPM_ADDR, 0x03);
+ 	} else {
+ 		dev_info(&pci_dev->dev, "version query failed\n");
+ 		rc = -ENODEV;
+ 		goto out_err;
+ 	}
+ 
+-	if ((rc = tpm_register_hardware(pci_dev, &tpm_atmel)) < 0)
++	if ((rc = tpm_register_hardware(&pci_dev->dev, &tpm_atmel)) < 0)
+ 		goto out_err;
+ 
+ 	dev_info(&pci_dev->dev,
+@@ -176,13 +206,30 @@ out_err:
+ 	return rc;
+ }
+ 
++static void __devexit tpm_atml_remove(struct pci_dev *pci_dev) 
++{
++	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
++
++	if ( chip )
++		tpm_remove_hardware(chip->dev);
++}
++
+ static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
++#ifndef PCI_DEVICE_ID_SERVERWORKS_CSB6LPC
++#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
++#else
++#warning Remove the define of PCI_DEVICE_ID_SERVERWORKS_CSB6LPC
++#endif
++	{PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6LPC)},
+ 	{0,}
+ };
+ 
+@@ -192,7 +239,7 @@ static struct pci_driver atmel_pci_drive
+ 	.name = "tpm_atmel",
+ 	.id_table = tpm_pci_tbl,
+ 	.probe = tpm_atml_init,
+-	.remove = __devexit_p(tpm_remove),
++	.remove = __devexit_p(tpm_atml_remove),
+ 	.suspend = tpm_pm_suspend,
+ 	.resume = tpm_pm_resume,
+ };
+@@ -207,7 +254,7 @@ static void __exit cleanup_atmel(void)
+ 	pci_unregister_driver(&atmel_pci_driver);
+ }
+ 
+-module_init(init_atmel);
++fs_initcall(init_atmel);
+ module_exit(cleanup_atmel);
+ 
+ MODULE_AUTHOR("Leendert van Doorn (leendert at watson.ibm.com)");
+diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm.c linux-2.6.12-xen/drivers/char/tpm/tpm.c
+--- pristine-linux-2.6.12/drivers/char/tpm/tpm.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/tpm/tpm.c	2006-03-05 23:36:31.000000000 +0100
+@@ -19,7 +19,7 @@
+  * 
+  * Note, the TPM chip is not interrupt driven (only polling)
+  * and can have very long timeouts (minutes!). Hence the unusual
+- * calls to schedule_timeout.
++ * calls to msleep.
+  *
+  */
+ 
+@@ -28,19 +28,20 @@
+ #include <linux/spinlock.h>
+ #include "tpm.h"
+ 
+-#define	TPM_MINOR			224	/* officially assigned */
++#define TPM_CHIP_NUM_MASK	0x0000ffff
++#define TPM_CHIP_TYPE_SHIFT	16	
+ 
+-#define	TPM_BUFSIZE			2048
+-
+-/* PCI configuration addresses */
+-#define	PCI_GEN_PMCON_1			0xA0
+-#define	PCI_GEN1_DEC			0xE4
+-#define	PCI_LPC_EN			0xE6
+-#define	PCI_GEN2_DEC			0xEC
++enum tpm_const {
++	TPM_MINOR = 224,	/* officially assigned */
++	TPM_MIN_BUFSIZE = 2048,
++	TPM_MAX_BUFSIZE = 64 * 1024,
++	TPM_NUM_DEVICES = 256,
++	TPM_NUM_MASK_ENTRIES = TPM_NUM_DEVICES / (8 * sizeof(int))
++};
+ 
+ static LIST_HEAD(tpm_chip_list);
+ static DEFINE_SPINLOCK(driver_lock);
+-static int dev_mask[32];
++static int dev_mask[TPM_NUM_MASK_ENTRIES];
+ 
+ static void user_reader_timeout(unsigned long ptr)
+ {
+@@ -48,154 +49,81 @@ static void user_reader_timeout(unsigned
+ 
+ 	down(&chip->buffer_mutex);
+ 	atomic_set(&chip->data_pending, 0);
+-	memset(chip->data_buffer, 0, TPM_BUFSIZE);
++	memset(chip->data_buffer, 0, chip->vendor->buffersize);
+ 	up(&chip->buffer_mutex);
+ }
+ 
+-void tpm_time_expired(unsigned long ptr)
+-{
+-	int *exp = (int *) ptr;
+-	*exp = 1;
+-}
+-
+-EXPORT_SYMBOL_GPL(tpm_time_expired);
+-
+-/*
+- * Initialize the LPC bus and enable the TPM ports
+- */
+-int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
+-{
+-	u32 lpcenable, tmp;
+-	int is_lpcm = 0;
+-
+-	switch (pci_dev->vendor) {
+-	case PCI_VENDOR_ID_INTEL:
+-		switch (pci_dev->device) {
+-		case PCI_DEVICE_ID_INTEL_82801CA_12:
+-		case PCI_DEVICE_ID_INTEL_82801DB_12:
+-			is_lpcm = 1;
+-			break;
+-		}
+-		/* init ICH (enable LPC) */
+-		pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
+-		lpcenable |= 0x20000000;
+-		pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
+-
+-		if (is_lpcm) {
+-			pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
+-					      &lpcenable);
+-			if ((lpcenable & 0x20000000) == 0) {
+-				dev_err(&pci_dev->dev,
+-					"cannot enable LPC\n");
+-				return -ENODEV;
+-			}
+-		}
+-
+-		/* initialize TPM registers */
+-		pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
+-
+-		if (!is_lpcm)
+-			tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
+-		else
+-			tmp =
+-			    (tmp & 0xFFFF0000) | (base & 0xFFF0) |
+-			    0x00000001;
+-
+-		pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
+-
+-		if (is_lpcm) {
+-			pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
+-					      &tmp);
+-			tmp |= 0x00000004;	/* enable CLKRUN */
+-			pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
+-					       tmp);
+-		}
+-		tpm_write_index(0x0D, 0x55);	/* unlock 4F */
+-		tpm_write_index(0x0A, 0x00);	/* int disable */
+-		tpm_write_index(0x08, base);	/* base addr lo */
+-		tpm_write_index(0x09, (base & 0xFF00) >> 8);	/* base addr hi */
+-		tpm_write_index(0x0D, 0xAA);	/* lock 4F */
+-		break;
+-	case PCI_VENDOR_ID_AMD:
+-		/* nothing yet */
+-		break;
+-	}
+-
+-	return 0;
+-}
+-
+-EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
+-
+ /*
+  * Internal kernel interface to transmit TPM commands
+  */
+-static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
++static ssize_t tpm_transmit(struct tpm_chip * chip, const char *buf,
+ 			    size_t bufsiz)
+ {
+-	ssize_t len;
++	ssize_t rc;
+ 	u32 count;
+-	__be32 *native_size;
++	unsigned long stop;
++
++	if (!chip)
++		return -ENODEV;
++
++	if ( !chip )
++		return -ENODEV;
+ 
+-	native_size = (__force __be32 *) (buf + 2);
+-	count = be32_to_cpu(*native_size);
++	count = be32_to_cpu(*((__be32 *) (buf + 2)));
+ 
+ 	if (count == 0)
+ 		return -ENODATA;
+ 	if (count > bufsiz) {
+-		dev_err(&chip->pci_dev->dev,
++		dev_err(chip->dev,
+ 			"invalid count value %x %zx \n", count, bufsiz);
+ 		return -E2BIG;
+ 	}
+ 
+ 	down(&chip->tpm_mutex);
+ 
+-	if ((len = chip->vendor->send(chip, (u8 *) buf, count)) < 0) {
+-		dev_err(&chip->pci_dev->dev,
+-			"tpm_transmit: tpm_send: error %zd\n", len);
+-		return len;
++	if ((rc = chip->vendor->send(chip, (u8 *) buf, count)) < 0) {
++		dev_err(chip->dev,
++			"tpm_transmit: tpm_send: error %zd\n", rc);
++		goto out;
+ 	}
+ 
+-	down(&chip->timer_manipulation_mutex);
+-	chip->time_expired = 0;
+-	init_timer(&chip->device_timer);
+-	chip->device_timer.function = tpm_time_expired;
+-	chip->device_timer.expires = jiffies + 2 * 60 * HZ;
+-	chip->device_timer.data = (unsigned long) &chip->time_expired;
+-	add_timer(&chip->device_timer);
+-	up(&chip->timer_manipulation_mutex);
+-
++	stop = jiffies + 2 * 60 * HZ;
+ 	do {
+-		u8 status = inb(chip->vendor->base + 1);
++		u8 status = chip->vendor->status(chip);
+ 		if ((status & chip->vendor->req_complete_mask) ==
+ 		    chip->vendor->req_complete_val) {
+-			down(&chip->timer_manipulation_mutex);
+-			del_singleshot_timer_sync(&chip->device_timer);
+-			up(&chip->timer_manipulation_mutex);
+ 			goto out_recv;
+ 		}
+-		set_current_state(TASK_UNINTERRUPTIBLE);
+-		schedule_timeout(TPM_TIMEOUT);
++
++		if ((status == chip->vendor->req_canceled)) {
++			dev_err(chip->dev, "Operation Canceled\n");
++			rc = -ECANCELED;
++			goto out;
++		}
++
++		msleep(TPM_TIMEOUT);	/* CHECK */
+ 		rmb();
+-	} while (!chip->time_expired);
++	} while (time_before(jiffies, stop));
+ 
+ 
+ 	chip->vendor->cancel(chip);
+-	dev_err(&chip->pci_dev->dev, "Time expired\n");
+-	up(&chip->tpm_mutex);
+-	return -EIO;
++	dev_err(chip->dev, "Operation Timed out\n");
++	rc = -ETIME;
++	goto out;
+ 
+ out_recv:
+-	len = chip->vendor->recv(chip, (u8 *) buf, bufsiz);
+-	if (len < 0)
+-		dev_err(&chip->pci_dev->dev,
+-			"tpm_transmit: tpm_recv: error %zd\n", len);
++	rc = chip->vendor->recv(chip, (u8 *) buf, bufsiz);
++	if (rc < 0)
++		dev_err(chip->dev,
++			"tpm_transmit: tpm_recv: error %zd\n", rc);
++out:
+ 	up(&chip->tpm_mutex);
+-	return len;
++	return rc;
+ }
+ 
+ #define TPM_DIGEST_SIZE 20
+ #define CAP_PCR_RESULT_SIZE 18
+-static u8 cap_pcr[] = {
++static const u8 cap_pcr[] = {
+ 	0, 193,			/* TPM_TAG_RQU_COMMAND */
+ 	0, 0, 0, 22,		/* length */
+ 	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
+@@ -205,75 +133,102 @@ static u8 cap_pcr[] = {
+ };
+ 
+ #define READ_PCR_RESULT_SIZE 30
+-static u8 pcrread[] = {
++static const u8 pcrread[] = {
+ 	0, 193,			/* TPM_TAG_RQU_COMMAND */
+ 	0, 0, 0, 14,		/* length */
+ 	0, 0, 0, 21,		/* TPM_ORD_PcrRead */
+ 	0, 0, 0, 0		/* PCR index */
+ };
+ 
+-static ssize_t show_pcrs(struct device *dev, char *buf)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
++		      char *buf)
++#else
++ssize_t tpm_show_pcrs(struct device *dev,
++		      char *buf)
++#endif
+ {
+ 	u8 data[READ_PCR_RESULT_SIZE];
+ 	ssize_t len;
+-	int i, j, index, num_pcrs;
++	int i, j, num_pcrs;
++	__be32 index;
+ 	char *str = buf;
+ 
+-	struct tpm_chip *chip =
+-	    pci_get_drvdata(container_of(dev, struct pci_dev, dev));
++	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 	if (chip == NULL)
+ 		return -ENODEV;
+ 
+ 	memcpy(data, cap_pcr, sizeof(cap_pcr));
+ 	if ((len = tpm_transmit(chip, data, sizeof(data)))
+-	    < CAP_PCR_RESULT_SIZE)
+-		return len;
++	    < CAP_PCR_RESULT_SIZE) {
++		dev_dbg(chip->dev, "A TPM error (%d) occurred "
++				"attempting to determine the number of PCRS\n",
++			be32_to_cpu(*((__be32 *) (data + 6))));
++		return 0;
++	}
+ 
+-	num_pcrs = be32_to_cpu(*((__force __be32 *) (data + 14)));
++	num_pcrs = be32_to_cpu(*((__be32 *) (data + 14)));
+ 
+ 	for (i = 0; i < num_pcrs; i++) {
+ 		memcpy(data, pcrread, sizeof(pcrread));
+ 		index = cpu_to_be32(i);
+ 		memcpy(data + 10, &index, 4);
+ 		if ((len = tpm_transmit(chip, data, sizeof(data)))
+-		    < READ_PCR_RESULT_SIZE)
+-			return len;
++		    < READ_PCR_RESULT_SIZE){
++			dev_dbg(chip->dev, "A TPM error (%d) occurred"
++				" attempting to read PCR %d of %d\n",
++				be32_to_cpu(*((__be32 *) (data + 6))), i, num_pcrs);
++			goto out;
++		}
+ 		str += sprintf(str, "PCR-%02d: ", i);
+ 		for (j = 0; j < TPM_DIGEST_SIZE; j++)
+ 			str += sprintf(str, "%02X ", *(data + 10 + j));
+ 		str += sprintf(str, "\n");
+ 	}
++out:
+ 	return str - buf;
+ }
+-
+-static DEVICE_ATTR(pcrs, S_IRUGO, show_pcrs, NULL);
++EXPORT_SYMBOL_GPL(tpm_show_pcrs);
+ 
+ #define  READ_PUBEK_RESULT_SIZE 314
+-static u8 readpubek[] = {
++static const u8 readpubek[] = {
+ 	0, 193,			/* TPM_TAG_RQU_COMMAND */
+ 	0, 0, 0, 30,		/* length */
+ 	0, 0, 0, 124,		/* TPM_ORD_ReadPubek */
+ };
+ 
+-static ssize_t show_pubek(struct device *dev, char *buf)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
++		       char *buf)
++#else
++ssize_t tpm_show_pubek(struct device *dev,
++		       char *buf)
++#endif
+ {
+-	u8 data[READ_PUBEK_RESULT_SIZE];
++	u8 *data;
+ 	ssize_t len;
+-	__be32 *native_val;
+-	int i;
++	int i, rc;
+ 	char *str = buf;
+ 
+-	struct tpm_chip *chip =
+-	    pci_get_drvdata(container_of(dev, struct pci_dev, dev));
++	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 	if (chip == NULL)
+ 		return -ENODEV;
+ 
++	data = kmalloc(READ_PUBEK_RESULT_SIZE, GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
+ 	memcpy(data, readpubek, sizeof(readpubek));
+ 	memset(data + sizeof(readpubek), 0, 20);	/* zero nonce */
+ 
+-	if ((len = tpm_transmit(chip, data, sizeof(data))) <
+-	    READ_PUBEK_RESULT_SIZE)
+-		return len;
++	if ((len = tpm_transmit(chip, data, READ_PUBEK_RESULT_SIZE)) <
++	    READ_PUBEK_RESULT_SIZE) {
++		dev_dbg(chip->dev, "A TPM error (%d) occurred "
++				"attempting to read the PUBEK\n",
++			    be32_to_cpu(*((__be32 *) (data + 6))));
++		rc = 0;
++		goto out;
++	}
+ 
+ 	/* 
+ 	   ignore header 10 bytes
+@@ -286,8 +241,6 @@ static ssize_t show_pubek(struct device 
+ 	   ignore checksum 20 bytes
+ 	 */
+ 
+-	native_val = (__force __be32 *) (data + 34);
+-
+ 	str +=
+ 	    sprintf(str,
+ 		    "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
+@@ -298,21 +251,23 @@ static ssize_t show_pubek(struct device 
+ 		    data[15], data[16], data[17], data[22], data[23],
+ 		    data[24], data[25], data[26], data[27], data[28],
+ 		    data[29], data[30], data[31], data[32], data[33],
+-		    be32_to_cpu(*native_val)
+-	    );
++		    be32_to_cpu(*((__be32 *) (data + 34))));
+ 
+ 	for (i = 0; i < 256; i++) {
+-		str += sprintf(str, "%02X ", data[i + 39]);
++		str += sprintf(str, "%02X ", data[i + 38]);
+ 		if ((i + 1) % 16 == 0)
+ 			str += sprintf(str, "\n");
+ 	}
+-	return str - buf;
++	rc = str - buf;
++out:
++	kfree(data);
++	return rc;
+ }
+ 
+-static DEVICE_ATTR(pubek, S_IRUGO, show_pubek, NULL);
++EXPORT_SYMBOL_GPL(tpm_show_pubek);
+ 
+ #define CAP_VER_RESULT_SIZE 18
+-static u8 cap_version[] = {
++static const u8 cap_version[] = {
+ 	0, 193,			/* TPM_TAG_RQU_COMMAND */
+ 	0, 0, 0, 18,		/* length */
+ 	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
+@@ -321,7 +276,7 @@ static u8 cap_version[] = {
+ };
+ 
+ #define CAP_MANUFACTURER_RESULT_SIZE 18
+-static u8 cap_manufacturer[] = {
++static const u8 cap_manufacturer[] = {
+ 	0, 193,			/* TPM_TAG_RQU_COMMAND */
+ 	0, 0, 0, 22,		/* length */
+ 	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
+@@ -330,14 +285,19 @@ static u8 cap_manufacturer[] = {
+ 	0, 0, 1, 3
+ };
+ 
+-static ssize_t show_caps(struct device *dev, char *buf)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
++		      char *buf)
++#else
++ssize_t tpm_show_caps(struct device *dev,
++		      char *buf)
++#endif
+ {
+-	u8 data[READ_PUBEK_RESULT_SIZE];
++	u8 data[sizeof(cap_manufacturer)];
+ 	ssize_t len;
+ 	char *str = buf;
+ 
+-	struct tpm_chip *chip =
+-	    pci_get_drvdata(container_of(dev, struct pci_dev, dev));
++	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 	if (chip == NULL)
+ 		return -ENODEV;
+ 
+@@ -348,7 +308,7 @@ static ssize_t show_caps(struct device *
+ 		return len;
+ 
+ 	str += sprintf(str, "Manufacturer: 0x%x\n",
+-		       be32_to_cpu(*(data + 14)));
++		       be32_to_cpu(*((__be32 *) (data + 14))));
+ 
+ 	memcpy(data, cap_version, sizeof(cap_version));
+ 
+@@ -363,8 +323,25 @@ static ssize_t show_caps(struct device *
+ 
+ 	return str - buf;
+ }
++EXPORT_SYMBOL_GPL(tpm_show_caps);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
++			const char *buf, size_t count)
++#else
++ssize_t tpm_store_cancel(struct device *dev,
++			const char *buf, size_t count)
++#endif
++{
++	struct tpm_chip *chip = dev_get_drvdata(dev);
++	if (chip == NULL)
++		return 0;
++
++	chip->vendor->cancel(chip);
++	return count;
++}
++EXPORT_SYMBOL_GPL(tpm_store_cancel);
+ 
+-static DEVICE_ATTR(caps, S_IRUGO, show_caps, NULL);
+ 
+ /*
+  * Device file system interface to the TPM
+@@ -389,21 +366,21 @@ int tpm_open(struct inode *inode, struct
+ 	}
+ 
+ 	if (chip->num_opens) {
+-		dev_dbg(&chip->pci_dev->dev,
++		dev_dbg(chip->dev,
+ 			"Another process owns this TPM\n");
+ 		rc = -EBUSY;
+ 		goto err_out;
+ 	}
+ 
+ 	chip->num_opens++;
+-	pci_dev_get(chip->pci_dev);
++	get_device(chip->dev);
+ 
+ 	spin_unlock(&driver_lock);
+ 
+-	chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
++	chip->data_buffer = kmalloc(chip->vendor->buffersize * sizeof(u8), GFP_KERNEL);
+ 	if (chip->data_buffer == NULL) {
+ 		chip->num_opens--;
+-		pci_dev_put(chip->pci_dev);
++		put_device(chip->dev);
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -422,24 +399,15 @@ EXPORT_SYMBOL_GPL(tpm_open);
+ int tpm_release(struct inode *inode, struct file *file)
+ {
+ 	struct tpm_chip *chip = file->private_data;
+-	
+-	file->private_data = NULL;
+ 
+ 	spin_lock(&driver_lock);
++	file->private_data = NULL;
+ 	chip->num_opens--;
+-	spin_unlock(&driver_lock);
+-
+-	down(&chip->timer_manipulation_mutex);
+-	if (timer_pending(&chip->user_read_timer))
+-		del_singleshot_timer_sync(&chip->user_read_timer);
+-	else if (timer_pending(&chip->device_timer))
+-		del_singleshot_timer_sync(&chip->device_timer);
+-	up(&chip->timer_manipulation_mutex);
+-
+-	kfree(chip->data_buffer);
++	del_singleshot_timer_sync(&chip->user_read_timer);
+ 	atomic_set(&chip->data_pending, 0);
+-
+-	pci_dev_put(chip->pci_dev);
++	put_device(chip->dev);
++	kfree(chip->data_buffer);
++	spin_unlock(&driver_lock);
+ 	return 0;
+ }
+ 
+@@ -453,15 +421,13 @@ ssize_t tpm_write(struct file * file, co
+ 
+ 	/* cannot perform a write until the read has cleared
+ 	   either via tpm_read or a user_read_timer timeout */
+-	while (atomic_read(&chip->data_pending) != 0) {
+-		set_current_state(TASK_UNINTERRUPTIBLE);
+-		schedule_timeout(TPM_TIMEOUT);
+-	}
++	while (atomic_read(&chip->data_pending) != 0)
++		msleep(TPM_TIMEOUT);
+ 
+ 	down(&chip->buffer_mutex);
+ 
+-	if (in_size > TPM_BUFSIZE)
+-		in_size = TPM_BUFSIZE;
++	if (in_size > chip->vendor->buffersize)
++		in_size = chip->vendor->buffersize;
+ 
+ 	if (copy_from_user
+ 	    (chip->data_buffer, (void __user *) buf, in_size)) {
+@@ -470,19 +436,15 @@ ssize_t tpm_write(struct file * file, co
+ 	}
+ 
+ 	/* atomic tpm command send and result receive */
+-	out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
++	out_size = tpm_transmit(chip, chip->data_buffer, 
++	                        chip->vendor->buffersize);
+ 
+ 	atomic_set(&chip->data_pending, out_size);
++	atomic_set(&chip->data_position, 0);
+ 	up(&chip->buffer_mutex);
+ 
+ 	/* Set a timeout by which the reader must come claim the result */
+-	down(&chip->timer_manipulation_mutex);
+-	init_timer(&chip->user_read_timer);
+-	chip->user_read_timer.function = user_reader_timeout;
+-	chip->user_read_timer.data = (unsigned long) chip;
+-	chip->user_read_timer.expires = jiffies + (60 * HZ);
+-	add_timer(&chip->user_read_timer);
+-	up(&chip->timer_manipulation_mutex);
++	mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));
+ 
+ 	return in_size;
+ }
+@@ -493,43 +455,46 @@ ssize_t tpm_read(struct file * file, cha
+ 		 size_t size, loff_t * off)
+ {
+ 	struct tpm_chip *chip = file->private_data;
+-	int ret_size = -ENODATA;
+-
+-	if (atomic_read(&chip->data_pending) != 0) {	/* Result available */
+-		down(&chip->timer_manipulation_mutex);
+-		del_singleshot_timer_sync(&chip->user_read_timer);
+-		up(&chip->timer_manipulation_mutex);
++	int ret_size;
++	int pos, pending = 0;
+ 
+-		down(&chip->buffer_mutex);
++	ret_size = atomic_read(&chip->data_pending);
++	if (ret_size > 0) {	/* relay data */
++		if (size < ret_size)
++			ret_size = size;
+ 
+-		ret_size = atomic_read(&chip->data_pending);
+-		atomic_set(&chip->data_pending, 0);
++		pos = atomic_read(&chip->data_position);
+ 
+-		if (ret_size == 0)	/* timeout just occurred */
+-			ret_size = -ETIME;
+-		else if (ret_size > 0) {	/* relay data */
+-			if (size < ret_size)
+-				ret_size = size;
+-
+-			if (copy_to_user((void __user *) buf,
+-					 chip->data_buffer, ret_size)) {
+-				ret_size = -EFAULT;
++		down(&chip->buffer_mutex);
++		if (copy_to_user
++		    ((void __user *) buf, &chip->data_buffer[pos], ret_size)) {
++			ret_size = -EFAULT;
++		} else {
++			pending = atomic_read(&chip->data_pending) - ret_size;
++			if ( pending ) {
++				atomic_set( &chip->data_pending, pending );
++				atomic_set( &chip->data_position, pos+ret_size );
+ 			}
+ 		}
+ 		up(&chip->buffer_mutex);
+ 	}
++	
++	if ( ret_size <= 0 || pending == 0 ) {
++		atomic_set( &chip->data_pending, 0 );
++		del_singleshot_timer_sync(&chip->user_read_timer);
++	}
+ 
+ 	return ret_size;
+ }
+ 
+ EXPORT_SYMBOL_GPL(tpm_read);
+ 
+-void __devexit tpm_remove(struct pci_dev *pci_dev)
++void tpm_remove_hardware(struct device *dev)
+ {
+-	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
++	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 
+ 	if (chip == NULL) {
+-		dev_err(&pci_dev->dev, "No device data found\n");
++		dev_err(dev, "No device data found\n");
+ 		return;
+ 	}
+ 
+@@ -539,23 +504,20 @@ void __devexit tpm_remove(struct pci_dev
+ 
+ 	spin_unlock(&driver_lock);
+ 
+-	pci_set_drvdata(pci_dev, NULL);
++	dev_set_drvdata(dev, NULL);
+ 	misc_deregister(&chip->vendor->miscdev);
++	kfree(chip->vendor->miscdev.name);
+ 
+-	device_remove_file(&pci_dev->dev, &dev_attr_pubek);
+-	device_remove_file(&pci_dev->dev, &dev_attr_pcrs);
+-	device_remove_file(&pci_dev->dev, &dev_attr_caps);
+-
+-	pci_disable_device(pci_dev);
++	sysfs_remove_group(&dev->kobj, chip->vendor->attr_group);
+ 
+-	dev_mask[chip->dev_num / 32] &= !(1 << (chip->dev_num % 32));
++	dev_mask[chip->dev_num / TPM_NUM_MASK_ENTRIES ] &= !(1 << (chip->dev_num % TPM_NUM_MASK_ENTRIES));
+ 
+ 	kfree(chip);
+ 
+-	pci_dev_put(pci_dev);
++	put_device(dev);
+ }
+ 
+-EXPORT_SYMBOL_GPL(tpm_remove);
++EXPORT_SYMBOL_GPL(tpm_remove_hardware);
+ 
+ static u8 savestate[] = {
+ 	0, 193,			/* TPM_TAG_RQU_COMMAND */
+@@ -590,10 +552,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
+ 	if (chip == NULL)
+ 		return -ENODEV;
+ 
+-	spin_lock(&driver_lock);
+-	tpm_lpc_bus_init(pci_dev, chip->vendor->base);
+-	spin_unlock(&driver_lock);
+-
+ 	return 0;
+ }
+ 
+@@ -606,10 +564,12 @@ EXPORT_SYMBOL_GPL(tpm_pm_resume);
+  * upon errant exit from this function specific probe function should call
+  * pci_disable_device
+  */
+-int tpm_register_hardware(struct pci_dev *pci_dev,
++int tpm_register_hardware(struct device *dev,
+ 			  struct tpm_vendor_specific *entry)
+ {
+-	char devname[7];
++#define DEVNAME_SIZE 7
++
++	char *devname;
+ 	struct tpm_chip *chip;
+ 	int i, j;
+ 
+@@ -622,24 +582,34 @@ int tpm_register_hardware(struct pci_dev
+ 
+ 	init_MUTEX(&chip->buffer_mutex);
+ 	init_MUTEX(&chip->tpm_mutex);
+-	init_MUTEX(&chip->timer_manipulation_mutex);
+ 	INIT_LIST_HEAD(&chip->list);
+ 
++	init_timer(&chip->user_read_timer);
++	chip->user_read_timer.function = user_reader_timeout;
++	chip->user_read_timer.data = (unsigned long) chip;
++
+ 	chip->vendor = entry;
++	
++	if (entry->buffersize < TPM_MIN_BUFSIZE) {
++		entry->buffersize = TPM_MIN_BUFSIZE;
++	} else if (entry->buffersize > TPM_MAX_BUFSIZE) {
++		entry->buffersize = TPM_MAX_BUFSIZE;
++	}
+ 
+ 	chip->dev_num = -1;
+ 
+-	for (i = 0; i < 32; i++)
+-		for (j = 0; j < 8; j++)
++	for (i = 0; i < TPM_NUM_MASK_ENTRIES; i++)
++		for (j = 0; j < 8 * sizeof(int); j++)
+ 			if ((dev_mask[i] & (1 << j)) == 0) {
+-				chip->dev_num = i * 32 + j;
++				chip->dev_num =
++				    i * TPM_NUM_MASK_ENTRIES + j;
+ 				dev_mask[i] |= 1 << j;
+ 				goto dev_num_search_complete;
+ 			}
+ 
+ dev_num_search_complete:
+ 	if (chip->dev_num < 0) {
+-		dev_err(&pci_dev->dev,
++		dev_err(dev,
+ 			"No available tpm device numbers\n");
+ 		kfree(chip);
+ 		return -ENODEV;
+@@ -648,48 +618,38 @@ dev_num_search_complete:
+ 	else
+ 		chip->vendor->miscdev.minor = MISC_DYNAMIC_MINOR;
+ 
+-	snprintf(devname, sizeof(devname), "%s%d", "tpm", chip->dev_num);
++	devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
++	scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
+ 	chip->vendor->miscdev.name = devname;
+ 
+-	chip->vendor->miscdev.dev = &(pci_dev->dev);
+-	chip->pci_dev = pci_dev_get(pci_dev);
++	chip->vendor->miscdev.dev = dev;
++	chip->dev = get_device(dev);
+ 
+ 	if (misc_register(&chip->vendor->miscdev)) {
+-		dev_err(&chip->pci_dev->dev,
++		dev_err(chip->dev,
+ 			"unable to misc_register %s, minor %d\n",
+ 			chip->vendor->miscdev.name,
+ 			chip->vendor->miscdev.minor);
+-		pci_dev_put(pci_dev);
++		put_device(dev);
+ 		kfree(chip);
+ 		dev_mask[i] &= !(1 << j);
+ 		return -ENODEV;
+ 	}
+ 
+-	pci_set_drvdata(pci_dev, chip);
++	spin_lock(&driver_lock);
+ 
+-	list_add(&chip->list, &tpm_chip_list);
++	dev_set_drvdata(dev, chip);
+ 
+-	device_create_file(&pci_dev->dev, &dev_attr_pubek);
+-	device_create_file(&pci_dev->dev, &dev_attr_pcrs);
+-	device_create_file(&pci_dev->dev, &dev_attr_caps);
++	list_add(&chip->list, &tpm_chip_list);
+ 
+-	return 0;
+-}
++	spin_unlock(&driver_lock);
+ 
+-EXPORT_SYMBOL_GPL(tpm_register_hardware);
++	sysfs_create_group(&dev->kobj, chip->vendor->attr_group);
+ 
+-static int __init init_tpm(void)
+-{
+ 	return 0;
+ }
+ 
+-static void __exit cleanup_tpm(void)
+-{
+-
+-}
+-
+-module_init(init_tpm);
+-module_exit(cleanup_tpm);
++EXPORT_SYMBOL_GPL(tpm_register_hardware);
+ 
+ MODULE_AUTHOR("Leendert van Doorn (leendert at watson.ibm.com)");
+ MODULE_DESCRIPTION("TPM Driver");
+diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm.h linux-2.6.12-xen/drivers/char/tpm/tpm.h
+--- pristine-linux-2.6.12/drivers/char/tpm/tpm.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/tpm/tpm.h	2006-03-05 23:36:31.000000000 +0100
+@@ -25,27 +25,56 @@
+ #include <linux/fs.h>
+ #include <linux/miscdevice.h>
+ 
+-#define TPM_TIMEOUT msecs_to_jiffies(5)
++enum tpm_timeout {
++	TPM_TIMEOUT = 5,	/* msecs */
++};
+ 
+ /* TPM addresses */
+-#define	TPM_ADDR			0x4E
+-#define	TPM_DATA			0x4F
++enum tpm_addr {
++	TPM_SUPERIO_ADDR = 0x2E,
++	TPM_ADDR = 0x4E,
++};
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
++extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
++				char *);
++extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
++				char *);
++extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr,
++				char *);
++extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr,
++				const char *, size_t);
++#else
++extern ssize_t tpm_show_pubek(struct device *,
++				char *);
++extern ssize_t tpm_show_pcrs(struct device *,
++				char *);
++extern ssize_t tpm_show_caps(struct device *,
++				char *);
++extern ssize_t tpm_store_cancel(struct device *,
++				const char *, size_t);
++#endif
+ 
+ struct tpm_chip;
+ 
+ struct tpm_vendor_specific {
+ 	u8 req_complete_mask;
+ 	u8 req_complete_val;
++	u8 req_canceled;
+ 	u16 base;		/* TPM base address */
++	int drv_type;
++	u32 buffersize;
+ 
+ 	int (*recv) (struct tpm_chip *, u8 *, size_t);
+ 	int (*send) (struct tpm_chip *, u8 *, size_t);
+ 	void (*cancel) (struct tpm_chip *);
++	u8 (*status) (struct tpm_chip *);
+ 	struct miscdevice miscdev;
++	struct attribute_group *attr_group;
+ };
+ 
+ struct tpm_chip {
+-	struct pci_dev *pci_dev;	/* PCI device stuff */
++	struct device *dev;	/* Device stuff */
+ 
+ 	int dev_num;		/* /dev/tpm# */
+ 	int num_opens;		/* only one allowed */
+@@ -54,40 +83,36 @@ struct tpm_chip {
+ 	/* Data passed to and from the tpm via the read/write calls */
+ 	u8 *data_buffer;
+ 	atomic_t data_pending;
++	atomic_t data_position;
+ 	struct semaphore buffer_mutex;
+ 
+ 	struct timer_list user_read_timer;	/* user needs to claim result */
+ 	struct semaphore tpm_mutex;	/* tpm is processing */
+-	struct timer_list device_timer;	/* tpm is processing */
+-	struct semaphore timer_manipulation_mutex;
+ 
+ 	struct tpm_vendor_specific *vendor;
+ 
+ 	struct list_head list;
+ };
+ 
+-static inline int tpm_read_index(int index)
++static inline int tpm_read_index(int base, int index)
+ {
+-	outb(index, TPM_ADDR);
+-	return inb(TPM_DATA) & 0xFF;
++	outb(index, base);
++	return inb(base+1) & 0xFF;
+ }
+ 
+-static inline void tpm_write_index(int index, int value)
++static inline void tpm_write_index(int base, int index, int value)
+ {
+-	outb(index, TPM_ADDR);
+-	outb(value & 0xFF, TPM_DATA);
++	outb(index, base);
++	outb(value & 0xFF, base+1);
+ }
+ 
+-extern void tpm_time_expired(unsigned long);
+-extern int tpm_lpc_bus_init(struct pci_dev *, u16);
+-
+-extern int tpm_register_hardware(struct pci_dev *,
++extern int tpm_register_hardware(struct device *,
+ 				 struct tpm_vendor_specific *);
+ extern int tpm_open(struct inode *, struct file *);
+ extern int tpm_release(struct inode *, struct file *);
+ extern ssize_t tpm_write(struct file *, const char __user *, size_t,
+ 			 loff_t *);
+ extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
+-extern void __devexit tpm_remove(struct pci_dev *);
++extern void tpm_remove_hardware(struct device *);
+ extern int tpm_pm_suspend(struct pci_dev *, pm_message_t);
+ extern int tpm_pm_resume(struct pci_dev *);
+diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm_nsc.c linux-2.6.12-xen/drivers/char/tpm/tpm_nsc.c
+--- pristine-linux-2.6.12/drivers/char/tpm/tpm_nsc.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/tpm/tpm_nsc.c	2006-03-05 23:36:31.000000000 +0100
+@@ -22,43 +22,52 @@
+ #include "tpm.h"
+ 
+ /* National definitions */
+-#define	TPM_NSC_BASE			0x360
+-#define	TPM_NSC_IRQ			0x07
++enum tpm_nsc_addr{
++	TPM_NSC_IRQ = 0x07,
++	TPM_NSC_BASE0_HI = 0x60,
++	TPM_NSC_BASE0_LO = 0x61,
++	TPM_NSC_BASE1_HI = 0x62,
++	TPM_NSC_BASE1_LO = 0x63
++};
++
++enum tpm_nsc_index {
++	NSC_LDN_INDEX = 0x07,
++	NSC_SID_INDEX = 0x20,
++	NSC_LDC_INDEX = 0x30,
++	NSC_DIO_INDEX = 0x60,
++	NSC_CIO_INDEX = 0x62,
++	NSC_IRQ_INDEX = 0x70,
++	NSC_ITS_INDEX = 0x71
++};
+ 
+-#define	NSC_LDN_INDEX			0x07
+-#define	NSC_SID_INDEX			0x20
+-#define	NSC_LDC_INDEX			0x30
+-#define	NSC_DIO_INDEX			0x60
+-#define	NSC_CIO_INDEX			0x62
+-#define	NSC_IRQ_INDEX			0x70
+-#define	NSC_ITS_INDEX			0x71
+-
+-#define	NSC_STATUS			0x01
+-#define	NSC_COMMAND			0x01
+-#define	NSC_DATA			0x00
++enum tpm_nsc_status_loc {
++	NSC_STATUS = 0x01,
++	NSC_COMMAND = 0x01,
++	NSC_DATA = 0x00
++};
+ 
+ /* status bits */
+-#define	NSC_STATUS_OBF			0x01	/* output buffer full */
+-#define	NSC_STATUS_IBF			0x02	/* input buffer full */
+-#define	NSC_STATUS_F0			0x04	/* F0 */
+-#define	NSC_STATUS_A2			0x08	/* A2 */
+-#define	NSC_STATUS_RDY			0x10	/* ready to receive command */
+-#define	NSC_STATUS_IBR			0x20	/* ready to receive data */
++enum tpm_nsc_status {
++	NSC_STATUS_OBF = 0x01,	/* output buffer full */
++	NSC_STATUS_IBF = 0x02,	/* input buffer full */
++	NSC_STATUS_F0 = 0x04,	/* F0 */
++	NSC_STATUS_A2 = 0x08,	/* A2 */
++	NSC_STATUS_RDY = 0x10,	/* ready to receive command */
++	NSC_STATUS_IBR = 0x20	/* ready to receive data */
++};
+ 
+ /* command bits */
+-#define	NSC_COMMAND_NORMAL		0x01	/* normal mode */
+-#define	NSC_COMMAND_EOC			0x03
+-#define	NSC_COMMAND_CANCEL		0x22
+-
++enum tpm_nsc_cmd_mode {
++	NSC_COMMAND_NORMAL = 0x01,	/* normal mode */
++	NSC_COMMAND_EOC = 0x03,
++	NSC_COMMAND_CANCEL = 0x22
++};
+ /*
+  * Wait for a certain status to appear
+  */
+ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data)
+ {
+-	int expired = 0;
+-	struct timer_list status_timer =
+-	    TIMER_INITIALIZER(tpm_time_expired, jiffies + 10 * HZ,
+-			      (unsigned long) &expired);
++	unsigned long stop;
+ 
+ 	/* status immediately available check */
+ 	*data = inb(chip->vendor->base + NSC_STATUS);
+@@ -66,17 +75,14 @@ static int wait_for_stat(struct tpm_chip
+ 		return 0;
+ 
+ 	/* wait for status */
+-	add_timer(&status_timer);
++	stop = jiffies + 10 * HZ;
+ 	do {
+-		set_current_state(TASK_UNINTERRUPTIBLE);
+-		schedule_timeout(TPM_TIMEOUT);
++		msleep(TPM_TIMEOUT);
+ 		*data = inb(chip->vendor->base + 1);
+-		if ((*data & mask) == val) {
+-			del_singleshot_timer_sync(&status_timer);
++		if ((*data & mask) == val)
+ 			return 0;
+-		}
+ 	}
+-	while (!expired);
++	while (time_before(jiffies, stop));
+ 
+ 	return -EBUSY;
+ }
+@@ -84,10 +90,7 @@ static int wait_for_stat(struct tpm_chip
+ static int nsc_wait_for_ready(struct tpm_chip *chip)
+ {
+ 	int status;
+-	int expired = 0;
+-	struct timer_list status_timer =
+-	    TIMER_INITIALIZER(tpm_time_expired, jiffies + 100,
+-			      (unsigned long) &expired);
++	unsigned long stop;
+ 
+ 	/* status immediately available check */
+ 	status = inb(chip->vendor->base + NSC_STATUS);
+@@ -97,21 +100,18 @@ static int nsc_wait_for_ready(struct tpm
+ 		return 0;
+ 
+ 	/* wait for status */
+-	add_timer(&status_timer);
++	stop = jiffies + 100;
+ 	do {
+-		set_current_state(TASK_UNINTERRUPTIBLE);
+-		schedule_timeout(TPM_TIMEOUT);
++		msleep(TPM_TIMEOUT);
+ 		status = inb(chip->vendor->base + NSC_STATUS);
+ 		if (status & NSC_STATUS_OBF)
+ 			status = inb(chip->vendor->base + NSC_DATA);
+-		if (status & NSC_STATUS_RDY) {
+-			del_singleshot_timer_sync(&status_timer);
++		if (status & NSC_STATUS_RDY)
+ 			return 0;
+-		}
+ 	}
+-	while (!expired);
++	while (time_before(jiffies, stop));
+ 
+-	dev_info(&chip->pci_dev->dev, "wait for ready failed\n");
++	dev_info(chip->dev, "wait for ready failed\n");
+ 	return -EBUSY;
+ }
+ 
+@@ -127,12 +127,12 @@ static int tpm_nsc_recv(struct tpm_chip 
+ 		return -EIO;
+ 
+ 	if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
+-		dev_err(&chip->pci_dev->dev, "F0 timeout\n");
++		dev_err(chip->dev, "F0 timeout\n");
+ 		return -EIO;
+ 	}
+ 	if ((data =
+ 	     inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
+-		dev_err(&chip->pci_dev->dev, "not in normal mode (0x%x)\n",
++		dev_err(chip->dev, "not in normal mode (0x%x)\n",
+ 			data);
+ 		return -EIO;
+ 	}
+@@ -141,7 +141,7 @@ static int tpm_nsc_recv(struct tpm_chip 
+ 	for (p = buffer; p < &buffer[count]; p++) {
+ 		if (wait_for_stat
+ 		    (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
+-			dev_err(&chip->pci_dev->dev,
++			dev_err(chip->dev,
+ 				"OBF timeout (while reading data)\n");
+ 			return -EIO;
+ 		}
+@@ -150,12 +150,13 @@ static int tpm_nsc_recv(struct tpm_chip 
+ 		*p = inb(chip->vendor->base + NSC_DATA);
+ 	}
+ 
+-	if ((data & NSC_STATUS_F0) == 0) {
+-		dev_err(&chip->pci_dev->dev, "F0 not set\n");
++	if ((data & NSC_STATUS_F0) == 0 &&
++	(wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
++		dev_err(chip->dev, "F0 not set\n");
+ 		return -EIO;
+ 	}
+ 	if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) {
+-		dev_err(&chip->pci_dev->dev,
++		dev_err(chip->dev,
+ 			"expected end of command(0x%x)\n", data);
+ 		return -EIO;
+ 	}
+@@ -186,19 +187,19 @@ static int tpm_nsc_send(struct tpm_chip 
+ 		return -EIO;
+ 
+ 	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+-		dev_err(&chip->pci_dev->dev, "IBF timeout\n");
++		dev_err(chip->dev, "IBF timeout\n");
+ 		return -EIO;
+ 	}
+ 
+ 	outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND);
+ 	if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
+-		dev_err(&chip->pci_dev->dev, "IBR timeout\n");
++		dev_err(chip->dev, "IBR timeout\n");
+ 		return -EIO;
+ 	}
+ 
+ 	for (i = 0; i < count; i++) {
+ 		if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+-			dev_err(&chip->pci_dev->dev,
++			dev_err(chip->dev,
+ 				"IBF timeout (while writing data)\n");
+ 			return -EIO;
+ 		}
+@@ -206,7 +207,7 @@ static int tpm_nsc_send(struct tpm_chip 
+ 	}
+ 
+ 	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
+-		dev_err(&chip->pci_dev->dev, "IBF timeout\n");
++		dev_err(chip->dev, "IBF timeout\n");
+ 		return -EIO;
+ 	}
+ 	outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND);
+@@ -219,6 +220,11 @@ static void tpm_nsc_cancel(struct tpm_ch
+ 	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
+ }
+ 
++static u8 tpm_nsc_status(struct tpm_chip *chip)
++{
++	return inb(chip->vendor->base + NSC_STATUS);
++}
++
+ static struct file_operations nsc_ops = {
+ 	.owner = THIS_MODULE,
+ 	.llseek = no_llseek,
+@@ -228,102 +234,98 @@ static struct file_operations nsc_ops = 
+ 	.release = tpm_release,
+ };
+ 
++static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
++static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
++static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
++static DEVICE_ATTR(cancel, S_IWUSR|S_IWGRP, NULL, tpm_store_cancel);
++
++static struct attribute * nsc_attrs[] = {
++	&dev_attr_pubek.attr,
++	&dev_attr_pcrs.attr,
++	&dev_attr_caps.attr,
++	&dev_attr_cancel.attr,
++	0,
++};
++
++static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs };
++
+ static struct tpm_vendor_specific tpm_nsc = {
+ 	.recv = tpm_nsc_recv,
+ 	.send = tpm_nsc_send,
+ 	.cancel = tpm_nsc_cancel,
++	.status = tpm_nsc_status,
+ 	.req_complete_mask = NSC_STATUS_OBF,
+ 	.req_complete_val = NSC_STATUS_OBF,
+-	.base = TPM_NSC_BASE,
++	.req_canceled = NSC_STATUS_RDY,
++	.attr_group = &nsc_attr_grp,
+ 	.miscdev = { .fops = &nsc_ops, },
+-	
+ };
+ 
+ static int __devinit tpm_nsc_init(struct pci_dev *pci_dev,
+ 				  const struct pci_device_id *pci_id)
+ {
+ 	int rc = 0;
++	int lo, hi;
++	int nscAddrBase = TPM_ADDR;
++
+ 
+ 	if (pci_enable_device(pci_dev))
+ 		return -EIO;
+ 
+-	if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
+-		rc = -ENODEV;
+-		goto out_err;
+-	}
++	/* select PM channel 1 */
++	tpm_write_index(nscAddrBase,NSC_LDN_INDEX, 0x12);
+ 
+ 	/* verify that it is a National part (SID) */
+-	if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
+-		rc = -ENODEV;
+-		goto out_err;
++	if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
++		nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)|
++			(tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE);
++		if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6) {
++			rc = -ENODEV;
++			goto out_err;
++		}
+ 	}
+ 
++	hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
++	lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
++	tpm_nsc.base = (hi<<8) | lo;
++
+ 	dev_dbg(&pci_dev->dev, "NSC TPM detected\n");
+ 	dev_dbg(&pci_dev->dev,
+ 		"NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
+-		tpm_read_index(0x07), tpm_read_index(0x20),
+-		tpm_read_index(0x27));
++		tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20),
++		tpm_read_index(nscAddrBase,0x27));
+ 	dev_dbg(&pci_dev->dev,
+ 		"NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
+-		tpm_read_index(0x21), tpm_read_index(0x25),
+-		tpm_read_index(0x26), tpm_read_index(0x28));
++		tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25),
++		tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28));
+ 	dev_dbg(&pci_dev->dev, "NSC IO Base0 0x%x\n",
+-		(tpm_read_index(0x60) << 8) | tpm_read_index(0x61));
++		(tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61));
+ 	dev_dbg(&pci_dev->dev, "NSC IO Base1 0x%x\n",
+-		(tpm_read_index(0x62) << 8) | tpm_read_index(0x63));
++		(tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63));
+ 	dev_dbg(&pci_dev->dev, "NSC Interrupt number and wakeup 0x%x\n",
+-		tpm_read_index(0x70));
++		tpm_read_index(nscAddrBase,0x70));
+ 	dev_dbg(&pci_dev->dev, "NSC IRQ type select 0x%x\n",
+-		tpm_read_index(0x71));
++		tpm_read_index(nscAddrBase,0x71));
+ 	dev_dbg(&pci_dev->dev,
+ 		"NSC DMA channel select0 0x%x, select1 0x%x\n",
+-		tpm_read_index(0x74), tpm_read_index(0x75));
++		tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75));
+ 	dev_dbg(&pci_dev->dev,
+ 		"NSC Config "
+ 		"0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+-		tpm_read_index(0xF0), tpm_read_index(0xF1),
+-		tpm_read_index(0xF2), tpm_read_index(0xF3),
+-		tpm_read_index(0xF4), tpm_read_index(0xF5),
+-		tpm_read_index(0xF6), tpm_read_index(0xF7),
+-		tpm_read_index(0xF8), tpm_read_index(0xF9));
++		tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1),
++		tpm_read_index(nscAddrBase,0xF2), tpm_read_index(nscAddrBase,0xF3),
++		tpm_read_index(nscAddrBase,0xF4), tpm_read_index(nscAddrBase,0xF5),
++		tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7),
++		tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9));
+ 
+ 	dev_info(&pci_dev->dev,
+-		 "NSC PC21100 TPM revision %d\n",
+-		 tpm_read_index(0x27) & 0x1F);
+-
+-	if (tpm_read_index(NSC_LDC_INDEX) == 0)
+-		dev_info(&pci_dev->dev, ": NSC TPM not active\n");
+-
+-	/* select PM channel 1 */
+-	tpm_write_index(NSC_LDN_INDEX, 0x12);
+-	tpm_read_index(NSC_LDN_INDEX);
+-
+-	/* disable the DPM module */
+-	tpm_write_index(NSC_LDC_INDEX, 0);
+-	tpm_read_index(NSC_LDC_INDEX);
+-
+-	/* set the data register base addresses */
+-	tpm_write_index(NSC_DIO_INDEX, TPM_NSC_BASE >> 8);
+-	tpm_write_index(NSC_DIO_INDEX + 1, TPM_NSC_BASE);
+-	tpm_read_index(NSC_DIO_INDEX);
+-	tpm_read_index(NSC_DIO_INDEX + 1);
+-
+-	/* set the command register base addresses */
+-	tpm_write_index(NSC_CIO_INDEX, (TPM_NSC_BASE + 1) >> 8);
+-	tpm_write_index(NSC_CIO_INDEX + 1, (TPM_NSC_BASE + 1));
+-	tpm_read_index(NSC_DIO_INDEX);
+-	tpm_read_index(NSC_DIO_INDEX + 1);
+-
+-	/* set the interrupt number to be used for the host interface */
+-	tpm_write_index(NSC_IRQ_INDEX, TPM_NSC_IRQ);
+-	tpm_write_index(NSC_ITS_INDEX, 0x00);
+-	tpm_read_index(NSC_IRQ_INDEX);
++		 "NSC TPM revision %d\n",
++		 tpm_read_index(nscAddrBase, 0x27) & 0x1F);
+ 
+ 	/* enable the DPM module */
+-	tpm_write_index(NSC_LDC_INDEX, 0x01);
+-	tpm_read_index(NSC_LDC_INDEX);
++	tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
+ 
+-	if ((rc = tpm_register_hardware(pci_dev, &tpm_nsc)) < 0)
++	if ((rc = tpm_register_hardware(&pci_dev->dev, &tpm_nsc)) < 0)
+ 		goto out_err;
+ 
+ 	return 0;
+@@ -333,12 +335,23 @@ out_err:
+ 	return rc;
+ }
+ 
++static void __devexit tpm_nsc_remove(struct pci_dev *pci_dev) 
++{
++	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
++
++	if ( chip )
++		tpm_remove_hardware(chip->dev);
++}
++
+ static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
++	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0)},
+ 	{PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
+ 	{0,}
+ };
+@@ -349,7 +362,7 @@ static struct pci_driver nsc_pci_driver 
+ 	.name = "tpm_nsc",
+ 	.id_table = tpm_pci_tbl,
+ 	.probe = tpm_nsc_init,
+-	.remove = __devexit_p(tpm_remove),
++	.remove = __devexit_p(tpm_nsc_remove),
+ 	.suspend = tpm_pm_suspend,
+ 	.resume = tpm_pm_resume,
+ };
+@@ -364,7 +377,7 @@ static void __exit cleanup_nsc(void)
+ 	pci_unregister_driver(&nsc_pci_driver);
+ }
+ 
+-module_init(init_nsc);
++fs_initcall(init_nsc);
+ module_exit(cleanup_nsc);
+ 
+ MODULE_AUTHOR("Leendert van Doorn (leendert at watson.ibm.com)");
+diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm_xen.c linux-2.6.12-xen/drivers/char/tpm/tpm_xen.c
+--- pristine-linux-2.6.12/drivers/char/tpm/tpm_xen.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/char/tpm/tpm_xen.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,525 @@
++/*
++ * Copyright (C) 2004 IBM Corporation
++ *
++ * Authors:
++ * Leendert van Doorn <leendert at watson.ibm.com>
++ * Dave Safford <safford at watson.ibm.com>
++ * Reiner Sailer <sailer at watson.ibm.com>
++ * Kylene Hall <kjhall at us.ibm.com>
++ * Stefan Berger <stefanb at us.ibm.com>
++ *
++ * Maintained by: <tpmdd_devel at lists.sourceforge.net>
++ *
++ * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
++ * Specifications at www.trustedcomputinggroup.org
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ */
++
++#include <asm/uaccess.h>
++#include <linux/list.h>
++#include <asm-xen/tpmfe.h>
++#include <linux/device.h>
++#include <linux/interrupt.h>
++#include "tpm.h"
++
++/* read status bits */
++enum {
++	STATUS_BUSY = 0x01,
++	STATUS_DATA_AVAIL = 0x02,
++	STATUS_READY = 0x04
++};
++
++#define MIN(x,y)  (((x) < (y)) ? (x) : (y))
++
++struct transmission {
++	struct list_head next;
++	unsigned char *request;
++	unsigned int request_len;
++	unsigned char *rcv_buffer;
++	unsigned int  buffersize;
++	struct tpm_chip     *chip;
++	unsigned int flags;
++};
++
++enum {
++	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
++};
++
++struct data_exchange {
++	struct transmission *current_request;
++	spinlock_t           req_list_lock;
++	wait_queue_head_t    req_wait_queue;
++
++	struct list_head     queued_requests;
++
++	struct transmission *current_response;
++	spinlock_t           resp_list_lock;
++	wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
++
++	struct transmission *req_cancelled;       // if a cancellation was encountered
++
++	unsigned int         fe_status;
++	unsigned int         flags;
++};
++
++enum {
++	DATAEX_FLAG_QUEUED_ONLY = 0x1
++};
++
++static struct data_exchange dataex;
++
++static unsigned long disconnect_time;
++
++/* local function prototypes */
++static void __exit cleanup_xen(void);
++
++
++/* =============================================================
++ * Some utility functions
++ * =============================================================
++ */
++static inline struct transmission *
++transmission_alloc(void)
++{
++	struct transmission *t = kmalloc(sizeof(*t), GFP_KERNEL);
++	if (t) {
++		memset(t, 0x0, sizeof(*t));
++	}
++	return t;
++}
++
++static inline unsigned char *
++transmission_set_buffer(struct transmission *t,
++                        unsigned char *buffer, unsigned int len)
++{
++	kfree(t->request);
++	t->request = kmalloc(len, GFP_KERNEL);
++	if (t->request) {
++		memcpy(t->request,
++		       buffer,
++		       len);
++		t->request_len = len;
++	}
++	return t->request;
++}
++
++static inline void
++transmission_free(struct transmission *t)
++{
++	kfree(t->request);
++	kfree(t->rcv_buffer);
++	kfree(t);
++}
++
++/* =============================================================
++ * Interface with the TPM shared memory driver for XEN
++ * =============================================================
++ */
++static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
++{
++	int ret_size = 0;
++	struct transmission *t, *temp;
++
++	/*
++	 * The list with requests must contain one request
++	 * only and the element there must be the one that
++	 * was passed to me from the front-end.
++	 */
++	if (dataex.current_request != ptr) {
++		printk("WARNING: The request pointer is different from the pointer "
++		       "the shared memory driver returned to me. %p != %p\n",
++		       dataex.current_request, ptr);
++	}
++
++	/*
++	 * If the request has been cancelled, just quit here
++	 */
++	if (dataex.req_cancelled == (struct transmission *)ptr) {
++		if (dataex.current_request == dataex.req_cancelled) {
++			dataex.current_request = NULL;
++		}
++		transmission_free(dataex.req_cancelled);
++		dataex.req_cancelled = NULL;
++		return 0;
++	}
++
++	if (NULL != (temp = dataex.current_request)) {
++		transmission_free(temp);
++		dataex.current_request = NULL;
++	}
++
++	t = transmission_alloc();
++	if (NULL != t) {
++		unsigned long flags;
++		t->rcv_buffer = kmalloc(count, GFP_KERNEL);
++		if (NULL == t->rcv_buffer) {
++			transmission_free(t);
++			return -ENOMEM;
++		}
++		t->buffersize = count;
++		memcpy(t->rcv_buffer, buffer, count);
++		ret_size = count;
++
++		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
++		dataex.current_response = t;
++		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
++		wake_up_interruptible(&dataex.resp_wait_queue);
++	}
++	return ret_size;
++}
++
++
++static void tpm_fe_status(unsigned int flags)
++{
++	dataex.fe_status = flags;
++	if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
++		disconnect_time = jiffies;
++	}
++}
++
++/* =============================================================
++ * Interface with the generic TPM driver
++ * =============================================================
++ */
++static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count)
++{
++	unsigned long flags;
++	int rc = 0;
++
++	spin_lock_irqsave(&dataex.resp_list_lock, flags);
++	/*
++	 * Check if the previous operation only queued the command
++	 * In this case there won't be a response, so I just
++	 * return from here and reset that flag. In any other
++	 * case I should receive a response from the back-end.
++	 */
++	if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
++		dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
++		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
++		/*
++		 * A little hack here. The first few measurements
++		 * are queued since there's no way to talk to the
++		 * TPM yet (due to slowness of the control channel).
++		 * So we just make IMA happy by giving it 30 NULL
++		 * bytes back where the most important part is
++		 * that the result code is '0'.
++		 */
++
++		count = MIN(count, 30);
++		memset(buf, 0x0, count);
++		return count;
++	}
++	/*
++	 * Check whether something is in the responselist and if
++	 * there's nothing in the list wait for something to appear.
++	 */
++
++	if (NULL == dataex.current_response) {
++		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
++		interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
++		                               1000);
++		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
++	}
++
++	if (NULL != dataex.current_response) {
++		struct transmission *t = dataex.current_response;
++		dataex.current_response = NULL;
++		rc = MIN(count, t->buffersize);
++		memcpy(buf, t->rcv_buffer, rc);
++		transmission_free(t);
++	}
++
++	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
++	return rc;
++}
++
++static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count)
++{
++	/*
++	 * We simply pass the packet onto the XEN shared
++	 * memory driver.
++	 */
++	unsigned long flags;
++	int rc;
++	struct transmission *t = transmission_alloc();
++
++	spin_lock_irqsave(&dataex.req_list_lock, flags);
++	/*
++	 * If there's a current request, it must be the
++	 * previous request that has timed out.
++	 */
++	if (dataex.current_request != NULL) {
++		printk("WARNING: Sending although there is a request outstanding.\n"
++		       "         Previous request must have timed out.\n");
++		transmission_free(dataex.current_request);
++		dataex.current_request = NULL;
++	}
++
++	if (t != NULL) {
++		unsigned int error = 0;
++		t->rcv_buffer = NULL;
++		t->buffersize = 0;
++		t->chip = chip;
++
++		/*
++		 * Queue the packet if the driver below is not
++		 * ready, yet, or there is any packet already
++		 * in the queue.
++		 * If the driver below is ready, unqueue all
++		 * packets first before sending our current
++		 * packet.
++		 * For each unqueued packet, except for the
++		 * last (=current) packet, call the function
++		 * tpm_xen_recv to wait for the response to come
++		 * back.
++		 */
++		if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
++			if (time_after(jiffies, disconnect_time + HZ * 10)) {
++				rc = -ENOENT;
++			} else {
++				/*
++				 * copy the request into the buffer
++				 */
++				if (transmission_set_buffer(t, buf, count)
++				    == NULL) {
++					transmission_free(t);
++					rc = -ENOMEM;
++					goto exit;
++				}
++				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
++				list_add_tail(&t->next, &dataex.queued_requests);
++				rc = 0;
++			}
++		} else {
++			/*
++			 * Check whether there are any packets in the queue
++			 */
++			while (!list_empty(&dataex.queued_requests)) {
++				/*
++				 * Need to dequeue them.
++				 * Read the result into a dummy buffer.
++				 */
++				unsigned char buffer[1];
++				struct transmission *qt = (struct transmission *) dataex.queued_requests.next;
++				list_del(&qt->next);
++				dataex.current_request = qt;
++				spin_unlock_irqrestore(&dataex.req_list_lock, flags);
++
++				rc = tpm_fe_send(qt->request,
++				                 qt->request_len,
++				                 qt);
++
++				if (rc < 0) {
++					spin_lock_irqsave(&dataex.req_list_lock, flags);
++					if ((qt = dataex.current_request) != NULL) {
++						/*
++						 * requeue it at the beginning
++						 * of the list
++						 */
++						list_add(&qt->next,
++						         &dataex.queued_requests);
++					}
++					dataex.current_request = NULL;
++					error = 1;
++					break;
++				}
++				/*
++				 * After this point qt is not valid anymore!
++				 * It is freed when the front-end is delivering the data
++				 * by calling tpm_recv
++				 */
++
++				/*
++				 * Try to receive the response now into the provided dummy
++				 * buffer (I don't really care about this response since
++				 * there is no receiver anymore for this response)
++				 */
++				rc = tpm_xen_recv(chip, buffer, sizeof(buffer));
++
++				spin_lock_irqsave(&dataex.req_list_lock, flags);
++			}
++
++			if (error == 0) {
++				/*
++				 * Finally, send the current request.
++				 */
++				dataex.current_request = t;
++				/*
++				 * Call the shared memory driver
++				 * Pass to it the buffer with the request, the
++				 * amount of bytes in the request and
++				 * a void * pointer (here: transmission structure)
++				 */
++				rc = tpm_fe_send(buf, count, t);
++				/*
++				 * The generic TPM driver will call
++				 * the function to receive the response.
++				 */
++				if (rc < 0) {
++					dataex.current_request = NULL;
++					goto queue_it;
++				}
++			} else {
++queue_it:
++				if (transmission_set_buffer(t, buf, count) == NULL) {
++					transmission_free(t);
++					rc = -ENOMEM;
++					goto exit;
++				}
++				/*
++				 * An error occurred. Don't even try
++				 * to send the current request. Just
++				 * queue it.
++				 */
++				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
++				list_add_tail(&t->next, &dataex.queued_requests);
++				rc = 0;
++			}
++		}
++	} else {
++		rc = -ENOMEM;
++	}
++
++exit:
++	spin_unlock_irqrestore(&dataex.req_list_lock, flags);
++	return rc;
++}
++
++static void tpm_xen_cancel(struct tpm_chip *chip)
++{
++	unsigned long flags;
++	spin_lock_irqsave(&dataex.resp_list_lock,flags);
++
++	dataex.req_cancelled = dataex.current_request;
++
++	spin_unlock_irqrestore(&dataex.resp_list_lock,flags);
++}
++
++static u8 tpm_xen_status(struct tpm_chip *chip)
++{
++	unsigned long flags;
++	u8 rc = 0;
++	spin_lock_irqsave(&dataex.resp_list_lock, flags);
++	/*
++	 * Data are available if:
++	 *  - there's a current response
++	 *  - the last packet was queued only (this is fake, but necessary to
++	 *      get the generic TPM layer to call the receive function.)
++	 */
++	if (NULL != dataex.current_response ||
++	    0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
++		rc = STATUS_DATA_AVAIL;
++	}
++	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
++	return rc;
++}
++
++static struct file_operations tpm_xen_ops = {
++	.owner = THIS_MODULE,
++	.llseek = no_llseek,
++	.open = tpm_open,
++	.read = tpm_read,
++	.write = tpm_write,
++	.release = tpm_release,
++};
++
++static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
++static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
++static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
++static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
++
++static struct attribute* xen_attrs[] = {
++	&dev_attr_pubek.attr,
++	&dev_attr_pcrs.attr,
++	&dev_attr_caps.attr,
++	&dev_attr_cancel.attr,
++	NULL,
++};
++
++static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };
++
++static struct tpm_vendor_specific tpm_xen = {
++	.recv = tpm_xen_recv,
++	.send = tpm_xen_send,
++	.cancel = tpm_xen_cancel,
++	.status = tpm_xen_status,
++	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
++	.req_complete_val  = STATUS_DATA_AVAIL,
++	.req_canceled = STATUS_READY,
++	.base = 0,
++	.attr_group = &xen_attr_grp,
++	.miscdev.fops = &tpm_xen_ops,
++	.buffersize = 64 * 1024,
++};
++
++static struct device tpm_device = {
++	.bus_id = "vtpm",
++};
++
++static struct tpmfe_device tpmfe = {
++	.receive = tpm_recv,
++	.status  = tpm_fe_status,
++};
++
++
++static int __init init_xen(void)
++{
++	int rc;
++
++	/*
++	 * Register device with the low-level front-end
++	 * driver
++	 */
++	if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
++		return rc;
++	}
++
++	/*
++	 * Register our device with the system.
++	 */
++	if ((rc = device_register(&tpm_device)) < 0) {
++		tpm_fe_unregister_receiver();
++		return rc;
++	}
++
++	tpm_xen.buffersize = tpmfe.max_tx_size;
++
++	if ((rc = tpm_register_hardware(&tpm_device, &tpm_xen)) < 0) {
++		device_unregister(&tpm_device);
++		tpm_fe_unregister_receiver();
++		return rc;
++	}
++
++	dataex.current_request = NULL;
++	spin_lock_init(&dataex.req_list_lock);
++	init_waitqueue_head(&dataex.req_wait_queue);
++	INIT_LIST_HEAD(&dataex.queued_requests);
++
++	dataex.current_response = NULL;
++	spin_lock_init(&dataex.resp_list_lock);
++	init_waitqueue_head(&dataex.resp_wait_queue);
++
++	disconnect_time = jiffies;
++
++	return 0;
++}
++
++static void __exit cleanup_xen(void)
++{
++	tpm_remove_hardware(&tpm_device);
++	device_unregister(&tpm_device);
++	tpm_fe_unregister_receiver();
++}
++
++fs_initcall(init_xen);
++module_exit(cleanup_xen);
++
++MODULE_AUTHOR("Stefan Berger (stefanb at us.ibm.com)");
++MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
++MODULE_VERSION("1.0");
++MODULE_LICENSE("GPL");
+diff -Nurp pristine-linux-2.6.12/drivers/char/tty_io.c linux-2.6.12-xen/drivers/char/tty_io.c
+--- pristine-linux-2.6.12/drivers/char/tty_io.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/tty_io.c	2006-03-05 23:36:31.000000000 +0100
+@@ -131,6 +131,8 @@ LIST_HEAD(tty_drivers);			/* linked list
+    vt.c for deeply disgusting hack reasons */
+ DECLARE_MUTEX(tty_sem);
+ 
++int console_use_vt = 1;
++
+ #ifdef CONFIG_UNIX98_PTYS
+ extern struct tty_driver *ptm_driver;	/* Unix98 pty masters; for /dev/ptmx */
+ extern int pty_limit;		/* Config limit on Unix98 ptys */
+@@ -1788,7 +1790,7 @@ retry_open:
+ 		goto got_driver;
+ 	}
+ #ifdef CONFIG_VT
+-	if (device == MKDEV(TTY_MAJOR,0)) {
++	if (console_use_vt && (device == MKDEV(TTY_MAJOR,0))) {
+ 		extern struct tty_driver *console_driver;
+ 		driver = console_driver;
+ 		index = fg_console;
+@@ -2966,14 +2968,19 @@ static int __init tty_init(void)
+ #endif
+ 
+ #ifdef CONFIG_VT
+-	cdev_init(&vc0_cdev, &console_fops);
+-	if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
+-	    register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
+-		panic("Couldn't register /dev/tty0 driver\n");
+-	devfs_mk_cdev(MKDEV(TTY_MAJOR, 0), S_IFCHR|S_IRUSR|S_IWUSR, "vc/0");
+-	class_simple_device_add(tty_class, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
++	if (console_use_vt) {
++		cdev_init(&vc0_cdev, &console_fops);
++		if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
++		    register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1,
++					   "/dev/vc/0") < 0)
++			panic("Couldn't register /dev/tty0 driver\n");
++		devfs_mk_cdev(MKDEV(TTY_MAJOR, 0), S_IFCHR|S_IRUSR|S_IWUSR,
++			      "vc/0");
++		class_simple_device_add(tty_class, MKDEV(TTY_MAJOR, 0), NULL,
++					"tty0");
+ 
+-	vty_init();
++		vty_init();
++	}
+ #endif
+ 	return 0;
+ }
+diff -Nurp pristine-linux-2.6.12/drivers/char/tty_ioctl.c linux-2.6.12-xen/drivers/char/tty_ioctl.c
+--- pristine-linux-2.6.12/drivers/char/tty_ioctl.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/char/tty_ioctl.c	2006-03-05 23:54:36.734090803 +0100
+@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
+ 			ld = tty_ldisc_ref(tty);
+ 			switch (arg) {
+ 			case TCIFLUSH:
+-				if (ld->flush_buffer)
++				if (ld && ld->flush_buffer)
+ 					ld->flush_buffer(tty);
+ 				break;
+ 			case TCIOFLUSH:
+-				if (ld->flush_buffer)
++				if (ld && ld->flush_buffer)
+ 					ld->flush_buffer(tty);
+ 				/* fall through */
+ 			case TCOFLUSH:
+diff -Nurp pristine-linux-2.6.12/drivers/Makefile linux-2.6.12-xen/drivers/Makefile
+--- pristine-linux-2.6.12/drivers/Makefile	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/Makefile	2006-03-05 23:36:31.000000000 +0100
+@@ -30,6 +30,7 @@ obj-y				+= base/ block/ misc/ net/ medi
+ obj-$(CONFIG_NUBUS)		+= nubus/
+ obj-$(CONFIG_ATM)		+= atm/
+ obj-$(CONFIG_PPC_PMAC)		+= macintosh/
++obj-$(CONFIG_ARCH_XEN)		+= xen/
+ obj-$(CONFIG_IDE)		+= ide/
+ obj-$(CONFIG_FC4)		+= fc4/
+ obj-$(CONFIG_SCSI)		+= scsi/
+diff -Nurp pristine-linux-2.6.12/drivers/media/video/cx88/cx88-video.c linux-2.6.12-xen/drivers/media/video/cx88/cx88-video.c
+--- pristine-linux-2.6.12/drivers/media/video/cx88/cx88-video.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/media/video/cx88/cx88-video.c	2006-03-05 23:54:36.802080784 +0100
+@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
+ 			.default_value = 0,
+ 			.type          = V4L2_CTRL_TYPE_INTEGER,
+ 		},
+-		.off                   = 0,
++		.off                   = 128,
+ 		.reg                   = MO_HUE,
+ 		.mask                  = 0x00ff,
+ 		.shift                 = 0,
+diff -Nurp pristine-linux-2.6.12/drivers/net/e1000/e1000_main.c linux-2.6.12-xen/drivers/net/e1000/e1000_main.c
+--- pristine-linux-2.6.12/drivers/net/e1000/e1000_main.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/net/e1000/e1000_main.c	2006-03-05 23:54:36.841075038 +0100
+@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
+ 	tso = e1000_tso(adapter, skb);
+ 	if (tso < 0) {
+ 		dev_kfree_skb_any(skb);
++		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+diff -Nurp pristine-linux-2.6.12/drivers/net/hamradio/Kconfig linux-2.6.12-xen/drivers/net/hamradio/Kconfig
+--- pristine-linux-2.6.12/drivers/net/hamradio/Kconfig	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/net/hamradio/Kconfig	2006-03-05 23:54:36.842074891 +0100
+@@ -17,7 +17,7 @@ config MKISS
+ 
+ config 6PACK
+ 	tristate "Serial port 6PACK driver"
+-	depends on AX25 && BROKEN_ON_SMP
++	depends on AX25
+ 	---help---
+ 	  6pack is a transmission protocol for the data exchange between your
+ 	  PC and your TNC (the Terminal Node Controller acts as a kind of
+diff -Nurp pristine-linux-2.6.12/drivers/net/shaper.c linux-2.6.12-xen/drivers/net/shaper.c
+--- pristine-linux-2.6.12/drivers/net/shaper.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/net/shaper.c	2006-03-05 23:54:36.881069145 +0100
+@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
+ {
+ 	struct shaper *shaper = dev->priv;
+  	struct sk_buff *ptr;
+-   
+-	if (down_trylock(&shaper->sem))
+-		return -1;
+ 
++	spin_lock(&shaper->lock);
+  	ptr=shaper->sendq.prev;
+  	
+  	/*
+@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
+                 shaper->stats.collisions++;
+  	}
+ 	shaper_kick(shaper);
+-	up(&shaper->sem);
++	spin_unlock(&shaper->lock);
+  	return 0;
+ }
+ 
+@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
+ {
+ 	struct shaper *shaper = (struct shaper *)data;
+ 
+-	if (!down_trylock(&shaper->sem)) {
+-		shaper_kick(shaper);
+-		up(&shaper->sem);
+-	} else
+-		mod_timer(&shaper->timer, jiffies);
++	spin_lock(&shaper->lock);
++	shaper_kick(shaper);
++	spin_unlock(&shaper->lock);
+ }
+ 
+ /*
+@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
+ 
+ 
+ /*
+- *	Flush the shaper queues on a closedown
+- */
+- 
+-static void shaper_flush(struct shaper *shaper)
+-{
+-	struct sk_buff *skb;
+-
+-	down(&shaper->sem);
+-	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
+-		dev_kfree_skb(skb);
+-	shaper_kick(shaper);
+-	up(&shaper->sem);
+-}
+-
+-/*
+  *	Bring the interface up. We just disallow this until a 
+  *	bind.
+  */
+@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
+ static int shaper_close(struct net_device *dev)
+ {
+ 	struct shaper *shaper=dev->priv;
+-	shaper_flush(shaper);
++	struct sk_buff *skb;
++
++	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
++		dev_kfree_skb(skb);
++
++	spin_lock_bh(&shaper->lock);
++	shaper_kick(shaper);
++	spin_unlock_bh(&shaper->lock);
++
+ 	del_timer_sync(&shaper->timer);
+ 	return 0;
+ }
+@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
+ 	init_timer(&sh->timer);
+ 	sh->timer.function=shaper_timer;
+ 	sh->timer.data=(unsigned long)sh;
++	spin_lock_init(&sh->lock);
+ }
+ 
+ /*
+diff -Nurp pristine-linux-2.6.12/drivers/pci/pci-driver.c linux-2.6.12-xen/drivers/pci/pci-driver.c
+--- pristine-linux-2.6.12/drivers/pci/pci-driver.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/pci/pci-driver.c	2006-03-05 23:54:36.916063988 +0100
+@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
+ 	/* FIXME, once all of the existing PCI drivers have been fixed to set
+ 	 * the pci shutdown function, this test can go away. */
+ 	if (!drv->driver.shutdown)
+-		drv->driver.shutdown = pci_device_shutdown,
++		drv->driver.shutdown = pci_device_shutdown;
+ 	drv->driver.owner = drv->owner;
+ 	drv->driver.kobj.ktype = &pci_driver_kobj_type;
+ 	pci_init_dynids(&drv->dynids);
+diff -Nurp pristine-linux-2.6.12/drivers/scsi/qla2xxx/qla_init.c linux-2.6.12-xen/drivers/scsi/qla2xxx/qla_init.c
+--- pristine-linux-2.6.12/drivers/scsi/qla2xxx/qla_init.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/scsi/qla2xxx/qla_init.c	2006-03-05 23:54:36.917063840 +0100
+@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
+ 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ 
+ 	fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
+-	if (!rport)
++	if (!rport) {
+ 		qla_printk(KERN_WARNING, ha,
+ 		    "Unable to allocate fc remote port!\n");
++		return;
++	}
+ 
+ 	if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
+ 		fcport->os_target_id = rport->scsi_target_id;
+diff -Nurp pristine-linux-2.6.12/drivers/scsi/qla2xxx/qla_os.c linux-2.6.12-xen/drivers/scsi/qla2xxx/qla_os.c
+--- pristine-linux-2.6.12/drivers/scsi/qla2xxx/qla_os.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/scsi/qla2xxx/qla_os.c	2006-03-05 23:54:36.919063546 +0100
+@@ -1150,7 +1150,7 @@ iospace_error_exit:
+  */
+ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
+ {
+-	int	ret;
++	int	ret = -ENODEV;
+ 	device_reg_t __iomem *reg;
+ 	struct Scsi_Host *host;
+ 	scsi_qla_host_t *ha;
+@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
+ 	fc_port_t *fcport;
+ 
+ 	if (pci_enable_device(pdev))
+-		return -1;
++		goto probe_out;
+ 
+ 	host = scsi_host_alloc(&qla2x00_driver_template,
+ 	    sizeof(scsi_qla_host_t));
+@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
+ 
+ 	/* Configure PCI I/O space */
+ 	ret = qla2x00_iospace_config(ha);
+-	if (ret != 0) {
+-		goto probe_alloc_failed;
+-	}
++	if (ret)
++		goto probe_failed;
+ 
+ 	/* Sanitize the information from PCI BIOS. */
+ 	host->irq = pdev->irq;
+@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
+ 		qla_printk(KERN_WARNING, ha,
+ 		    "[ERROR] Failed to allocate memory for adapter\n");
+ 
+-		goto probe_alloc_failed;
++		ret = -ENOMEM;
++		goto probe_failed;
+ 	}
+ 
+-	pci_set_drvdata(pdev, ha);
+-	host->this_id = 255;
+-	host->cmd_per_lun = 3;
+-	host->unique_id = ha->instance;
+-	host->max_cmd_len = MAX_CMDSZ;
+-	host->max_channel = ha->ports - 1;
+-	host->max_id = ha->max_targets;
+-	host->max_lun = ha->max_luns;
+-	host->transportt = qla2xxx_transport_template;
+-	if (scsi_add_host(host, &pdev->dev))
+-		goto probe_alloc_failed;
+-
+-	qla2x00_alloc_sysfs_attr(ha);
+-
+ 	if (qla2x00_initialize_adapter(ha) &&
+ 	    !(ha->device_flags & DFLG_NO_CABLE)) {
+ 
+@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
+ 		    "Adapter flags %x.\n",
+ 		    ha->host_no, ha->device_flags));
+ 
++		ret = -ENODEV;
+ 		goto probe_failed;
+ 	}
+ 
+-	qla2x00_init_host_attr(ha);
+-
+ 	/*
+ 	 * Startup the kernel thread for this host adapter
+ 	 */
+@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
+ 		qla_printk(KERN_WARNING, ha,
+ 		    "Unable to start DPC thread!\n");
+ 
++		ret = -ENODEV;
+ 		goto probe_failed;
+ 	}
+ 	wait_for_completion(&ha->dpc_inited);
+ 
++	host->this_id = 255;
++	host->cmd_per_lun = 3;
++	host->unique_id = ha->instance;
++	host->max_cmd_len = MAX_CMDSZ;
++	host->max_channel = ha->ports - 1;
++	host->max_lun = MAX_LUNS;
++	host->transportt = qla2xxx_transport_template;
++
+ 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ 		ret = request_irq(host->irq, qla2100_intr_handler,
+ 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
+ 	else
+ 		ret = request_irq(host->irq, qla2300_intr_handler,
+ 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
+-	if (ret != 0) {
++	if (ret) {
+ 		qla_printk(KERN_WARNING, ha,
+ 		    "Failed to reserve interrupt %d already in use.\n",
+ 		    host->irq);
+@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
+ 		msleep(10);
+ 	}
+ 
++	pci_set_drvdata(pdev, ha);
+ 	ha->flags.init_done = 1;
+ 	num_hosts++;
+ 
++	ret = scsi_add_host(host, &pdev->dev);
++	if (ret)
++		goto probe_failed;
++
++	qla2x00_alloc_sysfs_attr(ha);
++
++	qla2x00_init_host_attr(ha);
++
+ 	qla_printk(KERN_INFO, ha, "\n"
+ 	    " QLogic Fibre Channel HBA Driver: %s\n"
+ 	    "  QLogic %s - %s\n"
+@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
+ probe_failed:
+ 	fc_remove_host(ha->host);
+ 
+-	scsi_remove_host(host);
+-
+-probe_alloc_failed:
+ 	qla2x00_free_device(ha);
+ 
+ 	scsi_host_put(host);
+@@ -1394,7 +1394,8 @@ probe_alloc_failed:
+ probe_disable_device:
+ 	pci_disable_device(pdev);
+ 
+-	return -1;
++probe_out:
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(qla2x00_probe_one);
+ 
+diff -Nurp pristine-linux-2.6.12/drivers/scsi/sg.c linux-2.6.12-xen/drivers/scsi/sg.c
+--- pristine-linux-2.6.12/drivers/scsi/sg.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/scsi/sg.c	2006-03-05 23:54:36.920063398 +0100
+@@ -2969,23 +2969,22 @@ static void * dev_seq_start(struct seq_f
+ {
+ 	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
+ 
++	s->private = it;
+ 	if (! it)
+ 		return NULL;
++
+ 	if (NULL == sg_dev_arr)
+-		goto err1;
++		return NULL;
+ 	it->index = *pos;
+ 	it->max = sg_last_dev();
+ 	if (it->index >= it->max)
+-		goto err1;
++		return NULL;
+ 	return it;
+-err1:
+-	kfree(it);
+-	return NULL;
+ }
+ 
+ static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
+ {
+-	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
++	struct sg_proc_deviter * it = s->private;
+ 
+ 	*pos = ++it->index;
+ 	return (it->index < it->max) ? it : NULL;
+@@ -2993,7 +2992,9 @@ static void * dev_seq_next(struct seq_fi
+ 
+ static void dev_seq_stop(struct seq_file *s, void *v)
+ {
+-	kfree (v);
++	struct sg_proc_deviter * it = s->private;
++
++	kfree (it);
+ }
+ 
+ static int sg_proc_open_dev(struct inode *inode, struct file *file)
+diff -Nurp pristine-linux-2.6.12/drivers/usb/net/usbnet.c linux-2.6.12-xen/drivers/usb/net/usbnet.c
+--- pristine-linux-2.6.12/drivers/usb/net/usbnet.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/drivers/usb/net/usbnet.c	2006-03-05 23:54:36.923062956 +0100
+@@ -1922,7 +1922,7 @@ static int genelink_rx_fixup (struct usb
+ 
+ 			// copy the packet data to the new skb
+ 			memcpy(skb_put(gl_skb, size), packet->packet_data, size);
+-			skb_return (dev, skb);
++			skb_return (dev, gl_skb);
+ 		}
+ 
+ 		// advance to the next packet
+diff -Nurp pristine-linux-2.6.12/drivers/xen/balloon/balloon.c linux-2.6.12-xen/drivers/xen/balloon/balloon.c
+--- pristine-linux-2.6.12/drivers/xen/balloon/balloon.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/balloon/balloon.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,587 @@
++/******************************************************************************
++ * balloon.c
++ *
++ * Xen balloon driver - enables returning/claiming memory to/from Xen.
++ *
++ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/smp_lock.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#include <asm-xen/xen_proc.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/balloon.h>
++#include <asm-xen/xen-public/memory.h>
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <linux/list.h>
++
++#include <asm-xen/xenbus.h>
++
++#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
++
++static struct proc_dir_entry *balloon_pde;
++
++static DECLARE_MUTEX(balloon_mutex);
++
++/*
++ * Protects atomic reservation decrease/increase against concurrent increases.
++ * Also protects non-atomic updates of current_pages and driver_pages, and
++ * balloon lists.
++ */
++spinlock_t balloon_lock = SPIN_LOCK_UNLOCKED;
++
++/* We aim for 'current allocation' == 'target allocation'. */
++static unsigned long current_pages;
++static unsigned long target_pages;
++
++/* VM /proc information for memory */
++extern unsigned long totalram_pages;
++
++/* We may hit the hard limit in Xen. If we do then we remember it. */
++static unsigned long hard_limit;
++
++/*
++ * Drivers may alter the memory reservation independently, but they must
++ * inform the balloon driver so that we can avoid hitting the hard limit.
++ */
++static unsigned long driver_pages;
++
++/* List of ballooned pages, threaded through the mem_map array. */
++static LIST_HEAD(ballooned_pages);
++static unsigned long balloon_low, balloon_high;
++
++/* Main work function, always executed in process context. */
++static void balloon_process(void *unused);
++static DECLARE_WORK(balloon_worker, balloon_process, NULL);
++static struct timer_list balloon_timer;
++
++/* Use the private and mapping fields of struct page as a list. */
++#define PAGE_TO_LIST(p) ((struct list_head *)&p->private)
++#define LIST_TO_PAGE(l)				\
++	(list_entry(((unsigned long *)l), struct page, private))
++#define UNLIST_PAGE(p)				\
++	do {					\
++		list_del(PAGE_TO_LIST(p));	\
++		p->mapping = NULL;		\
++		p->private = 0;			\
++	} while(0)
++
++#define IPRINTK(fmt, args...) \
++	printk(KERN_INFO "xen_mem: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++	printk(KERN_WARNING "xen_mem: " fmt, ##args)
++
++/* balloon_append: add the given page to the balloon. */
++static void balloon_append(struct page *page)
++{
++	/* Lowmem is re-populated first, so highmem pages go at list tail. */
++	if (PageHighMem(page)) {
++		list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
++		balloon_high++;
++	} else {
++		list_add(PAGE_TO_LIST(page), &ballooned_pages);
++		balloon_low++;
++	}
++}
++
++/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
++static struct page *balloon_retrieve(void)
++{
++	struct page *page;
++
++	if (list_empty(&ballooned_pages))
++		return NULL;
++
++	page = LIST_TO_PAGE(ballooned_pages.next);
++	UNLIST_PAGE(page);
++
++	if (PageHighMem(page))
++		balloon_high--;
++	else
++		balloon_low--;
++
++	return page;
++}
++
++static struct page *balloon_first_page(void)
++{
++	if (list_empty(&ballooned_pages))
++		return NULL;
++	return LIST_TO_PAGE(ballooned_pages.next);
++}
++
++static struct page *balloon_next_page(struct page *page)
++{
++	struct list_head *next = PAGE_TO_LIST(page)->next;
++	if (next == &ballooned_pages)
++		return NULL;
++	return LIST_TO_PAGE(next);
++}
++
++static void balloon_alarm(unsigned long unused)
++{
++	schedule_work(&balloon_worker);
++}
++
++static unsigned long current_target(void)
++{
++	unsigned long target = min(target_pages, hard_limit);
++	if (target > (current_pages + balloon_low + balloon_high))
++		target = current_pages + balloon_low + balloon_high;
++	return target;
++}
++
++static int increase_reservation(unsigned long nr_pages)
++{
++	unsigned long *frame_list, pfn, i, flags;
++	struct page   *page;
++	long           rc;
++	struct xen_memory_reservation reservation = {
++		.address_bits = 0,
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
++
++	if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
++		nr_pages = PAGE_SIZE / sizeof(unsigned long);
++
++	frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
++	if (frame_list == NULL)
++		return -ENOMEM;
++
++	balloon_lock(flags);
++
++	page = balloon_first_page();
++	for (i = 0; i < nr_pages; i++) {
++		BUG_ON(page == NULL);
++		frame_list[i] = page_to_pfn(page);
++		page = balloon_next_page(page);
++	}
++
++	reservation.extent_start = frame_list;
++	reservation.nr_extents   = nr_pages;
++	rc = HYPERVISOR_memory_op(
++		XENMEM_populate_physmap, &reservation);
++	if (rc < nr_pages) {
++		int ret;
++		/* We hit the Xen hard limit: reprobe. */
++		reservation.extent_start = frame_list;
++		reservation.nr_extents   = rc;
++		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++				&reservation);
++		BUG_ON(ret != rc);
++		hard_limit = current_pages + rc - driver_pages;
++		goto out;
++	}
++
++	for (i = 0; i < nr_pages; i++) {
++		page = balloon_retrieve();
++		BUG_ON(page == NULL);
++
++		pfn = page_to_pfn(page);
++		BUG_ON(phys_to_machine_mapping_valid(pfn));
++
++		/* Update P->M and M->P tables. */
++		set_phys_to_machine(pfn, frame_list[i]);
++		xen_machphys_update(frame_list[i], pfn);
++            
++		/* Link back into the page tables if not highmem. */
++		if (pfn < max_low_pfn) {
++			int ret;
++			ret = HYPERVISOR_update_va_mapping(
++				(unsigned long)__va(pfn << PAGE_SHIFT),
++				pfn_pte_ma(frame_list[i], PAGE_KERNEL),
++				0);
++			BUG_ON(ret);
++		}
++
++		/* Relinquish the page back to the allocator. */
++		ClearPageReserved(page);
++		set_page_count(page, 1);
++		__free_page(page);
++	}
++
++	current_pages += nr_pages;
++	totalram_pages = current_pages;
++
++ out:
++	balloon_unlock(flags);
++
++	free_page((unsigned long)frame_list);
++
++	return 0;
++}
++
++static int decrease_reservation(unsigned long nr_pages)
++{
++	unsigned long *frame_list, pfn, i, flags;
++	struct page   *page;
++	void          *v;
++	int            need_sleep = 0;
++	int ret;
++	struct xen_memory_reservation reservation = {
++		.address_bits = 0,
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
++
++	if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
++		nr_pages = PAGE_SIZE / sizeof(unsigned long);
++
++	frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
++	if (frame_list == NULL)
++		return -ENOMEM;
++
++	for (i = 0; i < nr_pages; i++) {
++		if ((page = alloc_page(GFP_HIGHUSER)) == NULL) {
++			nr_pages = i;
++			need_sleep = 1;
++			break;
++		}
++
++		pfn = page_to_pfn(page);
++		frame_list[i] = pfn_to_mfn(pfn);
++
++		if (!PageHighMem(page)) {
++			v = phys_to_virt(pfn << PAGE_SHIFT);
++			scrub_pages(v, 1);
++			ret = HYPERVISOR_update_va_mapping(
++				(unsigned long)v, __pte_ma(0), 0);
++			BUG_ON(ret);
++		}
++#ifdef CONFIG_XEN_SCRUB_PAGES
++		else {
++			v = kmap(page);
++			scrub_pages(v, 1);
++			kunmap(page);
++		}
++#endif
++	}
++
++	/* Ensure that ballooned highmem pages don't have kmaps. */
++	kmap_flush_unused();
++	flush_tlb_all();
++
++	balloon_lock(flags);
++
++	/* No more mappings: invalidate P2M and add to balloon. */
++	for (i = 0; i < nr_pages; i++) {
++		pfn = mfn_to_pfn(frame_list[i]);
++		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++		balloon_append(pfn_to_page(pfn));
++	}
++
++	reservation.extent_start = frame_list;
++	reservation.nr_extents   = nr_pages;
++	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++	BUG_ON(ret != nr_pages);
++
++	current_pages -= nr_pages;
++	totalram_pages = current_pages;
++
++	balloon_unlock(flags);
++
++	free_page((unsigned long)frame_list);
++
++	return need_sleep;
++}
++
++/*
++ * We avoid multiple worker processes conflicting via the balloon mutex.
++ * We may of course race with updates of the target counts (protected by
++ * the balloon lock), or with changes to the Xen hard limit, but we will
++ * recover from these in time.
++ */
++static void balloon_process(void *unused)
++{
++	int need_sleep = 0;
++	long credit;
++
++	down(&balloon_mutex);
++
++	do {
++		credit = current_target() - current_pages;
++		if (credit > 0)
++			need_sleep = (increase_reservation(credit) != 0);
++		if (credit < 0)
++			need_sleep = (decrease_reservation(-credit) != 0);
++
++#ifndef CONFIG_PREEMPT
++		if (need_resched())
++			schedule();
++#endif
++	} while ((credit != 0) && !need_sleep);
++
++	/* Schedule more work if there is some still to be done. */
++	if (current_target() != current_pages)
++		mod_timer(&balloon_timer, jiffies + HZ);
++
++	up(&balloon_mutex);
++}
++
++/* Resets the Xen limit, sets new target, and kicks off processing. */
++static void set_new_target(unsigned long target)
++{
++	/* No need for lock. Not read-modify-write updates. */
++	hard_limit   = ~0UL;
++	target_pages = target;
++	schedule_work(&balloon_worker);
++}
++
++static struct xenbus_watch target_watch =
++{
++	.node = "memory/target"
++};
++
++/* React to a change in the target key */
++static void watch_target(struct xenbus_watch *watch,
++			 const char **vec, unsigned int len)
++{
++	unsigned long long new_target;
++	int err;
++
++	err = xenbus_scanf(XBT_NULL, "memory", "target", "%llu", &new_target);
++	if (err != 1) {
++		/* This is ok (for domain0 at least) - so just return */
++		return;
++	} 
++        
++	/* The given memory/target value is in KiB, so it needs converting to
++	   pages.  PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
++	*/
++	set_new_target(new_target >> (PAGE_SHIFT - 10));
++    
++}
++
++static int balloon_init_watcher(struct notifier_block *notifier,
++                                unsigned long event,
++                                void *data)
++{
++	int err;
++
++	err = register_xenbus_watch(&target_watch);
++	if (err)
++		printk(KERN_ERR "Failed to set balloon watcher\n");
++
++	return NOTIFY_DONE;
++    
++}
++
++static int balloon_write(struct file *file, const char __user *buffer,
++                         unsigned long count, void *data)
++{
++	char memstring[64], *endchar;
++	unsigned long long target_bytes;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	if (count <= 1)
++		return -EBADMSG; /* runt */
++	if (count > sizeof(memstring))
++		return -EFBIG;   /* too long */
++
++	if (copy_from_user(memstring, buffer, count))
++		return -EFAULT;
++	memstring[sizeof(memstring)-1] = '\0';
++
++	target_bytes = memparse(memstring, &endchar);
++	set_new_target(target_bytes >> PAGE_SHIFT);
++
++	return count;
++}
++
++static int balloon_read(char *page, char **start, off_t off,
++                        int count, int *eof, void *data)
++{
++	int len;
++
++	len = sprintf(
++		page,
++		"Current allocation: %8lu kB\n"
++		"Requested target:   %8lu kB\n"
++		"Low-mem balloon:    %8lu kB\n"
++		"High-mem balloon:   %8lu kB\n"
++		"Xen hard limit:     ",
++		PAGES2KB(current_pages), PAGES2KB(target_pages), 
++		PAGES2KB(balloon_low), PAGES2KB(balloon_high));
++
++	if (hard_limit != ~0UL) {
++		len += sprintf(
++			page + len, 
++			"%8lu kB (inc. %8lu kB driver headroom)\n",
++			PAGES2KB(hard_limit), PAGES2KB(driver_pages));
++	} else {
++		len += sprintf(
++			page + len,
++			"     ??? kB\n");
++	}
++
++	*eof = 1;
++	return len;
++}
++
++static struct notifier_block xenstore_notifier;
++
++static int __init balloon_init(void)
++{
++	unsigned long pfn;
++	struct page *page;
++
++	IPRINTK("Initialising balloon driver.\n");
++
++	if (xen_init() < 0)
++		return -1;
++
++	current_pages = min(xen_start_info->nr_pages, max_pfn);
++	target_pages  = current_pages;
++	balloon_low   = 0;
++	balloon_high  = 0;
++	driver_pages  = 0UL;
++	hard_limit    = ~0UL;
++
++	init_timer(&balloon_timer);
++	balloon_timer.data = 0;
++	balloon_timer.function = balloon_alarm;
++    
++	if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
++		WPRINTK("Unable to create /proc/xen/balloon.\n");
++		return -1;
++	}
++
++	balloon_pde->read_proc  = balloon_read;
++	balloon_pde->write_proc = balloon_write;
++    
++	/* Initialise the balloon with excess memory space. */
++	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
++		page = pfn_to_page(pfn);
++		if (!PageReserved(page))
++			balloon_append(page);
++	}
++
++	target_watch.callback = watch_target;
++	xenstore_notifier.notifier_call = balloon_init_watcher;
++
++	register_xenstore_notifier(&xenstore_notifier);
++    
++	return 0;
++}
++
++subsys_initcall(balloon_init);
++
++void balloon_update_driver_allowance(long delta)
++{
++	unsigned long flags;
++
++	balloon_lock(flags);
++	driver_pages += delta;
++	balloon_unlock(flags);
++}
++
++static int dealloc_pte_fn(
++	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
++{
++	unsigned long mfn = pte_mfn(*pte);
++	int ret;
++	struct xen_memory_reservation reservation = {
++		.extent_start = &mfn,
++		.nr_extents   = 1,
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
++	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
++	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++	BUG_ON(ret != 1);
++	return 0;
++}
++
++struct page *balloon_alloc_empty_page_range(unsigned long nr_pages)
++{
++	unsigned long vstart, flags;
++	unsigned int  order = get_order(nr_pages * PAGE_SIZE);
++	int ret;
++
++	vstart = __get_free_pages(GFP_KERNEL, order);
++	if (vstart == 0)
++		return NULL;
++
++	scrub_pages(vstart, 1 << order);
++
++	balloon_lock(flags);
++	ret = generic_page_range(
++		&init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL);
++	BUG_ON(ret);
++	current_pages -= 1UL << order;
++	balloon_unlock(flags);
++
++	schedule_work(&balloon_worker);
++
++	flush_tlb_all();
++
++	return virt_to_page(vstart);
++}
++
++void balloon_dealloc_empty_page_range(
++	struct page *page, unsigned long nr_pages)
++{
++	unsigned long i, flags;
++	unsigned int  order = get_order(nr_pages * PAGE_SIZE);
++
++	balloon_lock(flags);
++	for (i = 0; i < (1UL << order); i++)
++		balloon_append(page + i);
++	balloon_unlock(flags);
++
++	schedule_work(&balloon_worker);
++}
++
++EXPORT_SYMBOL(balloon_update_driver_allowance);
++EXPORT_SYMBOL(balloon_alloc_empty_page_range);
++EXPORT_SYMBOL(balloon_dealloc_empty_page_range);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/balloon/Makefile linux-2.6.12-xen/drivers/xen/balloon/Makefile
+--- pristine-linux-2.6.12/drivers/xen/balloon/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/balloon/Makefile	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,2 @@
++
++obj-y += balloon.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/blkback.c linux-2.6.12-xen/drivers/xen/blkback/blkback.c
+--- pristine-linux-2.6.12/drivers/xen/blkback/blkback.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkback/blkback.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,590 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/main.c
++ * 
++ * Back-end of the driver for virtual block devices. This portion of the
++ * driver exports a 'unified' block-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A 
++ * reference front-end implementation can be found in:
++ *  arch/xen/drivers/blkif/frontend
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Copyright (c) 2005, Christopher Clark
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <asm-xen/balloon.h>
++#include <asm/hypervisor.h>
++#include "common.h"
++
++/*
++ * These are rather arbitrary. They are fairly large because adjacent requests
++ * pulled from a communication ring are quite likely to end up being part of
++ * the same scatter/gather request at the disc.
++ * 
++ * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
++ * 
++ * This will increase the chances of being able to write whole tracks.
++ * 64 should be enough to keep us competitive with Linux.
++ */
++static int blkif_reqs = 64;
++static int mmap_pages;
++
++static int __init set_blkif_reqs(char *str)
++{
++	get_option(&str, &blkif_reqs);
++	return 1;
++}
++__setup("blkif_reqs=", set_blkif_reqs);
++
++/* Run-time switchable: /sys/module/blkback/parameters/ */
++static unsigned int log_stats = 0;
++static unsigned int debug_lvl = 0;
++module_param(log_stats, int, 0644);
++module_param(debug_lvl, int, 0644);
++
++/*
++ * Each outstanding request that we've passed to the lower device layers has a 
++ * 'pending_req' allocated to it. Each buffer_head that completes decrements 
++ * the pendcnt towards zero. When it hits zero, the specified domain has a 
++ * response queued for it, with the saved 'id' passed back.
++ */
++typedef struct {
++	blkif_t       *blkif;
++	unsigned long  id;
++	int            nr_pages;
++	atomic_t       pendcnt;
++	unsigned short operation;
++	int            status;
++	struct list_head free_list;
++} pending_req_t;
++
++static pending_req_t *pending_reqs;
++static struct list_head pending_free;
++static spinlock_t pending_free_lock = SPIN_LOCK_UNLOCKED;
++static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
++
++#define BLKBACK_INVALID_HANDLE (~0)
++
++static unsigned long mmap_vstart;
++static unsigned long *pending_vaddrs;
++static grant_handle_t *pending_grant_handles;
++
++static inline int vaddr_pagenr(pending_req_t *req, int seg)
++{
++	return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
++}
++
++static inline unsigned long vaddr(pending_req_t *req, int seg)
++{
++	return pending_vaddrs[vaddr_pagenr(req, seg)];
++}
++
++#define pending_handle(_req, _seg) \
++	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
++
++
++#ifdef CONFIG_XEN_BLKDEV_TAP_BE
++/*
++ * If the tap driver is used, we may get pages belonging to either the tap
++ * or (more likely) the real frontend.  The backend must specify which domain
++ * a given page belongs to in update_va_mapping though.  For the moment, 
++ * the tap rewrites the ID field of the request to contain the request index
++ * and the id of the real front end domain.
++ */
++#define BLKTAP_COOKIE 0xbeadfeed
++static inline domid_t ID_TO_DOM(unsigned long id) { return (id >> 16); }
++#endif
++
++static int do_block_io_op(blkif_t *blkif);
++static void dispatch_rw_block_io(blkif_t *blkif,
++				 blkif_request_t *req,
++				 pending_req_t *pending_req);
++static void make_response(blkif_t *blkif, unsigned long id, 
++                          unsigned short op, int st);
++
++/******************************************************************
++ * misc small helpers
++ */
++static pending_req_t* alloc_req(void)
++{
++	pending_req_t *req = NULL;
++	unsigned long flags;
++
++	spin_lock_irqsave(&pending_free_lock, flags);
++	if (!list_empty(&pending_free)) {
++		req = list_entry(pending_free.next, pending_req_t, free_list);
++		list_del(&req->free_list);
++	}
++	spin_unlock_irqrestore(&pending_free_lock, flags);
++	return req;
++}
++
++static void free_req(pending_req_t *req)
++{
++	unsigned long flags;
++	int was_empty;
++
++	spin_lock_irqsave(&pending_free_lock, flags);
++	was_empty = list_empty(&pending_free);
++	list_add(&req->free_list, &pending_free);
++	spin_unlock_irqrestore(&pending_free_lock, flags);
++	if (was_empty)
++		wake_up(&pending_free_wq);
++}
++
++static void unplug_queue(blkif_t *blkif)
++{
++	if (blkif->plug == NULL)
++		return;
++	if (blkif->plug->unplug_fn)
++		blkif->plug->unplug_fn(blkif->plug);
++	blk_put_queue(blkif->plug);
++	blkif->plug = NULL;
++}
++
++static void plug_queue(blkif_t *blkif, struct bio *bio)
++{
++	request_queue_t *q = bdev_get_queue(bio->bi_bdev);
++
++	if (q == blkif->plug)
++		return;
++	unplug_queue(blkif);
++	blk_get_queue(q);
++	blkif->plug = q;
++}
++
++static void fast_flush_area(pending_req_t *req)
++{
++	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++	unsigned int i, invcount = 0;
++	grant_handle_t handle;
++	int ret;
++
++	for (i = 0; i < req->nr_pages; i++) {
++		handle = pending_handle(req, i);
++		if (handle == BLKBACK_INVALID_HANDLE)
++			continue;
++		unmap[invcount].host_addr    = vaddr(req, i);
++		unmap[invcount].dev_bus_addr = 0;
++		unmap[invcount].handle       = handle;
++		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
++		invcount++;
++	}
++
++	ret = HYPERVISOR_grant_table_op(
++		GNTTABOP_unmap_grant_ref, unmap, invcount);
++	BUG_ON(ret);
++}
++
++/******************************************************************
++ * SCHEDULER FUNCTIONS
++ */
++
++static void print_stats(blkif_t *blkif)
++{
++	printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d\n",
++	       current->comm, blkif->st_oo_req,
++	       blkif->st_rd_req, blkif->st_wr_req);
++	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
++	blkif->st_rd_req = 0;
++	blkif->st_wr_req = 0;
++	blkif->st_oo_req = 0;
++}
++
++int blkif_schedule(void *arg)
++{
++	blkif_t          *blkif = arg;
++
++	blkif_get(blkif);
++	if (debug_lvl)
++		printk(KERN_DEBUG "%s: started\n", current->comm);
++	for (;;) {
++		if (kthread_should_stop()) {
++			/* asked to quit? */
++			if (!atomic_read(&blkif->io_pending))
++				break;
++			if (debug_lvl)
++				printk(KERN_DEBUG "%s: I/O pending, "
++				       "delaying exit\n", current->comm);
++		}
++
++		if (!atomic_read(&blkif->io_pending)) {
++			/* Wait for work to do. */
++			wait_event_interruptible(
++				blkif->wq,
++				(atomic_read(&blkif->io_pending) ||
++				 kthread_should_stop()));
++		} else if (list_empty(&pending_free)) {
++			/* Wait for pending_req becoming available. */
++			wait_event_interruptible(
++				pending_free_wq,
++				!list_empty(&pending_free));
++		}
++
++		if (blkif->status != CONNECTED) {
++			/* make sure we are connected */
++			if (debug_lvl)
++				printk(KERN_DEBUG "%s: not connected "
++				       "(%d pending)\n",
++				       current->comm,
++				       atomic_read(&blkif->io_pending));
++			wait_event_interruptible(
++				blkif->wq,
++				(blkif->status == CONNECTED ||
++				 kthread_should_stop()));
++			continue;
++		}
++
++		/* Schedule I/O */
++		atomic_set(&blkif->io_pending, 0);
++		if (do_block_io_op(blkif))
++			atomic_inc(&blkif->io_pending);
++		unplug_queue(blkif);
++
++		if (log_stats && time_after(jiffies, blkif->st_print))
++			print_stats(blkif);
++	}
++
++	if (log_stats)
++		print_stats(blkif);
++	if (debug_lvl)
++		printk(KERN_DEBUG "%s: exiting\n", current->comm);
++	blkif->xenblkd = NULL;
++	blkif_put(blkif);
++	return 0;
++}
++
++/******************************************************************
++ * COMPLETION CALLBACK -- Called as bh->b_end_io()
++ */
++
++static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
++{
++	/* An error fails the entire request. */
++	if (!uptodate) {
++		DPRINTK("Buffer not up-to-date at end of operation\n");
++		pending_req->status = BLKIF_RSP_ERROR;
++	}
++
++	if (atomic_dec_and_test(&pending_req->pendcnt)) {
++		fast_flush_area(pending_req);
++		make_response(pending_req->blkif, pending_req->id,
++			      pending_req->operation, pending_req->status);
++		blkif_put(pending_req->blkif);
++		free_req(pending_req);
++	}
++}
++
++static int end_block_io_op(struct bio *bio, unsigned int done, int error)
++{
++	if (bio->bi_size != 0)
++		return 1;
++	__end_block_io_op(bio->bi_private, !error);
++	bio_put(bio);
++	return error;
++}
++
++
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++	blkif_t *blkif = dev_id;
++
++	atomic_inc(&blkif->io_pending);
++	wake_up(&blkif->wq);
++	return IRQ_HANDLED;
++}
++
++
++
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
++
++static int do_block_io_op(blkif_t *blkif)
++{
++	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
++	blkif_request_t *req;
++	pending_req_t *pending_req;
++	RING_IDX rc, rp;
++	int more_to_do = 0;
++
++	rc = blk_ring->req_cons;
++	rp = blk_ring->sring->req_prod;
++	rmb(); /* Ensure we see queued requests up to 'rp'. */
++
++	while ((rc != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
++
++		pending_req = alloc_req();
++		if (NULL == pending_req) {
++			blkif->st_oo_req++;
++			more_to_do = 1;
++			break;
++		}
++
++		req = RING_GET_REQUEST(blk_ring, rc);
++		blk_ring->req_cons = ++rc; /* before make_response() */
++
++		switch (req->operation) {
++		case BLKIF_OP_READ:
++			blkif->st_rd_req++;
++			dispatch_rw_block_io(blkif, req, pending_req);
++			break;
++		case BLKIF_OP_WRITE:
++			blkif->st_wr_req++;
++			dispatch_rw_block_io(blkif, req, pending_req);
++			break;
++		default:
++			DPRINTK("error: unknown block io operation [%d]\n",
++				req->operation);
++			make_response(blkif, req->id, req->operation,
++				      BLKIF_RSP_ERROR);
++			free_req(pending_req);
++			break;
++		}
++	}
++	return more_to_do;
++}
++
++static void dispatch_rw_block_io(blkif_t *blkif,
++				 blkif_request_t *req,
++				 pending_req_t *pending_req)
++{
++	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
++	int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
++	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++	struct phys_req preq;
++	struct { 
++		unsigned long buf; unsigned int nsec;
++	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++	unsigned int nseg;
++	struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++	int ret, i, nbio = 0;
++
++	/* Check that number of segments is sane. */
++	nseg = req->nr_segments;
++	if (unlikely(nseg == 0) || 
++	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
++		DPRINTK("Bad number of segments in request (%d)\n", nseg);
++		goto fail_response;
++	}
++
++	preq.dev           = req->handle;
++	preq.sector_number = req->sector_number;
++	preq.nr_sects      = 0;
++
++	pending_req->blkif     = blkif;
++	pending_req->id        = req->id;
++	pending_req->operation = operation;
++	pending_req->status    = BLKIF_RSP_OKAY;
++	pending_req->nr_pages  = nseg;
++
++	for (i = 0; i < nseg; i++) {
++		seg[i].nsec = req->seg[i].last_sect -
++			req->seg[i].first_sect + 1;
++
++		if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
++		    (seg[i].nsec <= 0))
++			goto fail_response;
++		preq.nr_sects += seg[i].nsec;
++
++		map[i].host_addr = vaddr(pending_req, i);
++		map[i].dom = blkif->domid;
++		map[i].ref = req->seg[i].gref;
++		map[i].flags = GNTMAP_host_map;
++		if ( operation == WRITE )
++			map[i].flags |= GNTMAP_readonly;
++	}
++
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
++	BUG_ON(ret);
++
++	for (i = 0; i < nseg; i++) {
++		if (unlikely(map[i].status != 0)) {
++			DPRINTK("invalid buffer -- could not remap it\n");
++			goto fail_flush;
++		}
++
++		pending_handle(pending_req, i) = map[i].handle;
++#ifdef __ia64__
++		pending_vaddrs[vaddr_pagenr(pending_req, i)] =
++			(unsigned long)gnttab_map_vaddr(map[i]);
++#else
++		set_phys_to_machine(__pa(vaddr(
++			pending_req, i)) >> PAGE_SHIFT,
++			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
++#endif
++		seg[i].buf  = map[i].dev_bus_addr | 
++			(req->seg[i].first_sect << 9);
++	}
++
++	if (vbd_translate(&preq, blkif, operation) != 0) {
++		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
++			operation == READ ? "read" : "write",
++			preq.sector_number,
++			preq.sector_number + preq.nr_sects, preq.dev); 
++		goto fail_flush;
++	}
++
++	for (i = 0; i < nseg; i++) {
++		if (((int)preq.sector_number|(int)seg[i].nsec) &
++		    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
++			DPRINTK("Misaligned I/O request from domain %d",
++				blkif->domid);
++			goto fail_put_bio;
++		}
++
++		while ((bio == NULL) ||
++		       (bio_add_page(bio,
++				     virt_to_page(vaddr(pending_req, i)),
++				     seg[i].nsec << 9,
++				     seg[i].buf & ~PAGE_MASK) == 0)) {
++			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
++			if (unlikely(bio == NULL))
++				goto fail_put_bio;
++                
++			bio->bi_bdev    = preq.bdev;
++			bio->bi_private = pending_req;
++			bio->bi_end_io  = end_block_io_op;
++			bio->bi_sector  = preq.sector_number;
++		}
++
++		preq.sector_number += seg[i].nsec;
++	}
++
++	plug_queue(blkif, bio);
++	atomic_set(&pending_req->pendcnt, nbio);
++	blkif_get(blkif);
++
++	for (i = 0; i < nbio; i++)
++		submit_bio(operation, biolist[i]);
++
++	return;
++
++ fail_put_bio:
++	for (i = 0; i < (nbio-1); i++)
++		bio_put(biolist[i]);
++ fail_flush:
++	fast_flush_area(pending_req);
++ fail_response:
++	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++	free_req(pending_req);
++} 
++
++
++
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
++
++
++static void make_response(blkif_t *blkif, unsigned long id, 
++                          unsigned short op, int st)
++{
++	blkif_response_t *resp;
++	unsigned long     flags;
++	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
++	int more_to_do = 0;
++	int notify;
++
++	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++
++	/* Place on the response ring for the relevant domain. */ 
++	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
++	resp->id        = id;
++	resp->operation = op;
++	resp->status    = st;
++	blk_ring->rsp_prod_pvt++;
++	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
++
++	if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
++		/*
++		 * Tail check for pending requests. Allows frontend to avoid
++		 * notifications if requests are already in flight (lower
++		 * overheads and promotes batching).
++		 */
++		RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
++
++	} else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
++		more_to_do = 1;
++
++	}
++	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++
++	if (more_to_do) {
++		atomic_inc(&blkif->io_pending);
++		wake_up(&blkif->wq);
++	}
++	if (notify)
++		notify_remote_via_irq(blkif->irq);
++}
++
++static int __init blkif_init(void)
++{
++	struct page *page;
++	int i;
++
++	if (xen_init() < 0)
++		return -ENODEV;
++
++	mmap_pages            = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
++	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
++					blkif_reqs, GFP_KERNEL);
++	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
++					mmap_pages, GFP_KERNEL);
++	pending_vaddrs        = kmalloc(sizeof(pending_vaddrs[0]) *
++					mmap_pages, GFP_KERNEL);
++	if (!pending_reqs || !pending_grant_handles || !pending_vaddrs) {
++		kfree(pending_reqs);
++		kfree(pending_grant_handles);
++		kfree(pending_vaddrs);
++		printk("%s: out of memory\n", __FUNCTION__);
++		return -ENOMEM;
++	}
++
++	blkif_interface_init();
++	
++#ifdef __ia64__
++	extern unsigned long alloc_empty_foreign_map_page_range(
++		unsigned long pages);
++	mmap_vstart = (unsigned long)
++		alloc_empty_foreign_map_page_range(mmap_pages);
++#else /* ! ia64 */
++	page = balloon_alloc_empty_page_range(mmap_pages);
++	BUG_ON(page == NULL);
++	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
++#endif
++	printk("%s: reqs=%d, pages=%d, mmap_vstart=0x%lx\n",
++	       __FUNCTION__, blkif_reqs, mmap_pages, mmap_vstart);
++	BUG_ON(mmap_vstart == 0);
++	for (i = 0; i < mmap_pages; i++) {
++		pending_vaddrs[i] = mmap_vstart + (i << PAGE_SHIFT);
++		pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
++	}
++
++	memset(pending_reqs, 0, blkif_reqs * sizeof(pending_reqs[0]));
++	INIT_LIST_HEAD(&pending_free);
++
++	for (i = 0; i < blkif_reqs; i++)
++		list_add_tail(&pending_reqs[i].free_list, &pending_free);
++    
++	blkif_xenbus_init();
++	return 0;
++}
++
++__initcall(blkif_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/common.h linux-2.6.12-xen/drivers/xen/blkback/common.h
+--- pristine-linux-2.6.12/drivers/xen/blkback/common.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkback/common.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,123 @@
++
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <asm-xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/xen-public/io/blkif.h>
++#include <asm-xen/xen-public/io/ring.h>
++#include <asm-xen/gnttab.h>
++#include <asm-xen/driver_util.h>
++
++#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
++                                    __FILE__ , __LINE__ , ## _a )
++
++struct vbd {
++	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
++	unsigned char  readonly;    /* Non-zero -> read-only */
++	unsigned char  type;        /* VDISK_xxx */
++	u32            pdevice;     /* phys device that this vbd maps to */
++	struct block_device *bdev;
++}; 
++
++struct backend_info; 
++
++typedef struct blkif_st {
++	/* Unique identifier for this interface. */
++	domid_t           domid;
++	unsigned int      handle;
++	/* Physical parameters of the comms window. */
++	unsigned int      evtchn;
++	unsigned int      irq;
++	/* Comms information. */
++	blkif_back_ring_t blk_ring;
++	struct vm_struct *blk_ring_area;
++	/* The VBD attached to this interface. */
++	struct vbd        vbd;
++	/* Back pointer to the backend_info. */
++	struct backend_info *be; 
++	/* Private fields. */
++	enum { DISCONNECTED, CONNECTED } status;
++#ifdef CONFIG_XEN_BLKDEV_TAP_BE
++	/* Is this a blktap frontend */
++	unsigned int     is_blktap;
++#endif
++	spinlock_t       blk_ring_lock;
++	atomic_t         refcnt;
++
++	wait_queue_head_t   wq;
++	struct task_struct  *xenblkd;
++	atomic_t            io_pending;
++	request_queue_t     *plug;
++
++	/* statistics */
++	unsigned long       st_print;
++	int                 st_rd_req;
++	int                 st_wr_req;
++	int                 st_oo_req;
++
++	struct work_struct free_work;
++
++	grant_handle_t shmem_handle;
++	grant_ref_t    shmem_ref;
++} blkif_t;
++
++blkif_t *alloc_blkif(domid_t domid);
++void free_blkif_callback(blkif_t *blkif);
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
++
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b)					\
++	do {						\
++		if (atomic_dec_and_test(&(_b)->refcnt))	\
++			free_blkif_callback(_b);	\
++	} while (0)
++
++/* Create a vbd. */
++int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, unsigned major,
++	       unsigned minor, int readonly);
++void vbd_free(struct vbd *vbd);
++
++unsigned long vbd_size(struct vbd *vbd);
++unsigned int vbd_info(struct vbd *vbd);
++unsigned long vbd_secsize(struct vbd *vbd);
++
++struct phys_req {
++	unsigned short       dev;
++	unsigned short       nr_sects;
++	struct block_device *bdev;
++	blkif_sector_t       sector_number;
++};
++
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
++
++void blkif_interface_init(void);
++
++void blkif_xenbus_init(void);
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int blkif_schedule(void *arg);
++
++void update_blkif_status(blkif_t *blkif); 
++
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/interface.c linux-2.6.12-xen/drivers/xen/blkback/interface.c
+--- pristine-linux-2.6.12/drivers/xen/blkback/interface.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkback/interface.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,164 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/interface.c
++ * 
++ * Block-device interface management.
++ * 
++ * Copyright (c) 2004, Keir Fraser
++ */
++
++#include "common.h"
++#include <asm-xen/evtchn.h>
++
++static kmem_cache_t *blkif_cachep;
++
++blkif_t *alloc_blkif(domid_t domid)
++{
++	blkif_t *blkif;
++
++	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++	if (!blkif)
++		return ERR_PTR(-ENOMEM);
++
++	memset(blkif, 0, sizeof(*blkif));
++	blkif->domid = domid;
++	blkif->status = DISCONNECTED;
++	spin_lock_init(&blkif->blk_ring_lock);
++	atomic_set(&blkif->refcnt, 1);
++	init_waitqueue_head(&blkif->wq);
++	blkif->st_print = jiffies;
++
++	return blkif;
++}
++
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++{
++	struct gnttab_map_grant_ref op;
++	int ret;
++
++	op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
++	op.flags     = GNTMAP_host_map;
++	op.ref       = shared_page;
++	op.dom       = blkif->domid;
++
++	lock_vm_area(blkif->blk_ring_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++	unlock_vm_area(blkif->blk_ring_area);
++	BUG_ON(ret);
++
++	if (op.status) {
++		DPRINTK(" Grant table operation failure !\n");
++		return op.status;
++	}
++
++	blkif->shmem_ref = shared_page;
++	blkif->shmem_handle = op.handle;
++
++#ifdef __ia64__
++	/* on some arch's, map_grant_ref behaves like mmap, in that the
++	 * passed address is a hint and a different address may be returned */
++	blkif->blk_ring_area->addr = gnttab_map_vaddr(op);
++#endif
++
++	return 0;
++}
++
++static void unmap_frontend_page(blkif_t *blkif)
++{
++	struct gnttab_unmap_grant_ref op;
++	int ret;
++
++	op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
++	op.handle       = blkif->shmem_handle;
++	op.dev_bus_addr = 0;
++
++	lock_vm_area(blkif->blk_ring_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
++	unlock_vm_area(blkif->blk_ring_area);
++	BUG_ON(ret);
++}
++
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
++{
++	blkif_sring_t *sring;
++	int err;
++	evtchn_op_t op = {
++		.cmd = EVTCHNOP_bind_interdomain,
++		.u.bind_interdomain.remote_dom = blkif->domid,
++		.u.bind_interdomain.remote_port = evtchn };
++
++	/* Already connected through? */
++	if (blkif->irq)
++		return 0;
++
++	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
++		return -ENOMEM;
++
++	err = map_frontend_page(blkif, shared_page);
++	if (err) {
++		free_vm_area(blkif->blk_ring_area);
++		return err;
++	}
++
++	err = HYPERVISOR_event_channel_op(&op);
++	if (err) {
++		unmap_frontend_page(blkif);
++		free_vm_area(blkif->blk_ring_area);
++		return err;
++	}
++
++	blkif->evtchn = op.u.bind_interdomain.local_port;
++
++	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
++
++	blkif->irq = bind_evtchn_to_irqhandler(
++		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
++
++	/* We're potentially connected now */
++	update_blkif_status(blkif); 
++
++	return 0;
++}
++
++static void free_blkif(void *arg)
++{
++	blkif_t *blkif = (blkif_t *)arg;
++
++	/* Already disconnected? */
++	if (blkif->irq) {
++		unbind_from_irqhandler(blkif->irq, blkif);
++		blkif->irq = 0;
++	}
++
++	vbd_free(&blkif->vbd);
++
++	if (blkif->blk_ring.sring) {
++		unmap_frontend_page(blkif);
++		free_vm_area(blkif->blk_ring_area);
++		blkif->blk_ring.sring = NULL;
++	}
++
++	kmem_cache_free(blkif_cachep, blkif);
++}
++
++void free_blkif_callback(blkif_t *blkif)
++{
++	INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
++	schedule_work(&blkif->free_work);
++}
++
++void __init blkif_interface_init(void)
++{
++	blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
++					 0, 0, NULL, NULL);
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/Makefile linux-2.6.12-xen/drivers/xen/blkback/Makefile
+--- pristine-linux-2.6.12/drivers/xen/blkback/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkback/Makefile	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,2 @@
++
++obj-y	:= blkback.o xenbus.o interface.o vbd.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/vbd.c linux-2.6.12-xen/drivers/xen/blkback/vbd.c
+--- pristine-linux-2.6.12/drivers/xen/blkback/vbd.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkback/vbd.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,102 @@
++/******************************************************************************
++ * blkback/vbd.c
++ * 
++ * Routines for managing virtual block devices (VBDs).
++ * 
++ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
++ */
++
++#include "common.h"
++#include <asm-xen/xenbus.h>
++
++#define vbd_sz(_v)   ((_v)->bdev->bd_part ?				\
++	(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
++
++unsigned long vbd_size(struct vbd *vbd)
++{
++	return vbd_sz(vbd);
++}
++
++unsigned int vbd_info(struct vbd *vbd)
++{
++	return vbd->type | (vbd->readonly?VDISK_READONLY:0);
++}
++
++unsigned long vbd_secsize(struct vbd *vbd)
++{
++	return bdev_hardsect_size(vbd->bdev);
++}
++
++int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major,
++	       unsigned minor, int readonly)
++{
++	struct vbd *vbd;
++
++	vbd = &blkif->vbd;
++	vbd->handle   = handle; 
++	vbd->readonly = readonly;
++	vbd->type     = 0;
++
++	vbd->pdevice  = MKDEV(major, minor);
++
++	vbd->bdev = open_by_devnum(
++		vbd->pdevice,
++		vbd->readonly ? FMODE_READ : FMODE_WRITE);
++	if (IS_ERR(vbd->bdev)) {
++		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
++			vbd->pdevice);
++		return -ENOENT;
++	}
++
++	if (vbd->bdev->bd_disk == NULL) {
++		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
++			vbd->pdevice);
++		vbd_free(vbd);
++		return -ENOENT;
++	}
++
++	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
++		vbd->type |= VDISK_CDROM;
++	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
++		vbd->type |= VDISK_REMOVABLE;
++
++	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
++		handle, blkif->domid);
++	return 0;
++}
++
++void vbd_free(struct vbd *vbd)
++{
++	if (vbd->bdev)
++		blkdev_put(vbd->bdev);
++	vbd->bdev = NULL;
++}
++
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
++{
++	struct vbd *vbd = &blkif->vbd;
++	int rc = -EACCES;
++
++	if ((operation == WRITE) && vbd->readonly)
++		goto out;
++
++	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
++		goto out;
++
++	req->dev  = vbd->pdevice;
++	req->bdev = vbd->bdev;
++	rc = 0;
++
++ out:
++	return rc;
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/xenbus.c linux-2.6.12-xen/drivers/xen/blkback/xenbus.c
+--- pristine-linux-2.6.12/drivers/xen/blkback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkback/xenbus.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,419 @@
++/*  Xenbus code for blkif backend
++    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++    Copyright (C) 2005 XenSource Ltd
++
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
++
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
++
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++*/
++
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include <asm-xen/xenbus.h>
++#include "common.h"
++
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++    pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++
++
++struct backend_info
++{
++	struct xenbus_device *dev;
++	blkif_t *blkif;
++	struct xenbus_watch backend_watch;
++
++	unsigned major;
++	unsigned minor;
++	char *mode;
++};
++
++
++static void maybe_connect(struct backend_info *);
++static void connect(struct backend_info *);
++static int connect_ring(struct backend_info *);
++static void backend_changed(struct xenbus_watch *, const char **,
++			    unsigned int);
++
++
++void update_blkif_status(blkif_t *blkif)
++{ 
++	if(blkif->irq && blkif->vbd.bdev) {
++		blkif->status = CONNECTED; 
++		(void)blkif_be_int(0, blkif, NULL); 
++	}
++	maybe_connect(blkif->be); 
++}
++
++
++static ssize_t show_physical_device(struct device *_dev, char *buf)
++{
++	struct xenbus_device *dev = to_xenbus_device(_dev);
++	struct backend_info *be = dev->data;
++	return sprintf(buf, "%x:%x\n", be->major, be->minor);
++}
++DEVICE_ATTR(physical_device, S_IRUSR | S_IRGRP | S_IROTH,
++	    show_physical_device, NULL);
++
++
++static ssize_t show_mode(struct device *_dev, char *buf)
++{
++	struct xenbus_device *dev = to_xenbus_device(_dev);
++	struct backend_info *be = dev->data;
++	return sprintf(buf, "%s\n", be->mode);
++}
++DEVICE_ATTR(mode, S_IRUSR | S_IRGRP | S_IROTH, show_mode, NULL);
++
++
++static int blkback_remove(struct xenbus_device *dev)
++{
++	struct backend_info *be = dev->data;
++
++	DPRINTK("");
++
++	if (be->backend_watch.node) {
++		unregister_xenbus_watch(&be->backend_watch);
++		kfree(be->backend_watch.node);
++		be->backend_watch.node = NULL;
++	}
++	if (be->blkif) {
++		be->blkif->status = DISCONNECTED; 
++		if (be->blkif->xenblkd)
++			kthread_stop(be->blkif->xenblkd);
++		blkif_put(be->blkif);
++		be->blkif = NULL;
++	}
++
++	device_remove_file(&dev->dev, &dev_attr_physical_device);
++	device_remove_file(&dev->dev, &dev_attr_mode);
++
++	kfree(be);
++	dev->data = NULL;
++	return 0;
++}
++
++
++/**
++ * Entry point to this code when a new device is created.  Allocate the basic
++ * structures, and watch the store waiting for the hotplug scripts to tell us
++ * the device's physical major and minor numbers.  Switch to InitWait.
++ */
++static int blkback_probe(struct xenbus_device *dev,
++			 const struct xenbus_device_id *id)
++{
++	int err;
++	struct backend_info *be = kmalloc(sizeof(struct backend_info),
++					  GFP_KERNEL);
++	if (!be) {
++		xenbus_dev_fatal(dev, -ENOMEM,
++				 "allocating backend structure");
++		return -ENOMEM;
++	}
++	memset(be, 0, sizeof(*be));
++
++	be->dev = dev;
++	dev->data = be;
++
++	be->blkif = alloc_blkif(dev->otherend_id);
++	if (IS_ERR(be->blkif)) {
++		err = PTR_ERR(be->blkif);
++		be->blkif = NULL;
++		xenbus_dev_fatal(dev, err, "creating block interface");
++		goto fail;
++	}
++
++	/* setup back pointer */
++	be->blkif->be = be; 
++
++	err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
++				 &be->backend_watch, backend_changed);
++	if (err)
++		goto fail;
++
++	err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
++	if (err)
++		goto fail;
++
++	return 0;
++
++fail:
++	DPRINTK("failed");
++	blkback_remove(dev);
++	return err;
++}
++
++
++/**
++ * Callback received when the hotplug scripts have placed the physical-device
++ * node.  Read it and the mode node, and create a vbd.  If the frontend is
++ * ready, connect.
++ */
++static void backend_changed(struct xenbus_watch *watch,
++			    const char **vec, unsigned int len)
++{
++	int err;
++	unsigned major;
++	unsigned minor;
++	struct backend_info *be
++		= container_of(watch, struct backend_info, backend_watch);
++	struct xenbus_device *dev = be->dev;
++
++	DPRINTK("");
++
++	err = xenbus_scanf(XBT_NULL, dev->nodename, "physical-device", "%x:%x",
++			   &major, &minor);
++	if (XENBUS_EXIST_ERR(err)) {
++		/* Since this watch will fire once immediately after it is
++		   registered, we expect this.  Ignore it, and wait for the
++		   hotplug scripts. */
++		return;
++	}
++	if (err != 2) {
++		xenbus_dev_fatal(dev, err, "reading physical-device");
++		return;
++	}
++
++	if (be->major && be->minor &&
++	    (be->major != major || be->minor != minor)) {
++		printk(KERN_WARNING
++		       "blkback: changing physical device (from %x:%x to "
++		       "%x:%x) not supported.\n", be->major, be->minor,
++		       major, minor);
++		return;
++	}
++
++	be->mode = xenbus_read(XBT_NULL, dev->nodename, "mode", NULL);
++	if (IS_ERR(be->mode)) {
++		err = PTR_ERR(be->mode);
++		be->mode = NULL;
++		xenbus_dev_fatal(dev, err, "reading mode");
++		return;
++	}
++
++	if (be->major == 0 && be->minor == 0) {
++		/* Front end dir is a number, which is used as the handle. */
++
++		char *p = strrchr(dev->otherend, '/') + 1;
++		long handle = simple_strtoul(p, NULL, 0);
++
++		be->major = major;
++		be->minor = minor;
++
++		err = vbd_create(be->blkif, handle, major, minor,
++				 (NULL == strchr(be->mode, 'w')));
++		if (err) {
++			be->major = 0;
++			be->minor = 0;
++			xenbus_dev_fatal(dev, err, "creating vbd structure");
++			return;
++		}
++
++		be->blkif->xenblkd = kthread_run(blkif_schedule, be->blkif,
++						 "xvd %d %02x:%02x",
++						 be->blkif->domid,
++						 be->major, be->minor);
++		if (IS_ERR(be->blkif->xenblkd)) {
++			err = PTR_ERR(be->blkif->xenblkd);
++			be->blkif->xenblkd = NULL;
++			xenbus_dev_error(dev, err, "start xenblkd");
++			return;
++		}
++
++		device_create_file(&dev->dev, &dev_attr_physical_device);
++		device_create_file(&dev->dev, &dev_attr_mode);
++
++		/* We're potentially connected now */
++		update_blkif_status(be->blkif); 
++	}
++}
++
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++			     XenbusState frontend_state)
++{
++	struct backend_info *be = dev->data;
++	int err;
++
++	DPRINTK("");
++
++	switch (frontend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateConnected:
++		break;
++
++	case XenbusStateInitialised:
++		err = connect_ring(be);
++		if (err) {
++			return;
++		}
++		update_blkif_status(be->blkif); 
++		break;
++
++	case XenbusStateClosing:
++		xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
++		break;
++
++	case XenbusStateClosed:
++		device_unregister(&dev->dev);
++		break;
++
++	case XenbusStateUnknown:
++	case XenbusStateInitWait:
++	default:
++		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++				 frontend_state);
++		break;
++	}
++}
++
++
++/* ** Connection ** */
++
++
++static void maybe_connect(struct backend_info *be)
++{
++	if ((be->major != 0 || be->minor != 0) &&
++	    be->blkif->status == CONNECTED)
++		connect(be);
++}
++
++
++/**
++ * Write the physical details regarding the block device to the store, and
++ * switch to Connected state.
++ */
++static void connect(struct backend_info *be)
++{
++	xenbus_transaction_t xbt;
++	int err;
++	struct xenbus_device *dev = be->dev;
++
++	DPRINTK("%s", dev->otherend);
++
++	/* Supply the information about the device the frontend needs */
++again:
++	err = xenbus_transaction_start(&xbt);
++
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++		return;
++	}
++
++	err = xenbus_printf(xbt, dev->nodename, "sectors", "%lu",
++			    vbd_size(&be->blkif->vbd));
++	if (err) {
++		xenbus_dev_fatal(dev, err, "writing %s/sectors",
++				 dev->nodename);
++		goto abort;
++	}
++
++	/* FIXME: use a typename instead */
++	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
++			    vbd_info(&be->blkif->vbd));
++	if (err) {
++		xenbus_dev_fatal(dev, err, "writing %s/info",
++				 dev->nodename);
++		goto abort;
++	}
++	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
++			    vbd_secsize(&be->blkif->vbd));
++	if (err) {
++		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
++				 dev->nodename);
++		goto abort;
++	}
++
++	err = xenbus_switch_state(dev, xbt, XenbusStateConnected);
++	if (err)
++		goto abort;
++
++	err = xenbus_transaction_end(xbt, 0);
++	if (err == -EAGAIN)
++		goto again;
++	if (err)
++		xenbus_dev_fatal(dev, err, "ending transaction");
++	return;
++ abort:
++	xenbus_transaction_end(xbt, 1);
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++	struct xenbus_device *dev = be->dev;
++	unsigned long ring_ref;
++	unsigned int evtchn;
++	int err;
++
++	DPRINTK("%s", dev->otherend);
++
++	err = xenbus_gather(XBT_NULL, dev->otherend, "ring-ref", "%lu", &ring_ref,
++			    "event-channel", "%u", &evtchn, NULL);
++	if (err) {
++		xenbus_dev_fatal(dev, err,
++				 "reading %s/ring-ref and event-channel",
++				 dev->otherend);
++		return err;
++	}
++
++	/* Map the shared frame, irq etc. */
++	err = blkif_map(be->blkif, ring_ref, evtchn);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
++				 ring_ref, evtchn);
++		return err;
++	}
++
++	return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static struct xenbus_device_id blkback_ids[] = {
++	{ "vbd" },
++	{ "" }
++};
++
++
++static struct xenbus_driver blkback = {
++	.name = "vbd",
++	.owner = THIS_MODULE,
++	.ids = blkback_ids,
++	.probe = blkback_probe,
++	.remove = blkback_remove,
++	.otherend_changed = frontend_changed
++};
++
++
++void blkif_xenbus_init(void)
++{
++	xenbus_register_backend(&blkback);
++}
++
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkfront/blkfront.c linux-2.6.12-xen/drivers/xen/blkfront/blkfront.c
+--- pristine-linux-2.6.12/drivers/xen/blkfront/blkfront.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkfront/blkfront.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,834 @@
++/******************************************************************************
++ * blkfront.c
++ * 
++ * XenLinux virtual block-device driver.
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004, Christian Limpach
++ * Copyright (c) 2004, Andrew Warfield
++ * Copyright (c) 2005, Christopher Clark
++ * Copyright (c) 2005, XenSource Ltd
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include "block.h"
++#include <linux/cdrom.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <scsi/scsi.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/xenbus.h>
++#include <asm-xen/xen-public/grant_table.h>
++#include <asm-xen/gnttab.h>
++#include <asm/hypervisor.h>
++
++#define BLKIF_STATE_DISCONNECTED 0
++#define BLKIF_STATE_CONNECTED    1
++#define BLKIF_STATE_SUSPENDED    2
++
++#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
++    (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
++#define GRANT_INVALID_REF	0
++
++static void connect(struct blkfront_info *);
++static void blkfront_closing(struct xenbus_device *);
++static int blkfront_remove(struct xenbus_device *);
++static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
++static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
++
++static void kick_pending_request_queues(struct blkfront_info *);
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++static void blkif_restart_queue(void *arg);
++static void blkif_recover(struct blkfront_info *);
++static void blkif_completion(struct blk_shadow *);
++static void blkif_free(struct blkfront_info *, int);
++
++
++/**
++ * Entry point to this code when a new device is created.  Allocate the basic
++ * structures and the ring buffer for communication with the backend, and
++ * inform the backend of the appropriate details for those.  Switch to
++ * Initialised state.
++ */
++static int blkfront_probe(struct xenbus_device *dev,
++			  const struct xenbus_device_id *id)
++{
++	int err, vdevice, i;
++	struct blkfront_info *info;
++
++	/* FIXME: Use dynamic device id if this is not set. */
++	err = xenbus_scanf(XBT_NULL, dev->nodename,
++			   "virtual-device", "%i", &vdevice);
++	if (err != 1) {
++		xenbus_dev_fatal(dev, err, "reading virtual-device");
++		return err;
++	}
++
++	info = kmalloc(sizeof(*info), GFP_KERNEL);
++	if (!info) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++		return -ENOMEM;
++	}
++	info->xbdev = dev;
++	info->vdevice = vdevice;
++	info->connected = BLKIF_STATE_DISCONNECTED;
++	info->mi = NULL;
++	info->gd = NULL;
++	INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
++
++	info->shadow_free = 0;
++	memset(info->shadow, 0, sizeof(info->shadow));
++	for (i = 0; i < BLK_RING_SIZE; i++)
++		info->shadow[i].req.id = i+1;
++	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++
++	info->users = 0;
++
++	/* Front end dir is a number, which is used as the id. */
++	info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
++	dev->data = info;
++
++	err = talk_to_backend(dev, info);
++	if (err) {
++		kfree(info);
++		dev->data = NULL;
++		return err;
++	}
++
++	return 0;
++}
++
++
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart.  We tear down our blkif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
++ */
++static int blkfront_resume(struct xenbus_device *dev)
++{
++	struct blkfront_info *info = dev->data;
++	int err;
++
++	DPRINTK("blkfront_resume: %s\n", dev->nodename);
++
++	blkif_free(info, 1);
++
++	err = talk_to_backend(dev, info);
++	if (!err)
++		blkif_recover(info);
++
++	return err;
++}
++
++
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++			   struct blkfront_info *info)
++{
++	const char *message = NULL;
++	xenbus_transaction_t xbt;
++	int err;
++
++	/* Create shared ring, alloc event channel. */
++	err = setup_blkring(dev, info);
++	if (err)
++		goto out;
++
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++		goto destroy_blkring;
++	}
++
++	err = xenbus_printf(xbt, dev->nodename,
++			    "ring-ref","%u", info->ring_ref);
++	if (err) {
++		message = "writing ring-ref";
++		goto abort_transaction;
++	}
++	err = xenbus_printf(xbt, dev->nodename,
++			    "event-channel", "%u", info->evtchn);
++	if (err) {
++		message = "writing event-channel";
++		goto abort_transaction;
++	}
++
++	err = xenbus_switch_state(dev, xbt, XenbusStateInitialised);
++	if (err) {
++		goto abort_transaction;
++	}
++
++	err = xenbus_transaction_end(xbt, 0);
++	if (err) {
++		if (err == -EAGAIN)
++			goto again;
++		xenbus_dev_fatal(dev, err, "completing transaction");
++		goto destroy_blkring;
++	}
++
++	return 0;
++
++ abort_transaction:
++	xenbus_transaction_end(xbt, 1);
++	if (message)
++		xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_blkring:
++	blkif_free(info, 0);
++ out:
++	return err;
++}
++
++
++static int setup_blkring(struct xenbus_device *dev,
++			 struct blkfront_info *info)
++{
++	blkif_sring_t *sring;
++	int err;
++
++	info->ring_ref = GRANT_INVALID_REF;
++
++	sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
++	if (!sring) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++		return -ENOMEM;
++	}
++	SHARED_RING_INIT(sring);
++	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
++
++	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
++	if (err < 0) {
++		free_page((unsigned long)sring);
++		info->ring.sring = NULL;
++		goto fail;
++	}
++	info->ring_ref = err;
++
++	err = xenbus_alloc_evtchn(dev, &info->evtchn);
++	if (err)
++		goto fail;
++
++	err = bind_evtchn_to_irqhandler(
++		info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
++	if (err <= 0) {
++		xenbus_dev_fatal(dev, err,
++				 "bind_evtchn_to_irqhandler failed");
++		goto fail;
++	}
++	info->irq = err;
++
++	return 0;
++fail:
++	blkif_free(info, 0);
++	return err;
++}
++
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++			    XenbusState backend_state)
++{
++	struct blkfront_info *info = dev->data;
++	struct block_device *bd;
++
++	DPRINTK("blkfront:backend_changed.\n");
++
++	switch (backend_state) {
++	case XenbusStateUnknown:
++	case XenbusStateInitialising:
++	case XenbusStateInitWait:
++	case XenbusStateInitialised:
++	case XenbusStateClosed:
++		break;
++
++	case XenbusStateConnected:
++		connect(info);
++		break;
++
++	case XenbusStateClosing:
++		bd = bdget(info->dev);
++		if (bd == NULL)
++			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
++
++		down(&bd->bd_sem);
++		if (info->users > 0)
++			xenbus_dev_error(dev, -EBUSY,
++					 "Device in use; refusing to close");
++		else
++			blkfront_closing(dev);
++		up(&bd->bd_sem);
++		bdput(bd);
++		break;
++	}
++}
++
++
++/* ** Connection ** */
++
++
++/* 
++** Invoked when the backend is finally 'ready' (and has produced 
++** the details about the physical device - #sectors, size, etc). 
++*/
++static void connect(struct blkfront_info *info)
++{
++	unsigned long sectors, sector_size;
++	unsigned int binfo;
++	int err;
++
++        if( (info->connected == BLKIF_STATE_CONNECTED) || 
++	    (info->connected == BLKIF_STATE_SUSPENDED) )
++		return;
++
++	DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);
++
++	err = xenbus_gather(XBT_NULL, info->xbdev->otherend,
++			    "sectors", "%lu", &sectors,
++			    "info", "%u", &binfo,
++			    "sector-size", "%lu", &sector_size,
++			    NULL);
++	if (err) {
++		xenbus_dev_fatal(info->xbdev, err,
++				 "reading backend fields at %s",
++				 info->xbdev->otherend);
++		return;
++	}
++
++	err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
++	if (err) {
++		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
++		                 info->xbdev->otherend);
++		return;
++	}
++
++	(void)xenbus_switch_state(info->xbdev, XBT_NULL, XenbusStateConnected); 
++
++	/* Kick pending requests. */
++	spin_lock_irq(&blkif_io_lock);
++	info->connected = BLKIF_STATE_CONNECTED;
++	kick_pending_request_queues(info);
++	spin_unlock_irq(&blkif_io_lock);
++
++	add_disk(info->gd);
++}
++
++/**
++ * Handle the change of state of the backend to Closing.  We must delete our
++ * device-layer structures now, to ensure that writes are flushed through to
++ * the backend.  Once is this done, we can switch to Closed in
++ * acknowledgement.
++ */
++static void blkfront_closing(struct xenbus_device *dev)
++{
++	struct blkfront_info *info = dev->data;
++
++	DPRINTK("blkfront_closing: %s removed\n", dev->nodename);
++
++	if (info->mi) {
++		DPRINTK("Calling xlvbd_del\n");
++		xlvbd_del(info);
++		info->mi = NULL;
++	}
++
++	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
++}
++
++
++static int blkfront_remove(struct xenbus_device *dev)
++{
++	struct blkfront_info *info = dev->data;
++
++	DPRINTK("blkfront_remove: %s removed\n", dev->nodename);
++
++	blkif_free(info, 0);
++
++	kfree(info);
++
++	return 0;
++}
++
++
++static inline int GET_ID_FROM_FREELIST(
++	struct blkfront_info *info)
++{
++	unsigned long free = info->shadow_free;
++	BUG_ON(free > BLK_RING_SIZE);
++	info->shadow_free = info->shadow[free].req.id;
++	info->shadow[free].req.id = 0x0fffffee; /* debug */
++	return free;
++}
++
++static inline void ADD_ID_TO_FREELIST(
++	struct blkfront_info *info, unsigned long id)
++{
++	info->shadow[id].req.id  = info->shadow_free;
++	info->shadow[id].request = 0;
++	info->shadow_free = id;
++}
++
++static inline void flush_requests(struct blkfront_info *info)
++{
++	int notify;
++
++	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
++
++	if (notify)
++		notify_remote_via_irq(info->irq);
++}
++
++static void kick_pending_request_queues(struct blkfront_info *info)
++{
++	if (!RING_FULL(&info->ring)) {
++		/* Re-enable calldowns. */
++		blk_start_queue(info->rq);
++		/* Kick things off immediately. */
++		do_blkif_request(info->rq);
++	}
++}
++
++static void blkif_restart_queue(void *arg)
++{
++	struct blkfront_info *info = (struct blkfront_info *)arg;
++	spin_lock_irq(&blkif_io_lock);
++	kick_pending_request_queues(info);
++	spin_unlock_irq(&blkif_io_lock);
++}
++
++static void blkif_restart_queue_callback(void *arg)
++{
++	struct blkfront_info *info = (struct blkfront_info *)arg;
++	schedule_work(&info->work);
++}
++
++int blkif_open(struct inode *inode, struct file *filep)
++{
++	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++	info->users++;
++	return 0;
++}
++
++
++int blkif_release(struct inode *inode, struct file *filep)
++{
++	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++	info->users--;
++	if (info->users == 0) {
++		/* Check whether we have been instructed to close.  We will
++		   have ignored this request initially, as the device was
++		   still mounted. */
++		struct xenbus_device * dev = info->xbdev;
++		XenbusState state = xenbus_read_driver_state(dev->otherend);
++
++		if (state == XenbusStateClosing)
++			blkfront_closing(dev);
++	}
++	return 0;
++}
++
++
++int blkif_ioctl(struct inode *inode, struct file *filep,
++                unsigned command, unsigned long argument)
++{
++	int i;
++
++	DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
++		      command, (long)argument, inode->i_rdev);
++
++	switch ( command )
++	{
++	case HDIO_GETGEO:
++		/* return ENOSYS to use defaults */
++		return -ENOSYS;
++
++	case CDROMMULTISESSION:
++		DPRINTK("FIXME: support multisession CDs later\n");
++		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
++			if (put_user(0, (char __user *)(argument + i)))
++				return -EFAULT;
++		return 0;
++
++	default:
++		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
++		  command);*/
++		return -EINVAL; /* same return as native Linux */
++	}
++
++	return 0;
++}
++
++
++/*
++ * blkif_queue_request
++ *
++ * request block io
++ * 
++ * id: for guest use only.
++ * operation: BLKIF_OP_{READ,WRITE,PROBE}
++ * buffer: buffer to read/write into. this should be a
++ *   virtual address in the guest os.
++ */
++static int blkif_queue_request(struct request *req)
++{
++	struct blkfront_info *info = req->rq_disk->private_data;
++	unsigned long buffer_mfn;
++	blkif_request_t *ring_req;
++	struct bio *bio;
++	struct bio_vec *bvec;
++	int idx;
++	unsigned long id;
++	unsigned int fsect, lsect;
++	int ref;
++	grant_ref_t gref_head;
++
++	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
++		return 1;
++
++	if (gnttab_alloc_grant_references(
++		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
++		gnttab_request_free_callback(
++			&info->callback,
++			blkif_restart_queue_callback,
++			info,
++			BLKIF_MAX_SEGMENTS_PER_REQUEST);
++		return 1;
++	}
++
++	/* Fill out a communications ring structure. */
++	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
++	id = GET_ID_FROM_FREELIST(info);
++	info->shadow[id].request = (unsigned long)req;
++
++	ring_req->id = id;
++	ring_req->operation = rq_data_dir(req) ?
++		BLKIF_OP_WRITE : BLKIF_OP_READ;
++	ring_req->sector_number = (blkif_sector_t)req->sector;
++	ring_req->handle = info->handle;
++
++	ring_req->nr_segments = 0;
++	rq_for_each_bio (bio, req) {
++		bio_for_each_segment (bvec, bio, idx) {
++			BUG_ON(ring_req->nr_segments
++			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
++			buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
++			fsect = bvec->bv_offset >> 9;
++			lsect = fsect + (bvec->bv_len >> 9) - 1;
++			/* install a grant reference. */
++			ref = gnttab_claim_grant_reference(&gref_head);
++			BUG_ON(ref == -ENOSPC);
++
++			gnttab_grant_foreign_access_ref(
++				ref,
++				info->xbdev->otherend_id,
++				buffer_mfn,
++				rq_data_dir(req) );
++
++			info->shadow[id].frame[ring_req->nr_segments] =
++				mfn_to_pfn(buffer_mfn);
++
++			ring_req->seg[ring_req->nr_segments] =
++				(struct blkif_request_segment) {
++					.gref       = ref,
++					.first_sect = fsect, 
++					.last_sect  = lsect };
++
++			ring_req->nr_segments++;
++		}
++	}
++
++	info->ring.req_prod_pvt++;
++
++	/* Keep a private copy so we can reissue requests when recovering. */
++	info->shadow[id].req = *ring_req;
++
++	gnttab_free_grant_references(gref_head);
++
++	return 0;
++}
++
++/*
++ * do_blkif_request
++ *  read a block; request is in a request queue
++ */
++void do_blkif_request(request_queue_t *rq)
++{
++	struct blkfront_info *info = NULL;
++	struct request *req;
++	int queued;
++
++	DPRINTK("Entered do_blkif_request\n");
++
++	queued = 0;
++
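++	/* Drain the elevator queue, turning each fs request into a ring
++	   entry; stop the queue if the ring fills or grants run short. */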
++	while ((req = elv_next_request(rq)) != NULL) {
++		info = req->rq_disk->private_data;
++		if (!blk_fs_request(req)) {
++			end_request(req, 0);
++			continue;
++		}
++
++		if (RING_FULL(&info->ring))
++			goto wait;
++
++		DPRINTK("do_blk_req %p: cmd %p, sec %lx, "
++			"(%u/%li) buffer:%p [%s]\n",
++			req, req->cmd, req->sector, req->current_nr_sectors,
++			req->nr_sectors, req->buffer,
++			rq_data_dir(req) ? "write" : "read");
++
++
++		blkdev_dequeue_request(req);
++		if (blkif_queue_request(req)) {
++			blk_requeue_request(rq, req);
++		wait:
++			/* Avoid pointless unplugs. */
++			blk_stop_queue(rq);
++			break;
++		}
++
++		queued++;
++	}
++
++	if (queued != 0)
++		flush_requests(info);
++}
++
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++	struct request *req;
++	blkif_response_t *bret;
++	RING_IDX i, rp;
++	unsigned long flags;
++	struct blkfront_info *info = (struct blkfront_info *)dev_id;
++
++	spin_lock_irqsave(&blkif_io_lock, flags);
++
++	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
++		spin_unlock_irqrestore(&blkif_io_lock, flags);
++		return IRQ_HANDLED;
++	}
++
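++	/* Consume responses from the shared ring, completing the
++	   corresponding block-layer requests and recycling shadow slots. */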
++ again:
++	rp = info->ring.sring->rsp_prod;
++	rmb(); /* Ensure we see queued responses up to 'rp'. */
++
++	for (i = info->ring.rsp_cons; i != rp; i++) {
++		unsigned long id;
++		int ret;
++
++		bret = RING_GET_RESPONSE(&info->ring, i);
++		id   = bret->id;
++		req  = (struct request *)info->shadow[id].request;
++
++		blkif_completion(&info->shadow[id]);
++
++		ADD_ID_TO_FREELIST(info, id);
++
++		switch (bret->operation) {
++		case BLKIF_OP_READ:
++		case BLKIF_OP_WRITE:
++			if (unlikely(bret->status != BLKIF_RSP_OKAY))
++				DPRINTK("Bad return from blkdev data "
++					"request: %x\n", bret->status);
++
++			ret = end_that_request_first(
++				req, (bret->status == BLKIF_RSP_OKAY),
++				req->hard_nr_sectors);
++			BUG_ON(ret);
++			end_that_request_last(req);
++			break;
++		default:
++			BUG();
++		}
++	}
++
++	info->ring.rsp_cons = i;
++
++	if (i != info->ring.req_prod_pvt) {
++		int more_to_do;
++		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
++		if (more_to_do)
++			goto again;
++	} else {
++		info->ring.sring->rsp_event = i + 1;
++	}
++
++	kick_pending_request_queues(info);
++
++	spin_unlock_irqrestore(&blkif_io_lock, flags);
++
++	return IRQ_HANDLED;
++}
++
++static void blkif_free(struct blkfront_info *info, int suspend)
++{
++	/* Prevent new requests being issued until we fix things up. */
++	spin_lock_irq(&blkif_io_lock);
++	info->connected = suspend ? 
++		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; 
++	spin_unlock_irq(&blkif_io_lock);
++
++	/* Free resources associated with old device channel. */
++	if (info->ring_ref != GRANT_INVALID_REF) {
++		gnttab_end_foreign_access(info->ring_ref, 0,
++					  (unsigned long)info->ring.sring);
++		info->ring_ref = GRANT_INVALID_REF;
++		info->ring.sring = NULL;
++	}
++	if (info->irq)
++		unbind_from_irqhandler(info->irq, info); 
++	info->evtchn = info->irq = 0;
++
++}
++
++static void blkif_completion(struct blk_shadow *s)
++{
++	int i;
++	for (i = 0; i < s->req.nr_segments; i++)
++		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
++}
++
++static void blkif_recover(struct blkfront_info *info)
++{
++	int i;
++	blkif_request_t *req;
++	struct blk_shadow *copy;
++	int j;
++
++	/* Stage 1: Make a safe copy of the shadow state. */
++	copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
++	BUG_ON(copy == NULL);
++	memcpy(copy, info->shadow, sizeof(info->shadow));
++
++	/* Stage 2: Set up free list. */
++	memset(&info->shadow, 0, sizeof(info->shadow));
++	for (i = 0; i < BLK_RING_SIZE; i++)
++		info->shadow[i].req.id = i+1;
++	info->shadow_free = info->ring.req_prod_pvt;
++	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++
++	/* Stage 3: Find pending requests and requeue them. */
++	for (i = 0; i < BLK_RING_SIZE; i++) {
++		/* Not in use? */
++		if (copy[i].request == 0)
++			continue;
++
++		/* Grab a request slot and copy shadow state into it. */
++		req = RING_GET_REQUEST(
++			&info->ring, info->ring.req_prod_pvt);
++		*req = copy[i].req;
++
++		/* We get a new request id, and must reset the shadow state. */
++		req->id = GET_ID_FROM_FREELIST(info);
++		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
++
++		/* Rewrite any grant references invalidated by susp/resume. */
++		for (j = 0; j < req->nr_segments; j++)
++			gnttab_grant_foreign_access_ref(
++				req->seg[j].gref,
++				info->xbdev->otherend_id,
++				pfn_to_mfn(info->shadow[req->id].frame[j]),
++				rq_data_dir(
++					(struct request *)
++					info->shadow[req->id].request));
++		info->shadow[req->id].req = *req;
++
++		info->ring.req_prod_pvt++;
++	}
++
++	kfree(copy);
++
++	(void)xenbus_switch_state(info->xbdev, XBT_NULL, XenbusStateConnected); 
++	
++	/* Now safe for us to use the shared ring */
++	spin_lock_irq(&blkif_io_lock);
++	info->connected = BLKIF_STATE_CONNECTED;
++	spin_unlock_irq(&blkif_io_lock);
++
++	/* Send off requeued requests */
++	flush_requests(info);
++
++	/* Kick any other new requests queued since we resumed */
++	spin_lock_irq(&blkif_io_lock);
++	kick_pending_request_queues(info);
++	spin_unlock_irq(&blkif_io_lock);
++}
++
++
++/* ** Driver Registration ** */
++
++
++static struct xenbus_device_id blkfront_ids[] = {
++	{ "vbd" },
++	{ "" }
++};
++
++
++static struct xenbus_driver blkfront = {
++	.name = "vbd",
++	.owner = THIS_MODULE,
++	.ids = blkfront_ids,
++	.probe = blkfront_probe,
++	.remove = blkfront_remove,
++	.resume = blkfront_resume,
++	.otherend_changed = backend_changed,
++};
++
++
++static int __init xlblk_init(void)
++{
++	if (xen_init() < 0)
++		return -ENODEV;
++
++	return xenbus_register_frontend(&blkfront);
++}
++module_init(xlblk_init);
++
++
++static void xlblk_exit(void)
++{
++	return xenbus_unregister_driver(&blkfront);
++}
++module_exit(xlblk_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkfront/block.h linux-2.6.12-xen/drivers/xen/blkfront/block.h
+--- pristine-linux-2.6.12/drivers/xen/blkfront/block.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkfront/block.h	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,162 @@
++/******************************************************************************
++ * block.h
++ * 
++ * Shared definitions between all levels of XenLinux Virtual block devices.
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_DRIVERS_BLOCK_H__
++#define __XEN_DRIVERS_BLOCK_H__
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/hdreg.h>
++#include <linux/blkdev.h>
++#include <linux/major.h>
++#include <linux/devfs_fs_kernel.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/xenbus.h>
++#include <asm-xen/gnttab.h>
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/io/blkif.h>
++#include <asm-xen/xen-public/io/ring.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/uaccess.h>
++
++#if 1 
++#define IPRINTK(fmt, args...) \
++    printk(KERN_INFO "xen_blk: " fmt, ##args)
++#else
++#define IPRINTK(fmt, args...) ((void)0)
++#endif
++
++#if 1 
++#define WPRINTK(fmt, args...) \
++    printk(KERN_WARNING "xen_blk: " fmt, ##args)
++#else
++#define WPRINTK(fmt, args...) ((void)0)
++#endif
++ 
++#define DPRINTK(_f, _a...) pr_debug ( _f , ## _a )
++
++#if 0
++#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
++#else
++#define DPRINTK_IOCTL(_f, _a...) ((void)0)
++#endif
++
++struct xlbd_type_info
++{
++	int partn_shift;
++	int disks_per_major;
++	char *devname;
++	char *diskname;
++};
++
++struct xlbd_major_info
++{
++	int major;
++	int index;
++	int usage;
++	struct xlbd_type_info *type;
++};
++
++struct blk_shadow {
++	blkif_request_t req;
++	unsigned long request;
++	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++
++#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
++
++/*
++ * We have one of these per vbd, whether ide, scsi or 'other'.  They
++ * hang in private_data off the gendisk structure. We may end up
++ * putting all kinds of interesting stuff here :-)
++ */
++struct blkfront_info
++{
++	struct xenbus_device *xbdev;
++	dev_t dev;
++ 	struct gendisk *gd;
++	int vdevice;
++	blkif_vdev_t handle;
++	int connected;
++	int ring_ref;
++	blkif_front_ring_t ring;
++	unsigned int evtchn, irq;
++	struct xlbd_major_info *mi;
++	request_queue_t *rq;
++	struct work_struct work;
++	struct gnttab_free_callback callback;
++	struct blk_shadow shadow[BLK_RING_SIZE];
++	unsigned long shadow_free;
++
++	/**
++	 * The number of people holding this device open.  We won't allow a
++	 * hot-unplug unless this is 0.
++	 */
++	int users;
++};
++
++extern spinlock_t blkif_io_lock;
++
++extern int blkif_open(struct inode *inode, struct file *filep);
++extern int blkif_release(struct inode *inode, struct file *filep);
++extern int blkif_ioctl(struct inode *inode, struct file *filep,
++                       unsigned command, unsigned long argument);
++extern int blkif_check(dev_t dev);
++extern int blkif_revalidate(dev_t dev);
++extern void do_blkif_request (request_queue_t *rq); 
++
++/* Virtual block-device subsystem. */
++/* Note that xlvbd_add doesn't call add_disk for you: you're expected
++   to call add_disk on info->gd once the disk is properly connected
++   up. */
++int xlvbd_add(blkif_sector_t capacity, int device,
++	      u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
++void xlvbd_del(struct blkfront_info *info);
++
++#endif /* __XEN_DRIVERS_BLOCK_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkfront/Makefile linux-2.6.12-xen/drivers/xen/blkfront/Makefile
+--- pristine-linux-2.6.12/drivers/xen/blkfront/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkfront/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,5 @@
++
++obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	:= xenblk.o
++
++xenblk-objs := blkfront.o vbd.o
++
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blkfront/vbd.c linux-2.6.12-xen/drivers/xen/blkfront/vbd.c
+--- pristine-linux-2.6.12/drivers/xen/blkfront/vbd.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blkfront/vbd.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,323 @@
++/******************************************************************************
++ * vbd.c
++ * 
++ * XenLinux virtual block-device driver (xvd).
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "block.h"
++#include <linux/blkdev.h>
++#include <linux/list.h>
++
++#define BLKIF_MAJOR(dev) ((dev)>>8)
++#define BLKIF_MINOR(dev) ((dev) & 0xff)
++
++/*
++ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
++ * potentially combinations of the two) in the naming scheme and in a few other
++ * places.
++ */
++
++#define NUM_IDE_MAJORS 10
++#define NUM_SCSI_MAJORS 9
++#define NUM_VBD_MAJORS 1
++
++static struct xlbd_type_info xlbd_ide_type = {
++	.partn_shift = 6,
++	.disks_per_major = 2,
++	.devname = "ide",
++	.diskname = "hd",
++};
++
++static struct xlbd_type_info xlbd_scsi_type = {
++	.partn_shift = 4,
++	.disks_per_major = 16,
++	.devname = "sd",
++	.diskname = "sd",
++};
++
++static struct xlbd_type_info xlbd_vbd_type = {
++	.partn_shift = 4,
++	.disks_per_major = 16,
++	.devname = "xvd",
++	.diskname = "xvd",
++};
++
++static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
++					 NUM_VBD_MAJORS];
++
++#define XLBD_MAJOR_IDE_START	0
++#define XLBD_MAJOR_SCSI_START	(NUM_IDE_MAJORS)
++#define XLBD_MAJOR_VBD_START	(NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
++
++#define XLBD_MAJOR_IDE_RANGE	XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
++#define XLBD_MAJOR_SCSI_RANGE	XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
++#define XLBD_MAJOR_VBD_RANGE	XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
++
++/* Information about our VBDs. */
++#define MAX_VBDS 64
++static LIST_HEAD(vbds_list);
++
++static struct block_device_operations xlvbd_block_fops =
++{
++	.owner = THIS_MODULE,
++	.open = blkif_open,
++	.release = blkif_release,
++	.ioctl  = blkif_ioctl,
++};
++
++spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED;
++
++static struct xlbd_major_info *
++xlbd_alloc_major_info(int major, int minor, int index)
++{
++	struct xlbd_major_info *ptr;
++
++	ptr = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
++	if (ptr == NULL)
++		return NULL;
++
++	memset(ptr, 0, sizeof(struct xlbd_major_info));
++
++	ptr->major = major;
++
++	switch (index) {
++	case XLBD_MAJOR_IDE_RANGE:
++		ptr->type = &xlbd_ide_type;
++		ptr->index = index - XLBD_MAJOR_IDE_START;
++		break;
++	case XLBD_MAJOR_SCSI_RANGE:
++		ptr->type = &xlbd_scsi_type;
++		ptr->index = index - XLBD_MAJOR_SCSI_START;
++		break;
++	case XLBD_MAJOR_VBD_RANGE:
++		ptr->type = &xlbd_vbd_type;
++		ptr->index = index - XLBD_MAJOR_VBD_START;
++		break;
++	}
++
++	printk("Registering block device major %i\n", ptr->major);
++	if (register_blkdev(ptr->major, ptr->type->devname)) {
++		WPRINTK("can't get major %d with name %s\n",
++			ptr->major, ptr->type->devname);
++		kfree(ptr);
++		return NULL;
++	}
++
++	devfs_mk_dir(ptr->type->devname);
++	major_info[index] = ptr;
++	return ptr;
++}
++
++static struct xlbd_major_info *
++xlbd_get_major_info(int vdevice)
++{
++	struct xlbd_major_info *mi;
++	int major, minor, index;
++
++	major = BLKIF_MAJOR(vdevice);
++	minor = BLKIF_MINOR(vdevice);
++
++	switch (major) {
++	case IDE0_MAJOR: index = 0; break;
++	case IDE1_MAJOR: index = 1; break;
++	case IDE2_MAJOR: index = 2; break;
++	case IDE3_MAJOR: index = 3; break;
++	case IDE4_MAJOR: index = 4; break;
++	case IDE5_MAJOR: index = 5; break;
++	case IDE6_MAJOR: index = 6; break;
++	case IDE7_MAJOR: index = 7; break;
++	case IDE8_MAJOR: index = 8; break;
++	case IDE9_MAJOR: index = 9; break;
++	case SCSI_DISK0_MAJOR: index = 10; break;
++	case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
++		index = 11 + major - SCSI_DISK1_MAJOR;
++		break;
++	case SCSI_CDROM_MAJOR: index = 18; break;
++	default: index = 19; break;
++	}
++
++	mi = ((major_info[index] != NULL) ? major_info[index] :
++	      xlbd_alloc_major_info(major, minor, index));
++	if (mi)
++		mi->usage++;
++	return mi;
++}
++
++static void
++xlbd_put_major_info(struct xlbd_major_info *mi)
++{
++	mi->usage--;
++	/* XXX: release major if 0 */
++}
++
++static int
++xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
++{
++	request_queue_t *rq;
++
++	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
++	if (rq == NULL)
++		return -1;
++
++	elevator_init(rq, "noop");
++
++	/* Hard sector size and max sectors impersonate the equiv. hardware. */
++	blk_queue_hardsect_size(rq, sector_size);
++	blk_queue_max_sectors(rq, 512);
++
++	/* Each segment in a request is up to an aligned page in size. */
++	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
++	blk_queue_max_segment_size(rq, PAGE_SIZE);
++
++	/* Ensure a merged request will fit in a single I/O ring slot. */
++	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++
++	/* Make sure buffer addresses are sector-aligned. */
++	blk_queue_dma_alignment(rq, 511);
++
++	gd->queue = rq;
++
++	return 0;
++}
++
++static int
++xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, int vdevice,
++		    u16 vdisk_info, u16 sector_size,
++		    struct blkfront_info *info)
++{
++	struct gendisk *gd;
++	struct xlbd_major_info *mi;
++	int nr_minors = 1;
++	int err = -ENODEV;
++
++	mi = xlbd_get_major_info(vdevice);
++	if (mi == NULL)
++		goto out;
++	info->mi = mi;
++
++	if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
++		nr_minors = 1 << mi->type->partn_shift;
++
++	gd = alloc_disk(nr_minors);
++	if (gd == NULL)
++		goto out;
++
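++	/* Name the disk as native devices would be named: whole disks get
++	   e.g. "hda"/"sda"/"xvda"; single partitions append the number. */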
++	if (nr_minors > 1)
++		sprintf(gd->disk_name, "%s%c", mi->type->diskname,
++			'a' + mi->index * mi->type->disks_per_major +
++			(minor >> mi->type->partn_shift));
++	else
++		sprintf(gd->disk_name, "%s%c%d", mi->type->diskname,
++			'a' + mi->index * mi->type->disks_per_major +
++			(minor >> mi->type->partn_shift),
++			minor & ((1 << mi->type->partn_shift) - 1));
++
++	gd->major = mi->major;
++	gd->first_minor = minor;
++	gd->fops = &xlvbd_block_fops;
++	gd->private_data = info;
++	gd->driverfs_dev = &(info->xbdev->dev);
++	set_capacity(gd, capacity);
++
++	if (xlvbd_init_blk_queue(gd, sector_size)) {
++		del_gendisk(gd);
++		goto out;
++	}
++
++	info->rq = gd->queue;
++
++	if (vdisk_info & VDISK_READONLY)
++		set_disk_ro(gd, 1);
++
++	if (vdisk_info & VDISK_REMOVABLE)
++		gd->flags |= GENHD_FL_REMOVABLE;
++
++	if (vdisk_info & VDISK_CDROM)
++		gd->flags |= GENHD_FL_CD;
++
++	info->gd = gd;
++
++	return 0;
++
++ out:
++	if (mi)
++		xlbd_put_major_info(mi);
++	return err;
++}
++
++int
++xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
++	  u16 sector_size, struct blkfront_info *info)
++{
++	struct block_device *bd;
++	int err = 0;
++
++	info->dev = MKDEV(BLKIF_MAJOR(vdevice), BLKIF_MINOR(vdevice));
++
++	bd = bdget(info->dev);
++	if (bd == NULL)
++		return -ENODEV;
++
++	err = xlvbd_alloc_gendisk(BLKIF_MINOR(vdevice), capacity, vdevice,
++				  vdisk_info, sector_size, info);
++
++	bdput(bd);
++	return err;
++}
++
++void
++xlvbd_del(struct blkfront_info *info)
++{
++	struct block_device *bd;
++
++	bd = bdget(info->dev);
++	if (bd == NULL)
++		return;
++
++	if (info->gd == NULL)
++		return;
++
++	del_gendisk(info->gd);
++	put_disk(info->gd);
++	xlbd_put_major_info(info->mi);
++	info->mi = NULL;
++	blk_cleanup_queue(info->rq);
++
++	bdput(bd);
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/blktap.c linux-2.6.12-xen/drivers/xen/blktap/blktap.c
+--- pristine-linux-2.6.12/drivers/xen/blktap/blktap.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blktap/blktap.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,910 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/blktap/blktap.c
++ * 
++ * This is a modified version of the block backend driver that remaps requests
++ * to a user-space memory region.  It is intended to be used to write 
++ * application-level servers that provide block interfaces to client VMs.
++ */
++
++#include <linux/kernel.h>
++#include <linux/spinlock.h>
++#include <asm-xen/balloon.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/miscdevice.h>
++#include <linux/errno.h>
++#include <linux/major.h>
++#include <linux/gfp.h>
++#include <linux/poll.h>
++#include <asm/tlbflush.h>
++#include "common.h"
++
++/* Only one process may open /dev/xen/blktap at any time. */
++static unsigned long blktap_dev_inuse;
++unsigned long blktap_ring_ok; /* make this ring->state */
++
++/* Rings up to user space. */
++static blkif_front_ring_t blktap_ufe_ring;
++
++/* for poll: */
++static wait_queue_head_t blktap_wait;
++
++/* current switching mode */
++static unsigned long blktap_mode;
++
++/* local prototypes */
++static int blktap_read_ufe_ring(void);
++
++
++/* /dev/xen/blktap resides at device number major=10, minor=202. */
++#define BLKTAP_MINOR 202
++
++/* blktap IOCTLs:                                                      */
++#define BLKTAP_IOCTL_KICK_FE         1
++#define BLKTAP_IOCTL_KICK_BE         2 /* currently unused */
++#define BLKTAP_IOCTL_SETMODE         3
++#define BLKTAP_IOCTL_PRINT_IDXS      100  
++
++/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE)             */
++#define BLKTAP_MODE_PASSTHROUGH      0x00000000  /* default            */
++#define BLKTAP_MODE_INTERCEPT_FE     0x00000001
++#define BLKTAP_MODE_INTERCEPT_BE     0x00000002  /* unimp. */
++#define BLKTAP_MODE_COPY_FE          0x00000004  /* unimp. */
++#define BLKTAP_MODE_COPY_BE          0x00000008  /* unimp. */
++#define BLKTAP_MODE_COPY_FE_PAGES    0x00000010  /* unimp. */
++#define BLKTAP_MODE_COPY_BE_PAGES    0x00000020  /* unimp. */
++
++#define BLKTAP_MODE_INTERPOSE \
++           (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
++
++#define BLKTAP_MODE_COPY_BOTH \
++           (BLKTAP_MODE_COPY_FE | BLKTAP_MODE_COPY_BE)
++
++#define BLKTAP_MODE_COPY_BOTH_PAGES \
++           (BLKTAP_MODE_COPY_FE_PAGES | BLKTAP_MODE_COPY_BE_PAGES)
++
++static inline int BLKTAP_MODE_VALID(unsigned long arg)
++{
++	return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
++		(arg == BLKTAP_MODE_INTERCEPT_FE) ||
++		(arg == BLKTAP_MODE_INTERPOSE   ));
++/*
++  return (
++  ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
++  ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
++  ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
++  ( arg == BLKTAP_MODE_INTERPOSE    ) ||
++  ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
++  ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
++  ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
++  );
++*/
++}
++
++
++/******************************************************************
++ * MMAP REGION
++ */
++
++/*
++ * We use a big chunk of address space to map in-flight requests into,
++ * and export this region up to user-space.  See the comments in blkback
++ * about this -- the two must be kept in sync if the tap is used as a 
++ * passthrough.
++ */
++
++#define MAX_PENDING_REQS 64
++#define BATCH_PER_DOMAIN 16
++
++/* immediately before the mmap area, we have a bunch of pages reserved
++ * for shared memory rings.
++ */
++#define RING_PAGES 1 /* Front */ 
++
++/* Where things are inside the device mapping. */
++struct vm_area_struct *blktap_vma = NULL;
++unsigned long mmap_vstart;  /* Kernel pages for mapping in data. */
++unsigned long rings_vstart; /* start of mmaped vma               */
++unsigned long user_vstart;  /* start of user mappings            */
++
++#define MMAP_PAGES						\
++	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
++#define MMAP_VADDR(_start, _req,_seg)					\
++	(_start +							\
++	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
++	 ((_seg) * PAGE_SIZE))
++
++/*
++ * Each outstanding request that we've passed to the lower device layers has a 
++ * 'pending_req' allocated to it. Each buffer_head that completes decrements 
++ * the pendcnt towards zero. When it hits zero, the specified domain has a 
++ * response queued for it, with the saved 'id' passed back.
++ */
++typedef struct {
++	blkif_t       *blkif;
++	unsigned long  id;
++	int            nr_pages;
++	atomic_t       pendcnt;
++	unsigned short operation;
++	int            status;
++} pending_req_t;
++
++/*
++ * We can't allocate pending_req's in order, since they may complete out of 
++ * order. We therefore maintain an allocation ring. This ring also indicates 
++ * when enough work has been passed down -- at that point the allocation ring 
++ * will be empty.
++ */
++static pending_req_t pending_reqs[MAX_PENDING_REQS];
++static unsigned char pending_ring[MAX_PENDING_REQS];
++static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
++/* NB. We use a different index type to differentiate from shared blk rings. */
++typedef unsigned int PEND_RING_IDX;
++#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
++static PEND_RING_IDX pending_prod, pending_cons;
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
++
++/* Requests passing through the tap to the backend hijack the id field
++ * in the request message.  In it we put the AR index _AND_ the fe domid.
++ * the domid is used by the backend to map the pages properly.
++ */
++
++static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
++{
++	return ((fe_dom << 16) | MASK_PEND_IDX(idx));
++}
++
++extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id) 
++{ 
++	return (PEND_RING_IDX)(id & 0x0000ffff);
++}
++
++extern inline domid_t ID_TO_DOM(unsigned long id) 
++{ 
++	return (domid_t)(id >> 16); 
++}
++
++
++
++/******************************************************************
++ * GRANT HANDLES
++ */
++
++/* When using grant tables to map a frame for device access, the
++ * handle returned must be used to unmap the frame. This is needed to
++ * drop the ref count on the frame.
++ */
++struct grant_handle_pair
++{
++	grant_handle_t kernel;
++	grant_handle_t user;
++};
++static struct grant_handle_pair pending_grant_handles[MMAP_PAGES];
++#define pending_handle(_idx, _i) \
++    (pending_grant_handles[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
++#define BLKTAP_INVALID_HANDLE(_g) \
++    ((((_g)->kernel) == 0xFFFF) && (((_g)->user) == 0xFFFF))
++#define BLKTAP_INVALIDATE_HANDLE(_g) do {       \
++    (_g)->kernel = 0xFFFF; (_g)->user = 0xFFFF; \
++    } while(0)
++
++
++/******************************************************************
++ * BLKTAP VM OPS
++ */
++
++static struct page *blktap_nopage(struct vm_area_struct *vma,
++				  unsigned long address,
++				  int *type)
++{
++	/*
++	 * if the page has not been mapped in by the driver then generate
++	 * a SIGBUS to the domain.
++	 */
++	force_sig(SIGBUS, current);
++
++	return 0;
++}
++
++struct vm_operations_struct blktap_vm_ops = {
++	.nopage = blktap_nopage,
++};
++
++/******************************************************************
++ * BLKTAP FILE OPS
++ */
++
++static int blktap_open(struct inode *inode, struct file *filp)
++{
++	blkif_sring_t *sring;
++
++	if (test_and_set_bit(0, &blktap_dev_inuse))
++		return -EBUSY;
++    
++	/* Allocate the fe ring. */
++	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
++	if (sring == NULL)
++		return -ENOMEM;
++
++	SetPageReserved(virt_to_page(sring));
++    
++	SHARED_RING_INIT(sring);
++	FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
++
++	return 0;
++}
++
++static int blktap_release(struct inode *inode, struct file *filp)
++{
++	blktap_dev_inuse = 0;
++	blktap_ring_ok = 0;
++
++	/* Free the ring page. */
++	ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
++	free_page((unsigned long) blktap_ufe_ring.sring);
++
++	/* Clear any active mappings and free foreign map table */
++	if (blktap_vma != NULL) {
++		zap_page_range(
++			blktap_vma, blktap_vma->vm_start, 
++			blktap_vma->vm_end - blktap_vma->vm_start, NULL);
++		blktap_vma = NULL;
++	}
++
++	return 0;
++}
++
++
++/* Note on mmap:
++ * We need to map pages to user space in a way that will allow the block
++ * subsystem set up direct IO to them.  This couldn't be done before, because
++ * there isn't really a sane way to translate a user virtual address down to a 
++ * physical address when the page belongs to another domain.
++ *
++ * My first approach was to map the page in to kernel memory, add an entry
++ * for it in the physical frame list (using alloc_lomem_region as in blkback)
++ * and then attempt to map that page up to user space.  This is disallowed
++ * by xen though, which realizes that we don't really own the machine frame
++ * underlying the physical page.
++ *
++ * The new approach is to provide explicit support for this in xen linux.
++ * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
++ * mapped from other vms.  vma->vm_private_data is set up as a mapping 
++ * from pages to actual page structs.  There is a new clause in get_user_pages
++ * that does the right thing for this sort of mapping.
++ */
++static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++	int size;
++	struct page **map;
++	int i;
++
++	DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
++		vma->vm_start, vma->vm_end);
++
++	vma->vm_flags |= VM_RESERVED;
++	vma->vm_ops = &blktap_vm_ops;
++
++	size = vma->vm_end - vma->vm_start;
++	if (size != ((MMAP_PAGES + RING_PAGES) << PAGE_SHIFT)) {
++		printk(KERN_INFO 
++		       "blktap: you _must_ map exactly %d pages!\n",
++		       MMAP_PAGES + RING_PAGES);
++		return -EAGAIN;
++	}
++
++	size >>= PAGE_SHIFT;
++	DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
++    
++	rings_vstart = vma->vm_start;
++	user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
++    
++	/* Map the ring pages to the start of the region and reserve it. */
++
++	/* not sure if I really need to do this... */
++	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++	if (remap_pfn_range(vma, vma->vm_start, 
++			    __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
++			    PAGE_SIZE, vma->vm_page_prot)) {
++		WPRINTK("Mapping user ring failed!\n");
++		goto fail;
++	}
++
++	/* Mark this VM as containing foreign pages, and set up mappings. */
++	map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
++		      * sizeof(struct page *),
++		      GFP_KERNEL);
++	if (map == NULL) {
++		WPRINTK("Couldn't alloc VM_FOREIGH map.\n");
++		goto fail;
++	}
++
++	for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
++		map[i] = NULL;
++    
++	vma->vm_private_data = map;
++	vma->vm_flags |= VM_FOREIGN;
++
++	blktap_vma = vma;
++	blktap_ring_ok = 1;
++
++	return 0;
++ fail:
++	/* Clear any active mappings. */
++	zap_page_range(vma, vma->vm_start, 
++		       vma->vm_end - vma->vm_start, NULL);
++
++	return -ENOMEM;
++}
++
++static int blktap_ioctl(struct inode *inode, struct file *filp,
++                        unsigned int cmd, unsigned long arg)
++{
++	switch(cmd) {
++	case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
++		return blktap_read_ufe_ring();
++
++	case BLKTAP_IOCTL_SETMODE:
++		if (BLKTAP_MODE_VALID(arg)) {
++			blktap_mode = arg;
++			/* XXX: may need to flush rings here. */
++			printk(KERN_INFO "blktap: set mode to %lx\n", arg);
++			return 0;
++		}
++	case BLKTAP_IOCTL_PRINT_IDXS:
++        {
++		//print_fe_ring_idxs();
++		WPRINTK("User Rings: \n-----------\n");
++		WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
++			"| req_prod: %2d, rsp_prod: %2d\n",
++			blktap_ufe_ring.rsp_cons,
++			blktap_ufe_ring.req_prod_pvt,
++			blktap_ufe_ring.sring->req_prod,
++			blktap_ufe_ring.sring->rsp_prod);
++            
++        }
++	}
++	return -ENOIOCTLCMD;
++}
++
++static unsigned int blktap_poll(struct file *file, poll_table *wait)
++{
++	poll_wait(file, &blktap_wait, wait);
++	if (blktap_ufe_ring.req_prod_pvt != blktap_ufe_ring.sring->req_prod) {
++		flush_tlb_all();
++		RING_PUSH_REQUESTS(&blktap_ufe_ring);
++		return POLLIN | POLLRDNORM;
++	}
++
++	return 0;
++}
++
++void blktap_kick_user(void)
++{
++	/* blktap_ring->req_prod = blktap_req_prod; */
++	wake_up_interruptible(&blktap_wait);
++}
++
++static struct file_operations blktap_fops = {
++	.owner   = THIS_MODULE,
++	.poll    = blktap_poll,
++	.ioctl   = blktap_ioctl,
++	.open    = blktap_open,
++	.release = blktap_release,
++	.mmap    = blktap_mmap,
++};
++
++
++
++static int do_block_io_op(blkif_t *blkif, int max_to_do);
++static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
++static void make_response(blkif_t *blkif, unsigned long id, 
++                          unsigned short op, int st);
++
++
++static void fast_flush_area(int idx, int nr_pages)
++{
++	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++	unsigned int i, op = 0;
++	struct grant_handle_pair *handle;
++	uint64_t ptep;
++	int ret;
++
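++	/* Build unmap operations for both the kernel and user grant
++	 * mappings of each in-flight page, then zap the user range. */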
++	for ( i = 0; i < nr_pages; i++)
++	{
++		handle = &pending_handle(idx, i);
++		if (BLKTAP_INVALID_HANDLE(handle))
++			continue;
++
++		unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
++		unmap[op].dev_bus_addr = 0;
++		unmap[op].handle = handle->kernel;
++		op++;
++
++		if (create_lookup_pte_addr(
++			    blktap_vma->vm_mm,
++			    MMAP_VADDR(user_vstart, idx, i), 
++			    &ptep) !=0) {
++			DPRINTK("Couldn't get a pte addr!\n");
++			return;
++		}
++		unmap[op].host_addr    = ptep;
++		unmap[op].dev_bus_addr = 0;
++		unmap[op].handle       = handle->user;
++		op++;
++            
++		BLKTAP_INVALIDATE_HANDLE(handle);
++	}
++
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, op);
++	BUG_ON(ret);
++
++	if (blktap_vma != NULL)
++		zap_page_range(blktap_vma, 
++			       MMAP_VADDR(user_vstart, idx, 0), 
++			       nr_pages << PAGE_SHIFT, NULL);
++}
++
++/******************************************************************
++ * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
++ */
++
++static struct list_head blkio_schedule_list;
++static spinlock_t blkio_schedule_list_lock;
++
++static int __on_blkdev_list(blkif_t *blkif)
++{
++	return blkif->blkdev_list.next != NULL;
++}
++
++static void remove_from_blkdev_list(blkif_t *blkif)
++{
++	unsigned long flags;
++
++	if (!__on_blkdev_list(blkif))
++		return;
++
++	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
++	if (__on_blkdev_list(blkif)) {
++		list_del(&blkif->blkdev_list);
++		blkif->blkdev_list.next = NULL;
++		blkif_put(blkif);
++	}
++	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
++}
++
++static void add_to_blkdev_list_tail(blkif_t *blkif)
++{
++	unsigned long flags;
++
++	if (__on_blkdev_list(blkif))
++		return;
++
++	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
++	if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
++		list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
++		blkif_get(blkif);
++	}
++	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
++}
++
++
++/******************************************************************
++ * SCHEDULER FUNCTIONS
++ */
++
++static DECLARE_WAIT_QUEUE_HEAD(blkio_schedule_wait);
++
++static int blkio_schedule(void *arg)
++{
++	DECLARE_WAITQUEUE(wq, current);
++
++	blkif_t          *blkif;
++	struct list_head *ent;
++
++	daemonize("xenblkd");
++
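++	/* Sleep until there is both ring space and queued work, then
++	 * service interfaces in batches, round-robin. */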
++	for (;;) {
++		/* Wait for work to do. */
++		add_wait_queue(&blkio_schedule_wait, &wq);
++		set_current_state(TASK_INTERRUPTIBLE);
++		if ((NR_PENDING_REQS == MAX_PENDING_REQS) || 
++		    list_empty(&blkio_schedule_list))
++			schedule();
++		__set_current_state(TASK_RUNNING);
++		remove_wait_queue(&blkio_schedule_wait, &wq);
++
++		/* Queue up a batch of requests. */
++		while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
++		       !list_empty(&blkio_schedule_list)) {
++			ent = blkio_schedule_list.next;
++			blkif = list_entry(ent, blkif_t, blkdev_list);
++			blkif_get(blkif);
++			remove_from_blkdev_list(blkif);
++			if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
++				add_to_blkdev_list_tail(blkif);
++			blkif_put(blkif);
++		}
++	}
++}
++
++static void maybe_trigger_blkio_schedule(void)
++{
++	/*
++	 * Needed so that two processes, who together make the following
++	 * predicate true, don't both read stale values and evaluate the
++	 * predicate incorrectly. Incredibly unlikely to stall the scheduler
++	 * on the x86, but...
++	 */
++	smp_mb();
++
++	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
++	    !list_empty(&blkio_schedule_list))
++		wake_up(&blkio_schedule_wait);
++}
++
++
++
++/******************************************************************
++ * COMPLETION CALLBACK -- Called as bh->b_end_io()
++ */
++
++
++static int blktap_read_ufe_ring(void)
++{
++	/* This is called to read responses from the UFE ring. */
++
++	RING_IDX i, j, rp;
++	blkif_response_t *resp;
++	blkif_t *blkif;
++	int pending_idx;
++	pending_req_t *pending_req;
++	unsigned long     flags;
++
++	/* if we are forwarding from the UFE ring to the FE ring */
++	if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
++
++		/* for each outstanding message on the UFE ring */
++		rp = blktap_ufe_ring.sring->rsp_prod;
++		rmb();
++        
++		for (i = blktap_ufe_ring.rsp_cons; i != rp; i++) {
++			resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
++			pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
++			pending_req = &pending_reqs[pending_idx];
++            
++			blkif = pending_req->blkif;
++			for (j = 0; j < pending_req->nr_pages; j++) {
++				unsigned long vaddr;
++				struct page **map = blktap_vma->vm_private_data;
++				int offset; 
++
++				vaddr  = MMAP_VADDR(user_vstart, pending_idx, j);
++				offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
++
++				//ClearPageReserved(virt_to_page(vaddr));
++				ClearPageReserved((struct page *)map[offset]);
++				map[offset] = NULL;
++			}
++
++			fast_flush_area(pending_idx, pending_req->nr_pages);
++			make_response(blkif, pending_req->id, resp->operation, 
++				      resp->status);
++			blkif_put(pending_req->blkif);
++			spin_lock_irqsave(&pend_prod_lock, flags);
++			pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++			spin_unlock_irqrestore(&pend_prod_lock, flags);
++		}
++		blktap_ufe_ring.rsp_cons = i;
++		maybe_trigger_blkio_schedule();
++	}
++	return 0;
++}
++
++
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++	blkif_t *blkif = dev_id;
++	add_to_blkdev_list_tail(blkif);
++	maybe_trigger_blkio_schedule();
++	return IRQ_HANDLED;
++}
++
++
++
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
++
++static int do_block_io_op(blkif_t *blkif, int max_to_do)
++{
++	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
++	blkif_request_t *req;
++	RING_IDX i, rp;
++	int more_to_do = 0;
++    
++	rp = blk_ring->sring->req_prod;
++	rmb(); /* Ensure we see queued requests up to 'rp'. */
++
++	for (i = blk_ring->req_cons; 
++	     (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
++	     i++ ) {
++		if ((max_to_do-- == 0) ||
++		    (NR_PENDING_REQS == MAX_PENDING_REQS)) {
++			more_to_do = 1;
++			break;
++		}
++        
++		req = RING_GET_REQUEST(blk_ring, i);
++		switch (req->operation) {
++		case BLKIF_OP_READ:
++		case BLKIF_OP_WRITE:
++			dispatch_rw_block_io(blkif, req);
++			break;
++
++		default:
++			DPRINTK("error: unknown block io operation [%d]\n",
++				req->operation);
++			make_response(blkif, req->id, req->operation,
++				      BLKIF_RSP_ERROR);
++			break;
++		}
++	}
++
++	blk_ring->req_cons = i;
++	blktap_kick_user();
++
++	return more_to_do;
++}
++
++static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
++{
++	blkif_request_t *target;
++	int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
++	pending_req_t *pending_req;
++	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++	int op, ret;
++	unsigned int nseg;
++	int retval;
++
++	/* Check that number of segments is sane. */
++	nseg = req->nr_segments;
++	if (unlikely(nseg == 0) || 
++	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
++		DPRINTK("Bad number of segments in request (%d)\n", nseg);
++		goto bad_descriptor;
++	}
++
++	/* Make sure userspace is ready. */
++	if (!blktap_ring_ok) {
++		DPRINTK("blktap: ring not ready for requests!\n");
++		goto bad_descriptor;
++	}
++    
++
++	if (RING_FULL(&blktap_ufe_ring)) {
++		WPRINTK("blktap: fe_ring is full, can't add "
++			"(very broken!).\n");
++		goto bad_descriptor;
++	}
++
++	flush_cache_all(); /* a noop on intel... */
++
++	/* Map the foreign pages directly in to the application */    
++	op = 0;
++	for (i = 0; i < req->nr_segments; i++) {
++
++		unsigned long uvaddr;
++		unsigned long kvaddr;
++		uint64_t ptep;
++
++		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
++		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
++
++		/* Map the remote page to kernel. */
++		map[op].host_addr = kvaddr;
++		map[op].dom   = blkif->domid;
++		map[op].ref   = req->seg[i].gref;
++		map[op].flags = GNTMAP_host_map;
++		/* This needs a bit more thought in terms of interposition: 
++		 * If we want to be able to modify pages during write using 
++		 * grant table mappings, the guest will either need to allow 
++		 * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
++		if (req->operation == BLKIF_OP_WRITE)
++			map[op].flags |= GNTMAP_readonly;
++		op++;
++
++		/* Now map it to user. */
++		ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
++		if (ret) {
++			DPRINTK("Couldn't get a pte addr!\n");
++			fast_flush_area(pending_idx, req->nr_segments);
++			goto bad_descriptor;
++		}
++
++		map[op].host_addr = ptep;
++		map[op].dom       = blkif->domid;
++		map[op].ref       = req->seg[i].gref;
++		map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
++			| GNTMAP_contains_pte;
++		/* Above interposition comment applies here as well. */
++		if (req->operation == BLKIF_OP_WRITE)
++			map[op].flags |= GNTMAP_readonly;
++		op++;
++	}
++
++	retval = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
++	BUG_ON(retval);
++
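++	/* Walk the map results in kernel/user pairs: record the grant
++	 * handles and wire up the VM_FOREIGN page array for direct IO. */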
++	op = 0;
++	for (i = 0; i < (req->nr_segments*2); i += 2) {
++		unsigned long uvaddr;
++		unsigned long kvaddr;
++		unsigned long offset;
++		int cancel = 0;
++
++		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
++		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
++
++		if (unlikely(map[i].status)) {
++			DPRINTK("Error on kernel grant mapping (%d)\n",
++				map[i].status);
++			ret = map[i].status;
++			cancel = 1;
++		}
++
++		if (unlikely(map[i+1].status)) {
++			DPRINTK("Error on user grant mapping (%d)\n",
++				map[i+1].status);
++			ret = map[i+1].status;
++			cancel = 1;
++		}
++
++		if (cancel) {
++			fast_flush_area(pending_idx, req->nr_segments);
++			goto bad_descriptor;
++		}
++
++		/* Set the necessary mappings in p2m and in the VM_FOREIGN 
++		 * vm_area_struct to allow user vaddr -> struct page lookups
++		 * to work.  This is needed for direct IO to foreign pages. */
++		set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
++				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
++
++		offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
++		((struct page **)blktap_vma->vm_private_data)[offset] =
++			pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++
++		/* Save handles for unmapping later. */
++		pending_handle(pending_idx, i/2).kernel = map[i].handle;
++		pending_handle(pending_idx, i/2).user   = map[i+1].handle;
++	}
++
++	/* Mark mapped pages as reserved: */
++	for (i = 0; i < req->nr_segments; i++) {
++		unsigned long kvaddr;
++		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
++		SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
++	}
++
++	pending_req = &pending_reqs[pending_idx];
++	pending_req->blkif     = blkif;
++	pending_req->id        = req->id;
++	pending_req->operation = req->operation;
++	pending_req->status    = BLKIF_RSP_OKAY;
++	pending_req->nr_pages  = nseg;
++	req->id = MAKE_ID(blkif->domid, pending_idx);
++	//atomic_set(&pending_req->pendcnt, nbio);
++	pending_cons++;
++	blkif_get(blkif);
++
++	/* Finally, write the request message to the user ring. */
++	target = RING_GET_REQUEST(&blktap_ufe_ring,
++				  blktap_ufe_ring.req_prod_pvt);
++	memcpy(target, req, sizeof(*req));
++	blktap_ufe_ring.req_prod_pvt++;
++	return;
++
++ bad_descriptor:
++	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++} 
++
++
++
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
++
++
++static void make_response(blkif_t *blkif, unsigned long id, 
++                          unsigned short op, int st)
++{
++	blkif_response_t *resp;
++	unsigned long     flags;
++	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
++
++	/* Place on the response ring for the relevant domain. */ 
++	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
++	resp->id        = id;
++	resp->operation = op;
++	resp->status    = st;
++	wmb(); /* Ensure other side can see the response fields. */
++	blk_ring->rsp_prod_pvt++;
++	RING_PUSH_RESPONSES(blk_ring);
++	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++
++	/* Kick the relevant domain. */
++	notify_remote_via_irq(blkif->irq);
++}
++
++static struct miscdevice blktap_miscdev = {
++	.minor        = BLKTAP_MINOR,
++	.name         = "blktap",
++	.fops         = &blktap_fops,
++	.devfs_name   = "misc/blktap",
++};
++
++void blkif_deschedule(blkif_t *blkif)
++{
++	remove_from_blkdev_list(blkif);
++}
++
++static int __init blkif_init(void)
++{
++	int i, j, err;
++	struct page *page;
++
++	blkif_interface_init();
++
++	page = balloon_alloc_empty_page_range(MMAP_PAGES);
++	BUG_ON(page == NULL);
++	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
++
++	pending_cons = 0;
++	pending_prod = MAX_PENDING_REQS;
++	memset(pending_reqs, 0, sizeof(pending_reqs));
++	for ( i = 0; i < MAX_PENDING_REQS; i++ )
++		pending_ring[i] = i;
++    
++	spin_lock_init(&blkio_schedule_list_lock);
++	INIT_LIST_HEAD(&blkio_schedule_list);
++
++	i = kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES);
++	BUG_ON(i<0);
++
++	blkif_xenbus_init();
++
++	for (i = 0; i < MAX_PENDING_REQS ; i++)
++		for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
++			BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
++
++	err = misc_register(&blktap_miscdev);
++	if (err != 0) {
++		printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n",
++		       err);
++		return err;
++	}
++
++	init_waitqueue_head(&blktap_wait);
++
++	return 0;
++}
++
++__initcall(blkif_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/common.h linux-2.6.12-xen/drivers/xen/blktap/common.h
+--- pristine-linux-2.6.12/drivers/xen/blktap/common.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blktap/common.h	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,110 @@
++
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <asm-xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/xen-public/io/blkif.h>
++#include <asm-xen/xen-public/io/ring.h>
++#include <asm-xen/gnttab.h>
++#include <asm-xen/driver_util.h>
++
++#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
++                                    __FILE__ , __LINE__ , ## _a )
++
++#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
++
++struct vbd {
++	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
++	unsigned char  readonly;    /* Non-zero -> read-only */
++	unsigned char  type;        /* VDISK_xxx */
++	u32            pdevice;     /* phys device that this vbd maps to */
++	struct block_device *bdev;
++}; 
++
++typedef struct blkif_st {
++	/* Unique identifier for this interface. */
++	domid_t           domid;
++	unsigned int      handle;
++	/* Physical parameters of the comms window. */
++	unsigned int      evtchn;
++	unsigned int      irq;
++	/* Comms information. */
++	blkif_back_ring_t blk_ring;
++	struct vm_struct *blk_ring_area;
++	/* VBDs attached to this interface. */
++	struct vbd        vbd;
++	/* Private fields. */
++	enum { DISCONNECTED, CONNECTED } status;
++#ifdef CONFIG_XEN_BLKDEV_TAP_BE
++	/* Is this a blktap frontend */
++	unsigned int     is_blktap;
++#endif
++	struct list_head blkdev_list;
++	spinlock_t       blk_ring_lock;
++	atomic_t         refcnt;
++
++	struct work_struct free_work;
++
++	grant_handle_t   shmem_handle;
++	grant_ref_t      shmem_ref;
++} blkif_t;
++
++blkif_t *alloc_blkif(domid_t domid);
++void free_blkif_callback(blkif_t *blkif);
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
++
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b)                             \
++    do {                                          \
++        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
++            free_blkif_callback(_b);		  \
++    } while (0)
++
++/* Create a vbd. */
++int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, u32 pdevice,
++	       int readonly);
++void vbd_free(struct vbd *vbd);
++
++unsigned long vbd_size(struct vbd *vbd);
++unsigned int vbd_info(struct vbd *vbd);
++unsigned long vbd_secsize(struct vbd *vbd);
++
++struct phys_req {
++	unsigned short       dev;
++	unsigned short       nr_sects;
++	struct block_device *bdev;
++	blkif_sector_t       sector_number;
++};
++
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
++
++void blkif_interface_init(void);
++
++void blkif_deschedule(blkif_t *blkif);
++
++void blkif_xenbus_init(void);
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/interface.c linux-2.6.12-xen/drivers/xen/blktap/interface.c
+--- pristine-linux-2.6.12/drivers/xen/blktap/interface.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blktap/interface.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,146 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/interface.c
++ * 
++ * Block-device interface management.
++ * 
++ * Copyright (c) 2004, Keir Fraser
++ */
++
++#include "common.h"
++#include <asm-xen/evtchn.h>
++
++static kmem_cache_t *blkif_cachep;
++
++blkif_t *alloc_blkif(domid_t domid)
++{
++	blkif_t *blkif;
++
++	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++	if (!blkif)
++		return ERR_PTR(-ENOMEM);
++
++	memset(blkif, 0, sizeof(*blkif));
++	blkif->domid = domid;
++	blkif->status = DISCONNECTED;
++	spin_lock_init(&blkif->blk_ring_lock);
++	atomic_set(&blkif->refcnt, 1);
++
++	return blkif;
++}
++
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++{
++	struct gnttab_map_grant_ref op;
++	int ret;
++
++	op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
++	op.flags     = GNTMAP_host_map;
++	op.ref       = shared_page;
++	op.dom       = blkif->domid;
++
++	lock_vm_area(blkif->blk_ring_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++	unlock_vm_area(blkif->blk_ring_area);
++	BUG_ON(ret);
++
++	if (op.status) {
++		DPRINTK(" Grant table operation failure !\n");
++		return op.status;
++	}
++
++	blkif->shmem_ref    = shared_page;
++	blkif->shmem_handle = op.handle;
++
++	return 0;
++}
++
++static void unmap_frontend_page(blkif_t *blkif)
++{
++	struct gnttab_unmap_grant_ref op;
++	int ret;
++
++	op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
++	op.handle       = blkif->shmem_handle;
++	op.dev_bus_addr = 0;
++
++	lock_vm_area(blkif->blk_ring_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
++	unlock_vm_area(blkif->blk_ring_area);
++	BUG_ON(ret);
++}
++
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
++{
++	blkif_sring_t *sring;
++	int err;
++	evtchn_op_t op = {
++		.cmd = EVTCHNOP_bind_interdomain,
++		.u.bind_interdomain.remote_dom  = blkif->domid,
++		.u.bind_interdomain.remote_port = evtchn };
++
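++	/* Map the frontend's shared ring page and bind the interdomain
++	 * event channel before marking the interface connected. */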
++	if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
++		return -ENOMEM;
++
++	err = map_frontend_page(blkif, shared_page);
++	if (err) {
++		free_vm_area(blkif->blk_ring_area);
++		return err;
++	}
++
++	err = HYPERVISOR_event_channel_op(&op);
++	if (err) {
++		unmap_frontend_page(blkif);
++		free_vm_area(blkif->blk_ring_area);
++		return err;
++	}
++
++	blkif->evtchn = op.u.bind_interdomain.local_port;
++
++	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
++
++	blkif->irq = bind_evtchn_to_irqhandler(
++		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
++
++	blkif->status = CONNECTED;
++
++	return 0;
++}
++
++static void free_blkif(void *arg)
++{
++	blkif_t *blkif = (blkif_t *)arg;
++
++	if (blkif->irq)
++		unbind_from_irqhandler(blkif->irq, blkif);
++
++	if (blkif->blk_ring.sring) {
++		unmap_frontend_page(blkif);
++		free_vm_area(blkif->blk_ring_area);
++		blkif->blk_ring.sring = NULL;
++	}
++
++	kmem_cache_free(blkif_cachep, blkif);
++}
++
++void free_blkif_callback(blkif_t *blkif)
++{
++	INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
++	schedule_work(&blkif->free_work);
++}
++
++void __init blkif_interface_init(void)
++{
++	blkif_cachep = kmem_cache_create(
++		"blkif_cache", sizeof(blkif_t), 0, 0, NULL, NULL);
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/Makefile linux-2.6.12-xen/drivers/xen/blktap/Makefile
+--- pristine-linux-2.6.12/drivers/xen/blktap/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blktap/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,3 @@
++
++obj-y	:= xenbus.o interface.o blktap.o 
++
+diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/xenbus.c linux-2.6.12-xen/drivers/xen/blktap/xenbus.c
+--- pristine-linux-2.6.12/drivers/xen/blktap/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/blktap/xenbus.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,234 @@
++/*  Xenbus code for blkif tap
++
++    A Warfield.
++
++    Hastily modified from the original backend code:
++
++    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
++
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
++
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
++
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++*/
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <asm-xen/xenbus.h>
++#include "common.h"
++
++struct backend_info
++{
++	struct xenbus_device *dev;
++
++	/* our communications channel */
++	blkif_t *blkif;
++
++	long int frontend_id;
++
++	/* watch back end for changes */
++	struct xenbus_watch backend_watch;
++
++	/* watch front end for changes */
++	struct xenbus_watch watch;
++	char *frontpath;
++};
++
++static int blkback_remove(struct xenbus_device *dev)
++{
++	struct backend_info *be = dev->data;
++
++	if (be->watch.node)
++		unregister_xenbus_watch(&be->watch);
++	unregister_xenbus_watch(&be->backend_watch);
++	if (be->blkif)
++		blkif_put(be->blkif);
++	kfree(be->frontpath);
++	kfree(be);
++	return 0;
++}
++
++/* Front end tells us frame. */
++static void frontend_changed(struct xenbus_watch *watch,
++			     const char **vec, unsigned int len)
++{
++	unsigned long ring_ref;
++	unsigned int evtchn;
++	int err;
++	struct backend_info *be
++		= container_of(watch, struct backend_info, watch);
++
++	/* If other end is gone, delete ourself. */
++	if (vec && !xenbus_exists(be->frontpath, "")) {
++		xenbus_rm(be->dev->nodename, "");
++		device_unregister(&be->dev->dev);
++		return;
++	}
++	if (be->blkif == NULL || be->blkif->status == CONNECTED)
++		return;
++
++	err = xenbus_gather(be->frontpath, "ring-ref", "%lu", &ring_ref,
++			    "event-channel", "%u", &evtchn, NULL);
++	if (err) {
++		xenbus_dev_error(be->dev, err,
++				 "reading %s/ring-ref and event-channel",
++				 be->frontpath);
++		return;
++	}
++
++	/* Map the shared frame, irq etc. */
++	err = blkif_map(be->blkif, ring_ref, evtchn);
++	if (err) {
++		xenbus_dev_error(be->dev, err, "mapping ring-ref %lu port %u",
++				 ring_ref, evtchn);
++		goto abort;
++	}
++
++	xenbus_dev_ok(be->dev);
++
++	return;
++
++abort:
++	xenbus_transaction_end(1);
++}
++
++/* 
++   Setup supplies physical device.  
++   We provide event channel and device details to front end.
++   Frontend supplies shared frame and event channel.
++ */
++static void backend_changed(struct xenbus_watch *watch,
++			    const char **vec, unsigned int len)
++{
++	int err;
++	char *p;
++	long int handle;
++	struct backend_info *be
++		= container_of(watch, struct backend_info, backend_watch);
++	struct xenbus_device *dev = be->dev;
++
++	if (be->blkif == NULL) {
++		/* Front end dir is a number, which is used as the handle. */
++		p = strrchr(be->frontpath, '/') + 1;
++		handle = simple_strtoul(p, NULL, 0);
++
++		be->blkif = alloc_blkif(be->frontend_id);
++		if (IS_ERR(be->blkif)) {
++			err = PTR_ERR(be->blkif);
++			be->blkif = NULL;
++			xenbus_dev_error(dev, err, "creating block interface");
++			return;
++		}
++
++		/* Pass in NULL node to skip exist test. */
++		frontend_changed(&be->watch, NULL, 0);
++	}
++}
++
++static int blkback_probe(struct xenbus_device *dev,
++			 const struct xenbus_device_id *id)
++{
++	struct backend_info *be;
++	char *frontend;
++	int err;
++
++	be = kmalloc(sizeof(*be), GFP_KERNEL);
++	if (!be) {
++		xenbus_dev_error(dev, -ENOMEM, "allocating backend structure");
++		return -ENOMEM;
++	}
++	memset(be, 0, sizeof(*be));
++
++	frontend = NULL;
++	err = xenbus_gather(dev->nodename,
++			    "frontend-id", "%li", &be->frontend_id,
++			    "frontend", NULL, &frontend,
++			    NULL);
++	if (XENBUS_EXIST_ERR(err))
++		goto free_be;
++	if (err < 0) {
++		xenbus_dev_error(dev, err,
++				 "reading %s/frontend or frontend-id",
++				 dev->nodename);
++		goto free_be;
++	}
++	if (strlen(frontend) == 0 || !xenbus_exists(frontend, "")) {
++		/* If we can't get a frontend path and a frontend-id,
++		 * then our bus-id is no longer valid and we need to
++		 * destroy the backend device.
++		 */
++		err = -ENOENT;
++		goto free_be;
++	}
++
++	be->dev = dev;
++	be->backend_watch.node = dev->nodename;
++	be->backend_watch.callback = backend_changed;
++	/* Registration implicitly fires backend_changed once */
++	err = register_xenbus_watch(&be->backend_watch);
++	if (err) {
++		be->backend_watch.node = NULL;
++		xenbus_dev_error(dev, err, "adding backend watch on %s",
++				 dev->nodename);
++		goto free_be;
++	}
++
++	be->frontpath = frontend;
++	be->watch.node = be->frontpath;
++	be->watch.callback = frontend_changed;
++	err = register_xenbus_watch(&be->watch);
++	if (err) {
++		be->watch.node = NULL;
++		xenbus_dev_error(dev, err,
++				 "adding frontend watch on %s",
++				 be->frontpath);
++		goto free_be;
++	}
++
++	dev->data = be;
++	return 0;
++
++ free_be:
++	if (be->backend_watch.node)
++		unregister_xenbus_watch(&be->backend_watch);
++	kfree(frontend);
++	kfree(be);
++	return err;
++}
++
++static struct xenbus_device_id blkback_ids[] = {
++	{ "vbd" },
++	{ "" }
++};
++
++static struct xenbus_driver blkback = {
++	.name = "vbd",
++	.owner = THIS_MODULE,
++	.ids = blkback_ids,
++	.probe = blkback_probe,
++	.remove = blkback_remove,
++};
++
++void blkif_xenbus_init(void)
++{
++	xenbus_register_backend(&blkback);
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
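
As a reading aid, the xenstore nodes this backend consumes can be read off the
xenbus_gather() calls above; the bracketed components below are placeholders
rather than literal names:

    <backend nodename>/
        frontend-id     domain id of the frontend (parsed as %li)
        frontend        xenstore path of the frontend's directory
    <frontend path>/
        ring-ref        grant reference of the shared ring page (%lu)
        event-channel   unbound event-channel port (%u)

blkback_probe() reads the first pair and registers watches on both
directories; frontend_changed() then picks up the second pair before calling
blkif_map().
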
+diff -Nurp pristine-linux-2.6.12/drivers/xen/char/Makefile linux-2.6.12-xen/drivers/xen/char/Makefile
+--- pristine-linux-2.6.12/drivers/xen/char/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/char/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,2 @@
++
++obj-y	:= mem.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/char/mem.c linux-2.6.12-xen/drivers/xen/char/mem.c
+--- pristine-linux-2.6.12/drivers/xen/char/mem.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/char/mem.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,157 @@
++/*
++ *  Originally from linux/drivers/char/mem.c
++ *
++ *  Copyright (C) 1991, 1992  Linus Torvalds
++ *
++ *  Added devfs support. 
++ *    Jan-11-1998, C. Scott Ananian <cananian at alumni.princeton.edu>
++ *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj at sgi.com>
++ */
++
++#include <linux/config.h>
++#include <linux/mm.h>
++#include <linux/miscdevice.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/mman.h>
++#include <linux/random.h>
++#include <linux/init.h>
++#include <linux/raw.h>
++#include <linux/tty.h>
++#include <linux/capability.h>
++#include <linux/smp_lock.h>
++#include <linux/devfs_fs_kernel.h>
++#include <linux/ptrace.h>
++#include <linux/device.h>
++#include <asm/pgalloc.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
++
++static inline int uncached_access(struct file *file)
++{
++        if (file->f_flags & O_SYNC)
++                return 1;
++        /* Xen sets correct MTRR type on non-RAM for us. */
++        return 0;
++}
++
++/*
++ * This function reads the *physical* memory. The f_pos points directly to the 
++ * memory location. 
++ */
++static ssize_t read_mem(struct file * file, char __user * buf,
++			size_t count, loff_t *ppos)
++{
++	unsigned long i, p = *ppos;
++	ssize_t read = -EFAULT;
++	void __iomem *v;
++
++	if ((v = ioremap(p, count)) == NULL) {
++		/*
++		 * Some programs (e.g., dmidecode) groove off into weird RAM
++		 * areas where no tables can possibly exist (because Xen will
++		 * have stomped on them!). These programs get rather upset if
++                 * we let them know that Xen failed their access, so we fake
++                 * out a read of all zeroes. :-)
++		 */
++		for (i = 0; i < count; i++)
++			if (put_user(0, buf+i))
++				return -EFAULT;
++		return count;
++	}
++	if (copy_to_user(buf, v, count))
++		goto out;
++
++	read = count;
++	*ppos += read;
++out:
++	iounmap(v);
++	return read;
++}
++
++static ssize_t write_mem(struct file * file, const char __user * buf, 
++			 size_t count, loff_t *ppos)
++{
++	unsigned long p = *ppos;
++	ssize_t written = -EFAULT;
++	void __iomem *v;
++
++	if ((v = ioremap(p, count)) == NULL)
++		return -EFAULT;
++	if (copy_from_user(v, buf, count))
++		goto out;
++
++	written = count;
++	*ppos += written;
++out:
++	iounmap(v);
++	return written;
++}
++
++static int mmap_mem(struct file * file, struct vm_area_struct * vma)
++{
++	if (uncached_access(file))
++		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++				   vma->vm_end - vma->vm_start,
++				   vma->vm_page_prot, DOMID_IO))
++		return -EAGAIN;
++
++	return 0;
++}
++
++/*
++ * The memory devices use the full 32/64 bits of the offset, and so we cannot
++ * check against negative addresses: they are ok. The return value is weird,
++ * though, in that case (0).
++ *
++ * also note that seeking relative to the "end of file" isn't supported:
++ * it has no meaning, so it returns -EINVAL.
++ */
++static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
++{
++	loff_t ret;
++
++	down(&file->f_dentry->d_inode->i_sem);
++	switch (orig) {
++		case 0:
++			file->f_pos = offset;
++			ret = file->f_pos;
++			force_successful_syscall_return();
++			break;
++		case 1:
++			file->f_pos += offset;
++			ret = file->f_pos;
++			force_successful_syscall_return();
++			break;
++		default:
++			ret = -EINVAL;
++	}
++	up(&file->f_dentry->d_inode->i_sem);
++	return ret;
++}
++
++static int open_mem(struct inode * inode, struct file * filp)
++{
++	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++struct file_operations mem_fops = {
++	.llseek		= memory_lseek,
++	.read		= read_mem,
++	.write		= write_mem,
++	.mmap		= mmap_mem,
++	.open		= open_mem,
++};
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
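
A user-space sketch of the read path above: where ioremap() fails because Xen
owns the frame, the driver fakes a run of zeroes instead of returning an
error, so a plain read() still appears to succeed. The snippet is a sketch,
not part of the patch, and the offset is purely illustrative:

    /* sketch: read a few bytes of "physical" memory through this driver */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char buf[16];
        int fd = open("/dev/mem", O_RDONLY);   /* needs CAP_SYS_RAWIO, see open_mem() */

        if (fd < 0)
            return 1;
        if (lseek(fd, 0xf0000, SEEK_SET) < 0)  /* arbitrary example offset */
            return 1;
        if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
            return 1;
        printf("%02x %02x ...\n", buf[0], buf[1]);
        close(fd);
        return 0;
    }
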
+diff -Nurp pristine-linux-2.6.12/drivers/xen/console/console.c linux-2.6.12-xen/drivers/xen/console/console.c
+--- pristine-linux-2.6.12/drivers/xen/console/console.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/console/console.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,688 @@
++/******************************************************************************
++ * console.c
++ * 
++ * Virtual console driver.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser.
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/console.h>
++#include <linux/bootmem.h>
++#include <linux/sysrq.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/uaccess.h>
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/event_channel.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/xencons.h>
++
++/*
++ * Modes:
++ *  'xencons=off'  [XC_OFF]:     Console is disabled.
++ *  'xencons=tty'  [XC_TTY]:     Console attached to '/dev/tty[0-9]+'.
++ *  'xencons=ttyS' [XC_SERIAL]:  Console attached to '/dev/ttyS[0-9]+'.
++ *                 [XC_DEFAULT]: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
++ * 
++ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
++ * warnings from standard distro startup scripts.
++ */
++static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
++static int xc_num = -1;
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static unsigned long sysrq_requested;
++extern int sysrq_enabled;
++#endif
++
++static int __init xencons_setup(char *str)
++{
++	char *q;
++	int n;
++
++	if (!strncmp(str, "ttyS", 4))
++		xc_mode = XC_SERIAL;
++	else if (!strncmp(str, "tty", 3))
++		xc_mode = XC_TTY;
++	else if (!strncmp(str, "off", 3))
++		xc_mode = XC_OFF;
++
++	switch ( xc_mode )
++	{
++	case XC_SERIAL:
++		n = simple_strtol(str+4, &q, 10);
++		if (q > (str + 4))
++			xc_num = n;
++		break;
++	case XC_TTY:
++		n = simple_strtol(str+3, &q, 10);
++		if (q > (str + 3))
++			xc_num = n;
++		break;
++	default:
++		break;
++	}
++
++	return 1;
++}
++__setup("xencons=", xencons_setup);
++
++/* The kernel and user-land drivers share a common transmit buffer. */
++static unsigned int wbuf_size = 4096;
++#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
++static char *wbuf;
++static unsigned int wc, wp; /* write_cons, write_prod */
++
++static int __init xencons_bufsz_setup(char *str)
++{
++	unsigned int goal;
++	goal = simple_strtoul(str, NULL, 0);
++	while (wbuf_size < goal)
++		wbuf_size <<= 1;
++	return 1;
++}
++__setup("xencons_bufsz=", xencons_bufsz_setup);
++
++/* This lock protects accesses to the common transmit buffer. */
++static spinlock_t xencons_lock = SPIN_LOCK_UNLOCKED;
++
++/* Common transmit-kick routine. */
++static void __xencons_tx_flush(void);
++
++static struct tty_driver *xencons_driver;
++
++/******************** Kernel console driver ********************************/
++
++static void kcons_write(
++	struct console *c, const char *s, unsigned int count)
++{
++	int           i = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++
++	while (i < count) {
++		for (; i < count; i++) {
++			if ((wp - wc) >= (wbuf_size - 1))
++				break;
++			if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
++				wbuf[WBUF_MASK(wp++)] = '\r';
++		}
++
++		__xencons_tx_flush();
++	}
++
++	spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void kcons_write_dom0(
++	struct console *c, const char *s, unsigned int count)
++{
++	int rc;
++
++	while ((count > 0) &&
++	       ((rc = HYPERVISOR_console_io(
++			CONSOLEIO_write, count, (char *)s)) > 0)) {
++		count -= rc;
++		s += rc;
++	}
++}
++
++static struct tty_driver *kcons_device(struct console *c, int *index)
++{
++	*index = 0;
++	return xencons_driver;
++}
++
++static struct console kcons_info = {
++	.device	= kcons_device,
++	.flags	= CON_PRINTBUFFER,
++	.index	= -1,
++};
++
++#define __RETCODE 0
++static int __init xen_console_init(void)
++{
++	if (xen_init() < 0)
++		return __RETCODE;
++
++	if (xen_start_info->flags & SIF_INITDOMAIN) {
++		if (xc_mode == XC_DEFAULT)
++			xc_mode = XC_SERIAL;
++		kcons_info.write = kcons_write_dom0;
++		if (xc_mode == XC_SERIAL)
++			kcons_info.flags |= CON_ENABLED;
++	} else {
++		if (xc_mode == XC_DEFAULT)
++			xc_mode = XC_TTY;
++		kcons_info.write = kcons_write;
++	}
++
++	switch (xc_mode) {
++	case XC_SERIAL:
++		strcpy(kcons_info.name, "ttyS");
++		if (xc_num == -1)
++			xc_num = 0;
++		break;
++
++	case XC_TTY:
++		strcpy(kcons_info.name, "tty");
++		if (xc_num == -1)
++			xc_num = 1;
++		break;
++
++	default:
++		return __RETCODE;
++	}
++
++	wbuf = alloc_bootmem(wbuf_size);
++
++	register_console(&kcons_info);
++
++	return __RETCODE;
++}
++console_initcall(xen_console_init);
++
++/*** Useful function for console debugging -- goes straight to Xen. ***/
++asmlinkage int xprintk(const char *fmt, ...)
++{
++	va_list args;
++	int printk_len;
++	static char printk_buf[1024];
++    
++	/* Emit the output into the temporary buffer */
++	va_start(args, fmt);
++	printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
++	va_end(args);
++
++	/* Send the processed output directly to Xen. */
++	kcons_write_dom0(NULL, printk_buf, printk_len);
++
++	return 0;
++}
++
++/*** Forcibly flush console data before dying. ***/
++void xencons_force_flush(void)
++{
++	int sz;
++
++	/* Emergency console is synchronous, so there's nothing to flush. */
++	if (xen_start_info->flags & SIF_INITDOMAIN)
++		return;
++
++	/* Spin until console data is flushed through to the daemon. */
++	while (wc != wp) {
++		int sent = 0;
++		if ((sz = wp - wc) == 0)
++			continue;
++		sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++		if (sent > 0)
++			wc += sent;
++	}
++}
++
++
++/******************** User-space console driver (/dev/console) ************/
++
++#define DRV(_d)         (_d)
++#define TTY_INDEX(_tty) ((_tty)->index)
++
++static struct termios *xencons_termios[MAX_NR_CONSOLES];
++static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
++static struct tty_struct *xencons_tty;
++static int xencons_priv_irq;
++static char x_char;
++
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
++{
++	int           i;
++	unsigned long flags;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	if (xencons_tty == NULL)
++		goto out;
++
++	for (i = 0; i < len; i++) {
++#ifdef CONFIG_MAGIC_SYSRQ
++		if (sysrq_enabled) {
++			if (buf[i] == '\x0f') { /* ^O */
++				sysrq_requested = jiffies;
++				continue; /* don't print the sysrq key */
++			} else if (sysrq_requested) {
++				unsigned long sysrq_timeout =
++					sysrq_requested + HZ*2;
++				sysrq_requested = 0;
++				if (time_before(jiffies, sysrq_timeout)) {
++					spin_unlock_irqrestore(
++						&xencons_lock, flags);
++					handle_sysrq(
++						buf[i], regs, xencons_tty);
++					spin_lock_irqsave(
++						&xencons_lock, flags);
++					continue;
++				}
++			}
++		}
++#endif
++		tty_insert_flip_char(xencons_tty, buf[i], 0);
++	}
++	tty_flip_buffer_push(xencons_tty);
++
++ out:
++	spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void __xencons_tx_flush(void)
++{
++	int sent, sz, work_done = 0;
++
++	if (x_char) {
++		if (xen_start_info->flags & SIF_INITDOMAIN)
++			kcons_write_dom0(NULL, &x_char, 1);
++		else
++			while (x_char)
++				if (xencons_ring_send(&x_char, 1) == 1)
++					break;
++		x_char = 0;
++		work_done = 1;
++	}
++
++	while (wc != wp) {
++		sz = wp - wc;
++		if (sz > (wbuf_size - WBUF_MASK(wc)))
++			sz = wbuf_size - WBUF_MASK(wc);
++		if (xen_start_info->flags & SIF_INITDOMAIN) {
++			kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
++			wc += sz;
++		} else {
++			sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++			if (sent == 0)
++				break;
++			wc += sent;
++		}
++		work_done = 1;
++	}
++
++	if (work_done && (xencons_tty != NULL)) {
++		wake_up_interruptible(&xencons_tty->write_wait);
++		if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
++		    (xencons_tty->ldisc.write_wakeup != NULL))
++			(xencons_tty->ldisc.write_wakeup)(xencons_tty);
++	}
++}
++
++void xencons_tx(void)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	__xencons_tx_flush();
++	spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++/* Privileged receive callback and transmit kicker. */
++static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
++                                          struct pt_regs *regs)
++{
++	static char rbuf[16];
++	int         l;
++
++	while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
++		xencons_rx(rbuf, l, regs);
++
++	xencons_tx();
++
++	return IRQ_HANDLED;
++}
++
++static int xencons_write_room(struct tty_struct *tty)
++{
++	return wbuf_size - (wp - wc);
++}
++
++static int xencons_chars_in_buffer(struct tty_struct *tty)
++{
++	return wp - wc;
++}
++
++static void xencons_send_xchar(struct tty_struct *tty, char ch)
++{
++	unsigned long flags;
++
++	if (TTY_INDEX(tty) != 0)
++		return;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	x_char = ch;
++	__xencons_tx_flush();
++	spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_throttle(struct tty_struct *tty)
++{
++	if (TTY_INDEX(tty) != 0)
++		return;
++
++	if (I_IXOFF(tty))
++		xencons_send_xchar(tty, STOP_CHAR(tty));
++}
++
++static void xencons_unthrottle(struct tty_struct *tty)
++{
++	if (TTY_INDEX(tty) != 0)
++		return;
++
++	if (I_IXOFF(tty)) {
++		if (x_char != 0)
++			x_char = 0;
++		else
++			xencons_send_xchar(tty, START_CHAR(tty));
++	}
++}
++
++static void xencons_flush_buffer(struct tty_struct *tty)
++{
++	unsigned long flags;
++
++	if (TTY_INDEX(tty) != 0)
++		return;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	wc = wp = 0;
++	spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static inline int __xencons_put_char(int ch)
++{
++	char _ch = (char)ch;
++	if ((wp - wc) == wbuf_size)
++		return 0;
++	wbuf[WBUF_MASK(wp++)] = _ch;
++	return 1;
++}
++
++static int xencons_write(
++	struct tty_struct *tty,
++	const unsigned char *buf,
++	int count)
++{
++	int i;
++	unsigned long flags;
++
++	if (TTY_INDEX(tty) != 0)
++		return count;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++
++	for (i = 0; i < count; i++)
++		if (!__xencons_put_char(buf[i]))
++			break;
++
++	if (i != 0)
++		__xencons_tx_flush();
++
++	spin_unlock_irqrestore(&xencons_lock, flags);
++
++	return i;
++}
++
++static void xencons_put_char(struct tty_struct *tty, u_char ch)
++{
++	unsigned long flags;
++
++	if (TTY_INDEX(tty) != 0)
++		return;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	(void)__xencons_put_char(ch);
++	spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_flush_chars(struct tty_struct *tty)
++{
++	unsigned long flags;
++
++	if (TTY_INDEX(tty) != 0)
++		return;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	__xencons_tx_flush();
++	spin_unlock_irqrestore(&xencons_lock, flags);    
++}
++
++static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
++{
++	unsigned long orig_jiffies = jiffies;
++
++	if (TTY_INDEX(tty) != 0)
++		return;
++
++	while (DRV(tty->driver)->chars_in_buffer(tty))
++	{
++		set_current_state(TASK_INTERRUPTIBLE);
++		schedule_timeout(1);
++		if (signal_pending(current))
++			break;
++		if ( (timeout != 0) &&
++		     time_after(jiffies, orig_jiffies + timeout) )
++			break;
++	}
++    
++	set_current_state(TASK_RUNNING);
++}
++
++static int xencons_open(struct tty_struct *tty, struct file *filp)
++{
++	unsigned long flags;
++
++	if (TTY_INDEX(tty) != 0)
++		return 0;
++
++	spin_lock_irqsave(&xencons_lock, flags);
++	tty->driver_data = NULL;
++	if (xencons_tty == NULL)
++		xencons_tty = tty;
++	__xencons_tx_flush();
++	spin_unlock_irqrestore(&xencons_lock, flags);    
++
++	return 0;
++}
++
++static void xencons_close(struct tty_struct *tty, struct file *filp)
++{
++	unsigned long flags;
++
++	if (TTY_INDEX(tty) != 0)
++		return;
++
++	if (tty->count == 1) {
++		tty->closing = 1;
++		tty_wait_until_sent(tty, 0);
++		if (DRV(tty->driver)->flush_buffer != NULL)
++			DRV(tty->driver)->flush_buffer(tty);
++		if (tty->ldisc.flush_buffer != NULL)
++			tty->ldisc.flush_buffer(tty);
++		tty->closing = 0;
++		spin_lock_irqsave(&xencons_lock, flags);
++		xencons_tty = NULL;
++		spin_unlock_irqrestore(&xencons_lock, flags);    
++	}
++}
++
++static struct tty_operations xencons_ops = {
++	.open = xencons_open,
++	.close = xencons_close,
++	.write = xencons_write,
++	.write_room = xencons_write_room,
++	.put_char = xencons_put_char,
++	.flush_chars = xencons_flush_chars,
++	.chars_in_buffer = xencons_chars_in_buffer,
++	.send_xchar = xencons_send_xchar,
++	.flush_buffer = xencons_flush_buffer,
++	.throttle = xencons_throttle,
++	.unthrottle = xencons_unthrottle,
++	.wait_until_sent = xencons_wait_until_sent,
++};
++
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++static const char *xennullcon_startup(void)
++{
++	return NULL;
++}
++
++static int xennullcon_dummy(void)
++{
++	return 0;
++}
++
++#define DUMMY (void *)xennullcon_dummy
++
++/*
++ *  The console `switch' structure for the dummy console
++ *
++ *  Most of the operations are dummies.
++ */
++
++const struct consw xennull_con = {
++	.owner =		THIS_MODULE,
++	.con_startup =	xennullcon_startup,
++	.con_init =		DUMMY,
++	.con_deinit =	DUMMY,
++	.con_clear =	DUMMY,
++	.con_putc =		DUMMY,
++	.con_putcs =	DUMMY,
++	.con_cursor =	DUMMY,
++	.con_scroll =	DUMMY,
++	.con_bmove =	DUMMY,
++	.con_switch =	DUMMY,
++	.con_blank =	DUMMY,
++	.con_font_set =	DUMMY,
++	.con_font_get =	DUMMY,
++	.con_font_default =	DUMMY,
++	.con_font_copy =	DUMMY,
++	.con_set_palette =	DUMMY,
++	.con_scrolldelta =	DUMMY,
++};
++#endif
++
++static int __init xencons_init(void)
++{
++	int rc;
++
++	if (xen_init() < 0)
++		return -ENODEV;
++
++	if (xc_mode == XC_OFF)
++		return 0;
++
++	xencons_ring_init();
++
++	xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
++					  1 : MAX_NR_CONSOLES);
++	if (xencons_driver == NULL)
++		return -ENOMEM;
++
++	DRV(xencons_driver)->name            = "xencons";
++	DRV(xencons_driver)->major           = TTY_MAJOR;
++	DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
++	DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
++	DRV(xencons_driver)->init_termios    = tty_std_termios;
++	DRV(xencons_driver)->flags           = 
++		TTY_DRIVER_REAL_RAW |
++		TTY_DRIVER_RESET_TERMIOS |
++		TTY_DRIVER_NO_DEVFS;
++	DRV(xencons_driver)->termios         = xencons_termios;
++	DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
++
++	if (xc_mode == XC_SERIAL)
++	{
++		DRV(xencons_driver)->name        = "ttyS";
++		DRV(xencons_driver)->minor_start = 64 + xc_num;
++		DRV(xencons_driver)->name_base   = 0 + xc_num;
++	} else {
++		DRV(xencons_driver)->name        = "tty";
++		DRV(xencons_driver)->minor_start = xc_num;
++		DRV(xencons_driver)->name_base   = xc_num;
++	}
++
++	tty_set_operations(xencons_driver, &xencons_ops);
++
++	if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
++		printk("WARNING: Failed to register Xen virtual "
++		       "console driver as '%s%d'\n",
++		       DRV(xencons_driver)->name,
++		       DRV(xencons_driver)->name_base);
++		put_tty_driver(xencons_driver);
++		xencons_driver = NULL;
++		return rc;
++	}
++
++	tty_register_device(xencons_driver, 0, NULL);
++
++	if (xen_start_info->flags & SIF_INITDOMAIN) {
++		xencons_priv_irq = bind_virq_to_irqhandler(
++			VIRQ_CONSOLE,
++			0,
++			xencons_priv_interrupt,
++			0,
++			"console",
++			NULL);
++		BUG_ON(xencons_priv_irq < 0);
++	}
++
++	printk("Xen virtual console successfully installed as %s%d\n",
++	       DRV(xencons_driver)->name,
++	       DRV(xencons_driver)->name_base );
++    
++	return 0;
++}
++
++module_init(xencons_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
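
The boot parameters parsed by xencons_setup() and xencons_bufsz_setup() above
take the following forms (the numeric values are examples):

    xencons=off           console disabled (XC_OFF)
    xencons=tty           attach to /dev/tty1; tty2-63 become dummy consoles
    xencons=ttyS0         attach to /dev/ttyS0; the number is optional and defaults to 0
    xencons_bufsz=16384   shared transmit buffer size, rounded up to a power of two (default 4096)
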
+diff -Nurp pristine-linux-2.6.12/drivers/xen/console/Makefile linux-2.6.12-xen/drivers/xen/console/Makefile
+--- pristine-linux-2.6.12/drivers/xen/console/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/console/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,2 @@
++
++obj-y	:= console.o xencons_ring.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/console/xencons_ring.c linux-2.6.12-xen/drivers/xen/console/xencons_ring.c
+--- pristine-linux-2.6.12/drivers/xen/console/xencons_ring.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/console/xencons_ring.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,125 @@
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++
++#include <asm/hypervisor.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/xencons.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <asm-xen/xen-public/io/console.h>
++
++static int xencons_irq;
++
++static inline struct xencons_interface *xencons_interface(void)
++{
++	return mfn_to_virt(xen_start_info->console_mfn);
++}
++
++static inline void notify_daemon(void)
++{
++	/* Use evtchn: this is called early, before irq is set up. */
++	notify_remote_via_evtchn(xen_start_info->console_evtchn);
++}
++
++int xencons_ring_send(const char *data, unsigned len)
++{
++	int sent = 0;
++	struct xencons_interface *intf = xencons_interface();
++	XENCONS_RING_IDX cons, prod;
++
++	cons = intf->out_cons;
++	prod = intf->out_prod;
++	mb();
++	BUG_ON((prod - cons) > sizeof(intf->out));
++
++	while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
++		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
++
++	wmb();
++	intf->out_prod = prod;
++
++	notify_daemon();
++
++	return sent;
++}	
++
++static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
++{
++	struct xencons_interface *intf = xencons_interface();
++	XENCONS_RING_IDX cons, prod;
++
++	cons = intf->in_cons;
++	prod = intf->in_prod;
++	mb();
++	BUG_ON((prod - cons) > sizeof(intf->in));
++
++	while (cons != prod) {
++		xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1, regs);
++		cons++;
++	}
++
++	mb();
++	intf->in_cons = cons;
++
++	notify_daemon();
++
++	xencons_tx();
++
++	return IRQ_HANDLED;
++}
++
++int xencons_ring_init(void)
++{
++	int err;
++
++	if (xencons_irq)
++		unbind_from_irqhandler(xencons_irq, NULL);
++	xencons_irq = 0;
++
++	if (!xen_start_info->console_evtchn)
++		return 0;
++
++	err = bind_evtchn_to_irqhandler(
++		xen_start_info->console_evtchn,
++		handle_input, 0, "xencons", NULL);
++	if (err <= 0) {
++		printk(KERN_ERR "XEN console request irq failed %i\n", err);
++		return err;
++	}
++
++	xencons_irq = err;
++
++	/* In case we have in-flight data after save/restore... */
++	notify_daemon();
++
++	return 0;
++}
++
++void xencons_resume(void)
++{
++	(void)xencons_ring_init();
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
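
xencons_ring_send() and handle_input() above rely on free-running
producer/consumer indices: the indices only ever increase, (prod - cons) gives
the number of occupied slots even across wrap-around thanks to unsigned
arithmetic, and MASK_XENCONS_IDX() reduces an index to a buffer offset. A
stand-alone sketch of the same idiom with a hypothetical 16-byte buffer
(the real ring additionally needs the memory barriers shown above, because
producer and consumer run in different domains):

    /* sketch of the free-running index ring used by the console ring */
    #define RING_SIZE 16                    /* must be a power of two */
    #define RING_MASK(i) ((i) & (RING_SIZE - 1))

    static char ring[RING_SIZE];
    static unsigned int cons, prod;         /* free-running; wrap naturally */

    static int ring_put(char c)
    {
        if ((prod - cons) >= RING_SIZE)     /* full: every slot occupied */
            return 0;
        ring[RING_MASK(prod++)] = c;
        return 1;
    }

    static int ring_get(char *c)
    {
        if (cons == prod)                   /* empty */
            return 0;
        *c = ring[RING_MASK(cons++)];
        return 1;
    }
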
+diff -Nurp pristine-linux-2.6.12/drivers/xen/evtchn/evtchn.c linux-2.6.12-xen/drivers/xen/evtchn/evtchn.c
+--- pristine-linux-2.6.12/drivers/xen/evtchn/evtchn.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/evtchn/evtchn.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,459 @@
++/******************************************************************************
++ * evtchn.c
++ * 
++ * Driver for receiving and demuxing event-channel signals.
++ * 
++ * Copyright (c) 2004-2005, K A Fraser
++ * Multi-process extensions Copyright (c) 2004, Steven Smith
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/errno.h>
++#include <linux/miscdevice.h>
++#include <linux/major.h>
++#include <linux/proc_fs.h>
++#include <linux/stat.h>
++#include <linux/poll.h>
++#include <linux/irq.h>
++#include <linux/init.h>
++#include <linux/gfp.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/linux-public/evtchn.h>
++
++struct per_user_data {
++	/* Notification ring, accessed via /dev/xen/evtchn. */
++#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
++#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
++	evtchn_port_t *ring;
++	unsigned int ring_cons, ring_prod, ring_overflow;
++
++	/* Processes wait on this queue when ring is empty. */
++	wait_queue_head_t evtchn_wait;
++	struct fasync_struct *evtchn_async_queue;
++};
++
++/* Who's bound to each port? */
++static struct per_user_data *port_user[NR_EVENT_CHANNELS];
++static spinlock_t port_user_lock;
++
++void evtchn_device_upcall(int port)
++{
++	struct per_user_data *u;
++
++	spin_lock(&port_user_lock);
++
++	mask_evtchn(port);
++	clear_evtchn(port);
++
++	if ((u = port_user[port]) != NULL) {
++		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
++			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
++			if (u->ring_cons == u->ring_prod++) {
++				wake_up_interruptible(&u->evtchn_wait);
++				kill_fasync(&u->evtchn_async_queue,
++					    SIGIO, POLL_IN);
++			}
++		} else {
++			u->ring_overflow = 1;
++		}
++	}
++
++	spin_unlock(&port_user_lock);
++}
++
++static ssize_t evtchn_read(struct file *file, char __user *buf,
++                           size_t count, loff_t *ppos)
++{
++	int rc;
++	unsigned int c, p, bytes1 = 0, bytes2 = 0;
++	struct per_user_data *u = file->private_data;
++
++	/* Whole number of ports. */
++	count &= ~(sizeof(evtchn_port_t)-1);
++
++	if (count == 0)
++		return 0;
++
++	if (count > PAGE_SIZE)
++		count = PAGE_SIZE;
++
++	for (;;) {
++		if (u->ring_overflow)
++			return -EFBIG;
++
++		if ((c = u->ring_cons) != (p = u->ring_prod))
++			break;
++
++		if (file->f_flags & O_NONBLOCK)
++			return -EAGAIN;
++
++		rc = wait_event_interruptible(
++			u->evtchn_wait, u->ring_cons != u->ring_prod);
++		if (rc)
++			return rc;
++	}
++
++	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
++	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
++		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
++			sizeof(evtchn_port_t);
++		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
++	} else {
++		bytes1 = (p - c) * sizeof(evtchn_port_t);
++		bytes2 = 0;
++	}
++
++	/* Truncate chunks according to caller's maximum byte count. */
++	if (bytes1 > count) {
++		bytes1 = count;
++		bytes2 = 0;
++	} else if ((bytes1 + bytes2) > count) {
++		bytes2 = count - bytes1;
++	}
++
++	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
++	    ((bytes2 != 0) &&
++	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
++		return -EFAULT;
++
++	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
++
++	return bytes1 + bytes2;
++}
++
++static ssize_t evtchn_write(struct file *file, const char __user *buf,
++                            size_t count, loff_t *ppos)
++{
++	int  rc, i;
++	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++	struct per_user_data *u = file->private_data;
++
++	if (kbuf == NULL)
++		return -ENOMEM;
++
++	/* Whole number of ports. */
++	count &= ~(sizeof(evtchn_port_t)-1);
++
++	if (count == 0) {
++		rc = 0;
++		goto out;
++	}
++
++	if (count > PAGE_SIZE)
++		count = PAGE_SIZE;
++
++	if (copy_from_user(kbuf, buf, count) != 0) {
++		rc = -EFAULT;
++		goto out;
++	}
++
++	spin_lock_irq(&port_user_lock);
++	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
++		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
++			unmask_evtchn(kbuf[i]);
++	spin_unlock_irq(&port_user_lock);
++
++	rc = count;
++
++ out:
++	free_page((unsigned long)kbuf);
++	return rc;
++}
++
++static void evtchn_bind_to_user(struct per_user_data *u, int port)
++{
++	spin_lock_irq(&port_user_lock);
++	BUG_ON(port_user[port] != NULL);
++	port_user[port] = u;
++	unmask_evtchn(port);
++	spin_unlock_irq(&port_user_lock);
++}
++
++static int evtchn_ioctl(struct inode *inode, struct file *file,
++                        unsigned int cmd, unsigned long arg)
++{
++	int rc;
++	struct per_user_data *u = file->private_data;
++	void __user *uarg = (void __user *) arg;
++	evtchn_op_t op = { 0 };
++
++	switch (cmd) {
++	case IOCTL_EVTCHN_BIND_VIRQ: {
++		struct ioctl_evtchn_bind_virq bind;
++
++		rc = -EFAULT;
++		if (copy_from_user(&bind, uarg, sizeof(bind)))
++			break;
++
++		op.cmd = EVTCHNOP_bind_virq;
++		op.u.bind_virq.virq = bind.virq;
++		op.u.bind_virq.vcpu = 0;
++		rc = HYPERVISOR_event_channel_op(&op);
++		if (rc != 0)
++			break;
++
++		rc = op.u.bind_virq.port;
++		evtchn_bind_to_user(u, rc);
++		break;
++	}
++
++	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
++		struct ioctl_evtchn_bind_interdomain bind;
++
++		rc = -EFAULT;
++		if (copy_from_user(&bind, uarg, sizeof(bind)))
++			break;
++
++		op.cmd = EVTCHNOP_bind_interdomain;
++		op.u.bind_interdomain.remote_dom  = bind.remote_domain;
++		op.u.bind_interdomain.remote_port = bind.remote_port;
++		rc = HYPERVISOR_event_channel_op(&op);
++		if (rc != 0)
++			break;
++
++		rc = op.u.bind_interdomain.local_port;
++		evtchn_bind_to_user(u, rc);
++		break;
++	}
++
++	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
++		struct ioctl_evtchn_bind_unbound_port bind;
++
++		rc = -EFAULT;
++		if (copy_from_user(&bind, uarg, sizeof(bind)))
++			break;
++
++		op.cmd = EVTCHNOP_alloc_unbound;
++		op.u.alloc_unbound.dom        = DOMID_SELF;
++		op.u.alloc_unbound.remote_dom = bind.remote_domain;
++		rc = HYPERVISOR_event_channel_op(&op);
++		if (rc != 0)
++			break;
++
++		rc = op.u.alloc_unbound.port;
++		evtchn_bind_to_user(u, rc);
++		break;
++	}
++
++	case IOCTL_EVTCHN_UNBIND: {
++		struct ioctl_evtchn_unbind unbind;
++		int ret;
++
++		rc = -EFAULT;
++		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
++			break;
++
++		rc = -EINVAL;
++		if (unbind.port >= NR_EVENT_CHANNELS)
++			break;
++
++		spin_lock_irq(&port_user_lock);
++    
++		rc = -ENOTCONN;
++		if (port_user[unbind.port] != u) {
++			spin_unlock_irq(&port_user_lock);
++			break;
++		}
++
++		port_user[unbind.port] = NULL;
++		mask_evtchn(unbind.port);
++
++		spin_unlock_irq(&port_user_lock);
++
++		op.cmd = EVTCHNOP_close;
++		op.u.close.port = unbind.port;
++		ret = HYPERVISOR_event_channel_op(&op);
++		BUG_ON(ret);
++
++		rc = 0;
++		break;
++	}
++
++	case IOCTL_EVTCHN_NOTIFY: {
++		struct ioctl_evtchn_notify notify;
++
++		rc = -EFAULT;
++		if (copy_from_user(&notify, uarg, sizeof(notify)))
++			break;
++
++		if (notify.port >= NR_EVENT_CHANNELS) {
++			rc = -EINVAL;
++		} else if (port_user[notify.port] != u) {
++			rc = -ENOTCONN;
++		} else {
++			notify_remote_via_evtchn(notify.port);
++			rc = 0;
++		}
++		break;
++	}
++
++	case IOCTL_EVTCHN_RESET: {
++		/* Initialise the ring to empty. Clear errors. */
++		spin_lock_irq(&port_user_lock);
++		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
++		spin_unlock_irq(&port_user_lock);
++		rc = 0;
++		break;
++	}
++
++	default:
++		rc = -ENOSYS;
++		break;
++	}
++
++	return rc;
++}
++
++static unsigned int evtchn_poll(struct file *file, poll_table *wait)
++{
++	unsigned int mask = POLLOUT | POLLWRNORM;
++	struct per_user_data *u = file->private_data;
++
++	poll_wait(file, &u->evtchn_wait, wait);
++	if (u->ring_cons != u->ring_prod)
++		mask |= POLLIN | POLLRDNORM;
++	if (u->ring_overflow)
++		mask = POLLERR;
++	return mask;
++}
++
++static int evtchn_fasync(int fd, struct file *filp, int on)
++{
++	struct per_user_data *u = filp->private_data;
++	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
++}
++
++static int evtchn_open(struct inode *inode, struct file *filp)
++{
++	struct per_user_data *u;
++
++	if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
++		return -ENOMEM;
++
++	memset(u, 0, sizeof(*u));
++	init_waitqueue_head(&u->evtchn_wait);
++
++	u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++	if (u->ring == NULL) {
++		kfree(u);
++		return -ENOMEM;
++	}
++
++	filp->private_data = u;
++
++	return 0;
++}
++
++static int evtchn_release(struct inode *inode, struct file *filp)
++{
++	int i;
++	struct per_user_data *u = filp->private_data;
++	evtchn_op_t op = { 0 };
++
++	spin_lock_irq(&port_user_lock);
++
++	free_page((unsigned long)u->ring);
++
++	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
++		int ret;
++		if (port_user[i] != u)
++			continue;
++
++		port_user[i] = NULL;
++		mask_evtchn(i);
++
++		op.cmd = EVTCHNOP_close;
++		op.u.close.port = i;
++		ret = HYPERVISOR_event_channel_op(&op);
++		BUG_ON(ret);
++	}
++
++	spin_unlock_irq(&port_user_lock);
++
++	kfree(u);
++
++	return 0;
++}
++
++static struct file_operations evtchn_fops = {
++	.owner   = THIS_MODULE,
++	.read    = evtchn_read,
++	.write   = evtchn_write,
++	.ioctl   = evtchn_ioctl,
++	.poll    = evtchn_poll,
++	.fasync  = evtchn_fasync,
++	.open    = evtchn_open,
++	.release = evtchn_release,
++};
++
++static struct miscdevice evtchn_miscdev = {
++	.minor        = EVTCHN_MINOR,
++	.name         = "evtchn",
++	.fops         = &evtchn_fops,
++	.devfs_name   = "misc/evtchn",
++};
++
++static int __init evtchn_init(void)
++{
++	int err;
++
++	spin_lock_init(&port_user_lock);
++	memset(port_user, 0, sizeof(port_user));
++
++	/* Create '/dev/misc/evtchn'. */
++	err = misc_register(&evtchn_miscdev);
++	if (err != 0) {
++		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
++		return err;
++	}
++
++	printk("Event-channel device installed.\n");
++
++	return 0;
++}
++
++static void evtchn_cleanup(void)
++{
++	misc_deregister(&evtchn_miscdev);
++}
++
++module_init(evtchn_init);
++module_exit(evtchn_cleanup);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
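
A user-space sketch of how this device is driven, following the ioctl and
read/write semantics above. The device node name, header path and struct
layouts are inferred from the code and from asm-xen/linux-public/evtchn.h as
used here; treat them as assumptions if that header differs:

    /* sketch: bind an unbound event-channel port and wait for one event */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <xen/linux-public/evtchn.h>    /* header path is an assumption */

    int main(void)
    {
        struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
        struct ioctl_evtchn_unbind unbind;
        evtchn_port_t pending;
        int port;
        int fd = open("/dev/xen/evtchn", O_RDWR);   /* devfs name: misc/evtchn */

        if (fd < 0)
            return 1;

        /* the ioctl returns the newly allocated local port */
        port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
        if (port < 0)
            return 1;

        /* read() yields pending ports; each returned port is left masked */
        if (read(fd, &pending, sizeof(pending)) == (ssize_t)sizeof(pending)) {
            /* ... service the event, then write the port back to unmask it */
            write(fd, &pending, sizeof(pending));
        }

        unbind.port = port;
        ioctl(fd, IOCTL_EVTCHN_UNBIND, &unbind);
        close(fd);
        return 0;
    }
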
+diff -Nurp pristine-linux-2.6.12/drivers/xen/evtchn/Makefile linux-2.6.12-xen/drivers/xen/evtchn/Makefile
+--- pristine-linux-2.6.12/drivers/xen/evtchn/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/evtchn/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,2 @@
++
++obj-y	:= evtchn.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/Makefile linux-2.6.12-xen/drivers/xen/Makefile
+--- pristine-linux-2.6.12/drivers/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,19 @@
++
++obj-y	+= net_driver_util.o
++obj-y	+= util.o
++
++obj-y	+= char/
++obj-y	+= console/
++obj-y	+= evtchn/
++obj-y	+= balloon/
++obj-y	+= privcmd/
++obj-y	+= xenbus/
++
++obj-$(CONFIG_XEN_BLKDEV_BACKEND)	+= blkback/
++obj-$(CONFIG_XEN_NETDEV_BACKEND)	+= netback/
++obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmback/
++obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= blkfront/
++obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
++obj-$(CONFIG_XEN_BLKDEV_TAP)    	+= blktap/
++obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront/
++
+diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/common.h linux-2.6.12-xen/drivers/xen/netback/common.h
+--- pristine-linux-2.6.12/drivers/xen/netback/common.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/netback/common.h	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,110 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/common.h
++ */
++
++#ifndef __NETIF__BACKEND__COMMON_H__
++#define __NETIF__BACKEND__COMMON_H__
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/xen-public/io/netif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++#include <asm-xen/xen-public/grant_table.h>
++#include <asm-xen/gnttab.h>
++#include <asm-xen/driver_util.h>
++
++#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
++                                    __FILE__ , __LINE__ , ## _a )
++#define IPRINTK(fmt, args...) \
++    printk(KERN_INFO "xen_net: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++    printk(KERN_WARNING "xen_net: " fmt, ##args)
++
++typedef struct netif_st {
++	/* Unique identifier for this interface. */
++	domid_t          domid;
++	unsigned int     handle;
++
++	u8               fe_dev_addr[6];
++
++	/* Physical parameters of the comms window. */
++	grant_handle_t   tx_shmem_handle;
++	grant_ref_t      tx_shmem_ref; 
++	grant_handle_t   rx_shmem_handle;
++	grant_ref_t      rx_shmem_ref; 
++	unsigned int     evtchn;
++	unsigned int     irq;
++
++	/* The shared rings and indexes. */
++	netif_tx_back_ring_t tx;
++	netif_rx_back_ring_t rx;
++	struct vm_struct *tx_comms_area;
++	struct vm_struct *rx_comms_area;
++
++	/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
++	RING_IDX rx_req_cons_peek;
++
++	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
++	unsigned long   credit_bytes;
++	unsigned long   credit_usec;
++	unsigned long   remaining_credit;
++	struct timer_list credit_timeout;
++
++	/* Miscellaneous private stuff. */
++	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
++	int active;
++	struct list_head list;  /* scheduling list */
++	atomic_t         refcnt;
++	struct net_device *dev;
++	struct net_device_stats stats;
++
++	struct work_struct free_work;
++} netif_t;
++
++#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
++
++void netif_creditlimit(netif_t *netif);
++void netif_disconnect(netif_t *netif);
++
++netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]);
++void free_netif(netif_t *netif);
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++	      unsigned long rx_ring_ref, unsigned int evtchn);
++
++#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define netif_put(_b)						\
++	do {							\
++		if ( atomic_dec_and_test(&(_b)->refcnt) )	\
++			free_netif(_b);				\
++	} while (0)
++
++void netif_xenbus_init(void);
++
++void netif_schedule_work(netif_t *netif);
++void netif_deschedule_work(netif_t *netif);
++
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
++struct net_device_stats *netif_be_get_stats(struct net_device *dev);
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++
++#endif /* __NETIF__BACKEND__COMMON_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
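
The netif_get()/netif_put() macros above pair with free_netif(), which defers
the real teardown (unregister_netdev(), unmapping the rings) to a workqueue,
presumably so the final reference may be dropped from atomic context. A
hypothetical caller therefore brackets asynchronous use of a netif_t like
this; the snippet is a sketch, not code from the patch:

    /* sketch: a hypothetical caller pinning a netif_t across deferred work */
    netif_get(netif);   /* take a reference before handing it to async code */
    /* ... timer / tasklet / irq handler uses 'netif' ... */
    netif_put(netif);   /* drop it; the last put calls free_netif(), which
                           queues the real teardown on a workqueue */
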
+diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/interface.c linux-2.6.12-xen/drivers/xen/netback/interface.c
+--- pristine-linux-2.6.12/drivers/xen/netback/interface.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/netback/interface.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,320 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/interface.c
++ * 
++ * Network-device interface management.
++ * 
++ * Copyright (c) 2004-2005, Keir Fraser
++ */
++
++#include "common.h"
++#include <linux/rtnetlink.h>
++
++static void __netif_up(netif_t *netif)
++{
++	struct net_device *dev = netif->dev;
++	spin_lock_bh(&dev->xmit_lock);
++	netif->active = 1;
++	spin_unlock_bh(&dev->xmit_lock);
++	enable_irq(netif->irq);
++	netif_schedule_work(netif);
++}
++
++static void __netif_down(netif_t *netif)
++{
++	struct net_device *dev = netif->dev;
++	disable_irq(netif->irq);
++	spin_lock_bh(&dev->xmit_lock);
++	netif->active = 0;
++	spin_unlock_bh(&dev->xmit_lock);
++	netif_deschedule_work(netif);
++}
++
++static int net_open(struct net_device *dev)
++{
++	netif_t *netif = netdev_priv(dev);
++	if (netif->status == CONNECTED)
++		__netif_up(netif);
++	netif_start_queue(dev);
++	return 0;
++}
++
++static int net_close(struct net_device *dev)
++{
++	netif_t *netif = netdev_priv(dev);
++	netif_stop_queue(dev);
++	if (netif->status == CONNECTED)
++		__netif_down(netif);
++	return 0;
++}
++
++netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN])
++{
++	int err = 0, i;
++	struct net_device *dev;
++	netif_t *netif;
++	char name[IFNAMSIZ] = {};
++
++	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
++	dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
++	if (dev == NULL) {
++		DPRINTK("Could not create netif: out of memory\n");
++		return ERR_PTR(-ENOMEM);
++	}
++
++	netif = netdev_priv(dev);
++	memset(netif, 0, sizeof(*netif));
++	netif->domid  = domid;
++	netif->handle = handle;
++	netif->status = DISCONNECTED;
++	atomic_set(&netif->refcnt, 0);
++	netif->dev = dev;
++
++	netif->credit_bytes = netif->remaining_credit = ~0UL;
++	netif->credit_usec  = 0UL;
++	init_timer(&netif->credit_timeout);
++
++	dev->hard_start_xmit = netif_be_start_xmit;
++	dev->get_stats       = netif_be_get_stats;
++	dev->open            = net_open;
++	dev->stop            = net_close;
++	dev->features        = NETIF_F_NO_CSUM;
++
++	/* Disable queuing. */
++	dev->tx_queue_len = 0;
++
++	for (i = 0; i < ETH_ALEN; i++)
++		if (be_mac[i] != 0)
++			break;
++	if (i == ETH_ALEN) {
++		/*
++		 * Initialise a dummy MAC address. We choose the numerically
++		 * largest non-broadcast address to prevent the address getting
++		 * stolen by an Ethernet bridge for STP purposes.
++                 * (FE:FF:FF:FF:FF:FF) 
++		 */ 
++		memset(dev->dev_addr, 0xFF, ETH_ALEN);
++		dev->dev_addr[0] &= ~0x01;
++	} else
++		memcpy(dev->dev_addr, be_mac, ETH_ALEN);
++
++	rtnl_lock();
++	err = register_netdevice(dev);
++	rtnl_unlock();
++	if (err) {
++		DPRINTK("Could not register new net device %s: err=%d\n",
++			dev->name, err);
++		free_netdev(dev);
++		return ERR_PTR(err);
++	}
++
++	DPRINTK("Successfully created netif\n");
++	return netif;
++}
++
++static int map_frontend_pages(
++	netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
++{
++	struct gnttab_map_grant_ref op;
++	int ret;
++
++	op.host_addr = (unsigned long)netif->tx_comms_area->addr;
++	op.flags     = GNTMAP_host_map;
++	op.ref       = tx_ring_ref;
++	op.dom       = netif->domid;
++    
++	lock_vm_area(netif->tx_comms_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++	unlock_vm_area(netif->tx_comms_area);
++	BUG_ON(ret);
++
++	if (op.status) { 
++		DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
++		return op.status;
++	}
++
++	netif->tx_shmem_ref    = tx_ring_ref;
++	netif->tx_shmem_handle = op.handle;
++
++	op.host_addr = (unsigned long)netif->rx_comms_area->addr;
++	op.flags     = GNTMAP_host_map;
++	op.ref       = rx_ring_ref;
++	op.dom       = netif->domid;
++
++	lock_vm_area(netif->rx_comms_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++	unlock_vm_area(netif->rx_comms_area);
++	BUG_ON(ret);
++
++	if (op.status) {
++		DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
++		return op.status;
++	}
++
++	netif->rx_shmem_ref    = rx_ring_ref;
++	netif->rx_shmem_handle = op.handle;
++
++	return 0;
++}
++
++static void unmap_frontend_pages(netif_t *netif)
++{
++	struct gnttab_unmap_grant_ref op;
++	int ret;
++
++	op.host_addr    = (unsigned long)netif->tx_comms_area->addr;
++	op.handle       = netif->tx_shmem_handle;
++	op.dev_bus_addr = 0;
++
++	lock_vm_area(netif->tx_comms_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
++	unlock_vm_area(netif->tx_comms_area);
++	BUG_ON(ret);
++
++	op.host_addr    = (unsigned long)netif->rx_comms_area->addr;
++	op.handle       = netif->rx_shmem_handle;
++	op.dev_bus_addr = 0;
++
++	lock_vm_area(netif->rx_comms_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
++	unlock_vm_area(netif->rx_comms_area);
++	BUG_ON(ret);
++}
++
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++	      unsigned long rx_ring_ref, unsigned int evtchn)
++{
++	int err = -ENOMEM;
++	netif_tx_sring_t *txs;
++	netif_rx_sring_t *rxs;
++	evtchn_op_t op = {
++		.cmd = EVTCHNOP_bind_interdomain,
++		.u.bind_interdomain.remote_dom = netif->domid,
++		.u.bind_interdomain.remote_port = evtchn };
++
++	/* Already connected through? */
++	if (netif->irq)
++		return 0;
++
++	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
++	if (netif->tx_comms_area == NULL)
++		return -ENOMEM;
++	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
++	if (netif->rx_comms_area == NULL)
++		goto err_rx;
++
++	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
++	if (err)
++		goto err_map;
++
++	err = HYPERVISOR_event_channel_op(&op);
++	if (err)
++		goto err_hypervisor;
++
++	netif->evtchn = op.u.bind_interdomain.local_port;
++
++	netif->irq = bind_evtchn_to_irqhandler(
++		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
++	disable_irq(netif->irq);
++
++	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
++	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
++
++	rxs = (netif_rx_sring_t *)
++		((char *)netif->rx_comms_area->addr);
++	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
++
++	netif->rx_req_cons_peek = 0;
++
++	netif_get(netif);
++	wmb(); /* Other CPUs see new state before interface is started. */
++
++	rtnl_lock();
++	netif->status = CONNECTED;
++	wmb();
++	if (netif_running(netif->dev))
++		__netif_up(netif);
++	rtnl_unlock();
++
++	return 0;
++err_hypervisor:
++	unmap_frontend_pages(netif);
++err_map:
++	free_vm_area(netif->rx_comms_area);
++err_rx:
++	free_vm_area(netif->tx_comms_area);
++	return err;
++}
++
++static void free_netif_callback(void *arg)
++{
++	netif_t *netif = (netif_t *)arg;
++
++	if (netif->irq)
++		unbind_from_irqhandler(netif->irq, netif);
++	
++	unregister_netdev(netif->dev);
++
++	if (netif->tx.sring) {
++		unmap_frontend_pages(netif);
++		free_vm_area(netif->tx_comms_area);
++		free_vm_area(netif->rx_comms_area);
++	}
++
++	free_netdev(netif->dev);
++}
++
++void free_netif(netif_t *netif)
++{
++	INIT_WORK(&netif->free_work, free_netif_callback, (void *)netif);
++	schedule_work(&netif->free_work);
++}
++
++void netif_creditlimit(netif_t *netif)
++{
++#if 0
++	/* Set the credit limit (reset remaining credit to new limit). */
++	netif->credit_bytes     = creditlimit->credit_bytes;
++	netif->remaining_credit = creditlimit->credit_bytes;
++	netif->credit_usec      = creditlimit->period_usec;
++
++	if (netif->status == CONNECTED) {
++		/*
++		 * Schedule work so that any packets waiting under previous
++		 * credit limit are dealt with (acts as a replenishment point).
++		 */
++		netif->credit_timeout.expires = jiffies;
++		netif_schedule_work(netif);
++	}
++#endif
++}
++
++void netif_disconnect(netif_t *netif)
++{
++	switch (netif->status) {
++	case CONNECTED:
++		rtnl_lock();
++		netif->status = DISCONNECTING;
++		wmb();
++		if (netif_running(netif->dev))
++			__netif_down(netif);
++		rtnl_unlock();
++		netif_put(netif);
++		break;
++	case DISCONNECTED:
++		BUG_ON(atomic_read(&netif->refcnt) != 0);
++		free_netif(netif);
++		break;
++	default:
++		BUG();
++	}
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/loopback.c linux-2.6.12-xen/drivers/xen/netback/loopback.c
+--- pristine-linux-2.6.12/drivers/xen/netback/loopback.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/netback/loopback.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,198 @@
++/******************************************************************************
++ * netback/loopback.c
++ * 
++ * A two-interface loopback device to emulate a local netfront-netback
++ * connection. This ensures that local packet delivery looks identical
++ * to inter-domain delivery. Most importantly, locally delivered packets that
++ * originate in other domains get *copied* when they traverse this
++ * driver. This prevents unbounded delays in socket-buffer queues from
++ * causing the netback driver to "seize up".
++ * 
++ * This driver creates a symmetric pair of loopback interfaces with names
++ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
++ * bridge, just like a proper netback interface, while a local IP interface
++ * is configured on 'veth0'.
++ * 
++ * As with a real netback interface, vif0.0 is configured with a suitable
++ * dummy MAC address. No default is provided for veth0: a reasonable strategy
++ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
++ * (to avoid confusing the Etherbridge).
++ * 
++ * Copyright (c) 2005 K A Fraser
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ethtool.h>
++#include <net/dst.h>
++
++static int nloopbacks = 8;
++module_param(nloopbacks, int, 0);
++MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
++
++struct net_private {
++	struct net_device *loopback_dev;
++	struct net_device_stats stats;
++};
++
++static int loopback_open(struct net_device *dev)
++{
++	struct net_private *np = netdev_priv(dev);
++	memset(&np->stats, 0, sizeof(np->stats));
++	netif_start_queue(dev);
++	return 0;
++}
++
++static int loopback_close(struct net_device *dev)
++{
++	netif_stop_queue(dev);
++	return 0;
++}
++
++static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	struct net_private *np = netdev_priv(dev);
++
++	dst_release(skb->dst);
++	skb->dst = NULL;
++
++	skb_orphan(skb);
++
++	np->stats.tx_bytes += skb->len;
++	np->stats.tx_packets++;
++
++	/* Switch to loopback context. */
++	dev = np->loopback_dev;
++	np  = netdev_priv(dev);
++
++	np->stats.rx_bytes += skb->len;
++	np->stats.rx_packets++;
++
++	if (skb->ip_summed == CHECKSUM_HW) {
++		/* Defer checksum calculation. */
++		skb->proto_csum_blank = 1;
++		/* Must be a local packet: assert its integrity. */
++		skb->proto_csum_valid = 1;
++	}
++
++	skb->ip_summed = skb->proto_csum_valid ?
++		CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
++
++	skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
++	skb->protocol = eth_type_trans(skb, dev);
++	skb->dev      = dev;
++	dev->last_rx  = jiffies;
++	netif_rx(skb);
++
++	return 0;
++}
++
++static struct net_device_stats *loopback_get_stats(struct net_device *dev)
++{
++	struct net_private *np = netdev_priv(dev);
++	return &np->stats;
++}
++
++static void loopback_construct(struct net_device *dev, struct net_device *lo)
++{
++	struct net_private *np = netdev_priv(dev);
++
++	np->loopback_dev     = lo;
++
++	dev->open            = loopback_open;
++	dev->stop            = loopback_close;
++	dev->hard_start_xmit = loopback_start_xmit;
++	dev->get_stats       = loopback_get_stats;
++
++	dev->tx_queue_len    = 0;
++
++	dev->features        = NETIF_F_HIGHDMA | NETIF_F_LLTX;
++
++	/*
++	 * We do not set a jumbo MTU on the interface. Otherwise the network
++	 * stack will try to send large packets that will get dropped by the
++	 * Ethernet bridge (unless the physical Ethernet interface is
++	 * configured to transfer jumbo packets). If a larger MTU is desired
++	 * then the system administrator can specify it using the 'ifconfig'
++	 * command.
++	 */
++	/*dev->mtu             = 16*1024;*/
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++	.get_tx_csum = ethtool_op_get_tx_csum,
++	.set_tx_csum = ethtool_op_set_tx_csum,
++};
++
++static int __init make_loopback(int i)
++{
++	struct net_device *dev1, *dev2;
++	char dev_name[IFNAMSIZ];
++	int err = -ENOMEM;
++
++	sprintf(dev_name, "vif0.%d", i);
++	dev1 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++	sprintf(dev_name, "veth%d", i);
++	dev2 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++	if ((dev1 == NULL) || (dev2 == NULL))
++		goto fail;
++
++	loopback_construct(dev1, dev2);
++	loopback_construct(dev2, dev1);
++
++	dev1->features |= NETIF_F_NO_CSUM;
++	dev2->features |= NETIF_F_IP_CSUM;
++
++	SET_ETHTOOL_OPS(dev2, &network_ethtool_ops);
++
++	/*
++	 * Initialise a dummy MAC address for the 'dummy backend' interface. We
++	 * choose the numerically largest non-broadcast address to prevent the
++	 * address getting stolen by an Ethernet bridge for STP purposes.
++	 */
++	memset(dev1->dev_addr, 0xFF, ETH_ALEN);
++	dev1->dev_addr[0] &= ~0x01;
++
++	if ((err = register_netdev(dev1)) != 0)
++		goto fail;
++
++	if ((err = register_netdev(dev2)) != 0) {
++		unregister_netdev(dev1);
++		goto fail;
++	}
++
++	return 0;
++
++ fail:
++	kfree(dev1);
++	kfree(dev2);
++	return err;
++}
++
++static int __init loopback_init(void)
++{
++	int i, err = 0;
++
++	for (i = 0; i < nloopbacks; i++)
++		if ((err = make_loopback(i)) != 0)
++			break;
++
++	return err;
++}
++
++module_init(loopback_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/Makefile linux-2.6.12-xen/drivers/xen/netback/Makefile
+--- pristine-linux-2.6.12/drivers/xen/netback/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/netback/Makefile	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,2 @@
++
++obj-y	:= netback.o xenbus.o interface.o loopback.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/netback.c linux-2.6.12-xen/drivers/xen/netback/netback.c
+--- pristine-linux-2.6.12/drivers/xen/netback/netback.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/netback/netback.c	2006-03-05 23:36:30.000000000 +0100
+@@ -0,0 +1,828 @@
++/******************************************************************************
++ * drivers/xen/netback/netback.c
++ * 
++ * Back-end of the driver for virtual network devices. This portion of the
++ * driver exports a 'unified' network-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A 
++ * reference front-end implementation can be found in:
++ *  drivers/xen/netfront/netfront.c
++ * 
++ * Copyright (c) 2002-2005, K A Fraser
++ */
++
++#include "common.h"
++#include <asm-xen/balloon.h>
++#include <asm-xen/xen-public/memory.h>
++
++/*#define NETBE_DEBUG_INTERRUPT*/
++
++static void netif_idx_release(u16 pending_idx);
++static void netif_page_release(struct page *page);
++static void make_tx_response(netif_t *netif, 
++                             u16      id,
++                             s8       st);
++static int  make_rx_response(netif_t *netif, 
++                             u16      id, 
++                             s8       st,
++                             u16      offset,
++                             u16      size,
++                             u16      flags);
++
++static void net_tx_action(unsigned long unused);
++static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
++
++static void net_rx_action(unsigned long unused);
++static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
++
++static struct timer_list net_timer;
++
++#define MAX_PENDING_REQS 256
++
++static struct sk_buff_head rx_queue;
++static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
++static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
++static gnttab_transfer_t grant_rx_op[NET_RX_RING_SIZE];
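++/* Per-IRQ flags used to coalesce event-channel notifications within a batch. */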
++static unsigned char rx_notify[NR_IRQS];
++
++static unsigned long mmap_vstart;
++#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
++
++#define PKT_PROT_LEN 64
++
++static struct {
++	netif_tx_request_t req;
++	netif_t *netif;
++} pending_tx_info[MAX_PENDING_REQS];
++static u16 pending_ring[MAX_PENDING_REQS];
++typedef unsigned int PEND_RING_IDX;
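++/* NB. MAX_PENDING_REQS must be a power of two for MASK_PEND_IDX() to work. */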
++#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
++static PEND_RING_IDX pending_prod, pending_cons;
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
++
++/* Freed TX SKBs get batched on this ring before return to pending_ring. */
++static u16 dealloc_ring[MAX_PENDING_REQS];
++static PEND_RING_IDX dealloc_prod, dealloc_cons;
++
++static struct sk_buff_head tx_queue;
++
++static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
++static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
++static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
++
++static struct list_head net_schedule_list;
++static spinlock_t net_schedule_list_lock;
++
++#define MAX_MFN_ALLOC 64
++static unsigned long mfn_list[MAX_MFN_ALLOC];
++static unsigned int alloc_index = 0;
++static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
++
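++/*
++ * Pop a machine frame from the local cache, refilling the cache (up to
++ * MAX_MFN_ALLOC frames) from the hypervisor when it runs dry. Returns 0 if
++ * no frame could be allocated.
++ */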
++static unsigned long alloc_mfn(void)
++{
++	unsigned long mfn = 0, flags;
++	struct xen_memory_reservation reservation = {
++		.extent_start = mfn_list,
++		.nr_extents   = MAX_MFN_ALLOC,
++		.extent_order = 0,
++		.domid        = DOMID_SELF
++	};
++	spin_lock_irqsave(&mfn_lock, flags);
++	if ( unlikely(alloc_index == 0) )
++		alloc_index = HYPERVISOR_memory_op(
++			XENMEM_increase_reservation, &reservation);
++	if ( alloc_index != 0 )
++		mfn = mfn_list[--alloc_index];
++	spin_unlock_irqrestore(&mfn_lock, flags);
++	return mfn;
++}
++
++static inline void maybe_schedule_tx_action(void)
++{
++	smp_mb();
++	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
++	    !list_empty(&net_schedule_list))
++		tasklet_schedule(&net_tx_tasklet);
++}
++
++/*
++ * A gross way of confirming the origin of an skb data page. The slab
++ * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
++ */
++static inline int is_xen_skb(struct sk_buff *skb)
++{
++	extern kmem_cache_t *skbuff_cachep;
++	kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
++	return (cp == skbuff_cachep);
++}
++
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	netif_t *netif = netdev_priv(dev);
++
++	BUG_ON(skb->dev != dev);
++
++	/* Drop the packet if the target domain has no receive buffers. */
++	if (!netif->active || 
++	    (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
++	    ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
++	     NET_RX_RING_SIZE))
++		goto drop;
++
++	/*
++	 * We do not copy the packet unless:
++	 *  1. The data is shared; or
++	 *  2. The data is not allocated from our special cache.
++	 * NB. We also cannot cope with fragmented packets, but we won't get
++	 *     any because we do not advertise the NETIF_F_SG feature.
++	 */
++	if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
++		int hlen = skb->data - skb->head;
++		int ret;
++		struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
++		if ( unlikely(nskb == NULL) )
++			goto drop;
++		skb_reserve(nskb, hlen);
++		__skb_put(nskb, skb->len);
++		ret = skb_copy_bits(skb, -hlen, nskb->data - hlen,
++				     skb->len + hlen);
++		BUG_ON(ret);
++		nskb->dev = skb->dev;
++		nskb->proto_csum_valid = skb->proto_csum_valid;
++		dev_kfree_skb(skb);
++		skb = nskb;
++	}
++
++	netif->rx_req_cons_peek++;
++	netif_get(netif);
++
++	skb_queue_tail(&rx_queue, skb);
++	tasklet_schedule(&net_rx_tasklet);
++
++	return 0;
++
++ drop:
++	netif->stats.tx_dropped++;
++	dev_kfree_skb(skb);
++	return 0;
++}
++
++#if 0
++static void xen_network_done_notify(void)
++{
++	static struct net_device *eth0_dev = NULL;
++	if (unlikely(eth0_dev == NULL))
++		eth0_dev = __dev_get_by_name("eth0");
++	netif_rx_schedule(eth0_dev);
++}
++/* 
++ * Add following to poll() function in NAPI driver (Tigon3 is example):
++ *  if ( xen_network_done() )
++ *      tg3_enable_ints(tp); 
++ */
++int xen_network_done(void)
++{
++	return skb_queue_empty(&rx_queue);
++}
++#endif
++
++static void net_rx_action(unsigned long unused)
++{
++	netif_t *netif = NULL; 
++	s8 status;
++	u16 size, id, irq;
++	multicall_entry_t *mcl;
++	mmu_update_t *mmu;
++	gnttab_transfer_t *gop;
++	unsigned long vdata, old_mfn, new_mfn;
++	struct sk_buff_head rxq;
++	struct sk_buff *skb;
++	u16 notify_list[NET_RX_RING_SIZE];
++	int notify_nr = 0;
++	int ret;
++
++	skb_queue_head_init(&rxq);
++
++	mcl = rx_mcl;
++	mmu = rx_mmu;
++	gop = grant_rx_op;
++
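++	/*
++	 * For each queued skb: back the skb's data page with a freshly
++	 * allocated machine frame, then transfer the old frame to the
++	 * receiving domain via GNTTABOP_transfer. All hypercalls are batched.
++	 */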
++	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
++		netif   = netdev_priv(skb->dev);
++		vdata   = (unsigned long)skb->data;
++		old_mfn = virt_to_mfn(vdata);
++
++		/* Memory squeeze? Back off for an arbitrary while. */
++		if ((new_mfn = alloc_mfn()) == 0) {
++			if ( net_ratelimit() )
++				WPRINTK("Memory squeeze in netback driver.\n");
++			mod_timer(&net_timer, jiffies + HZ);
++			skb_queue_head(&rx_queue, skb);
++			break;
++		}
++		/*
++		 * Set the new P2M table entry before reassigning the old data
++		 * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
++		 */
++		set_phys_to_machine(__pa(skb->data) >> PAGE_SHIFT, new_mfn);
++
++		MULTI_update_va_mapping(mcl, vdata,
++					pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
++		mcl++;
++
++		gop->mfn = old_mfn;
++		gop->domid = netif->domid;
++		gop->ref = RING_GET_REQUEST(
++			&netif->rx, netif->rx.req_cons)->gref;
++		netif->rx.req_cons++;
++		gop++;
++
++		mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
++			MMU_MACHPHYS_UPDATE;
++		mmu->val = __pa(vdata) >> PAGE_SHIFT;  
++		mmu++;
++
++		__skb_queue_tail(&rxq, skb);
++
++		/* Filled the batch queue? */
++		if ((gop - grant_rx_op) == ARRAY_SIZE(grant_rx_op))
++			break;
++	}
++
++	if (mcl == rx_mcl)
++		return;
++
++	mcl->op = __HYPERVISOR_mmu_update;
++	mcl->args[0] = (unsigned long)rx_mmu;
++	mcl->args[1] = mmu - rx_mmu;
++	mcl->args[2] = 0;
++	mcl->args[3] = DOMID_SELF;
++	mcl++;
++
++	mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
++	ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
++	BUG_ON(ret != 0);
++
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
++					gop - grant_rx_op);
++	BUG_ON(ret != 0);
++
++	mcl = rx_mcl;
++	gop = grant_rx_op;
++	while ((skb = __skb_dequeue(&rxq)) != NULL) {
++		netif   = netdev_priv(skb->dev);
++		size    = skb->tail - skb->data;
++
++		/* Rederive the machine addresses. */
++		new_mfn = mcl->args[1] >> PAGE_SHIFT;
++		old_mfn = gop->mfn;
++		atomic_set(&(skb_shinfo(skb)->dataref), 1);
++		skb_shinfo(skb)->nr_frags = 0;
++		skb_shinfo(skb)->frag_list = NULL;
++
++		netif->stats.tx_bytes += size;
++		netif->stats.tx_packets++;
++
++		/* The update_va_mapping() must not fail. */
++		BUG_ON(mcl->result != 0);
++
++		/* Check the reassignment error code. */
++		status = NETIF_RSP_OKAY;
++		if (gop->status != 0) { 
++			DPRINTK("Bad status %d from grant transfer to DOM%u\n",
++				gop->status, netif->domid);
++			/*
++			 * Page no longer belongs to us unless GNTST_bad_page,
++			 * but that should be a fatal error anyway.
++			 */
++			BUG_ON(gop->status == GNTST_bad_page);
++			status = NETIF_RSP_ERROR; 
++		}
++		irq = netif->irq;
++		id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id;
++		if (make_rx_response(netif, id, status,
++				     (unsigned long)skb->data & ~PAGE_MASK,
++				     size, skb->proto_csum_valid ?
++				     NETRXF_csum_valid : 0) &&
++		    (rx_notify[irq] == 0)) {
++			rx_notify[irq] = 1;
++			notify_list[notify_nr++] = irq;
++		}
++
++		netif_put(netif);
++		dev_kfree_skb(skb);
++		mcl++;
++		gop++;
++	}
++
++	while (notify_nr != 0) {
++		irq = notify_list[--notify_nr];
++		rx_notify[irq] = 0;
++		notify_remote_via_irq(irq);
++	}
++
++	/* More work to do? */
++	if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
++		tasklet_schedule(&net_rx_tasklet);
++#if 0
++	else
++		xen_network_done_notify();
++#endif
++}
++
++static void net_alarm(unsigned long unused)
++{
++	tasklet_schedule(&net_rx_tasklet);
++}
++
++struct net_device_stats *netif_be_get_stats(struct net_device *dev)
++{
++	netif_t *netif = netdev_priv(dev);
++	return &netif->stats;
++}
++
++static int __on_net_schedule_list(netif_t *netif)
++{
++	return netif->list.next != NULL;
++}
++
++static void remove_from_net_schedule_list(netif_t *netif)
++{
++	spin_lock_irq(&net_schedule_list_lock);
++	if (likely(__on_net_schedule_list(netif))) {
++		list_del(&netif->list);
++		netif->list.next = NULL;
++		netif_put(netif);
++	}
++	spin_unlock_irq(&net_schedule_list_lock);
++}
++
++static void add_to_net_schedule_list_tail(netif_t *netif)
++{
++	if (__on_net_schedule_list(netif))
++		return;
++
++	spin_lock_irq(&net_schedule_list_lock);
++	if (!__on_net_schedule_list(netif) && netif->active) {
++		list_add_tail(&netif->list, &net_schedule_list);
++		netif_get(netif);
++	}
++	spin_unlock_irq(&net_schedule_list_lock);
++}
++
++/*
++ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
++ * If this driver is pipelining transmit requests then we can be very
++ * aggressive in avoiding new-packet notifications -- frontend only needs to
++ * send a notification if there are no outstanding unreceived responses.
++ * If we may be buffering transmit requests for any reason then we must be rather
++ * more conservative and treat this as the final check for pending work.
++ */
++void netif_schedule_work(netif_t *netif)
++{
++	int more_to_do;
++
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++	more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
++#else
++	RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++#endif
++
++	if (more_to_do) {
++		add_to_net_schedule_list_tail(netif);
++		maybe_schedule_tx_action();
++	}
++}
++
++void netif_deschedule_work(netif_t *netif)
++{
++	remove_from_net_schedule_list(netif);
++}
++
++
++static void tx_credit_callback(unsigned long data)
++{
++	netif_t *netif = (netif_t *)data;
++	netif->remaining_credit = netif->credit_bytes;
++	netif_schedule_work(netif);
++}
++
++inline static void net_tx_action_dealloc(void)
++{
++	gnttab_unmap_grant_ref_t *gop;
++	u16 pending_idx;
++	PEND_RING_IDX dc, dp;
++	netif_t *netif;
++	int ret;
++
++	dc = dealloc_cons;
++	dp = dealloc_prod;
++
++	/*
++	 * Free up any grants we have finished using
++	 */
++	gop = tx_unmap_ops;
++	while (dc != dp) {
++		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
++		gop->host_addr    = MMAP_VADDR(pending_idx);
++		gop->dev_bus_addr = 0;
++		gop->handle       = grant_tx_handle[pending_idx];
++		gop++;
++	}
++	ret = HYPERVISOR_grant_table_op(
++		GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
++	BUG_ON(ret);
++
++	while (dealloc_cons != dp) {
++		pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
++
++		netif = pending_tx_info[pending_idx].netif;
++
++		make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
++				 NETIF_RSP_OKAY);
++        
++		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++
++		netif_put(netif);
++	}
++}
++
++/* Called after netfront has transmitted */
++static void net_tx_action(unsigned long unused)
++{
++	struct list_head *ent;
++	struct sk_buff *skb;
++	netif_t *netif;
++	netif_tx_request_t txreq;
++	u16 pending_idx;
++	RING_IDX i;
++	gnttab_map_grant_ref_t *mop;
++	unsigned int data_len;
++	int ret, work_to_do;
++
++	if (dealloc_cons != dealloc_prod)
++		net_tx_action_dealloc();
++
++	mop = tx_map_ops;
++	while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
++		!list_empty(&net_schedule_list)) {
++		/* Get a netif from the list with work to do. */
++		ent = net_schedule_list.next;
++		netif = list_entry(ent, netif_t, list);
++		netif_get(netif);
++		remove_from_net_schedule_list(netif);
++
++		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
++		if (!work_to_do) {
++			netif_put(netif);
++			continue;
++		}
++
++		i = netif->tx.req_cons;
++		rmb(); /* Ensure that we see the request before we copy it. */
++		memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
++		/* Credit-based scheduling. */
++		if (txreq.size > netif->remaining_credit) {
++			unsigned long now = jiffies;
++			unsigned long next_credit = 
++				netif->credit_timeout.expires +
++				msecs_to_jiffies(netif->credit_usec / 1000);
++
++			/* Timer could already be pending in rare cases. */
++			if (timer_pending(&netif->credit_timeout))
++				break;
++
++			/* Passed the point where we can replenish credit? */
++			if (time_after_eq(now, next_credit)) {
++				netif->credit_timeout.expires = now;
++				netif->remaining_credit = netif->credit_bytes;
++			}
++
++			/* Still too big to send right now? Set a callback. */
++			if (txreq.size > netif->remaining_credit) {
++				netif->remaining_credit = 0;
++				netif->credit_timeout.expires  = 
++					next_credit;
++				netif->credit_timeout.data     =
++					(unsigned long)netif;
++				netif->credit_timeout.function =
++					tx_credit_callback;
++				add_timer_on(&netif->credit_timeout,
++					     smp_processor_id());
++				break;
++			}
++		}
++		netif->remaining_credit -= txreq.size;
++
++		netif->tx.req_cons++;
++
++		netif_schedule_work(netif);
++
++		if (unlikely(txreq.size < ETH_HLEN) || 
++		    unlikely(txreq.size > ETH_FRAME_LEN)) {
++			DPRINTK("Bad packet size: %d\n", txreq.size);
++			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
++			netif_put(netif);
++			continue; 
++		}
++
++		/* No crossing a page as the payload mustn't fragment. */
++		if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
++			DPRINTK("txreq.offset: %x, size: %u, end: %lu\n", 
++				txreq.offset, txreq.size, 
++				(txreq.offset &~PAGE_MASK) + txreq.size);
++			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
++			netif_put(netif);
++			continue;
++		}
++
++		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
++
++		data_len = (txreq.size > PKT_PROT_LEN) ?
++			PKT_PROT_LEN : txreq.size;
++
++		skb = alloc_skb(data_len+16, GFP_ATOMIC);
++		if (unlikely(skb == NULL)) {
++			DPRINTK("Can't allocate a skb in net_tx_action.\n");
++			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
++			netif_put(netif);
++			break;
++		}
++
++		/* Packets passed to netif_rx() must have some headroom. */
++		skb_reserve(skb, 16);
++
++		mop->host_addr = MMAP_VADDR(pending_idx);
++		mop->dom       = netif->domid;
++		mop->ref       = txreq.gref;
++		mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
++		mop++;
++
++		memcpy(&pending_tx_info[pending_idx].req,
++		       &txreq, sizeof(txreq));
++		pending_tx_info[pending_idx].netif = netif;
++		*((u16 *)skb->data) = pending_idx;
++
++		__skb_queue_tail(&tx_queue, skb);
++
++		pending_cons++;
++
++		if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
++			break;
++	}
++
++	if (mop == tx_map_ops)
++		return;
++
++	ret = HYPERVISOR_grant_table_op(
++		GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
++	BUG_ON(ret);
++
++	mop = tx_map_ops;
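++	/*
++	 * Second pass over the batch: check each grant map, copy the packet
++	 * header into the local skb (attaching any remainder as a page
++	 * fragment) and hand the skb to the network stack via netif_rx().
++	 */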
++	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
++		pending_idx = *((u16 *)skb->data);
++		netif       = pending_tx_info[pending_idx].netif;
++		memcpy(&txreq, &pending_tx_info[pending_idx].req,
++		       sizeof(txreq));
++
++		/* Check the remap error code. */
++		if (unlikely(mop->status)) {
++			printk(KERN_ALERT "#### netback grant fails\n");
++			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
++			netif_put(netif);
++			kfree_skb(skb);
++			mop++;
++			pending_ring[MASK_PEND_IDX(pending_prod++)] =
++				pending_idx;
++			continue;
++		}
++		set_phys_to_machine(
++			__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT,
++			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
++		grant_tx_handle[pending_idx] = mop->handle;
++
++		data_len = (txreq.size > PKT_PROT_LEN) ?
++			PKT_PROT_LEN : txreq.size;
++
++		__skb_put(skb, data_len);
++		memcpy(skb->data, 
++		       (void *)(MMAP_VADDR(pending_idx)|txreq.offset),
++		       data_len);
++		if (data_len < txreq.size) {
++			/* Append the packet payload as a fragment. */
++			skb_shinfo(skb)->frags[0].page        = 
++				virt_to_page(MMAP_VADDR(pending_idx));
++			skb_shinfo(skb)->frags[0].size        =
++				txreq.size - data_len;
++			skb_shinfo(skb)->frags[0].page_offset = 
++				txreq.offset + data_len;
++			skb_shinfo(skb)->nr_frags = 1;
++		} else {
++			/* Schedule a response immediately. */
++			netif_idx_release(pending_idx);
++		}
++
++		skb->data_len  = txreq.size - data_len;
++		skb->len      += skb->data_len;
++
++		skb->dev      = netif->dev;
++		skb->protocol = eth_type_trans(skb, skb->dev);
++
++		/*
++		 * No checking needed on localhost, but remember the field is
++		 * blank.
++		 */
++		skb->ip_summed        = CHECKSUM_UNNECESSARY;
++		skb->proto_csum_valid = 1;
++		skb->proto_csum_blank = !!(txreq.flags & NETTXF_csum_blank);
++
++		netif->stats.rx_bytes += txreq.size;
++		netif->stats.rx_packets++;
++
++		netif_rx(skb);
++		netif->dev->last_rx = jiffies;
++
++		mop++;
++	}
++}
++
++static void netif_idx_release(u16 pending_idx)
++{
++	static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
++	unsigned long flags;
++
++	spin_lock_irqsave(&_lock, flags);
++	dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
++	spin_unlock_irqrestore(&_lock, flags);
++
++	tasklet_schedule(&net_tx_tasklet);
++}
++
++static void netif_page_release(struct page *page)
++{
++	u16 pending_idx = page - virt_to_page(mmap_vstart);
++
++	/* Ready for next use. */
++	set_page_count(page, 1);
++
++	netif_idx_release(pending_idx);
++}
++
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++	netif_t *netif = dev_id;
++	add_to_net_schedule_list_tail(netif);
++	maybe_schedule_tx_action();
++	return IRQ_HANDLED;
++}
++
++static void make_tx_response(netif_t *netif, 
++                             u16      id,
++                             s8       st)
++{
++	RING_IDX i = netif->tx.rsp_prod_pvt;
++	netif_tx_response_t *resp;
++	int notify;
++
++	resp = RING_GET_RESPONSE(&netif->tx, i);
++	resp->id     = id;
++	resp->status = st;
++
++	netif->tx.rsp_prod_pvt = ++i;
++	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
++	if (notify)
++		notify_remote_via_irq(netif->irq);
++
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++	if (i == netif->tx.req_cons) {
++		int more_to_do;
++		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++		if (more_to_do)
++			add_to_net_schedule_list_tail(netif);
++	}
++#endif
++}
++
++static int make_rx_response(netif_t *netif, 
++                            u16      id, 
++                            s8       st,
++                            u16      offset,
++                            u16      size,
++                            u16      flags)
++{
++	RING_IDX i = netif->rx.rsp_prod_pvt;
++	netif_rx_response_t *resp;
++	int notify;
++
++	resp = RING_GET_RESPONSE(&netif->rx, i);
++	resp->offset     = offset;
++	resp->flags      = flags;
++	resp->id         = id;
++	resp->status     = (s16)size;
++	if (st < 0)
++		resp->status = (s16)st;
++
++	netif->rx.rsp_prod_pvt = ++i;
++	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, notify);
++
++	return notify;
++}
++
++#ifdef NETBE_DEBUG_INTERRUPT
++static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
++{
++	struct list_head *ent;
++	netif_t *netif;
++	int i = 0;
++
++	printk(KERN_ALERT "netif_schedule_list:\n");
++	spin_lock_irq(&net_schedule_list_lock);
++
++	list_for_each (ent, &net_schedule_list) {
++		netif = list_entry(ent, netif_t, list);
++		printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
++		       "rx_resp_prod=%08x\n",
++		       i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
++		printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
++		       netif->tx.req_cons, netif->tx.rsp_prod_pvt);
++		printk(KERN_ALERT "   shared(rx_req_prod=%08x "
++		       "rx_resp_prod=%08x\n",
++		       netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
++		printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
++		       netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
++		printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
++		       netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
++		i++;
++	}
++
++	spin_unlock_irq(&net_schedule_list_lock);
++	printk(KERN_ALERT " ** End of netif_schedule_list **\n");
++
++	return IRQ_HANDLED;
++}
++#endif
++
++static int __init netback_init(void)
++{
++	int i;
++	struct page *page;
++
++	/* We can increase reservation by this much in net_rx_action(). */
++	balloon_update_driver_allowance(NET_RX_RING_SIZE);
++
++	skb_queue_head_init(&rx_queue);
++	skb_queue_head_init(&tx_queue);
++
++	init_timer(&net_timer);
++	net_timer.data = 0;
++	net_timer.function = net_alarm;
++    
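++	/* Reserve an empty VA range into which foreign TX pages are mapped. */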
++	page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
++	BUG_ON(page == NULL);
++	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
++
++	for (i = 0; i < MAX_PENDING_REQS; i++) {
++		page = virt_to_page(MMAP_VADDR(i));
++		set_page_count(page, 1);
++		SetPageForeign(page, netif_page_release);
++	}
++
++	pending_cons = 0;
++	pending_prod = MAX_PENDING_REQS;
++	for (i = 0; i < MAX_PENDING_REQS; i++)
++		pending_ring[i] = i;
++
++	spin_lock_init(&net_schedule_list_lock);
++	INIT_LIST_HEAD(&net_schedule_list);
++
++	netif_xenbus_init();
++
++#ifdef NETBE_DEBUG_INTERRUPT
++	(void)bind_virq_to_irqhandler(
++		VIRQ_DEBUG,
++		0,
++		netif_be_dbg,
++		SA_SHIRQ, 
++		"net-be-dbg",
++		&netif_be_dbg);
++#endif
++
++	return 0;
++}
++
++static void netback_cleanup(void)
++{
++	BUG();
++}
++
++module_init(netback_init);
++module_exit(netback_cleanup);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/xenbus.c linux-2.6.12-xen/drivers/xen/netback/xenbus.c
+--- pristine-linux-2.6.12/drivers/xen/netback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/netback/xenbus.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,329 @@
++/*  Xenbus code for netif backend
++    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
++    Copyright (C) 2005 XenSource Ltd
++
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
++
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
++
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++*/
++
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <asm-xen/xenbus.h>
++#include <asm-xen/net_driver_util.h>
++#include "common.h"
++
++
++#if 0
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++    printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++#endif
++
++
++struct backend_info
++{
++	struct xenbus_device *dev;
++	netif_t *netif;
++	struct xenbus_watch backend_watch;
++	XenbusState frontend_state;
++};
++
++
++static int connect_rings(struct backend_info *);
++static void connect(struct backend_info *);
++static void maybe_connect(struct backend_info *);
++static void backend_changed(struct xenbus_watch *, const char **,
++			    unsigned int);
++
++
++static int netback_remove(struct xenbus_device *dev)
++{
++	struct backend_info *be = dev->data;
++
++	if (be->backend_watch.node) {
++		unregister_xenbus_watch(&be->backend_watch);
++		kfree(be->backend_watch.node);
++		be->backend_watch.node = NULL;
++	}
++	if (be->netif) {
++		netif_disconnect(be->netif);
++		be->netif = NULL;
++	}
++	kfree(be);
++	dev->data = NULL;
++	return 0;
++}
++
++
++/**
++ * Entry point to this code when a new device is created.  Allocate the basic
++ * structures, and watch the store waiting for the hotplug scripts to tell us
++ * the device's handle.  Switch to InitWait.
++ */
++static int netback_probe(struct xenbus_device *dev,
++			 const struct xenbus_device_id *id)
++{
++	int err;
++	struct backend_info *be = kmalloc(sizeof(struct backend_info),
++					  GFP_KERNEL);
++	if (!be) {
++		xenbus_dev_fatal(dev, -ENOMEM,
++				 "allocating backend structure");
++		return -ENOMEM;
++	}
++	memset(be, 0, sizeof(*be));
++
++	be->dev = dev;
++	dev->data = be;
++
++	err = xenbus_watch_path2(dev, dev->nodename, "handle",
++				 &be->backend_watch, backend_changed);
++	if (err)
++		goto fail;
++
++	err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
++	if (err)
++		goto fail;
++
++	return 0;
++
++fail:
++	DPRINTK("failed");
++	netback_remove(dev);
++	return err;
++}
++
++
++/**
++ * Handle the creation of the hotplug script environment.  We add the script
++ * and vif variables to the environment, for the benefit of the vif-* hotplug
++ * scripts.
++ */
++static int netback_hotplug(struct xenbus_device *xdev, char **envp,
++			   int num_envp, char *buffer, int buffer_size)
++{
++	struct backend_info *be = xdev->data;
++	netif_t *netif = be->netif;
++	int i = 0, length = 0;
++	char *val;
++
++	DPRINTK("netback_hotplug");
++
++	val = xenbus_read(XBT_NULL, xdev->nodename, "script", NULL);
++	if (IS_ERR(val)) {
++		int err = PTR_ERR(val);
++		xenbus_dev_fatal(xdev, err, "reading script");
++		return err;
++	}
++	else {
++		add_hotplug_env_var(envp, num_envp, &i,
++				    buffer, buffer_size, &length,
++				    "script=%s", val);
++		kfree(val);
++	}
++
++	add_hotplug_env_var(envp, num_envp, &i,
++			    buffer, buffer_size, &length,
++			    "vif=%s", netif->dev->name);
++
++	envp[i] = NULL;
++
++	return 0;
++}
++
++
++/**
++ * Callback received when the hotplug scripts have placed the handle node.
++ * Read it, and create a netif structure.  If the frontend is ready, connect.
++ */
++static void backend_changed(struct xenbus_watch *watch,
++			    const char **vec, unsigned int len)
++{
++	int err;
++	long handle;
++	struct backend_info *be
++		= container_of(watch, struct backend_info, backend_watch);
++	struct xenbus_device *dev = be->dev;
++
++	DPRINTK("");
++
++	err = xenbus_scanf(XBT_NULL, dev->nodename, "handle", "%li", &handle);
++	if (XENBUS_EXIST_ERR(err)) {
++		/* Since this watch will fire once immediately after it is
++		   registered, we expect this.  Ignore it, and wait for the
++		   hotplug scripts. */
++		return;
++	}
++	if (err != 1) {
++		xenbus_dev_fatal(dev, err, "reading handle");
++		return;
++	}
++
++	if (be->netif == NULL) {
++		u8 be_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
++
++		be->netif = alloc_netif(dev->otherend_id, handle, be_mac);
++		if (IS_ERR(be->netif)) {
++			err = PTR_ERR(be->netif);
++			be->netif = NULL;
++			xenbus_dev_fatal(dev, err, "creating interface");
++			return;
++		}
++
++		kobject_hotplug(&dev->dev.kobj, KOBJ_ONLINE);
++
++		maybe_connect(be);
++	}
++}
++
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++			     XenbusState frontend_state)
++{
++	struct backend_info *be = dev->data;
++
++	DPRINTK("");
++
++	be->frontend_state = frontend_state;
++
++	switch (frontend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitialised:
++		break;
++
++	case XenbusStateConnected:
++		maybe_connect(be);
++		break;
++
++	case XenbusStateClosing:
++		xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
++		break;
++
++	case XenbusStateClosed:
++		kobject_hotplug(&dev->dev.kobj, KOBJ_OFFLINE);
++		device_unregister(&dev->dev);
++		break;
++
++	case XenbusStateUnknown:
++	case XenbusStateInitWait:
++	default:
++		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++				 frontend_state);
++		break;
++	}
++}
++
++
++/* ** Connection ** */
++
++
++static void maybe_connect(struct backend_info *be)
++{
++	if (be->netif != NULL && be->frontend_state == XenbusStateConnected) {
++		connect(be);
++	}
++}
++
++
++static void connect(struct backend_info *be)
++{
++	int err;
++	struct xenbus_device *dev = be->dev;
++
++	err = connect_rings(be);
++	if (err)
++		return;
++
++	err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
++		return;
++	}
++
++	xenbus_switch_state(dev, XBT_NULL, XenbusStateConnected);
++}
++
++
++static int connect_rings(struct backend_info *be)
++{
++	struct xenbus_device *dev = be->dev;
++	unsigned long tx_ring_ref, rx_ring_ref;
++	unsigned int evtchn;
++	int err;
++
++	DPRINTK("");
++
++	err = xenbus_gather(XBT_NULL, dev->otherend,
++			    "tx-ring-ref", "%lu", &tx_ring_ref,
++			    "rx-ring-ref", "%lu", &rx_ring_ref,
++			    "event-channel", "%u", &evtchn, NULL);
++	if (err) {
++		xenbus_dev_fatal(dev, err,
++				 "reading %s/ring-ref and event-channel",
++				 dev->otherend);
++		return err;
++	}
++
++	/* Map the shared frame, irq etc. */
++	err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
++	if (err) {
++		xenbus_dev_fatal(dev, err,
++				 "mapping shared-frames %lu/%lu port %u",
++				 tx_ring_ref, rx_ring_ref, evtchn);
++		return err;
++	}
++	return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static struct xenbus_device_id netback_ids[] = {
++	{ "vif" },
++	{ "" }
++};
++
++
++static struct xenbus_driver netback = {
++	.name = "vif",
++	.owner = THIS_MODULE,
++	.ids = netback_ids,
++	.probe = netback_probe,
++	.remove = netback_remove,
++	.hotplug = netback_hotplug,
++	.otherend_changed = frontend_changed,
++};
++
++
++void netif_xenbus_init(void)
++{
++	xenbus_register_backend(&netback);
++}
++
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/net_driver_util.c linux-2.6.12-xen/drivers/xen/net_driver_util.c
+--- pristine-linux-2.6.12/drivers/xen/net_driver_util.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/net_driver_util.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,67 @@
++/*****************************************************************************
++ *
++ * Utility functions for Xen network devices.
++ *
++ * Copyright (c) 2005 XenSource Ltd.
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following
++ * license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject
++ * to the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++
++#include <linux/if_ether.h>
++#include <linux/err.h>
++#include <asm-xen/net_driver_util.h>
++
++
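++/*
++ * Parse the colon-separated "mac" node in the device's xenstore directory
++ * into mac[]. Returns 0 on success, -ENOENT if the string is malformed, or
++ * the xenbus error if the node cannot be read.
++ */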
++int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++	char *s;
++	int i;
++	char *e;
++	char *macstr = xenbus_read(XBT_NULL, dev->nodename, "mac", NULL);
++	if (IS_ERR(macstr)) {
++		return PTR_ERR(macstr);
++	}
++	s = macstr;
++	for (i = 0; i < ETH_ALEN; i++) {
++		mac[i] = simple_strtoul(s, &e, 16);
++		if (s == e || (e[0] != ':' && e[0] != 0)) {
++			kfree(macstr);
++			return -ENOENT;
++		}
++		s = &e[1];
++	}
++	kfree(macstr);
++	return 0;
++}
++
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/netfront/Makefile linux-2.6.12-xen/drivers/xen/netfront/Makefile
+--- pristine-linux-2.6.12/drivers/xen/netfront/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/netfront/Makefile	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,4 @@
++
++obj-$(CONFIG_XEN_NETDEV_FRONTEND)	:= xennet.o
++
++xennet-objs := netfront.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/netfront/netfront.c linux-2.6.12-xen/drivers/xen/netfront/netfront.c
+--- pristine-linux-2.6.12/drivers/xen/netfront/netfront.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/netfront/netfront.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,1487 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
++ * 
++ * Copyright (c) 2002-2005, K A Fraser
++ * Copyright (c) 2005, XenSource Ltd
++ * 
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/init.h>
++#include <linux/bitops.h>
++#include <linux/proc_fs.h>
++#include <linux/ethtool.h>
++#include <net/sock.h>
++#include <net/pkt_sched.h>
++#include <net/arp.h>
++#include <net/route.h>
++#include <asm/io.h>
++#include <asm/uaccess.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/xenbus.h>
++#include <asm-xen/xen-public/io/netif.h>
++#include <asm-xen/xen-public/memory.h>
++#include <asm-xen/balloon.h>
++#include <asm/page.h>
++#include <asm/uaccess.h>
++#include <asm-xen/xen-public/grant_table.h>
++#include <asm-xen/gnttab.h>
++#include <asm-xen/net_driver_util.h>
++
++#define GRANT_INVALID_REF	0
++
++#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
++
++#ifndef __GFP_NOWARN
++#define __GFP_NOWARN 0
++#endif
++#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
++
++#define init_skb_shinfo(_skb)                         \
++    do {                                              \
++        atomic_set(&(skb_shinfo(_skb)->dataref), 1);  \
++        skb_shinfo(_skb)->nr_frags = 0;               \
++        skb_shinfo(_skb)->frag_list = NULL;           \
++    } while (0)
++
++static unsigned long rx_pfn_array[NET_RX_RING_SIZE];
++static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
++static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
++
++struct netfront_info
++{
++	struct list_head list;
++	struct net_device *netdev;
++
++	struct net_device_stats stats;
++	unsigned int tx_full;
++    
++	netif_tx_front_ring_t tx;
++	netif_rx_front_ring_t rx;
++
++	spinlock_t   tx_lock;
++	spinlock_t   rx_lock;
++
++	unsigned int handle;
++	unsigned int evtchn, irq;
++
++	/* What is the status of our connection to the remote backend? */
++#define BEST_CLOSED       0
++#define BEST_DISCONNECTED 1
++#define BEST_CONNECTED    2
++	unsigned int backend_state;
++
++	/* Is this interface open or closed (down or up)? */
++#define UST_CLOSED        0
++#define UST_OPEN          1
++	unsigned int user_state;
++
++	/* Receive-ring batched refills. */
++#define RX_MIN_TARGET 8
++#define RX_DFL_MIN_TARGET 64
++#define RX_MAX_TARGET NET_RX_RING_SIZE
++	int rx_min_target, rx_max_target, rx_target;
++	struct sk_buff_head rx_batch;
++
++	struct timer_list rx_refill_timer;
++
++	/*
++	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
++	 * array is an index into a chain of free entries.
++	 */
++	struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
++	struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
++
++	grant_ref_t gref_tx_head;
++	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; 
++	grant_ref_t gref_rx_head;
++	grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1]; 
++
++	struct xenbus_device *xbdev;
++	int tx_ring_ref;
++	int rx_ring_ref;
++	u8 mac[ETH_ALEN];
++};
++
++/* Access macros for acquiring and freeing slots in {tx,rx}_skbs[]. */
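++/* Entry 0 of each array holds the freelist head; free ids are chained
++ * through the otherwise-unused slots, stored as pointer-sized integers. */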
++#define ADD_ID_TO_FREELIST(_list, _id)			\
++	(_list)[(_id)] = (_list)[0];			\
++	(_list)[0]     = (void *)(unsigned long)(_id);
++#define GET_ID_FROM_FREELIST(_list)				\
++	({ unsigned long _id = (unsigned long)(_list)[0];	\
++	   (_list)[0]  = (_list)[_id];				\
++	   (unsigned short)_id; })
++
++#ifdef DEBUG
++static char *be_state_name[] = {
++	[BEST_CLOSED]       = "closed",
++	[BEST_DISCONNECTED] = "disconnected",
++	[BEST_CONNECTED]    = "connected",
++};
++#endif
++
++#define DPRINTK(fmt, args...) pr_debug("netfront (%s:%d) " fmt, \
++                                       __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...)				\
++	printk(KERN_INFO "netfront: " fmt, ##args)
++#define WPRINTK(fmt, args...)				\
++	printk(KERN_WARNING "netfront: " fmt, ##args)
++
++
++static int talk_to_backend(struct xenbus_device *, struct netfront_info *);
++static int setup_device(struct xenbus_device *, struct netfront_info *);
++static int create_netdev(int, struct xenbus_device *, struct net_device **);
++
++static void netfront_closing(struct xenbus_device *);
++
++static void end_access(int, void *);
++static void netif_disconnect_backend(struct netfront_info *);
++static void close_netdev(struct netfront_info *);
++static void netif_free(struct netfront_info *);
++
++static void show_device(struct netfront_info *);
++
++static void network_connect(struct net_device *);
++static void network_tx_buf_gc(struct net_device *);
++static void network_alloc_rx_buffers(struct net_device *);
++static int send_fake_arp(struct net_device *);
++
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++
++#ifdef CONFIG_PROC_FS
++static int xennet_proc_init(void);
++static int xennet_proc_addif(struct net_device *dev);
++static void xennet_proc_delif(struct net_device *dev);
++#else
++#define xennet_proc_init()   (0)
++#define xennet_proc_addif(d) (0)
++#define xennet_proc_delif(d) ((void)0)
++#endif
++
++
++/**
++ * Entry point to this code when a new device is created.  Allocate the basic
++ * structures and the ring buffers for communication with the backend, and
++ * inform the backend of the appropriate details for those.  Switch to
++ * Connected state.
++ */
++static int netfront_probe(struct xenbus_device *dev,
++			  const struct xenbus_device_id *id)
++{
++	int err;
++	struct net_device *netdev;
++	struct netfront_info *info;
++	unsigned int handle;
++
++	err = xenbus_scanf(XBT_NULL, dev->nodename, "handle", "%u", &handle);
++	if (err != 1) {
++		xenbus_dev_fatal(dev, err, "reading handle");
++		return err;
++	}
++
++	err = create_netdev(handle, dev, &netdev);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "creating netdev");
++		return err;
++	}
++
++	info = netdev_priv(netdev);
++	dev->data = info;
++
++	err = talk_to_backend(dev, info);
++	if (err) {
++		kfree(info);
++		dev->data = NULL;
++		return err;
++	}
++
++	return 0;
++}
++
++
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart.  We tear down our netif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
++ */
++static int netfront_resume(struct xenbus_device *dev)
++{
++	struct netfront_info *info = dev->data;
++
++	DPRINTK("%s\n", dev->nodename);
++
++	netif_disconnect_backend(info);
++	return talk_to_backend(dev, info);
++}
++
++
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++			   struct netfront_info *info)
++{
++	const char *message;
++	xenbus_transaction_t xbt;
++	int err;
++
++	err = xen_net_read_mac(dev, info->mac);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
++		goto out;
++	}
++
++	/* Create shared ring, alloc event channel. */
++	err = setup_device(dev, info);
++	if (err)
++		goto out;
++
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++		goto destroy_ring;
++	}
++
++	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
++			    info->tx_ring_ref);
++	if (err) {
++		message = "writing tx ring-ref";
++		goto abort_transaction;
++	}
++	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
++			    info->rx_ring_ref);
++	if (err) {
++		message = "writing rx ring-ref";
++		goto abort_transaction;
++	}
++	err = xenbus_printf(xbt, dev->nodename,
++			    "event-channel", "%u", info->evtchn);
++	if (err) {
++		message = "writing event-channel";
++		goto abort_transaction;
++	}
++
++	err = xenbus_printf(xbt, dev->nodename,
++			    "state", "%d", XenbusStateConnected);
++	if (err) {
++		message = "writing frontend XenbusStateConnected";
++		goto abort_transaction;
++	}
++
++	err = xenbus_transaction_end(xbt, 0);
++	if (err) {
++		if (err == -EAGAIN)
++			goto again;
++		xenbus_dev_fatal(dev, err, "completing transaction");
++		goto destroy_ring;
++	}
++
++	return 0;
++
++ abort_transaction:
++	xenbus_transaction_end(xbt, 1);
++	xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_ring:
++	netif_free(info);
++ out:
++	return err;
++}
++
++
++static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
++{
++	netif_tx_sring_t *txs;
++	netif_rx_sring_t *rxs;
++	int err;
++	struct net_device *netdev = info->netdev;
++
++	info->tx_ring_ref = GRANT_INVALID_REF;
++	info->rx_ring_ref = GRANT_INVALID_REF;
++	info->rx.sring = NULL;
++	info->tx.sring = NULL;
++	info->irq = 0;
++
++	txs = (netif_tx_sring_t *)__get_free_page(GFP_KERNEL);
++	if (!txs) {
++		err = -ENOMEM;
++		xenbus_dev_fatal(dev, err, "allocating tx ring page");
++		goto fail;
++	}
++	rxs = (netif_rx_sring_t *)__get_free_page(GFP_KERNEL);
++	if (!rxs) {
++		err = -ENOMEM;
++		xenbus_dev_fatal(dev, err, "allocating rx ring page");
++		goto fail;
++	}
++	memset(txs, 0, PAGE_SIZE);
++	memset(rxs, 0, PAGE_SIZE);
++	info->backend_state = BEST_DISCONNECTED;
++
++	SHARED_RING_INIT(txs);
++	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
++
++	SHARED_RING_INIT(rxs);
++	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
++
++	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
++	if (err < 0)
++		goto fail;
++	info->tx_ring_ref = err;
++
++	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
++	if (err < 0)
++		goto fail;
++	info->rx_ring_ref = err;
++
++	err = xenbus_alloc_evtchn(dev, &info->evtchn);
++	if (err)
++		goto fail;
++
++	memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
++	network_connect(netdev);
++	info->irq = bind_evtchn_to_irqhandler(
++		info->evtchn, netif_int, SA_SAMPLE_RANDOM, netdev->name,
++		netdev);
++	(void)send_fake_arp(netdev);
++	show_device(info);
++
++	return 0;
++
++ fail:
++	netif_free(info);
++	return err;
++}
++
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++			    XenbusState backend_state)
++{
++	DPRINTK("\n");
++
++	switch (backend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitWait:
++	case XenbusStateInitialised:
++	case XenbusStateConnected:
++	case XenbusStateUnknown:
++	case XenbusStateClosed:
++		break;
++
++	case XenbusStateClosing:
++		netfront_closing(dev);
++		break;
++	}
++}
++
++
++/** Send a packet on a net device to encourage switches to learn the
++ * MAC. We send a gratuitous ARP (an unsolicited, broadcast ARP reply).
++ *
++ * @param dev device
++ * @return 0 on success, error code otherwise
++ */
++static int send_fake_arp(struct net_device *dev)
++{
++	struct sk_buff *skb;
++	u32             src_ip, dst_ip;
++
++	dst_ip = INADDR_BROADCAST;
++	src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
++
++	/* No IP? Then nothing to do. */
++	if (src_ip == 0)
++		return 0;
++
++	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
++			 dst_ip, dev, src_ip,
++			 /*dst_hw*/ NULL, /*src_hw*/ NULL, 
++			 /*target_hw*/ dev->dev_addr);
++	if (skb == NULL)
++		return -ENOMEM;
++
++	return dev_queue_xmit(skb);
++}
++
++
++static int network_open(struct net_device *dev)
++{
++	struct netfront_info *np = netdev_priv(dev);
++
++	memset(&np->stats, 0, sizeof(np->stats));
++
++	np->user_state = UST_OPEN;
++
++	network_alloc_rx_buffers(dev);
++	np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
++
++	netif_start_queue(dev);
++
++	return 0;
++}
++
++static void network_tx_buf_gc(struct net_device *dev)
++{
++	RING_IDX i, prod;
++	unsigned short id;
++	struct netfront_info *np = netdev_priv(dev);
++	struct sk_buff *skb;
++
++	if (np->backend_state != BEST_CONNECTED)
++		return;
++
++	do {
++		prod = np->tx.sring->rsp_prod;
++		rmb(); /* Ensure we see responses up to 'prod'. */
++
++		for (i = np->tx.rsp_cons; i != prod; i++) {
++			id  = RING_GET_RESPONSE(&np->tx, i)->id;
++			skb = np->tx_skbs[id];
++			if (unlikely(gnttab_query_foreign_access(
++				np->grant_tx_ref[id]) != 0)) {
++				printk(KERN_ALERT "network_tx_buf_gc: warning "
++				       "-- grant still in use by backend "
++				       "domain.\n");
++				goto out; 
++			}
++			gnttab_end_foreign_access_ref(
++				np->grant_tx_ref[id], GNTMAP_readonly);
++			gnttab_release_grant_reference(
++				&np->gref_tx_head, np->grant_tx_ref[id]);
++			np->grant_tx_ref[id] = GRANT_INVALID_REF;
++			ADD_ID_TO_FREELIST(np->tx_skbs, id);
++			dev_kfree_skb_irq(skb);
++		}
++        
++		np->tx.rsp_cons = prod;
++        
++		/*
++		 * Set a new event, then check for race with update of tx_cons.
++		 * Note that it is essential to schedule a callback, no matter
++		 * how few buffers are pending. Even if there is space in the
++		 * transmit ring, higher layers may be blocked because too much
++		 * data is outstanding: in such cases notification from Xen is
++		 * likely to be the only kick that we'll get.
++		 */
++		np->tx.sring->rsp_event =
++			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
++		mb();
++	} while (prod != np->tx.sring->rsp_prod);
++
++ out: 
++	if (np->tx_full &&
++	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
++		np->tx_full = 0;
++		if (np->user_state == UST_OPEN)
++			netif_wake_queue(dev);
++	}
++}
++
++
++static void rx_refill_timeout(unsigned long data)
++{
++	struct net_device *dev = (struct net_device *)data;
++	netif_rx_schedule(dev);
++}
++
++
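++/*
++ * Replenish the RX ring: allocate skbuffs in a batch, grant their pages
++ * for transfer to the backend, and hand them to Xen in a single multicall.
++ */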
++static void network_alloc_rx_buffers(struct net_device *dev)
++{
++	unsigned short id;
++	struct netfront_info *np = netdev_priv(dev);
++	struct sk_buff *skb;
++	int i, batch_target;
++	RING_IDX req_prod = np->rx.req_prod_pvt;
++	struct xen_memory_reservation reservation;
++	grant_ref_t ref;
++
++	if (unlikely(np->backend_state != BEST_CONNECTED))
++		return;
++
++	/*
++	 * Allocate skbuffs greedily, even though we batch updates to the
++	 * receive ring. This creates a less bursty demand on the memory
++	 * allocator, so should reduce the chance of failed allocation requests
++	 * both for ourself and for other kernel subsystems.
++	 */
++	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
++	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
++		/*
++		 * Subtract dev_alloc_skb headroom (16 bytes) and shared info
++		 * tailroom then round down to SKB_DATA_ALIGN boundary.
++		 */
++		skb = alloc_xen_skb(
++			((PAGE_SIZE - sizeof(struct skb_shared_info)) &
++			 (-SKB_DATA_ALIGN(1))) - 16);
++		if (skb == NULL) {
++			/* Any skbuffs queued for refill? Force them out. */
++			if (i != 0)
++				goto refill;
++			/* Could not allocate any skbuffs. Try again later. */
++			mod_timer(&np->rx_refill_timer,
++				  jiffies + (HZ/10));
++			return;
++		}
++		__skb_queue_tail(&np->rx_batch, skb);
++	}
++
++	/* Is the batch large enough to be worthwhile? */
++	if (i < (np->rx_target/2))
++		return;
++
++	/* Adjust our fill target if we risked running out of buffers. */
++	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
++	    ((np->rx_target *= 2) > np->rx_max_target))
++		np->rx_target = np->rx_max_target;
++
++ refill:
++	for (i = 0; ; i++) {
++		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
++			break;
++
++		skb->dev = dev;
++
++		id = GET_ID_FROM_FREELIST(np->rx_skbs);
++
++		np->rx_skbs[id] = skb;
++        
++		RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
++		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
++		BUG_ON((signed short)ref < 0);
++		np->grant_rx_ref[id] = ref;
++		gnttab_grant_foreign_transfer_ref(ref,
++						  np->xbdev->otherend_id);
++		RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
++		rx_pfn_array[i] = virt_to_mfn(skb->head);
++
++		/* Remove this page from map before passing back to Xen. */
++		set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
++				    INVALID_P2M_ENTRY);
++
++		MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
++					__pte(0), 0);
++	}
++
++	/* After all PTEs have been zapped we blow away stale TLB entries. */
++	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
++
++	/* Give away a batch of pages. */
++	rx_mcl[i].op = __HYPERVISOR_memory_op;
++	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
++	rx_mcl[i].args[1] = (unsigned long)&reservation;
++
++	reservation.extent_start = rx_pfn_array;
++	reservation.nr_extents   = i;
++	reservation.extent_order = 0;
++	reservation.address_bits = 0;
++	reservation.domid        = DOMID_SELF;
++
++	/* Tell the balloon driver what is going on. */
++	balloon_update_driver_allowance(i);
++
++	/* Zap PTEs and give away pages in one big multicall. */
++	(void)HYPERVISOR_multicall(rx_mcl, i+1);
++
++	/* Check return status of HYPERVISOR_memory_op(). */
++	if (unlikely(rx_mcl[i].result != i))
++		panic("Unable to reduce memory reservation\n");
++
++	/* Above is a suitable barrier to ensure backend will see requests. */
++	np->rx.req_prod_pvt = req_prod + i;
++	RING_PUSH_REQUESTS(&np->rx);
++}
++
++
++static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	unsigned short id;
++	struct netfront_info *np = netdev_priv(dev);
++	netif_tx_request_t *tx;
++	RING_IDX i;
++	grant_ref_t ref;
++	unsigned long mfn;
++	int notify;
++
++	if (unlikely(np->tx_full)) {
++		printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
++		       dev->name);
++		netif_stop_queue(dev);
++		goto drop;
++	}
++
++	if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
++		     PAGE_SIZE)) {
++		struct sk_buff *nskb;
++		if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
++			goto drop;
++		skb_put(nskb, skb->len);
++		memcpy(nskb->data, skb->data, skb->len);
++		nskb->dev = skb->dev;
++		dev_kfree_skb(skb);
++		skb = nskb;
++	}
++    
++	spin_lock_irq(&np->tx_lock);
++
++	if (np->backend_state != BEST_CONNECTED) {
++		spin_unlock_irq(&np->tx_lock);
++		goto drop;
++	}
++
++	i = np->tx.req_prod_pvt;
++
++	id = GET_ID_FROM_FREELIST(np->tx_skbs);
++	np->tx_skbs[id] = skb;
++
++	tx = RING_GET_REQUEST(&np->tx, i);
++
++	tx->id   = id;
++	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++	BUG_ON((signed short)ref < 0);
++	mfn = virt_to_mfn(skb->data);
++	gnttab_grant_foreign_access_ref(
++		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
++	tx->gref = np->grant_tx_ref[id] = ref;
++	tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
++	tx->size = skb->len;
++	tx->flags = (skb->ip_summed == CHECKSUM_HW) ? NETTXF_csum_blank : 0;
++
++	np->tx.req_prod_pvt = i + 1;
++	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
++	if (notify)
++		notify_remote_via_irq(np->irq);
++
++	network_tx_buf_gc(dev);
++
++	if (RING_FULL(&np->tx)) {
++		np->tx_full = 1;
++		netif_stop_queue(dev);
++	}
++
++	spin_unlock_irq(&np->tx_lock);
++
++	np->stats.tx_bytes += skb->len;
++	np->stats.tx_packets++;
++
++	return 0;
++
++ drop:
++	np->stats.tx_dropped++;
++	dev_kfree_skb(skb);
++	return 0;
++}
++
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++	struct net_device *dev = dev_id;
++	struct netfront_info *np = netdev_priv(dev);
++	unsigned long flags;
++
++	spin_lock_irqsave(&np->tx_lock, flags);
++	network_tx_buf_gc(dev);
++	spin_unlock_irqrestore(&np->tx_lock, flags);
++
++	if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx) &&
++	    (np->user_state == UST_OPEN))
++		netif_rx_schedule(dev);
++
++	return IRQ_HANDLED;
++}
++
++
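++/*
++ * NAPI poll handler: consume RX responses, remap the transferred pages
++ * with a single multicall, then pass the packets up the network stack.
++ */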
++static int netif_poll(struct net_device *dev, int *pbudget)
++{
++	struct netfront_info *np = netdev_priv(dev);
++	struct sk_buff *skb, *nskb;
++	netif_rx_response_t *rx;
++	RING_IDX i, rp;
++	mmu_update_t *mmu = rx_mmu;
++	multicall_entry_t *mcl = rx_mcl;
++	int work_done, budget, more_to_do = 1;
++	struct sk_buff_head rxq;
++	unsigned long flags;
++	unsigned long mfn;
++	grant_ref_t ref;
++
++	spin_lock(&np->rx_lock);
++
++	if (np->backend_state != BEST_CONNECTED) {
++		spin_unlock(&np->rx_lock);
++		return 0;
++	}
++
++	skb_queue_head_init(&rxq);
++
++	if ((budget = *pbudget) > dev->quota)
++		budget = dev->quota;
++	rp = np->rx.sring->rsp_prod;
++	rmb(); /* Ensure we see queued responses up to 'rp'. */
++
++	for (i = np->rx.rsp_cons, work_done = 0; 
++	     (i != rp) && (work_done < budget);
++	     i++, work_done++) {
++		rx = RING_GET_RESPONSE(&np->rx, i);
++
++		/*
++                 * This definitely indicates a bug, either in this driver or
++                 * in the backend driver. In future this should flag the bad
++                 * situation to the system controller to reboot the backend.
++                 */
++		if ((ref = np->grant_rx_ref[rx->id]) == GRANT_INVALID_REF) {
++			WPRINTK("Bad rx response id %d.\n", rx->id);
++			work_done--;
++			continue;
++		}
++
++		/* Memory pressure, insufficient buffer headroom, ... */
++		if ((mfn = gnttab_end_foreign_transfer_ref(ref)) == 0) {
++			if (net_ratelimit())
++				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
++					rx->id, rx->status);
++			RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
++				rx->id;
++			RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref =
++				ref;
++			np->rx.req_prod_pvt++;
++			RING_PUSH_REQUESTS(&np->rx);
++			work_done--;
++			continue;
++		}
++
++		gnttab_release_grant_reference(&np->gref_rx_head, ref);
++		np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
++
++		skb = np->rx_skbs[rx->id];
++		ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
++
++		/* NB. We handle skb overflow later. */
++		skb->data = skb->head + rx->offset;
++		skb->len  = rx->status;
++		skb->tail = skb->data + skb->len;
++
++		if (rx->flags & NETRXF_csum_valid)
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++		np->stats.rx_packets++;
++		np->stats.rx_bytes += rx->status;
++
++		/* Remap the page. */
++		mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++		mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
++		mmu++;
++		MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
++					pfn_pte_ma(mfn, PAGE_KERNEL), 0);
++		mcl++;
++
++		set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT, mfn);
++
++		__skb_queue_tail(&rxq, skb);
++	}
++
++	/* Some pages are no longer absent... */
++	balloon_update_driver_allowance(-work_done);
++
++	/* Do all the remapping work, and M2P updates, in one big hypercall. */
++	if (likely((mcl - rx_mcl) != 0)) {
++		mcl->op = __HYPERVISOR_mmu_update;
++		mcl->args[0] = (unsigned long)rx_mmu;
++		mcl->args[1] = mmu - rx_mmu;
++		mcl->args[2] = 0;
++		mcl->args[3] = DOMID_SELF;
++		mcl++;
++		(void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
++	}
++
++	while ((skb = __skb_dequeue(&rxq)) != NULL) {
++		if (skb->len > (dev->mtu + ETH_HLEN)) {
++			if (net_ratelimit())
++				printk(KERN_INFO "Received packet too big for "
++				       "MTU (%d > %d)\n",
++				       skb->len - ETH_HLEN, dev->mtu);
++			skb->len  = 0;
++			skb->tail = skb->data;
++			init_skb_shinfo(skb);
++			dev_kfree_skb(skb);
++			continue;
++		}
++
++		/*
++		 * Enough room in skbuff for the data we were passed? Also,
++		 * Linux expects at least 16 bytes headroom in each rx buffer.
++		 */
++		if (unlikely(skb->tail > skb->end) || 
++		    unlikely((skb->data - skb->head) < 16)) {
++			if (net_ratelimit()) {
++				if (skb->tail > skb->end)
++					printk(KERN_INFO "Received packet "
++					       "is %zd bytes beyond tail.\n",
++					       skb->tail - skb->end);
++				else
++					printk(KERN_INFO "Received packet "
++					       "is %zd bytes before head.\n",
++					       16 - (skb->data - skb->head));
++			}
++
++			nskb = alloc_xen_skb(skb->len + 2);
++			if (nskb != NULL) {
++				skb_reserve(nskb, 2);
++				skb_put(nskb, skb->len);
++				memcpy(nskb->data, skb->data, skb->len);
++				nskb->dev = skb->dev;
++				nskb->ip_summed = skb->ip_summed;
++			}
++
++			/* Reinitialise and then destroy the old skbuff. */
++			skb->len  = 0;
++			skb->tail = skb->data;
++			init_skb_shinfo(skb);
++			dev_kfree_skb(skb);
++
++			/* Switch old for new, if we copied the buffer. */
++			if ((skb = nskb) == NULL)
++				continue;
++		}
++        
++		/* Set the shinfo area, which is hidden behind the data. */
++		init_skb_shinfo(skb);
++		/* Ethernet work: Delayed to here as it peeks the header. */
++		skb->protocol = eth_type_trans(skb, dev);
++
++		/* Pass it up. */
++		netif_receive_skb(skb);
++		dev->last_rx = jiffies;
++	}
++
++	np->rx.rsp_cons = i;
++
++	/* If we get a callback with very few responses, reduce fill target. */
++	/* NB. Note exponential increase, linear decrease. */
++	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
++	     ((3*np->rx_target) / 4)) &&
++	    (--np->rx_target < np->rx_min_target))
++		np->rx_target = np->rx_min_target;
++
++	network_alloc_rx_buffers(dev);
++
++	*pbudget   -= work_done;
++	dev->quota -= work_done;
++
++	if (work_done < budget) {
++		local_irq_save(flags);
++
++		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
++		if (!more_to_do)
++			__netif_rx_complete(dev);
++
++		local_irq_restore(flags);
++	}
++
++	spin_unlock(&np->rx_lock);
++
++	return more_to_do;
++}
++
++
++static int network_close(struct net_device *dev)
++{
++	struct netfront_info *np = netdev_priv(dev);
++	np->user_state = UST_CLOSED;
++	netif_stop_queue(np->netdev);
++	return 0;
++}
++
++
++static struct net_device_stats *network_get_stats(struct net_device *dev)
++{
++	struct netfront_info *np = netdev_priv(dev);
++	return &np->stats;
++}
++
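++/*
++ * (Re)establish the connection to the backend: rebuild the TX and RX
++ * rings from any outstanding skbuffs and give the backend a kick.
++ */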
++static void network_connect(struct net_device *dev)
++{
++	struct netfront_info *np;
++	int i, requeue_idx;
++	netif_tx_request_t *tx;
++	struct sk_buff *skb;
++
++	np = netdev_priv(dev);
++	spin_lock_irq(&np->tx_lock);
++	spin_lock(&np->rx_lock);
++
++	/* Recovery procedure: */
++
++	/* Step 1: Reinitialise variables. */
++	np->tx_full = 0;
++
++	/*
++	 * Step 2: Rebuild the RX and TX ring contents.
++	 * NB. We could just free the queued TX packets now but we hope
++	 * that sending them out might do some good.  We have to rebuild
++	 * the RX ring because some of our pages are currently flipped out
++	 * so we can't just free the RX skbs.
++	 * NB2. Freelist index entries are always going to be less than
++	 *  __PAGE_OFFSET, whereas pointers to skbs will always be equal or
++	 * greater than __PAGE_OFFSET: we use this property to distinguish
++	 * them.
++	 */
++
++	/*
++	 * Rebuild the TX buffer freelist and the TX ring itself.
++	 * NB. This reorders packets.  We could keep more private state
++	 * to avoid this but maybe it doesn't matter so much given the
++	 * interface has been down.
++	 */
++	for (requeue_idx = 0, i = 1; i <= NET_TX_RING_SIZE; i++) {
++		if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
++			continue;
++
++		skb = np->tx_skbs[i];
++
++		tx = RING_GET_REQUEST(&np->tx, requeue_idx);
++		requeue_idx++;
++
++		tx->id = i;
++		gnttab_grant_foreign_access_ref(
++			np->grant_tx_ref[i], np->xbdev->otherend_id, 
++			virt_to_mfn(np->tx_skbs[i]->data),
++			GNTMAP_readonly); 
++		tx->gref = np->grant_tx_ref[i];
++		tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
++		tx->size = skb->len;
++		tx->flags = (skb->ip_summed == CHECKSUM_HW) ?
++			NETTXF_csum_blank : 0;
++
++		np->stats.tx_bytes += skb->len;
++		np->stats.tx_packets++;
++	}
++
++	np->tx.req_prod_pvt = requeue_idx;
++	RING_PUSH_REQUESTS(&np->tx);
++
++	/* Rebuild the RX buffer freelist and the RX ring itself. */
++	for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) { 
++		if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
++			continue;
++		gnttab_grant_foreign_transfer_ref(
++			np->grant_rx_ref[i], np->xbdev->otherend_id);
++		RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
++			np->grant_rx_ref[i];
++		RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
++		requeue_idx++; 
++	}
++
++	np->rx.req_prod_pvt = requeue_idx;
++	RING_PUSH_REQUESTS(&np->rx);
++
++	/*
++	 * Step 3: All public and private state should now be sane.  Get
++	 * ready to start sending and receiving packets and give the driver
++	 * domain a kick because we've probably just requeued some
++	 * packets.
++	 */
++	np->backend_state = BEST_CONNECTED;
++	notify_remote_via_irq(np->irq);
++	network_tx_buf_gc(dev);
++
++	if (np->user_state == UST_OPEN)
++		netif_start_queue(dev);
++
++	spin_unlock(&np->rx_lock);
++	spin_unlock_irq(&np->tx_lock);
++}
++
++static void show_device(struct netfront_info *np)
++{
++#ifdef DEBUG
++	if (np) {
++		IPRINTK("<vif handle=%u %s(%s) evtchn=%u tx=%p rx=%p>\n",
++			np->handle,
++			be_state_name[np->backend_state],
++			np->user_state ? "open" : "closed",
++			np->evtchn,
++			np->tx,
++			np->rx);
++	} else {
++		IPRINTK("<vif NULL>\n");
++	}
++#endif
++}
++
++static void netif_uninit(struct net_device *dev)
++{
++	struct netfront_info *np = netdev_priv(dev);
++	gnttab_free_grant_references(np->gref_tx_head);
++	gnttab_free_grant_references(np->gref_rx_head);
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++	.get_tx_csum = ethtool_op_get_tx_csum,
++	.set_tx_csum = ethtool_op_set_tx_csum,
++};
++
++/** Create a network device.
++ * @param handle device handle
++ * @param val return parameter for created device
++ * @return 0 on success, error code otherwise
++ */
++static int create_netdev(int handle, struct xenbus_device *dev,
++			 struct net_device **val)
++{
++	int i, err = 0;
++	struct net_device *netdev = NULL;
++	struct netfront_info *np = NULL;
++
++	if ((netdev = alloc_etherdev(sizeof(struct netfront_info))) == NULL) {
++		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
++		       __FUNCTION__);
++		err = -ENOMEM;
++		goto exit;
++	}
++
++	np                = netdev_priv(netdev);
++	np->backend_state = BEST_CLOSED;
++	np->user_state    = UST_CLOSED;
++	np->handle        = handle;
++	np->xbdev         = dev;
++
++	spin_lock_init(&np->tx_lock);
++	spin_lock_init(&np->rx_lock);
++
++	skb_queue_head_init(&np->rx_batch);
++	np->rx_target     = RX_DFL_MIN_TARGET;
++	np->rx_min_target = RX_DFL_MIN_TARGET;
++	np->rx_max_target = RX_MAX_TARGET;
++
++	init_timer(&np->rx_refill_timer);
++	np->rx_refill_timer.data = (unsigned long)netdev;
++	np->rx_refill_timer.function = rx_refill_timeout;
++
++	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
++	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
++		np->tx_skbs[i] = (void *)((unsigned long) i+1);
++		np->grant_tx_ref[i] = GRANT_INVALID_REF;
++	}
++
++	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
++		np->rx_skbs[i] = (void *)((unsigned long) i+1);
++		np->grant_rx_ref[i] = GRANT_INVALID_REF;
++	}
++
++	/* A grant for every tx ring slot */
++	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
++					  &np->gref_tx_head) < 0) {
++		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
++		err = -ENOMEM;
++		goto exit;
++	}
++	/* A grant for every rx ring slot */
++	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
++					  &np->gref_rx_head) < 0) {
++		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
++		gnttab_free_grant_references(np->gref_tx_head);
++		err = -ENOMEM;
++		goto exit;
++	}
++
++	netdev->open            = network_open;
++	netdev->hard_start_xmit = network_start_xmit;
++	netdev->stop            = network_close;
++	netdev->get_stats       = network_get_stats;
++	netdev->poll            = netif_poll;
++	netdev->uninit          = netif_uninit;
++	netdev->weight          = 64;
++	netdev->features        = NETIF_F_IP_CSUM;
++
++	SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
++	SET_MODULE_OWNER(netdev);
++	SET_NETDEV_DEV(netdev, &dev->dev);
++    
++	if ((err = register_netdev(netdev)) != 0) {
++		printk(KERN_WARNING "%s> register_netdev err=%d\n",
++		       __FUNCTION__, err);
++		goto exit_free_grefs;
++	}
++
++	if ((err = xennet_proc_addif(netdev)) != 0) {
++		unregister_netdev(netdev);
++		goto exit_free_grefs;
++	}
++
++	np->netdev = netdev;
++
++ exit:
++	if (err != 0)
++		kfree(netdev);
++	else if (val != NULL)
++		*val = netdev;
++	return err;
++
++ exit_free_grefs:
++	gnttab_free_grant_references(np->gref_tx_head);
++	gnttab_free_grant_references(np->gref_rx_head);
++	goto exit;
++}
++
++/*
++ * We use this notifier to send out a fake ARP reply to reset switches and
++ * router ARP caches when an IP interface is brought up on a VIF.
++ */
++static int 
++inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
++{
++	struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
++	struct net_device *dev = ifa->ifa_dev->dev;
++
++	/* UP event and is it one of our devices? */
++	if (event == NETDEV_UP && dev->open == network_open)
++		(void)send_fake_arp(dev);
++        
++	return NOTIFY_DONE;
++}
++
++
++/* ** Close down ** */
++
++
++/**
++ * Handle the change of state of the backend to Closing.  We must delete our
++ * device-layer structures now, to ensure that writes are flushed through to
++ * the backend.  Once this is done, we can switch to Closed in
++ * acknowledgement.
++ */
++static void netfront_closing(struct xenbus_device *dev)
++{
++	struct netfront_info *info = dev->data;
++
++	DPRINTK("netfront_closing: %s removed\n", dev->nodename);
++
++	close_netdev(info);
++
++	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
++}
++
++
++static int netfront_remove(struct xenbus_device *dev)
++{
++	struct netfront_info *info = dev->data;
++
++	DPRINTK("%s\n", dev->nodename);
++
++	netif_disconnect_backend(info);
++	free_netdev(info->netdev);
++
++	return 0;
++}
++
++
++static void close_netdev(struct netfront_info *info)
++{
++	spin_lock_irq(&info->netdev->xmit_lock);
++	netif_stop_queue(info->netdev);
++	spin_unlock_irq(&info->netdev->xmit_lock);
++
++#ifdef CONFIG_PROC_FS
++	xennet_proc_delif(info->netdev);
++#endif
++
++	del_timer_sync(&info->rx_refill_timer);
++
++	unregister_netdev(info->netdev);
++}
++
++
++static void netif_disconnect_backend(struct netfront_info *info)
++{
++	/* Stop old i/f to prevent errors whilst we rebuild the state. */
++	spin_lock_irq(&info->tx_lock);
++	spin_lock(&info->rx_lock);
++	info->backend_state = BEST_DISCONNECTED;
++	spin_unlock(&info->rx_lock);
++	spin_unlock_irq(&info->tx_lock);
++
++	if (info->irq)
++		unbind_from_irqhandler(info->irq, info->netdev);
++	info->evtchn = info->irq = 0;
++
++	end_access(info->tx_ring_ref, info->tx.sring);
++	end_access(info->rx_ring_ref, info->rx.sring);
++	info->tx_ring_ref = GRANT_INVALID_REF;
++	info->rx_ring_ref = GRANT_INVALID_REF;
++	info->tx.sring = NULL;
++	info->rx.sring = NULL;
++}
++
++
++static void netif_free(struct netfront_info *info)
++{
++	close_netdev(info);
++	netif_disconnect_backend(info);
++	free_netdev(info->netdev);
++}
++
++
++static void end_access(int ref, void *page)
++{
++	if (ref != GRANT_INVALID_REF)
++		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
++}
++
++
++/* ** Driver registration ** */
++
++
++static struct xenbus_device_id netfront_ids[] = {
++	{ "vif" },
++	{ "" }
++};
++
++
++static struct xenbus_driver netfront = {
++	.name = "vif",
++	.owner = THIS_MODULE,
++	.ids = netfront_ids,
++	.probe = netfront_probe,
++	.remove = netfront_remove,
++	.resume = netfront_resume,
++	.otherend_changed = backend_changed,
++};
++
++
++static struct notifier_block notifier_inetdev = {
++	.notifier_call  = inetdev_notify,
++	.next           = NULL,
++	.priority       = 0
++};
++
++static int __init netif_init(void)
++{
++	int err = 0;
++
++	if (xen_start_info->flags & SIF_INITDOMAIN)
++		return 0;
++
++	if ((err = xennet_proc_init()) != 0)
++		return err;
++
++	IPRINTK("Initialising virtual ethernet driver.\n");
++
++	(void)register_inetaddr_notifier(&notifier_inetdev);
++
++	return xenbus_register_frontend(&netfront);
++}
++module_init(netif_init);
++
++
++static void netif_exit(void)
++{
++	unregister_inetaddr_notifier(&notifier_inetdev);
++
++	return xenbus_unregister_driver(&netfront);
++}
++module_exit(netif_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++ 
++ 
++/* ** /proc **/
++
++
++#ifdef CONFIG_PROC_FS
++
++#define TARGET_MIN 0UL
++#define TARGET_MAX 1UL
++#define TARGET_CUR 2UL
++
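++/*
++ * /proc read handler for the rxbuf_{min,max,cur} entries; the entry to
++ * report is encoded in the low two bits of 'data'.
++ */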
++static int xennet_proc_read(
++	char *page, char **start, off_t off, int count, int *eof, void *data)
++{
++	struct net_device *dev =
++		(struct net_device *)((unsigned long)data & ~3UL);
++	struct netfront_info *np = netdev_priv(dev);
++	int len = 0, which_target = (long)data & 3;
++    
++	switch (which_target)
++	{
++	case TARGET_MIN:
++		len = sprintf(page, "%d\n", np->rx_min_target);
++		break;
++	case TARGET_MAX:
++		len = sprintf(page, "%d\n", np->rx_max_target);
++		break;
++	case TARGET_CUR:
++		len = sprintf(page, "%d\n", np->rx_target);
++		break;
++	}
++
++	*eof = 1;
++	return len;
++}
++
++static int xennet_proc_write(
++	struct file *file, const char __user *buffer,
++	unsigned long count, void *data)
++{
++	struct net_device *dev =
++		(struct net_device *)((unsigned long)data & ~3UL);
++	struct netfront_info *np = netdev_priv(dev);
++	int which_target = (long)data & 3;
++	char string[64];
++	long target;
++
++	if (!capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	if (count <= 1)
++		return -EBADMSG; /* runt */
++	if (count > sizeof(string))
++		return -EFBIG;   /* too long */
++
++	if (copy_from_user(string, buffer, count))
++		return -EFAULT;
++	string[sizeof(string)-1] = '\0';
++
++	target = simple_strtol(string, NULL, 10);
++	if (target < RX_MIN_TARGET)
++		target = RX_MIN_TARGET;
++	if (target > RX_MAX_TARGET)
++		target = RX_MAX_TARGET;
++
++	spin_lock(&np->rx_lock);
++
++	switch (which_target)
++	{
++	case TARGET_MIN:
++		if (target > np->rx_max_target)
++			np->rx_max_target = target;
++		np->rx_min_target = target;
++		if (target > np->rx_target)
++			np->rx_target = target;
++		break;
++	case TARGET_MAX:
++		if (target < np->rx_min_target)
++			np->rx_min_target = target;
++		np->rx_max_target = target;
++		if (target < np->rx_target)
++			np->rx_target = target;
++		break;
++	case TARGET_CUR:
++		break;
++	}
++
++	network_alloc_rx_buffers(dev);
++
++	spin_unlock(&np->rx_lock);
++
++	return count;
++}
++
++static int xennet_proc_init(void)
++{
++	if (proc_mkdir("xen/net", NULL) == NULL)
++		return -ENOMEM;
++	return 0;
++}
++
++static int xennet_proc_addif(struct net_device *dev)
++{
++	struct proc_dir_entry *dir, *min, *max, *cur;
++	char name[30];
++
++	sprintf(name, "xen/net/%s", dev->name);
++
++	dir = proc_mkdir(name, NULL);
++	if (!dir)
++		goto nomem;
++
++	min = create_proc_entry("rxbuf_min", 0644, dir);
++	max = create_proc_entry("rxbuf_max", 0644, dir);
++	cur = create_proc_entry("rxbuf_cur", 0444, dir);
++	if (!min || !max || !cur)
++		goto nomem;
++
++	min->read_proc  = xennet_proc_read;
++	min->write_proc = xennet_proc_write;
++	min->data       = (void *)((unsigned long)dev | TARGET_MIN);
++
++	max->read_proc  = xennet_proc_read;
++	max->write_proc = xennet_proc_write;
++	max->data       = (void *)((unsigned long)dev | TARGET_MAX);
++
++	cur->read_proc  = xennet_proc_read;
++	cur->write_proc = xennet_proc_write;
++	cur->data       = (void *)((unsigned long)dev | TARGET_CUR);
++
++	return 0;
++
++ nomem:
++	xennet_proc_delif(dev);
++	return -ENOMEM;
++}
++
++static void xennet_proc_delif(struct net_device *dev)
++{
++	char name[30];
++
++	sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
++	remove_proc_entry(name, NULL);
++
++	sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
++	remove_proc_entry(name, NULL);
++
++	sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
++	remove_proc_entry(name, NULL);
++
++	sprintf(name, "xen/net/%s", dev->name);
++	remove_proc_entry(name, NULL);
++}
++
++#endif
++
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/privcmd/Makefile linux-2.6.12-xen/drivers/xen/privcmd/Makefile
+--- pristine-linux-2.6.12/drivers/xen/privcmd/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/privcmd/Makefile	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,2 @@
++
++obj-y	:= privcmd.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/privcmd/privcmd.c linux-2.6.12-xen/drivers/xen/privcmd/privcmd.c
+--- pristine-linux-2.6.12/drivers/xen/privcmd/privcmd.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/privcmd/privcmd.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,279 @@
++/******************************************************************************
++ * privcmd.c
++ * 
++ * Interface to privileged domain-0 commands.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
++ */
++
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/swap.h>
++#include <linux/smp_lock.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/kthread.h>
++#include <asm/hypervisor.h>
++
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/linux-public/privcmd.h>
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/dom0_ops.h>
++#include <asm-xen/xen_proc.h>
++
++static struct proc_dir_entry *privcmd_intf;
++static struct proc_dir_entry *capabilities_intf;
++
++static int privcmd_ioctl(struct inode *inode, struct file *file,
++                         unsigned int cmd, unsigned long data)
++{
++	int ret = -ENOSYS;
++	void __user *udata = (void __user *) data;
++
++	switch (cmd) {
++	case IOCTL_PRIVCMD_HYPERCALL: {
++		privcmd_hypercall_t hypercall;
++  
++		if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
++			return -EFAULT;
++
++#if defined(__i386__)
++		__asm__ __volatile__ (
++			"pushl %%ebx; pushl %%ecx; pushl %%edx; "
++			"pushl %%esi; pushl %%edi; "
++			"movl  4(%%eax),%%ebx ;"
++			"movl  8(%%eax),%%ecx ;"
++			"movl 12(%%eax),%%edx ;"
++			"movl 16(%%eax),%%esi ;"
++			"movl 20(%%eax),%%edi ;"
++			"movl   (%%eax),%%eax ;"
++			"shll $5,%%eax ;"
++			"addl $hypercall_page,%%eax ;"
++			"call *%%eax ;"
++			"popl %%edi; popl %%esi; popl %%edx; "
++			"popl %%ecx; popl %%ebx"
++			: "=a" (ret) : "0" (&hypercall) : "memory" );
++#elif defined (__x86_64__)
++		{
++			long ign1, ign2, ign3;
++			__asm__ __volatile__ (
++				"movq %8,%%r10; movq %9,%%r8;"
++				"shlq $5,%%rax ;"
++				"addq $hypercall_page,%%rax ;"
++				"call *%%rax"
++				: "=a" (ret), "=D" (ign1),
++				  "=S" (ign2), "=d" (ign3)
++				: "0" ((unsigned long)hypercall.op), 
++				"1" ((unsigned long)hypercall.arg[0]), 
++				"2" ((unsigned long)hypercall.arg[1]),
++				"3" ((unsigned long)hypercall.arg[2]), 
++				"g" ((unsigned long)hypercall.arg[3]),
++				"g" ((unsigned long)hypercall.arg[4])
++				: "r8", "r10", "memory" );
++		}
++#elif defined (__ia64__)
++		__asm__ __volatile__ (
++			";; mov r14=%2; mov r15=%3; "
++			"mov r16=%4; mov r17=%5; mov r18=%6;"
++			"mov r2=%1; break 0x1000;; mov %0=r8 ;;"
++			: "=r" (ret)
++			: "r" (hypercall.op),
++			"r" (hypercall.arg[0]),
++			"r" (hypercall.arg[1]),
++			"r" (hypercall.arg[2]),
++			"r" (hypercall.arg[3]),
++			"r" (hypercall.arg[4])
++			: "r14","r15","r16","r17","r18","r2","r8","memory");
++#endif
++	}
++	break;
++
++#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
++	case IOCTL_PRIVCMD_MMAP: {
++#define PRIVCMD_MMAP_SZ 32
++		privcmd_mmap_t mmapcmd;
++		privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ];
++		privcmd_mmap_entry_t __user *p;
++		int i, rc;
++
++		if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
++			return -EFAULT;
++
++		p = mmapcmd.entry;
++
++		for (i = 0; i < mmapcmd.num;
++		     i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
++			int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
++				PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
++
++			if (copy_from_user(&msg, p,
++					   n*sizeof(privcmd_mmap_entry_t)))
++				return -EFAULT;
++     
++			for (j = 0; j < n; j++) {
++				struct vm_area_struct *vma = 
++					find_vma( current->mm, msg[j].va );
++
++				if (!vma)
++					return -EINVAL;
++
++				if (msg[j].va > PAGE_OFFSET)
++					return -EINVAL;
++
++				if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
++				    > vma->vm_end )
++					return -EINVAL;
++
++				if ((rc = direct_remap_pfn_range(
++					vma,
++					msg[j].va&PAGE_MASK, 
++					msg[j].mfn, 
++					msg[j].npages<<PAGE_SHIFT, 
++					vma->vm_page_prot,
++					mmapcmd.dom)) < 0)
++					return rc;
++			}
++		}
++		ret = 0;
++	}
++	break;
++
++	case IOCTL_PRIVCMD_MMAPBATCH: {
++		mmu_update_t u;
++		privcmd_mmapbatch_t m;
++		struct vm_area_struct *vma = NULL;
++		unsigned long __user *p;
++		unsigned long addr, mfn; 
++		uint64_t ptep;
++		int i;
++
++		if (copy_from_user(&m, udata, sizeof(m))) {
++			ret = -EFAULT;
++			goto batch_err;
++		}
++
++		vma = find_vma( current->mm, m.addr );
++		if (!vma) {
++			ret = -EINVAL;
++			goto batch_err;
++		}
++
++		if (m.addr > PAGE_OFFSET) {
++			ret = -EFAULT;
++			goto batch_err;
++		}
++
++		if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
++			ret = -EFAULT;
++			goto batch_err;
++		}
++
++		p = m.arr;
++		addr = m.addr;
++		for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
++			if (get_user(mfn, p))
++				return -EFAULT;
++#ifdef __ia64__
++			ret = remap_pfn_range(vma,
++					      addr&PAGE_MASK,
++					      mfn,
++					      1<<PAGE_SHIFT,
++					      vma->vm_page_prot);
++			if (ret < 0)
++			    goto batch_err;
++#else
++
++			ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
++			if (ret)
++				goto batch_err;
++
++			u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
++			u.ptr = ptep;
++
++			if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
++				put_user(0xF0000000 | mfn, p);
++#endif
++		}
++
++		ret = 0;
++		break;
++
++	batch_err:
++		printk("batch_err ret=%d vma=%p addr=%lx "
++		       "num=%d arr=%p %lx-%lx\n", 
++		       ret, vma, m.addr, m.num, m.arr,
++		       vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
++		break;
++	}
++	break;
++#endif
++
++	default:
++		ret = -EINVAL;
++		break;
++	}
++
++	return ret;
++}
++
++static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
++{
++	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
++	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
++
++	return 0;
++}
++
++static struct file_operations privcmd_file_ops = {
++	.ioctl = privcmd_ioctl,
++	.mmap  = privcmd_mmap,
++};
++
++static int capabilities_read(char *page, char **start, off_t off,
++                        int count, int *eof, void *data)
++{
++	int len = 0;
++	*page = 0;
++
++	if (xen_start_info->flags & SIF_INITDOMAIN)
++		len = sprintf( page, "control_d\n" );
++
++	*eof = 1;
++	return len;
++}
++
++static int __init privcmd_init(void)
++{
++	privcmd_intf = create_xen_proc_entry("privcmd", 0400);
++	if (privcmd_intf != NULL)
++		privcmd_intf->proc_fops = &privcmd_file_ops;
++
++	capabilities_intf = create_xen_proc_entry("capabilities", 0400 );
++	if (capabilities_intf != NULL)
++		capabilities_intf->read_proc = capabilities_read;
++
++	return 0;
++}
++
++__initcall(privcmd_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/common.h linux-2.6.12-xen/drivers/xen/tpmback/common.h
+--- pristine-linux-2.6.12/drivers/xen/tpmback/common.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/tpmback/common.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,89 @@
++/******************************************************************************
++ * drivers/xen/tpmback/common.h
++ */
++
++#ifndef __NETIF__BACKEND__COMMON_H__
++#define __NETIF__BACKEND__COMMON_H__
++
++#include <linux/config.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/driver_util.h>
++#include <asm-xen/xen-public/grant_table.h>
++#include <asm-xen/xen-public/io/tpmif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++
++#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
++                                    __FILE__ , __LINE__ , ## _a )
++
++typedef struct tpmif_st {
++	struct list_head tpmif_list;
++	/* Unique identifier for this interface. */
++	domid_t domid;
++	unsigned int handle;
++
++	/* Physical parameters of the comms window. */
++	unsigned int evtchn;
++	unsigned int irq;
++
++	/* The shared rings and indexes. */
++	tpmif_tx_interface_t *tx;
++	struct vm_struct *tx_area;
++
++	/* Miscellaneous private stuff. */
++	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
++	int active;
++
++	struct tpmif_st *hash_next;
++	struct list_head list;	/* scheduling list */
++	atomic_t refcnt;
++
++	long int tpm_instance;
++	unsigned long mmap_vstart;
++
++	struct work_struct work;
++
++	grant_handle_t shmem_handle;
++	grant_ref_t shmem_ref;
++} tpmif_t;
++
++void tpmif_disconnect_complete(tpmif_t * tpmif);
++tpmif_t *tpmif_find(domid_t domid, long int instance);
++void tpmif_interface_init(void);
++void tpmif_schedule_work(tpmif_t * tpmif);
++void tpmif_deschedule_work(tpmif_t * tpmif);
++void tpmif_xenbus_init(void);
++int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
++irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domain, u32 instance);
++int tpmif_vtpm_close(u32 instance);
++
++int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
++
++#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define tpmif_put(_b)                             \
++    do {                                          \
++        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
++            tpmif_disconnect_complete(_b);        \
++    } while (0)
++
++
++extern int num_frontends;
++
++#define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE))
++
++#endif /* __TPMIF__BACKEND__COMMON_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/interface.c linux-2.6.12-xen/drivers/xen/tpmback/interface.c
+--- pristine-linux-2.6.12/drivers/xen/tpmback/interface.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/tpmback/interface.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,200 @@
++ /*****************************************************************************
++ * drivers/xen/tpmback/interface.c
++ *
++ * Virtual TPM interface management.
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb at us.ibm.com
++ *
++ * This code has been derived from drivers/xen/netback/interface.c
++ * Copyright (c) 2004, Keir Fraser
++ */
++
++#include "common.h"
++#include <asm-xen/balloon.h>
++
++#define TPMIF_HASHSZ (2 << 5)
++#define TPMIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(TPMIF_HASHSZ-1))
++
++static kmem_cache_t *tpmif_cachep;
++int num_frontends = 0;
++
++LIST_HEAD(tpmif_list);
++
++tpmif_t *
++alloc_tpmif(domid_t domid, long int instance)
++{
++	struct page *page;
++	tpmif_t *tpmif;
++
++	tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
++	if (!tpmif)
++		return ERR_PTR(-ENOMEM);
++
++	memset(tpmif, 0, sizeof (*tpmif));
++	tpmif->domid = domid;
++	tpmif->status = DISCONNECTED;
++	tpmif->tpm_instance = instance;
++	atomic_set(&tpmif->refcnt, 1);
++
++	page = balloon_alloc_empty_page_range(TPMIF_TX_RING_SIZE);
++	BUG_ON(page == NULL);
++	tpmif->mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
++
++	list_add(&tpmif->tpmif_list, &tpmif_list);
++	num_frontends++;
++
++	return tpmif;
++}
++
++void
++free_tpmif(tpmif_t * tpmif)
++{
++	num_frontends--;
++	list_del(&tpmif->tpmif_list);
++	kmem_cache_free(tpmif_cachep, tpmif);
++}
++
++tpmif_t *
++tpmif_find(domid_t domid, long int instance)
++{
++	tpmif_t *tpmif;
++
++	list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
++		if (tpmif->tpm_instance == instance) {
++			if (tpmif->domid == domid) {
++				tpmif_get(tpmif);
++				return tpmif;
++			} else {
++				return ERR_PTR(-EEXIST);
++			}
++		}
++	}
++
++	return alloc_tpmif(domid, instance);
++}
++
++static int
++map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
++{
++	int ret;
++	struct gnttab_map_grant_ref op = {
++		.host_addr = (unsigned long)tpmif->tx_area->addr,
++		.flags = GNTMAP_host_map,
++		.ref = shared_page,
++		.dom = tpmif->domid,
++	};
++
++	lock_vm_area(tpmif->tx_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++	unlock_vm_area(tpmif->tx_area);
++	BUG_ON(ret);
++
++	if (op.status) {
++		DPRINTK(" Grant table operation failure !\n");
++		return op.status;
++	}
++
++	tpmif->shmem_ref = shared_page;
++	tpmif->shmem_handle = op.handle;
++
++	return 0;
++}
++
++static void
++unmap_frontend_page(tpmif_t *tpmif)
++{
++	struct gnttab_unmap_grant_ref op;
++	int ret;
++
++	op.host_addr    = (unsigned long)tpmif->tx_area->addr;
++	op.handle       = tpmif->shmem_handle;
++	op.dev_bus_addr = 0;
++
++	lock_vm_area(tpmif->tx_area);
++	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
++	unlock_vm_area(tpmif->tx_area);
++	BUG_ON(ret);
++}
++
++int
++tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
++{
++	int err;
++	evtchn_op_t op = {
++		.cmd = EVTCHNOP_bind_interdomain,
++		.u.bind_interdomain.remote_dom = tpmif->domid,
++		.u.bind_interdomain.remote_port = evtchn };
++
++	if (tpmif->irq) {
++		return 0;
++	}
++
++	if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
++		return -ENOMEM;
++
++	err = map_frontend_page(tpmif, shared_page);
++	if (err) {
++		free_vm_area(tpmif->tx_area);
++		return err;
++	}
++
++	err = HYPERVISOR_event_channel_op(&op);
++	if (err) {
++		unmap_frontend_page(tpmif);
++		free_vm_area(tpmif->tx_area);
++		return err;
++	}
++
++	tpmif->evtchn = op.u.bind_interdomain.local_port;
++
++	tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
++
++	tpmif->irq = bind_evtchn_to_irqhandler(
++		tpmif->evtchn, tpmif_be_int, 0, "tpmif-backend", tpmif);
++	tpmif->shmem_ref = shared_page;
++	tpmif->active = 1;
++
++	return 0;
++}
++
++static void
++__tpmif_disconnect_complete(void *arg)
++{
++	tpmif_t *tpmif = (tpmif_t *) arg;
++
++	if (tpmif->irq)
++		unbind_from_irqhandler(tpmif->irq, tpmif);
++
++	if (tpmif->tx) {
++		unmap_frontend_page(tpmif);
++		free_vm_area(tpmif->tx_area);
++	}
++
++	free_tpmif(tpmif);
++}
++
++void
++tpmif_disconnect_complete(tpmif_t * tpmif)
++{
++	INIT_WORK(&tpmif->work, __tpmif_disconnect_complete, (void *)tpmif);
++	schedule_work(&tpmif->work);
++}
++
++void __init
++tpmif_interface_init(void)
++{
++	tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
++					 0, 0, NULL, NULL);
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/Makefile linux-2.6.12-xen/drivers/xen/tpmback/Makefile
+--- pristine-linux-2.6.12/drivers/xen/tpmback/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/tpmback/Makefile	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,4 @@
++
++obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmbk.o
++
++tpmbk-y += tpmback.o interface.o xenbus.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/tpmback.c linux-2.6.12-xen/drivers/xen/tpmback/tpmback.c
+--- pristine-linux-2.6.12/drivers/xen/tpmback/tpmback.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/tpmback/tpmback.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,1109 @@
++/******************************************************************************
++ * drivers/xen/tpmback/tpmback.c
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb at us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netback/netback.c
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ */
++
++#include "common.h"
++#include <asm-xen/evtchn.h>
++
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/miscdevice.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <asm-xen/xenbus.h>
++#include <asm-xen/xen-public/grant_table.h>
++
++
++/* local data structures */
++struct data_exchange {
++	struct list_head pending_pak;
++	struct list_head current_pak;
++	unsigned int copied_so_far;
++	u8 has_opener;
++	rwlock_t pak_lock;  /* protects all of the previous fields */
++	wait_queue_head_t wait_queue;
++};
++
++struct packet {
++	struct list_head next;
++	unsigned int data_len;
++	u8 *data_buffer;
++	tpmif_t *tpmif;
++	u32 tpm_instance;
++	u8 req_tag;
++	u32 last_read;
++	u8 flags;
++	struct timer_list processing_timer;
++};
++
++enum {
++	PACKET_FLAG_DISCARD_RESPONSE = 1,
++	PACKET_FLAG_CHECK_RESPONSESTATUS = 2,
++};
++
++static struct data_exchange dataex;
++
++/* local function prototypes */
++static int vtpm_queue_packet(struct packet *pak);
++static int _packet_write(struct packet *pak,
++                         const char *data, size_t size,
++                         int userbuffer);
++static void processing_timeout(unsigned long ptr);
++static int  packet_read_shmem(struct packet *pak,
++                              tpmif_t *tpmif,
++                              u32 offset,
++                              char *buffer,
++                              int isuserbuffer,
++                              u32 left);
++
++
++#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE
++
++#define MIN(x,y) ((x) < (y) ? (x) : (y))
++
++
++/***************************************************************
++ Buffer copying
++***************************************************************/
++static inline int
++copy_from_buffer(void *to,
++                 const void *from,
++                 unsigned long size,
++                 int userbuffer)
++{
++	if (userbuffer) {
++		if (copy_from_user(to, from, size))
++			return -EFAULT;
++	} else {
++		memcpy(to, from, size);
++	}
++	return 0;
++}
++
++/***************************************************************
++ Packet-related functions
++***************************************************************/
++
++static struct packet *
++packet_find_instance(struct list_head *head, u32 tpm_instance)
++{
++	struct packet *pak;
++	struct list_head *p;
++	/*
++	 * traverse the list of packets and return the first
++	 * one with the given instance number
++	 */
++	list_for_each(p, head) {
++		pak = list_entry(p, struct packet, next);
++		if (pak->tpm_instance == tpm_instance) {
++			return pak;
++		}
++	}
++	return NULL;
++}
++
++static struct packet *
++packet_find_packet(struct list_head *head, void *packet)
++{
++	struct packet *pak;
++	struct list_head *p;
++	/*
++	 * traverse the list of packets and return the
++	 * one that matches the given packet pointer
++	 */
++	list_for_each(p, head) {
++		pak = list_entry(p, struct packet, next);
++		if (pak == packet) {
++			return pak;
++		}
++	}
++	return NULL;
++}
++
++static struct packet *
++packet_alloc(tpmif_t *tpmif, u32 size, u8 req_tag, u8 flags)
++{
++	struct packet *pak = NULL;
++	pak = kmalloc(sizeof(struct packet),
++                      GFP_KERNEL);
++	if (NULL != pak) {
++		memset(pak, 0x0, sizeof(*pak));
++		if (tpmif) {
++			pak->tpmif = tpmif;
++			pak->tpm_instance = tpmif->tpm_instance;
++		}
++		pak->data_len  = size;
++		pak->req_tag   = req_tag;
++		pak->last_read = 0;
++		pak->flags     = flags;
++
++		/*
++		 * cannot do tpmif_get(tpmif); bad things happen
++		 * on the last tpmif_put()
++		 */
++		init_timer(&pak->processing_timer);
++		pak->processing_timer.function = processing_timeout;
++		pak->processing_timer.data = (unsigned long)pak;
++	}
++	return pak;
++}
++
++static inline void
++packet_reset(struct packet *pak)
++{
++	pak->last_read = 0;
++}
++
++static inline void
++packet_free(struct packet *pak)
++{
++	del_singleshot_timer_sync(&pak->processing_timer);
++	kfree(pak->data_buffer);
++	/*
++	 * cannot do tpmif_put(pak->tpmif); bad things happen
++	 * on the last tpmif_put()
++	 */
++	kfree(pak);
++}
++
++static int
++packet_set(struct packet *pak,
++           const unsigned char *buffer, u32 size)
++{
++	int rc = 0;
++	unsigned char *buf = kmalloc(size, GFP_KERNEL);
++	if (NULL != buf) {
++		pak->data_buffer = buf;
++		memcpy(buf, buffer, size);
++		pak->data_len = size;
++	} else {
++		rc = -ENOMEM;
++	}
++	return rc;
++}
++
++
++/*
++ * Write data to the shared memory and send it to the FE.
++ */
++static int
++packet_write(struct packet *pak,
++             const char *data, size_t size,
++             int userbuffer)
++{
++	int rc = 0;
++
++	DPRINTK("Supposed to send %d bytes to front-end!\n",
++	        size);
++
++	if (0 != (pak->flags & PACKET_FLAG_CHECK_RESPONSESTATUS)) {
++#ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
++		u32 res;
++		if (copy_from_buffer(&res,
++		                     &data[2+4],
++		                     sizeof(res),
++		                     userbuffer)) {
++			return -EFAULT;
++		}
++
++		if (res != 0) {
++			/*
++			 * Close down this device. Should have the
++			 * FE notified about closure.
++			 */
++			if (!pak->tpmif) {
++				return -EFAULT;
++			}
++			pak->tpmif->status = DISCONNECTING;
++		}
++#endif
++	}
++
++	if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
++		/* Don't send a response to this packet. Just acknowledge it. */
++		rc = size;
++	} else {
++		rc = _packet_write(pak, data, size, userbuffer);
++	}
++
++	return rc;
++}
++
++
++static int
++_packet_write(struct packet *pak,
++              const char *data, size_t size,
++              int userbuffer)
++{
++	/*
++	 * Write into the shared memory pages directly
++	 * and send it to the front end.
++	 */
++	tpmif_t *tpmif = pak->tpmif;
++	grant_handle_t handle;
++	int rc = 0;
++	unsigned int i = 0;
++	unsigned int offset = 0;
++
++	if (tpmif == NULL) {
++		return -EFAULT;
++        }
++
++	if (tpmif->status == DISCONNECTED) {
++		return size;
++	}
++
++	while (offset < size && i < TPMIF_TX_RING_SIZE) {
++		unsigned int tocopy;
++		struct gnttab_map_grant_ref map_op;
++		struct gnttab_unmap_grant_ref unmap_op;
++		tpmif_tx_request_t *tx;
++
++		tx = &tpmif->tx->ring[i].req;
++
++		if (0 == tx->addr) {
++			DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
++			return 0;
++		}
++
++		map_op.host_addr  = MMAP_VADDR(tpmif, i);
++		map_op.flags      = GNTMAP_host_map;
++		map_op.ref        = tx->ref;
++		map_op.dom        = tpmif->domid;
++
++		if(unlikely(
++		    HYPERVISOR_grant_table_op(
++		        GNTTABOP_map_grant_ref,
++		        &map_op,
++		        1))) {
++			BUG();
++		}
++
++		handle = map_op.handle;
++
++		if (map_op.status) {
++			DPRINTK(" Grant table operation failure !\n");
++			return 0;
++		}
++		set_phys_to_machine(__pa(MMAP_VADDR(tpmif,i)) >> PAGE_SHIFT,
++			FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT));
++
++		tocopy = MIN(size - offset, PAGE_SIZE);
++
++		if (copy_from_buffer((void *)(MMAP_VADDR(tpmif,i)|
++		                     (tx->addr & ~PAGE_MASK)),
++		                     &data[offset],
++		                     tocopy,
++		                     userbuffer)) {
++			tpmif_put(tpmif);
++			return -EFAULT;
++		}
++		tx->size = tocopy;
++
++		unmap_op.host_addr    = MMAP_VADDR(tpmif, i);
++		unmap_op.handle       = handle;
++		unmap_op.dev_bus_addr = 0;
++
++		if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++		                                      &unmap_op,
++		                                      1))) {
++			BUG();
++		}
++
++		offset += tocopy;
++		i++;
++	}
++
++	rc = offset;
++	DPRINTK("Notifying frontend via irq %d\n",
++	        tpmif->irq);
++	notify_remote_via_irq(tpmif->irq);
++
++	return rc;
++}
++
++/*
++ * Read data from the shared memory and copy it directly into the
++ * provided buffer. Advance the read_last indicator which tells
++ * how many bytes have already been read.
++ */
++static int
++packet_read(struct packet *pak, size_t numbytes,
++            char *buffer, size_t buffersize,
++            int userbuffer)
++{
++	tpmif_t *tpmif = pak->tpmif;
++	/*
++	 * I am supposed to read 'numbytes' of data from the
++	 * buffer.
++	 * The first 4 bytes that are read are the instance number in
++	 * network byte order, after that comes the data from the
++	 * shared memory buffer.
++	 */
++	u32 to_copy;
++	u32 offset = 0;
++	u32 room_left = buffersize;
++	/*
++	 * Ensure that we see the request when we copy it.
++	 */
++	mb();
++
++	if (pak->last_read < 4) {
++		/*
++		 * copy the instance number into the buffer
++		 */
++		u32 instance_no = htonl(pak->tpm_instance);
++		u32 last_read = pak->last_read;
++		to_copy = MIN(4 - last_read, numbytes);
++
++		if (userbuffer) {
++			if (copy_to_user(&buffer[0],
++			                 &(((u8 *)&instance_no)[last_read]),
++			                 to_copy)) {
++				return -EFAULT;
++			}
++		} else {
++			memcpy(&buffer[0],
++			       &(((u8 *)&instance_no)[last_read]),
++			       to_copy);
++		}
++
++		pak->last_read += to_copy;
++		offset += to_copy;
++		room_left -= to_copy;
++	}
++
++	/*
++	 * If the packet has a data buffer appended, read from it...
++	 */
++
++	if (room_left > 0) {
++		if (pak->data_buffer) {
++			u32 to_copy = MIN(pak->data_len - offset, room_left);
++			u32 last_read = pak->last_read - 4;
++			if (userbuffer) {
++				if (copy_to_user(&buffer[offset],
++				                 &pak->data_buffer[last_read],
++				                 to_copy)) {
++					return -EFAULT;
++				}
++			} else {
++				memcpy(&buffer[offset],
++				       &pak->data_buffer[last_read],
++				       to_copy);
++			}
++			pak->last_read += to_copy;
++			offset += to_copy;
++		} else {
++			offset = packet_read_shmem(pak,
++			                           tpmif,
++			                           offset,
++			                           buffer,
++			                           userbuffer,
++			                           room_left);
++		}
++	}
++	return offset;
++}
++
++
++static int
++packet_read_shmem(struct packet *pak,
++                  tpmif_t *tpmif,
++                  u32 offset,
++                  char *buffer,
++                  int isuserbuffer,
++                  u32 room_left) {
++	u32 last_read = pak->last_read - 4;
++	u32 i = (last_read / PAGE_SIZE);
++	u32 pg_offset = last_read & (PAGE_SIZE - 1);
++	u32 to_copy;
++	grant_handle_t handle;
++
++	tpmif_tx_request_t *tx;
++	tx = &tpmif->tx->ring[0].req;
++	/*
++	 * Start copying data at the page with index 'i'
++	 * and within that page at offset 'pg_offset'.
++	 * Copy a maximum of 'room_left' bytes.
++	 */
++	to_copy = MIN(PAGE_SIZE - pg_offset, room_left);
++	while (to_copy > 0) {
++		void *src;
++		struct gnttab_map_grant_ref map_op;
++		struct gnttab_unmap_grant_ref unmap_op;
++
++		tx = &tpmif->tx->ring[i].req;
++
++		map_op.host_addr = MMAP_VADDR(tpmif, i);
++		map_op.flags     = GNTMAP_host_map;
++		map_op.ref       = tx->ref;
++		map_op.dom       = tpmif->domid;
++
++		if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++		                                      &map_op,
++		                                      1))) {
++			BUG();
++		}
++
++		if (map_op.status) {
++			DPRINTK(" Grant table operation failure !\n");
++			return -EFAULT;
++		}
++
++		handle = map_op.handle;
++
++		if (to_copy > tx->size) {
++			/*
++			 * This is the case when the user wants to read more
++			 * than what we have. So we just give him what we
++			 * have.
++			 */
++			to_copy = MIN(tx->size, to_copy);
++		}
++
++		DPRINTK("Copying from mapped memory at %08lx\n",
++		        (unsigned long)(MMAP_VADDR(tpmif,i) |
++			(tx->addr & ~PAGE_MASK)));
++
++		src = (void *)(MMAP_VADDR(tpmif,i) | ((tx->addr & ~PAGE_MASK) + pg_offset));
++		if (isuserbuffer) {
++			if (copy_to_user(&buffer[offset],
++			                 src,
++			                 to_copy)) {
++				return -EFAULT;
++			}
++		} else {
++			memcpy(&buffer[offset],
++			       src,
++			       to_copy);
++		}
++
++
++		DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
++		        tpmif->domid, buffer[offset], buffer[offset+1],buffer[offset+2],buffer[offset+3]);
++
++		unmap_op.host_addr    = MMAP_VADDR(tpmif, i);
++		unmap_op.handle       = handle;
++		unmap_op.dev_bus_addr = 0;
++
++		if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++		                                      &unmap_op,
++		                                      1))) {
++			BUG();
++		}
++
++		offset += to_copy;
++		pg_offset = 0;
++		last_read += to_copy;
++		room_left -= to_copy;
++
++		to_copy = MIN(PAGE_SIZE, room_left);
++		i++;
++	} /* while (to_copy > 0) */
++	/*
++	 * Adjust the last_read pointer
++	 */
++	pak->last_read = last_read + 4;
++	return offset;
++}
++
++
++/* ============================================================
++ * The file layer for reading data from this device
++ * ============================================================
++ */
++static int
++vtpm_op_open(struct inode *inode, struct file *f)
++{
++	int rc = 0;
++	unsigned long flags;
++
++	write_lock_irqsave(&dataex.pak_lock, flags);
++	if (dataex.has_opener == 0) {
++		dataex.has_opener = 1;
++	} else {
++		rc = -EPERM;
++	}
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
++	return rc;
++}
++
++static ssize_t
++vtpm_op_read(struct file *file,
++	     char __user * data, size_t size, loff_t * offset)
++{
++	int ret_size = -ENODATA;
++	struct packet *pak = NULL;
++	unsigned long flags;
++
++	write_lock_irqsave(&dataex.pak_lock, flags);
++
++	if (list_empty(&dataex.pending_pak)) {
++		write_unlock_irqrestore(&dataex.pak_lock, flags);
++		wait_event_interruptible(dataex.wait_queue,
++		                         !list_empty(&dataex.pending_pak));
++		write_lock_irqsave(&dataex.pak_lock, flags);
++	}
++
++	if (!list_empty(&dataex.pending_pak)) {
++		unsigned int left;
++		pak = list_entry(dataex.pending_pak.next, struct packet, next);
++
++		left = pak->data_len - dataex.copied_so_far;
++
++		DPRINTK("size given by app: %d, available: %d\n", size, left);
++
++		ret_size = MIN(size,left);
++
++		ret_size = packet_read(pak, ret_size, data, size, 1);
++		if (ret_size < 0) {
++			ret_size = -EFAULT;
++		} else {
++			DPRINTK("Copied %d bytes to user buffer\n", ret_size);
++
++			dataex.copied_so_far += ret_size;
++			if (dataex.copied_so_far >= pak->data_len + 4) {
++				DPRINTK("All data from this packet given to app.\n");
++				/* All data given to app */
++
++				del_singleshot_timer_sync(&pak->processing_timer);
++				list_del(&pak->next);
++				list_add_tail(&pak->next, &dataex.current_pak);
++				/*
++				 * The more frontends that are handled at the same time,
++				 * the more time we give the TPM to process the request.
++				 */
++				mod_timer(&pak->processing_timer,
++				          jiffies + (num_frontends * 60 * HZ));
++				dataex.copied_so_far = 0;
++			}
++		}
++	}
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++	DPRINTK("Returning result from read to app: %d\n", ret_size);
++
++	return ret_size;
++}
++
++/*
++ * Write operation - only works after a previous read operation!
++ */
++static ssize_t
++vtpm_op_write(struct file *file, const char __user * data, size_t size,
++	      loff_t * offset)
++{
++	struct packet *pak;
++	int rc = 0;
++	unsigned int off = 4;
++	unsigned long flags;
++	u32 instance_no = 0;
++	u32 len_no = 0;
++
++	/*
++	 * Minimum required packet size is:
++	 * 4 bytes for instance number
++	 * 2 bytes for tag
++	 * 4 bytes for paramSize
++	 * 4 bytes for the ordinal
++	 * sum: 14 bytes
++	 */
++	if ( size < off + 10 ) {
++		return -EFAULT;
++	}
++
++	if (copy_from_user(&instance_no,
++	                   (void __user *)&data[0],
++	                   4)) {
++		return -EFAULT;
++	}
++
++	if (copy_from_user(&len_no,
++	                   (void __user *)&data[off+2],
++	                   4) ||
++	    (off + ntohl(len_no) != size)) {
++		return -EFAULT;
++	}
++
++	write_lock_irqsave(&dataex.pak_lock, flags);
++	pak = packet_find_instance(&dataex.current_pak, ntohl(instance_no));
++
++	if (pak == NULL) {
++		write_unlock_irqrestore(&dataex.pak_lock, flags);
++		printk(KERN_ALERT "No associated packet!\n");
++		return -EFAULT;
++	} else {
++		del_singleshot_timer_sync(&pak->processing_timer);
++		list_del(&pak->next);
++	}
++
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++	/*
++	 * The first 'off' bytes hold the instance number.
++	 * Simply strip them from the packet before passing it on.
++	 */
++	size -= off;
++	data = &data[off];
++
++	rc = packet_write(pak, data, size, 1);
++
++	if (rc > 0) {
++		/* account for the first 4 bytes (instance number) stripped above */
++		rc += off;
++	}
++	packet_free(pak);
++	return rc;
++}
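
For context, here is a minimal userspace sketch (an illustration only, not part
of this patch) of how a vTPM manager process might frame the buffer it writes
back through vtpm_op_write() above: a 4-byte instance number in network byte
order, followed by the complete TPM response whose embedded paramSize must
match the remaining length.  The device path /dev/vtpm (opened O_RDWR) and the
helper name are assumptions.

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: prefix a finished TPM response with the vTPM
 * instance number and hand it back to the backend driver. */
static int vtpm_mgr_send_response(int fd, uint32_t instance,
                                  const uint8_t *rsp, size_t rsp_len)
{
	uint8_t buf[4 + rsp_len];          /* 4-byte instance prefix + response */
	uint32_t inst_no = htonl(instance);

	memcpy(buf, &inst_no, 4);          /* bytes 0-3: instance number        */
	memcpy(buf + 4, rsp, rsp_len);     /* bytes 4..: tag(2) paramSize(4) .. */

	/* vtpm_op_write() rejects writes shorter than 14 bytes and writes
	 * whose embedded paramSize does not equal rsp_len. */
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		return -1;
	return 0;
}
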
++
++static int
++vtpm_op_release(struct inode *inode, struct file *file)
++{
++	unsigned long flags;
++	vtpm_release_packets(NULL, 1);
++	write_lock_irqsave(&dataex.pak_lock, flags);
++	dataex.has_opener = 0;
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
++	return 0;
++}
++
++static unsigned int
++vtpm_op_poll(struct file *file, struct poll_table_struct *pts)
++{
++	unsigned int flags = POLLOUT | POLLWRNORM;
++	poll_wait(file, &dataex.wait_queue, pts);
++	if (!list_empty(&dataex.pending_pak)) {
++		flags |= POLLIN | POLLRDNORM;
++	}
++	return flags;
++}
++
++static struct file_operations vtpm_ops = {
++	.owner = THIS_MODULE,
++	.llseek = no_llseek,
++	.open = vtpm_op_open,
++	.read = vtpm_op_read,
++	.write = vtpm_op_write,
++	.release = vtpm_op_release,
++	.poll = vtpm_op_poll,
++};
++
++static struct miscdevice ibmvtpms_miscdevice = {
++	.minor = 225,
++	.name = "vtpm",
++	.fops = &vtpm_ops,
++};
++
++
++/***************************************************************
++ Virtual TPM functions and data structures
++***************************************************************/
++
++static u8 create_cmd[] = {
++        1,193,		/* 0: TPM_TAG_RQU_COMMAND */
++        0,0,0,19,	/* 2: length */
++        0,0,0,0x1,	/* 6: VTPM_ORD_OPEN */
++        0,		/* 10: VTPM type */
++        0,0,0,0,	/* 11: domain id */
++        0,0,0,0		/* 15: instance id */
++};
++
++static u8 destroy_cmd[] = {
++        1,193,		/* 0: TPM_TAG_RQU_COMMAND */
++        0,0,0,14,	/* 2: length */
++        0,0,0,0x2,	/* 6: VTPM_ORD_CLOSE */
++        0,0,0,0		/* 10: instance id */
++};
++
++int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domid, u32 instance)
++{
++	int rc = 0;
++	struct packet *pak;
++
++	pak = packet_alloc(tpmif,
++	                   sizeof(create_cmd),
++	                   create_cmd[0],
++	                   PACKET_FLAG_DISCARD_RESPONSE|
++	                   PACKET_FLAG_CHECK_RESPONSESTATUS);
++	if (pak) {
++		u8 buf[sizeof(create_cmd)];
++		u32 domid_no = htonl((u32)domid);
++		u32 instance_no = htonl(instance);
++		memcpy(buf, create_cmd, sizeof(create_cmd));
++
++		memcpy(&buf[11], &domid_no, sizeof(u32));
++		memcpy(&buf[15], &instance_no, sizeof(u32));
++
++		/* copy the buffer into the packet */
++		rc = packet_set(pak, buf, sizeof(buf));
++
++		if (rc == 0) {
++			pak->tpm_instance = 0;
++			rc = vtpm_queue_packet(pak);
++		}
++		if (rc < 0) {
++			/* could not be queued or built */
++			packet_free(pak);
++		}
++	} else {
++		rc = -ENOMEM;
++	}
++	return rc;
++}
++
++int tpmif_vtpm_close(u32 instid)
++{
++	int rc = 0;
++	struct packet *pak;
++
++	pak = packet_alloc(NULL,
++	                   sizeof(destroy_cmd),
++	                   destroy_cmd[0],
++	                   PACKET_FLAG_DISCARD_RESPONSE);
++	if (pak) {
++		u8 buf[sizeof(destroy_cmd)];
++		u32 instid_no = htonl(instid);
++		memcpy(buf, destroy_cmd, sizeof(destroy_cmd));
++		memcpy(&buf[10], &instid_no, sizeof(u32));
++
++		/* copy the buffer into the packet */
++		rc = packet_set(pak, buf, sizeof(buf));
++
++		if (rc == 0) {
++			pak->tpm_instance = 0;
++			rc = vtpm_queue_packet(pak);
++		}
++		if (rc < 0) {
++			/* could not be queued or built */
++			packet_free(pak);
++		}
++	} else {
++		rc = -ENOMEM;
++	}
++	return rc;
++}
++
++
++/***************************************************************
++ Utility functions
++***************************************************************/
++
++static int
++tpm_send_fail_message(struct packet *pak, u8 req_tag)
++{
++	int rc;
++	static const unsigned char tpm_error_message_fail[] = {
++		0x00, 0x00,
++		0x00, 0x00, 0x00, 0x0a,
++		0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
++	};
++	unsigned char buffer[sizeof(tpm_error_message_fail)];
++
++	memcpy(buffer, tpm_error_message_fail, sizeof(tpm_error_message_fail));
++	/*
++	 * Insert the right response tag depending on the given tag
++	 * All response tags are '+3' to the request tag.
++	 */
++	buffer[1] = req_tag + 3;
++
++	/*
++	 * Write the data to shared memory and notify the front-end
++	 */
++	rc = packet_write(pak, buffer, sizeof(buffer), 0);
++
++	return rc;
++}
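
The "+3" adjustment above follows the TPM 1.2 tag scheme, in which every
response tag is its request tag plus three.  A small illustration with the
standard tag constants (shown for reference, not part of the patch):

/* TPM 1.2 command/response tags: response tag == request tag + 3 */
#define TPM_TAG_RQU_COMMAND        0x00C1
#define TPM_TAG_RQU_AUTH1_COMMAND  0x00C2
#define TPM_TAG_RQU_AUTH2_COMMAND  0x00C3
#define TPM_TAG_RSP_COMMAND        0x00C4	/* 0x00C1 + 3 */
#define TPM_TAG_RSP_AUTH1_COMMAND  0x00C5	/* 0x00C2 + 3 */
#define TPM_TAG_RSP_AUTH2_COMMAND  0x00C6	/* 0x00C3 + 3 */
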
++
++
++static void
++_vtpm_release_packets(struct list_head *head, tpmif_t *tpmif,
++                      int send_msgs)
++{
++	struct packet *pak;
++	struct list_head *pos, *tmp;
++
++	list_for_each_safe(pos, tmp, head) {
++		pak = list_entry(pos, struct packet, next);
++		if (tpmif == NULL || pak->tpmif == tpmif) {
++			int can_send = 0;
++			del_singleshot_timer_sync(&pak->processing_timer);
++			list_del(&pak->next);
++
++			if (pak->tpmif && pak->tpmif->status == CONNECTED) {
++				can_send = 1;
++			}
++
++			if (send_msgs && can_send) {
++				tpm_send_fail_message(pak, pak->req_tag);
++			}
++			packet_free(pak);
++		}
++	}
++}
++
++
++int
++vtpm_release_packets(tpmif_t *tpmif, int send_msgs)
++{
++	unsigned long flags;
++
++	write_lock_irqsave(&dataex.pak_lock, flags);
++
++	_vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
++	_vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
++
++	write_unlock_irqrestore(&dataex.pak_lock,
++	                        flags);
++	return 0;
++}
++
++
++static int vtpm_queue_packet(struct packet *pak)
++{
++	int rc = 0;
++	if (dataex.has_opener) {
++		unsigned long flags;
++		write_lock_irqsave(&dataex.pak_lock, flags);
++		list_add_tail(&pak->next, &dataex.pending_pak);
++		/* give the TPM some time to pick up the request */
++		mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
++		write_unlock_irqrestore(&dataex.pak_lock,
++		                        flags);
++
++		wake_up_interruptible(&dataex.wait_queue);
++	} else {
++		rc = -EFAULT;
++	}
++	return rc;
++}
++
++
++static int vtpm_receive(tpmif_t *tpmif, u32 size)
++{
++	int rc = 0;
++	unsigned char buffer[10];
++	__be32 *native_size;
++
++	struct packet *pak = packet_alloc(tpmif, size, buffer[4], 0);
++	if (NULL == pak) {
++		return -ENOMEM;
++	}
++	/*
++	 * Read 10 bytes from the received buffer to test its
++	 * content for validity.
++	 */
++	if (sizeof(buffer) != packet_read(pak,
++	                                  sizeof(buffer), buffer,
++	                                  sizeof(buffer), 0)) {
++		goto failexit;
++	}
++	/*
++	 * Reset the packet read pointer so we can read all its
++	 * contents again.
++	 */
++	packet_reset(pak);
++
++	native_size = (__force __be32 *)(&buffer[4+2]);
++	/*
++	 * Verify that the size of the packet is correct
++	 * as indicated and that there's actually someone reading packets.
++	 * The minimum size of the packet is '10' for tag, size indicator
++	 * and ordinal.
++	 */
++	if (size < 10 ||
++	    be32_to_cpu(*native_size) != size ||
++	    0 == dataex.has_opener ||
++	    tpmif->status != CONNECTED) {
++		rc = -EINVAL;
++		goto failexit;
++	} else {
++		if ((rc = vtpm_queue_packet(pak)) < 0) {
++			goto failexit;
++		}
++	}
++	return 0;
++
++failexit:
++	if (pak) {
++		tpm_send_fail_message(pak, buffer[4+1]);
++		packet_free(pak);
++	}
++	return rc;
++}
++
++
++/*
++ * Timeout function that gets invoked when a packet has not been processed
++ * during the timeout period.
++ * The packet must be on a list when this function is invoked. This
++ * also means that once it's taken off a list, the timer must be
++ * destroyed as well.
++ */
++static void processing_timeout(unsigned long ptr)
++{
++	struct packet *pak = (struct packet *)ptr;
++	unsigned long flags;
++	write_lock_irqsave(&dataex.pak_lock, flags);
++	/*
++	 * Check whether the packet is still on one of
++	 * the lists.
++	 */
++	if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
++	    pak == packet_find_packet(&dataex.current_pak, pak) ) {
++		list_del(&pak->next);
++		tpm_send_fail_message(pak, pak->req_tag);
++		packet_free(pak);
++	}
++
++	write_unlock_irqrestore(&dataex.pak_lock, flags);
++}
++
++
++
++static void tpm_tx_action(unsigned long unused);
++static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
++
++#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE
++
++static struct list_head tpm_schedule_list;
++static spinlock_t tpm_schedule_list_lock;
++
++static inline void
++maybe_schedule_tx_action(void)
++{
++	smp_mb();
++	tasklet_schedule(&tpm_tx_tasklet);
++}
++
++static inline int
++__on_tpm_schedule_list(tpmif_t * tpmif)
++{
++	return tpmif->list.next != NULL;
++}
++
++static void
++remove_from_tpm_schedule_list(tpmif_t * tpmif)
++{
++	spin_lock_irq(&tpm_schedule_list_lock);
++	if (likely(__on_tpm_schedule_list(tpmif))) {
++		list_del(&tpmif->list);
++		tpmif->list.next = NULL;
++		tpmif_put(tpmif);
++	}
++	spin_unlock_irq(&tpm_schedule_list_lock);
++}
++
++static void
++add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
++{
++	if (__on_tpm_schedule_list(tpmif))
++		return;
++
++	spin_lock_irq(&tpm_schedule_list_lock);
++	if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
++		list_add_tail(&tpmif->list, &tpm_schedule_list);
++		tpmif_get(tpmif);
++	}
++	spin_unlock_irq(&tpm_schedule_list_lock);
++}
++
++void
++tpmif_schedule_work(tpmif_t * tpmif)
++{
++	add_to_tpm_schedule_list_tail(tpmif);
++	maybe_schedule_tx_action();
++}
++
++void
++tpmif_deschedule_work(tpmif_t * tpmif)
++{
++	remove_from_tpm_schedule_list(tpmif);
++}
++
++
++static void
++tpm_tx_action(unsigned long unused)
++{
++	struct list_head *ent;
++	tpmif_t *tpmif;
++	tpmif_tx_request_t *tx;
++
++	DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
++
++	while (!list_empty(&tpm_schedule_list)) {
++		/* Get a tpmif from the list with work to do. */
++		ent = tpm_schedule_list.next;
++		tpmif = list_entry(ent, tpmif_t, list);
++		tpmif_get(tpmif);
++		remove_from_tpm_schedule_list(tpmif);
++		/*
++		 * Ensure that we see the request when we read from it.
++		 */
++		mb();
++
++		tx = &tpmif->tx->ring[0].req;
++
++		/* pass it up */
++		vtpm_receive(tpmif, tx->size);
++
++		tpmif_put(tpmif);
++	}
++}
++
++irqreturn_t
++tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++	tpmif_t *tpmif = dev_id;
++	add_to_tpm_schedule_list_tail(tpmif);
++	maybe_schedule_tx_action();
++	return IRQ_HANDLED;
++}
++
++static int __init
++tpmback_init(void)
++{
++	int rc;
++
++	if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) {
++		printk(KERN_ALERT "Could not register misc device for TPM BE.\n");
++		return rc;
++	}
++
++	INIT_LIST_HEAD(&dataex.pending_pak);
++	INIT_LIST_HEAD(&dataex.current_pak);
++	dataex.has_opener = 0;
++	rwlock_init(&dataex.pak_lock);
++	init_waitqueue_head(&dataex.wait_queue);
++
++	spin_lock_init(&tpm_schedule_list_lock);
++	INIT_LIST_HEAD(&tpm_schedule_list);
++
++	tpmif_interface_init();
++	tpmif_xenbus_init();
++
++	printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
++
++	return 0;
++}
++
++__initcall(tpmback_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/xenbus.c linux-2.6.12-xen/drivers/xen/tpmback/xenbus.c
+--- pristine-linux-2.6.12/drivers/xen/tpmback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/tpmback/xenbus.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,328 @@
++/*  Xenbus code for tpmif backend
++    Copyright (C) 2005 IBM Corporation
++    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
++
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
++
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
++
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++*/
++#include <stdarg.h>
++#include <linux/module.h>
++#include <asm-xen/xenbus.h>
++#include "common.h"
++
++struct backend_info
++{
++	struct xenbus_device *dev;
++
++	/* our communications channel */
++	tpmif_t *tpmif;
++
++	long int frontend_id;
++	long int instance; // instance of TPM
++	u8 is_instance_set;	// whether instance number has been set
++
++	/* watch front end for changes */
++	struct xenbus_watch backend_watch;
++	XenbusState frontend_state;
++};
++
++static void maybe_connect(struct backend_info *be);
++static void connect(struct backend_info *be);
++static int connect_ring(struct backend_info *be);
++static void backend_changed(struct xenbus_watch *watch,
++                            const char **vec, unsigned int len);
++static void frontend_changed(struct xenbus_device *dev,
++                             XenbusState frontend_state);
++
++static int tpmback_remove(struct xenbus_device *dev)
++{
++	struct backend_info *be = dev->data;
++
++	if (be->backend_watch.node) {
++		unregister_xenbus_watch(&be->backend_watch);
++		kfree(be->backend_watch.node);
++		be->backend_watch.node = NULL;
++	}
++	if (be->tpmif) {
++		tpmif_put(be->tpmif);
++		be->tpmif = NULL;
++	}
++	kfree(be);
++	dev->data = NULL;
++	return 0;
++}
++
++static int tpmback_probe(struct xenbus_device *dev,
++                         const struct xenbus_device_id *id)
++{
++	int err;
++	struct backend_info *be = kmalloc(sizeof(struct backend_info),
++	                                  GFP_KERNEL);
++
++	if (!be) {
++		xenbus_dev_fatal(dev, -ENOMEM,
++		                 "allocating backend structure");
++		return -ENOMEM;
++	}
++
++	memset(be, 0, sizeof(*be));
++
++	be->is_instance_set = 0;
++	be->dev = dev;
++	dev->data = be;
++
++	err = xenbus_watch_path2(dev, dev->nodename,
++	                        "instance", &be->backend_watch,
++	                        backend_changed);
++	if (err) {
++		goto fail;
++	}
++
++	err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
++	if (err) {
++		goto fail;
++	}
++	return 0;
++fail:
++	tpmback_remove(dev);
++	return err;
++}
++
++
++static void backend_changed(struct xenbus_watch *watch,
++                            const char **vec, unsigned int len)
++{
++	int err;
++	long instance;
++	struct backend_info *be
++		= container_of(watch, struct backend_info, backend_watch);
++	struct xenbus_device *dev = be->dev;
++
++	err = xenbus_scanf(XBT_NULL, dev->nodename,
++	                   "instance","%li", &instance);
++	if (XENBUS_EXIST_ERR(err)) {
++		return;
++	}
++
++	if (err != 1) {
++		xenbus_dev_fatal(dev, err, "reading instance");
++		return;
++	}
++
++	if (be->is_instance_set != 0 && be->instance != instance) {
++		printk(KERN_WARNING
++		       "tpmback: changing instance (from %ld to %ld) "
++		       "not allowed.\n",
++		       be->instance, instance);
++		return;
++	}
++
++	if (be->is_instance_set == 0) {
++		be->tpmif = tpmif_find(dev->otherend_id,
++		                       instance);
++		if (IS_ERR(be->tpmif)) {
++			err = PTR_ERR(be->tpmif);
++			be->tpmif = NULL;
++			xenbus_dev_fatal(dev, err, "creating vtpm interface");
++			return;
++		}
++		be->instance = instance;
++		be->is_instance_set = 1;
++
++		/*
++		 * There's an unfortunate problem:
++		 * Sometimes after a suspend/resume the
++		 * state switch to XenbusStateInitialised happens
++		 * *before* we get to this point. Since connect_ring()
++		 * must have failed by then (be->tpmif was still NULL),
++		 * we just call it here again indirectly.
++		 */
++		if (be->frontend_state == XenbusStateInitialised) {
++			frontend_changed(dev, be->frontend_state);
++		}
++	}
++}
++
++
++static void frontend_changed(struct xenbus_device *dev,
++                             XenbusState frontend_state)
++{
++	struct backend_info *be = dev->data;
++	int err;
++
++	be->frontend_state = frontend_state;
++
++	switch (frontend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateConnected:
++		break;
++
++	case XenbusStateInitialised:
++		err = connect_ring(be);
++		if (err) {
++			return;
++		}
++		maybe_connect(be);
++		break;
++
++	case XenbusStateClosing:
++		xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
++		break;
++
++	case XenbusStateClosed:
++		/*
++		 * Notify the vTPM manager about the front-end
++		 * having left.
++		 */
++		tpmif_vtpm_close(be->instance);
++		device_unregister(&be->dev->dev);
++		break;
++
++	case XenbusStateUnknown:
++	case XenbusStateInitWait:
++	default:
++		xenbus_dev_fatal(dev, -EINVAL,
++		                 "saw state %d at frontend",
++		                 frontend_state);
++		break;
++	}
++}
++
++
++
++static void maybe_connect(struct backend_info *be)
++{
++	int err;
++
++	if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
++		return;
++
++	connect(be);
++
++	/*
++	 * Notify the vTPM manager about a new front-end.
++	 */
++	err = tpmif_vtpm_open(be->tpmif,
++	                      be->frontend_id,
++	                      be->instance);
++	if (err) {
++		xenbus_dev_error(be->dev, err,
++		                 "queueing vtpm open packet");
++		/*
++		 * Should close down this device and notify FE
++		 * about closure.
++		 */
++		return;
++	}
++}
++
++
++static void connect(struct backend_info *be)
++{
++	xenbus_transaction_t xbt;
++	int err;
++	struct xenbus_device *dev = be->dev;
++	unsigned long ready = 1;
++
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(be->dev, err, "starting transaction");
++		return;
++	}
++
++	err = xenbus_printf(xbt, be->dev->nodename,
++	                    "ready", "%lu", ready);
++	if (err) {
++		xenbus_dev_fatal(be->dev, err, "writing 'ready'");
++		goto abort;
++	}
++
++	err = xenbus_switch_state(dev, xbt, XenbusStateConnected);
++	if (err)
++		goto abort;
++
++	be->tpmif->status = CONNECTED;
++
++	err = xenbus_transaction_end(xbt, 0);
++	if (err == -EAGAIN)
++		goto again;
++	if (err) {
++		xenbus_dev_fatal(be->dev, err, "end of transaction");
++	}
++	return;
++abort:
++	xenbus_transaction_end(xbt, 1);
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++	struct xenbus_device *dev = be->dev;
++	unsigned long ring_ref;
++	unsigned int evtchn;
++	int err;
++
++	err = xenbus_gather(XBT_NULL, dev->otherend,
++	                    "ring-ref", "%lu", &ring_ref,
++			    "event-channel", "%u", &evtchn, NULL);
++	if (err) {
++		xenbus_dev_error(dev, err,
++				 "reading %s/ring-ref and event-channel",
++				 dev->otherend);
++		return err;
++	}
++	if (be->tpmif != NULL) {
++		err = tpmif_map(be->tpmif, ring_ref, evtchn);
++		if (err) {
++			xenbus_dev_error(dev, err,
++			    	         "mapping shared-frame %lu port %u",
++				         ring_ref, evtchn);
++			return err;
++		}
++	}
++	return 0;
++}
++
++
++static struct xenbus_device_id tpmback_ids[] = {
++	{ "vtpm" },
++	{ "" }
++};
++
++
++static struct xenbus_driver tpmback = {
++	.name = "vtpm",
++	.owner = THIS_MODULE,
++	.ids = tpmback_ids,
++	.probe = tpmback_probe,
++	.remove = tpmback_remove,
++	.otherend_changed = frontend_changed,
++};
++
++
++void tpmif_xenbus_init(void)
++{
++	xenbus_register_backend(&tpmback);
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmfront/Makefile linux-2.6.12-xen/drivers/xen/tpmfront/Makefile
+--- pristine-linux-2.6.12/drivers/xen/tpmfront/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/tpmfront/Makefile	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,2 @@
++
++obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront.o
+diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmfront/tpmfront.c linux-2.6.12-xen/drivers/xen/tpmfront/tpmfront.c
+--- pristine-linux-2.6.12/drivers/xen/tpmfront/tpmfront.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/tpmfront/tpmfront.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,703 @@
++/*
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb at us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <asm-xen/tpmfe.h>
++#include <linux/err.h>
++
++#include <asm/semaphore.h>
++#include <asm/io.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/xen-public/grant_table.h>
++#include <asm-xen/xen-public/io/tpmif.h>
++#include <asm/uaccess.h>
++#include <asm-xen/xenbus.h>
++#include <asm-xen/xen-public/grant_table.h>
++
++#include "tpmfront.h"
++
++#undef DEBUG
++
++/* locally visible variables */
++static grant_ref_t gref_head;
++static struct tpm_private my_private;
++
++/* local function prototypes */
++static irqreturn_t tpmif_int(int irq,
++                             void *tpm_priv,
++                             struct pt_regs *ptregs);
++static void tpmif_rx_action(unsigned long unused);
++static void tpmif_connect(u16 evtchn, domid_t domid);
++static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
++static int tpm_allocate_buffers(struct tpm_private *tp);
++static void tpmif_set_connected_state(struct tpm_private *tp,
++                                      u8 newstate);
++static int tpm_xmit(struct tpm_private *tp,
++                    const u8 * buf, size_t count, int userbuffer,
++                    void *remember);
++
++#define DPRINTK(fmt, args...) \
++    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
++
++
++static inline int
++tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
++               int isuserbuffer)
++{
++	int copied = len;
++
++	if (len > txb->size) {
++		copied = txb->size;
++	}
++	if (isuserbuffer) {
++		if (copy_from_user(txb->data, src, copied))
++			return -EFAULT;
++	} else {
++		memcpy(txb->data, src, copied);
++	}
++	txb->len = len;
++	return copied;
++}
++
++static inline struct tx_buffer *tx_buffer_alloc(void)
++{
++	struct tx_buffer *txb = kmalloc(sizeof (struct tx_buffer),
++					GFP_KERNEL);
++
++	if (txb) {
++		txb->len = 0;
++		txb->size = PAGE_SIZE;
++		txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
++		if (txb->data == NULL) {
++			kfree(txb);
++			txb = NULL;
++		}
++	}
++	return txb;
++}
++
++
++/**************************************************************
++
++ The interface to let the tpm plugin register its callback
++ function and send data to another partition using this module
++
++**************************************************************/
++
++static DECLARE_MUTEX(upperlayer_lock);
++static DECLARE_MUTEX(suspend_lock);
++static struct tpmfe_device *upperlayer_tpmfe;
++
++/*
++ * Send data via this module by calling this function
++ */
++int tpm_fe_send(const u8 * buf, size_t count, void *ptr)
++{
++	int sent = 0;
++	struct tpm_private *tp = &my_private;
++
++	down(&suspend_lock);
++	sent = tpm_xmit(tp, buf, count, 0, ptr);
++	up(&suspend_lock);
++
++	return sent;
++}
++EXPORT_SYMBOL(tpm_fe_send);
++
++/*
++ * Register a callback for receiving data from this module
++ */
++int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
++{
++	int rc = 0;
++
++	down(&upperlayer_lock);
++	if (NULL == upperlayer_tpmfe) {
++		upperlayer_tpmfe = tpmfe_dev;
++		tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
++	} else {
++		rc = -EBUSY;
++	}
++	up(&upperlayer_lock);
++	return rc;
++}
++EXPORT_SYMBOL(tpm_fe_register_receiver);
++
++/*
++ * Unregister the callback for receiving data from this module
++ */
++void tpm_fe_unregister_receiver(void)
++{
++	down(&upperlayer_lock);
++	upperlayer_tpmfe = NULL;
++	up(&upperlayer_lock);
++}
++EXPORT_SYMBOL(tpm_fe_unregister_receiver);
++
++/*
++ * Call this function to send data to the upper layer's
++ * registered receiver function.
++ */
++static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
++                                  const void *ptr)
++{
++	int rc = 0;
++
++	down(&upperlayer_lock);
++
++	if (upperlayer_tpmfe && upperlayer_tpmfe->receive)
++		rc = upperlayer_tpmfe->receive(buf, count, ptr);
++
++	up(&upperlayer_lock);
++	return rc;
++}
++
++/**************************************************************
++ XENBUS support code
++**************************************************************/
++
++static int setup_tpmring(struct xenbus_device *dev,
++                         struct tpmfront_info * info)
++{
++	tpmif_tx_interface_t *sring;
++	struct tpm_private *tp = &my_private;
++	int err;
++
++	sring = (void *)__get_free_page(GFP_KERNEL);
++	if (!sring) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++		return -ENOMEM;
++	}
++	tp->tx = sring;
++
++	tpm_allocate_buffers(tp);
++
++	err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
++	if (err < 0) {
++		free_page((unsigned long)sring);
++		tp->tx = NULL;
++		xenbus_dev_fatal(dev, err, "allocating grant reference");
++		goto fail;
++	}
++	info->ring_ref = err;
++
++	err = xenbus_alloc_evtchn(dev, &tp->evtchn);
++	if (err)
++		goto fail;
++
++	tpmif_connect(tp->evtchn, dev->otherend_id);
++
++	return 0;
++fail:
++	return err;
++}
++
++
++static void destroy_tpmring(struct tpmfront_info *info, struct tpm_private *tp)
++{
++	tpmif_set_connected_state(tp, 0);
++	if (tp->tx != NULL) {
++		gnttab_end_foreign_access(info->ring_ref, 0,
++					  (unsigned long)tp->tx);
++		tp->tx = NULL;
++	}
++
++	if (tp->irq)
++		unbind_from_irqhandler(tp->irq, NULL);
++	tp->evtchn = tp->irq = 0;
++}
++
++
++static int talk_to_backend(struct xenbus_device *dev,
++                           struct tpmfront_info *info)
++{
++	const char *message = NULL;
++	int err;
++	xenbus_transaction_t xbt;
++
++	err = setup_tpmring(dev, info);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "setting up ring");
++		goto out;
++	}
++
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++		goto destroy_tpmring;
++	}
++
++	err = xenbus_printf(xbt, dev->nodename,
++	                    "ring-ref","%u", info->ring_ref);
++	if (err) {
++		message = "writing ring-ref";
++		goto abort_transaction;
++	}
++
++	err = xenbus_printf(xbt, dev->nodename,
++			    "event-channel", "%u", my_private.evtchn);
++	if (err) {
++		message = "writing event-channel";
++		goto abort_transaction;
++	}
++
++	err = xenbus_switch_state(dev, xbt, XenbusStateInitialised);
++	if (err) {
++		goto abort_transaction;
++	}
++
++	err = xenbus_transaction_end(xbt, 0);
++	if (err == -EAGAIN)
++		goto again;
++	if (err) {
++		xenbus_dev_fatal(dev, err, "completing transaction");
++		goto destroy_tpmring;
++	}
++	return 0;
++
++abort_transaction:
++	xenbus_transaction_end(xbt, 1);
++	if (message)
++		xenbus_dev_error(dev, err, "%s", message);
++destroy_tpmring:
++	destroy_tpmring(info, &my_private);
++out:
++	return err;
++}
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++			    XenbusState backend_state)
++{
++	struct tpm_private *tp = &my_private;
++	DPRINTK("\n");
++
++	switch (backend_state) {
++	case XenbusStateInitialising:
++	case XenbusStateInitWait:
++	case XenbusStateInitialised:
++	case XenbusStateUnknown:
++		break;
++
++	case XenbusStateConnected:
++		tpmif_set_connected_state(tp, 1);
++		break;
++
++	case XenbusStateClosing:
++		tpmif_set_connected_state(tp, 0);
++		break;
++
++	case XenbusStateClosed:
++		if (tp->is_suspended == 0) {
++			device_unregister(&dev->dev);
++		}
++		break;
++	}
++}
++
++
++static int tpmfront_probe(struct xenbus_device *dev,
++                          const struct xenbus_device_id *id)
++{
++	int err;
++	struct tpmfront_info *info;
++	int handle;
++
++	err = xenbus_scanf(XBT_NULL, dev->nodename,
++	                   "handle", "%i", &handle);
++	if (XENBUS_EXIST_ERR(err))
++		return err;
++
++	if (err < 0) {
++		xenbus_dev_fatal(dev, err, "reading handle");
++		return err;
++	}
++
++	info = kmalloc(sizeof(*info), GFP_KERNEL);
++	if (!info) {
++		err = -ENOMEM;
++		xenbus_dev_fatal(dev,err,"allocating info structure");
++		return err;
++	}
++	memset(info, 0x0, sizeof(*info));
++
++	info->dev = dev;
++	dev->data = info;
++
++	err = talk_to_backend(dev, info);
++	if (err) {
++		kfree(info);
++		dev->data = NULL;
++		return err;
++	}
++	return 0;
++}
++
++
++static int tpmfront_remove(struct xenbus_device *dev)
++{
++	struct tpmfront_info *info = dev->data;
++
++	destroy_tpmring(info, &my_private);
++
++	kfree(info);
++	return 0;
++}
++
++static int
++tpmfront_suspend(struct xenbus_device *dev)
++{
++	struct tpm_private *tp = &my_private;
++	u32 ctr;
++
++	/* lock, so no app can send */
++	down(&suspend_lock);
++	tp->is_suspended = 1;
++
++	for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
++		if ((ctr % 10) == 0)
++			printk("TPM-FE [INFO]: Waiting for outstanding request.\n");
++		/*
++		 * Wait for a request to be responded to.
++		 */
++		interruptible_sleep_on_timeout(&tp->wait_q, 100);
++	}
++
++	if (atomic_read(&tp->tx_busy)) {
++		/*
++		 * A temporary work-around.
++		 */
++		printk("TPM-FE [WARNING]: Resetting busy flag.\n");
++		atomic_set(&tp->tx_busy, 0);
++	}
++
++	return 0;
++}
++
++static int
++tpmfront_resume(struct xenbus_device *dev)
++{
++	struct tpmfront_info *info = dev->data;
++	return talk_to_backend(dev, info);
++}
++
++static void
++tpmif_connect(u16 evtchn, domid_t domid)
++{
++	int err;
++	struct tpm_private *tp = &my_private;
++
++	tp->evtchn = evtchn;
++	tp->backend_id = domid;
++
++	err = bind_evtchn_to_irqhandler(tp->evtchn,
++					tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
++					tp);
++	if (err <= 0) {
++		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
++		return;
++	}
++
++	tp->irq = err;
++}
++
++static struct xenbus_device_id tpmfront_ids[] = {
++	{ "vtpm" },
++	{ "" }
++};
++
++static struct xenbus_driver tpmfront = {
++	.name = "vtpm",
++	.owner = THIS_MODULE,
++	.ids = tpmfront_ids,
++	.probe = tpmfront_probe,
++	.remove =  tpmfront_remove,
++	.resume = tpmfront_resume,
++	.otherend_changed = backend_changed,
++	.suspend = tpmfront_suspend,
++};
++
++static void __init init_tpm_xenbus(void)
++{
++	xenbus_register_frontend(&tpmfront);
++}
++
++
++static int
++tpm_allocate_buffers(struct tpm_private *tp)
++{
++	unsigned int i;
++
++	for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
++		tp->tx_buffers[i] = tx_buffer_alloc();
++	return 1;
++}
++
++static void
++tpmif_rx_action(unsigned long unused)
++{
++	struct tpm_private *tp = &my_private;
++
++	int i = 0;
++	unsigned int received;
++	unsigned int offset = 0;
++	u8 *buffer;
++	tpmif_tx_request_t *tx;
++	tx = &tp->tx->ring[i].req;
++
++	received = tx->size;
++
++	buffer = kmalloc(received, GFP_KERNEL);
++	if (NULL == buffer) {
++		goto exit;
++	}
++
++	for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
++		struct tx_buffer *txb = tp->tx_buffers[i];
++		tpmif_tx_request_t *tx;
++		unsigned int tocopy;
++
++		tx = &tp->tx->ring[i].req;
++		tocopy = tx->size;
++		if (tocopy > PAGE_SIZE) {
++			tocopy = PAGE_SIZE;
++		}
++
++		memcpy(&buffer[offset], txb->data, tocopy);
++
++		gnttab_release_grant_reference(&gref_head, tx->ref);
++
++		offset += tocopy;
++	}
++
++	tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
++	kfree(buffer);
++
++exit:
++	atomic_set(&tp->tx_busy, 0);
++	wake_up_interruptible(&tp->wait_q);
++}
++
++
++static irqreturn_t
++tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
++{
++	struct tpm_private *tp = tpm_priv;
++	unsigned long flags;
++
++	spin_lock_irqsave(&tp->tx_lock, flags);
++	tasklet_schedule(&tpmif_rx_tasklet);
++	spin_unlock_irqrestore(&tp->tx_lock, flags);
++
++	return IRQ_HANDLED;
++}
++
++
++static int
++tpm_xmit(struct tpm_private *tp,
++         const u8 * buf, size_t count, int isuserbuffer,
++         void *remember)
++{
++	tpmif_tx_request_t *tx;
++	TPMIF_RING_IDX i;
++	unsigned int offset = 0;
++
++	spin_lock_irq(&tp->tx_lock);
++
++	if (unlikely(atomic_read(&tp->tx_busy))) {
++		printk("tpm_xmit: There's an outstanding request/response "
++		       "on the way!\n");
++		spin_unlock_irq(&tp->tx_lock);
++		return -EBUSY;
++	}
++
++	if (tp->is_connected != 1) {
++		spin_unlock_irq(&tp->tx_lock);
++		return -EIO;
++	}
++
++	for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
++		struct tx_buffer *txb = tp->tx_buffers[i];
++		int copied;
++
++		if (NULL == txb) {
++			DPRINTK("txb (i=%d) is NULL. Buffers initialized?\n"
++				"Not transmitting anything!\n", i);
++			spin_unlock_irq(&tp->tx_lock);
++			return -EFAULT;
++		}
++		copied = tx_buffer_copy(txb, &buf[offset], count,
++		                        isuserbuffer);
++		if (copied < 0) {
++			/* An error occurred */
++			spin_unlock_irq(&tp->tx_lock);
++			return copied;
++		}
++		count -= copied;
++		offset += copied;
++
++		tx = &tp->tx->ring[i].req;
++
++		tx->id = i;
++		tx->addr = virt_to_machine(txb->data);
++		tx->size = txb->len;
++
++		DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
++		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
++
++		/* get the granttable reference for this page */
++		tx->ref = gnttab_claim_grant_reference(&gref_head);
++
++		if (-ENOSPC == tx->ref) {
++			spin_unlock_irq(&tp->tx_lock);
++			DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
++			return -ENOSPC;
++		}
++		gnttab_grant_foreign_access_ref( tx->ref,
++		                                 tp->backend_id,
++		                                 (tx->addr >> PAGE_SHIFT),
++		                                 0 /*RW*/);
++		wmb();
++	}
++
++	atomic_set(&tp->tx_busy, 1);
++	tp->tx_remember = remember;
++	mb();
++
++	DPRINTK("Notifying backend via event channel %d\n",
++	        tp->evtchn);
++
++	notify_remote_via_irq(tp->irq);
++
++	spin_unlock_irq(&tp->tx_lock);
++	return offset;
++}
++
++
++static void tpmif_notify_upperlayer(struct tpm_private *tp)
++{
++	/*
++	 * Notify upper layer about the state of the connection
++	 * to the BE.
++	 */
++	down(&upperlayer_lock);
++
++	if (upperlayer_tpmfe != NULL) {
++		if (tp->is_connected) {
++			upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
++		} else {
++			upperlayer_tpmfe->status(0);
++		}
++	}
++	up(&upperlayer_lock);
++}
++
++
++static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
++{
++	/*
++	 * Don't notify upper layer if we are in suspend mode and
++	 * Don't notify the upper layer if we are in suspend mode and
++	 * should disconnect - the assumption is that we will resume.
++	 */
++	if (is_connected == 0 && tp->is_suspended == 1) {
++		return;
++	}
++
++	/*
++	 * Unlock the semaphore if we are connected again
++	 * after being suspended - now resuming.
++	 * This also removes the suspend state.
++	 */
++	if (is_connected == 1 && tp->is_suspended == 1) {
++		tp->is_suspended = 0;
++		/* unlock, so apps can resume sending */
++		up(&suspend_lock);
++	}
++
++	if (is_connected != tp->is_connected) {
++		tp->is_connected = is_connected;
++		tpmif_notify_upperlayer(tp);
++	}
++}
++
++
++/* =================================================================
++ * Initialization function.
++ * =================================================================
++ */
++
++static int __init
++tpmif_init(void)
++{
++	IPRINTK("Initialising the vTPM driver.\n");
++	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
++	                                     &gref_head ) < 0) {
++		return -EFAULT;
++	}
++	/*
++	 * Only suppress sending the driver status when we are in the
++	 * INIT domain.
++	 */
++	spin_lock_init(&my_private.tx_lock);
++	init_waitqueue_head(&my_private.wait_q);
++
++	init_tpm_xenbus();
++
++	return 0;
++}
++
++__initcall(tpmif_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmfront/tpmfront.h linux-2.6.12-xen/drivers/xen/tpmfront/tpmfront.h
+--- pristine-linux-2.6.12/drivers/xen/tpmfront/tpmfront.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/tpmfront/tpmfront.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,43 @@
++#ifndef TPM_FRONT_H
++#define TPM_FRONT_H
++
++struct tpm_private {
++	tpmif_tx_interface_t *tx;
++	unsigned int evtchn;
++	unsigned int irq;
++	u8 is_connected;
++	u8 is_suspended;
++
++	spinlock_t tx_lock;
++
++	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
++
++	atomic_t tx_busy;
++	void *tx_remember;
++	domid_t backend_id;
++	wait_queue_head_t wait_q;
++
++};
++
++struct tpmfront_info {
++	struct xenbus_device *dev;
++	int ring_ref;
++};
++
++struct tx_buffer {
++	unsigned int size;	// available space in data
++	unsigned int len;	// used space in data
++	unsigned char *data;	// pointer to a page
++};
++
++#endif
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/util.c linux-2.6.12-xen/drivers/xen/util.c
+--- pristine-linux-2.6.12/drivers/xen/util.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/util.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,80 @@
++#include <linux/config.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <asm-xen/driver_util.h>
++
++static int f(pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
++{
++	/* generic_page_range() does all the hard work. */
++	return 0;
++}
++
++struct vm_struct *alloc_vm_area(unsigned long size)
++{
++	struct vm_struct *area;
++
++	area = get_vm_area(size, VM_IOREMAP);
++	if (area == NULL)
++		return NULL;
++
++	/*
++	 * This ensures that page tables are constructed for this region
++	 * of kernel virtual address space and mapped into init_mm.
++	 */
++	if (generic_page_range(&init_mm, (unsigned long)area->addr,
++			       area->size, f, NULL)) {
++		free_vm_area(area);
++		return NULL;
++	}
++
++	return area;
++}
++EXPORT_SYMBOL_GPL(alloc_vm_area);
++
++void free_vm_area(struct vm_struct *area)
++{
++	struct vm_struct *ret;
++	ret = remove_vm_area(area->addr);
++	BUG_ON(ret != area);
++	kfree(area);
++}
++EXPORT_SYMBOL_GPL(free_vm_area);
++
++void lock_vm_area(struct vm_struct *area)
++{
++	unsigned long i;
++	char c;
++
++	/*
++	 * Prevent context switch to a lazy mm that doesn't have this area
++	 * mapped into its page tables.
++	 */
++	preempt_disable();
++
++	/*
++	 * Ensure that the page tables are mapped into the current mm. The
++	 * page-fault path will copy the page directory pointers from init_mm.
++	 */
++	for (i = 0; i < area->size; i += PAGE_SIZE)
++		(void)__get_user(c, (char __user *)area->addr + i);
++}
++EXPORT_SYMBOL_GPL(lock_vm_area);
++
++void unlock_vm_area(struct vm_struct *area)
++{
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(unlock_vm_area);
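
A brief usage sketch of the helpers above (illustration only, not part of the
patch): reserve a page of kernel virtual address space, keep the current mm
pinned while a foreign page is mapped into it, and release it later with
free_vm_area().  The grant-mapping step itself is only indicated by a comment.

#include <linux/vmalloc.h>
#include <asm-xen/driver_util.h>

static struct vm_struct *setup_ring_area(void)
{
	struct vm_struct *area = alloc_vm_area(PAGE_SIZE);

	if (area == NULL)
		return NULL;

	lock_vm_area(area);
	/* a real backend would now map a foreign ring page at area->addr,
	 * e.g. via GNTTABOP_map_grant_ref, while the PTEs are guaranteed
	 * to be present in this mm */
	unlock_vm_area(area);

	return area;	/* torn down later with free_vm_area(area) */
}
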
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/Makefile linux-2.6.12-xen/drivers/xen/xenbus/Makefile
+--- pristine-linux-2.6.12/drivers/xen/xenbus/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/xenbus/Makefile	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,8 @@
++obj-y	+= xenbus.o
++
++xenbus-objs =
++xenbus-objs += xenbus_client.o 
++xenbus-objs += xenbus_comms.o
++xenbus-objs += xenbus_xs.o
++xenbus-objs += xenbus_probe.o 
++xenbus-objs += xenbus_dev.o 
+diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_client.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_client.c
+--- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_client.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_client.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,238 @@
++/******************************************************************************
++ * Client-facing interface for the Xenbus driver.  In other words, the
++ * interface between the Xenbus and the device-specific code, be it the
++ * frontend or the backend of that driver.
++ *
++ * Copyright (C) 2005 XenSource Ltd
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <asm-xen/evtchn.h>
++#include <asm-xen/gnttab.h>
++#include <asm-xen/xenbus.h>
++
++/* xenbus_probe.c */
++extern char *kasprintf(const char *fmt, ...);
++
++#define DPRINTK(fmt, args...) \
++    pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++
++int xenbus_watch_path(struct xenbus_device *dev, const char *path,
++		      struct xenbus_watch *watch, 
++		      void (*callback)(struct xenbus_watch *,
++				       const char **, unsigned int))
++{
++	int err;
++
++	watch->node = path;
++	watch->callback = callback;
++
++	err = register_xenbus_watch(watch);
++
++	if (err) {
++		watch->node = NULL;
++		watch->callback = NULL;
++		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
++	}
++
++	return err;
++}
++EXPORT_SYMBOL(xenbus_watch_path);
++
++
++int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
++		       const char *path2, struct xenbus_watch *watch, 
++		       void (*callback)(struct xenbus_watch *,
++					const char **, unsigned int))
++{
++	int err;
++	char *state = kasprintf("%s/%s", path, path2);
++	if (!state) {
++		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
++		return -ENOMEM;
++	}
++	err = xenbus_watch_path(dev, state, watch, callback);
++
++	if (err) {
++		kfree(state);
++	}
++	return err;
++}
++EXPORT_SYMBOL(xenbus_watch_path2);
++
++
++int xenbus_switch_state(struct xenbus_device *dev,
++			xenbus_transaction_t xbt,
++			XenbusState state)
++{
++	/* We check whether the state is currently set to the given value, and
++	   if not, then the state is set.  We don't want to unconditionally
++	   write the given state, because we don't want to fire watches
++	   unnecessarily.  Furthermore, if the node has gone, we don't write
++	   to it, as the device will be tearing down, and we don't want to
++	   resurrect that directory.
++	 */
++
++	int current_state;
++
++	int err = xenbus_scanf(xbt, dev->nodename, "state", "%d",
++			       &current_state);
++	if ((err == 1 && (XenbusState)current_state == state) ||
++	    err == -ENOENT)
++		return 0;
++
++	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "writing new state");
++		return err;
++	}
++	return 0;
++}
++EXPORT_SYMBOL(xenbus_switch_state);
++
++
++/**
++ * Return the path to the error node for the given device, or NULL on failure.
++ * If the value returned is non-NULL, then it is the caller's to kfree.
++ */
++static char *error_path(struct xenbus_device *dev)
++{
++	return kasprintf("error/%s", dev->nodename);
++}
++
++
++void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
++		va_list ap)
++{
++	int ret;
++	unsigned int len;
++	char *printf_buffer = NULL, *path_buffer = NULL;
++
++#define PRINTF_BUFFER_SIZE 4096
++	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
++	if (printf_buffer == NULL)
++		goto fail;
++
++	len = sprintf(printf_buffer, "%i ", -err);
++	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
++
++	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
++	dev->has_error = 1;
++
++	path_buffer = error_path(dev);
++
++	if (path_buffer == NULL) {
++		printk("xenbus: failed to write error node for %s (%s)\n",
++		       dev->nodename, printf_buffer);
++		goto fail;
++	}
++
++	if (xenbus_write(XBT_NULL, path_buffer, "error", printf_buffer) != 0) {
++		printk("xenbus: failed to write error node for %s (%s)\n",
++		       dev->nodename, printf_buffer);
++		goto fail;
++	}
++
++fail:
++	if (printf_buffer)
++		kfree(printf_buffer);
++	if (path_buffer)
++		kfree(path_buffer);
++}
++
++
++void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
++		      ...)
++{
++	va_list ap;
++
++	va_start(ap, fmt);
++	_dev_error(dev, err, fmt, ap);
++	va_end(ap);
++}
++EXPORT_SYMBOL(xenbus_dev_error);
++
++
++void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
++		      ...)
++{
++	va_list ap;
++
++	va_start(ap, fmt);
++	_dev_error(dev, err, fmt, ap);
++	va_end(ap);
++	
++	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
++}
++EXPORT_SYMBOL(xenbus_dev_fatal);
++
++
++int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
++{
++	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
++	if (err < 0)
++		xenbus_dev_fatal(dev, err, "granting access to ring page");
++	return err;
++}
++EXPORT_SYMBOL(xenbus_grant_ring);
++
++
++int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
++{
++	evtchn_op_t op = {
++		.cmd = EVTCHNOP_alloc_unbound,
++		.u.alloc_unbound.dom = DOMID_SELF,
++		.u.alloc_unbound.remote_dom = dev->otherend_id };
++
++	int err = HYPERVISOR_event_channel_op(&op);
++	if (err)
++		xenbus_dev_fatal(dev, err, "allocating event channel");
++	else
++		*port = op.u.alloc_unbound.port;
++	return err;
++}
++EXPORT_SYMBOL(xenbus_alloc_evtchn);
++
++
++XenbusState xenbus_read_driver_state(const char *path)
++{
++	XenbusState result;
++
++	int err = xenbus_gather(XBT_NULL, path, "state", "%d", &result, NULL);
++	if (err)
++		result = XenbusStateClosed;
++
++	return result;
++}
++EXPORT_SYMBOL(xenbus_read_driver_state);
++
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_comms.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_comms.c
+--- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_comms.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_comms.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,203 @@
++/******************************************************************************
++ * xenbus_comms.c
++ *
++ * Low-level code to talk to Xen Store: ring buffer and event channel.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <asm/hypervisor.h>
++#include <asm-xen/evtchn.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <asm-xen/xenbus.h>
++#include "xenbus_comms.h"
++
++static int xenbus_irq;
++
++extern void xenbus_probe(void *); 
++extern int xenstored_ready; 
++static DECLARE_WORK(probe_work, xenbus_probe, NULL);
++
++DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
++
++static inline struct xenstore_domain_interface *xenstore_domain_interface(void)
++{
++	return mfn_to_virt(xen_start_info->store_mfn);
++}
++
++static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
++{
++	if (unlikely(xenstored_ready == 0)) {
++		xenstored_ready = 1; 
++		schedule_work(&probe_work); 
++	} 
++
++	wake_up(&xb_waitq);
++	return IRQ_HANDLED;
++}
++
++static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
++{
++	return ((prod - cons) <= XENSTORE_RING_SIZE);
++}
++
++static void *get_output_chunk(XENSTORE_RING_IDX cons,
++			      XENSTORE_RING_IDX prod,
++			      char *buf, uint32_t *len)
++{
++	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
++	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
++		*len = XENSTORE_RING_SIZE - (prod - cons);
++	return buf + MASK_XENSTORE_IDX(prod);
++}
++
++static const void *get_input_chunk(XENSTORE_RING_IDX cons,
++				   XENSTORE_RING_IDX prod,
++				   const char *buf, uint32_t *len)
++{
++	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
++	if ((prod - cons) < *len)
++		*len = prod - cons;
++	return buf + MASK_XENSTORE_IDX(cons);
++}
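
A worked example of the index arithmetic above (illustration only, not part of
the patch), assuming XENSTORE_RING_SIZE is 1024 and MASK_XENSTORE_IDX(i) is
(i & (XENSTORE_RING_SIZE - 1)); the indexes run freely and are only masked when
the buffer itself is touched, so prod - cons is the amount of data in flight
even after the counters wrap.

#include <assert.h>

#define RING_SIZE 1024u                   /* assumed XENSTORE_RING_SIZE   */
#define MASK(i)   ((i) & (RING_SIZE - 1)) /* assumed MASK_XENSTORE_IDX(i) */

static void ring_index_example(void)
{
	unsigned int cons = 1020, prod = 1030;

	assert(prod - cons == 10);                  /* bytes queued          */
	assert(RING_SIZE - (prod - cons) == 1014);  /* space left for writer */

	/* writer: the contiguous run starts at offset MASK(prod) == 6 and is
	 * clamped to the free space, so it stops before offset 1020 where
	 * the unread data begins */
	assert(RING_SIZE - MASK(prod) == 1018);

	/* reader: only 4 contiguous bytes remain before the buffer wraps;
	 * the next pass continues with the other 6 bytes from offset 0 */
	assert(RING_SIZE - MASK(cons) == 4);
}
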
++
++int xb_write(const void *data, unsigned len)
++{
++	struct xenstore_domain_interface *intf = xenstore_domain_interface();
++	XENSTORE_RING_IDX cons, prod;
++
++	while (len != 0) {
++		void *dst;
++		unsigned int avail;
++
++		wait_event_interruptible(xb_waitq,
++					 (intf->req_prod - intf->req_cons) !=
++					 XENSTORE_RING_SIZE);
++
++		/* Read indexes, then verify. */
++		cons = intf->req_cons;
++		prod = intf->req_prod;
++		mb();
++		if (!check_indexes(cons, prod))
++			return -EIO;
++
++		dst = get_output_chunk(cons, prod, intf->req, &avail);
++		if (avail == 0)
++			continue;
++		if (avail > len)
++			avail = len;
++
++		memcpy(dst, data, avail);
++		data += avail;
++		len -= avail;
++
++		/* Other side must not see new header until data is there. */
++		wmb();
++		intf->req_prod += avail;
++
++		/* This implies mb() before other side sees interrupt. */
++		notify_remote_via_evtchn(xen_start_info->store_evtchn);
++	}
++
++	return 0;
++}
++
++int xb_read(void *data, unsigned len)
++{
++	struct xenstore_domain_interface *intf = xenstore_domain_interface();
++	XENSTORE_RING_IDX cons, prod;
++
++	while (len != 0) {
++		unsigned int avail;
++		const char *src;
++
++		wait_event_interruptible(xb_waitq,
++					 intf->rsp_cons != intf->rsp_prod);
++
++		/* Read indexes, then verify. */
++		cons = intf->rsp_cons;
++		prod = intf->rsp_prod;
++		mb();
++		if (!check_indexes(cons, prod))
++			return -EIO;
++
++		src = get_input_chunk(cons, prod, intf->rsp, &avail);
++		if (avail == 0)
++			continue;
++		if (avail > len)
++			avail = len;
++
++		/* We must read header before we read data. */
++		rmb();
++
++		memcpy(data, src, avail);
++		data += avail;
++		len -= avail;
++
++		/* Other side must not see free space until we've copied out */
++		mb();
++		intf->rsp_cons += avail;
++
++		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
++
++		/* Implies mb(): they will see new header. */
++		notify_remote_via_evtchn(xen_start_info->store_evtchn);
++	}
++
++	return 0;
++}
++
++/* Set up interrupt handler off store event channel. */
++int xb_init_comms(void)
++{
++	int err;
++
++	if (xenbus_irq)
++		unbind_from_irqhandler(xenbus_irq, &xb_waitq);
++
++	err = bind_evtchn_to_irqhandler(
++		xen_start_info->store_evtchn, wake_waiting,
++		0, "xenbus", &xb_waitq);
++	if (err <= 0) {
++		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
++		return err;
++	}
++
++	xenbus_irq = err;
++
++	return 0;
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_comms.h linux-2.6.12-xen/drivers/xen/xenbus/xenbus_comms.h
+--- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_comms.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_comms.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,50 @@
++/*
++ * Private include for xenbus communications.
++ * 
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ *
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef _XENBUS_COMMS_H
++#define _XENBUS_COMMS_H
++
++int xs_init(void);
++int xb_init_comms(void);
++
++/* Low level routines. */
++int xb_write(const void *data, unsigned len);
++int xb_read(void *data, unsigned len);
++int xs_input_avail(void);
++extern wait_queue_head_t xb_waitq;
++
++#endif /* _XENBUS_COMMS_H */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_dev.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_dev.c
+--- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_dev.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_dev.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,238 @@
++/*
++ * xenbus_dev.c
++ * 
++ * Driver giving user-space access to the kernel's xenbus connection
++ * to xenstore.
++ * 
++ * Copyright (c) 2005, Christian Limpach
++ * Copyright (c) 2005, Rusty Russell, IBM Corporation
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/uio.h>
++#include <linux/notifier.h>
++#include <linux/wait.h>
++#include <linux/fs.h>
++
++#include "xenbus_comms.h"
++
++#include <asm/uaccess.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/xenbus.h>
++#include <asm-xen/xen_proc.h>
++#include <asm/hypervisor.h>
++
++struct xenbus_dev_transaction {
++	struct list_head list;
++	xenbus_transaction_t handle;
++};
++
++struct xenbus_dev_data {
++	/* In-progress transactions. */
++	struct list_head transactions;
++
++	/* Partial request. */
++	unsigned int len;
++	union {
++		struct xsd_sockmsg msg;
++		char buffer[PAGE_SIZE];
++	} u;
++
++	/* Response queue. */
++#define MASK_READ_IDX(idx) ((idx)&(PAGE_SIZE-1))
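++	/* read_buffer is a ring; PAGE_SIZE is a power of two, so masking wraps indexes. */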
++	char read_buffer[PAGE_SIZE];
++	unsigned int read_cons, read_prod;
++	wait_queue_head_t read_waitq;
++};
++
++static struct proc_dir_entry *xenbus_dev_intf;
++
++static ssize_t xenbus_dev_read(struct file *filp,
++			       char __user *ubuf,
++			       size_t len, loff_t *ppos)
++{
++	struct xenbus_dev_data *u = filp->private_data;
++	int i;
++
++	if (wait_event_interruptible(u->read_waitq,
++				     u->read_prod != u->read_cons))
++		return -EINTR;
++
++	for (i = 0; i < len; i++) {
++		if (u->read_cons == u->read_prod)
++			break;
++		put_user(u->read_buffer[MASK_READ_IDX(u->read_cons)], ubuf+i);
++		u->read_cons++;
++	}
++
++	return i;
++}
++
++static void queue_reply(struct xenbus_dev_data *u,
++			char *data, unsigned int len)
++{
++	int i;
++
++	for (i = 0; i < len; i++, u->read_prod++)
++		u->read_buffer[MASK_READ_IDX(u->read_prod)] = data[i];
++
++	BUG_ON((u->read_prod - u->read_cons) > sizeof(u->read_buffer));
++
++	wake_up(&u->read_waitq);
++}
++
++static ssize_t xenbus_dev_write(struct file *filp,
++				const char __user *ubuf,
++				size_t len, loff_t *ppos)
++{
++	struct xenbus_dev_data *u = filp->private_data;
++	struct xenbus_dev_transaction *trans = NULL;
++	void *reply;
++
++	if ((len + u->len) > sizeof(u->u.buffer))
++		return -EINVAL;
++
++	if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0)
++		return -EFAULT;
++
++	u->len += len;
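++	/* Wait until both the header and the full payload have arrived. */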
++	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
++		return len;
++
++	switch (u->u.msg.type) {
++	case XS_TRANSACTION_START:
++	case XS_TRANSACTION_END:
++	case XS_DIRECTORY:
++	case XS_READ:
++	case XS_GET_PERMS:
++	case XS_RELEASE:
++	case XS_GET_DOMAIN_PATH:
++	case XS_WRITE:
++	case XS_MKDIR:
++	case XS_RM:
++	case XS_SET_PERMS:
++		if (u->u.msg.type == XS_TRANSACTION_START) {
++			trans = kmalloc(sizeof(*trans), GFP_KERNEL);
++			if (!trans)
++				return -ENOMEM;
++		}
++
++		reply = xenbus_dev_request_and_reply(&u->u.msg);
++		if (IS_ERR(reply)) {
++			kfree(trans);
++			return PTR_ERR(reply);
++		}
++
++		if (u->u.msg.type == XS_TRANSACTION_START) {
++			trans->handle = simple_strtoul(reply, NULL, 0);
++			list_add(&trans->list, &u->transactions);
++		} else if (u->u.msg.type == XS_TRANSACTION_END) {
++			list_for_each_entry(trans, &u->transactions, list)
++				if (trans->handle == u->u.msg.tx_id)
++					break;
++			BUG_ON(&trans->list == &u->transactions);
++			list_del(&trans->list);
++			kfree(trans);
++		}
++		queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
++		queue_reply(u, (char *)reply, u->u.msg.len);
++		kfree(reply);
++		break;
++
++	default:
++		return -EINVAL;
++	}
++
++	u->len = 0;
++	return len;
++}
++
++static int xenbus_dev_open(struct inode *inode, struct file *filp)
++{
++	struct xenbus_dev_data *u;
++
++	if (xen_start_info->store_evtchn == 0)
++		return -ENOENT;
++
++	nonseekable_open(inode, filp);
++
++	u = kmalloc(sizeof(*u), GFP_KERNEL);
++	if (u == NULL)
++		return -ENOMEM;
++
++	memset(u, 0, sizeof(*u));
++	INIT_LIST_HEAD(&u->transactions);
++	init_waitqueue_head(&u->read_waitq);
++
++	filp->private_data = u;
++
++	return 0;
++}
++
++static int xenbus_dev_release(struct inode *inode, struct file *filp)
++{
++	struct xenbus_dev_data *u = filp->private_data;
++	struct xenbus_dev_transaction *trans, *tmp;
++
++	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
++		xenbus_transaction_end(trans->handle, 1);
++		list_del(&trans->list);
++		kfree(trans);
++	}
++
++	kfree(u);
++
++	return 0;
++}
++
++static struct file_operations xenbus_dev_file_ops = {
++	.read = xenbus_dev_read,
++	.write = xenbus_dev_write,
++	.open = xenbus_dev_open,
++	.release = xenbus_dev_release,
++};
++
++static int __init
++xenbus_dev_init(void)
++{
++	xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400);
++	if (xenbus_dev_intf)
++		xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops;
++
++	return 0;
++}
++
++__initcall(xenbus_dev_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_probe.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_probe.c
+--- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_probe.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_probe.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,1016 @@
++/******************************************************************************
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
++ * Copyright (C) 2005 XenSource Ltd
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#define DPRINTK(fmt, args...) \
++    pr_debug("xenbus_probe (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/fcntl.h>
++#include <linux/mm.h>
++#include <linux/notifier.h>
++#include <linux/kthread.h>
++
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/xenbus.h>
++#include <asm-xen/xen_proc.h>
++#include <asm-xen/balloon.h>
++#include <asm-xen/evtchn.h>
++#include <asm-xen/linux-public/evtchn.h>
++
++#include "xenbus_comms.h"
++
++extern struct semaphore xenwatch_mutex;
++
++#define streq(a, b) (strcmp((a), (b)) == 0)
++
++static struct notifier_block *xenstore_chain;
++
++/* If something in array of ids matches this device, return it. */
++static const struct xenbus_device_id *
++match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
++{
++	for (; !streq(arr->devicetype, ""); arr++) {
++		if (streq(arr->devicetype, dev->devicetype))
++			return arr;
++	}
++	return NULL;
++}
++
++static int xenbus_match(struct device *_dev, struct device_driver *_drv)
++{
++	struct xenbus_driver *drv = to_xenbus_driver(_drv);
++
++	if (!drv->ids)
++		return 0;
++
++	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
++}
++
++struct xen_bus_type
++{
++	char *root;
++	unsigned int levels;
++	int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
++	int (*probe)(const char *type, const char *dir);
++	struct bus_type bus;
++	struct device dev;
++};
++
++
++/* device/<type>/<id> => <type>-<id> */
++static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++{
++	nodename = strchr(nodename, '/');
++	if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
++		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
++		return -EINVAL;
++	}
++
++	strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
++	if (!strchr(bus_id, '/')) {
++		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
++		return -EINVAL;
++	}
++	*strchr(bus_id, '/') = '-';
++	return 0;
++}
++
++
++static int read_otherend_details(struct xenbus_device *xendev,
++				 char *id_node, char *path_node)
++{
++	int err = xenbus_gather(XBT_NULL, xendev->nodename,
++				id_node, "%i", &xendev->otherend_id,
++				path_node, NULL, &xendev->otherend,
++				NULL);
++	if (err) {
++		xenbus_dev_fatal(xendev, err,
++				 "reading other end details from %s",
++				 xendev->nodename);
++		return err;
++	}
++	if (strlen(xendev->otherend) == 0 ||
++	    !xenbus_exists(XBT_NULL, xendev->otherend, "")) {
++		xenbus_dev_fatal(xendev, -ENOENT, "missing other end from %s",
++				 xendev->nodename);
++		kfree(xendev->otherend);
++		xendev->otherend = NULL;
++		return -ENOENT;
++	}
++
++	return 0;
++}
++
++
++static int read_backend_details(struct xenbus_device *xendev)
++{
++	return read_otherend_details(xendev, "backend-id", "backend");
++}
++
++
++static int read_frontend_details(struct xenbus_device *xendev)
++{
++	return read_otherend_details(xendev, "frontend-id", "frontend");
++}
++
++
++static void free_otherend_details(struct xenbus_device *dev)
++{
++	kfree(dev->otherend);
++	dev->otherend = NULL;
++}
++
++
++static void free_otherend_watch(struct xenbus_device *dev)
++{
++	if (dev->otherend_watch.node) {
++		unregister_xenbus_watch(&dev->otherend_watch);
++		kfree(dev->otherend_watch.node);
++		dev->otherend_watch.node = NULL;
++	}
++}
++
++
++/* Bus type for frontend drivers. */
++static int xenbus_probe_frontend(const char *type, const char *name);
++static struct xen_bus_type xenbus_frontend = {
++	.root = "device",
++	.levels = 2, 		/* device/type/<id> */
++	.get_bus_id = frontend_bus_id,
++	.probe = xenbus_probe_frontend,
++	.bus = {
++		.name  = "xen",
++		.match = xenbus_match,
++	},
++	.dev = {
++		.bus_id = "xen",
++	},
++};
++
++/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
++static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++{
++	int domid, err;
++	const char *devid, *type, *frontend;
++	unsigned int typelen;
++
++	type = strchr(nodename, '/');
++	if (!type)
++		return -EINVAL;
++	type++;
++	typelen = strcspn(type, "/");
++	if (!typelen || type[typelen] != '/')
++		return -EINVAL;
++
++	devid = strrchr(nodename, '/') + 1;
++
++	err = xenbus_gather(XBT_NULL, nodename, "frontend-id", "%i", &domid,
++			    "frontend", NULL, &frontend,
++			    NULL);
++	if (err)
++		return err;
++	if (strlen(frontend) == 0)
++		err = -ERANGE;
++	if (!err && !xenbus_exists(XBT_NULL, frontend, ""))
++		err = -ENOENT;
++
++	kfree(frontend);
++
++	if (err)
++		return err;
++
++	if (snprintf(bus_id, BUS_ID_SIZE,
++		     "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE)
++		return -ENOSPC;
++	return 0;
++}
++
++static int xenbus_hotplug_backend(struct device *dev, char **envp,
++				  int num_envp, char *buffer, int buffer_size);
++static int xenbus_probe_backend(const char *type, const char *domid);
++static struct xen_bus_type xenbus_backend = {
++	.root = "backend",
++	.levels = 3, 		/* backend/type/<frontend>/<id> */
++	.get_bus_id = backend_bus_id,
++	.probe = xenbus_probe_backend,
++	.bus = {
++		.name  = "xen-backend",
++		.match = xenbus_match,
++		.hotplug = xenbus_hotplug_backend,
++	},
++	.dev = {
++		.bus_id = "xen-backend",
++	},
++};
++
++static int xenbus_hotplug_backend(struct device *dev, char **envp,
++				  int num_envp, char *buffer, int buffer_size)
++{
++	struct xenbus_device *xdev;
++	struct xenbus_driver *drv;
++	int i = 0;
++	int length = 0;
++
++	DPRINTK("");
++
++	if (dev == NULL)
++		return -ENODEV;
++
++	xdev = to_xenbus_device(dev);
++	if (xdev == NULL)
++		return -ENODEV;
++
++	/* stuff we want to pass to /sbin/hotplug */
++	add_hotplug_env_var(envp, num_envp, &i,
++			    buffer, buffer_size, &length,
++			    "XENBUS_TYPE=%s", xdev->devicetype);
++
++	add_hotplug_env_var(envp, num_envp, &i,
++			    buffer, buffer_size, &length,
++			    "XENBUS_PATH=%s", xdev->nodename);
++
++	add_hotplug_env_var(envp, num_envp, &i,
++			    buffer, buffer_size, &length,
++			    "XENBUS_BASE_PATH=%s", xenbus_backend.root);
++
++	/* terminate, set to next free slot, shrink available space */
++	envp[i] = NULL;
++	envp = &envp[i];
++	num_envp -= i;
++	buffer = &buffer[length];
++	buffer_size -= length;
++
++	if (dev->driver) {
++		drv = to_xenbus_driver(dev->driver);
++		if (drv && drv->hotplug)
++			return drv->hotplug(xdev, envp, num_envp, buffer,
++					    buffer_size);
++	}
++
++	return 0;
++}
++
++static void otherend_changed(struct xenbus_watch *watch,
++			     const char **vec, unsigned int len)
++{
++	struct xenbus_device *dev =
++		container_of(watch, struct xenbus_device, otherend_watch);
++	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
++	XenbusState state;
++
++	/* Protect us against watches firing on old details when the otherend
++	   details change, say immediately after a resume. */
++	if (!dev->otherend ||
++	    strncmp(dev->otherend, vec[XS_WATCH_PATH],
++		    strlen(dev->otherend))) {
++		DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
++		return;
++	}
++
++	state = xenbus_read_driver_state(dev->otherend);
++
++	DPRINTK("state is %d, %s, %s",
++		state, dev->otherend_watch.node, vec[XS_WATCH_PATH]);
++	if (drv->otherend_changed)
++		drv->otherend_changed(dev, state);
++}
++
++
++static int talk_to_otherend(struct xenbus_device *dev)
++{
++	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
++
++	free_otherend_watch(dev);
++	free_otherend_details(dev);
++
++	return drv->read_otherend_details(dev);
++}
++
++
++static int watch_otherend(struct xenbus_device *dev)
++{
++	return xenbus_watch_path2(dev, dev->otherend, "state",
++				  &dev->otherend_watch, otherend_changed);
++}
++
++
++static int xenbus_dev_probe(struct device *_dev)
++{
++	struct xenbus_device *dev = to_xenbus_device(_dev);
++	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
++	const struct xenbus_device_id *id;
++	int err;
++
++	DPRINTK("");
++
++	if (!drv->probe) {
++		err = -ENODEV;
++		goto fail;
++	}
++
++	id = match_device(drv->ids, dev);
++	if (!id) {
++		err = -ENODEV;
++		goto fail;
++	}
++
++	err = talk_to_otherend(dev);
++	if (err) {
++		printk(KERN_WARNING
++		       "xenbus_probe: talk_to_otherend on %s failed.\n",
++		       dev->nodename);
++		return err;
++	}
++
++	err = drv->probe(dev, id);
++	if (err)
++		goto fail;
++
++	err = watch_otherend(dev);
++	if (err) {
++		printk(KERN_WARNING
++		       "xenbus_probe: watch_otherend on %s failed.\n",
++		       dev->nodename);
++		return err;
++	}
++
++	return 0;
++fail:
++	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
++	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
++	return -ENODEV;
++	
++}
++
++static int xenbus_dev_remove(struct device *_dev)
++{
++	struct xenbus_device *dev = to_xenbus_device(_dev);
++	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
++
++	DPRINTK("");
++
++	free_otherend_watch(dev);
++	free_otherend_details(dev);
++
++	if (drv->remove)
++		drv->remove(dev);
++
++	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
++	return 0;
++}
++
++static int xenbus_register_driver_common(struct xenbus_driver *drv,
++					 struct xen_bus_type *bus)
++{
++	int ret;
++
++	drv->driver.name = drv->name;
++	drv->driver.bus = &bus->bus;
++	drv->driver.owner = drv->owner;
++	drv->driver.probe = xenbus_dev_probe;
++	drv->driver.remove = xenbus_dev_remove;
++
++	down(&xenwatch_mutex);
++	ret = driver_register(&drv->driver);
++	up(&xenwatch_mutex);
++	return ret;
++}
++
++int xenbus_register_frontend(struct xenbus_driver *drv)
++{
++	drv->read_otherend_details = read_backend_details;
++
++	return xenbus_register_driver_common(drv, &xenbus_frontend);
++}
++EXPORT_SYMBOL(xenbus_register_frontend);
++
++int xenbus_register_backend(struct xenbus_driver *drv)
++{
++	drv->read_otherend_details = read_frontend_details;
++
++	return xenbus_register_driver_common(drv, &xenbus_backend);
++}
++EXPORT_SYMBOL(xenbus_register_backend);
++
++void xenbus_unregister_driver(struct xenbus_driver *drv)
++{
++	driver_unregister(&drv->driver);
++}
++EXPORT_SYMBOL(xenbus_unregister_driver);
++
++struct xb_find_info
++{
++	struct xenbus_device *dev;
++	const char *nodename;
++};
++
++static int cmp_dev(struct device *dev, void *data)
++{
++	struct xenbus_device *xendev = to_xenbus_device(dev);
++	struct xb_find_info *info = data;
++
++	if (streq(xendev->nodename, info->nodename)) {
++		info->dev = xendev;
++		get_device(dev);
++		return 1;
++	}
++	return 0;
++}
++
++struct xenbus_device *xenbus_device_find(const char *nodename,
++					 struct bus_type *bus)
++{
++	struct xb_find_info info = { .dev = NULL, .nodename = nodename };
++
++	bus_for_each_dev(bus, NULL, &info, cmp_dev);
++	return info.dev;
++}
++
++static int cleanup_dev(struct device *dev, void *data)
++{
++	struct xenbus_device *xendev = to_xenbus_device(dev);
++	struct xb_find_info *info = data;
++	int len = strlen(info->nodename);
++
++	DPRINTK("%s", info->nodename);
++
++	/* Match the info->nodename path, or any subdirectory of that path. */
++	if (strncmp(xendev->nodename, info->nodename, len))
++		return 0;
++
++	/* If the node name is longer, ensure it really is a subdirectory. */
++	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
++		return 0;
++
++	info->dev = xendev;
++	get_device(dev);
++	return 1;
++}
++
++static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
++{
++	struct xb_find_info info = { .nodename = path };
++
++	do {
++		info.dev = NULL;
++		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
++		if (info.dev) {
++			device_unregister(&info.dev->dev);
++			put_device(&info.dev->dev);
++		}
++	} while (info.dev);
++}
++
++static void xenbus_dev_free(struct xenbus_device *xendev)
++{
++	kfree(xendev);
++}
++
++static void xenbus_dev_release(struct device *dev)
++{
++	if (dev) {
++		xenbus_dev_free(to_xenbus_device(dev));
++	}
++}
++
++/* Simplified asprintf. */
++char *kasprintf(const char *fmt, ...)
++{
++	va_list ap;
++	unsigned int len;
++	char *p, dummy[1];
++
++	va_start(ap, fmt);
++	/* FIXME: vsnprintf has a bug, NULL should work */
++	len = vsnprintf(dummy, 0, fmt, ap);
++	va_end(ap);
++
++	p = kmalloc(len + 1, GFP_KERNEL);
++	if (!p)
++		return NULL;
++	va_start(ap, fmt);
++	vsprintf(p, fmt, ap);
++	va_end(ap);
++	return p;
++}
++
++static ssize_t xendev_show_nodename(struct device *dev, char *buf)
++{
++	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
++}
++DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
++
++static ssize_t xendev_show_devtype(struct device *dev, char *buf)
++{
++	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
++}
++DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
++
++
++static int xenbus_probe_node(struct xen_bus_type *bus,
++			     const char *type,
++			     const char *nodename)
++{
++	int err;
++	struct xenbus_device *xendev;
++	size_t stringlen;
++	char *tmpstring;
++
++	XenbusState state = xenbus_read_driver_state(nodename);
++
++	if (state != XenbusStateInitialising) {
++		/* Device is not new, so ignore it.  This can happen if a
++		   device is going away after switching to Closed.  */
++		return 0;
++	}
++
++	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
++	xendev = kmalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
++	if (!xendev)
++		return -ENOMEM;
++	memset(xendev, 0, sizeof(*xendev));
++
++	/* Copy the strings into the extra space. */
++
++	tmpstring = (char *)(xendev + 1);
++	strcpy(tmpstring, nodename);
++	xendev->nodename = tmpstring;
++
++	tmpstring += strlen(tmpstring) + 1;
++	strcpy(tmpstring, type);
++	xendev->devicetype = tmpstring;
++
++	xendev->dev.parent = &bus->dev;
++	xendev->dev.bus = &bus->bus;
++	xendev->dev.release = xenbus_dev_release;
++
++	err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
++	if (err)
++		goto fail;
++
++	/* Register with generic device framework. */
++	err = device_register(&xendev->dev);
++	if (err)
++		goto fail;
++
++	device_create_file(&xendev->dev, &dev_attr_nodename);
++	device_create_file(&xendev->dev, &dev_attr_devtype);
++
++	return 0;
++fail:
++	xenbus_dev_free(xendev);
++	return err;
++}
++
++/* device/<typename>/<name> */
++static int xenbus_probe_frontend(const char *type, const char *name)
++{
++	char *nodename;
++	int err;
++
++	nodename = kasprintf("%s/%s/%s", xenbus_frontend.root, type, name);
++	if (!nodename)
++		return -ENOMEM;
++	
++	DPRINTK("%s", nodename);
++
++	err = xenbus_probe_node(&xenbus_frontend, type, nodename);
++	kfree(nodename);
++	return err;
++}
++
++/* backend/<typename>/<frontend-uuid>/<name> */
++static int xenbus_probe_backend_unit(const char *dir,
++				     const char *type,
++				     const char *name)
++{
++	char *nodename;
++	int err;
++
++	nodename = kasprintf("%s/%s", dir, name);
++	if (!nodename)
++		return -ENOMEM;
++
++	DPRINTK("%s\n", nodename);
++
++	err = xenbus_probe_node(&xenbus_backend, type, nodename);
++	kfree(nodename);
++	return err;
++}
++
++/* backend/<typename>/<frontend-domid> */
++static int xenbus_probe_backend(const char *type, const char *domid)
++{
++	char *nodename;
++	int err = 0;
++	char **dir;
++	unsigned int i, dir_n = 0;
++
++	DPRINTK("");
++
++	nodename = kasprintf("%s/%s/%s", xenbus_backend.root, type, domid);
++	if (!nodename)
++		return -ENOMEM;
++
++	dir = xenbus_directory(XBT_NULL, nodename, "", &dir_n);
++	if (IS_ERR(dir)) {
++		kfree(nodename);
++		return PTR_ERR(dir);
++	}
++
++	for (i = 0; i < dir_n; i++) {
++		err = xenbus_probe_backend_unit(nodename, type, dir[i]);
++		if (err)
++			break;
++	}
++	kfree(dir);
++	kfree(nodename);
++	return err;
++}
++
++static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
++{
++	int err = 0;
++	char **dir;
++	unsigned int dir_n = 0;
++	int i;
++
++	dir = xenbus_directory(XBT_NULL, bus->root, type, &dir_n);
++	if (IS_ERR(dir))
++		return PTR_ERR(dir);
++
++	for (i = 0; i < dir_n; i++) {
++		err = bus->probe(type, dir[i]);
++		if (err)
++			break;
++	}
++	kfree(dir);
++	return err;
++}
++
++static int xenbus_probe_devices(struct xen_bus_type *bus)
++{
++	int err = 0;
++	char **dir;
++	unsigned int i, dir_n;
++
++	dir = xenbus_directory(XBT_NULL, bus->root, "", &dir_n);
++	if (IS_ERR(dir))
++		return PTR_ERR(dir);
++
++	for (i = 0; i < dir_n; i++) {
++		err = xenbus_probe_device_type(bus, dir[i]);
++		if (err)
++			break;
++	}
++	kfree(dir);
++	return err;
++}
++
++static unsigned int char_count(const char *str, char c)
++{
++	unsigned int i, ret = 0;
++
++	for (i = 0; str[i]; i++)
++		if (str[i] == c)
++			ret++;
++	return ret;
++}
++
++static int strsep_len(const char *str, char c, unsigned int len)
++{
++	unsigned int i;
++
++	for (i = 0; str[i]; i++)
++		if (str[i] == c) {
++			if (len == 0)
++				return i;
++			len--;
++		}
++	return (len == 0) ? i : -ERANGE;
++}
++
++static void dev_changed(const char *node, struct xen_bus_type *bus)
++{
++	int exists, rootlen;
++	struct xenbus_device *dev;
++	char type[BUS_ID_SIZE];
++	const char *p, *root;
++
++	if (char_count(node, '/') < 2)
++		return;
++
++	exists = xenbus_exists(XBT_NULL, node, "");
++	if (!exists) {
++		xenbus_cleanup_devices(node, &bus->bus);
++		return;
++	}
++
++	/* backend/<type>/... or device/<type>/... */
++	p = strchr(node, '/') + 1;
++	snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
++	type[BUS_ID_SIZE-1] = '\0';
++
++	rootlen = strsep_len(node, '/', bus->levels);
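++	/* Keep only the device's root path: the bus root plus bus->levels path components. */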
++	if (rootlen < 0)
++		return;
++	root = kasprintf("%.*s", rootlen, node);
++	if (!root)
++		return;
++
++	dev = xenbus_device_find(root, &bus->bus);
++	if (!dev)
++		xenbus_probe_node(bus, type, root);
++	else
++		put_device(&dev->dev);
++
++	kfree(root);
++}
++
++static void frontend_changed(struct xenbus_watch *watch,
++			     const char **vec, unsigned int len)
++{
++	DPRINTK("");
++
++	dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
++}
++
++static void backend_changed(struct xenbus_watch *watch,
++			    const char **vec, unsigned int len)
++{
++	DPRINTK("");
++
++	dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
++}
++
++/* We watch for devices appearing and vanishing. */
++static struct xenbus_watch fe_watch = {
++	.node = "device",
++	.callback = frontend_changed,
++};
++
++static struct xenbus_watch be_watch = {
++	.node = "backend",
++	.callback = backend_changed,
++};
++
++static int suspend_dev(struct device *dev, void *data)
++{
++	int err = 0;
++	struct xenbus_driver *drv;
++	struct xenbus_device *xdev;
++
++	DPRINTK("");
++
++	if (dev->driver == NULL)
++		return 0;
++	drv = to_xenbus_driver(dev->driver);
++	xdev = container_of(dev, struct xenbus_device, dev);
++	if (drv->suspend)
++		err = drv->suspend(xdev);
++	if (err)
++		printk(KERN_WARNING
++		       "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
++	return 0;
++}
++
++static int resume_dev(struct device *dev, void *data)
++{
++	int err;
++	struct xenbus_driver *drv;
++	struct xenbus_device *xdev;
++
++	DPRINTK("");
++
++	if (dev->driver == NULL)
++		return 0;
++	drv = to_xenbus_driver(dev->driver);
++	xdev = container_of(dev, struct xenbus_device, dev);
++
++	err = talk_to_otherend(xdev);
++	if (err) {
++		printk(KERN_WARNING
++		       "xenbus: resume (talk_to_otherend) %s failed: %i\n",
++		       dev->bus_id, err);
++		return err;
++	}
++
++	err = watch_otherend(xdev);
++	if (err) {
++		printk(KERN_WARNING
++		       "xenbus_probe: resume (watch_otherend) %s failed: "
++		       "%d.\n", dev->bus_id, err);
++		return err;
++	}
++
++	if (drv->resume)
++		err = drv->resume(xdev);
++	if (err)
++		printk(KERN_WARNING
++		       "xenbus: resume %s failed: %i\n", dev->bus_id, err);
++	return err;
++}
++
++void xenbus_suspend(void)
++{
++	DPRINTK("");
++
++	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
++	bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, suspend_dev);
++	xs_suspend();
++}
++EXPORT_SYMBOL(xenbus_suspend);
++
++void xenbus_resume(void)
++{
++	xb_init_comms();
++	xs_resume();
++	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
++	bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, resume_dev);
++}
++EXPORT_SYMBOL(xenbus_resume);
++
++
++/* A flag to determine if xenstored is 'ready' (i.e. has started) */
++int xenstored_ready = 0; 
++
++
++int register_xenstore_notifier(struct notifier_block *nb)
++{
++	int ret = 0;
++
++	if (xenstored_ready > 0) 
++		ret = nb->notifier_call(nb, 0, NULL);
++	else 
++		notifier_chain_register(&xenstore_chain, nb);
++
++	return ret;
++}
++EXPORT_SYMBOL(register_xenstore_notifier);
++
++void unregister_xenstore_notifier(struct notifier_block *nb)
++{
++	notifier_chain_unregister(&xenstore_chain, nb);
++}
++EXPORT_SYMBOL(unregister_xenstore_notifier);
++
++
++
++void xenbus_probe(void *unused)
++{
++	BUG_ON(xenstored_ready <= 0);
++
++	/* Enumerate devices in xenstore. */
++	xenbus_probe_devices(&xenbus_frontend);
++	xenbus_probe_devices(&xenbus_backend);
++
++	/* Watch for changes. */
++	register_xenbus_watch(&fe_watch);
++	register_xenbus_watch(&be_watch);
++
++	/* Notify others that xenstore is up */
++	notifier_call_chain(&xenstore_chain, 0, NULL);
++}
++
++
++static struct proc_dir_entry *xsd_mfn_intf;
++static struct proc_dir_entry *xsd_port_intf;
++
++
++static int xsd_mfn_read(char *page, char **start, off_t off,
++                        int count, int *eof, void *data)
++{
++	int len; 
++	len  = sprintf(page, "%ld", xen_start_info->store_mfn); 
++	*eof = 1; 
++	return len; 
++}
++
++static int xsd_port_read(char *page, char **start, off_t off,
++			 int count, int *eof, void *data)
++{
++	int len; 
++
++	len  = sprintf(page, "%d", xen_start_info->store_evtchn); 
++	*eof = 1; 
++	return len; 
++}
++
++
++static int __init xenbus_probe_init(void)
++{
++	int err = 0, dom0;
++
++	DPRINTK("");
++
++	if (xen_init() < 0) {
++		DPRINTK("failed");
++		return -ENODEV;
++	}
++
++	/* Register ourselves with the kernel bus & device subsystems */
++	bus_register(&xenbus_frontend.bus);
++	bus_register(&xenbus_backend.bus);
++	device_register(&xenbus_frontend.dev);
++	device_register(&xenbus_backend.dev);
++
++	/*
++	** Domain0 doesn't have a store_evtchn or store_mfn yet.
++	*/
++	dom0 = (xen_start_info->store_evtchn == 0);
++
++	if (dom0) {
++
++		unsigned long page;
++		evtchn_op_t op = { 0 };
++		int ret;
++
++
++		/* Allocate page. */
++		page = get_zeroed_page(GFP_KERNEL);
++		if (!page) 
++			return -ENOMEM; 
++
++		/* We don't refcnt properly, so set reserved on page.
++		 * (this allocation is permanent) */
++		SetPageReserved(virt_to_page(page));
++
++		xen_start_info->store_mfn =
++			pfn_to_mfn(virt_to_phys((void *)page) >>
++				   PAGE_SHIFT);
++		
++		/* Next allocate a local port which xenstored can bind to */
++		op.cmd = EVTCHNOP_alloc_unbound;
++		op.u.alloc_unbound.dom        = DOMID_SELF;
++		op.u.alloc_unbound.remote_dom = 0; 
++
++		ret = HYPERVISOR_event_channel_op(&op);
++		BUG_ON(ret); 
++		xen_start_info->store_evtchn = op.u.alloc_unbound.port;
++
++		/* And finally publish the above info in /proc/xen */
++		if((xsd_mfn_intf = create_xen_proc_entry("xsd_mfn", 0400)))
++			xsd_mfn_intf->read_proc = xsd_mfn_read; 
++		if((xsd_port_intf = create_xen_proc_entry("xsd_port", 0400)))
++			xsd_port_intf->read_proc = xsd_port_read;
++	}
++
++	/* Initialize the interface to xenstore. */
++	err = xs_init(); 
++	if (err) {
++		printk(KERN_WARNING
++		       "XENBUS: Error initializing xenstore comms: %i\n", err);
++		return err; 
++	}
++
++	if (!dom0) {
++		xenstored_ready = 1;
++		xenbus_probe(NULL);
++	}
++
++	return 0;
++}
++
++postcore_initcall(xenbus_probe_init);
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_xs.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_xs.c
+--- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_xs.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_xs.c	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,821 @@
++/******************************************************************************
++ * xenbus_xs.c
++ *
++ * This is the kernel equivalent of the "xs" library.  We don't need everything
++ * and we use xenbus_comms for communication.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/unistd.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/uio.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/fcntl.h>
++#include <linux/kthread.h>
++#include <asm-xen/xenbus.h>
++#include "xenbus_comms.h"
++
++/* xenbus_probe.c */
++extern char *kasprintf(const char *fmt, ...);
++
++#define streq(a, b) (strcmp((a), (b)) == 0)
++
++struct xs_stored_msg {
++	struct list_head list;
++
++	struct xsd_sockmsg hdr;
++
++	union {
++		/* Queued replies. */
++		struct {
++			char *body;
++		} reply;
++
++		/* Queued watch events. */
++		struct {
++			struct xenbus_watch *handle;
++			char **vec;
++			unsigned int vec_size;
++		} watch;
++	} u;
++};
++
++struct xs_handle {
++	/* A list of replies. Currently only one will ever be outstanding. */
++	struct list_head reply_list;
++	spinlock_t reply_lock;
++	wait_queue_head_t reply_waitq;
++
++	/* One request at a time. */
++	struct semaphore request_mutex;
++
++	/* Protect transactions against save/restore. */
++	struct rw_semaphore suspend_mutex;
++};
++
++static struct xs_handle xs_state;
++
++/* List of registered watches, and a lock to protect it. */
++static LIST_HEAD(watches);
++static DEFINE_SPINLOCK(watches_lock);
++
++/* List of pending watch callback events, and a lock to protect it. */
++static LIST_HEAD(watch_events);
++static DEFINE_SPINLOCK(watch_events_lock);
++
++/*
++ * Details of the xenwatch callback kernel thread. The thread waits on the
++ * watch_events_waitq for work to do (queued on watch_events list). When it
++ * wakes up it acquires the xenwatch_mutex before reading the list and
++ * carrying out work.
++ */
++static pid_t xenwatch_pid;
++/* static */ DECLARE_MUTEX(xenwatch_mutex);
++static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
++
++static int get_error(const char *errorstring)
++{
++	unsigned int i;
++
++	for (i = 0; !streq(errorstring, xsd_errors[i].errstring); i++) {
++		if (i == ARRAY_SIZE(xsd_errors) - 1) {
++			printk(KERN_WARNING
++			       "XENBUS xen store gave: unknown error %s\n",
++			       errorstring);
++			return EINVAL;
++		}
++	}
++	return xsd_errors[i].errnum;
++}
++
++static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
++{
++	struct xs_stored_msg *msg;
++	char *body;
++
++	spin_lock(&xs_state.reply_lock);
++
++	while (list_empty(&xs_state.reply_list)) {
++		spin_unlock(&xs_state.reply_lock);
++		wait_event_interruptible(xs_state.reply_waitq,
++					 !list_empty(&xs_state.reply_list));
++		spin_lock(&xs_state.reply_lock);
++	}
++
++	msg = list_entry(xs_state.reply_list.next,
++			 struct xs_stored_msg, list);
++	list_del(&msg->list);
++
++	spin_unlock(&xs_state.reply_lock);
++
++	*type = msg->hdr.type;
++	if (len)
++		*len = msg->hdr.len;
++	body = msg->u.reply.body;
++
++	kfree(msg);
++
++	return body;
++}
++
++/* Emergency write. */
++void xenbus_debug_write(const char *str, unsigned int count)
++{
++	struct xsd_sockmsg msg = { 0 };
++
++	msg.type = XS_DEBUG;
++	msg.len = sizeof("print") + count + 1;
++
++	down(&xs_state.request_mutex);
++	xb_write(&msg, sizeof(msg));
++	xb_write("print", sizeof("print"));
++	xb_write(str, count);
++	xb_write("", 1);
++	up(&xs_state.request_mutex);
++}
++
++void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
++{
++	void *ret;
++	struct xsd_sockmsg req_msg = *msg;
++	int err;
++
++	if (req_msg.type == XS_TRANSACTION_START)
++		down_read(&xs_state.suspend_mutex);
++
++	down(&xs_state.request_mutex);
++
++	err = xb_write(msg, sizeof(*msg) + msg->len);
++	if (err) {
++		msg->type = XS_ERROR;
++		ret = ERR_PTR(err);
++	} else {
++		ret = read_reply(&msg->type, &msg->len);
++	}
++
++	up(&xs_state.request_mutex);
++
++	if ((msg->type == XS_TRANSACTION_END) ||
++	    ((req_msg.type == XS_TRANSACTION_START) &&
++	     (msg->type == XS_ERROR)))
++		up_read(&xs_state.suspend_mutex);
++
++	return ret;
++}
++
++/* Send message to xs, get kmalloc'ed reply.  ERR_PTR() on error. */
++static void *xs_talkv(xenbus_transaction_t t,
++		      enum xsd_sockmsg_type type,
++		      const struct kvec *iovec,
++		      unsigned int num_vecs,
++		      unsigned int *len)
++{
++	struct xsd_sockmsg msg;
++	void *ret = NULL;
++	unsigned int i;
++	int err;
++
++	msg.tx_id = t;
++	msg.req_id = 0;
++	msg.type = type;
++	msg.len = 0;
++	for (i = 0; i < num_vecs; i++)
++		msg.len += iovec[i].iov_len;
++
++	down(&xs_state.request_mutex);
++
++	err = xb_write(&msg, sizeof(msg));
++	if (err) {
++		up(&xs_state.request_mutex);
++		return ERR_PTR(err);
++	}
++
++	for (i = 0; i < num_vecs; i++) {
++		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
++		if (err) {
++			up(&xs_state.request_mutex);
++			return ERR_PTR(err);
++		}
++	}
++
++	ret = read_reply(&msg.type, len);
++
++	up(&xs_state.request_mutex);
++
++	if (IS_ERR(ret))
++		return ret;
++
++	if (msg.type == XS_ERROR) {
++		err = get_error(ret);
++		kfree(ret);
++		return ERR_PTR(-err);
++	}
++
++	BUG_ON(msg.type != type);
++	return ret;
++}
++
++/* Simplified version of xs_talkv: single message. */
++static void *xs_single(xenbus_transaction_t t,
++		       enum xsd_sockmsg_type type,
++		       const char *string,
++		       unsigned int *len)
++{
++	struct kvec iovec;
++
++	iovec.iov_base = (void *)string;
++	iovec.iov_len = strlen(string) + 1;
++	return xs_talkv(t, type, &iovec, 1, len);
++}
++
++/* Many commands only need an ack, don't care what it says. */
++static int xs_error(char *reply)
++{
++	if (IS_ERR(reply))
++		return PTR_ERR(reply);
++	kfree(reply);
++	return 0;
++}
++
++static unsigned int count_strings(const char *strings, unsigned int len)
++{
++	unsigned int num;
++	const char *p;
++
++	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
++		num++;
++
++	return num;
++}
++
++/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ 
++static char *join(const char *dir, const char *name)
++{
++	char *buffer;
++
++	if (strlen(name) == 0)
++		buffer = kasprintf("%s", dir);
++	else
++		buffer = kasprintf("%s/%s", dir, name);
++	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
++}
++
++static char **split(char *strings, unsigned int len, unsigned int *num)
++{
++	char *p, **ret;
++
++	/* Count the strings. */
++	*num = count_strings(strings, len);
++
++	/* Transfer to one big alloc for easy freeing. */
++	ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
++	if (!ret) {
++		kfree(strings);
++		return ERR_PTR(-ENOMEM);
++	}
++	memcpy(&ret[*num], strings, len);
++	kfree(strings);
++
++	strings = (char *)&ret[*num];
++	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
++		ret[(*num)++] = p;
++
++	return ret;
++}
++
++char **xenbus_directory(xenbus_transaction_t t,
++			const char *dir, const char *node, unsigned int *num)
++{
++	char *strings, *path;
++	unsigned int len;
++
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return (char **)path;
++
++	strings = xs_single(t, XS_DIRECTORY, path, &len);
++	kfree(path);
++	if (IS_ERR(strings))
++		return (char **)strings;
++
++	return split(strings, len, num);
++}
++EXPORT_SYMBOL(xenbus_directory);
++
++/* Check if a path exists. Return 1 if it does. */
++int xenbus_exists(xenbus_transaction_t t,
++		  const char *dir, const char *node)
++{
++	char **d;
++	unsigned int dir_n;
++
++	d = xenbus_directory(t, dir, node, &dir_n);
++	if (IS_ERR(d))
++		return 0;
++	kfree(d);
++	return 1;
++}
++EXPORT_SYMBOL(xenbus_exists);
++
++/* Get the value of a single file.
++ * Returns a kmalloced value: call kfree() on it after use.
++ * len indicates length in bytes.
++ */
++void *xenbus_read(xenbus_transaction_t t,
++		  const char *dir, const char *node, unsigned int *len)
++{
++	char *path;
++	void *ret;
++
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return (void *)path;
++
++	ret = xs_single(t, XS_READ, path, len);
++	kfree(path);
++	return ret;
++}
++EXPORT_SYMBOL(xenbus_read);
++
++/* Write the value of a single file.
++ * Returns -err on failure.
++ */
++int xenbus_write(xenbus_transaction_t t,
++		 const char *dir, const char *node, const char *string)
++{
++	const char *path;
++	struct kvec iovec[2];
++	int ret;
++
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return PTR_ERR(path);
++
++	iovec[0].iov_base = (void *)path;
++	iovec[0].iov_len = strlen(path) + 1;
++	iovec[1].iov_base = (void *)string;
++	iovec[1].iov_len = strlen(string);
++
++	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
++	kfree(path);
++	return ret;
++}
++EXPORT_SYMBOL(xenbus_write);
++
++/* Create a new directory. */
++int xenbus_mkdir(xenbus_transaction_t t,
++		 const char *dir, const char *node)
++{
++	char *path;
++	int ret;
++
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return PTR_ERR(path);
++
++	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
++	kfree(path);
++	return ret;
++}
++EXPORT_SYMBOL(xenbus_mkdir);
++
++/* Destroy a file or directory (directories must be empty). */
++int xenbus_rm(xenbus_transaction_t t, const char *dir, const char *node)
++{
++	char *path;
++	int ret;
++
++	path = join(dir, node);
++	if (IS_ERR(path))
++		return PTR_ERR(path);
++
++	ret = xs_error(xs_single(t, XS_RM, path, NULL));
++	kfree(path);
++	return ret;
++}
++EXPORT_SYMBOL(xenbus_rm);
++
++/* Start a transaction: changes by others will not be seen during this
++ * transaction, and changes will not be visible to others until end.
++ */
++int xenbus_transaction_start(xenbus_transaction_t *t)
++{
++	char *id_str;
++
++	down_read(&xs_state.suspend_mutex);
++
++	id_str = xs_single(XBT_NULL, XS_TRANSACTION_START, "", NULL);
++	if (IS_ERR(id_str)) {
++		up_read(&xs_state.suspend_mutex);
++		return PTR_ERR(id_str);
++	}
++
++	*t = simple_strtoul(id_str, NULL, 0);
++	kfree(id_str);
++	return 0;
++}
++EXPORT_SYMBOL(xenbus_transaction_start);
++
++/* End a transaction.
++ * If abort is true, the transaction is discarded instead of committed.
++ */
++int xenbus_transaction_end(xenbus_transaction_t t, int abort)
++{
++	char abortstr[2];
++	int err;
++
++	if (abort)
++		strcpy(abortstr, "F");
++	else
++		strcpy(abortstr, "T");
++
++	err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
++
++	up_read(&xs_state.suspend_mutex);
++
++	return err;
++}
++EXPORT_SYMBOL(xenbus_transaction_end);
++
++/* Single read and scanf: returns -errno or num scanned. */
++int xenbus_scanf(xenbus_transaction_t t,
++		 const char *dir, const char *node, const char *fmt, ...)
++{
++	va_list ap;
++	int ret;
++	char *val;
++
++	val = xenbus_read(t, dir, node, NULL);
++	if (IS_ERR(val))
++		return PTR_ERR(val);
++
++	va_start(ap, fmt);
++	ret = vsscanf(val, fmt, ap);
++	va_end(ap);
++	kfree(val);
++	/* Distinctive errno. */
++	if (ret == 0)
++		return -ERANGE;
++	return ret;
++}
++EXPORT_SYMBOL(xenbus_scanf);
++
++/* Single printf and write: returns -errno or 0. */
++int xenbus_printf(xenbus_transaction_t t,
++		  const char *dir, const char *node, const char *fmt, ...)
++{
++	va_list ap;
++	int ret;
++#define PRINTF_BUFFER_SIZE 4096
++	char *printf_buffer;
++
++	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
++	if (printf_buffer == NULL)
++		return -ENOMEM;
++
++	va_start(ap, fmt);
++	ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
++	va_end(ap);
++
++	BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
++	ret = xenbus_write(t, dir, node, printf_buffer);
++
++	kfree(printf_buffer);
++
++	return ret;
++}
++EXPORT_SYMBOL(xenbus_printf);
++
++/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
++int xenbus_gather(xenbus_transaction_t t, const char *dir, ...)
++{
++	va_list ap;
++	const char *name;
++	int ret = 0;
++
++	va_start(ap, dir);
++	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
++		const char *fmt = va_arg(ap, char *);
++		void *result = va_arg(ap, void *);
++		char *p;
++
++		p = xenbus_read(t, dir, name, NULL);
++		if (IS_ERR(p)) {
++			ret = PTR_ERR(p);
++			break;
++		}
++		if (fmt) {
++			if (sscanf(p, fmt, result) == 0)
++				ret = -EINVAL;
++			kfree(p);
++		} else
++			*(char **)result = p;
++	}
++	va_end(ap);
++	return ret;
++}
++EXPORT_SYMBOL(xenbus_gather);
++
++static int xs_watch(const char *path, const char *token)
++{
++	struct kvec iov[2];
++
++	iov[0].iov_base = (void *)path;
++	iov[0].iov_len = strlen(path) + 1;
++	iov[1].iov_base = (void *)token;
++	iov[1].iov_len = strlen(token) + 1;
++
++	return xs_error(xs_talkv(XBT_NULL, XS_WATCH, iov,
++				 ARRAY_SIZE(iov), NULL));
++}
++
++static int xs_unwatch(const char *path, const char *token)
++{
++	struct kvec iov[2];
++
++	iov[0].iov_base = (char *)path;
++	iov[0].iov_len = strlen(path) + 1;
++	iov[1].iov_base = (char *)token;
++	iov[1].iov_len = strlen(token) + 1;
++
++	return xs_error(xs_talkv(XBT_NULL, XS_UNWATCH, iov,
++				 ARRAY_SIZE(iov), NULL));
++}
++
++static struct xenbus_watch *find_watch(const char *token)
++{
++	struct xenbus_watch *i, *cmp;
++
++	cmp = (void *)simple_strtoul(token, NULL, 16);
++
++	list_for_each_entry(i, &watches, list)
++		if (i == cmp)
++			return i;
++
++	return NULL;
++}
++
++/* Register callback to watch this node. */
++int register_xenbus_watch(struct xenbus_watch *watch)
++{
++	/* Pointer in ascii is the token. */
++	char token[sizeof(watch) * 2 + 1];
++	int err;
++
++	sprintf(token, "%lX", (long)watch);
++
++	down_read(&xs_state.suspend_mutex);
++
++	spin_lock(&watches_lock);
++	BUG_ON(find_watch(token));
++	list_add(&watch->list, &watches);
++	spin_unlock(&watches_lock);
++
++	err = xs_watch(watch->node, token);
++
++	/* Ignore errors due to multiple registration. */
++	if ((err != 0) && (err != -EEXIST)) {
++		spin_lock(&watches_lock);
++		list_del(&watch->list);
++		spin_unlock(&watches_lock);
++	}
++
++	up_read(&xs_state.suspend_mutex);
++
++	return err;
++}
++EXPORT_SYMBOL(register_xenbus_watch);
++
++void unregister_xenbus_watch(struct xenbus_watch *watch)
++{
++	struct xs_stored_msg *msg, *tmp;
++	char token[sizeof(watch) * 2 + 1];
++	int err;
++
++	sprintf(token, "%lX", (long)watch);
++
++	down_read(&xs_state.suspend_mutex);
++
++	spin_lock(&watches_lock);
++	BUG_ON(!find_watch(token));
++	list_del(&watch->list);
++	spin_unlock(&watches_lock);
++
++	err = xs_unwatch(watch->node, token);
++	if (err)
++		printk(KERN_WARNING
++		       "XENBUS Failed to release watch %s: %i\n",
++		       watch->node, err);
++
++	up_read(&xs_state.suspend_mutex);
++
++	/* Cancel pending watch events. */
++	spin_lock(&watch_events_lock);
++	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
++		if (msg->u.watch.handle != watch)
++			continue;
++		list_del(&msg->list);
++		kfree(msg->u.watch.vec);
++		kfree(msg);
++	}
++	spin_unlock(&watch_events_lock);
++
++	/* Flush any currently-executing callback, unless we are it. :-) */
++	if (current->pid != xenwatch_pid) {
++		down(&xenwatch_mutex);
++		up(&xenwatch_mutex);
++	}
++}
++EXPORT_SYMBOL(unregister_xenbus_watch);
++
++void xs_suspend(void)
++{
++	down_write(&xs_state.suspend_mutex);
++	down(&xs_state.request_mutex);
++}
++
++void xs_resume(void)
++{
++	struct xenbus_watch *watch;
++	char token[sizeof(watch) * 2 + 1];
++
++	up(&xs_state.request_mutex);
++
++	/* No need for watches_lock: the suspend_mutex is sufficient. */
++	list_for_each_entry(watch, &watches, list) {
++		sprintf(token, "%lX", (long)watch);
++		xs_watch(watch->node, token);
++	}
++
++	up_write(&xs_state.suspend_mutex);
++}
++
++static int xenwatch_thread(void *unused)
++{
++	struct list_head *ent;
++	struct xs_stored_msg *msg;
++
++	for (;;) {
++		wait_event_interruptible(watch_events_waitq,
++					 !list_empty(&watch_events));
++
++		down(&xenwatch_mutex);
++
++		spin_lock(&watch_events_lock);
++		ent = watch_events.next;
++		if (ent != &watch_events)
++			list_del(ent);
++		spin_unlock(&watch_events_lock);
++
++		if (ent != &watch_events) {
++			msg = list_entry(ent, struct xs_stored_msg, list);
++			msg->u.watch.handle->callback(
++				msg->u.watch.handle,
++				(const char **)msg->u.watch.vec,
++				msg->u.watch.vec_size);
++			kfree(msg->u.watch.vec);
++			kfree(msg);
++		}
++
++		up(&xenwatch_mutex);
++	}
++}
++
++static int process_msg(void)
++{
++	struct xs_stored_msg *msg;
++	char *body;
++	int err;
++
++	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
++	if (msg == NULL)
++		return -ENOMEM;
++
++	err = xb_read(&msg->hdr, sizeof(msg->hdr));
++	if (err) {
++		kfree(msg);
++		return err;
++	}
++
++	body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
++	if (body == NULL) {
++		kfree(msg);
++		return -ENOMEM;
++	}
++
++	err = xb_read(body, msg->hdr.len);
++	if (err) {
++		kfree(body);
++		kfree(msg);
++		return err;
++	}
++	body[msg->hdr.len] = '\0';
++
++	if (msg->hdr.type == XS_WATCH_EVENT) {
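++	/* Watch events are queued for the xenwatch thread; anything else is a reply. */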
++		msg->u.watch.vec = split(body, msg->hdr.len,
++					 &msg->u.watch.vec_size);
++		if (IS_ERR(msg->u.watch.vec)) {
++			kfree(msg);
++			return PTR_ERR(msg->u.watch.vec);
++		}
++
++		spin_lock(&watches_lock);
++		msg->u.watch.handle = find_watch(
++			msg->u.watch.vec[XS_WATCH_TOKEN]);
++		if (msg->u.watch.handle != NULL) {
++			spin_lock(&watch_events_lock);
++			list_add_tail(&msg->list, &watch_events);
++			wake_up(&watch_events_waitq);
++			spin_unlock(&watch_events_lock);
++		} else {
++			kfree(msg->u.watch.vec);
++			kfree(msg);
++		}
++		spin_unlock(&watches_lock);
++	} else {
++		msg->u.reply.body = body;
++		spin_lock(&xs_state.reply_lock);
++		list_add_tail(&msg->list, &xs_state.reply_list);
++		spin_unlock(&xs_state.reply_lock);
++		wake_up(&xs_state.reply_waitq);
++	}
++
++	return 0;
++}
++
++static int xenbus_thread(void *unused)
++{
++	int err;
++
++	for (;;) {
++		err = process_msg();
++		if (err)
++			printk(KERN_WARNING "XENBUS error %d while reading "
++			       "message\n", err);
++	}
++}
++
++int xs_init(void)
++{
++	int err;
++	struct task_struct *task;
++
++	INIT_LIST_HEAD(&xs_state.reply_list);
++	spin_lock_init(&xs_state.reply_lock);
++	init_waitqueue_head(&xs_state.reply_waitq);
++
++	init_MUTEX(&xs_state.request_mutex);
++	init_rwsem(&xs_state.suspend_mutex);
++
++	/* Initialize the shared memory rings to talk to xenstored */
++	err = xb_init_comms();
++	if (err)
++		return err;
++
++	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
++	if (IS_ERR(task))
++		return PTR_ERR(task);
++	xenwatch_pid = task->pid;
++
++	task = kthread_run(xenbus_thread, NULL, "xenbus");
++	if (IS_ERR(task))
++		return PTR_ERR(task);
++
++	return 0;
++}
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/fs/bio.c linux-2.6.12-xen/fs/bio.c
+--- pristine-linux-2.6.12/fs/bio.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/fs/bio.c	2006-03-05 23:54:36.924062809 +0100
+@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
+ 	 */
+ 	bio->bi_vcnt = bio_src->bi_vcnt;
+ 	bio->bi_size = bio_src->bi_size;
++	bio->bi_idx = bio_src->bi_idx;
+ 	bio_phys_segments(q, bio);
+ 	bio_hw_segments(q, bio);
+ }
+diff -Nurp pristine-linux-2.6.12/fs/char_dev.c linux-2.6.12-xen/fs/char_dev.c
+--- pristine-linux-2.6.12/fs/char_dev.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/fs/char_dev.c	2006-03-05 23:54:36.924062809 +0100
+@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
+ 	struct char_device_struct *cd = NULL, **cp;
+ 	int i = major_to_index(major);
+ 
+-	up(&chrdevs_lock);
++	down(&chrdevs_lock);
+ 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
+ 		if ((*cp)->major == major &&
+ 		    (*cp)->baseminor == baseminor &&
+diff -Nurp pristine-linux-2.6.12/fs/exec.c linux-2.6.12-xen/fs/exec.c
+--- pristine-linux-2.6.12/fs/exec.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/fs/exec.c	2006-03-05 23:54:36.925062662 +0100
+@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
+ 	}
+ 	sig->group_exit_task = NULL;
+ 	sig->notify_count = 0;
++	sig->real_timer.data = (unsigned long)current;
+ 	spin_unlock_irq(lock);
+ 
+ 	/*
+diff -Nurp pristine-linux-2.6.12/fs/isofs/compress.c linux-2.6.12-xen/fs/isofs/compress.c
+--- pristine-linux-2.6.12/fs/isofs/compress.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/fs/isofs/compress.c	2006-03-05 23:54:36.926062514 +0100
+@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
+ 	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
+ 	brelse(bh);
+ 
++	if (cstart > cend)
++		goto eio;
++		
+ 	csize = cend-cstart;
+ 
++	if (csize > deflateBound(1UL << zisofs_block_shift))
++		goto eio;
++
+ 	/* Now page[] contains an array of pages, any of which can be NULL,
+ 	   and the locks on which we hold.  We should now read the data and
+ 	   release the pages.  If the pages are NULL the decompressed data
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/atomic.h linux-2.6.12-xen/include/asm-i386/atomic.h
+--- pristine-linux-2.6.12/include/asm-i386/atomic.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/atomic.h	2006-03-05 23:54:37.191023469 +0100
+@@ -4,18 +4,13 @@
+ #include <linux/config.h>
+ #include <linux/compiler.h>
+ #include <asm/processor.h>
++#include <asm/smp_alt.h>
+ 
+ /*
+  * Atomic operations that C can't guarantee us.  Useful for
+  * resource counting etc..
+  */
+ 
+-#ifdef CONFIG_SMP
+-#define LOCK "lock ; "
+-#else
+-#define LOCK ""
+-#endif
+-
+ /*
+  * Make sure gcc doesn't try to be clever and move things around
+  * on us. We need to use _exactly_ the address the user gave us,
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/bitops.h linux-2.6.12-xen/include/asm-i386/bitops.h
+--- pristine-linux-2.6.12/include/asm-i386/bitops.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/bitops.h	2006-03-05 23:54:37.192023322 +0100
+@@ -7,6 +7,7 @@
+ 
+ #include <linux/config.h>
+ #include <linux/compiler.h>
++#include <asm/smp_alt.h>
+ 
+ /*
+  * These have to be done with inline assembly: that way the bit-setting
+@@ -16,12 +17,6 @@
+  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+  */
+ 
+-#ifdef CONFIG_SMP
+-#define LOCK_PREFIX "lock ; "
+-#else
+-#define LOCK_PREFIX ""
+-#endif
+-
+ #define ADDR (*(volatile long *) addr)
+ 
+ /**
+@@ -41,7 +36,7 @@
+  */
+ static inline void set_bit(int nr, volatile unsigned long * addr)
+ {
+-	__asm__ __volatile__( LOCK_PREFIX
++	__asm__ __volatile__( LOCK
+ 		"btsl %1,%0"
+ 		:"=m" (ADDR)
+ 		:"Ir" (nr));
+@@ -76,7 +71,7 @@ static inline void __set_bit(int nr, vol
+  */
+ static inline void clear_bit(int nr, volatile unsigned long * addr)
+ {
+-	__asm__ __volatile__( LOCK_PREFIX
++	__asm__ __volatile__( LOCK
+ 		"btrl %1,%0"
+ 		:"=m" (ADDR)
+ 		:"Ir" (nr));
+@@ -121,7 +116,7 @@ static inline void __change_bit(int nr, 
+  */
+ static inline void change_bit(int nr, volatile unsigned long * addr)
+ {
+-	__asm__ __volatile__( LOCK_PREFIX
++	__asm__ __volatile__( LOCK
+ 		"btcl %1,%0"
+ 		:"=m" (ADDR)
+ 		:"Ir" (nr));
+@@ -140,7 +135,7 @@ static inline int test_and_set_bit(int n
+ {
+ 	int oldbit;
+ 
+-	__asm__ __volatile__( LOCK_PREFIX
++	__asm__ __volatile__( LOCK
+ 		"btsl %2,%1\n\tsbbl %0,%0"
+ 		:"=r" (oldbit),"=m" (ADDR)
+ 		:"Ir" (nr) : "memory");
+@@ -180,7 +175,7 @@ static inline int test_and_clear_bit(int
+ {
+ 	int oldbit;
+ 
+-	__asm__ __volatile__( LOCK_PREFIX
++	__asm__ __volatile__( LOCK
+ 		"btrl %2,%1\n\tsbbl %0,%0"
+ 		:"=r" (oldbit),"=m" (ADDR)
+ 		:"Ir" (nr) : "memory");
+@@ -231,7 +226,7 @@ static inline int test_and_change_bit(in
+ {
+ 	int oldbit;
+ 
+-	__asm__ __volatile__( LOCK_PREFIX
++	__asm__ __volatile__( LOCK
+ 		"btcl %2,%1\n\tsbbl %0,%0"
+ 		:"=r" (oldbit),"=m" (ADDR)
+ 		:"Ir" (nr) : "memory");
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/cpu.h linux-2.6.12-xen/include/asm-i386/cpu.h
+--- pristine-linux-2.6.12/include/asm-i386/cpu.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/cpu.h	2006-03-05 23:54:37.122033636 +0100
+@@ -5,6 +5,7 @@
+ #include <linux/cpu.h>
+ #include <linux/topology.h>
+ #include <linux/nodemask.h>
++#include <linux/percpu.h>
+ 
+ #include <asm/node.h>
+ 
+@@ -16,4 +17,5 @@ extern int arch_register_cpu(int num);
+ extern void arch_unregister_cpu(int);
+ #endif
+ 
++DECLARE_PER_CPU(int, cpu_state);
+ #endif /* _ASM_I386_CPU_H_ */
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/irq.h linux-2.6.12-xen/include/asm-i386/irq.h
+--- pristine-linux-2.6.12/include/asm-i386/irq.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/irq.h	2006-03-05 23:54:37.123033488 +0100
+@@ -38,4 +38,8 @@ extern void release_vm86_irqs(struct tas
+ extern int irqbalance_disable(char *str);
+ #endif
+ 
++#ifdef CONFIG_HOTPLUG_CPU
++extern void fixup_irqs(cpumask_t map);
++#endif
++
+ #endif /* _ASM_IRQ_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/mach-default/mach_traps.h linux-2.6.12-xen/include/asm-i386/mach-default/mach_traps.h
+--- pristine-linux-2.6.12/include/asm-i386/mach-default/mach_traps.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/mach-default/mach_traps.h	2006-03-05 23:54:37.126033046 +0100
+@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
+ 	outb(reason, 0x61);
+ }
+ 
++static inline void clear_io_check_error(unsigned char reason)
++{
++	unsigned long i;
++
++	reason = (reason & 0xf) | 8;
++	outb(reason, 0x61);
++	i = 2000;
++	while (--i) udelay(1000);
++	reason &= ~8;
++	outb(reason, 0x61);
++}
++
+ static inline unsigned char get_nmi_reason(void)
+ {
+ 	return inb(0x61);
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/pgtable-2level-defs.h linux-2.6.12-xen/include/asm-i386/pgtable-2level-defs.h
+--- pristine-linux-2.6.12/include/asm-i386/pgtable-2level-defs.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/pgtable-2level-defs.h	2006-03-05 23:54:37.183024648 +0100
+@@ -1,6 +1,8 @@
+ #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
+ #define _I386_PGTABLE_2LEVEL_DEFS_H
+ 
++#define HAVE_SHARED_KERNEL_PMD 0
++
+ /*
+  * traditional i386 two-level paging structure:
+  */
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/pgtable-3level-defs.h linux-2.6.12-xen/include/asm-i386/pgtable-3level-defs.h
+--- pristine-linux-2.6.12/include/asm-i386/pgtable-3level-defs.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/pgtable-3level-defs.h	2006-03-05 23:54:37.184024500 +0100
+@@ -1,6 +1,8 @@
+ #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
+ #define _I386_PGTABLE_3LEVEL_DEFS_H
+ 
++#define HAVE_SHARED_KERNEL_PMD 1
++
+ /*
+  * PGDIR_SHIFT determines what a top-level page table entry can map
+  */
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/rwsem.h linux-2.6.12-xen/include/asm-i386/rwsem.h
+--- pristine-linux-2.6.12/include/asm-i386/rwsem.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/rwsem.h	2006-03-05 23:54:37.193023174 +0100
+@@ -40,6 +40,7 @@
+ 
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
++#include <asm/smp_alt.h>
+ 
+ struct rwsem_waiter;
+ 
+@@ -99,7 +100,7 @@ static inline void __down_read(struct rw
+ {
+ 	__asm__ __volatile__(
+ 		"# beginning down_read\n\t"
+-LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
++LOCK	        "  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
+ 		"  js        2f\n\t" /* jump if we weren't granted the lock */
+ 		"1:\n\t"
+ 		LOCK_SECTION_START("")
+@@ -130,7 +131,7 @@ static inline int __down_read_trylock(st
+ 		"  movl	     %1,%2\n\t"
+ 		"  addl      %3,%2\n\t"
+ 		"  jle	     2f\n\t"
+-LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
++LOCK	        "  cmpxchgl  %2,%0\n\t"
+ 		"  jnz	     1b\n\t"
+ 		"2:\n\t"
+ 		"# ending __down_read_trylock\n\t"
+@@ -150,7 +151,7 @@ static inline void __down_write(struct r
+ 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
+ 	__asm__ __volatile__(
+ 		"# beginning down_write\n\t"
+-LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
++LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
+ 		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
+ 		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
+ 		"1:\n\t"
+@@ -188,7 +189,7 @@ static inline void __up_read(struct rw_s
+ 	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+ 	__asm__ __volatile__(
+ 		"# beginning __up_read\n\t"
+-LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
++LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
+ 		"  js        2f\n\t" /* jump if the lock is being waited upon */
+ 		"1:\n\t"
+ 		LOCK_SECTION_START("")
+@@ -214,7 +215,7 @@ static inline void __up_write(struct rw_
+ 	__asm__ __volatile__(
+ 		"# beginning __up_write\n\t"
+ 		"  movl      %2,%%edx\n\t"
+-LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
++LOCK	        "  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
+ 		"  jnz       2f\n\t" /* jump if the lock is being waited upon */
+ 		"1:\n\t"
+ 		LOCK_SECTION_START("")
+@@ -239,7 +240,7 @@ static inline void __downgrade_write(str
+ {
+ 	__asm__ __volatile__(
+ 		"# beginning __downgrade_write\n\t"
+-LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
++LOCK	        "  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+ 		"  js        2f\n\t" /* jump if the lock is being waited upon */
+ 		"1:\n\t"
+ 		LOCK_SECTION_START("")
+@@ -263,7 +264,7 @@ LOCK_PREFIX	"  addl      %2,(%%eax)\n\t"
+ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+ {
+ 	__asm__ __volatile__(
+-LOCK_PREFIX	"addl %1,%0"
++LOCK	          "addl %1,%0"
+ 		: "=m"(sem->count)
+ 		: "ir"(delta), "m"(sem->count));
+ }
+@@ -276,7 +277,7 @@ static inline int rwsem_atomic_update(in
+ 	int tmp = delta;
+ 
+ 	__asm__ __volatile__(
+-LOCK_PREFIX	"xadd %0,(%2)"
++LOCK  	          "xadd %0,(%2)"
+ 		: "+r"(tmp), "=m"(sem->count)
+ 		: "r"(sem), "m"(sem->count)
+ 		: "memory");
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/smp_alt.h linux-2.6.12-xen/include/asm-i386/smp_alt.h
+--- pristine-linux-2.6.12/include/asm-i386/smp_alt.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-i386/smp_alt.h	2006-03-05 23:54:37.193023174 +0100
+@@ -0,0 +1,32 @@
++#ifndef __ASM_SMP_ALT_H__
++#define __ASM_SMP_ALT_H__
++
++#include <linux/config.h>
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
++#define LOCK \
++        "6677: nop\n" \
++	".section __smp_alternatives,\"a\"\n" \
++	".long 6677b\n" \
++	".long 6678f\n" \
++	".previous\n" \
++	".section __smp_replacements,\"a\"\n" \
++	"6678: .byte 1\n" \
++	".byte 1\n" \
++	".byte 0\n" \
++        ".byte 1\n" \
++	".byte -1\n" \
++	"lock\n" \
++	"nop\n" \
++	".previous\n"
++void prepare_for_smp(void);
++void unprepare_for_smp(void);
++#else
++#define LOCK "lock ; "
++#endif
++#else
++#define LOCK ""
++#endif
++
++#endif /* __ASM_SMP_ALT_H__ */
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/smp.h linux-2.6.12-xen/include/asm-i386/smp.h
+--- pristine-linux-2.6.12/include/asm-i386/smp.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/smp.h	2006-03-05 23:54:37.123033488 +0100
+@@ -83,6 +83,9 @@ static __inline int logical_smp_processo
+ }
+ 
+ #endif
++
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
+ #endif /* !__ASSEMBLY__ */
+ 
+ #define NO_PROC_ID		0xFF		/* No processor magic marker */
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/spinlock.h linux-2.6.12-xen/include/asm-i386/spinlock.h
+--- pristine-linux-2.6.12/include/asm-i386/spinlock.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/spinlock.h	2006-03-05 23:54:37.194023027 +0100
+@@ -6,6 +6,7 @@
+ #include <asm/page.h>
+ #include <linux/config.h>
+ #include <linux/compiler.h>
++#include <asm/smp_alt.h>
+ 
+ asmlinkage int printk(const char * fmt, ...)
+ 	__attribute__ ((format (printf, 1, 2)));
+@@ -47,8 +48,9 @@ typedef struct {
+ #define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
+ 
+ #define spin_lock_string \
+-	"\n1:\t" \
+-	"lock ; decb %0\n\t" \
++        "1:\n" \
++	LOCK \
++	"decb %0\n\t" \
+ 	"jns 3f\n" \
+ 	"2:\t" \
+ 	"rep;nop\n\t" \
+@@ -58,8 +60,9 @@ typedef struct {
+ 	"3:\n\t"
+ 
+ #define spin_lock_string_flags \
+-	"\n1:\t" \
+-	"lock ; decb %0\n\t" \
++        "1:\n" \
++	LOCK \
++	"decb %0\n\t" \
+ 	"jns 4f\n\t" \
+ 	"2:\t" \
+ 	"testl $0x200, %1\n\t" \
+@@ -121,10 +124,34 @@ static inline void _raw_spin_unlock(spin
+ static inline int _raw_spin_trylock(spinlock_t *lock)
+ {
+ 	char oldval;
++#ifdef CONFIG_SMP_ALTERNATIVES
+ 	__asm__ __volatile__(
+-		"xchgb %b0,%1"
++		"1:movb %1,%b0\n"
++		"movb $0,%1\n"
++		"2:"
++		".section __smp_alternatives,\"a\"\n"
++		".long 1b\n"
++		".long 3f\n"
++		".previous\n"
++		".section __smp_replacements,\"a\"\n"
++		"3: .byte 2b - 1b\n"
++		".byte 5f-4f\n"
++		".byte 0\n"
++		".byte 6f-5f\n"
++		".byte -1\n"
++		"4: xchgb %b0,%1\n"
++		"5: movb %1,%b0\n"
++		"movb $0,%1\n"
++		"6:\n"
++		".previous\n"
+ 		:"=q" (oldval), "=m" (lock->slock)
+ 		:"0" (0) : "memory");
++#else
++	__asm__ __volatile__(
++		"xchgb %b0,%1\n"
++		:"=q" (oldval), "=m" (lock->slock)
++		:"0" (0) : "memory");
++#endif
+ 	return oldval > 0;
+ }
+ 
+@@ -225,8 +252,8 @@ static inline void _raw_write_lock(rwloc
+ 	__build_write_lock(rw, "__write_lock_failed");
+ }
+ 
+-#define _raw_read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
+-#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
++#define _raw_read_unlock(rw)	asm volatile(LOCK "incl %0" :"=m" ((rw)->lock) : : "memory")
++#define _raw_write_unlock(rw)	asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+ 
+ static inline int _raw_read_trylock(rwlock_t *lock)
+ {
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/string.h linux-2.6.12-xen/include/asm-i386/string.h
+--- pristine-linux-2.6.12/include/asm-i386/string.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/string.h	2006-03-05 23:54:36.926062514 +0100
+@@ -116,7 +116,8 @@ __asm__ __volatile__(
+ 	"orb $1,%%al\n"
+ 	"3:"
+ 	:"=a" (__res), "=&S" (d0), "=&D" (d1)
+-		     :"1" (cs),"2" (ct));
++	:"1" (cs),"2" (ct)
++	:"memory");
+ return __res;
+ }
+ 
+@@ -138,8 +139,9 @@ __asm__ __volatile__(
+ 	"3:\tsbbl %%eax,%%eax\n\t"
+ 	"orb $1,%%al\n"
+ 	"4:"
+-		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
+-		     :"1" (cs),"2" (ct),"3" (count));
++	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
++	:"1" (cs),"2" (ct),"3" (count)
++	:"memory");
+ return __res;
+ }
+ 
+@@ -158,7 +160,9 @@ __asm__ __volatile__(
+ 	"movl $1,%1\n"
+ 	"2:\tmovl %1,%0\n\t"
+ 	"decl %0"
+-	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
++	:"=a" (__res), "=&S" (d0)
++	:"1" (s),"0" (c)
++	:"memory");
+ return __res;
+ }
+ 
+@@ -175,7 +179,9 @@ __asm__ __volatile__(
+ 	"leal -1(%%esi),%0\n"
+ 	"2:\ttestb %%al,%%al\n\t"
+ 	"jne 1b"
+-	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
++	:"=g" (__res), "=&S" (d0), "=&a" (d1)
++	:"0" (0),"1" (s),"2" (c)
++	:"memory");
+ return __res;
+ }
+ 
+@@ -189,7 +195,9 @@ __asm__ __volatile__(
+ 	"scasb\n\t"
+ 	"notl %0\n\t"
+ 	"decl %0"
+-	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
++	:"=c" (__res), "=&D" (d0)
++	:"1" (s),"a" (0), "0" (0xffffffffu)
++	:"memory");
+ return __res;
+ }
+ 
+@@ -333,7 +341,9 @@ __asm__ __volatile__(
+ 	"je 1f\n\t"
+ 	"movl $1,%0\n"
+ 	"1:\tdecl %0"
+-	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
++	:"=D" (__res), "=&c" (d0)
++	:"a" (c),"0" (cs),"1" (count)
++	:"memory");
+ return __res;
+ }
+ 
+@@ -369,7 +379,7 @@ __asm__ __volatile__(
+ 	"je 2f\n\t"
+ 	"stosb\n"
+ 	"2:"
+-	: "=&c" (d0), "=&D" (d1)
++	:"=&c" (d0), "=&D" (d1)
+ 	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
+ 	:"memory");
+ return (s);	
+@@ -392,7 +402,8 @@ __asm__ __volatile__(
+ 	"jne 1b\n"
+ 	"3:\tsubl %2,%0"
+ 	:"=a" (__res), "=&d" (d0)
+-	:"c" (s),"1" (count));
++	:"c" (s),"1" (count)
++	:"memory");
+ return __res;
+ }
+ /* end of additional stuff */
+@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
+ 		"dec %%edi\n"
+ 		"1:"
+ 		: "=D" (addr), "=c" (size)
+-		: "0" (addr), "1" (size), "a" (c));
++		: "0" (addr), "1" (size), "a" (c)
++		: "memory");
+ 	return addr;
+ }
+ 
+diff -Nurp pristine-linux-2.6.12/include/asm-i386/system.h linux-2.6.12-xen/include/asm-i386/system.h
+--- pristine-linux-2.6.12/include/asm-i386/system.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-i386/system.h	2006-03-05 23:54:37.194023027 +0100
+@@ -5,7 +5,7 @@
+ #include <linux/kernel.h>
+ #include <asm/segment.h>
+ #include <asm/cpufeature.h>
+-#include <linux/bitops.h> /* for LOCK_PREFIX */
++#include <asm/smp_alt.h>
+ 
+ #ifdef __KERNEL__
+ 
+@@ -249,19 +249,19 @@ static inline unsigned long __cmpxchg(vo
+ 	unsigned long prev;
+ 	switch (size) {
+ 	case 1:
+-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++		__asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
+ 				     : "=a"(prev)
+ 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ 				     : "memory");
+ 		return prev;
+ 	case 2:
+-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++		__asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
+ 				     : "=a"(prev)
+ 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ 				     : "memory");
+ 		return prev;
+ 	case 4:
+-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
++		__asm__ __volatile__(LOCK "cmpxchgl %1,%2"
+ 				     : "=a"(prev)
+ 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ 				     : "memory");
+@@ -425,11 +425,55 @@ struct alt_instr { 
+ #endif
+ 
+ #ifdef CONFIG_SMP
+-#define smp_mb()	mb()
+-#define smp_rmb()	rmb()
+ #define smp_wmb()	wmb()
+-#define smp_read_barrier_depends()	read_barrier_depends()
++#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
++#define smp_alt_mb(instr)                                           \
++__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
++		     ".section __smp_alternatives,\"a\"\n"          \
++		     ".long 6667b\n"                                \
++                     ".long 6673f\n"                                \
++		     ".previous\n"                                  \
++		     ".section __smp_replacements,\"a\"\n"          \
++		     "6673:.byte 6668b-6667b\n"                     \
++		     ".byte 6670f-6669f\n"                          \
++		     ".byte 6671f-6670f\n"                          \
++                     ".byte 0\n"                                    \
++		     ".byte %c0\n"                                  \
++		     "6669:lock;addl $0,0(%%esp)\n"                 \
++		     "6670:" instr "\n"                             \
++		     "6671:\n"                                      \
++		     ".previous\n"                                  \
++		     :                                              \
++		     : "i" (X86_FEATURE_XMM2)                       \
++		     : "memory")
++#define smp_rmb() smp_alt_mb("lfence")
++#define smp_mb()  smp_alt_mb("mfence")
++#define set_mb(var, value) do {                                     \
++unsigned long __set_mb_temp;                                        \
++__asm__ __volatile__("6667:movl %1, %0\n6668:\n"                    \
++		     ".section __smp_alternatives,\"a\"\n"          \
++		     ".long 6667b\n"                                \
++		     ".long 6673f\n"                                \
++		     ".previous\n"                                  \
++		     ".section __smp_replacements,\"a\"\n"          \
++		     "6673: .byte 6668b-6667b\n"                    \
++		     ".byte 6670f-6669f\n"                          \
++		     ".byte 0\n"                                    \
++		     ".byte 6671f-6670f\n"                          \
++		     ".byte -1\n"                                   \
++		     "6669: xchg %1, %0\n"                          \
++		     "6670:movl %1, %0\n"                           \
++		     "6671:\n"                                      \
++		     ".previous\n"                                  \
++		     : "=m" (var), "=r" (__set_mb_temp)             \
++		     : "1" (value)                                  \
++		     : "memory"); } while (0)
++#else
++#define smp_rmb()	rmb()
++#define smp_mb()	mb()
+ #define set_mb(var, value) do { xchg(&var, value); } while (0)
++#endif
++#define smp_read_barrier_depends()	read_barrier_depends()
+ #else
+ #define smp_mb()	barrier()
+ #define smp_rmb()	barrier()
+diff -Nurp pristine-linux-2.6.12/include/asm-ia64/gcc_intrin.h linux-2.6.12-xen/include/asm-ia64/gcc_intrin.h
+--- pristine-linux-2.6.12/include/asm-ia64/gcc_intrin.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-ia64/gcc_intrin.h	2006-03-05 23:36:31.000000000 +0100
+@@ -26,7 +26,7 @@ extern void ia64_bad_param_for_getreg (v
+ 
+ register unsigned long ia64_r13 asm ("r13") __attribute_used__;
+ 
+-#define ia64_setreg(regnum, val)						\
++#define __ia64_setreg(regnum, val)						\
+ ({										\
+ 	switch (regnum) {							\
+ 	    case _IA64_REG_PSR_L:						\
+@@ -55,7 +55,7 @@ register unsigned long ia64_r13 asm ("r1
+ 	}									\
+ })
+ 
+-#define ia64_getreg(regnum)							\
++#define __ia64_getreg(regnum)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 										\
+@@ -92,7 +92,7 @@ register unsigned long ia64_r13 asm ("r1
+ 
+ #define ia64_hint_pause 0
+ 
+-#define ia64_hint(mode)						\
++#define __ia64_hint(mode)						\
+ ({								\
+ 	switch (mode) {						\
+ 	case ia64_hint_pause:					\
+@@ -374,7 +374,7 @@ register unsigned long ia64_r13 asm ("r1
+ 
+ #define ia64_invala() asm volatile ("invala" ::: "memory")
+ 
+-#define ia64_thash(addr)							\
++#define __ia64_thash(addr)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
+@@ -394,18 +394,18 @@ register unsigned long ia64_r13 asm ("r1
+ 
+ #define ia64_nop(x)	asm volatile ("nop %0"::"i"(x));
+ 
+-#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
++#define __ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
+ 
+-#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
++#define __ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
+ 
+ 
+-#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
++#define __ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"			\
+ 					     :: "r"(trnum), "r"(addr) : "memory")
+ 
+-#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
++#define __ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"			\
+ 					     :: "r"(trnum), "r"(addr) : "memory")
+ 
+-#define ia64_tpa(addr)								\
++#define __ia64_tpa(addr)							\
+ ({										\
+ 	__u64 ia64_pa;								\
+ 	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
+@@ -415,22 +415,22 @@ register unsigned long ia64_r13 asm ("r1
+ #define __ia64_set_dbr(index, val)						\
+ 	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_ibr(index, val)						\
++#define __ia64_set_ibr(index, val)						\
+ 	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_pkr(index, val)						\
++#define __ia64_set_pkr(index, val)						\
+ 	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_pmc(index, val)						\
++#define __ia64_set_pmc(index, val)						\
+ 	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_pmd(index, val)						\
++#define __ia64_set_pmd(index, val)						\
+ 	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
+ 
+-#define ia64_set_rr(index, val)							\
++#define __ia64_set_rr(index, val)							\
+ 	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
+ 
+-#define ia64_get_cpuid(index)								\
++#define __ia64_get_cpuid(index)								\
+ ({											\
+ 	__u64 ia64_intri_res;								\
+ 	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
+@@ -444,21 +444,21 @@ register unsigned long ia64_r13 asm ("r1
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_get_ibr(index)							\
++#define __ia64_get_ibr(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_get_pkr(index)							\
++#define __ia64_get_pkr(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_get_pmc(index)							\
++#define __ia64_get_pmc(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+@@ -466,48 +466,48 @@ register unsigned long ia64_r13 asm ("r1
+ })
+ 
+ 
+-#define ia64_get_pmd(index)							\
++#define __ia64_get_pmd(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_get_rr(index)							\
++#define __ia64_get_rr(index)							\
+ ({										\
+ 	__u64 ia64_intri_res;							\
+ 	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
+ 	ia64_intri_res;								\
+ })
+ 
+-#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
++#define __ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
+ 
+ 
+ #define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")
+ 
+-#define ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
+-#define ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
++#define __ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
++#define __ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
+ #define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
+ #define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")
+ 
+-#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
++#define __ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
+ 
+-#define ia64_ptcga(addr, size)							\
++#define __ia64_ptcga(addr, size)							\
+ do {										\
+ 	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
+ 	ia64_dv_serialize_data();						\
+ } while (0)
+ 
+-#define ia64_ptcl(addr, size)							\
++#define __ia64_ptcl(addr, size)							\
+ do {										\
+ 	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
+ 	ia64_dv_serialize_data();						\
+ } while (0)
+ 
+-#define ia64_ptri(addr, size)						\
++#define __ia64_ptri(addr, size)						\
+ 	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
+ 
+-#define ia64_ptrd(addr, size)						\
++#define __ia64_ptrd(addr, size)						\
+ 	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
+ 
+ /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
+@@ -589,7 +589,7 @@ do {										\
+         }								\
+ })
+ 
+-#define ia64_intrin_local_irq_restore(x)			\
++#define __ia64_intrin_local_irq_restore(x)			\
+ do {								\
+ 	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
+ 		      "(p6) ssm psr.i;"				\
+@@ -598,4 +598,6 @@ do {								\
+ 		      :: "r"((x)) : "p6", "p7", "memory");	\
+ } while (0)
+ 
++#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
++
+ #endif /* _ASM_IA64_GCC_INTRIN_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-ia64/intel_intrin.h linux-2.6.12-xen/include/asm-ia64/intel_intrin.h
+--- pristine-linux-2.6.12/include/asm-ia64/intel_intrin.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-ia64/intel_intrin.h	2006-03-05 23:36:31.000000000 +0100
+@@ -119,10 +119,10 @@ __s64 _m64_popcnt(__s64 a);
+ 		 	 * intrinsic
+ 		 	 */
+ 
+-#define ia64_getreg		__getReg
+-#define ia64_setreg		__setReg
++#define __ia64_getreg		__getReg
++#define __ia64_setreg		__setReg
+ 
+-#define ia64_hint(x)
++#define __ia64_hint(x)
+ 
+ #define ia64_mux1_brcst	 0
+ #define ia64_mux1_mix		 8
+@@ -135,16 +135,16 @@ __s64 _m64_popcnt(__s64 a);
+ #define ia64_getf_exp		__getf_exp
+ #define ia64_shrp		_m64_shrp
+ 
+-#define ia64_tpa		__tpa
++#define __ia64_tpa		__tpa
+ #define ia64_invala		__invala
+ #define ia64_invala_gr		__invala_gr
+ #define ia64_invala_fr		__invala_fr
+ #define ia64_nop		__nop
+ #define ia64_sum		__sum
+-#define ia64_ssm		__ssm
++#define __ia64_ssm		__ssm
+ #define ia64_rum		__rum
+-#define ia64_rsm		__rsm
+-#define ia64_fc 		__fc
++#define __ia64_rsm		__rsm
++#define __ia64_fc 		__fc
+ 
+ #define ia64_ldfs		__ldfs
+ #define ia64_ldfd		__ldfd
+@@ -182,24 +182,24 @@ __s64 _m64_popcnt(__s64 a);
+ 
+ #define __ia64_set_dbr(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_DBR, index, val)
+-#define ia64_set_ibr(index, val)	\
++#define __ia64_set_ibr(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_IBR, index, val)
+-#define ia64_set_pkr(index, val)	\
++#define __ia64_set_pkr(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_PKR, index, val)
+-#define ia64_set_pmc(index, val)	\
++#define __ia64_set_pmc(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_PMC, index, val)
+-#define ia64_set_pmd(index, val)	\
++#define __ia64_set_pmd(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_PMD, index, val)
+-#define ia64_set_rr(index, val)	\
++#define __ia64_set_rr(index, val)	\
+ 		__setIndReg(_IA64_REG_INDR_RR, index, val)
+ 
+-#define ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
++#define __ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
+ #define __ia64_get_dbr(index) 	__getIndReg(_IA64_REG_INDR_DBR, index)
+-#define ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
+-#define ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
+-#define ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
+-#define ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
+-#define ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
++#define __ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
++#define __ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
++#define __ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
++#define __ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
++#define __ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
+ 
+ #define ia64_srlz_d		__dsrlz
+ #define ia64_srlz_i		__isrlz
+@@ -218,18 +218,18 @@ __s64 _m64_popcnt(__s64 a);
+ #define ia64_ld8_acq		__ld8_acq
+ 
+ #define ia64_sync_i		__synci
+-#define ia64_thash		__thash
+-#define ia64_ttag		__ttag
+-#define ia64_itcd		__itcd
+-#define ia64_itci		__itci
+-#define ia64_itrd		__itrd
+-#define ia64_itri		__itri
+-#define ia64_ptce		__ptce
+-#define ia64_ptcl		__ptcl
+-#define ia64_ptcg		__ptcg
+-#define ia64_ptcga		__ptcga
+-#define ia64_ptri		__ptri
+-#define ia64_ptrd		__ptrd
++#define __ia64_thash		__thash
++#define __ia64_ttag		__ttag
++#define __ia64_itcd		__itcd
++#define __ia64_itci		__itci
++#define __ia64_itrd		__itrd
++#define __ia64_itri		__itri
++#define __ia64_ptce		__ptce
++#define __ia64_ptcl		__ptcl
++#define __ia64_ptcg		__ptcg
++#define __ia64_ptcga		__ptcga
++#define __ia64_ptri		__ptri
++#define __ia64_ptrd		__ptrd
+ #define ia64_dep_mi		_m64_dep_mi
+ 
+ /* Values for lfhint in __lfetch and __lfetch_fault */
+@@ -244,14 +244,16 @@ __s64 _m64_popcnt(__s64 a);
+ #define ia64_lfetch_fault	__lfetch_fault
+ #define ia64_lfetch_fault_excl	__lfetch_fault_excl
+ 
+-#define ia64_intrin_local_irq_restore(x)		\
++#define __ia64_intrin_local_irq_restore(x)		\
+ do {							\
+ 	if ((x) != 0) {					\
+-		ia64_ssm(IA64_PSR_I);			\
++		__ia64_ssm(IA64_PSR_I);			\
+ 		ia64_srlz_d();				\
+ 	} else {					\
+-		ia64_rsm(IA64_PSR_I);			\
++		__ia64_rsm(IA64_PSR_I);			\
+ 	}						\
+ } while (0)
+ 
++#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
++
+ #endif /* _ASM_IA64_INTEL_INTRIN_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-ia64/pal.h linux-2.6.12-xen/include/asm-ia64/pal.h
+--- pristine-linux-2.6.12/include/asm-ia64/pal.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-ia64/pal.h	2006-03-05 23:36:31.000000000 +0100
+@@ -79,6 +79,7 @@
+ #ifndef __ASSEMBLY__
+ 
+ #include <linux/types.h>
++#include <asm/processor.h>
+ #include <asm/fpu.h>
+ 
+ /*
+diff -Nurp pristine-linux-2.6.12/include/asm-ia64/privop.h linux-2.6.12-xen/include/asm-ia64/privop.h
+--- pristine-linux-2.6.12/include/asm-ia64/privop.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-ia64/privop.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,59 @@
++#ifndef _ASM_IA64_PRIVOP_H
++#define _ASM_IA64_PRIVOP_H
++
++/*
++ * Copyright (C) 2005 Hewlett-Packard Co
++ *	Dan Magenheimer <dan.magenheimer at hp.com>
++ *
++ */
++
++#include <linux/config.h>
++#ifdef CONFIG_XEN
++#include <asm/xen/privop.h>
++#endif
++
++#ifndef __ASSEMBLY
++
++#ifndef IA64_PARAVIRTUALIZED
++
++#define ia64_getreg			__ia64_getreg
++#define ia64_setreg			__ia64_setreg
++#define ia64_hint			__ia64_hint
++#define ia64_thash			__ia64_thash
++#define ia64_itci			__ia64_itci
++#define ia64_itcd			__ia64_itcd
++#define ia64_itri			__ia64_itri
++#define ia64_itrd			__ia64_itrd
++#define ia64_tpa			__ia64_tpa
++#define ia64_set_ibr			__ia64_set_ibr
++#define ia64_set_pkr			__ia64_set_pkr
++#define ia64_set_pmc			__ia64_set_pmc
++#define ia64_set_pmd			__ia64_set_pmd
++#define ia64_set_rr			__ia64_set_rr
++#define ia64_get_cpuid			__ia64_get_cpuid
++#define ia64_get_ibr			__ia64_get_ibr
++#define ia64_get_pkr			__ia64_get_pkr
++#define ia64_get_pmc			__ia64_get_pmc
++#define ia64_get_pmd			__ia64_get_pmd
++#define ia64_get_rr			__ia64_get_rr
++#define ia64_fc				__ia64_fc
++#define ia64_ssm			__ia64_ssm
++#define ia64_rsm			__ia64_rsm
++#define ia64_ptce			__ia64_ptce
++#define ia64_ptcga			__ia64_ptcga
++#define ia64_ptcl			__ia64_ptcl
++#define ia64_ptri			__ia64_ptri
++#define ia64_ptrd			__ia64_ptrd
++#define	ia64_get_psr_i			__ia64_get_psr_i
++#define ia64_intrin_local_irq_restore	__ia64_intrin_local_irq_restore
++#define ia64_pal_halt_light		__ia64_pal_halt_light
++#define	ia64_leave_kernel		__ia64_leave_kernel
++#define	ia64_leave_syscall		__ia64_leave_syscall
++#define	ia64_switch_to			__ia64_switch_to
++#define	ia64_pal_call_static		__ia64_pal_call_static
++
++#endif /* !IA64_PARAVIRTUALIZED */
++
++#endif /* !__ASSEMBLY */
++
++#endif /* _ASM_IA64_PRIVOP_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-ia64/processor.h linux-2.6.12-xen/include/asm-ia64/processor.h
+--- pristine-linux-2.6.12/include/asm-ia64/processor.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-ia64/processor.h	2006-03-05 23:36:31.000000000 +0100
+@@ -19,6 +19,7 @@
+ #include <asm/kregs.h>
+ #include <asm/ptrace.h>
+ #include <asm/ustack.h>
++#include <asm/privop.h>
+ 
+ /* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
+ #define ARCH_HAS_SCHED_DOMAIN
+diff -Nurp pristine-linux-2.6.12/include/asm-ia64/system.h linux-2.6.12-xen/include/asm-ia64/system.h
+--- pristine-linux-2.6.12/include/asm-ia64/system.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-ia64/system.h	2006-03-05 23:36:31.000000000 +0100
+@@ -124,7 +124,7 @@ extern struct ia64_boot_param {
+ #define __local_irq_save(x)			\
+ do {						\
+ 	ia64_stop();				\
+-	(x) = ia64_getreg(_IA64_REG_PSR);	\
++	(x) = ia64_get_psr_i();			\
+ 	ia64_stop();				\
+ 	ia64_rsm(IA64_PSR_I);			\
+ } while (0)
+@@ -172,7 +172,7 @@ do {								\
+ #endif /* !CONFIG_IA64_DEBUG_IRQ */
+ 
+ #define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
+-#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
++#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_get_psr_i(); })
+ 
+ #define irqs_disabled()				\
+ ({						\
+diff -Nurp pristine-linux-2.6.12/include/asm-ia64/xen/privop.h linux-2.6.12-xen/include/asm-ia64/xen/privop.h
+--- pristine-linux-2.6.12/include/asm-ia64/xen/privop.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-ia64/xen/privop.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,272 @@
++#ifndef _ASM_IA64_XEN_PRIVOP_H
++#define _ASM_IA64_XEN_PRIVOP_H
++
++/*
++ * Copyright (C) 2005 Hewlett-Packard Co
++ *	Dan Magenheimer <dan.magenheimer at hp.com>
++ *
++ * Paravirtualizations of privileged operations for Xen/ia64
++ *
++ */
++
++
++#include <asm/xen/asm-xsi-offsets.h>
++
++#define IA64_PARAVIRTUALIZED
++
++#ifdef __ASSEMBLY__
++#define	XEN_HYPER_RFI			break 0x1
++#define	XEN_HYPER_RSM_PSR_DT		break 0x2
++#define	XEN_HYPER_SSM_PSR_DT		break 0x3
++#define	XEN_HYPER_COVER			break 0x4
++#define	XEN_HYPER_ITC_D			break 0x5
++#define	XEN_HYPER_ITC_I			break 0x6
++#define	XEN_HYPER_SSM_I			break 0x7
++#define	XEN_HYPER_GET_IVR		break 0x8
++#define	XEN_HYPER_GET_TPR		break 0x9
++#define	XEN_HYPER_SET_TPR		break 0xa
++#define	XEN_HYPER_EOI			break 0xb
++#define	XEN_HYPER_SET_ITM		break 0xc
++#define	XEN_HYPER_THASH			break 0xd
++#define	XEN_HYPER_PTC_GA		break 0xe
++#define	XEN_HYPER_ITR_D			break 0xf
++#define	XEN_HYPER_GET_RR		break 0x10
++#define	XEN_HYPER_SET_RR		break 0x11
++#define	XEN_HYPER_SET_KR		break 0x12
++#endif
++
++#ifndef __ASSEMBLY__
++#ifdef MODULE
++extern int is_running_on_xen(void);
++#define running_on_xen (is_running_on_xen())
++#else
++extern int running_on_xen;
++#endif
++
++#define	XEN_HYPER_SSM_I			asm("break 0x7");
++#define	XEN_HYPER_GET_IVR		asm("break 0x8");
++
++/************************************************/
++/* Instructions paravirtualized for correctness */
++/************************************************/
++
++/* "fc" and "thash" are privilege-sensitive instructions, meaning they
++ *  may have different semantics depending on whether they are executed
++ *  at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
++ *  be allowed to execute directly, lest incorrect semantics result. */
++extern unsigned long xen_fc(unsigned long addr);
++#define ia64_fc(addr)			xen_fc((unsigned long)(addr))
++extern unsigned long xen_thash(unsigned long addr);
++#define ia64_thash(addr)		xen_thash((unsigned long)(addr))
++/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
++ * is not currently used (though it may be in a long-format VHPT system!)
++ * and the semantics of cover only change if psr.ic is off which is very
++ * rare (and currently non-existent outside of assembly code */
++
++/* There are also privilege-sensitive registers.  These registers are
++ * readable at any privilege level but only writable at PL0. */
++extern unsigned long xen_get_cpuid(int index);
++#define	ia64_get_cpuid(i)		xen_get_cpuid(i)
++extern unsigned long xen_get_pmd(int index);
++#define	ia64_get_pmd(i)			xen_get_pmd(i)
++extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
++extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
++
++/************************************************/
++/* Instructions paravirtualized for performance */
++/************************************************/
++
++/* Xen uses memory-mapped virtual privileged registers for access to many
++ * performance-sensitive privileged registers.  Some, like the processor
++ * status register (psr), are broken up into multiple memory locations.
++ * Others, like "pend", are abstractions based on privileged registers.
++ * "Pend" is guaranteed to be set if reading cr.ivr would return a
++ * (non-spurious) interrupt. */
++#define xen_get_virtual_psr_i()		(*(int *)(XSI_PSR_I))
++#define xen_set_virtual_psr_i(_val)	({ *(int *)(XSI_PSR_I) = _val ? 1:0; })
++#define xen_set_virtual_psr_ic(_val)	({ *(int *)(XSI_PSR_IC) = _val ? 1:0; })
++#define xen_get_virtual_pend()		(*(int *)(XSI_PEND))
++
++/* Hyperprivops are "break" instructions with a well-defined API.
++ * In particular, the virtual psr.ic bit must be off; in this way
++ * it is guaranteed to never conflict with a linux break instruction.
++ * Normally, this is done in a xen stub but this one is frequent enough
++ * that we inline it */
++#define xen_hyper_ssm_i()						\
++({									\
++	xen_set_virtual_psr_i(0);					\
++	xen_set_virtual_psr_ic(0);					\
++	XEN_HYPER_SSM_I;						\
++})
++
++/* turning off interrupts can be paravirtualized simply by writing
++ * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
++#define xen_rsm_i()	xen_set_virtual_psr_i(0)
++
++/* turning on interrupts is a bit more complicated.. write to the
++ * memory-mapped virtual psr.i bit first (to avoid race condition),
++ * then if any interrupts were pending, we have to execute a hyperprivop
++ * to ensure the pending interrupt gets delivered; else we're done! */
++#define xen_ssm_i()							\
++({									\
++	int old = xen_get_virtual_psr_i();				\
++	xen_set_virtual_psr_i(1);					\
++	if (!old && xen_get_virtual_pend()) xen_hyper_ssm_i();		\
++})
++
++#define xen_ia64_intrin_local_irq_restore(x)				\
++{									\
++     if (running_on_xen) {						\
++	if ((x) & IA64_PSR_I) { xen_ssm_i(); }				\
++	else { xen_rsm_i(); }						\
++    }									\
++    else __ia64_intrin_local_irq_restore((x));				\
++}
++
++#define	xen_get_psr_i()							\
++(									\
++	(running_on_xen) ?						\
++		(xen_get_virtual_psr_i() ? IA64_PSR_I : 0)		\
++		: __ia64_get_psr_i()					\
++)
++
++#define xen_ia64_ssm(mask)						\
++{									\
++	if ((mask)==IA64_PSR_I) {					\
++		if (running_on_xen) { xen_ssm_i(); }			\
++		else { __ia64_ssm(mask); }				\
++	}								\
++	else { __ia64_ssm(mask); }					\
++}
++
++#define xen_ia64_rsm(mask)						\
++{									\
++	if ((mask)==IA64_PSR_I) {					\
++		if (running_on_xen) { xen_rsm_i(); }			\
++		else { __ia64_rsm(mask); }				\
++	}								\
++	else { __ia64_rsm(mask); }					\
++}
++
++
++/* Although all privileged operations can be left to trap and will
++ * be properly handled by Xen, some are frequent enough that we use
++ * hyperprivops for performance. */
++
++extern unsigned long xen_get_ivr(void);
++extern unsigned long xen_get_tpr(void);
++extern void xen_set_itm(unsigned long);
++extern void xen_set_tpr(unsigned long);
++extern void xen_eoi(void);
++extern void xen_set_rr(unsigned long index, unsigned long val);
++extern unsigned long xen_get_rr(unsigned long index);
++extern void xen_set_kr(unsigned long index, unsigned long val);
++
++/* Note: It may look wrong to test for running_on_xen in each case.
++ * However regnum is always a constant so, as written, the compiler
++ * eliminates the switch statement, whereas running_on_xen must be
++ * tested dynamically. */
++#define xen_ia64_getreg(regnum)						\
++({									\
++	__u64 ia64_intri_res;						\
++									\
++	switch(regnum) {						\
++	case _IA64_REG_CR_IVR:						\
++		ia64_intri_res = (running_on_xen) ?			\
++			xen_get_ivr() :					\
++			__ia64_getreg(regnum);				\
++		break;							\
++	case _IA64_REG_CR_TPR:						\
++		ia64_intri_res = (running_on_xen) ?			\
++			xen_get_tpr() :					\
++			__ia64_getreg(regnum);				\
++		break;							\
++	case _IA64_REG_AR_EFLAG:					\
++		ia64_intri_res = (running_on_xen) ?			\
++			xen_get_eflag() :				\
++			__ia64_getreg(regnum);				\
++		break;							\
++	default:							\
++		ia64_intri_res = __ia64_getreg(regnum);			\
++		break;							\
++	}								\
++	ia64_intri_res;							\
++})
++
++#define xen_ia64_setreg(regnum,val)					\
++({									\
++	switch(regnum) {						\
++	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:			\
++		(running_on_xen) ?					\
++			xen_set_kr((regnum-_IA64_REG_AR_KR0), val) :	\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	case _IA64_REG_CR_ITM:						\
++		(running_on_xen) ?					\
++			xen_set_itm(val) :				\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	case _IA64_REG_CR_TPR:						\
++		(running_on_xen) ?					\
++			xen_set_tpr(val) :				\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	case _IA64_REG_CR_EOI:						\
++		(running_on_xen) ?					\
++			xen_eoi() :					\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	case _IA64_REG_AR_EFLAG:					\
++		(running_on_xen) ?					\
++			xen_set_eflag(val) :				\
++			__ia64_setreg(regnum,val);			\
++		break;							\
++	default:							\
++		__ia64_setreg(regnum,val);				\
++		break;							\
++	}								\
++})
++
++#define ia64_ssm			xen_ia64_ssm
++#define ia64_rsm			xen_ia64_rsm
++#define ia64_intrin_local_irq_restore	xen_ia64_intrin_local_irq_restore
++#define	ia64_ptcga			xen_ptcga
++#define	ia64_set_rr(index,val)		xen_set_rr(index,val)
++#define	ia64_get_rr(index)		xen_get_rr(index)
++#define ia64_getreg			xen_ia64_getreg
++#define ia64_setreg			xen_ia64_setreg
++#define	ia64_get_psr_i			xen_get_psr_i
++
++/* the remainder of these are not performance-sensitive so it's
++ * OK to not paravirtualize and just take a privop trap and emulate */
++#define ia64_hint			__ia64_hint
++#define ia64_set_pmd			__ia64_set_pmd
++#define ia64_itci			__ia64_itci
++#define ia64_itcd			__ia64_itcd
++#define ia64_itri			__ia64_itri
++#define ia64_itrd			__ia64_itrd
++#define ia64_tpa			__ia64_tpa
++#define ia64_set_ibr			__ia64_set_ibr
++#define ia64_set_pkr			__ia64_set_pkr
++#define ia64_set_pmc			__ia64_set_pmc
++#define ia64_get_ibr			__ia64_get_ibr
++#define ia64_get_pkr			__ia64_get_pkr
++#define ia64_get_pmc			__ia64_get_pmc
++#define ia64_ptce			__ia64_ptce
++#define ia64_ptcl			__ia64_ptcl
++#define ia64_ptri			__ia64_ptri
++#define ia64_ptrd			__ia64_ptrd
++
++#endif /* !__ASSEMBLY__ */
++
++/* these routines utilize privilege-sensitive or performance-sensitive
++ * privileged instructions so the code must be replaced with
++ * paravirtualized versions */
++#define ia64_pal_halt_light		xen_pal_halt_light
++#define	ia64_leave_kernel		xen_leave_kernel
++#define	ia64_leave_syscall		xen_leave_syscall
++#define	ia64_trace_syscall		xen_trace_syscall
++#define	ia64_switch_to			xen_switch_to
++#define	ia64_pal_call_static		xen_pal_call_static
++
++#endif /* _ASM_IA64_XEN_PRIVOP_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-x86_64/smp.h linux-2.6.12-xen/include/asm-x86_64/smp.h
+--- pristine-linux-2.6.12/include/asm-x86_64/smp.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/asm-x86_64/smp.h	2006-03-05 23:54:36.927062367 +0100
+@@ -46,6 +46,8 @@ extern int pic_mode;
+ extern int smp_num_siblings;
+ extern void smp_flush_tlb(void);
+ extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
++extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
++				     int retry, int wait);
+ extern void smp_send_reschedule(int cpu);
+ extern void smp_invalidate_rcv(void);		/* Process an NMI */
+ extern void zap_low_mappings(void);
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/agp.h linux-2.6.12-xen/include/asm-xen/asm-i386/agp.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/agp.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/agp.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,37 @@
++#ifndef AGP_H
++#define AGP_H 1
++
++#include <asm/pgtable.h>
++#include <asm/cacheflush.h>
++#include <asm/system.h>
++
++/* 
++ * Functions to keep the agpgart mappings coherent with the MMU.
++ * The GART gives the CPU a physical alias of pages in memory. The alias region is
++ * mapped uncacheable. Make sure there are no conflicting mappings
++ * with different cacheability attributes for the same page. This avoids
++ * data corruption on some CPUs.
++ */
++
++int map_page_into_agp(struct page *page);
++int unmap_page_from_agp(struct page *page);
++#define flush_agp_mappings() global_flush_tlb()
++
++/* Could use CLFLUSH here if the cpu supports it. But then it would
++   need to be called for each cacheline of the whole page so it may not be 
++   worth it. Would need a page for it. */
++#define flush_agp_cache() wbinvd()
++
++/* Convert a physical address to an address suitable for the GART. */
++#define phys_to_gart(x) phys_to_machine(x)
++#define gart_to_phys(x) machine_to_phys(x)
++
++/* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#define alloc_gatt_pages(order)	({                                          \
++	char *_t; dma_addr_t _d;                                            \
++	_t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL);    \
++	_t; })
++#define free_gatt_pages(table, order)	\
++	dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/desc.h linux-2.6.12-xen/include/asm-xen/asm-i386/desc.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/desc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/desc.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,148 @@
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
++
++#include <asm/ldt.h>
++#include <asm/segment.h>
++
++#define CPU_16BIT_STACK_SIZE 1024
++
++#ifndef __ASSEMBLY__
++
++#include <linux/preempt.h>
++#include <linux/smp.h>
++
++#include <asm/mmu.h>
++
++extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
++
++DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++
++struct Xgt_desc_struct {
++	unsigned short size;
++	unsigned long address __attribute__((packed));
++	unsigned short pad;
++} __attribute__ ((packed));
++
++extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
++
++#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
++
++#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)cpu_gdt_descr[(_cpu)].address)
++
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++extern void set_intr_gate(unsigned int irq, void * addr);
++
++#define _set_tssldt_desc(n,addr,limit,type) \
++__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
++	"movw %%ax,2(%2)\n\t" \
++	"rorl $16,%%eax\n\t" \
++	"movb %%al,4(%2)\n\t" \
++	"movb %4,5(%2)\n\t" \
++	"movb $0,6(%2)\n\t" \
++	"movb %%ah,7(%2)\n\t" \
++	"rorl $16,%%eax" \
++	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
++
++static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
++{
++	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
++		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
++}
++
++#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
++
++static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
++{
++	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
++	    (int)addr, ((size << 3)-1), 0x82);
++}
++
++#define LDT_entry_a(info) \
++	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++
++#define LDT_entry_b(info) \
++	(((info)->base_addr & 0xff000000) | \
++	(((info)->base_addr & 0x00ff0000) >> 16) | \
++	((info)->limit & 0xf0000) | \
++	(((info)->read_exec_only ^ 1) << 9) | \
++	((info)->contents << 10) | \
++	(((info)->seg_not_present ^ 1) << 15) | \
++	((info)->seg_32bit << 22) | \
++	((info)->limit_in_pages << 23) | \
++	((info)->useable << 20) | \
++	0x7000)
++
++#define LDT_empty(info) (\
++	(info)->base_addr	== 0	&& \
++	(info)->limit		== 0	&& \
++	(info)->contents	== 0	&& \
++	(info)->read_exec_only	== 1	&& \
++	(info)->seg_32bit	== 0	&& \
++	(info)->limit_in_pages	== 0	&& \
++	(info)->seg_not_present	== 1	&& \
++	(info)->useable		== 0	)
++
++extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
++
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
++	C(0); C(1); C(2);
++#undef C
++}
++
++static inline void clear_LDT(void)
++{
++	int cpu = get_cpu();
++
++	/*
++	 * NB. We load the default_ldt for lcall7/27 handling on demand, as
++	 * it slows down context switching. No one uses it anyway.
++	 */
++	cpu = cpu;		/* XXX avoid compiler warning */
++	xen_set_ldt(0UL, 0);
++	put_cpu();
++}
++
++/*
++ * load one particular LDT into the current CPU
++ */
++static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
++{
++	void *segments = pc->ldt;
++	int count = pc->size;
++
++	if (likely(!count))
++		segments = NULL;
++
++	xen_set_ldt((unsigned long)segments, count);
++}
++
++static inline void load_LDT(mm_context_t *pc)
++{
++	int cpu = get_cpu();
++	load_LDT_nolock(pc, cpu);
++	put_cpu();
++}
++
++static inline unsigned long get_desc_base(unsigned long *desc)
++{
++	unsigned long base;
++	base = ((desc[0] >> 16)  & 0x0000ffff) |
++		((desc[1] << 16) & 0x00ff0000) |
++		(desc[1] & 0xff000000);
++	return base;
++}
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/dma-mapping.h linux-2.6.12-xen/include/asm-xen/asm-i386/dma-mapping.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/dma-mapping.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/dma-mapping.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,156 @@
++#ifndef _ASM_I386_DMA_MAPPING_H
++#define _ASM_I386_DMA_MAPPING_H
++
++/*
++ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
++ * documentation.
++ */
++
++#include <linux/config.h>
++#include <linux/mm.h>
++#include <asm/cache.h>
++#include <asm/io.h>
++#include <asm/scatterlist.h>
++#include <asm-i386/swiotlb.h>
++
++static inline int
++address_needs_mapping(struct device *hwdev, dma_addr_t addr)
++{
++	dma_addr_t mask = 0xffffffff;
++	/* If the device has a mask, use it, otherwise default to 32 bits */
++	if (hwdev && hwdev->dma_mask)
++		mask = *hwdev->dma_mask;
++	return (addr & ~mask) != 0;
++}
++
++static inline int
++range_straddles_page_boundary(void *p, size_t size)
++{
++	extern unsigned long *contiguous_bitmap;
++	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
++		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
++}
++
++#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
++#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++			   dma_addr_t *dma_handle, unsigned int __nocast flag);
++
++void dma_free_coherent(struct device *dev, size_t size,
++			 void *vaddr, dma_addr_t dma_handle);
++
++extern dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++	       enum dma_data_direction direction);
++
++extern void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++		 enum dma_data_direction direction);
++
++extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
++		      int nents, enum dma_data_direction direction);
++extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++			 int nents, enum dma_data_direction direction);
++
++extern dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++	     size_t size, enum dma_data_direction direction);
++
++extern void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++	       enum dma_data_direction direction);
++
++extern void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++			enum dma_data_direction direction);
++
++extern void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++                           enum dma_data_direction direction);
++
++static inline void
++dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
++			      unsigned long offset, size_t size,
++			      enum dma_data_direction direction)
++{
++	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
++}
++
++static inline void
++dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
++				 unsigned long offset, size_t size,
++				 enum dma_data_direction direction)
++{
++	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
++}
++
++static inline void
++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
++		    enum dma_data_direction direction)
++{
++	if (swiotlb)
++		swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
++	flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
++		    enum dma_data_direction direction)
++{
++	if (swiotlb)
++		swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
++	flush_write_buffers();
++}
++
++extern int
++dma_mapping_error(dma_addr_t dma_addr);
++
++extern int
++dma_supported(struct device *dev, u64 mask);
++
++static inline int
++dma_set_mask(struct device *dev, u64 mask)
++{
++	if(!dev->dma_mask || !dma_supported(dev, mask))
++		return -EIO;
++
++	*dev->dma_mask = mask;
++
++	return 0;
++}
++
++#ifdef __i386__
++static inline int
++dma_get_cache_alignment(void)
++{
++	/* no easy way to get cache size on all x86, so return the
++	 * maximum possible, to be safe */
++	return (1 << L1_CACHE_SHIFT_MAX);
++}
++#else
++extern int dma_get_cache_alignment(void);
++#endif
++
++#define dma_is_consistent(d)	(1)
++
++static inline void
++dma_cache_sync(void *vaddr, size_t size,
++	       enum dma_data_direction direction)
++{
++	flush_write_buffers();
++}
++
++#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++extern int
++dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++			    dma_addr_t device_addr, size_t size, int flags);
++
++extern void
++dma_release_declared_memory(struct device *dev);
++
++extern void *
++dma_mark_declared_memory_occupied(struct device *dev,
++				  dma_addr_t device_addr, size_t size);
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/fixmap.h linux-2.6.12-xen/include/asm-xen/asm-i386/fixmap.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/fixmap.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/fixmap.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,167 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1998 Ingo Molnar
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
++
++#include <linux/config.h>
++
++/* used by vmalloc.c, vsyscall.lds.S.
++ *
++ * Leave one empty page between vmalloc'ed areas and
++ * the start of the fixmap.
++ */
++#define __FIXADDR_TOP	(HYPERVISOR_VIRT_START - 2 * PAGE_SIZE)
++
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <asm/acpi.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#include <asm-xen/gnttab.h>
++#ifdef CONFIG_HIGHMEM
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#endif
++
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process. We allocate these special addresses
++ * from the end of virtual memory (0xfffff000) backwards.
++ * Also this lets us do fail-safe vmalloc(), we
++ * can guarantee that these special addresses and
++ * vmalloc()-ed addresses never overlap.
++ *
++ * these 'compile-time allocated' memory buffers are
++ * fixed-size 4k pages (or larger if used with an increment
++ * higher than 1). Use set_fixmap(idx,phys) to associate
++ * physical memory with fixmap indices.
++ *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
++ */
++enum fixed_addresses {
++	FIX_HOLE,
++	FIX_VSYSCALL,
++#ifdef CONFIG_X86_LOCAL_APIC
++	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++	FIX_IO_APIC_BASE_0,
++	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_X86_VISWS_APIC
++	FIX_CO_CPU,	/* Cobalt timer */
++	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */ 
++	FIX_LI_PCIA,	/* Lithium PCI Bridge A */
++	FIX_LI_PCIB,	/* Lithium PCI Bridge B */
++#endif
++#ifdef CONFIG_X86_F00F_BUG
++	FIX_F00F_IDT,	/* Virtual mapping for IDT */
++#endif
++#ifdef CONFIG_X86_CYCLONE_TIMER
++	FIX_CYCLONE_TIMER, /*cyclone timer register*/
++#endif 
++#ifdef CONFIG_HIGHMEM
++	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
++	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
++#endif
++#ifdef CONFIG_ACPI_BOOT
++	FIX_ACPI_BEGIN,
++	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++#ifdef CONFIG_PCI_MMCONFIG
++	FIX_PCIE_MCFG,
++#endif
++	FIX_SHARED_INFO,
++	FIX_GNTTAB_BEGIN,
++	FIX_GNTTAB_END = FIX_GNTTAB_BEGIN + NR_GRANT_FRAMES - 1,
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++#define NR_FIX_ISAMAPS	256
++	FIX_ISAMAP_END,
++	FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++#endif
++	__end_of_permanent_fixed_addresses,
++	/* temporary boot-time mappings, used before ioremap() is functional */
++#define NR_FIX_BTMAPS	16
++	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
++	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
++	FIX_WP_TEST,
++	__end_of_fixed_addresses
++};
++
++extern void __set_fixmap(
++	enum fixed_addresses idx, maddr_t phys, pgprot_t flags);
++
++#define set_fixmap(idx, phys) \
++		__set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
++
++#define clear_fixmap(idx) \
++		__set_fixmap(idx, 0, __pgprot(0))
++
++#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
++
++#define __FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
++#define __FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)
++#define FIXADDR_BOOT_START	(FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
++
++#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
++#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
++
++/*
++ * This is the range that is readable by user mode, and things
++ * acting like user mode such as get_user_pages.
++ */
++#define FIXADDR_USER_START	(__fix_to_virt(FIX_VSYSCALL))
++#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
++
++
++extern void __this_fixmap_does_not_exist(void);
++
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++static __always_inline unsigned long fix_to_virt(const unsigned int idx)
++{
++	/*
++	 * this branch gets completely eliminated after inlining,
++	 * except when someone tries to use fixaddr indices in an
++	 * illegal way. (such as mixing up address types or using
++	 * out-of-range indices).
++	 *
++	 * If it doesn't get removed, the linker will complain
++	 * loudly with a reasonably clear error message.
++	 */
++	if (idx >= __end_of_fixed_addresses)
++		__this_fixmap_does_not_exist();
++
++	return __fix_to_virt(idx);
++}
++
++static inline unsigned long virt_to_fix(const unsigned long vaddr)
++{
++	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
++	return __virt_to_fix(vaddr);
++}
++
++#endif /* !__ASSEMBLY__ */
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/floppy.h linux-2.6.12-xen/include/asm-xen/asm-i386/floppy.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/floppy.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/floppy.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,147 @@
++/*
++ * Architecture specific parts of the Floppy driver
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1995
++ *
++ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
++ */
++#ifndef __ASM_XEN_I386_FLOPPY_H
++#define __ASM_XEN_I386_FLOPPY_H
++
++#include <linux/vmalloc.h>
++
++/* XEN: Hit DMA paths on the head. This trick is from asm-m68k/floppy.h. */
++#include <asm/dma.h>
++#undef MAX_DMA_ADDRESS
++#define MAX_DMA_ADDRESS 0
++#define CROSS_64KB(a,s) (0)
++
++#define fd_inb(port)			inb_p(port)
++#define fd_outb(value,port)		outb_p(value,port)
++
++#define fd_request_dma()        (0)
++#define fd_free_dma()           ((void)0)
++#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
++#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
++#define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)
++#define fd_get_dma_residue()    (virtual_dma_count + virtual_dma_residue)
++#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
++/*
++ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
++ * softirq context via motor_off_callback. A generic bug we happen to trigger.
++ */
++#define fd_dma_mem_alloc(size)	__get_free_pages(GFP_KERNEL, get_order(size))
++#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
++
++static int virtual_dma_count;
++static int virtual_dma_residue;
++static char *virtual_dma_addr;
++static int virtual_dma_mode;
++static int doing_pdma;
++
++static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
++{
++	register unsigned char st;
++	register int lcount;
++	register char *lptr;
++
++	if (!doing_pdma)
++		return floppy_interrupt(irq, dev_id, regs);
++
++	st = 1;
++	for(lcount=virtual_dma_count, lptr=virtual_dma_addr; 
++	    lcount; lcount--, lptr++) {
++		st=inb(virtual_dma_port+4) & 0xa0 ;
++		if(st != 0xa0) 
++			break;
++		if(virtual_dma_mode)
++			outb_p(*lptr, virtual_dma_port+5);
++		else
++			*lptr = inb_p(virtual_dma_port+5);
++	}
++	virtual_dma_count = lcount;
++	virtual_dma_addr = lptr;
++	st = inb(virtual_dma_port+4);
++
++	if(st == 0x20)
++		return IRQ_HANDLED;
++	if(!(st & 0x20)) {
++		virtual_dma_residue += virtual_dma_count;
++		virtual_dma_count=0;
++		doing_pdma = 0;
++		floppy_interrupt(irq, dev_id, regs);
++		return IRQ_HANDLED;
++	}
++	return IRQ_HANDLED;
++}
++
++static void fd_disable_dma(void)
++{
++	doing_pdma = 0;
++	virtual_dma_residue += virtual_dma_count;
++	virtual_dma_count=0;
++}
++
++static int fd_request_irq(void)
++{
++	return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
++					   "floppy", NULL);
++}
++
++static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
++{
++	doing_pdma = 1;
++	virtual_dma_port = io;
++	virtual_dma_mode = (mode  == DMA_MODE_WRITE);
++	virtual_dma_addr = addr;
++	virtual_dma_count = size;
++	virtual_dma_residue = 0;
++	return 0;
++}
++
++/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
++#define FDC1 xen_floppy_init()
++static int FDC2 = -1;
++
++static int xen_floppy_init(void)
++{
++	use_virtual_dma = 1;
++	can_use_virtual_dma = 1;
++	return 0x3f0;
++}
++
++/*
++ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
++ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
++ * coincides with another rtc CMOS user.		Paul G.
++ */
++#define FLOPPY0_TYPE	({				\
++	unsigned long flags;				\
++	unsigned char val;				\
++	spin_lock_irqsave(&rtc_lock, flags);		\
++	val = (CMOS_READ(0x10) >> 4) & 15;		\
++	spin_unlock_irqrestore(&rtc_lock, flags);	\
++	val;						\
++})
++
++#define FLOPPY1_TYPE	({				\
++	unsigned long flags;				\
++	unsigned char val;				\
++	spin_lock_irqsave(&rtc_lock, flags);		\
++	val = CMOS_READ(0x10) & 15;			\
++	spin_unlock_irqrestore(&rtc_lock, flags);	\
++	val;						\
++})
++
++#define N_FDC 2
++#define N_DRIVE 8
++
++#define FLOPPY_MOTOR_MASK 0xf0
++
++#define EXTRA_FLOPPY_PARAMS
++
++#endif /* __ASM_XEN_I386_FLOPPY_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/highmem.h linux-2.6.12-xen/include/asm-xen/asm-i386/highmem.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/highmem.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/highmem.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,80 @@
++/*
++ * highmem.h: virtual kernel memory mappings for high memory
++ *
++ * Used in CONFIG_HIGHMEM systems for memory pages which
++ * are not addressable by direct kernel virtual addresses.
++ *
++ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
++ *		      Gerhard.Wichert at pdb.siemens.de
++ *
++ *
++ * Redesigned the x86 32-bit VM architecture to deal with 
++ * up to 16 Terabyte physical memory. With current x86 CPUs
++ * we now support up to 64 Gigabytes physical RAM.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
++ */
++
++#ifndef _ASM_HIGHMEM_H
++#define _ASM_HIGHMEM_H
++
++#ifdef __KERNEL__
++
++#include <linux/config.h>
++#include <linux/interrupt.h>
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#include <asm/tlbflush.h>
++
++/* declarations for highmem.c */
++extern unsigned long highstart_pfn, highend_pfn;
++
++extern pte_t *kmap_pte;
++extern pgprot_t kmap_prot;
++extern pte_t *pkmap_page_table;
++
++/*
++ * Right now we initialize only a single pte table. It can be extended
++ * easily, subsequent pte tables have to be allocated in one physical
++ * chunk of RAM.
++ */
++#ifdef CONFIG_X86_PAE
++#define LAST_PKMAP 512
++#else
++#define LAST_PKMAP 1024
++#endif
++/*
++ * Ordering is:
++ *
++ * FIXADDR_TOP
++ * 			fixed_addresses
++ * FIXADDR_START
++ * 			temp fixed addresses
++ * FIXADDR_BOOT_START
++ * 			Persistent kmap area
++ * PKMAP_BASE
++ * VMALLOC_END
++ * 			Vmalloc area
++ * VMALLOC_START
++ * high_memory
++ */
++#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
++#define LAST_PKMAP_MASK (LAST_PKMAP-1)
++#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
++#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
++
++extern void * FASTCALL(kmap_high(struct page *page));
++extern void FASTCALL(kunmap_high(struct page *page));
++
++void *kmap(struct page *page);
++void kunmap(struct page *page);
++void *kmap_atomic(struct page *page, enum km_type type);
++void *kmap_atomic_pte(struct page *page, enum km_type type);
++void kunmap_atomic(void *kvaddr, enum km_type type);
++struct page *kmap_atomic_to_page(void *ptr);
++
++#define flush_cache_kmaps()	do { } while (0)
++
++#endif /* __KERNEL__ */
++
++#endif /* _ASM_HIGHMEM_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/hw_irq.h linux-2.6.12-xen/include/asm-xen/asm-i386/hw_irq.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/hw_irq.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/hw_irq.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,71 @@
++#ifndef _ASM_HW_IRQ_H
++#define _ASM_HW_IRQ_H
++
++/*
++ *	linux/include/asm/hw_irq.h
++ *
++ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ *	moved some of the old arch/i386/kernel/irq.h to here. VY
++ *
++ *	IRQ/IPI changes taken from work by Thomas Radke
++ *	<tomsoft at informatik.tu-chemnitz.de>
++ */
++
++#include <linux/config.h>
++#include <linux/profile.h>
++#include <asm/atomic.h>
++#include <asm/irq.h>
++#include <asm/sections.h>
++
++/*
++ * Various low-level irq details needed by irq.c, process.c,
++ * time.c, io_apic.c and smp.c
++ *
++ * Interrupt entry/exit code at both C and assembly level
++ */
++
++extern u8 irq_vector[NR_IRQ_VECTORS];
++#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
++#define AUTO_ASSIGN		-1
++
++extern void (*interrupt[NR_IRQS])(void);
++
++#ifdef CONFIG_SMP
++fastcall void reschedule_interrupt(void);
++fastcall void invalidate_interrupt(void);
++fastcall void call_function_interrupt(void);
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++fastcall void apic_timer_interrupt(void);
++fastcall void error_interrupt(void);
++fastcall void spurious_interrupt(void);
++fastcall void thermal_interrupt(struct pt_regs *);
++#define platform_legacy_irq(irq)	((irq) < 16)
++#endif
++
++void disable_8259A_irq(unsigned int irq);
++void enable_8259A_irq(unsigned int irq);
++int i8259A_irq_pending(unsigned int irq);
++void make_8259A_irq(unsigned int irq);
++void init_8259A(int aeoi);
++void FASTCALL(send_IPI_self(int vector));
++void init_VISWS_APIC_irqs(void);
++void setup_IO_APIC(void);
++void disable_IO_APIC(void);
++void print_IO_APIC(void);
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
++void send_IPI(int dest, int vector);
++void setup_ioapic_dest(void);
++
++extern unsigned long io_apic_irqs;
++
++extern atomic_t irq_err_count;
++extern atomic_t irq_mis_count;
++
++#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
++
++extern void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i);
++
++#endif /* _ASM_HW_IRQ_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/hypercall.h linux-2.6.12-xen/include/asm-xen/asm-i386/hypercall.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/hypercall.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/hypercall.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,323 @@
++/******************************************************************************
++ * hypercall.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/sched.h>
++#include <asm-xen/xen-public/nmi.h>
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
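++/*
++ * Each hypercall is issued by calling into a 32-byte stub in the hypercall
++ * transfer page, indexed by the hypercall number. Arguments are passed in
++ * %ebx, %ecx, %edx, %esi and %edi (in that order) and the result is
++ * returned in %eax; the register constraints below encode exactly that.
++ */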
++#define _hypercall0(type, name)			\
++({						\
++	long __res;				\
++	asm volatile (				\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res)			\
++		:				\
++		: "memory" );			\
++	(type)__res;				\
++})
++
++#define _hypercall1(type, name, a1)				\
++({								\
++	long __res, __ign1;					\
++	asm volatile (						\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=b" (__ign1)			\
++		: "1" ((long)(a1))				\
++		: "memory" );					\
++	(type)__res;						\
++})
++
++#define _hypercall2(type, name, a1, a2)				\
++({								\
++	long __res, __ign1, __ign2;				\
++	asm volatile (						\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=b" (__ign1), "=c" (__ign2)	\
++		: "1" ((long)(a1)), "2" ((long)(a2))		\
++		: "memory" );					\
++	(type)__res;						\
++})
++
++#define _hypercall3(type, name, a1, a2, a3)			\
++({								\
++	long __res, __ign1, __ign2, __ign3;			\
++	asm volatile (						\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=b" (__ign1), "=c" (__ign2), 	\
++		"=d" (__ign3)					\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3))				\
++		: "memory" );					\
++	(type)__res;						\
++})
++
++#define _hypercall4(type, name, a1, a2, a3, a4)			\
++({								\
++	long __res, __ign1, __ign2, __ign3, __ign4;		\
++	asm volatile (						\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
++		"=d" (__ign3), "=S" (__ign4)			\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3)), "4" ((long)(a4))		\
++		: "memory" );					\
++	(type)__res;						\
++})
++
++#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
++({								\
++	long __res, __ign1, __ign2, __ign3, __ign4, __ign5;	\
++	asm volatile (						\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
++		"=d" (__ign3), "=S" (__ign4), "=D" (__ign5)	\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3)), "4" ((long)(a4)),		\
++		"5" ((long)(a5))				\
++		: "memory" );					\
++	(type)__res;						\
++})
++
++static inline int
++HYPERVISOR_set_trap_table(
++	trap_info_t *table)
++{
++	return _hypercall1(int, set_trap_table, table);
++}
++
++static inline int
++HYPERVISOR_mmu_update(
++	mmu_update_t *req, int count, int *success_count, domid_t domid)
++{
++	return _hypercall4(int, mmu_update, req, count, success_count, domid);
++}
++
++static inline int
++HYPERVISOR_mmuext_op(
++	struct mmuext_op *op, int count, int *success_count, domid_t domid)
++{
++	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
++}
++
++static inline int
++HYPERVISOR_set_gdt(
++	unsigned long *frame_list, int entries)
++{
++	return _hypercall2(int, set_gdt, frame_list, entries);
++}
++
++static inline int
++HYPERVISOR_stack_switch(
++	unsigned long ss, unsigned long esp)
++{
++	return _hypercall2(int, stack_switch, ss, esp);
++}
++
++static inline int
++HYPERVISOR_set_callbacks(
++	unsigned long event_selector, unsigned long event_address,
++	unsigned long failsafe_selector, unsigned long failsafe_address)
++{
++	return _hypercall4(int, set_callbacks,
++			   event_selector, event_address,
++			   failsafe_selector, failsafe_address);
++}
++
++static inline int
++HYPERVISOR_fpu_taskswitch(
++	int set)
++{
++	return _hypercall1(int, fpu_taskswitch, set);
++}
++
++static inline int
++HYPERVISOR_sched_op(
++	int cmd, unsigned long arg)
++{
++	return _hypercall2(int, sched_op, cmd, arg);
++}
++
++static inline long
++HYPERVISOR_set_timer_op(
++	u64 timeout)
++{
++	unsigned long timeout_hi = (unsigned long)(timeout>>32);
++	unsigned long timeout_lo = (unsigned long)timeout;
++	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
++}
++
++static inline int
++HYPERVISOR_dom0_op(
++	dom0_op_t *dom0_op)
++{
++	dom0_op->interface_version = DOM0_INTERFACE_VERSION;
++	return _hypercall1(int, dom0_op, dom0_op);
++}
++
++static inline int
++HYPERVISOR_set_debugreg(
++	int reg, unsigned long value)
++{
++	return _hypercall2(int, set_debugreg, reg, value);
++}
++
++static inline unsigned long
++HYPERVISOR_get_debugreg(
++	int reg)
++{
++	return _hypercall1(unsigned long, get_debugreg, reg);
++}
++
++static inline int
++HYPERVISOR_update_descriptor(
++	u64 ma, u64 desc)
++{
++	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
++}
++
++static inline int
++HYPERVISOR_memory_op(
++	unsigned int cmd, void *arg)
++{
++	return _hypercall2(int, memory_op, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_multicall(
++	void *call_list, int nr_calls)
++{
++	return _hypercall2(int, multicall, call_list, nr_calls);
++}
++
++static inline int
++HYPERVISOR_update_va_mapping(
++	unsigned long va, pte_t new_val, unsigned long flags)
++{
++	unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++	pte_hi = new_val.pte_high;
++#endif
++	return _hypercall4(int, update_va_mapping, va,
++			   new_val.pte_low, pte_hi, flags);
++}
++
++static inline int
++HYPERVISOR_event_channel_op(
++	void *op)
++{
++	return _hypercall1(int, event_channel_op, op);
++}
++
++static inline int
++HYPERVISOR_xen_version(
++	int cmd, void *arg)
++{
++	return _hypercall2(int, xen_version, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_console_io(
++	int cmd, int count, char *str)
++{
++	return _hypercall3(int, console_io, cmd, count, str);
++}
++
++static inline int
++HYPERVISOR_physdev_op(
++	void *physdev_op)
++{
++	return _hypercall1(int, physdev_op, physdev_op);
++}
++
++static inline int
++HYPERVISOR_grant_table_op(
++	unsigned int cmd, void *uop, unsigned int count)
++{
++	return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
++
++static inline int
++HYPERVISOR_update_va_mapping_otherdomain(
++	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++	unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++	pte_hi = new_val.pte_high;
++#endif
++	return _hypercall5(int, update_va_mapping_otherdomain, va,
++			   new_val.pte_low, pte_hi, flags, domid);
++}
++
++static inline int
++HYPERVISOR_vm_assist(
++	unsigned int cmd, unsigned int type)
++{
++	return _hypercall2(int, vm_assist, cmd, type);
++}
++
++static inline int
++HYPERVISOR_vcpu_op(
++	int cmd, int vcpuid, void *extra_args)
++{
++	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
++}
++
++static inline int
++HYPERVISOR_suspend(
++	unsigned long srec)
++{
++	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
++			   SHUTDOWN_suspend, srec);
++}
++
++static inline int
++HYPERVISOR_nmi_op(
++	unsigned long op,
++	unsigned long arg)
++{
++	return _hypercall2(int, nmi_op, op, arg);
++}
++
++#endif /* __HYPERCALL_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/hypervisor.h linux-2.6.12-xen/include/asm-xen/asm-i386/hypervisor.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/hypervisor.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,175 @@
++/******************************************************************************
++ * hypervisor.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
++
++#include <linux/config.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/dom0_ops.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
++#if defined(__i386__)
++#  ifdef CONFIG_X86_PAE
++#   include <asm-generic/pgtable-nopud.h>
++#  else
++#   include <asm-generic/pgtable-nopmd.h>
++#  endif
++#endif
++
++extern shared_info_t *HYPERVISOR_shared_info;
++
++/* arch/xen/i386/kernel/setup.c */
++extern start_info_t *xen_start_info;
++
++/* arch/xen/kernel/evtchn.c */
++/* Force a proper event-channel callback from Xen. */
++void force_evtchn_callback(void);
++
++/* arch/xen/kernel/process.c */
++void xen_cpu_idle (void);
++
++/* arch/xen/i386/kernel/hypervisor.c */
++void do_hypervisor_callback(struct pt_regs *regs);
++
++/* arch/xen/i386/kernel/head.S */
++void lgdt_finish(void);
++
++/* arch/xen/i386/mm/hypervisor.c */
++/*
++ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
++ * be MACHINE addresses.
++ */
++
++void xen_pt_switch(unsigned long ptr);
++void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
++void xen_load_gs(unsigned int selector); /* x86_64 only */
++void xen_tlb_flush(void);
++void xen_invlpg(unsigned long ptr);
++
++#ifndef CONFIG_XEN_SHADOW_MODE
++void xen_l1_entry_update(pte_t *ptr, pte_t val);
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
++void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
++void xen_pgd_pin(unsigned long ptr);
++void xen_pgd_unpin(unsigned long ptr);
++void xen_pud_pin(unsigned long ptr); /* x86_64 only */
++void xen_pud_unpin(unsigned long ptr); /* x86_64 only */
++void xen_pmd_pin(unsigned long ptr); /* x86_64 only */
++void xen_pmd_unpin(unsigned long ptr); /* x86_64 only */
++void xen_pte_pin(unsigned long ptr);
++void xen_pte_unpin(unsigned long ptr);
++#else
++#define xen_l1_entry_update(_p, _v) set_pte((_p), (_v))
++#define xen_l2_entry_update(_p, _v) set_pgd((_p), (_v))
++#define xen_pgd_pin(_p)   ((void)0)
++#define xen_pgd_unpin(_p) ((void)0)
++#define xen_pte_pin(_p)   ((void)0)
++#define xen_pte_unpin(_p) ((void)0)
++#endif
++
++void xen_set_ldt(unsigned long ptr, unsigned long bytes);
++void xen_machphys_update(unsigned long mfn, unsigned long pfn);
++
++#ifdef CONFIG_SMP
++#include <linux/cpumask.h>
++void xen_tlb_flush_all(void);
++void xen_invlpg_all(unsigned long ptr);
++void xen_tlb_flush_mask(cpumask_t *mask);
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
++#endif
++
++/* Returns zero on success else negative errno. */
++int xen_create_contiguous_region(
++    unsigned long vstart, unsigned int order, unsigned int address_bits);
++void xen_destroy_contiguous_region(
++    unsigned long vstart, unsigned int order);
++
++#include <asm/hypercall.h>
++
++#if defined(CONFIG_X86_64)
++#define MULTI_UVMFLAGS_INDEX 2
++#define MULTI_UVMDOMID_INDEX 3
++#else
++#define MULTI_UVMFLAGS_INDEX 3
++#define MULTI_UVMDOMID_INDEX 4
++#endif
++
++#define xen_init()	(0)
++
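++/*
++ * Helpers that fill in a multicall_entry_t so that several va-mapping
++ * updates can be batched through HYPERVISOR_multicall. A pte occupies two
++ * argument slots on 32-bit (pte_low/pte_high) but only one on x86_64,
++ * which is why MULTI_UVMFLAGS_INDEX and MULTI_UVMDOMID_INDEX differ above.
++ */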
++static inline void
++MULTI_update_va_mapping(
++    multicall_entry_t *mcl, unsigned long va,
++    pte_t new_val, unsigned long flags)
++{
++    mcl->op = __HYPERVISOR_update_va_mapping;
++    mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++    mcl->args[1] = new_val.pte;
++    mcl->args[2] = flags;
++#elif defined(CONFIG_X86_PAE)
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = new_val.pte_high;
++    mcl->args[3] = flags;
++#else
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = 0;
++    mcl->args[3] = flags;
++#endif
++}
++
++static inline void
++MULTI_update_va_mapping_otherdomain(
++    multicall_entry_t *mcl, unsigned long va,
++    pte_t new_val, unsigned long flags, domid_t domid)
++{
++    mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
++    mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++    mcl->args[1] = new_val.pte;
++    mcl->args[2] = flags;
++    mcl->args[3] = domid;
++#elif defined(CONFIG_X86_PAE)
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = new_val.pte_high;
++    mcl->args[3] = flags;
++    mcl->args[4] = domid;
++#else
++    mcl->args[1] = new_val.pte_low;
++    mcl->args[2] = 0;
++    mcl->args[3] = flags;
++    mcl->args[4] = domid;
++#endif
++}
++
++#endif /* __HYPERVISOR_H__ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/io.h linux-2.6.12-xen/include/asm-xen/asm-i386/io.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/io.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/io.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,400 @@
++#ifndef _ASM_IO_H
++#define _ASM_IO_H
++
++#include <linux/config.h>
++#include <linux/string.h>
++#include <linux/compiler.h>
++
++/*
++ * This file contains the definitions for the x86 IO instructions
++ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
++ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
++ * versions of the single-IO instructions (inb_p/inw_p/..).
++ *
++ * This file is not meant to be obfuscating: it's just complicated
++ * to (a) handle it all in a way that makes gcc able to optimize it
++ * as well as possible and (b) to avoid writing the same thing
++ * over and over again with slight variations and possibly making a
++ * mistake somewhere.
++ */
++
++/*
++ * Thanks to James van Artsdalen for a better timing-fix than
++ * the two short jumps: using outb's to a nonexistent port seems
++ * to guarantee better timings even on fast machines.
++ *
++ * On the other hand, I'd like to be sure of a non-existent port:
++ * I feel a bit unsafe about using 0x80 (should be safe, though)
++ *
++ *		Linus
++ */
++
++ /*
++  *  Bit simplified and optimized by Jan Hubicka
++  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
++  *
++  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
++  *  isa_read[wl] and isa_write[wl] fixed
++  *  - Arnaldo Carvalho de Melo <acme at conectiva.com.br>
++  */
++
++#define IO_SPACE_LIMIT 0xffff
++
++#define XQUAD_PORTIO_BASE 0xfe400000
++#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
++
++#ifdef __KERNEL__
++
++#include <asm-generic/iomap.h>
++
++#include <linux/vmalloc.h>
++#include <asm/fixmap.h>
++
++/*
++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
++ * access
++ */
++#define xlate_dev_mem_ptr(p)	__va(p)
++
++/*
++ * Convert a virtual cached pointer to an uncached pointer
++ */
++#define xlate_dev_kmem_ptr(p)	p
++
++/**
++ *	virt_to_phys	-	map virtual addresses to physical
++ *	@address: address to remap
++ *
++ *	The returned physical address is the physical (CPU) mapping for
++ *	the memory address given. It is only valid to use this function on
++ *	addresses directly mapped or allocated via kmalloc. 
++ *
++ *	This function does not give bus mappings for DMA transfers. In
++ *	almost all conceivable cases a device driver should not be using
++ *	this function
++ */
++ 
++static inline unsigned long virt_to_phys(volatile void * address)
++{
++	return __pa(address);
++}
++
++/**
++ *	phys_to_virt	-	map physical address to virtual
++ *	@address: address to remap
++ *
++ *	The returned virtual address is a current CPU mapping for
++ *	the memory address given. It is only valid to use this function on
++ *	addresses that have a kernel mapping
++ *
++ *	This function does not handle bus mappings for DMA transfers. In
++ *	almost all conceivable cases a device driver should not be using
++ *	this function
++ */
++
++static inline void * phys_to_virt(unsigned long address)
++{
++	return __va(address);
++}
++
++/*
++ * Change "struct page" to a physical address. page_to_pseudophys() yields
++ * the guest pseudo-physical address; page_to_phys() converts that to the
++ * machine address, which is what devices must be given for DMA.
++ */
++#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
++
++#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
++				  (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
++				  (unsigned long) (bv)->bv_offset)
++
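++/* Only merge bio_vecs that are contiguous in machine space as well as in
++ * pseudo-physical space; under Xen the two views need not agree. */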
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
++	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
++	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++	  bvec_to_pseudophys((vec2))))
++
++extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
++
++/**
++ * ioremap     -   map bus memory into CPU space
++ * @offset:    bus address of the memory
++ * @size:      size of the resource to map
++ *
++ * ioremap performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address. 
++ */
++
++static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
++{
++	return __ioremap(offset, size, 0);
++}
++
++extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
++extern void iounmap(volatile void __iomem *addr);
++
++/*
++ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
++ * mappings, before the real ioremap() is functional.
++ * A boot-time mapping is currently limited to at most 16 pages.
++ */
++extern void *bt_ioremap(unsigned long offset, unsigned long size);
++extern void bt_iounmap(void *addr, unsigned long size);
++
++/*
++ * ISA I/O bus memory addresses are 1:1 with the physical address.
++ */
++#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
++#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
++#else
++#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
++#endif
++
++/*
++ * However PCI ones are not necessarily 1:1 and therefore these interfaces
++ * are forbidden in portable PCI drivers.
++ *
++ * Allow them on x86 for legacy drivers, though.
++ */
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++
++/*
++ * readX/writeX() are used to access memory mapped devices. On some
++ * architectures the memory mapped IO stuff needs to be accessed
++ * differently. On the x86 architecture, we just read/write the
++ * memory location directly.
++ */
++
++static inline unsigned char readb(const volatile void __iomem *addr)
++{
++	return *(volatile unsigned char __force *) addr;
++}
++static inline unsigned short readw(const volatile void __iomem *addr)
++{
++	return *(volatile unsigned short __force *) addr;
++}
++static inline unsigned int readl(const volatile void __iomem *addr)
++{
++	return *(volatile unsigned int __force *) addr;
++}
++#define readb_relaxed(addr) readb(addr)
++#define readw_relaxed(addr) readw(addr)
++#define readl_relaxed(addr) readl(addr)
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
++
++static inline void writeb(unsigned char b, volatile void __iomem *addr)
++{
++	*(volatile unsigned char __force *) addr = b;
++}
++static inline void writew(unsigned short b, volatile void __iomem *addr)
++{
++	*(volatile unsigned short __force *) addr = b;
++}
++static inline void writel(unsigned int b, volatile void __iomem *addr)
++{
++	*(volatile unsigned int __force *) addr = b;
++}
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
++
++#define mmiowb()
++
++static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
++{
++	memset((void __force *) addr, val, count);
++}
++static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
++{
++	__memcpy(dst, (void __force *) src, count);
++}
++static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
++{
++	__memcpy((void __force *) dst, src, count);
++}
++
++/*
++ * ISA space is 'always mapped' on a typical x86 system, no need to
++ * explicitly ioremap() it. The fact that the ISA IO space is mapped
++ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
++ * are physical addresses. The following constant pointer can be
++ * used as the IO-area pointer (it can be iounmapped as well, so the
++ * analogy with PCI is quite large):
++ */
++#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
++
++#define isa_readb(a) readb(__ISA_IO_base + (a))
++#define isa_readw(a) readw(__ISA_IO_base + (a))
++#define isa_readl(a) readl(__ISA_IO_base + (a))
++#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
++#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
++#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
++#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
++#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
++#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))
++
++
++/*
++ * Again, i386 does not require mem-I/O-specific functions.
++ */
++
++#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void __force *)(b),(c),(d))
++#define isa_eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d))
++
++/**
++ *	check_signature		-	find BIOS signatures
++ *	@io_addr: mmio address to check 
++ *	@signature:  signature block
++ *	@length: length of signature
++ *
++ *	Perform a signature comparison with the mmio address io_addr. This
++ *	address should have been obtained by ioremap.
++ *	Returns 1 on a match.
++ */
++ 
++static inline int check_signature(volatile void __iomem * io_addr,
++	const unsigned char *signature, int length)
++{
++	int retval = 0;
++	do {
++		if (readb(io_addr) != *signature)
++			goto out;
++		io_addr++;
++		signature++;
++		length--;
++	} while (length);
++	retval = 1;
++out:
++	return retval;
++}
++
++/*
++ *	Cache management
++ *
++ *	This is needed for two cases:
++ *	1. Out of order aware processors
++ *	2. Accidentally out of order processors (PPro errata #51)
++ */
++ 
++#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
++
++static inline void flush_write_buffers(void)
++{
++	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
++}
++
++#define dma_cache_inv(_start,_size)		flush_write_buffers()
++#define dma_cache_wback(_start,_size)		flush_write_buffers()
++#define dma_cache_wback_inv(_start,_size)	flush_write_buffers()
++
++#else
++
++/* Nothing to do */
++
++#define dma_cache_inv(_start,_size)		do { } while (0)
++#define dma_cache_wback(_start,_size)		do { } while (0)
++#define dma_cache_wback_inv(_start,_size)	do { } while (0)
++#define flush_write_buffers()
++
++#endif
++
++#endif /* __KERNEL__ */
++
++#ifdef SLOW_IO_BY_JUMPING
++#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
++#else
++#define __SLOW_DOWN_IO "outb %%al,$0x80;"
++#endif
++
++static inline void slow_down_io(void) {
++	__asm__ __volatile__(
++		__SLOW_DOWN_IO
++#ifdef REALLY_SLOW_IO
++		__SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
++#endif
++		: : );
++}
++
++#ifdef CONFIG_X86_NUMAQ
++extern void *xquad_portio;    /* Where the IO area was mapped */
++#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
++#define __BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
++	if (xquad_portio) \
++		write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
++	else \
++		out##bwl##_local(value, port); \
++} \
++static inline void out##bwl(unsigned type value, int port) { \
++	out##bwl##_quad(value, port, 0); \
++} \
++static inline unsigned type in##bwl##_quad(int port, int quad) { \
++	if (xquad_portio) \
++		return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
++	else \
++		return in##bwl##_local(port); \
++} \
++static inline unsigned type in##bwl(int port) { \
++	return in##bwl##_quad(port, 0); \
++}
++#else
++#define __BUILDIO(bwl,bw,type) \
++static inline void out##bwl(unsigned type value, int port) { \
++	out##bwl##_local(value, port); \
++} \
++static inline unsigned type in##bwl(int port) { \
++	return in##bwl##_local(port); \
++}
++#endif
++
++
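++/*
++ * BUILDIO() generates the full family of port accessors for one access
++ * width: out{b,w,l}()/in{b,w,l}(), the pausing _p variants and the
++ * rep-string outs()/ins() versions. __BUILDIO() supplies the NUMAQ
++ * quad-aware wrappers when CONFIG_X86_NUMAQ is set, plain wrappers
++ * otherwise.
++ */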
++#define BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_local(unsigned type value, int port) { \
++	__asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
++} \
++static inline unsigned type in##bwl##_local(int port) { \
++	unsigned type value; \
++	__asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
++	return value; \
++} \
++static inline void out##bwl##_local_p(unsigned type value, int port) { \
++	out##bwl##_local(value, port); \
++	slow_down_io(); \
++} \
++static inline unsigned type in##bwl##_local_p(int port) { \
++	unsigned type value = in##bwl##_local(port); \
++	slow_down_io(); \
++	return value; \
++} \
++__BUILDIO(bwl,bw,type) \
++static inline void out##bwl##_p(unsigned type value, int port) { \
++	out##bwl(value, port); \
++	slow_down_io(); \
++} \
++static inline unsigned type in##bwl##_p(int port) { \
++	unsigned type value = in##bwl(port); \
++	slow_down_io(); \
++	return value; \
++} \
++static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
++	__asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
++} \
++static inline void ins##bwl(int port, void *addr, unsigned long count) { \
++	__asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
++}
++
++BUILDIO(b,b,char)
++BUILDIO(w,w,short)
++BUILDIO(l,,int)
++
++/* We will be supplying our own /dev/mem implementation */
++#define ARCH_HAS_DEV_MEM
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/kmap_types.h linux-2.6.12-xen/include/asm-xen/asm-i386/kmap_types.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/kmap_types.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/kmap_types.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,32 @@
++#ifndef _ASM_KMAP_TYPES_H
++#define _ASM_KMAP_TYPES_H
++
++#include <linux/config.h>
++
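++/*
++ * With CONFIG_DEBUG_HIGHMEM, D(n) inserts a __KM_FENCE_n padding entry
++ * between consecutive kmap types as a debugging aid; otherwise it expands
++ * to nothing.
++ */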
++#ifdef CONFIG_DEBUG_HIGHMEM
++# define D(n) __KM_FENCE_##n ,
++#else
++# define D(n)
++#endif
++
++enum km_type {
++D(0)	KM_BOUNCE_READ,
++D(1)	KM_SKB_SUNRPC_DATA,
++D(2)	KM_SKB_DATA_SOFTIRQ,
++D(3)	KM_USER0,
++D(4)	KM_USER1,
++D(5)	KM_BIO_SRC_IRQ,
++D(6)	KM_BIO_DST_IRQ,
++D(7)	KM_PTE0,
++D(8)	KM_PTE1,
++D(9)	KM_IRQ0,
++D(10)	KM_IRQ1,
++D(11)	KM_SOFTIRQ0,
++D(12)	KM_SOFTIRQ1,
++D(13)	KM_SWIOTLB,
++D(14)	KM_TYPE_NR
++};
++
++#undef D
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/irq_vectors.h linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,125 @@
++/*
++ * This file should contain #defines for all of the interrupt vector
++ * numbers used by this architecture.
++ *
++ * In addition, there are some standard defines:
++ *
++ *	FIRST_EXTERNAL_VECTOR:
++ *		The first free place for external interrupts
++ *
++ *	SYSCALL_VECTOR:
++ *		The IRQ vector a syscall makes the user to kernel transition
++ *		The IRQ vector through which a syscall makes the
++ *		user-to-kernel transition.
++ *	TIMER_IRQ:
++ *		The IRQ number the timer interrupt comes in at.
++ *
++ *	NR_IRQS:
++ *		The total number of interrupt vectors (including all the
++ *		architecture specific interrupts) needed.
++ *
++ */			
++#ifndef _ASM_IRQ_VECTORS_H
++#define _ASM_IRQ_VECTORS_H
++
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR	0x20
++
++#define SYSCALL_VECTOR		0x80
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++#if 0
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ *  some of the following vectors are 'rare', they are merged
++ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ *  TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ */
++#define SPURIOUS_APIC_VECTOR	0xff
++#define ERROR_APIC_VECTOR	0xfe
++#define INVALIDATE_TLB_VECTOR	0xfd
++#define RESCHEDULE_VECTOR	0xfc
++#define CALL_FUNCTION_VECTOR	0xfb
++
++#define THERMAL_APIC_VECTOR	0xf0
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR	0xef
++#endif
++
++#define SPURIOUS_APIC_VECTOR	0xff
++#define ERROR_APIC_VECTOR	0xfe
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR	0x31
++#define FIRST_SYSTEM_VECTOR	0xef
++
++/*
++ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
++ * Right now the APIC is mostly only used for SMP.
++ * 256 vectors is an architectural limit. (we can have
++ * more than 256 devices theoretically, but they will
++ * have to use shared interrupts)
++ * Since vectors 0x00-0x1f are used/reserved for the CPU,
++ * the usable vector space is 0x20-0xff (224 vectors)
++ */
++
++#define RESCHEDULE_VECTOR	0
++#define CALL_FUNCTION_VECTOR	1
++#define NR_IPIS			2
++
++/*
++ * The maximum number of vectors supported by i386 processors
++ * is limited to 256. For processors other than i386, NR_VECTORS
++ * should be changed accordingly.
++ */
++#define NR_VECTORS 256
++
++#define FPU_IRQ			13
++
++#define	FIRST_VM86_IRQ		3
++#define LAST_VM86_IRQ		15
++#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
++
++/*
++ * The flat IRQ space is divided into two regions:
++ *  1. A one-to-one mapping of real physical IRQs. This space is only used
++ *     if we have physical device-access privilege. This region is at the 
++ *     start of the IRQ space so that existing device drivers do not need
++ *     to be modified to translate physical IRQ numbers into our IRQ space.
++ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ *     are bound using the provided bind/unbind functions.
++ */
++
++#define PIRQ_BASE		0
++#define NR_PIRQS		256
++
++#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS		256
++
++#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS		NR_IRQS
++
++#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
++
++#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
++
++#endif /* _ASM_IRQ_VECTORS_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/mach_traps.h linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/mach_traps.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/mach_traps.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/mach_traps.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,33 @@
++/*
++ *  include/asm-xen/asm-i386/mach-xen/mach_traps.h
++ *
++ *  Machine specific NMI handling for Xen
++ */
++#ifndef _MACH_TRAPS_H
++#define _MACH_TRAPS_H
++
++#include <linux/bitops.h>
++#include <asm-xen/xen-public/nmi.h>
++
++static inline void clear_mem_error(unsigned char reason) {}
++static inline void clear_io_check_error(unsigned char reason) {}
++
++static inline unsigned char get_nmi_reason(void)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	unsigned char reason = 0;
++
++	/* construct a value which looks like it came from
++	 * port 0x61.
++	 */
++	if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++		reason |= 0x40;
++	if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++		reason |= 0x80;
++
++	return reason;
++}
++
++static inline void reassert_nmi(void) {}
++
++#endif /* !_MACH_TRAPS_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,43 @@
++/**
++ * machine_specific_memory_setup - Hook for machine specific memory setup.
++ *
++ * Description:
++ *	This is included late in kernel/setup.c so that it can make
++ *	use of all of the static functions.
++ **/
++
++static char * __init machine_specific_memory_setup(void)
++{
++	unsigned long max_pfn = xen_start_info->nr_pages;
++
++	e820.nr_map = 0;
++	add_memory_region(0, PFN_PHYS(max_pfn), E820_RAM);
++
++	return "Xen";
++}
++
++void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
++{
++	clear_bit(X86_FEATURE_VME, c->x86_capability);
++	clear_bit(X86_FEATURE_DE, c->x86_capability);
++	clear_bit(X86_FEATURE_PSE, c->x86_capability);
++	clear_bit(X86_FEATURE_PGE, c->x86_capability);
++	clear_bit(X86_FEATURE_SEP, c->x86_capability);
++	if (!(xen_start_info->flags & SIF_PRIVILEGED))
++		clear_bit(X86_FEATURE_MTRR, c->x86_capability);
++}
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++static void __init machine_specific_arch_setup(void)
++{
++	HYPERVISOR_set_callbacks(
++	    __KERNEL_CS, (unsigned long)hypervisor_callback,
++	    __KERNEL_CS, (unsigned long)failsafe_callback);
++
++	HYPERVISOR_nmi_op(XENNMI_register_callback, (unsigned long)&nmi);
++
++	machine_specific_modify_cpu_capabilities(&boot_cpu_data);
++}
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
++
++#define ARCH_SETUP machine_specific_arch_setup();
++
++static void __init machine_specific_arch_setup(void);
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mmu_context.h linux-2.6.12-xen/include/asm-xen/asm-i386/mmu_context.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/mmu_context.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,108 @@
++#ifndef __I386_SCHED_H
++#define __I386_SCHED_H
++
++#include <linux/config.h>
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++
++/*
++ * Used for LDT copy/destruction.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
++
++
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
++{
++#if 0 /* XEN: no lazy tlb */
++	unsigned cpu = smp_processor_id();
++	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
++#endif
++}
++
++#define prepare_arch_switch(rq,next)	__prepare_arch_switch()
++#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
++#define task_running(rq, p)		((rq)->curr == (p))
++
++static inline void __prepare_arch_switch(void)
++{
++	/*
++	 * Save away %fs and %gs. No need to save %es and %ds, as those
++	 * are always kernel segments while inside the kernel. Must
++	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
++	 */
++	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
++		: "=m" (current->thread.fs),
++		  "=m" (current->thread.gs));
++	asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
++		: : "r" (0) );
++}
++
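++/*
++ * Under Xen a pagetable must be pinned (validated and type-locked by the
++ * hypervisor) before it can be loaded into %cr3. mm_pin()/mm_unpin() do
++ * this for one mm; mm_pin_all() does it for every mm in the system, e.g.
++ * ahead of a suspend.
++ */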
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void switch_mm(struct mm_struct *prev,
++			     struct mm_struct *next,
++			     struct task_struct *tsk)
++{
++	int cpu = smp_processor_id();
++	struct mmuext_op _op[2], *op = _op;
++
++	if (likely(prev != next)) {
++		if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
++			mm_pin(next);
++
++		/* stop flush ipis for the previous mm */
++		cpu_clear(cpu, prev->cpu_vm_mask);
++#if 0 /* XEN: no lazy tlb */
++		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++		per_cpu(cpu_tlbstate, cpu).active_mm = next;
++#endif
++		cpu_set(cpu, next->cpu_vm_mask);
++
++		/* Re-load page tables: load_cr3(next->pgd) */
++		per_cpu(cur_pgd, cpu) = next->pgd;
++		op->cmd = MMUEXT_NEW_BASEPTR;
++		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++		op++;
++
++		/*
++		 * load the LDT, if the LDT is different:
++		 */
++		if (unlikely(prev->context.ldt != next->context.ldt)) {
++			/* load_LDT_nolock(&next->context, cpu) */
++			op->cmd = MMUEXT_SET_LDT;
++			op->arg1.linear_addr = (unsigned long)next->context.ldt;
++			op->arg2.nr_ents     = next->context.size;
++			op++;
++		}
++
++		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
++	}
++#if 0 /* XEN: no lazy tlb */
++	else {
++		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
++
++		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
++			/* We were in lazy tlb mode and leave_mm disabled 
++			 * tlb flush IPI delivery. We must reload %cr3.
++			 */
++			load_cr3(next->pgd);
++			load_LDT_nolock(&next->context, cpu);
++		}
++	}
++#endif
++}
++
++#define deactivate_mm(tsk, mm) \
++	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
++
++#define activate_mm(prev, next) \
++	switch_mm((prev),(next),NULL)
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mmu.h linux-2.6.12-xen/include/asm-xen/asm-i386/mmu.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/mmu.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/mmu.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,21 @@
++#ifndef __i386_MMU_H
++#define __i386_MMU_H
++
++#include <asm/semaphore.h>
++/*
++ * The i386 doesn't have a mmu context, but
++ * we put the segment information here.
++ *
++ * cpu_vm_mask is used to optimize ldt flushing.
++ */
++typedef struct { 
++	int size;
++	struct semaphore sem;
++	void *ldt;
++} mm_context_t;
++
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/page.h linux-2.6.12-xen/include/asm-xen/asm-i386/page.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/page.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/page.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,276 @@
++#ifndef _I386_PAGE_H
++#define _I386_PAGE_H
++
++/* PAGE_SHIFT determines the page size */
++#define PAGE_SHIFT	12
++#define PAGE_SIZE	(1UL << PAGE_SHIFT)
++#define PAGE_MASK	(~(PAGE_SIZE-1))
++
++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
++#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++
++#include <linux/config.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <asm/bug.h>
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/foreign_page.h>
++
++#define arch_free_page(_page,_order)			\
++({	int foreign = PageForeign(_page);		\
++	if (foreign)					\
++		(PageForeignDestructor(_page))(_page);	\
++	foreign;					\
++})
++#define HAVE_ARCH_FREE_PAGE
++
++#ifdef CONFIG_XEN_SCRUB_PAGES
++#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
++#else
++#define scrub_pages(_p,_n) ((void)0)
++#endif
++
++#ifdef CONFIG_X86_USE_3DNOW
++
++#include <asm/mmx.h>
++
++#define clear_page(page)	mmx_clear_page((void *)(page))
++#define copy_page(to,from)	mmx_copy_page(to,from)
++
++#else
++
++#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
++#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
++
++/*
++ *	On older X86 processors it's not a win to use MMX here it seems.
++ *	Maybe the K6-III ?
++ */
++ 
++#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
++#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
++
++#endif
++
++#define clear_user_page(page, vaddr, pg)	clear_page(page)
++#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
++
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY	(~0UL)
++#define FOREIGN_FRAME(m)	((m) | (1UL<<31))
++extern unsigned long *phys_to_machine_mapping;
++#define pfn_to_mfn(pfn)	\
++(phys_to_machine_mapping[(unsigned int)(pfn)] & ~(1UL<<31))
++#define	phys_to_machine_mapping_valid(pfn) \
++	(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++	unsigned long pfn;
++
++	/*
++	 * The array access can fail (e.g., device space beyond end of RAM).
++	 * In such cases it doesn't matter what we return (we return garbage),
++	 * but we must handle the fault without crashing!
++	 */
++	asm (
++		"1:	movl %1,%0\n"
++		"2:\n"
++		".section __ex_table,\"a\"\n"
++		"	.align 4\n"
++		"	.long 1b,2b\n"
++		".previous"
++		: "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );
++
++	return pfn;
++}
++
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++	phys_to_machine_mapping[pfn] = mfn;
++}
++
++/* Definitions for machine and pseudophysical addresses. */
++#ifdef CONFIG_X86_PAE
++typedef unsigned long long paddr_t;
++typedef unsigned long long maddr_t;
++#else
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++#endif
++
++static inline maddr_t phys_to_machine(paddr_t phys)
++{
++	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++	return machine;
++}
++static inline paddr_t machine_to_phys(maddr_t machine)
++{
++	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++	return phys;
++}
++
++/*
++ * These are used to make use of C type-checking..
++ */
++extern int nx_enabled;
++#ifdef CONFIG_X86_PAE
++extern unsigned long long __supported_pte_mask;
++typedef struct { unsigned long pte_low, pte_high; } pte_t;
++typedef struct { unsigned long long pmd; } pmd_t;
++typedef struct { unsigned long long pgd; } pgd_t;
++typedef struct { unsigned long long pgprot; } pgprot_t;
++#define __pte(x) ({ unsigned long long _x = (x);        \
++    if (_x & 1) _x = phys_to_machine(_x);               \
++    ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
++#define __pgd(x) ({ unsigned long long _x = (x); \
++    (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
++#define __pmd(x) ({ unsigned long long _x = (x); \
++    (((_x)&1) ? ((pmd_t) {phys_to_machine(_x)}) : ((pmd_t) {(_x)})); })
++static inline unsigned long long pte_val(pte_t x)
++{
++	unsigned long long ret;
++
++	if (x.pte_low) {
++		ret = x.pte_low | (unsigned long long)x.pte_high << 32;
++		ret = machine_to_phys(ret) | 1;
++	} else {
++		ret = 0;
++	}
++	return ret;
++}
++static inline unsigned long long pmd_val(pmd_t x)
++{
++	unsigned long long ret = x.pmd;
++	if (ret) ret = machine_to_phys(ret) | 1;
++	return ret;
++}
++static inline unsigned long long pgd_val(pgd_t x)
++{
++	unsigned long long ret = x.pgd;
++	if (ret) ret = machine_to_phys(ret) | 1;
++	return ret;
++}
++static inline unsigned long long pte_val_ma(pte_t x)
++{
++	return (unsigned long long)x.pte_high << 32 | x.pte_low;
++}
++#define HPAGE_SHIFT	21
++#else
++typedef struct { unsigned long pte_low; } pte_t;
++typedef struct { unsigned long pgd; } pgd_t;
++typedef struct { unsigned long pgprot; } pgprot_t;
++#define boot_pte_t pte_t /* or would you rather have a typedef */
++#define pte_val(x)	(((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
++			 (x).pte_low)
++#define pte_val_ma(x)	((x).pte_low)
++#define __pte(x) ({ unsigned long _x = (x); \
++    (((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
++#define __pgd(x) ({ unsigned long _x = (x); \
++    (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
++static inline unsigned long pgd_val(pgd_t x)
++{
++	unsigned long ret = x.pgd;
++	if (ret) ret = machine_to_phys(ret) | 1;
++	return ret;
++}
++#define HPAGE_SHIFT	22
++#endif
++#define PTE_MASK	PAGE_MASK
++
++#ifdef CONFIG_HUGETLB_PAGE
++#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
++#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
++#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
++#endif
++
++#define pgprot_val(x)	((x).pgprot)
++
++#define __pte_ma(x)	((pte_t) { (x) } )
++#define __pgprot(x)	((pgprot_t) { (x) } )
++
++#endif /* !__ASSEMBLY__ */
++
++/* to align the pointer to the (next) page boundary */
++#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
++
++/*
++ * This handles the memory map.. We could make this a config
++ * option, but too many people screw it up, and too few need
++ * it.
++ *
++ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
++ * a virtual address space of one gigabyte, which limits the
++ * amount of physical memory you can use to about 950MB. 
++ *
++ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
++ * and CONFIG_HIGHMEM64G options in the kernel configuration.
++ */
++
++#ifndef __ASSEMBLY__
++
++/*
++ * This much address space is reserved for vmalloc() and iomap()
++ * as well as fixmap mappings.
++ */
++extern unsigned int __VMALLOC_RESERVE;
++
++/* Pure 2^n version of get_order */
++static __inline__ int get_order(unsigned long size)
++{
++	int order;
++
++	size = (size-1) >> (PAGE_SHIFT-1);
++	order = -1;
++	do {
++		size >>= 1;
++		order++;
++	} while (size);
++	return order;
++}
++
++extern int sysctl_legacy_va_layout;
++
++#endif /* __ASSEMBLY__ */
++
++#ifdef __ASSEMBLY__
++#define __PAGE_OFFSET		(0xC0000000)
++#else
++#define __PAGE_OFFSET		(0xC0000000UL)
++#endif
++
++
++#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
++#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
++#define MAXMEM			(HYPERVISOR_VIRT_START-__PAGE_OFFSET-__VMALLOC_RESERVE)
++#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
++#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
++#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
++#ifndef CONFIG_DISCONTIGMEM
++#define pfn_to_page(pfn)	(mem_map + (pfn))
++#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
++#define pfn_valid(pfn)		((pfn) < max_mapnr)
++#endif /* !CONFIG_DISCONTIGMEM */
++#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++
++#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
++
++#define VM_DATA_DEFAULT_FLAGS \
++	(VM_READ | VM_WRITE | \
++	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
++		 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
++#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
++
++#endif /* __KERNEL__ */
++
++#endif /* _I386_PAGE_H */
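
The pseudophysical/machine conversions defined above (pfn_to_mfn, mfn_to_pfn, phys_to_machine, machine_to_phys) are table lookups combined with page-shift arithmetic: translate the frame number, keep the in-page offset. A self-contained illustration with toy four-entry tables (table contents chosen arbitrarily):

#include <stdio.h>
#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* toy translation tables: pfn <-> mfn (values are arbitrary examples) */
static unsigned long p2m[4] = { 7, 3, 9, 1 };
static unsigned long m2p[10];

static unsigned long phys_to_machine(unsigned long phys)
{
	unsigned long mfn = p2m[phys >> PAGE_SHIFT];
	return (mfn << PAGE_SHIFT) | (phys & ~PAGE_MASK);
}

static unsigned long machine_to_phys(unsigned long machine)
{
	unsigned long pfn = m2p[machine >> PAGE_SHIFT];
	return (pfn << PAGE_SHIFT) | (machine & ~PAGE_MASK);
}

int main(void)
{
	/* build the inverse mapping */
	for (unsigned long pfn = 0; pfn < 4; pfn++)
		m2p[p2m[pfn]] = pfn;

	unsigned long phys = (2UL << PAGE_SHIFT) | 0x123;  /* pfn 2, offset 0x123 */
	unsigned long machine = phys_to_machine(phys);

	printf("phys %#lx -> machine %#lx\n", phys, machine);
	assert(machine_to_phys(machine) == phys);	/* round-trips cleanly */
	return 0;
}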
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/param.h linux-2.6.12-xen/include/asm-xen/asm-i386/param.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/param.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/param.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,23 @@
++#ifndef _ASMi386_PARAM_H
++#define _ASMi386_PARAM_H
++
++#ifdef __KERNEL__
++# define HZ		100		/* Internal kernel timer frequency */
++# define USER_HZ	100		/* .. some user interfaces are in "ticks" */
++# define CLOCKS_PER_SEC		(USER_HZ)	/* like times() */
++#endif
++
++#ifndef HZ
++#define HZ 100
++#endif
++
++#define EXEC_PAGESIZE	4096
++
++#ifndef NOGROUP
++#define NOGROUP		(-1)
++#endif
++
++#define MAXHOSTNAMELEN	64	/* max length of hostname */
++#define COMMAND_LINE_SIZE 256
++
++#endif
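
With HZ and USER_HZ both set to 100, the conversion between internal jiffies and the user-visible clock ticks reported by interfaces such as times() is the identity, but the general relation is ticks = jiffies * USER_HZ / HZ. A trivial standalone illustration (ignoring the overflow handling the real kernel helper needs):

#include <stdio.h>

#define HZ       100	/* internal timer frequency */
#define USER_HZ  100	/* frequency exposed to userspace ("ticks") */

/* convert an internal jiffies count to user-visible clock ticks */
static unsigned long jiffies_to_clock_t(unsigned long jiffies)
{
	return jiffies * USER_HZ / HZ;
}

int main(void)
{
	unsigned long jiffies = 250;	/* 2.5 seconds at HZ=100 */
	printf("%lu jiffies = %lu user ticks = %.1f s\n",
	       jiffies, jiffies_to_clock_t(jiffies),
	       (double)jiffies / HZ);
	return 0;
}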
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pci.h linux-2.6.12-xen/include/asm-xen/asm-i386/pci.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/pci.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/pci.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,140 @@
++#ifndef __i386_PCI_H
++#define __i386_PCI_H
++
++#include <linux/config.h>
++
++#ifdef __KERNEL__
++#include <linux/mm.h>		/* for struct page */
++
++/* Can be used to override the logic in pci_scan_bus for skipping
++   already-configured bus numbers - to be used for buggy BIOSes
++   or architectures with incomplete PCI setup by the loader */
++
++#ifdef CONFIG_PCI
++extern unsigned int pcibios_assign_all_busses(void);
++#else
++#define pcibios_assign_all_busses()	0
++#endif
++#define pcibios_scan_all_fns(a, b)	0
++
++extern unsigned long pci_mem_start;
++#define PCIBIOS_MIN_IO		0x1000
++#define PCIBIOS_MIN_MEM		(pci_mem_start)
++
++#define PCIBIOS_MIN_CARDBUS_IO	0x4000
++
++void pcibios_config_init(void);
++struct pci_bus * pcibios_scan_root(int bus);
++
++void pcibios_set_master(struct pci_dev *dev);
++void pcibios_penalize_isa_irq(int irq);
++struct irq_routing_table *pcibios_get_irq_routing_table(void);
++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
++
++/* Dynamic DMA mapping stuff.
++ * i386 has everything mapped statically.
++ */
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <asm/scatterlist.h>
++#include <linux/string.h>
++#include <asm/io.h>
++
++struct pci_dev;
++
++#ifdef CONFIG_SWIOTLB
++
++
++/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
++#define PCI_DMA_BUS_IS_PHYS	(0)
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
++	dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
++	__u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME)			\
++	((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
++	(((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME)			\
++	((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
++	(((PTR)->LEN_NAME) = (VAL))
++
++#else
++
++/* The PCI address space does equal the physical memory
++ * address space.  The networking and block device layers use
++ * this boolean for bounce buffer decisions.
++ */
++#define PCI_DMA_BUS_IS_PHYS	(1)
++
++/* pci_unmap_{page,single} is a nop so... */
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
++#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
++#define pci_unmap_len(PTR, LEN_NAME)		(0)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
++
++#endif
++
++/* This is always fine. */
++#define pci_dac_dma_supported(pci_dev, mask)	(1)
++
++static inline dma64_addr_t
++pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
++{
++	return ((dma64_addr_t) page_to_phys(page) +
++		(dma64_addr_t) offset);
++}
++
++static inline struct page *
++pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++	return pfn_to_page(dma_addr >> PAGE_SHIFT);
++}
++
++static inline unsigned long
++pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++	return (dma_addr & ~PAGE_MASK);
++}
++
++static inline void
++pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++}
++
++static inline void
++pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++	flush_write_buffers();
++}
++
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++			       enum pci_mmap_state mmap_state, int write_combine);
++
++
++static inline void pcibios_add_platform_entries(struct pci_dev *dev)
++{
++}
++
++#endif /* __KERNEL__ */
++
++/* implement the pci_ DMA API in terms of the generic device dma_ one */
++#include <asm-generic/pci-dma-compat.h>
++
++/* generic pci stuff */
++#include <asm-generic/pci.h>
++
++/* On Xen we have to scan all functions since Xen hides bridges from
++ * us.  If a bridge is at fn=0 and that slot has a multifunction
++ * device, we won't find the additional devices without scanning all
++ * functions. */
++#undef pcibios_scan_all_fns
++#define pcibios_scan_all_fns(a, b)	1
++
++#endif /* __i386_PCI_H */
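
The DECLARE_PCI_UNMAP_ADDR/pci_unmap_addr family above lets a driver embed DMA-unmap bookkeeping in its own structures when SWIOTLB bounce buffering may be in use, while the same fields and accessors compile away to nothing when unmapping is a no-op. A minimal standalone sketch of that pattern; the struct and macro names below are illustrative, not from the patch:

#include <stdio.h>

/* Flip this to 0 to see the bookkeeping fields compile away entirely. */
#define NEED_UNMAP_STATE 1

#if NEED_UNMAP_STATE
#define DECLARE_UNMAP_ADDR(name)	unsigned long name;
#define unmap_addr_set(ptr, name, v)	((ptr)->name = (v))
#define unmap_addr(ptr, name)		((ptr)->name)
#else
#define DECLARE_UNMAP_ADDR(name)
#define unmap_addr_set(ptr, name, v)	do { } while (0)
#define unmap_addr(ptr, name)		(0)
#endif

/* hypothetical driver-private structure */
struct my_buffer {
	void *cpu_addr;
	DECLARE_UNMAP_ADDR(dma_handle)
};

int main(void)
{
	struct my_buffer buf = { .cpu_addr = (void *)0x1000 };

	unmap_addr_set(&buf, dma_handle, 0xabcd000);	/* remember the mapping */
	printf("dma handle to unmap later: %#lx\n",
	       (unsigned long)unmap_addr(&buf, dma_handle));
	return 0;
}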
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgalloc.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgalloc.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgalloc.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,64 @@
++#ifndef _I386_PGALLOC_H
++#define _I386_PGALLOC_H
++
++#include <linux/config.h>
++#include <asm/fixmap.h>
++#include <linux/threads.h>
++#include <linux/mm.h>		/* for struct page */
++#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
++
++/* Is this pagetable pinned? */
++#define PG_pinned	PG_arch_1
++
++#define pmd_populate_kernel(mm, pmd, pte) \
++		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
++
++#define pmd_populate(mm, pmd, pte) 					\
++do {									\
++	if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) {	\
++		if (!PageHighMem(pte))					\
++			BUG_ON(HYPERVISOR_update_va_mapping(		\
++			  (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT),\
++			  pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));\
++		set_pmd(pmd, __pmd(_PAGE_TABLE +			\
++			((unsigned long long)page_to_pfn(pte) <<	\
++				(unsigned long long) PAGE_SHIFT)));	\
++	} else {							\
++		*(pmd) = __pmd(_PAGE_TABLE +				\
++			((unsigned long long)page_to_pfn(pte) <<	\
++				(unsigned long long) PAGE_SHIFT));	\
++	}								\
++} while (0)
++
++/*
++ * Allocate and free page tables.
++ */
++extern pgd_t *pgd_alloc(struct mm_struct *);
++extern void pgd_free(pgd_t *pgd);
++
++extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
++extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
++
++static inline void pte_free_kernel(pte_t *pte)
++{
++	free_page((unsigned long)pte);
++	make_page_writable(pte, XENFEAT_writable_page_tables);
++}
++
++extern void pte_free(struct page *pte);
++
++#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++
++#ifdef CONFIG_X86_PAE
++/*
++ * In the PAE case we free the pmds as part of the pgd.
++ */
++#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
++#define pmd_free(x)			do { } while (0)
++#define __pmd_free_tlb(tlb,x)		do { } while (0)
++#define pud_populate(mm, pmd, pte)	BUG()
++#endif
++
++#define check_pgt_cache()	do { } while (0)
++
++#endif /* _I386_PGALLOC_H */
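
The pmd_populate() macro above encodes the central Xen page-table rule: if the pgd is pinned, a newly allocated pte page must first be remapped read-only in the kernel's linear mapping (via HYPERVISOR_update_va_mapping) before its frame is linked into the pmd; otherwise the plain store suffices. A toy sketch of that ordering, with stand-in functions in place of the real hypercalls:

#include <stdio.h>
#include <stdbool.h>

/* stand-ins for the real hypercall-backed primitives */
static void map_page_readonly(unsigned long pfn)
{
	printf("remap pfn %#lx read-only in the kernel map\n", pfn);
}

static void install_pte_page(unsigned long *pmd_entry, unsigned long pfn)
{
	*pmd_entry = (pfn << 12) | 0x067;	/* frame | _PAGE_TABLE-style bits */
	printf("pmd entry now %#lx\n", *pmd_entry);
}

static void populate(unsigned long *pmd_entry, unsigned long pte_pfn,
		     bool pgd_is_pinned)
{
	if (pgd_is_pinned)
		map_page_readonly(pte_pfn);	/* must happen before the link */
	install_pte_page(pmd_entry, pte_pfn);
}

int main(void)
{
	unsigned long pmd_entry = 0;

	populate(&pmd_entry, 0x42, true);	/* pinned case: RO remap first */
	populate(&pmd_entry, 0x43, false);	/* unpinned case: plain store */
	return 0;
}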
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-2level-defs.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-2level-defs.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-2level-defs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-2level-defs.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,21 @@
++#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
++#define _I386_PGTABLE_2LEVEL_DEFS_H
++
++#define HAVE_SHARED_KERNEL_PMD 0
++
++/*
++ * traditional i386 two-level paging structure:
++ */
++
++#define PGDIR_SHIFT	22
++#define PTRS_PER_PGD	1024
++#define PTRS_PER_PGD_NO_HV	(HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
++
++/*
++ * the i386 is two-level, so we don't really have any
++ * PMD directory physically.
++ */
++
++#define PTRS_PER_PTE	1024
++
++#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
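
With PGDIR_SHIFT of 22 and 1024 entries per level, a 32-bit virtual address decomposes into a 10-bit pgd index, a 10-bit pte index and a 12-bit page offset. A quick runnable check of that split (the sample address is arbitrary):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PGDIR_SHIFT	22
#define PTRS_PER_PGD	1024
#define PTRS_PER_PTE	1024

#define pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

int main(void)
{
	unsigned long addr = 0xc0123456UL;	/* some kernel virtual address */

	printf("addr %#lx: pgd index %lu, pte index %lu, offset %#lx\n",
	       addr, pgd_index(addr), pte_index(addr),
	       addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}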
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-2level.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-2level.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-2level.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-2level.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,119 @@
++#ifndef _I386_PGTABLE_2LEVEL_H
++#define _I386_PGTABLE_2LEVEL_H
++
++#include <asm-generic/pgtable-nopmd.h>
++
++#define pte_ERROR(e) \
++	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
++#define pgd_ERROR(e) \
++	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
++
++/*
++ * Certain architectures need to do special things when PTEs
++ * within a page table are directly modified.  Thus, the following
++ * hook is made available.
++ */
++#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
++
++#define set_pte_at(_mm,addr,ptep,pteval) do {				\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
++		set_pte((ptep), (pteval));				\
++} while (0)
++
++#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
++		set_pte((ptep), (pteval));				\
++		xen_invlpg((addr));					\
++	}								\
++} while (0)
++
++#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
++
++#ifndef CONFIG_XEN_SHADOW_MODE
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
++#else
++#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
++#endif
++
++#define ptep_get_and_clear(mm,addr,xp)	__pte_ma(xchg(&(xp)->pte_low, 0))
++#define pte_same(a, b)		((a).pte_low == (b).pte_low)
++/*
++ * We detect special mappings in one of two ways:
++ *  1. If the MFN is an I/O page then Xen will set the m2p entry
++ *     to be outside our maximum possible pseudophys range.
++ *  2. If the MFN belongs to a different domain then we will certainly
++ *     not have MFN in our p2m table. Conversely, if the page is ours,
++ *     then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range pointer.
++ *
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ * 
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
++ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ */
++#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
++#define pte_pfn(_pte)							\
++({									\
++	unsigned long mfn = pte_mfn(_pte);				\
++	unsigned long pfn = mfn_to_pfn(mfn);				\
++	if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
++		pfn = max_mapnr; /* special: force !pfn_valid() */	\
++	pfn;								\
++})
++
++#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
++
++#define pte_none(x)		(!(x).pte_low)
++#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#define pfn_pte_ma(pfn, prot)	__pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_page_kernel(pmd) \
++((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
++
++/*
++ * All present user pages are user-executable:
++ */
++static inline int pte_exec(pte_t pte)
++{
++	return pte_user(pte);
++}
++
++/*
++ * All present pages are kernel-executable:
++ */
++static inline int pte_exec_kernel(pte_t pte)
++{
++	return 1;
++}
++
++/*
++ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
++ * into this range:
++ */
++#define PTE_FILE_MAX_BITS	29
++
++#define pte_to_pgoff(pte) \
++	((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
++
++#define pgoff_to_pte(off) \
++	((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x)			(((x).val >> 1) & 0x1f)
++#define __swp_offset(x)			((x).val >> 8)
++#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
++#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
++#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
++
++#endif /* _I386_PGTABLE_2LEVEL_H */
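
The __swp_* macros above pack a 5-bit swap type and the swap offset into the same 32-bit word as a non-present pte, avoiding bit 0 (_PAGE_PRESENT) and bits 6/7 (_PAGE_FILE/_PAGE_PROTNONE). A standalone round-trip check of that packing (values chosen arbitrarily):

#include <stdio.h>
#include <assert.h>

/* same packing as the 2-level __swp_* macros: type in bits 1..5, offset from bit 8 */
#define swp_entry(type, offset)	(((type) << 1) | ((offset) << 8))
#define swp_type(val)		(((val) >> 1) & 0x1f)
#define swp_offset(val)		((val) >> 8)

int main(void)
{
	unsigned long type = 3, offset = 0x1234;
	unsigned long val = swp_entry(type, offset);

	printf("swap entry %#lx -> type %lu, offset %#lx\n",
	       val, swp_type(val), swp_offset(val));
	assert(swp_type(val) == type && swp_offset(val) == offset);
	assert((val & 1) == 0);		/* never looks like a present pte */
	return 0;
}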
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-3level-defs.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-3level-defs.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-3level-defs.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-3level-defs.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,25 @@
++#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
++#define _I386_PGTABLE_3LEVEL_DEFS_H
++
++#define HAVE_SHARED_KERNEL_PMD 0
++
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT	30
++#define PTRS_PER_PGD	4
++#define PTRS_PER_PGD_NO_HV 4
++
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT	21
++#define PTRS_PER_PMD	512
++
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE	512
++
++#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
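
Under PAE the 32-bit virtual address is instead sliced 2/9/9/12: two bits select one of the four pgd entries, nine bits each index the pmd and pte levels, and twelve bits remain as the page offset. The same kind of check as for the two-level case, using the constants above:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PGDIR_SHIFT	30
#define PTRS_PER_PGD	4
#define PTRS_PER_PMD	512
#define PTRS_PER_PTE	512

int main(void)
{
	unsigned long addr = 0xc0123456UL;	/* arbitrary sample address */

	printf("pgd %lu, pmd %lu, pte %lu, offset %#lx\n",
	       (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1),
	       (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1),
	       (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1),
	       addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}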
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-3level.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-3level.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-3level.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-3level.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,200 @@
++#ifndef _I386_PGTABLE_3LEVEL_H
++#define _I386_PGTABLE_3LEVEL_H
++
++#include <asm-generic/pgtable-nopud.h>
++
++/*
++ * Intel Physical Address Extension (PAE) Mode - three-level page
++ * tables on PPro+ CPUs.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
++ */
++
++#define pte_ERROR(e) \
++	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
++#define pmd_ERROR(e) \
++	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
++#define pgd_ERROR(e) \
++	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
++
++#define pud_none(pud)				0
++#define pud_bad(pud)				0
++#define pud_present(pud)			1
++
++/*
++ * Is the pte executable?
++ */
++static inline int pte_x(pte_t pte)
++{
++	return !(pte_val(pte) & _PAGE_NX);
++}
++
++/*
++ * All present user-pages with !NX bit are user-executable:
++ */
++static inline int pte_exec(pte_t pte)
++{
++	return pte_user(pte) && pte_x(pte);
++}
++/*
++ * All present pages with !NX bit are kernel-executable:
++ */
++static inline int pte_exec_kernel(pte_t pte)
++{
++	return pte_x(pte);
++}
++
++/* Rules for using set_pte: the pte being assigned *must* be
++ * either not present or in a state where the hardware will
++ * not attempt to update the pte.  In places where this is
++ * not possible, use pte_get_and_clear to obtain the old pte
++ * value and then use set_pte to update it.  -ben
++ */
++#define __HAVE_ARCH_SET_PTE_ATOMIC
++
++#if 1
++/* use writable pagetables */
++static inline void set_pte(pte_t *ptep, pte_t pte)
++{
++	ptep->pte_high = pte.pte_high;
++	smp_wmb();
++	ptep->pte_low = pte.pte_low;
++}
++# define set_pte_atomic(pteptr,pteval) \
++		set_64bit((unsigned long long *)(pteptr),pte_val_ma(pteval))
++#else
++/* no writable pagetables */
++# define set_pte(pteptr,pteval)				\
++		xen_l1_entry_update((pteptr), (pteval))
++# define set_pte_atomic(pteptr,pteval) set_pte(pteptr,pteval)
++#endif
++
++#define set_pte_at(_mm,addr,ptep,pteval) do {				\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
++		set_pte((ptep), (pteval));				\
++} while (0)
++
++#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
++		set_pte((ptep), (pteval));				\
++		xen_invlpg((addr));					\
++	}								\
++} while (0)
++
++#ifdef CONFIG_XEN_SHADOW_MODE
++# define set_pmd(pmdptr,pmdval) \
++		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
++# define set_pud(pudptr,pudval) \
++		set_64bit((unsigned long long *)(pudptr),pud_val(pudval))
++#else
++# define set_pmd(pmdptr,pmdval)				\
++		xen_l2_entry_update((pmdptr), (pmdval))
++# define set_pud(pudptr,pudval) \
++		xen_l3_entry_update((pudptr), (pudval))
++#endif
++
++/*
++ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
++ * the TLB via cr3 if the top-level pgd is changed...
++ * We do not let the generic code free and clear pgd entries due to
++ * this erratum.
++ */
++static inline void pud_clear (pud_t * pud) { }
++
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_page_kernel(pmd) \
++((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
++
++#define pud_page(pud) \
++((struct page *) __va(pud_val(pud) & PAGE_MASK))
++
++#define pud_page_kernel(pud) \
++((unsigned long) __va(pud_val(pud) & PAGE_MASK))
++
++
++/* Find an entry in the second-level page table.. */
++#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
++			pmd_index(address))
++
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++	pte_t res;
++
++	/* xchg acts as a barrier before the setting of the high bits */
++	res.pte_low = xchg(&ptep->pte_low, 0);
++	res.pte_high = ptep->pte_high;
++	ptep->pte_high = 0;
++
++	return res;
++}
++
++static inline int pte_same(pte_t a, pte_t b)
++{
++	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
++}
++
++#define pte_page(x)	pfn_to_page(pte_pfn(x))
++
++static inline int pte_none(pte_t pte)
++{
++	return !pte.pte_low && !pte.pte_high;
++}
++
++#define pte_mfn(_pte) ( ((_pte).pte_low >> PAGE_SHIFT) |\
++		        (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)) )
++#define pte_pfn(_pte)                                                  \
++({                                                                     \
++       unsigned long mfn = pte_mfn(_pte);                              \
++       unsigned long pfn = mfn_to_pfn(mfn);                            \
++       if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
++               pfn = max_mapnr; /* special: force !pfn_valid() */      \
++       pfn;                                                            \
++})
++
++extern unsigned long long __supported_pte_mask;
++
++static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
++{
++	pte_t pte;
++
++	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
++					(pgprot_val(pgprot) >> 32);
++	pte.pte_high &= (__supported_pte_mask >> 32);
++	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
++							__supported_pte_mask;
++	return pte;
++}
++
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
++{
++	return pfn_pte_ma(pfn_to_mfn(page_nr), pgprot);
++}
++
++static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
++{
++	BUG(); panic("needs review");
++	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \
++			pgprot_val(pgprot)) & __supported_pte_mask);
++}
++
++/*
++ * Bits 0, 6 and 7 are taken in the low part of the pte,
++ * put the 32 bits of offset into the high part.
++ */
++#define pte_to_pgoff(pte) ((pte).pte_high)
++#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
++#define PTE_FILE_MAX_BITS       32
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x)			(((x).val) & 0x1f)
++#define __swp_offset(x)			((x).val >> 5)
++#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
++#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
++#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
++
++#define __pmd_free_tlb(tlb, x)		do { } while (0)
++
++#endif /* _I386_PGTABLE_3LEVEL_H */
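
Because a PAE pte is 64 bits wide while stores here happen 32 bits at a time, the code above keeps separate pte_low/pte_high halves, assembles them as in pfn_pte_ma(), and in set_pte() deliberately writes the high word before the low word so that no intermediate state looks like a valid present pte. A userspace sketch of the split and reassembly (frame number and flag values are arbitrary, and the NX/mask handling of the real code is omitted):

#include <stdio.h>
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct pte { uint32_t pte_low, pte_high; };

/* build a pte from a (possibly >4GB) frame number plus low-word flags,
 * splitting the 64-bit value across the two halves as pfn_pte_ma() does */
static struct pte make_pte(uint64_t mfn, uint32_t flags)
{
	struct pte p;
	p.pte_high = (uint32_t)(mfn >> (32 - PAGE_SHIFT));
	p.pte_low  = (uint32_t)(mfn << PAGE_SHIFT) | flags;
	return p;
}

static uint64_t pte_val(struct pte p)
{
	return ((uint64_t)p.pte_high << 32) | p.pte_low;
}

int main(void)
{
	uint64_t mfn = 0x123456789ULL;		/* a frame above the 4GB line */
	struct pte p = make_pte(mfn, 0x67);	/* present|rw|user|accessed|dirty */

	assert((pte_val(p) >> PAGE_SHIFT) == mfn);	/* frame recovered intact */
	printf("pte = %#llx (high %#x, low %#x)\n",
	       (unsigned long long)pte_val(p), p.pte_high, p.pte_low);
	return 0;
}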
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,478 @@
++#ifndef _I386_PGTABLE_H
++#define _I386_PGTABLE_H
++
++#include <linux/config.h>
++#include <asm/hypervisor.h>
++
++/*
++ * The Linux memory management assumes a three-level page table setup. On
++ * the i386, we use that, but "fold" the mid level into the top-level page
++ * table, so that we physically have the same two-level page table as the
++ * i386 mmu expects.
++ *
++ * This file contains the functions and defines necessary to modify and use
++ * the i386 page table tree.
++ */
++#ifndef __ASSEMBLY__
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <linux/threads.h>
++
++#ifndef _I386_BITOPS_H
++#include <asm/bitops.h>
++#endif
++
++#include <linux/slab.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++extern unsigned long empty_zero_page[1024];
++extern pgd_t *swapper_pg_dir;
++extern kmem_cache_t *pgd_cache;
++extern kmem_cache_t *pmd_cache;
++extern spinlock_t pgd_lock;
++extern struct page *pgd_list;
++
++void pmd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_dtor(void *, kmem_cache_t *, unsigned long);
++void pgtable_cache_init(void);
++void paging_init(void);
++
++/*
++ * The Linux x86 paging architecture is 'compile-time dual-mode', it
++ * implements both the traditional 2-level x86 page tables and the
++ * newer 3-level PAE-mode page tables.
++ */
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level-defs.h>
++# define PMD_SIZE	(1UL << PMD_SHIFT)
++# define PMD_MASK	(~(PMD_SIZE-1))
++#else
++# include <asm/pgtable-2level-defs.h>
++#endif
++
++#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
++#define PGDIR_MASK	(~(PGDIR_SIZE-1))
++
++#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
++#define FIRST_USER_ADDRESS	0
++
++#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
++#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
++
++#define TWOLEVEL_PGDIR_SHIFT	22
++#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
++#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
++
++/* Just any arbitrary offset to the start of the vmalloc VM area: the
++ * current 8MB value just means that there will be a 8MB "hole" after the
++ * physical memory until the kernel virtual memory starts.  That means that
++ * any out-of-bounds memory accesses will hopefully be caught.
++ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
++ * area for the same reason. ;)
++ */
++#define VMALLOC_OFFSET	(8*1024*1024)
++#define VMALLOC_START	(((unsigned long) high_memory + vmalloc_earlyreserve + \
++			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
++#ifdef CONFIG_HIGHMEM
++# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
++#else
++# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
++#endif
++
++/*
++ * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
++ * of the Pentium details, but assuming intel did the straightforward
++ * thing, this bit set in the page directory entry just means that
++ * the page directory entry points directly to a 4MB-aligned block of
++ * memory. 
++ */
++#define _PAGE_BIT_PRESENT	0
++#define _PAGE_BIT_RW		1
++#define _PAGE_BIT_USER		2
++#define _PAGE_BIT_PWT		3
++#define _PAGE_BIT_PCD		4
++#define _PAGE_BIT_ACCESSED	5
++#define _PAGE_BIT_DIRTY		6
++#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
++#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
++#define _PAGE_BIT_UNUSED2	10
++#define _PAGE_BIT_UNUSED3	11
++#define _PAGE_BIT_NX		63
++
++#define _PAGE_PRESENT	0x001
++#define _PAGE_RW	0x002
++#define _PAGE_USER	0x004
++#define _PAGE_PWT	0x008
++#define _PAGE_PCD	0x010
++#define _PAGE_ACCESSED	0x020
++#define _PAGE_DIRTY	0x040
++#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
++#define _PAGE_UNUSED1	0x200	/* available for programmer */
++#define _PAGE_UNUSED2	0x400
++#define _PAGE_UNUSED3	0x800
++
++#define _PAGE_FILE	0x040	/* set:pagecache unset:swap */
++#define _PAGE_PROTNONE	0x080	/* If not present */
++#ifdef CONFIG_X86_PAE
++#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
++#else
++#define _PAGE_NX	0
++#endif
++
++#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
++
++#define PAGE_NONE \
++	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED \
++	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++
++#define PAGE_SHARED_EXEC \
++	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC \
++	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY_EXEC \
++	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY \
++	PAGE_COPY_NOEXEC
++#define PAGE_READONLY \
++	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC \
++	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++
++#define _PAGE_KERNEL \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
++#define _PAGE_KERNEL_EXEC \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
++
++extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
++#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
++#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
++#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
++#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
++
++#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
++#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
++
++/*
++ * The i386 can't do page protection for execute; it treats execute
++ * permission the same as read. Also, write permissions imply read permissions.
++ * This is the closest we can get..
++ */
++#define __P000	PAGE_NONE
++#define __P001	PAGE_READONLY
++#define __P010	PAGE_COPY
++#define __P011	PAGE_COPY
++#define __P100	PAGE_READONLY_EXEC
++#define __P101	PAGE_READONLY_EXEC
++#define __P110	PAGE_COPY_EXEC
++#define __P111	PAGE_COPY_EXEC
++
++#define __S000	PAGE_NONE
++#define __S001	PAGE_READONLY
++#define __S010	PAGE_SHARED
++#define __S011	PAGE_SHARED
++#define __S100	PAGE_READONLY_EXEC
++#define __S101	PAGE_READONLY_EXEC
++#define __S110	PAGE_SHARED_EXEC
++#define __S111	PAGE_SHARED_EXEC
++
++/*
++ * Define this if things work differently on an i386 and an i486:
++ * it will (on an i486) warn about kernel memory accesses that are
++ * done without an 'access_ok(VERIFY_WRITE,..)'
++ */
++#undef TEST_ACCESS_OK
++
++/* The boot page tables (all created as a single array) */
++extern unsigned long pg0[];
++
++#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
++#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
++
++#define pmd_none(x)	(!pmd_val(x))
++/* pmd_present doesn't just test the _PAGE_PRESENT bit since writable
++   page tables (wr.p.t.) can temporarily clear it. */
++#define pmd_present(x)	(pmd_val(x))
++#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
++#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
++
++
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
++
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not..
++ */
++static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
++static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
++static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
++static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
++static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
++
++/*
++ * The following only works if pte_present() is not true.
++ */
++static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }
++
++static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
++static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
++static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
++static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
++
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level.h>
++#else
++# include <asm/pgtable-2level.h>
++#endif
++
++static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
++{
++	if (!pte_dirty(*ptep))
++		return 0;
++	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
++}
++
++static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
++{
++	if (!pte_young(*ptep))
++		return 0;
++	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
++}
++
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++	if (pte_write(*ptep))
++		clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
++}
++
++/*
++ * Macro to mark a page protection value as "uncacheable".  On processors which do not support
++ * it, this is a no-op.
++ */
++#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
++				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
++
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
++
++#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
++#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
++
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{
++	pte.pte_low &= _PAGE_CHG_MASK;
++	pte.pte_low |= pgprot_val(newprot);
++#ifdef CONFIG_X86_PAE
++	/*
++	 * Chop off the NX bit (if present), and add the NX portion of
++	 * the newprot (if present):
++	 */
++	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
++	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
++					(__supported_pte_mask >> 32);
++#endif
++	return pte;
++}
++
++#define page_pte(page) page_pte_prot(page, __pgprot(0))
++
++#define pmd_large(pmd) \
++((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
++
++/*
++ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
++ *
++ * this macro returns the index of the entry in the pgd page which would
++ * control the given virtual address
++ */
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_index_k(addr) pgd_index(addr)
++
++/*
++ * pgd_offset() returns a (pgd_t *)
++ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
++ */
++#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
++
++/*
++ * a shortcut which implies the use of the kernel's pgd, instead
++ * of a process's
++ */
++#define pgd_offset_k(address) pgd_offset(&init_mm, address)
++
++/*
++ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
++ *
++ * this macro returns the index of the entry in the pmd page which would
++ * control the given virtual address
++ */
++#define pmd_index(address) \
++		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
++
++/*
++ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
++ *
++ * this macro returns the index of the entry in the pte page which would
++ * control the given virtual address
++ */
++#define pte_index(address) \
++		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) \
++	((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
++
++/*
++ * Helper function that returns the kernel pagetable entry controlling
++ * the virtual address 'address'. NULL means no pagetable entry present.
++ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
++ * as a pte too.
++ */
++extern pte_t *lookup_address(unsigned long address);
++
++/*
++ * Make a given kernel text page executable/non-executable.
++ * Returns the previous executability setting of that page (which
++ * is used to restore the previous state). Used by the SMP bootup code.
++ * NOTE: this is an __init function for security reasons.
++ */
++#ifdef CONFIG_X86_PAE
++ extern int set_kernel_exec(unsigned long vaddr, int enable);
++#else
++ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
++#endif
++
++extern void noexec_setup(const char *str);
++
++#if defined(CONFIG_HIGHPTE)
++#define pte_offset_map(dir, address) \
++	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
++	 pte_index(address))
++#define pte_offset_map_nested(dir, address) \
++	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
++	 pte_index(address))
++#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
++#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
++#else
++#define pte_offset_map(dir, address) \
++	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
++#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
++#define pte_unmap(pte) do { } while (0)
++#define pte_unmap_nested(pte) do { } while (0)
++#endif
++
++/*
++ * The i386 doesn't have any external MMU info: the kernel page
++ * tables contain all the necessary information.
++ *
++ * Also, we only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPU's that might be updating the dirty
++ * bit at the same time.
++ */
++#define update_mmu_cache(vma,address,pte) do { } while (0)
++#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
++	do {								  \
++		if (__dirty) {						  \
++		        if ( likely((__vma)->vm_mm == current->mm) ) {    \
++			    BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
++			} else {                                          \
++                            xen_l1_entry_update((__ptep), (__entry)); \
++			    flush_tlb_page((__vma), (__address));         \
++			}                                                 \
++		}							  \
++	} while (0)
++
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(__vma, __address, __ptep, __entry)		\
++do {				  					\
++	ptep_set_access_flags(__vma, __address, __ptep, __entry, 1);	\
++} while (0)
++
++#include <asm-xen/features.h>
++void make_lowmem_page_readonly(void *va, unsigned int feature);
++void make_lowmem_page_writable(void *va, unsigned int feature);
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
++
++#define virt_to_ptep(__va)						\
++({									\
++	pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));		\
++	pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));	\
++	pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));	\
++	pte_offset_kernel(__pmd, (unsigned long)(__va));		\
++})
++
++#define arbitrary_virt_to_machine(__va)					\
++({									\
++	maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
++	m | ((unsigned long)(__va) & (PAGE_SIZE-1));			\
++})
++
++#endif /* !__ASSEMBLY__ */
++
++#ifndef CONFIG_DISCONTIGMEM
++#define kern_addr_valid(addr)	(1)
++#endif /* !CONFIG_DISCONTIGMEM */
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++                           unsigned long address, 
++                           unsigned long mfn,
++                           unsigned long size, 
++                           pgprot_t prot,
++                           domid_t  domid);
++int direct_kernel_remap_pfn_range(unsigned long address, 
++				  unsigned long mfn,
++				  unsigned long size, 
++				  pgprot_t prot,
++				  domid_t  domid);
++int create_lookup_pte_addr(struct mm_struct *mm,
++                           unsigned long address,
++                           uint64_t *ptep);
++int touch_pte_range(struct mm_struct *mm,
++                    unsigned long address,
++                    unsigned long size);
++
++#define io_remap_page_range(vma,from,phys,size,prot) \
++direct_remap_pfn_range(vma,from,(phys)>>PAGE_SHIFT,size,prot,DOMID_IO)
++
++#define io_remap_pfn_range(vma,from,pfn,size,prot) \
++direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
++
++#define MK_IOSPACE_PFN(space, pfn)	(pfn)
++#define GET_IOSPACE(pfn)		0
++#define GET_PFN(pfn)			(pfn)
++
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
++
++#endif /* _I386_PGTABLE_H */
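
pte_modify() above swaps a pte's protection bits while preserving everything covered by _PAGE_CHG_MASK, i.e. the frame number plus the accessed and dirty bits. A small standalone check of that masking, reusing the low-word bit values defined above:

#include <stdio.h>
#include <assert.h>

#define PAGE_MASK	(~0xfffUL)
#define _PAGE_PRESENT	0x001UL
#define _PAGE_RW	0x002UL
#define _PAGE_USER	0x004UL
#define _PAGE_ACCESSED	0x020UL
#define _PAGE_DIRTY	0x040UL
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/* keep frame + accessed/dirty, replace the protection bits */
static unsigned long pte_modify(unsigned long pte, unsigned long newprot)
{
	return (pte & _PAGE_CHG_MASK) | newprot;
}

int main(void)
{
	unsigned long pte = 0x12345000UL | _PAGE_PRESENT | _PAGE_RW |
			    _PAGE_ACCESSED | _PAGE_DIRTY;   /* writable, dirty pte */
	unsigned long ro  = _PAGE_PRESENT | _PAGE_USER;     /* read-only user prot */
	unsigned long newpte = pte_modify(pte, ro);

	assert((newpte & PAGE_MASK) == 0x12345000UL);   /* frame preserved */
	assert(newpte & _PAGE_DIRTY);                   /* dirty preserved */
	assert(!(newpte & _PAGE_RW));                   /* RW dropped */
	printf("old pte %#lx -> new pte %#lx\n", pte, newpte);
	return 0;
}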
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/processor.h linux-2.6.12-xen/include/asm-xen/asm-i386/processor.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/processor.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/processor.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,703 @@
++/*
++ * include/asm-i386/processor.h
++ *
++ * Copyright (C) 1994 Linus Torvalds
++ */
++
++#ifndef __ASM_I386_PROCESSOR_H
++#define __ASM_I386_PROCESSOR_H
++
++#include <asm/vm86.h>
++#include <asm/math_emu.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <asm/msr.h>
++#include <asm/system.h>
++#include <linux/cache.h>
++#include <linux/config.h>
++#include <linux/threads.h>
++#include <asm/percpu.h>
++
++/* flag for disabling the tsc */
++extern int tsc_disable;
++
++struct desc_struct {
++	unsigned long a,b;
++};
++
++#define desc_empty(desc) \
++		(!((desc)->a + (desc)->b))
++
++#define desc_equal(desc1, desc2) \
++		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
++/*
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
++ */
++#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
++
++/*
++ *  CPU type and hardware bug flags. Kept separately for each CPU.
++ *  Members of this structure are referenced in head.S, so think twice
++ *  before touching them. [mj]
++ */
++
++struct cpuinfo_x86 {
++	__u8	x86;		/* CPU family */
++	__u8	x86_vendor;	/* CPU vendor */
++	__u8	x86_model;
++	__u8	x86_mask;
++	char	wp_works_ok;	/* It doesn't on 386's */
++	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
++	char	hard_math;
++	char	rfu;
++       	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
++	unsigned long	x86_capability[NCAPINTS];
++	char	x86_vendor_id[16];
++	char	x86_model_id[64];
++	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
++				    call  */
++	int 	x86_cache_alignment;	/* In bytes */
++	int	fdiv_bug;
++	int	f00f_bug;
++	int	coma_bug;
++	unsigned long loops_per_jiffy;
++	unsigned char x86_num_cores;
++} __attribute__((__aligned__(SMP_CACHE_BYTES)));
++
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NSC 8
++#define X86_VENDOR_NUM 9
++#define X86_VENDOR_UNKNOWN 0xff
++
++/*
++ * capabilities of CPUs
++ */
++
++extern struct cpuinfo_x86 boot_cpu_data;
++extern struct cpuinfo_x86 new_cpu_data;
++extern struct tss_struct doublefault_tss;
++DECLARE_PER_CPU(struct tss_struct, init_tss);
++DECLARE_PER_CPU(pgd_t *, cur_pgd);
++
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
++#endif
++
++extern	int phys_proc_id[NR_CPUS];
++extern	int cpu_core_id[NR_CPUS];
++extern char ignore_fpu_irq;
++
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++
++#ifdef CONFIG_X86_HT
++extern void detect_ht(struct cpuinfo_x86 *c);
++#else
++static inline void detect_ht(struct cpuinfo_x86 *c) {}
++#endif
++
++/*
++ * EFLAGS bits
++ */
++#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
++
++/*
++ * Generic CPUID function
++ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
++ * resulting in stale register contents being returned.
++ */
++static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
++{
++	__asm__("cpuid"
++		: "=a" (*eax),
++		  "=b" (*ebx),
++		  "=c" (*ecx),
++		  "=d" (*edx)
++		: "0" (op), "c"(0));
++}
++
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++	       	int *edx)
++{
++	__asm__("cpuid"
++		: "=a" (*eax),
++		  "=b" (*ebx),
++		  "=c" (*ecx),
++		  "=d" (*edx)
++		: "0" (op), "c" (count));
++}
++
++/*
++ * CPUID functions returning a single datum
++ */
++static inline unsigned int cpuid_eax(unsigned int op)
++{
++	unsigned int eax;
++
++	__asm__("cpuid"
++		: "=a" (eax)
++		: "0" (op)
++		: "bx", "cx", "dx");
++	return eax;
++}
++static inline unsigned int cpuid_ebx(unsigned int op)
++{
++	unsigned int eax, ebx;
++
++	__asm__("cpuid"
++		: "=a" (eax), "=b" (ebx)
++		: "0" (op)
++		: "cx", "dx" );
++	return ebx;
++}
++static inline unsigned int cpuid_ecx(unsigned int op)
++{
++	unsigned int eax, ecx;
++
++	__asm__("cpuid"
++		: "=a" (eax), "=c" (ecx)
++		: "0" (op)
++		: "bx", "dx" );
++	return ecx;
++}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++	unsigned int eax, edx;
++
++	__asm__("cpuid"
++		: "=a" (eax), "=d" (edx)
++		: "0" (op)
++		: "bx", "cx");
++	return edx;
++}
++
++#define load_cr3(pgdir) do {				\
++	xen_pt_switch(__pa(pgdir));			\
++	per_cpu(cur_pgd, smp_processor_id()) = pgdir;	\
++} while (/* CONSTCOND */0)
++
++
++/*
++ * Intel CPU features in CR4
++ */
++#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
++#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
++#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
++#define X86_CR4_DE		0x0008	/* enable debugging extensions */
++#define X86_CR4_PSE		0x0010	/* enable page size extensions */
++#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
++#define X86_CR4_MCE		0x0040	/* Machine check enable */
++#define X86_CR4_PGE		0x0080	/* enable global pages */
++#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
++
++/*
++ * Save the cr4 feature set we're using (ie
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPU's that boot up
++ * after us can get the correct flags.
++ */
++extern unsigned long mmu_cr4_features;
++
++static inline void set_in_cr4 (unsigned long mask)
++{
++	mmu_cr4_features |= mask;
++	switch (mask) {
++	case X86_CR4_OSFXSR:
++	case X86_CR4_OSXMMEXCPT:
++		break;
++	default:
++		do {
++			const char *msg = "Xen unsupported cr4 update\n";
++			(void)HYPERVISOR_console_io(
++				CONSOLEIO_write, __builtin_strlen(msg),
++				(char *)msg);
++			BUG();
++		} while (0);
++	}
++}
++
++static inline void clear_in_cr4 (unsigned long mask)
++{
++	mmu_cr4_features &= ~mask;
++	__asm__("movl %%cr4,%%eax\n\t"
++		"andl %0,%%eax\n\t"
++		"movl %%eax,%%cr4\n"
++		: : "irg" (~mask)
++		:"ax");
++}
++
++/*
++ *      NSC/Cyrix CPU configuration register indexes
++ */
++
++#define CX86_PCR0 0x20
++#define CX86_GCR  0xb8
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_PCR1 0xf0
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
++
++/*
++ *      NSC/Cyrix CPU indexed register access macros
++ */
++
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++
++#define setCx86(reg, data) do { \
++	outb((reg), 0x22); \
++	outb((data), 0x23); \
++} while (0)
++
++static inline void __monitor(const void *eax, unsigned long ecx,
++		unsigned long edx)
++{
++	/* "monitor %eax,%ecx,%edx;" */
++	asm volatile(
++		".byte 0x0f,0x01,0xc8;"
++		: :"a" (eax), "c" (ecx), "d"(edx));
++}
++
++static inline void __mwait(unsigned long eax, unsigned long ecx)
++{
++	/* "mwait %eax,%ecx;" */
++	asm volatile(
++		".byte 0x0f,0x01,0xc9;"
++		: :"a" (eax), "c" (ecx));
++}
++
++/* from system description table in BIOS.  Mostly for MCA use, but
++others may find it useful. */
++extern unsigned int machine_id;
++extern unsigned int machine_submodel_id;
++extern unsigned int BIOS_revision;
++extern unsigned int mca_pentium_flag;
++
++/* Boot loader type from the setup header */
++extern int bootloader_type;
++
++/*
++ * User space process size: 3GB (default).
++ */
++#define TASK_SIZE	(PAGE_OFFSET)
++
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmap's.
++ */
++#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
++
++#define HAVE_ARCH_PICK_MMAP_LAYOUT
++
++/*
++ * Size of io_bitmap.
++ */
++#define IO_BITMAP_BITS  65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#define INVALID_IO_BITMAP_OFFSET 0x8000
++#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
++
++struct i387_fsave_struct {
++	long	cwd;
++	long	swd;
++	long	twd;
++	long	fip;
++	long	fcs;
++	long	foo;
++	long	fos;
++	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
++	long	status;		/* software status information */
++};
++
++struct i387_fxsave_struct {
++	unsigned short	cwd;
++	unsigned short	swd;
++	unsigned short	twd;
++	unsigned short	fop;
++	long	fip;
++	long	fcs;
++	long	foo;
++	long	fos;
++	long	mxcsr;
++	long	mxcsr_mask;
++	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
++	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
++	long	padding[56];
++} __attribute__ ((aligned (16)));
++
++struct i387_soft_struct {
++	long	cwd;
++	long	swd;
++	long	twd;
++	long	fip;
++	long	fcs;
++	long	foo;
++	long	fos;
++	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
++	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
++	struct info	*info;
++	unsigned long	entry_eip;
++};
++
++union i387_union {
++	struct i387_fsave_struct	fsave;
++	struct i387_fxsave_struct	fxsave;
++	struct i387_soft_struct soft;
++};
++
++typedef struct {
++	unsigned long seg;
++} mm_segment_t;
++
++struct thread_struct;
++
++struct tss_struct {
++	unsigned short	back_link,__blh;
++	unsigned long	esp0;
++	unsigned short	ss0,__ss0h;
++	unsigned long	esp1;
++	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
++	unsigned long	esp2;
++	unsigned short	ss2,__ss2h;
++	unsigned long	__cr3;
++	unsigned long	eip;
++	unsigned long	eflags;
++	unsigned long	eax,ecx,edx,ebx;
++	unsigned long	esp;
++	unsigned long	ebp;
++	unsigned long	esi;
++	unsigned long	edi;
++	unsigned short	es, __esh;
++	unsigned short	cs, __csh;
++	unsigned short	ss, __ssh;
++	unsigned short	ds, __dsh;
++	unsigned short	fs, __fsh;
++	unsigned short	gs, __gsh;
++	unsigned short	ldt, __ldth;
++	unsigned short	trace, io_bitmap_base;
++	/*
++	 * The extra 1 is there because the CPU will access an
++	 * additional byte beyond the end of the IO permission
++	 * bitmap. The extra byte must be all 1 bits, and must
++	 * be within the limit.
++	 */
++	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
++	/*
++	 * Cache the current maximum and the last task that used the bitmap:
++	 */
++	unsigned long io_bitmap_max;
++	struct thread_struct *io_bitmap_owner;
++	/*
++	 * pads the TSS to be cacheline-aligned (size is 0x100)
++	 */
++	unsigned long __cacheline_filler[35];
++	/*
++	 * .. and then another 0x100 bytes for emergency kernel stack
++	 */
++	unsigned long stack[64];
++} __attribute__((packed));
++
++#define ARCH_MIN_TASKALIGN	16
++
++struct thread_struct {
++/* cached TLS descriptors. */
++	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
++	unsigned long	esp0;
++	unsigned long	sysenter_cs;
++	unsigned long	eip;
++	unsigned long	esp;
++	unsigned long	fs;
++	unsigned long	gs;
++	unsigned int	io_pl;
++/* Hardware debugging registers */
++	unsigned long	debugreg[8];  /* %%db0-7 debug registers */
++/* fault info */
++	unsigned long	cr2, trap_no, error_code;
++/* floating point info */
++	union i387_union	i387;
++/* virtual 86 mode info */
++	struct vm86_struct __user * vm86_info;
++	unsigned long		screen_bitmap;
++	unsigned long		v86flags, v86mask, saved_esp0;
++	unsigned int		saved_fs, saved_gs;
++/* IO permissions */
++	unsigned long	*io_bitmap_ptr;
++/* max allowed port in the bitmap, in bytes: */
++	unsigned long	io_bitmap_max;
++};
++
++#define INIT_THREAD  {							\
++	.vm86_info = NULL,						\
++	.sysenter_cs = __KERNEL_CS,					\
++	.io_bitmap_ptr = NULL,						\
++}
++
++/*
++ * Note that the .io_bitmap member must be extra-big. This is because
++ * the CPU will access an additional byte beyond the end of the IO
++ * permission bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit.
++ */
++#define INIT_TSS  {							\
++	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
++	.ss0		= __KERNEL_DS,					\
++	.ss1		= __KERNEL_CS,					\
++	.ldt		= GDT_ENTRY_LDT,				\
++	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
++	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
++}
++
++static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
++{
++	tss->esp0 = thread->esp0;
++	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
++	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
++		tss->ss1 = thread->sysenter_cs;
++		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
++	}
++	HYPERVISOR_stack_switch(tss->ss0, tss->esp0);
++}
++
++#define start_thread(regs, new_eip, new_esp) do {		\
++	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
++	set_fs(USER_DS);					\
++	regs->xds = __USER_DS;					\
++	regs->xes = __USER_DS;					\
++	regs->xss = __USER_DS;					\
++	regs->xcs = __USER_CS;					\
++	regs->eip = new_eip;					\
++	regs->esp = new_esp;					\
++} while (0)
++
++/*
++ * This special macro can be used to load a debugging register
++ */
++#define loaddebug(thread,register) \
++		HYPERVISOR_set_debugreg((register), \
++					((thread)->debugreg[register]))
++
++/* Forward declaration, a strange C thing */
++struct task_struct;
++struct mm_struct;
++
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
++
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
++
++/*
++ * create a kernel thread without removing it from tasklists
++ */
++extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
++
++extern unsigned long thread_saved_pc(struct task_struct *tsk);
++void show_trace(struct task_struct *task, unsigned long *stack);
++
++unsigned long get_wchan(struct task_struct *p);
++
++#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
++#define KSTK_TOP(info)                                                 \
++({                                                                     \
++       unsigned long *__ptr = (unsigned long *)(info);                 \
++       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
++})
++
++#define task_pt_regs(task)                                             \
++({                                                                     \
++       struct pt_regs *__regs__;                                       \
++       __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info);     \
++       __regs__ - 1;                                                   \
++})
++
++#define KSTK_EIP(task) (task_pt_regs(task)->eip)
++#define KSTK_ESP(task) (task_pt_regs(task)->esp)
++
++
++struct microcode_header {
++	unsigned int hdrver;
++	unsigned int rev;
++	unsigned int date;
++	unsigned int sig;
++	unsigned int cksum;
++	unsigned int ldrver;
++	unsigned int pf;
++	unsigned int datasize;
++	unsigned int totalsize;
++	unsigned int reserved[3];
++};
++
++struct microcode {
++	struct microcode_header hdr;
++	unsigned int bits[0];
++};
++
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
++
++/* microcode format is extended from prescott processors */
++struct extended_signature {
++	unsigned int sig;
++	unsigned int pf;
++	unsigned int cksum;
++};
++
++struct extended_sigtable {
++	unsigned int count;
++	unsigned int cksum;
++	unsigned int reserved[3];
++	struct extended_signature sigs[0];
++};
++/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
++#define MICROCODE_IOCFREE	_IO('6',0)
++
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++static inline void rep_nop(void)
++{
++	__asm__ __volatile__("rep;nop": : :"memory");
++}
++
++#define cpu_relax()	rep_nop()
++
++/* generic versions from gas */
++#define GENERIC_NOP1	".byte 0x90\n"
++#define GENERIC_NOP2    	".byte 0x89,0xf6\n"
++#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"
++#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"
++#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4
++#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7
++
++/* Opteron nops */
++#define K8_NOP1 GENERIC_NOP1
++#define K8_NOP2	".byte 0x66,0x90\n" 
++#define K8_NOP3	".byte 0x66,0x66,0x90\n" 
++#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" 
++#define K8_NOP5	K8_NOP3 K8_NOP2 
++#define K8_NOP6	K8_NOP3 K8_NOP3
++#define K8_NOP7	K8_NOP4 K8_NOP3
++#define K8_NOP8	K8_NOP4 K8_NOP4
++
++/* K7 nops */
++/* uses eax dependencies (arbitrary choice) */
++#define K7_NOP1  GENERIC_NOP1
++#define K7_NOP2	".byte 0x8b,0xc0\n" 
++#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
++#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
++#define K7_NOP5	K7_NOP4 ASM_NOP1
++#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
++#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
++#define K7_NOP8        K7_NOP7 ASM_NOP1
++
++#ifdef CONFIG_MK8
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
++#elif defined(CONFIG_MK7)
++#define ASM_NOP1 K7_NOP1
++#define ASM_NOP2 K7_NOP2
++#define ASM_NOP3 K7_NOP3
++#define ASM_NOP4 K7_NOP4
++#define ASM_NOP5 K7_NOP5
++#define ASM_NOP6 K7_NOP6
++#define ASM_NOP7 K7_NOP7
++#define ASM_NOP8 K7_NOP8
++#else
++#define ASM_NOP1 GENERIC_NOP1
++#define ASM_NOP2 GENERIC_NOP2
++#define ASM_NOP3 GENERIC_NOP3
++#define ASM_NOP4 GENERIC_NOP4
++#define ASM_NOP5 GENERIC_NOP5
++#define ASM_NOP6 GENERIC_NOP6
++#define ASM_NOP7 GENERIC_NOP7
++#define ASM_NOP8 GENERIC_NOP8
++#endif
++
++#define ASM_NOP_MAX 8
++
++/* Prefetch instructions for Pentium III and AMD Athlon */
++/* It's not worth caring about 3dnow! prefetches for the K6
++   because they are microcoded there and very slow.
++   However, we don't do prefetches for pre-XP Athlons currently;
++   that should be fixed. */
++#define ARCH_HAS_PREFETCH
++extern inline void prefetch(const void *x)
++{
++	alternative_input(ASM_NOP4,
++			  "prefetchnta (%1)",
++			  X86_FEATURE_XMM,
++			  "r" (x));
++}
++
++#define ARCH_HAS_PREFETCH
++#define ARCH_HAS_PREFETCHW
++#define ARCH_HAS_SPINLOCK_PREFETCH
++
++/* 3dnow! prefetch to get an exclusive cache line. Useful for 
++   spinlocks to avoid one state transition in the cache coherency protocol. */
++extern inline void prefetchw(const void *x)
++{
++	alternative_input(ASM_NOP4,
++			  "prefetchw (%1)",
++			  X86_FEATURE_3DNOW,
++			  "r" (x));
++}
++#define spin_lock_prefetch(x)	prefetchw(x)
++
++extern void select_idle_routine(const struct cpuinfo_x86 *c);
++
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++
++extern unsigned long boot_option_idle_override;
++
++#endif /* __ASM_I386_PROCESSOR_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/ptrace.h linux-2.6.12-xen/include/asm-xen/asm-i386/ptrace.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/ptrace.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/ptrace.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,69 @@
++#ifndef _I386_PTRACE_H
++#define _I386_PTRACE_H
++
++#define EBX 0
++#define ECX 1
++#define EDX 2
++#define ESI 3
++#define EDI 4
++#define EBP 5
++#define EAX 6
++#define DS 7
++#define ES 8
++#define FS 9
++#define GS 10
++#define ORIG_EAX 11
++#define EIP 12
++#define CS  13
++#define EFL 14
++#define UESP 15
++#define SS   16
++#define FRAME_SIZE 17
++
++/* this struct defines the way the registers are stored on the 
++   stack during a system call. */
++
++struct pt_regs {
++	long ebx;
++	long ecx;
++	long edx;
++	long esi;
++	long edi;
++	long ebp;
++	long eax;
++	int  xds;
++	int  xes;
++	long orig_eax;
++	long eip;
++	int  xcs;
++	long eflags;
++	long esp;
++	int  xss;
++};
++
++/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
++#define PTRACE_GETREGS            12
++#define PTRACE_SETREGS            13
++#define PTRACE_GETFPREGS          14
++#define PTRACE_SETFPREGS          15
++#define PTRACE_GETFPXREGS         18
++#define PTRACE_SETFPXREGS         19
++
++#define PTRACE_OLDSETOPTIONS         21
++
++#define PTRACE_GET_THREAD_AREA    25
++#define PTRACE_SET_THREAD_AREA    26
++
++#ifdef __KERNEL__
++struct task_struct;
++extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
++#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (2 & (regs)->xcs))
++#define instruction_pointer(regs) ((regs)->eip)
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++extern unsigned long profile_pc(struct pt_regs *regs);
++#else
++#define profile_pc(regs) instruction_pointer(regs)
++#endif
++#endif
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/scatterlist.h linux-2.6.12-xen/include/asm-xen/asm-i386/scatterlist.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/scatterlist.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/scatterlist.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,22 @@
++#ifndef _I386_SCATTERLIST_H
++#define _I386_SCATTERLIST_H
++
++struct scatterlist {
++    struct page		*page;
++    unsigned int	offset;
++    unsigned int	length;
++    dma_addr_t		dma_address;
++    unsigned int	dma_length;
++};
++
++/* These macros should be used after a pci_map_sg call has been done
++ * to get bus addresses of each of the SG entries and their lengths.
++ * You should only work with the number of sg entries pci_map_sg
++ * returns.
++ */
++#define sg_dma_address(sg)	((sg)->dma_address)
++#define sg_dma_len(sg)		((sg)->dma_length)
++
++#define ISA_DMA_THRESHOLD (0x00ffffff)
++
++#endif /* !(_I386_SCATTERLIST_H) */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/segment.h linux-2.6.12-xen/include/asm-xen/asm-i386/segment.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/segment.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/segment.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,99 @@
++#ifndef _ASM_SEGMENT_H
++#define _ASM_SEGMENT_H
++
++/*
++ * The layout of the per-CPU GDT under Linux:
++ *
++ *   0 - null
++ *   1 - reserved
++ *   2 - reserved
++ *   3 - reserved
++ *
++ *   4 - unused			<==== new cacheline
++ *   5 - unused
++ *
++ *  ------- start of TLS (Thread-Local Storage) segments:
++ *
++ *   6 - TLS segment #1			[ glibc's TLS segment ]
++ *   7 - TLS segment #2			[ Wine's %fs Win32 segment ]
++ *   8 - TLS segment #3
++ *   9 - reserved
++ *  10 - reserved
++ *  11 - reserved
++ *
++ *  ------- start of kernel segments:
++ *
++ *  12 - kernel code segment		<==== new cacheline
++ *  13 - kernel data segment
++ *  14 - default user CS
++ *  15 - default user DS
++ *  16 - TSS
++ *  17 - LDT
++ *  18 - PNPBIOS support (16->32 gate)
++ *  19 - PNPBIOS support
++ *  20 - PNPBIOS support
++ *  21 - PNPBIOS support
++ *  22 - PNPBIOS support
++ *  23 - APM BIOS support
++ *  24 - APM BIOS support
++ *  25 - APM BIOS support 
++ *
++ *  26 - ESPFIX small SS
++ *  27 - unused
++ *  28 - unused
++ *  29 - unused
++ *  30 - unused
++ *  31 - TSS for double fault handler
++ */
++#define GDT_ENTRY_TLS_ENTRIES	3
++#define GDT_ENTRY_TLS_MIN	6
++#define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
++
++#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
++
++#define GDT_ENTRY_DEFAULT_USER_CS	14
++#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
++
++#define GDT_ENTRY_DEFAULT_USER_DS	15
++#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
++
++#define GDT_ENTRY_KERNEL_BASE	12
++
++#define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE + 0)
++#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8 + 1)
++
++#define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE + 1)
++#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8 + 1)
++
++#define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE + 4)
++#define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE + 5)
++
++#define GDT_ENTRY_PNPBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 6)
++#define GDT_ENTRY_APMBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 11)
++
++#define GDT_ENTRY_ESPFIX_SS		(GDT_ENTRY_KERNEL_BASE + 14)
++#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
++
++#define GDT_ENTRY_DOUBLEFAULT_TSS	31
++
++/*
++ * The GDT has 32 entries
++ */
++#define GDT_ENTRIES 32
++
++#define GDT_SIZE (GDT_ENTRIES * 8)
++
++/* Simple and small GDT entries for booting only */
++
++#define __BOOT_CS	FLAT_KERNEL_CS
++
++#define __BOOT_DS	FLAT_KERNEL_DS
++
++/*
++ * The interrupt descriptor table has room for 256 idt's,
++ * the global descriptor table is dependent on the number
++ * of tasks we can have..
++ */
++#define IDT_ENTRIES 256
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/setup.h linux-2.6.12-xen/include/asm-xen/asm-i386/setup.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/setup.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/setup.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,66 @@
++/*
++ *	Just a place holder. We don't want to have to test x86 before
++ *	we include stuff
++ */
++
++#ifndef _i386_SETUP_H
++#define _i386_SETUP_H
++
++#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
++#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
++#define PFN_PHYS(x)	((unsigned long long)(x) << PAGE_SHIFT)
++
++/*
++ * Reserved space for vmalloc and iomap - defined in asm/page.h
++ */
++#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
++#define MAX_NONPAE_PFN	(1 << 20)
++
++#define PARAM_SIZE 4096
++#define COMMAND_LINE_SIZE 256
++
++#define OLD_CL_MAGIC_ADDR	0x90020
++#define OLD_CL_MAGIC		0xA33F
++#define OLD_CL_BASE_ADDR	0x90000
++#define OLD_CL_OFFSET		0x90022
++#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
++
++#ifndef __ASSEMBLY__
++/*
++ * This is set up by the setup-routine at boot-time
++ */
++extern unsigned char boot_params[PARAM_SIZE];
++
++#define PARAM	(boot_params)
++#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
++#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
++#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
++#define E820_MAP_NR (*(char*) (PARAM+E820NR))
++#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
++#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
++#define IST_INFO   (*(struct ist_info *) (PARAM+0x60))
++#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
++#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
++#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
++#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
++#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
++#define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0)))
++#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
++#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
++#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
++#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
++#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
++#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
++#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
++#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
++#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
++#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
++#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
++#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _i386_SETUP_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/smp.h linux-2.6.12-xen/include/asm-xen/asm-i386/smp.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/smp.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/smp.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,93 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
++
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#endif
++#endif
++
++#define BAD_APICID 0xFFu
++#ifdef CONFIG_SMP
++#ifndef __ASSEMBLY__
++
++/*
++ * Private routines/data
++ */
++ 
++extern void smp_alloc_memory(void);
++extern int pic_mode;
++extern int smp_num_siblings;
++extern cpumask_t cpu_sibling_map[];
++extern cpumask_t cpu_core_map[];
++
++extern void smp_flush_tlb(void);
++extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
++extern void smp_invalidate_rcv(void);		/* Process an NMI */
++extern void (*mtrr_hook) (void);
++extern void zap_low_mappings (void);
++
++#define MAX_APICID 256
++extern u8 x86_cpu_to_apicid[];
++
++/*
++ * This function is needed by all SMP systems. It must _always_ be valid
++ * from the initial startup. We map APIC_BASE very early in page_setup(),
++ * so this is correct in the x86 case.
++ */
++#define __smp_processor_id() (current_thread_info()->cpu)
++
++extern cpumask_t cpu_possible_map;
++#define cpu_callin_map cpu_possible_map
++
++/* We don't mark CPUs online until __cpu_up(), so we need another measure */
++static inline int num_booting_cpus(void)
++{
++	return cpus_weight(cpu_possible_map);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++
++#ifdef APIC_DEFINITION
++extern int hard_smp_processor_id(void);
++#else
++#include <mach_apicdef.h>
++static inline int hard_smp_processor_id(void)
++{
++	/* we don't want to mark this access volatile - bad code generation */
++	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
++}
++#endif
++
++static __inline int logical_smp_processor_id(void)
++{
++	/* we don't want to mark this access volatile - bad code generation */
++	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++
++#endif
++
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
++#endif /* !__ASSEMBLY__ */
++
++#define NO_PROC_ID		0xFF		/* No processor magic marker */
++
++#endif
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/spinlock.h linux-2.6.12-xen/include/asm-xen/asm-i386/spinlock.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/spinlock.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/spinlock.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,277 @@
++#ifndef __ASM_SPINLOCK_H
++#define __ASM_SPINLOCK_H
++
++#include <asm/atomic.h>
++#include <asm/rwlock.h>
++#include <asm/page.h>
++#include <linux/config.h>
++#include <linux/compiler.h>
++#include <asm/smp_alt.h>
++
++asmlinkage int printk(const char * fmt, ...)
++	__attribute__ ((format (printf, 1, 2)));
++
++/*
++ * Your basic SMP spinlocks, allowing only a single CPU anywhere
++ */
++
++typedef struct {
++	volatile unsigned int slock;
++#ifdef CONFIG_DEBUG_SPINLOCK
++	unsigned magic;
++#endif
++#ifdef CONFIG_PREEMPT
++	unsigned int break_lock;
++#endif
++} spinlock_t;
++
++#define SPINLOCK_MAGIC	0xdead4ead
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
++#else
++#define SPINLOCK_MAGIC_INIT	/* */
++#endif
++
++#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
++
++#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
++
++/*
++ * Simple spin lock operations.  There are two variants, one clears IRQ's
++ * on the local processor, one does not.
++ *
++ * We make no fairness assumptions. They have a cost.
++ */
++
++#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
++#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
++
++#define spin_lock_string \
++        "1:\n" \
++	LOCK \
++	"decb %0\n\t" \
++	"jns 3f\n" \
++	"2:\t" \
++	"rep;nop\n\t" \
++	"cmpb $0,%0\n\t" \
++	"jle 2b\n\t" \
++	"jmp 1b\n" \
++	"3:\n\t"
++
++#define spin_lock_string_flags \
++        "1:\n" \
++	LOCK \
++	"decb %0\n\t" \
++	"jns 4f\n\t" \
++	"2:\t" \
++	"testl $0x200, %1\n\t" \
++	"jz 3f\n\t" \
++	"#sti\n\t" \
++	"3:\t" \
++	"rep;nop\n\t" \
++	"cmpb $0, %0\n\t" \
++	"jle 3b\n\t" \
++	"#cli\n\t" \
++	"jmp 1b\n" \
++	"4:\n\t"
++
++/*
++ * This works. Despite all the confusion.
++ * (except on PPro SMP or if we are using OOSTORE)
++ * (PPro errata 66, 92)
++ */
++
++#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
++
++#define spin_unlock_string \
++	"movb $1,%0" \
++		:"=m" (lock->slock) : : "memory"
++
++
++static inline void _raw_spin_unlock(spinlock_t *lock)
++{
++#ifdef CONFIG_DEBUG_SPINLOCK
++	BUG_ON(lock->magic != SPINLOCK_MAGIC);
++	BUG_ON(!spin_is_locked(lock));
++#endif
++	__asm__ __volatile__(
++		spin_unlock_string
++	);
++}
++
++#else
++
++#define spin_unlock_string \
++	"xchgb %b0, %1" \
++		:"=q" (oldval), "=m" (lock->slock) \
++		:"0" (oldval) : "memory"
++
++static inline void _raw_spin_unlock(spinlock_t *lock)
++{
++	char oldval = 1;
++#ifdef CONFIG_DEBUG_SPINLOCK
++	BUG_ON(lock->magic != SPINLOCK_MAGIC);
++	BUG_ON(!spin_is_locked(lock));
++#endif
++	__asm__ __volatile__(
++		spin_unlock_string
++	);
++}
++
++#endif
++
++static inline int _raw_spin_trylock(spinlock_t *lock)
++{
++	char oldval;
++#ifdef CONFIG_SMP_ALTERNATIVES
++	__asm__ __volatile__(
++		"1:movb %1,%b0\n"
++		"movb $0,%1\n"
++		"2:"
++		".section __smp_alternatives,\"a\"\n"
++		".long 1b\n"
++		".long 3f\n"
++		".previous\n"
++		".section __smp_replacements,\"a\"\n"
++		"3: .byte 2b - 1b\n"
++		".byte 5f-4f\n"
++		".byte 0\n"
++		".byte 6f-5f\n"
++		".byte -1\n"
++		"4: xchgb %b0,%1\n"
++		"5: movb %1,%b0\n"
++		"movb $0,%1\n"
++		"6:\n"
++		".previous\n"
++		:"=q" (oldval), "=m" (lock->slock)
++		:"0" (0) : "memory");
++#else
++	__asm__ __volatile__(
++		"xchgb %b0,%1\n"
++		:"=q" (oldval), "=m" (lock->slock)
++		:"0" (0) : "memory");
++#endif
++	return oldval > 0;
++}
++
++static inline void _raw_spin_lock(spinlock_t *lock)
++{
++#ifdef CONFIG_DEBUG_SPINLOCK
++	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
++		printk("eip: %p\n", __builtin_return_address(0));
++		BUG();
++	}
++#endif
++	__asm__ __volatile__(
++		spin_lock_string
++		:"=m" (lock->slock) : : "memory");
++}
++
++static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
++{
++#ifdef CONFIG_DEBUG_SPINLOCK
++	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
++		printk("eip: %p\n", __builtin_return_address(0));
++		BUG();
++	}
++#endif
++	__asm__ __volatile__(
++		spin_lock_string_flags
++		:"=m" (lock->slock) : "r" (flags) : "memory");
++}
++
++/*
++ * Read-write spinlocks, allowing multiple readers
++ * but only one writer.
++ *
++ * NOTE! it is quite common to have readers in interrupts
++ * but no interrupt writers. For those circumstances we
++ * can "mix" irq-safe locks - any writer needs to get a
++ * irq-safe write-lock, but readers can get non-irqsafe
++ * read-locks.
++ */
++typedef struct {
++	volatile unsigned int lock;
++#ifdef CONFIG_DEBUG_SPINLOCK
++	unsigned magic;
++#endif
++#ifdef CONFIG_PREEMPT
++	unsigned int break_lock;
++#endif
++} rwlock_t;
++
++#define RWLOCK_MAGIC	0xdeaf1eed
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
++#else
++#define RWLOCK_MAGIC_INIT	/* */
++#endif
++
++#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
++
++#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
++
++/**
++ * read_can_lock - would read_trylock() succeed?
++ * @lock: the rwlock in question.
++ */
++#define read_can_lock(x) ((int)(x)->lock > 0)
++
++/**
++ * write_can_lock - would write_trylock() succeed?
++ * @lock: the rwlock in question.
++ */
++#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
++
++/*
++ * On x86, we implement read-write locks as a 32-bit counter
++ * with the high bit (sign) being the "contended" bit.
++ *
++ * The inline assembly is non-obvious. Think about it.
++ *
++ * Changed to use the same technique as rw semaphores.  See
++ * semaphore.h for details.  -ben
++ */
++/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
++
++static inline void _raw_read_lock(rwlock_t *rw)
++{
++#ifdef CONFIG_DEBUG_SPINLOCK
++	BUG_ON(rw->magic != RWLOCK_MAGIC);
++#endif
++	__build_read_lock(rw, "__read_lock_failed");
++}
++
++static inline void _raw_write_lock(rwlock_t *rw)
++{
++#ifdef CONFIG_DEBUG_SPINLOCK
++	BUG_ON(rw->magic != RWLOCK_MAGIC);
++#endif
++	__build_write_lock(rw, "__write_lock_failed");
++}
++
++#define _raw_read_unlock(rw)	asm volatile(LOCK "incl %0" :"=m" ((rw)->lock) : : "memory")
++#define _raw_write_unlock(rw)	asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
++
++static inline int _raw_read_trylock(rwlock_t *lock)
++{
++	atomic_t *count = (atomic_t *)lock;
++	atomic_dec(count);
++	if (atomic_read(count) >= 0)
++		return 1;
++	atomic_inc(count);
++	return 0;
++}
++
++static inline int _raw_write_trylock(rwlock_t *lock)
++{
++	atomic_t *count = (atomic_t *)lock;
++	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
++		return 1;
++	atomic_add(RW_LOCK_BIAS, count);
++	return 0;
++}
++
++#endif /* __ASM_SPINLOCK_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/swiotlb.h linux-2.6.12-xen/include/asm-xen/asm-i386/swiotlb.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/swiotlb.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/swiotlb.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,42 @@
++#ifndef _ASM_SWIOTLB_H
++#define _ASM_SWIOTLB_H 1
++
++#include <linux/config.h>
++
++/* SWIOTLB interface */
++
++extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
++				      int dir);
++extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
++				  size_t size, int dir);
++extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
++					 dma_addr_t dev_addr,
++					 size_t size, int dir);
++extern void swiotlb_sync_single_for_device(struct device *hwdev,
++					    dma_addr_t dev_addr,
++					    size_t size, int dir);
++extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
++				     struct scatterlist *sg, int nelems,
++				     int dir);
++extern void swiotlb_sync_sg_for_device(struct device *hwdev,
++					struct scatterlist *sg, int nelems,
++					int dir);
++extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
++		      int nents, int direction);
++extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++			 int nents, int direction);
++extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
++extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
++                                   unsigned long offset, size_t size,
++                                   enum dma_data_direction direction);
++extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++                               size_t size, enum dma_data_direction direction);
++extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
++
++#ifdef CONFIG_SWIOTLB
++extern int swiotlb;
++#else
++#define swiotlb 0
++#endif
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/synch_bitops.h linux-2.6.12-xen/include/asm-xen/asm-i386/synch_bitops.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/synch_bitops.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,140 @@
++#ifndef __XEN_SYNCH_BITOPS_H__
++#define __XEN_SYNCH_BITOPS_H__
++
++/*
++ * Copyright 1992, Linus Torvalds.
++ * Heavily modified to provide guaranteed strong synchronisation
++ * when communicating with Xen or other guest OSes running on other CPUs.
++ */
++
++#include <linux/config.h>
++
++#define ADDR (*(volatile long *) addr)
++
++static __inline__ void synch_set_bit(int nr, volatile void * addr)
++{
++    __asm__ __volatile__ ( 
++        "lock btsl %1,%0"
++        : "=m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ void synch_clear_bit(int nr, volatile void * addr)
++{
++    __asm__ __volatile__ (
++        "lock btrl %1,%0"
++        : "=m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ void synch_change_bit(int nr, volatile void * addr)
++{
++    __asm__ __volatile__ (
++        "lock btcl %1,%0"
++        : "=m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
++{
++    int oldbit;
++    __asm__ __volatile__ (
++        "lock btsl %2,%1\n\tsbbl %0,%0"
++        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
++    return oldbit;
++}
++
++static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
++{
++    int oldbit;
++    __asm__ __volatile__ (
++        "lock btrl %2,%1\n\tsbbl %0,%0"
++        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
++    return oldbit;
++}
++
++static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
++{
++    int oldbit;
++
++    __asm__ __volatile__ (
++        "lock btcl %2,%1\n\tsbbl %0,%0"
++        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
++    return oldbit;
++}
++
++struct __synch_xchg_dummy { unsigned long a[100]; };
++#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
++
++#define synch_cmpxchg(ptr, old, new) \
++((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
++                                     (unsigned long)(old), \
++                                     (unsigned long)(new), \
++                                     sizeof(*(ptr))))
++
++static inline unsigned long __synch_cmpxchg(volatile void *ptr,
++					    unsigned long old,
++					    unsigned long new, int size)
++{
++	unsigned long prev;
++	switch (size) {
++	case 1:
++		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++	case 2:
++		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++#ifdef CONFIG_X86_64
++	case 4:
++		__asm__ __volatile__("lock; cmpxchgl %k1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++	case 8:
++		__asm__ __volatile__("lock; cmpxchgq %1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++#else
++	case 4:
++		__asm__ __volatile__("lock; cmpxchgl %1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__synch_xg(ptr)),
++				       "0"(old)
++				     : "memory");
++		return prev;
++#endif
++	}
++	return old;
++}
++
++static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
++{
++    return ((1UL << (nr & 31)) & 
++            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
++}
++
++static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
++{
++    int oldbit;
++    __asm__ __volatile__ (
++        "btl %2,%1\n\tsbbl %0,%0"
++        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
++    return oldbit;
++}
++
++#define synch_test_bit(nr,addr) \
++(__builtin_constant_p(nr) ? \
++ synch_const_test_bit((nr),(addr)) : \
++ synch_var_test_bit((nr),(addr)))
++
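++/*
++ * Illustrative sketch (hypothetical usage): atomically setting and
++ * consuming a flag in memory shared with the hypervisor or another guest.
++ * 'shared_flags' is a made-up variable used only for the example.
++ *
++ *	unsigned long shared_flags = 0;
++ *
++ *	synch_set_bit(0, &shared_flags);
++ *	if (synch_test_and_clear_bit(0, &shared_flags))
++ *		handle the event: the bit was set and is now clear
++ */
++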
++#endif /* __XEN_SYNCH_BITOPS_H__ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/system.h linux-2.6.12-xen/include/asm-xen/asm-i386/system.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/system.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/system.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,588 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
++
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/bitops.h>
++#include <asm/synch_bitops.h>
++#include <asm/segment.h>
++#include <asm/cpufeature.h>
++#include <asm/hypervisor.h>
++#include <asm/smp_alt.h>
++
++#ifdef __KERNEL__
++
++struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
++extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
++
++#define switch_to(prev,next,last) do {					\
++	unsigned long esi,edi;						\
++	asm volatile("pushfl\n\t"					\
++		     "pushl %%ebp\n\t"					\
++		     "movl %%esp,%0\n\t"	/* save ESP */		\
++		     "movl %5,%%esp\n\t"	/* restore ESP */	\
++		     "movl $1f,%1\n\t"		/* save EIP */		\
++		     "pushl %6\n\t"		/* restore EIP */	\
++		     "jmp __switch_to\n"				\
++		     "1:\t"						\
++		     "popl %%ebp\n\t"					\
++		     "popfl"						\
++		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
++		      "=a" (last),"=S" (esi),"=D" (edi)			\
++		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
++		      "2" (prev), "d" (next));				\
++} while (0)
++
++#define _set_base(addr,base) do { unsigned long __pr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++	"rorl $16,%%edx\n\t" \
++	"movb %%dl,%2\n\t" \
++	"movb %%dh,%3" \
++	:"=&d" (__pr) \
++	:"m" (*((addr)+2)), \
++	 "m" (*((addr)+4)), \
++	 "m" (*((addr)+7)), \
++         "0" (base) \
++        ); } while(0)
++
++#define _set_limit(addr,limit) do { unsigned long __lr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++	"rorl $16,%%edx\n\t" \
++	"movb %2,%%dh\n\t" \
++	"andb $0xf0,%%dh\n\t" \
++	"orb %%dh,%%dl\n\t" \
++	"movb %%dl,%2" \
++	:"=&d" (__lr) \
++	:"m" (*(addr)), \
++	 "m" (*((addr)+6)), \
++	 "0" (limit) \
++        ); } while(0)
++
++#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
++#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
++
++static inline unsigned long _get_base(char * addr)
++{
++	unsigned long __base;
++	__asm__("movb %3,%%dh\n\t"
++		"movb %2,%%dl\n\t"
++		"shll $16,%%edx\n\t"
++		"movw %1,%%dx"
++		:"=&d" (__base)
++		:"m" (*((addr)+2)),
++		 "m" (*((addr)+4)),
++		 "m" (*((addr)+7)));
++	return __base;
++}
++
++#define get_base(ldt) _get_base( ((char *)&(ldt)) )
++
++/*
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong..
++ */
++#define loadsegment(seg,value)			\
++	asm volatile("\n"			\
++		"1:\t"				\
++		"mov %0,%%" #seg "\n"		\
++		"2:\n"				\
++		".section .fixup,\"ax\"\n"	\
++		"3:\t"				\
++		"pushl $0\n\t"			\
++		"popl %%" #seg "\n\t"		\
++		"jmp 2b\n"			\
++		".previous\n"			\
++		".section __ex_table,\"a\"\n\t"	\
++		".align 4\n\t"			\
++		".long 1b,3b\n"			\
++		".previous"			\
++		: :"m" (value))
++
++/*
++ * Save a segment register away
++ */
++#define savesegment(seg, value) \
++	asm volatile("mov %%" #seg ",%0":"=m" (value))
++
++/*
++ * Clear and set 'TS' bit respectively
++ */
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
++#define read_cr0() ({ \
++	unsigned int __dummy; \
++	__asm__( \
++		"movl %%cr0,%0\n\t" \
++		:"=r" (__dummy)); \
++	__dummy; \
++})
++#define write_cr0(x) \
++	__asm__("movl %0,%%cr0": :"r" (x));
++
++#define read_cr4() ({ \
++	unsigned int __dummy; \
++	__asm__( \
++		"movl %%cr4,%0\n\t" \
++		:"=r" (__dummy)); \
++	__dummy; \
++})
++#define write_cr4(x) \
++	__asm__("movl %0,%%cr4": :"r" (x));
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
++
++#endif	/* __KERNEL__ */
++
++#define wbinvd() \
++	__asm__ __volatile__ ("wbinvd": : :"memory");
++
++static inline unsigned long get_limit(unsigned long segment)
++{
++	unsigned long __limit;
++	__asm__("lsll %1,%0"
++		:"=r" (__limit):"r" (segment));
++	return __limit+1;
++}
++
++#define nop() __asm__ __volatile__ ("nop")
++
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++
++#define tas(ptr) (xchg((ptr),1))
++
++struct __xchg_dummy { unsigned long a[100]; };
++#define __xg(x) ((struct __xchg_dummy *)(x))
++
++
++/*
++ * The semantics of CMPXCHG8B are a bit strange, this is why
++ * there is a loop and the loading of %%eax and %%edx has to
++ * be inside. This inlines well in most cases, the cached
++ * cost is around ~38 cycles. (in the future we might want
++ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
++ * might have an implicit FPU-save as a cost, so it's not
++ * clear which path to go.)
++ *
++ * cmpxchg8b must be used with the lock prefix here to allow
++ * the instruction to be executed atomically, see page 3-102
++ * of the instruction set reference 24319102.pdf. We need
++ * the reader side to see the coherent 64bit value.
++ */
++static inline void __set_64bit (unsigned long long * ptr,
++		unsigned int low, unsigned int high)
++{
++	__asm__ __volatile__ (
++		"\n1:\t"
++		"movl (%0), %%eax\n\t"
++		"movl 4(%0), %%edx\n\t"
++		"lock cmpxchg8b (%0)\n\t"
++		"jnz 1b"
++		: /* no outputs */
++		:	"D"(ptr),
++			"b"(low),
++			"c"(high)
++		:	"ax","dx","memory");
++}
++
++static inline void __set_64bit_constant (unsigned long long *ptr,
++						 unsigned long long value)
++{
++	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
++}
++#define ll_low(x)	*(((unsigned int*)&(x))+0)
++#define ll_high(x)	*(((unsigned int*)&(x))+1)
++
++static inline void __set_64bit_var (unsigned long long *ptr,
++			 unsigned long long value)
++{
++	__set_64bit(ptr,ll_low(value), ll_high(value));
++}
++
++#define set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit_constant(ptr, value) : \
++ __set_64bit_var(ptr, value) )
++
++#define _set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
++ __set_64bit(ptr, ll_low(value), ll_high(value)) )
++
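++/*
++ * Illustrative sketch (hypothetical usage): publishing a 64-bit value
++ * atomically so concurrent readers never see a torn store.  'shared_val'
++ * is a made-up variable used only for the example.
++ *
++ *	unsigned long long shared_val;
++ *	set_64bit(&shared_val, 0x0123456789abcdefULL);
++ */
++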
++/*
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
++ * Note 2: xchg has side effect, so that attribute volatile is necessary,
++ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
++ */
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++{
++	switch (size) {
++		case 1:
++			__asm__ __volatile__("xchgb %b0,%1"
++				:"=q" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 2:
++			__asm__ __volatile__("xchgw %w0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 4:
++			__asm__ __volatile__("xchgl %0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++	}
++	return x;
++}
++
++/*
++ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
++ * store NEW in MEM.  Return the initial value in MEM.  Success is
++ * indicated by comparing RETURN with OLD.
++ */
++
++#ifdef CONFIG_X86_CMPXCHG
++#define __HAVE_ARCH_CMPXCHG 1
++#endif
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++				      unsigned long new, int size)
++{
++	unsigned long prev;
++	switch (size) {
++	case 1:
++		__asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 2:
++		__asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 4:
++		__asm__ __volatile__(LOCK "cmpxchgl %1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	}
++	return old;
++}
++
++#define cmpxchg(ptr,o,n)\
++	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++					(unsigned long)(n),sizeof(*(ptr))))
++    
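++/*
++ * Illustrative sketch (hypothetical usage): the usual lock-free update
++ * loop built on cmpxchg().  'counter' is a made-up variable used only for
++ * the example; success is detected by comparing the returned value with
++ * the expected old value, as described above.
++ *
++ *	unsigned long old, new;
++ *	do {
++ *		old = counter;
++ *		new = old + 1;
++ *	} while (cmpxchg(&counter, old, new) != old);
++ */
++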
++#ifdef __KERNEL__
++struct alt_instr { 
++	__u8 *instr; 		/* original instruction */
++	__u8 *replacement;
++	__u8  cpuid;		/* cpuid bit set for replacement */
++	__u8  instrlen;		/* length of original instruction */
++	__u8  replacementlen; 	/* length of new instruction, <= instrlen */ 
++	__u8  pad;
++}; 
++#endif
++
++/* 
++ * Alternative instructions for different CPU types or capabilities.
++ * 
++ * This allows the use of optimized instructions even on generic binary
++ * kernels.
++ *
++ * The length of oldinstr must be greater than or equal to the length of
++ * newinstr; it can be padded with nops as needed.
++ *
++ * For non-barrier-like inlines please define new variants
++ * without volatile and memory clobber.
++ */
++#define alternative(oldinstr, newinstr, feature) 	\
++	asm volatile ("661:\n\t" oldinstr "\n662:\n" 		     \
++		      ".section .altinstructions,\"a\"\n"     	     \
++		      "  .align 4\n"				       \
++		      "  .long 661b\n"            /* label */          \
++		      "  .long 663f\n"		  /* new instruction */ 	\
++		      "  .byte %c0\n"             /* feature bit */    \
++		      "  .byte 662b-661b\n"       /* sourcelen */      \
++		      "  .byte 664f-663f\n"       /* replacementlen */ \
++		      ".previous\n"						\
++		      ".section .altinstr_replacement,\"ax\"\n"			\
++		      "663:\n\t" newinstr "\n664:\n"   /* replacement */    \
++		      ".previous" :: "i" (feature) : "memory")  
++
++/*
++ * Alternative inline assembly with input.
++ * 
++ * Peculiarities:
++ * No memory clobber here. 
++ * Argument numbers start with 1.
++ * Best is to use constraints that are fixed size (like (%1) ... "r")
++ * If you use variable-sized constraints like "m" or "g" in the
++ * replacement, make sure to pad to the worst-case length.
++ */
++#define alternative_input(oldinstr, newinstr, feature, input...)		\
++	asm volatile ("661:\n\t" oldinstr "\n662:\n"				\
++		      ".section .altinstructions,\"a\"\n"			\
++		      "  .align 4\n"						\
++		      "  .long 661b\n"            /* label */			\
++		      "  .long 663f\n"		  /* new instruction */ 	\
++		      "  .byte %c0\n"             /* feature bit */		\
++		      "  .byte 662b-661b\n"       /* sourcelen */		\
++		      "  .byte 664f-663f\n"       /* replacementlen */ 		\
++		      ".previous\n"						\
++		      ".section .altinstr_replacement,\"ax\"\n"			\
++		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ 	\
++		      ".previous" :: "i" (feature), ##input)
++
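++/*
++ * Example taken from asm-i386/processor.h in this same patch: prefetch()
++ * uses alternative_input() to replace a 4-byte nop with prefetchnta on
++ * CPUs that advertise SSE.
++ *
++ *	alternative_input(ASM_NOP4,
++ *			  "prefetchnta (%1)",
++ *			  X86_FEATURE_XMM,
++ *			  "r" (x));
++ */
++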
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ *
++ * For now, "wmb()" doesn't actually do anything, as all
++ * Intel CPUs follow what Intel calls a *Processor Order*,
++ * in which all writes are seen in the program order even
++ * outside the CPU.
++ *
++ * I expect future Intel CPUs to have a weaker ordering,
++ * but I'd also expect them to finally get their act together
++ * and add some real memory barriers if so.
++ *
++ * Some non-Intel clones support out-of-order store. wmb() ceases to be a
++ * nop for these.
++ */
++ 
++
++/* 
++ * Actually only lfence would be needed for mb() because all stores done 
++ * by the kernel should be already ordered. But keep a full barrier for now. 
++ */
++
++#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
++#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
++
++/**
++ * read_barrier_depends - Flush all pending reads that subsequent reads
++ * depend on.
++ *
++ * No data-dependent reads from memory-like regions are ever reordered
++ * over this barrier.  All reads preceding this primitive are guaranteed
++ * to access memory (but not necessarily other CPUs' caches) before any
++ * reads following this primitive that depend on the data returned by
++ * any of the preceding reads.  This primitive is much lighter weight than
++ * rmb() on most CPUs, and is never heavier weight than is
++ * rmb().
++ *
++ * These ordering constraints are respected by both the local CPU
++ * and the compiler.
++ *
++ * Ordering is not guaranteed by anything other than these primitives,
++ * not even by data dependencies.  See the documentation for
++ * memory_barrier() for examples and URLs to more information.
++ *
++ * For example, the following code would force ordering (the initial
++ * value of "a" is zero, "b" is one, and "p" is "&a"):
++ *
++ * <programlisting>
++ *	CPU 0				CPU 1
++ *
++ *	b = 2;
++ *	memory_barrier();
++ *	p = &b;				q = p;
++ *					read_barrier_depends();
++ *					d = *q;
++ * </programlisting>
++ *
++ * because the read of "*q" depends on the read of "p" and these
++ * two reads are separated by a read_barrier_depends().  However,
++ * the following code, with the same initial values for "a" and "b":
++ *
++ * <programlisting>
++ *	CPU 0				CPU 1
++ *
++ *	a = 2;
++ *	memory_barrier();
++ *	b = 3;				y = b;
++ *					read_barrier_depends();
++ *					x = a;
++ * </programlisting>
++ *
++ * does not enforce ordering, since there is no data dependency between
++ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
++ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
++ * in cases like this where there are no data dependencies.
++ **/
++
++#define read_barrier_depends()	do { } while(0)
++
++#ifdef CONFIG_X86_OOSTORE
++/* Actually there are no OOO store capable CPUs for now that do SSE, 
++   but make it a possibility already. */
++#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
++#else
++#define wmb()	__asm__ __volatile__ ("": : :"memory")
++#endif
++
++#ifdef CONFIG_SMP
++#define smp_wmb()	wmb()
++#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
++#define smp_alt_mb(instr)                                           \
++__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
++		     ".section __smp_alternatives,\"a\"\n"          \
++		     ".long 6667b\n"                                \
++                     ".long 6673f\n"                                \
++		     ".previous\n"                                  \
++		     ".section __smp_replacements,\"a\"\n"          \
++		     "6673:.byte 6668b-6667b\n"                     \
++		     ".byte 6670f-6669f\n"                          \
++		     ".byte 6671f-6670f\n"                          \
++                     ".byte 0\n"                                    \
++		     ".byte %c0\n"                                  \
++		     "6669:lock;addl $0,0(%%esp)\n"                 \
++		     "6670:" instr "\n"                             \
++		     "6671:\n"                                      \
++		     ".previous\n"                                  \
++		     :                                              \
++		     : "i" (X86_FEATURE_XMM2)                       \
++		     : "memory")
++#define smp_rmb() smp_alt_mb("lfence")
++#define smp_mb()  smp_alt_mb("mfence")
++#define set_mb(var, value) do {                                     \
++unsigned long __set_mb_temp;                                        \
++__asm__ __volatile__("6667:movl %1, %0\n6668:\n"                    \
++		     ".section __smp_alternatives,\"a\"\n"          \
++		     ".long 6667b\n"                                \
++		     ".long 6673f\n"                                \
++		     ".previous\n"                                  \
++		     ".section __smp_replacements,\"a\"\n"          \
++		     "6673: .byte 6668b-6667b\n"                    \
++		     ".byte 6670f-6669f\n"                          \
++		     ".byte 0\n"                                    \
++		     ".byte 6671f-6670f\n"                          \
++		     ".byte -1\n"                                   \
++		     "6669: xchg %1, %0\n"                          \
++		     "6670:movl %1, %0\n"                           \
++		     "6671:\n"                                      \
++		     ".previous\n"                                  \
++		     : "=m" (var), "=r" (__set_mb_temp)             \
++		     : "1" (value)                                  \
++		     : "memory"); } while (0)
++#else
++#define smp_rmb()	rmb()
++#define smp_mb()	mb()
++#define set_mb(var, value) do { xchg(&var, value); } while (0)
++#endif
++#define smp_read_barrier_depends()	read_barrier_depends()
++#else
++#define smp_mb()	barrier()
++#define smp_rmb()	barrier()
++#define smp_wmb()	barrier()
++#define smp_read_barrier_depends()	do { } while(0)
++#define set_mb(var, value) do { var = value; barrier(); } while (0)
++#endif
++
++#define set_wmb(var, value) do { var = value; wmb(); } while (0)
++
++/* interrupt control.. */
++
++/* 
++ * The use of 'barrier' in the following reflects their use as local-lock
++ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
++ * critical operations are executed. All critical operations must complete
++ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
++ * includes these barriers, for example.
++ */
++
++#define __cli()								\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	_vcpu->evtchn_upcall_mask = 1;					\
++	preempt_enable_no_resched();					\
++	barrier();							\
++} while (0)
++
++#define __sti()								\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	barrier();							\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	_vcpu->evtchn_upcall_mask = 0;					\
++	barrier(); /* unmask then check (avoid races) */		\
++	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
++		force_evtchn_callback();				\
++	preempt_enable();						\
++} while (0)
++
++#define __save_flags(x)							\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	(x) = _vcpu->evtchn_upcall_mask;				\
++	preempt_enable();						\
++} while (0)
++
++#define __restore_flags(x)						\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	barrier();							\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
++		barrier(); /* unmask then check (avoid races) */	\
++		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
++			force_evtchn_callback();			\
++		preempt_enable();					\
++	} else								\
++		preempt_enable_no_resched();				\
++} while (0)
++
++#define safe_halt()		((void)0)
++
++#define __save_and_cli(x)						\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	(x) = _vcpu->evtchn_upcall_mask;				\
++	_vcpu->evtchn_upcall_mask = 1;					\
++	preempt_enable_no_resched();					\
++	barrier();							\
++} while (0)
++
++#define local_irq_save(x)	__save_and_cli(x)
++#define local_irq_restore(x)	__restore_flags(x)
++#define local_save_flags(x)	__save_flags(x)
++#define local_irq_disable()	__cli()
++#define local_irq_enable()	__sti()
++
++/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
++#define irqs_disabled()							\
++({	int ___x;							\
++	vcpu_info_t *_vcpu;						\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	___x = (_vcpu->evtchn_upcall_mask != 0);			\
++	preempt_enable_no_resched();					\
++	___x; })
++
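++/*
++ * Illustrative sketch (hypothetical usage): the standard save/restore
++ * pattern; on Xen it manipulates the per-vcpu event-channel mask instead
++ * of EFLAGS.IF.
++ *
++ *	unsigned long flags;
++ *
++ *	local_irq_save(flags);		mask event delivery on this vcpu
++ *	... critical section ...
++ *	local_irq_restore(flags);	restore the previous mask state
++ */
++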
++/*
++ * disable hlt during certain critical i/o operations
++ */
++#define HAVE_DISABLE_HLT
++void disable_hlt(void);
++void enable_hlt(void);
++
++extern int es7000_plat;
++void cpu_idle_wait(void);
++
++extern unsigned long arch_align_stack(unsigned long sp);
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/tlbflush.h linux-2.6.12-xen/include/asm-xen/asm-i386/tlbflush.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/tlbflush.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,102 @@
++#ifndef _I386_TLBFLUSH_H
++#define _I386_TLBFLUSH_H
++
++#include <linux/config.h>
++#include <linux/mm.h>
++#include <asm/processor.h>
++
++#define __flush_tlb() xen_tlb_flush()
++#define __flush_tlb_global() xen_tlb_flush()
++#define __flush_tlb_all() xen_tlb_flush()
++
++extern unsigned long pgkern_mask;
++
++#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
++
++#define __flush_tlb_single(addr) xen_invlpg(addr)
++
++#define __flush_tlb_one(addr) __flush_tlb_single(addr)
++
++/*
++ * TLB flushing:
++ *
++ *  - flush_tlb() flushes the current mm struct TLBs
++ *  - flush_tlb_all() flushes all processes TLBs
++ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
++ *  - flush_tlb_page(vma, vmaddr) flushes one page
++ *  - flush_tlb_range(vma, start, end) flushes a range of pages
++ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * ..but the i386 has somewhat limited tlb flushing capabilities,
++ * and page-granular flushes are available only on i486 and up.
++ */
++
++#ifndef CONFIG_SMP
++
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
++
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++	if (mm == current->active_mm)
++		__flush_tlb();
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++	unsigned long addr)
++{
++	if (vma->vm_mm == current->active_mm)
++		__flush_tlb_one(addr);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++	unsigned long start, unsigned long end)
++{
++	if (vma->vm_mm == current->active_mm)
++		__flush_tlb();
++}
++
++#else
++
++#include <asm/smp.h>
++
++#define local_flush_tlb() \
++	__flush_tlb()
++
++extern void flush_tlb_all(void);
++extern void flush_tlb_current_task(void);
++extern void flush_tlb_mm(struct mm_struct *);
++extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
++
++#define flush_tlb()	flush_tlb_current_task()
++
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++{
++	flush_tlb_mm(vma->vm_mm);
++}
++
++#define TLBSTATE_OK	1
++#define TLBSTATE_LAZY	2
++
++struct tlb_state
++{
++	struct mm_struct *active_mm;
++	int state;
++	char __cacheline_padding[L1_CACHE_BYTES-8];
++};
++DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
++
++
++#endif
++
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++				      unsigned long start, unsigned long end)
++{
++	/* i386 does not keep any page table caches in TLB */
++}
++
++#endif /* _I386_TLBFLUSH_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/vga.h linux-2.6.12-xen/include/asm-xen/asm-i386/vga.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-i386/vga.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-i386/vga.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,20 @@
++/*
++ *	Access to VGA videoram
++ *
++ *	(c) 1998 Martin Mares <mj at ucw.cz>
++ */
++
++#ifndef _LINUX_ASM_VGA_H_
++#define _LINUX_ASM_VGA_H_
++
++/*
++ *	On the PC, we can just recalculate addresses and then
++ *	access the videoram directly without any black magic.
++ */
++
++#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
++
++#define vga_readb(x) (*(x))
++#define vga_writeb(x,y) (*(y) = (x))
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-ia64/fixmap.h linux-2.6.12-xen/include/asm-xen/asm-ia64/fixmap.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-ia64/fixmap.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-ia64/fixmap.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,2 @@
++#define clear_fixmap(x)	do {} while (0)
++#define	set_fixmap(x,y)	do {} while (0)
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-ia64/hypercall.h linux-2.6.12-xen/include/asm-xen/asm-ia64/hypercall.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-ia64/hypercall.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-ia64/hypercall.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,500 @@
++/******************************************************************************
++ * hypercall.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/sched.h>
++
++/* FIXME: temp place to hold these page related macros */
++#include <asm/page.h>
++#define virt_to_machine(v) __pa(v)
++#define machine_to_virt(m) __va(m)
++//#define virt_to_mfn(v)	(__pa(v) >> 14)
++//#define mfn_to_virt(m)	(__va(m << 14))
++#define virt_to_mfn(v)	((__pa(v)) >> PAGE_SHIFT)
++#define mfn_to_virt(m)	(__va((m) << PAGE_SHIFT))
++
++/*
++ * Assembler stubs for hyper-calls.
++ */
++
++#if 0
++static inline int
++HYPERVISOR_set_trap_table(
++    trap_info_t *table)
++{
++#if 0
++    int ret;
++    unsigned long ignore;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ignore)
++	: "0" (__HYPERVISOR_set_trap_table), "1" (table)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_mmu_update(
++    mmu_update_t *req, int count, int *success_count, domid_t domid)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2, ign3, ign4;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
++	: "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
++        "3" (success_count), "4" (domid)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_mmuext_op(
++    struct mmuext_op *op, int count, int *success_count, domid_t domid)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2, ign3, ign4;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
++	: "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count),
++        "3" (success_count), "4" (domid)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_set_gdt(
++    unsigned long *frame_list, int entries)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2)
++	: "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
++	: "memory" );
++
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_stack_switch(
++    unsigned long ss, unsigned long esp)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2)
++	: "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_set_callbacks(
++    unsigned long event_selector, unsigned long event_address,
++    unsigned long failsafe_selector, unsigned long failsafe_address)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2, ign3, ign4;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
++	: "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
++	  "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_fpu_taskswitch(
++    int set)
++{
++#if 0
++    int ret;
++    unsigned long ign;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign)
++        : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
++        : "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_sched_op(
++    int cmd, unsigned long arg)
++{
++    return 1;
++}
++
++static inline int
++HYPERVISOR_suspend(
++    unsigned long srec)
++{
++    return 1;
++}
++
++static inline long
++HYPERVISOR_set_timer_op(
++    u64 timeout)
++{
++#if 0
++    int ret;
++    unsigned long timeout_hi = (unsigned long)(timeout>>32);
++    unsigned long timeout_lo = (unsigned long)timeout;
++    unsigned long ign1, ign2;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2)
++	: "0" (__HYPERVISOR_set_timer_op), "b" (timeout_lo), "c" (timeout_hi)
++	: "memory");
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_dom0_op(
++    dom0_op_t *dom0_op)
++{
++#if 0
++    int ret;
++    unsigned long ign1;
++
++    dom0_op->interface_version = DOM0_INTERFACE_VERSION;
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1)
++	: "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
++	: "memory");
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_set_debugreg(
++    int reg, unsigned long value)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2;
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2)
++	: "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline unsigned long
++HYPERVISOR_get_debugreg(
++    int reg)
++{
++#if 0
++    unsigned long ret;
++    unsigned long ign;
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign)
++	: "0" (__HYPERVISOR_get_debugreg), "1" (reg)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_update_descriptor(
++    unsigned long ma, unsigned long word1, unsigned long word2)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2, ign3;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
++	: "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
++	  "3" (word2)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_set_fast_trap(
++    int idx)
++{
++#if 0
++    int ret;
++    unsigned long ign;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign)
++	: "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_dom_mem_op(
++    unsigned int op, unsigned long *extent_list,
++    unsigned long nr_extents, unsigned int extent_order)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2, ign3, ign4, ign5;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
++	  "=D" (ign5)
++	: "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
++	  "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
++        : "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_multicall(
++    void *call_list, int nr_calls)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2)
++	: "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++#endif
++
++static inline int
++HYPERVISOR_update_va_mapping(
++    unsigned long va, pte_t new_val, unsigned long flags)
++{
++    /* no-op */
++    return 1;
++}
++
++static inline int
++HYPERVISOR_memory_op(
++    unsigned int cmd, void *arg)
++{
++    int ret;
++    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
++        : "=r" (ret)
++        : "i" (__HYPERVISOR_memory_op), "r"(cmd), "r"(arg)
++        : "r14","r15","r2","r8","memory" );
++    return ret;
++}
++
++static inline int
++HYPERVISOR_event_channel_op(
++    void *op)
++{
++    int ret;
++    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
++        : "=r" (ret)
++        : "i" (__HYPERVISOR_event_channel_op), "r"(op)
++        : "r14","r2","r8","memory" );
++    return ret;
++}
++
++#if 0
++static inline int
++HYPERVISOR_xen_version(
++    int cmd)
++{
++#if 0
++    int ret;
++    unsigned long ignore;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ignore)
++	: "0" (__HYPERVISOR_xen_version), "1" (cmd)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++#endif
++
++static inline int
++HYPERVISOR_console_io(
++    int cmd, int count, char *str)
++{
++    int ret;
++    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r16=%4 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
++        : "=r" (ret)
++        : "i" (__HYPERVISOR_console_io), "r"(cmd), "r"(count), "r"(str)
++        : "r14","r15","r16","r2","r8","memory" );
++    return ret;
++}
++
++#if 0
++static inline int
++HYPERVISOR_physdev_op(
++    void *physdev_op)
++{
++#if 0
++    int ret;
++    unsigned long ign;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign)
++	: "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++#endif
++
++static inline int
++HYPERVISOR_grant_table_op(
++    unsigned int cmd, void *uop, unsigned int count)
++{
++    int ret;
++    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r16=%4 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
++        : "=r" (ret)
++        : "i" (__HYPERVISOR_grant_table_op), "r"(cmd), "r"(uop), "r"(count)
++        : "r14","r15","r16","r2","r8","memory" );
++    return ret;
++}
++
++#if 0
++static inline int
++HYPERVISOR_update_va_mapping_otherdomain(
++    unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2, ign3, ign4;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
++	: "0" (__HYPERVISOR_update_va_mapping_otherdomain),
++          "1" (va), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
++        "memory" );
++    
++    return ret;
++#endif
++    return 1;
++}
++
++static inline int
++HYPERVISOR_vm_assist(
++    unsigned int cmd, unsigned int type)
++{
++#if 0
++    int ret;
++    unsigned long ign1, ign2;
++
++    __asm__ __volatile__ (
++        TRAP_INSTR
++        : "=a" (ret), "=b" (ign1), "=c" (ign2)
++	: "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
++	: "memory" );
++
++    return ret;
++#endif
++    return 1;
++}
++
++#endif
++
++#endif /* __HYPERCALL_H__ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-ia64/hypervisor.h linux-2.6.12-xen/include/asm-xen/asm-ia64/hypervisor.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-ia64/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-ia64/hypervisor.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,70 @@
++/******************************************************************************
++ * hypervisor.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
++
++#include <linux/config.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/dom0_ops.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
++
++extern shared_info_t *HYPERVISOR_shared_info;
++extern start_info_t *xen_start_info;
++
++void force_evtchn_callback(void);
++
++#include <asm/hypercall.h>
++
++// for drivers/xen/privcmd/privcmd.c
++#define direct_remap_pfn_range(a,b,c,d,e,f) remap_pfn_range(a,b,c,d,e)
++#define	pfn_to_mfn(x)	(x)
++#define	mfn_to_pfn(x)	(x)
++#define machine_to_phys_mapping 0
++
++// for drivers/xen/balloon/balloon.c
++#ifdef CONFIG_XEN_SCRUB_PAGES
++#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
++#else
++#define scrub_pages(_p,_n) ((void)0)
++#endif
++#define	pte_mfn(_x)	pte_pfn(_x)
++#define INVALID_P2M_ENTRY	(~0UL)
++#define __pte_ma(_x)	((pte_t) {(_x)})
++#define phys_to_machine_mapping_valid(_x)	(1)
++#define	kmap_flush_unused()	do {} while (0)
++#define set_phys_to_machine(_x,_y)	do {} while (0)
++#define xen_machphys_update(_x,_y)	do {} while (0)
++#define pfn_pte_ma(_x,_y)	__pte_ma(0)
++
++#endif /* __HYPERVISOR_H__ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-ia64/synch_bitops.h linux-2.6.12-xen/include/asm-xen/asm-ia64/synch_bitops.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-ia64/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-ia64/synch_bitops.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,61 @@
++#ifndef __XEN_SYNCH_BITOPS_H__
++#define __XEN_SYNCH_BITOPS_H__
++
++/*
++ * Copyright 1992, Linus Torvalds.
++ * Heavily modified to provide guaranteed strong synchronisation
++ * when communicating with Xen or other guest OSes running on other CPUs.
++ */
++
++#include <linux/config.h>
++
++#define ADDR (*(volatile long *) addr)
++
++static __inline__ void synch_set_bit(int nr, volatile void * addr)
++{
++	set_bit(nr, addr);
++}
++
++static __inline__ void synch_clear_bit(int nr, volatile void * addr)
++{
++	clear_bit(nr, addr);
++}
++
++static __inline__ void synch_change_bit(int nr, volatile void * addr)
++{
++	change_bit(nr, addr);
++}
++
++static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
++{
++    return test_and_set_bit(nr, addr);
++}
++
++static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
++{
++    return test_and_clear_bit(nr, addr);
++}
++
++static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
++{
++    return test_and_change_bit(nr, addr);
++}
++
++static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
++{
++    return test_bit(nr, addr);
++}
++
++static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
++{
++    return test_bit(nr, addr);
++}
++
++#define synch_cmpxchg	ia64_cmpxchg4_acq
++
++#define synch_test_bit(nr,addr) \
++(__builtin_constant_p(nr) ? \
++ synch_const_test_bit((nr),(addr)) : \
++ synch_var_test_bit((nr),(addr)))
++
++#endif /* __XEN_SYNCH_BITOPS_H__ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/arch_hooks.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/arch_hooks.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/arch_hooks.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/arch_hooks.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,27 @@
++#ifndef _ASM_ARCH_HOOKS_H
++#define _ASM_ARCH_HOOKS_H
++
++#include <linux/interrupt.h>
++
++/*
++ *	linux/include/asm/arch_hooks.h
++ *
++ *	define the architecture specific hooks 
++ */
++
++/* these aren't arch hooks, they are generic routines
++ * that can be used by the hooks */
++extern void init_ISA_irqs(void);
++extern void apic_intr_init(void);
++extern void smp_intr_init(void);
++extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
++
++/* these are the defined hooks */
++extern void intr_init_hook(void);
++extern void pre_intr_init_hook(void);
++extern void pre_setup_arch_hook(void);
++extern void trap_init_hook(void);
++extern void time_init_hook(void);
++extern void mca_nmi_hook(void);
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/bootsetup.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/bootsetup.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/bootsetup.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/bootsetup.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,42 @@
++
++#ifndef _X86_64_BOOTSETUP_H
++#define _X86_64_BOOTSETUP_H 1
++
++#define BOOT_PARAM_SIZE		4096
++extern char x86_boot_params[BOOT_PARAM_SIZE];
++
++/*
++ * This is set up by the setup-routine at boot-time
++ */
++#define PARAM	((unsigned char *)x86_boot_params)
++#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
++#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
++#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
++#define E820_MAP_NR (*(char*) (PARAM+E820NR))
++#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
++#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
++#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
++#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
++#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
++#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
++#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
++#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
++#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
++#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
++#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
++
++#define INITRD_START (__pa(xen_start_info->mod_start))
++#define INITRD_SIZE (xen_start_info->mod_len)
++#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
++
++#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
++#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
++#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
++#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
++#define COMMAND_LINE saved_command_line
++
++#define RAMDISK_IMAGE_START_MASK  	0x07FF
++#define RAMDISK_PROMPT_FLAG		0x8000
++#define RAMDISK_LOAD_FLAG		0x4000	
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/desc.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/desc.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/desc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/desc.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,240 @@
++/* Written 2000 by Andi Kleen */ 
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
++
++#include <linux/threads.h>
++#include <asm/ldt.h>
++
++#ifndef __ASSEMBLY__
++
++#include <linux/string.h>
++#include <asm/segment.h>
++#include <asm/mmu.h>
++
++// 8 byte segment descriptor
++struct desc_struct { 
++	u16 limit0;
++	u16 base0;
++	unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
++	unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
++} __attribute__((packed)); 
++
++struct n_desc_struct { 
++	unsigned int a,b;
++}; 	
++
++enum { 
++	GATE_INTERRUPT = 0xE, 
++	GATE_TRAP = 0xF, 	
++	GATE_CALL = 0xC,
++}; 	
++
++// 16byte gate
++struct gate_struct {          
++	u16 offset_low;
++	u16 segment; 
++	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
++	u16 offset_middle;
++	u32 offset_high;
++	u32 zero1; 
++} __attribute__((packed));
++
++#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) 
++#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
++#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
++
++enum { 
++	DESC_TSS = 0x9,
++	DESC_LDT = 0x2,
++}; 
++
++// LDT or TSS descriptor in the GDT. 16 bytes.
++struct ldttss_desc { 
++	u16 limit0;
++	u16 base0;
++	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
++	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
++	u32 base3;
++	u32 zero1; 
++} __attribute__((packed)); 
++
++struct desc_ptr {
++	unsigned short size;
++	unsigned long address;
++} __attribute__((packed)) ;
++
++extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
++
++extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
++
++#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)(cpu_gdt_descr[(_cpu)].address))
++
++#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
++
++static inline void clear_LDT(void)
++{
++	int cpu = get_cpu();
++
++	/*
++	 * NB. We load the default_ldt for lcall7/27 handling on demand, as
++	 * it slows down context switching. Noone uses it anyway.
++	 */
++	cpu = cpu;              /* XXX avoid compiler warning */
++	xen_set_ldt(0UL, 0);
++	put_cpu();
++}
++
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++extern struct gate_struct idt_table[]; 
++
++static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)  
++{
++	struct gate_struct s; 	
++	s.offset_low = PTR_LOW(func); 
++	s.segment = __KERNEL_CS;
++	s.ist = ist; 
++	s.p = 1;
++	s.dpl = dpl; 
++	s.zero0 = 0;
++	s.zero1 = 0; 
++	s.type = type; 
++	s.offset_middle = PTR_MIDDLE(func); 
++	s.offset_high = PTR_HIGH(func); 
++	/* does not need to be atomic because it is only done once at setup time */ 
++	memcpy(adr, &s, 16); 
++} 
++
++static inline void set_intr_gate(int nr, void *func) 
++{ 
++	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); 
++} 
++
++static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) 
++{ 
++	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); 
++} 
++
++static inline void set_system_gate(int nr, void *func) 
++{ 
++	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); 
++} 
++
++static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, 
++					 unsigned size) 
++{ 
++	struct ldttss_desc d;
++	memset(&d,0,sizeof(d)); 
++	d.limit0 = size & 0xFFFF;
++	d.base0 = PTR_LOW(tss); 
++	d.base1 = PTR_MIDDLE(tss) & 0xFF; 
++	d.type = type;
++	d.p = 1; 
++	d.limit1 = (size >> 16) & 0xF;
++	d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF; 
++	d.base3 = PTR_HIGH(tss); 
++	memcpy(ptr, &d, 16); 
++}
++
++static inline void set_tss_desc(unsigned cpu, void *addr)
++{ 
++        set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS], 
++                              (unsigned long)addr, 
++                              DESC_TSS,
++                              sizeof(struct tss_struct) - 1);
++} 
++
++static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
++{ 
++	set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], 
++                              (unsigned long)addr, 
++			      DESC_LDT, size * 8 - 1);
++}
++
++static inline void set_seg_base(unsigned cpu, int entry, void *base)
++{ 
++	struct desc_struct *d = (struct desc_struct *)&get_cpu_gdt_table(cpu)[entry];
++	u32 addr = (u32)(u64)base;
++	BUG_ON((u64)base >> 32); 
++	d->base0 = addr & 0xffff;
++	d->base1 = (addr >> 16) & 0xff;
++	d->base2 = (addr >> 24) & 0xff;
++} 
++
++#define LDT_entry_a(info) \
++	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++/* Don't allow setting of the lm bit. It is useless anyway because 
++   64bit system calls require __USER_CS. */ 
++#define LDT_entry_b(info) \
++	(((info)->base_addr & 0xff000000) | \
++	(((info)->base_addr & 0x00ff0000) >> 16) | \
++	((info)->limit & 0xf0000) | \
++	(((info)->read_exec_only ^ 1) << 9) | \
++	((info)->contents << 10) | \
++	(((info)->seg_not_present ^ 1) << 15) | \
++	((info)->seg_32bit << 22) | \
++	((info)->limit_in_pages << 23) | \
++	((info)->useable << 20) | \
++	/* ((info)->lm << 21) | */ \
++	0x7000)
++
++#define LDT_empty(info) (\
++	(info)->base_addr	== 0	&& \
++	(info)->limit		== 0	&& \
++	(info)->contents	== 0	&& \
++	(info)->read_exec_only	== 1	&& \
++	(info)->seg_32bit	== 0	&& \
++	(info)->limit_in_pages	== 0	&& \
++	(info)->seg_not_present	== 1	&& \
++	(info)->useable		== 0	&& \
++	(info)->lm		== 0)
++
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#if 0
++	u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
++	gdt[0] = t->tls_array[0];
++	gdt[1] = t->tls_array[1];
++	gdt[2] = t->tls_array[2];
++#endif
++#define C(i) \
++	HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), t->tls_array[i])
++
++	C(0); C(1); C(2);
++#undef C
++} 
++
++/*
++ * load one particular LDT into the current CPU
++ */
++extern inline void load_LDT_nolock (mm_context_t *pc, int cpu)
++{
++        void *segments = pc->ldt;
++        int count = pc->size;
++
++        if (likely(!count))
++                segments = NULL;
++
++        xen_set_ldt((unsigned long)segments, count);
++}
++
++static inline void load_LDT(mm_context_t *pc)
++{
++	int cpu = get_cpu();
++	load_LDT_nolock(pc, cpu);
++	put_cpu();
++}
++
++extern struct desc_ptr idt_descr;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/dma-mapping.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/dma-mapping.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/dma-mapping.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/dma-mapping.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1 @@
++#include <asm-i386/dma-mapping.h>
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/fixmap.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/fixmap.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/fixmap.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/fixmap.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,113 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1998 Ingo Molnar
++ */
++
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
++
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <asm/apicdef.h>
++#include <asm-xen/gnttab.h>
++#include <asm/page.h>
++#include <asm/vsyscall.h>
++#include <asm/vsyscall32.h>
++#include <asm/acpi.h>
++
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process.
++ *
++ * these 'compile-time allocated' memory buffers are
++ * fixed-size 4k pages. (or larger if used with an increment
++ * higher than 1) use fixmap_set(idx,phys) to associate
++ * physical memory with fixmap indices.
++ *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
++ */
++
++enum fixed_addresses {
++	VSYSCALL_LAST_PAGE,
++	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
++	VSYSCALL_HPET,
++	FIX_HPET_BASE,
++#ifdef CONFIG_X86_LOCAL_APIC
++	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++	FIX_IO_APIC_BASE_0,
++	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_ACPI_BOOT
++	FIX_ACPI_BEGIN,
++	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++	FIX_SHARED_INFO,
++	FIX_GNTTAB_BEGIN,
++	FIX_GNTTAB_END = FIX_GNTTAB_BEGIN + NR_GRANT_FRAMES - 1,
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++#define NR_FIX_ISAMAPS	256
++	FIX_ISAMAP_END,
++	FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++#endif
++	__end_of_fixed_addresses
++};
++
++extern void __set_fixmap (enum fixed_addresses idx,
++					unsigned long phys, pgprot_t flags);
++
++#define set_fixmap(idx, phys) \
++		__set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
++
++#define clear_fixmap(idx) \
++                __set_fixmap(idx, 0, __pgprot(0))
++
++#define FIXADDR_TOP	(VSYSCALL_END-PAGE_SIZE)
++#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
++
++/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
++#define FIXADDR_USER_START	((unsigned long)VSYSCALL32_VSYSCALL)
++#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
++
++#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
++
++extern void __this_fixmap_does_not_exist(void);
++
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++extern inline unsigned long fix_to_virt(const unsigned int idx)
++{
++	/*
++	 * this branch gets completely eliminated after inlining,
++	 * except when someone tries to use fixaddr indices in an
++	 * illegal way. (such as mixing up address types or using
++	 * out-of-range indices).
++	 *
++	 * If it doesn't get removed, the linker will complain
++	 * loudly with a reasonably clear error message.
++	 */
++	if (idx >= __end_of_fixed_addresses)
++		__this_fixmap_does_not_exist();
++
++        return __fix_to_virt(idx);
++}
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/floppy.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/floppy.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/floppy.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/floppy.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,204 @@
++/*
++ * Architecture specific parts of the Floppy driver
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1995
++ *
++ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
++ */
++#ifndef __ASM_XEN_X86_64_FLOPPY_H
++#define __ASM_XEN_X86_64_FLOPPY_H
++
++#include <linux/vmalloc.h>
++
++
++/*
++ * The DMA channel used by the floppy controller cannot access data at
++ * addresses >= 16MB
++ *
++ * Went back to the 1MB limit, as some people had problems with the floppy
++ * driver otherwise. It doesn't matter much for performance anyway, as most
++ * floppy accesses go through the track buffer.
++ */
++#define _CROSS_64KB(a,s,vdma) \
++(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
++
++#include <linux/vmalloc.h>
++
++/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
++#include <asm/dma.h>
++#undef MAX_DMA_ADDRESS
++#define MAX_DMA_ADDRESS 0
++#define CROSS_64KB(a,s) (0)
++
++#define fd_inb(port)			inb_p(port)
++#define fd_outb(value,port)		outb_p(value,port)
++
++#define fd_request_dma()        (0)
++#define fd_free_dma()           ((void)0)
++#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
++#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
++#define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)
++#define fd_get_dma_residue()    vdma_get_dma_residue(FLOPPY_DMA)
++#define fd_dma_mem_alloc(size)	vdma_mem_alloc(size)
++#define fd_dma_mem_free(addr, size) vdma_mem_free(addr, size) 
++#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
++
++static int virtual_dma_count;
++static int virtual_dma_residue;
++static char *virtual_dma_addr;
++static int virtual_dma_mode;
++static int doing_pdma;
++
++static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
++{
++	register unsigned char st;
++
++#undef TRACE_FLPY_INT
++
++#ifdef TRACE_FLPY_INT
++	static int calls=0;
++	static int bytes=0;
++	static int dma_wait=0;
++#endif
++	if (!doing_pdma)
++		return floppy_interrupt(irq, dev_id, regs);
++
++#ifdef TRACE_FLPY_INT
++	if(!calls)
++		bytes = virtual_dma_count;
++#endif
++
++	{
++		register int lcount;
++		register char *lptr;
++
++		st = 1;
++		for(lcount=virtual_dma_count, lptr=virtual_dma_addr; 
++		    lcount; lcount--, lptr++) {
++			st=inb(virtual_dma_port+4) & 0xa0 ;
++			if(st != 0xa0) 
++				break;
++			if(virtual_dma_mode)
++				outb_p(*lptr, virtual_dma_port+5);
++			else
++				*lptr = inb_p(virtual_dma_port+5);
++		}
++		virtual_dma_count = lcount;
++		virtual_dma_addr = lptr;
++		st = inb(virtual_dma_port+4);
++	}
++
++#ifdef TRACE_FLPY_INT
++	calls++;
++#endif
++	if(st == 0x20)
++		return IRQ_HANDLED;
++	if(!(st & 0x20)) {
++		virtual_dma_residue += virtual_dma_count;
++		virtual_dma_count=0;
++#ifdef TRACE_FLPY_INT
++		printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", 
++		       virtual_dma_count, virtual_dma_residue, calls, bytes,
++		       dma_wait);
++		calls = 0;
++		dma_wait=0;
++#endif
++		doing_pdma = 0;
++		floppy_interrupt(irq, dev_id, regs);
++		return IRQ_HANDLED;
++	}
++#ifdef TRACE_FLPY_INT
++	if(!virtual_dma_count)
++		dma_wait++;
++#endif
++	return IRQ_HANDLED;
++}
++
++static void fd_disable_dma(void)
++{
++	doing_pdma = 0;
++	virtual_dma_residue += virtual_dma_count;
++	virtual_dma_count=0;
++}
++
++static int vdma_get_dma_residue(unsigned int dummy)
++{
++	return virtual_dma_count + virtual_dma_residue;
++}
++
++
++static int fd_request_irq(void)
++{
++	return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
++					   "floppy", NULL);
++}
++
++
++static unsigned long vdma_mem_alloc(unsigned long size)
++{
++	return (unsigned long) vmalloc(size);
++
++}
++
++static void vdma_mem_free(unsigned long addr, unsigned long size)
++{
++	vfree((void *)addr);
++}
++
++static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
++{
++	doing_pdma = 1;
++	virtual_dma_port = io;
++	virtual_dma_mode = (mode  == DMA_MODE_WRITE);
++	virtual_dma_addr = addr;
++	virtual_dma_count = size;
++	virtual_dma_residue = 0;
++	return 0;
++}
++
++/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
++#define FDC1 xen_floppy_init()
++static int FDC2 = -1;
++
++static int xen_floppy_init(void)
++{
++	use_virtual_dma = 1;
++	can_use_virtual_dma = 1;
++	return 0x340;
++}
++
++/*
++ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
++ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
++ * coincides with another rtc CMOS user.		Paul G.
++ */
++#define FLOPPY0_TYPE	({				\
++	unsigned long flags;				\
++	unsigned char val;				\
++	spin_lock_irqsave(&rtc_lock, flags);		\
++	val = (CMOS_READ(0x10) >> 4) & 15;		\
++	spin_unlock_irqrestore(&rtc_lock, flags);	\
++	val;						\
++})
++
++#define FLOPPY1_TYPE	({				\
++	unsigned long flags;				\
++	unsigned char val;				\
++	spin_lock_irqsave(&rtc_lock, flags);		\
++	val = CMOS_READ(0x10) & 15;			\
++	spin_unlock_irqrestore(&rtc_lock, flags);	\
++	val;						\
++})
++
++#define N_FDC 2
++#define N_DRIVE 8
++
++#define FLOPPY_MOTOR_MASK 0xf0
++
++#define EXTRA_FLOPPY_PARAMS
++
++#endif /* __ASM_XEN_X86_64_FLOPPY_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hw_irq.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/hw_irq.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hw_irq.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/hw_irq.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,138 @@
++#ifndef _ASM_HW_IRQ_H
++#define _ASM_HW_IRQ_H
++
++/*
++ *	linux/include/asm/hw_irq.h
++ *
++ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ *	moved some of the old arch/i386/kernel/irq.h to here. VY
++ *
++ *	IRQ/IPI changes taken from work by Thomas Radke
++ *	<tomsoft at informatik.tu-chemnitz.de>
++ *
++ *	hacked by Andi Kleen for x86-64.
++ * 
++ *  $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $
++ */
++
++#ifndef __ASSEMBLY__
++#include <linux/config.h>
++#include <asm/atomic.h>
++#include <asm/irq.h>
++#include <linux/profile.h>
++#include <linux/smp.h>
++
++struct hw_interrupt_type;
++#endif
++
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR	0x20
++
++#define IA32_SYSCALL_VECTOR	0x80
++
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ *  some of the following vectors are 'rare', they are merged
++ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ *  TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ *  Vectors 0xf0-0xf9 are free (reserved for future Linux use).
++ */
++#ifndef CONFIG_XEN
++#define SPURIOUS_APIC_VECTOR	0xff
++#define ERROR_APIC_VECTOR	0xfe
++#define INVALIDATE_TLB_VECTOR	0xfd
++#define RESCHEDULE_VECTOR	0xfc
++#define TASK_MIGRATION_VECTOR	0xfb
++#define CALL_FUNCTION_VECTOR	0xfa
++#define KDB_VECTOR	0xf9
++
++#define THERMAL_APIC_VECTOR	0xf0
++#endif
++
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR	0xef
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR	0x31
++#define FIRST_SYSTEM_VECTOR	0xef   /* duplicated in irq.h */
++
++
++#ifndef __ASSEMBLY__
++extern u8 irq_vector[NR_IRQ_VECTORS];
++#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
++#define AUTO_ASSIGN		-1
++
++/*
++ * Various low-level irq details needed by irq.c, process.c,
++ * time.c, io_apic.c and smp.c
++ *
++ * Interrupt entry/exit code at both C and assembly level
++ */
++
++extern void disable_8259A_irq(unsigned int irq);
++extern void enable_8259A_irq(unsigned int irq);
++extern int i8259A_irq_pending(unsigned int irq);
++extern void make_8259A_irq(unsigned int irq);
++extern void init_8259A(int aeoi);
++extern void FASTCALL(send_IPI_self(int vector));
++extern void init_VISWS_APIC_irqs(void);
++extern void setup_IO_APIC(void);
++extern void disable_IO_APIC(void);
++extern void print_IO_APIC(void);
++extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
++extern void send_IPI(int dest, int vector);
++extern void setup_ioapic_dest(void);
++
++extern unsigned long io_apic_irqs;
++
++extern atomic_t irq_err_count;
++extern atomic_t irq_mis_count;
++
++#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#include <asm/ptrace.h>
++
++#define IRQ_NAME2(nr) nr##_interrupt(void)
++#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
++
++/*
++ *	SMP has a few special interrupts for IPI messages
++ */
++
++#define BUILD_IRQ(nr) \
++asmlinkage void IRQ_NAME(nr); \
++__asm__( \
++"\n.p2align\n" \
++"IRQ" #nr "_interrupt:\n\t" \
++	"push $" #nr "-256 ; " \
++	"jmp common_interrupt");
++
++extern void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i);
++
++#define platform_legacy_irq(irq)	((irq) < 16)
++
++#endif
++
++#endif /* _ASM_HW_IRQ_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hypercall.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/hypercall.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hypercall.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/hypercall.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,323 @@
++/******************************************************************************
++ * hypercall.h
++ * 
++ * Linux-specific hypervisor handling.
++ * 
++ * Copyright (c) 2002-2004, K A Fraser
++ * 
++ * 64-bit updates:
++ *   Benjamin Liu <benjamin.liu at intel.com>
++ *   Jun Nakajima <jun.nakajima at intel.com>
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <asm-xen/xen-public/xen.h>
++#include <asm-xen/xen-public/sched.h>
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#define _hypercall0(type, name)			\
++({						\
++	long __res;				\
++	asm volatile (				\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res)			\
++		:				\
++		: "memory" );			\
++	(type)__res;				\
++})
++
++#define _hypercall1(type, name, a1)				\
++({								\
++	long __res, __ign1;					\
++	asm volatile (						\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=D" (__ign1)			\
++		: "1" ((long)(a1))				\
++		: "memory" );					\
++	(type)__res;						\
++})
++
++#define _hypercall2(type, name, a1, a2)				\
++({								\
++	long __res, __ign1, __ign2;				\
++	asm volatile (						\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=D" (__ign1), "=S" (__ign2)	\
++		: "1" ((long)(a1)), "2" ((long)(a2))		\
++		: "memory" );					\
++	(type)__res;						\
++})
++
++#define _hypercall3(type, name, a1, a2, a3)			\
++({								\
++	long __res, __ign1, __ign2, __ign3;			\
++	asm volatile (						\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=D" (__ign1), "=S" (__ign2), 	\
++		"=d" (__ign3)					\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3))				\
++		: "memory" );					\
++	(type)__res;						\
++})
++
++#define _hypercall4(type, name, a1, a2, a3, a4)			\
++({								\
++	long __res, __ign1, __ign2, __ign3;			\
++	asm volatile (						\
++		"movq %7,%%r10; "				\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
++		"=d" (__ign3)					\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3)), "g" ((long)(a4))		\
++		: "memory", "r10" );				\
++	(type)__res;						\
++})
++
++#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
++({								\
++	long __res, __ign1, __ign2, __ign3;			\
++	asm volatile (						\
++		"movq %7,%%r10; movq %8,%%r8; "			\
++		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
++		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
++		"=d" (__ign3)					\
++		: "1" ((long)(a1)), "2" ((long)(a2)),		\
++		"3" ((long)(a3)), "g" ((long)(a4)),		\
++		"g" ((long)(a5))				\
++		: "memory", "r10", "r8" );			\
++	(type)__res;						\
++})
++
++static inline int
++HYPERVISOR_set_trap_table(
++	trap_info_t *table)
++{
++	return _hypercall1(int, set_trap_table, table);
++}
++
++static inline int
++HYPERVISOR_mmu_update(
++	mmu_update_t *req, int count, int *success_count, domid_t domid)
++{
++	return _hypercall4(int, mmu_update, req, count, success_count, domid);
++}
++
++static inline int
++HYPERVISOR_mmuext_op(
++	struct mmuext_op *op, int count, int *success_count, domid_t domid)
++{
++	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
++}
++
++static inline int
++HYPERVISOR_set_gdt(
++	unsigned long *frame_list, int entries)
++{
++	return _hypercall2(int, set_gdt, frame_list, entries);
++}
++
++static inline int
++HYPERVISOR_stack_switch(
++	unsigned long ss, unsigned long esp)
++{
++	return _hypercall2(int, stack_switch, ss, esp);
++}
++
++static inline int
++HYPERVISOR_set_callbacks(
++	unsigned long event_address, unsigned long failsafe_address, 
++	unsigned long syscall_address)
++{
++	return _hypercall3(int, set_callbacks,
++			   event_address, failsafe_address, syscall_address);
++}
++
++static inline int
++HYPERVISOR_fpu_taskswitch(
++	int set)
++{
++	return _hypercall1(int, fpu_taskswitch, set);
++}
++
++static inline int
++HYPERVISOR_sched_op(
++	int cmd, unsigned long arg)
++{
++	return _hypercall2(int, sched_op, cmd, arg);
++}
++
++static inline long
++HYPERVISOR_set_timer_op(
++	u64 timeout)
++{
++	return _hypercall1(long, set_timer_op, timeout);
++}
++
++static inline int
++HYPERVISOR_dom0_op(
++	dom0_op_t *dom0_op)
++{
++	dom0_op->interface_version = DOM0_INTERFACE_VERSION;
++	return _hypercall1(int, dom0_op, dom0_op);
++}
++
++static inline int
++HYPERVISOR_set_debugreg(
++	int reg, unsigned long value)
++{
++	return _hypercall2(int, set_debugreg, reg, value);
++}
++
++static inline unsigned long
++HYPERVISOR_get_debugreg(
++	int reg)
++{
++	return _hypercall1(unsigned long, get_debugreg, reg);
++}
++
++static inline int
++HYPERVISOR_update_descriptor(
++	unsigned long ma, unsigned long word)
++{
++	return _hypercall2(int, update_descriptor, ma, word);
++}
++
++static inline int
++HYPERVISOR_memory_op(
++	unsigned int cmd, void *arg)
++{
++	return _hypercall2(int, memory_op, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_multicall(
++	void *call_list, int nr_calls)
++{
++	return _hypercall2(int, multicall, call_list, nr_calls);
++}
++
++static inline int
++HYPERVISOR_update_va_mapping(
++	unsigned long va, pte_t new_val, unsigned long flags)
++{
++	return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
++}
++
++static inline int
++HYPERVISOR_event_channel_op(
++	void *op)
++{
++	return _hypercall1(int, event_channel_op, op);
++}
++
++static inline int
++HYPERVISOR_xen_version(
++	int cmd, void *arg)
++{
++	return _hypercall2(int, xen_version, cmd, arg);
++}
++
++static inline int
++HYPERVISOR_console_io(
++	int cmd, int count, char *str)
++{
++	return _hypercall3(int, console_io, cmd, count, str);
++}
++
++static inline int
++HYPERVISOR_physdev_op(
++	void *physdev_op)
++{
++	return _hypercall1(int, physdev_op, physdev_op);
++}
++
++static inline int
++HYPERVISOR_grant_table_op(
++	unsigned int cmd, void *uop, unsigned int count)
++{
++	return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
++
++static inline int
++HYPERVISOR_update_va_mapping_otherdomain(
++	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++	return _hypercall4(int, update_va_mapping_otherdomain, va,
++			   new_val.pte, flags, domid);
++}
++
++static inline int
++HYPERVISOR_vm_assist(
++	unsigned int cmd, unsigned int type)
++{
++	return _hypercall2(int, vm_assist, cmd, type);
++}
++
++static inline int
++HYPERVISOR_vcpu_op(
++	int cmd, int vcpuid, void *extra_args)
++{
++	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
++}
++
++static inline int
++HYPERVISOR_set_segment_base(
++	int reg, unsigned long value)
++{
++	return _hypercall2(int, set_segment_base, reg, value);
++}
++
++static inline int
++HYPERVISOR_suspend(
++	unsigned long srec)
++{
++	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
++			   SHUTDOWN_suspend, srec);
++}
++
++static inline int
++HYPERVISOR_nmi_op(
++	unsigned long op,
++	unsigned long arg)
++{
++	return _hypercall2(int, nmi_op, op, arg);
++}
++
++#endif /* __HYPERCALL_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hypervisor.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/hypervisor.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/hypervisor.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,2 @@
++
++#include <asm-i386/hypervisor.h>
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/io.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/io.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/io.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/io.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,374 @@
++#ifndef _ASM_IO_H
++#define _ASM_IO_H
++
++#include <linux/config.h>
++#include <asm/fixmap.h>
++/*
++ * This file contains the definitions for the x86 IO instructions
++ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
++ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
++ * versions of the single-IO instructions (inb_p/inw_p/..).
++ *
++ * This file is not meant to be obfuscating: it's just complicated
++ * to (a) handle it all in a way that makes gcc able to optimize it
++ * as well as possible and (b) trying to avoid writing the same thing
++ * over and over again with slight variations and possibly making a
++ * mistake somewhere.
++ */
++
++/*
++ * Thanks to James van Artsdalen for a better timing-fix than
++ * the two short jumps: using outb's to a nonexistent port seems
++ * to guarantee better timings even on fast machines.
++ *
++ * On the other hand, I'd like to be sure of a non-existent port:
++ * I feel a bit unsafe about using 0x80 (should be safe, though)
++ *
++ *		Linus
++ */
++
++ /*
++  *  Bit simplified and optimized by Jan Hubicka
++  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
++  *
++  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
++  *  isa_read[wl] and isa_write[wl] fixed
++  *  - Arnaldo Carvalho de Melo <acme at conectiva.com.br>
++  */
++
++#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
++
++#ifdef REALLY_SLOW_IO
++#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
++#else
++#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
++#endif
++
++/*
++ * Talk about misusing macros..
++ */
++#define __OUT1(s,x) \
++extern inline void out##s(unsigned x value, unsigned short port) {
++
++#define __OUT2(s,s1,s2) \
++__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
++
++#define __OUT(s,s1,x) \
++__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
++__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
++
++#define __IN1(s) \
++extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
++
++#define __IN2(s,s1,s2) \
++__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
++
++#define __IN(s,s1,i...) \
++__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
++__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
++
++#define __INS(s) \
++extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
++{ __asm__ __volatile__ ("rep ; ins" #s \
++: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++
++#define __OUTS(s) \
++extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
++{ __asm__ __volatile__ ("rep ; outs" #s \
++: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
++
++#define RETURN_TYPE unsigned char
++__IN(b,"")
++#undef RETURN_TYPE
++#define RETURN_TYPE unsigned short
++__IN(w,"")
++#undef RETURN_TYPE
++#define RETURN_TYPE unsigned int
++__IN(l,"")
++#undef RETURN_TYPE
++
++__OUT(b,"b",char)
++__OUT(w,"w",short)
++__OUT(l,,int)
++
++__INS(b)
++__INS(w)
++__INS(l)
++
++__OUTS(b)
++__OUTS(w)
++__OUTS(l)
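As a rough illustration of what the macro block above generates (this expansion is reconstructed by hand and is not part of the patch), the instantiation __OUT(b,"b",char) produces approximately:

extern inline void outb(unsigned char value, unsigned short port)
{
	/* "out" #s " %" s1 "0,%" s2 "1" with s=b, s1="b", s2="w" */
	__asm__ __volatile__ ("outb %b0,%w1" : : "a" (value), "Nd" (port));
}

extern inline void outb_p(unsigned char value, unsigned short port)
{
	/* the _p variant appends __FULL_SLOW_DOWN_IO, i.e. a dummy write to port 0x80 */
	__asm__ __volatile__ ("outb %b0,%w1" "\noutb %%al,$0x80"
			      : : "a" (value), "Nd" (port));
}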
++
++#define IO_SPACE_LIMIT 0xffff
++
++#if defined(__KERNEL__) && __x86_64__
++
++#include <linux/vmalloc.h>
++
++#ifndef __i386__
++/*
++ * Change virtual addresses to physical addresses and vv.
++ * These are pretty trivial
++ */
++extern inline unsigned long virt_to_phys(volatile void * address)
++{
++	return __pa(address);
++}
++
++extern inline void * phys_to_virt(unsigned long address)
++{
++	return __va(address);
++}
++
++
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++#endif
++
++/*
++ * Change "struct page" to physical address.
++ */
++#ifdef CONFIG_DISCONTIGMEM
++#include <asm/mmzone.h>
++#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
++
++#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
++				  (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
++				  (unsigned long) (bv)->bv_offset)
++
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
++	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
++	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++	  bvec_to_pseudophys((vec2))))
++#else
++// #define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
++#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
++
++#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
++				  (unsigned long) bio_offset((bio)))
++#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
++				  (unsigned long) (bv)->bv_offset)
++
++#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
++	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
++	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
++	  bvec_to_pseudophys((vec2))))
++#endif
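A minimal sketch of the distinction encoded above (example_page_to_bus is a made-up helper, not part of the patch): page_to_pseudophys() gives the guest's contiguous pseudo-physical view, while page_to_phys() translates it into the machine address a device must be handed for DMA.

static inline dma_addr_t example_page_to_bus(struct page *pg)
{
	dma_addr_t pseudo = page_to_pseudophys(pg);	/* guest-local view        */
	dma_addr_t bus    = page_to_phys(pg);		/* machine address for DMA */

	/* pseudo and bus differ whenever Xen backs the guest with
	 * non-contiguous machine frames, which is exactly why
	 * BIOVEC_PHYS_MERGEABLE() above has to check both spaces. */
	(void)pseudo;
	return bus;
}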
++
++#include <asm-generic/iomap.h>
++
++extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
++
++extern inline void __iomem * ioremap (unsigned long offset, unsigned long size)
++{
++	return __ioremap(offset, size, 0);
++}
++
++/*
++ * This one maps high address device memory and turns off caching for that area.
++ * it's useful if some control registers are in such an area and write combining
++ * or read caching is not desirable:
++ */
++extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
++extern void iounmap(volatile void __iomem *addr);
++
++/*
++ * ISA I/O bus memory addresses are 1:1 with the physical address.
++ */
++
++#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
++#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
++#ifdef CONFIG_XEN_PHYSDEV_ACCESS
++#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
++#else
++#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
++#endif
++/*
++ * However PCI ones are not necessarily 1:1 and therefore these interfaces
++ * are forbidden in portable PCI drivers.
++ *
++ * Allow them on x86 for legacy drivers, though.
++ */
++#define virt_to_bus(_x) phys_to_machine(__pa(_x))
++#define bus_to_virt(_x) __va(machine_to_phys(_x))
++
++/*
++ * readX/writeX() are used to access memory mapped devices. On some
++ * architectures the memory mapped IO stuff needs to be accessed
++ * differently. On the x86 architecture, we just read/write the
++ * memory location directly.
++ */
++
++static inline __u8 __readb(const volatile void __iomem *addr)
++{
++	return *(__force volatile __u8 *)addr;
++}
++static inline __u16 __readw(const volatile void __iomem *addr)
++{
++	return *(__force volatile __u16 *)addr;
++}
++static inline __u32 __readl(const volatile void __iomem *addr)
++{
++	return *(__force volatile __u32 *)addr;
++}
++static inline __u64 __readq(const volatile void __iomem *addr)
++{
++	return *(__force volatile __u64 *)addr;
++}
++#define readb(x) __readb(x)
++#define readw(x) __readw(x)
++#define readl(x) __readl(x)
++#define readq(x) __readq(x)
++#define readb_relaxed(a) readb(a)
++#define readw_relaxed(a) readw(a)
++#define readl_relaxed(a) readl(a)
++#define readq_relaxed(a) readq(a)
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
++#define __raw_readq readq
++
++#define mmiowb()
++
++#ifdef CONFIG_UNORDERED_IO
++static inline void __writel(__u32 val, volatile void __iomem *addr)
++{
++	volatile __u32 __iomem *target = addr;
++	asm volatile("movnti %1,%0"
++		     : "=m" (*target)
++		     : "r" (val) : "memory");
++}
++
++static inline void __writeq(__u64 val, volatile void __iomem *addr)
++{
++	volatile __u64 __iomem *target = addr;
++	asm volatile("movnti %1,%0"
++		     : "=m" (*target)
++		     : "r" (val) : "memory");
++}
++#else
++static inline void __writel(__u32 b, volatile void __iomem *addr)
++{
++	*(__force volatile __u32 *)addr = b;
++}
++static inline void __writeq(__u64 b, volatile void __iomem *addr)
++{
++	*(__force volatile __u64 *)addr = b;
++}
++#endif
++static inline void __writeb(__u8 b, volatile void __iomem *addr)
++{
++	*(__force volatile __u8 *)addr = b;
++}
++static inline void __writew(__u16 b, volatile void __iomem *addr)
++{
++	*(__force volatile __u16 *)addr = b;
++}
++#define writeq(val,addr) __writeq((val),(addr))
++#define writel(val,addr) __writel((val),(addr))
++#define writew(val,addr) __writew((val),(addr))
++#define writeb(val,addr) __writeb((val),(addr))
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
++#define __raw_writeq writeq
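A short usage sketch for the MMIO accessors above; the BAR address, mapping size and register offsets are hypothetical and the helper itself is illustrative, not part of the patch.

static inline unsigned int example_mmio_poke(unsigned long bar_phys)
{
	void __iomem *regs = ioremap_nocache(bar_phys, 0x1000);
	unsigned int status = 0;

	if (regs) {
		writel(0x1, regs + 0x04);	/* hypothetical control register */
		status = readl(regs + 0x08);	/* hypothetical status register  */
		iounmap(regs);
	}
	return status;
}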
++
++void __memcpy_fromio(void*,unsigned long,unsigned);
++void __memcpy_toio(unsigned long,const void*,unsigned);
++
++static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
++{
++	__memcpy_fromio(to,(unsigned long)from,len);
++}
++static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
++{
++	__memcpy_toio((unsigned long)to,from,len);
++}
++
++void memset_io(volatile void __iomem *a, int b, size_t c);
++
++/*
++ * ISA space is 'always mapped' on a typical x86 system, no need to
++ * explicitly ioremap() it. The fact that the ISA IO space is mapped
++ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
++ * are physical addresses. The following constant pointer can be
++ * used as the IO-area pointer (it can be iounmapped as well, so the
++ * analogy with PCI is quite large):
++ */
++#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
++
++#define isa_readb(a) readb(__ISA_IO_base + (a))
++#define isa_readw(a) readw(__ISA_IO_base + (a))
++#define isa_readl(a) readl(__ISA_IO_base + (a))
++#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
++#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
++#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
++#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
++#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
++#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))
++
++
++/*
++ * Again, x86-64 does not require mem IO specific functions.
++ */
++
++#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void *)(b),(c),(d))
++#define isa_eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))
++
++/**
++ *	check_signature		-	find BIOS signatures
++ *	@io_addr: mmio address to check 
++ *	@signature:  signature block
++ *	@length: length of signature
++ *
++ *	Perform a signature comparison with the mmio address io_addr. This
++ *	address should have been obtained by ioremap.
++ *	Returns 1 on a match.
++ */
++ 
++static inline int check_signature(void __iomem *io_addr,
++	const unsigned char *signature, int length)
++{
++	int retval = 0;
++	do {
++		if (readb(io_addr) != *signature)
++			goto out;
++		io_addr++;
++		signature++;
++		length--;
++	} while (length);
++	retval = 1;
++out:
++	return retval;
++}
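A hedged usage sketch for check_signature(); the probed range is the traditional BIOS ROM window and the signature string is a made-up example, none of it comes from the patch.

static inline int example_find_bios_string(void)
{
	void __iomem *rom = ioremap(0xF0000, 0x10000);	/* legacy BIOS area */
	int found = 0;

	if (rom) {
		found = check_signature(rom, (const unsigned char *)"EXAMPLE", 7);
		iounmap(rom);
	}
	return found;
}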
++
++/* Nothing to do */
++
++#define dma_cache_inv(_start,_size)		do { } while (0)
++#define dma_cache_wback(_start,_size)		do { } while (0)
++#define dma_cache_wback_inv(_start,_size)	do { } while (0)
++
++#define flush_write_buffers() 
++
++extern int iommu_bio_merge;
++#define BIO_VMERGE_BOUNDARY iommu_bio_merge
++
++/*
++ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
++ * access
++ */
++#define xlate_dev_mem_ptr(p)	__va(p)
++
++/*
++ * Convert a virtual cached pointer to an uncached pointer
++ */
++#define xlate_dev_kmem_ptr(p)	p
++
++#endif /* __KERNEL__ */
++
++#define ARCH_HAS_DEV_MEM
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/irq.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/irq.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/irq.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/irq.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,36 @@
++#ifndef _ASM_IRQ_H
++#define _ASM_IRQ_H
++
++/*
++ *	linux/include/asm/irq.h
++ *
++ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
++ *
++ *	IRQ/IPI changes taken from work by Thomas Radke
++ *	<tomsoft@informatik.tu-chemnitz.de>
++ */
++
++#include <linux/config.h>
++#include <linux/sched.h>
++/* include comes from machine specific directory */
++#include "irq_vectors.h"
++#include <asm/thread_info.h>
++
++static __inline__ int irq_canonicalize(int irq)
++{
++	return ((irq == 2) ? 9 : irq);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#define ARCH_HAS_NMI_WATCHDOG		/* See include/linux/nmi.h */
++#endif
++
++#define KDB_VECTOR	0xf9
++
++# define irq_ctx_init(cpu) do { } while (0)
++
++struct irqaction;
++struct pt_regs;
++int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
++
++#endif /* _ASM_IRQ_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/io_ports.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/io_ports.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/io_ports.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/io_ports.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,30 @@
++/*
++ *  arch/i386/mach-generic/io_ports.h
++ *
++ *  Machine specific IO port address definition for generic.
++ *  Written by Osamu Tomita <tomita@cinet.co.jp>
++ */
++#ifndef _MACH_IO_PORTS_H
++#define _MACH_IO_PORTS_H
++
++/* i8253A PIT registers */
++#define PIT_MODE		0x43
++#define PIT_CH0			0x40
++#define PIT_CH2			0x42
++
++/* i8259A PIC registers */
++#define PIC_MASTER_CMD		0x20
++#define PIC_MASTER_IMR		0x21
++#define PIC_MASTER_ISR		PIC_MASTER_CMD
++#define PIC_MASTER_POLL		PIC_MASTER_ISR
++#define PIC_MASTER_OCW3		PIC_MASTER_ISR
++#define PIC_SLAVE_CMD		0xa0
++#define PIC_SLAVE_IMR		0xa1
++
++/* i8259A PIC related value */
++#define PIC_CASCADE_IR		2
++#define MASTER_ICW4_DEFAULT	0x01
++#define SLAVE_ICW4_DEFAULT	0x01
++#define PIC_ICW4_AEOI		2
++
++#endif /* !_MACH_IO_PORTS_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,123 @@
++/*
++ * This file should contain #defines for all of the interrupt vector
++ * numbers used by this architecture.
++ *
++ * In addition, there are some standard defines:
++ *
++ *	FIRST_EXTERNAL_VECTOR:
++ *		The first free place for external interrupts
++ *
++ *	SYSCALL_VECTOR:
++ *		The IRQ vector a syscall makes the user to kernel transition
++ *		under.
++ *
++ *	TIMER_IRQ:
++ *		The IRQ number the timer interrupt comes in at.
++ *
++ *	NR_IRQS:
++ *		The total number of interrupt vectors (including all the
++ *		architecture specific interrupts) needed.
++ *
++ */			
++#ifndef _ASM_IRQ_VECTORS_H
++#define _ASM_IRQ_VECTORS_H
++
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR	0x20
++
++#define SYSCALL_VECTOR		0x80
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++#if 0
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ *  some of the following vectors are 'rare', they are merged
++ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ *  TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ */
++#define INVALIDATE_TLB_VECTOR	0xfd
++#define RESCHEDULE_VECTOR	0xfc
++#define CALL_FUNCTION_VECTOR	0xfb
++
++#define THERMAL_APIC_VECTOR	0xf0
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR	0xef
++#endif
++
++#define SPURIOUS_APIC_VECTOR	0xff
++#define ERROR_APIC_VECTOR	0xfe
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR	0x31
++#define FIRST_SYSTEM_VECTOR	0xef
++
++/*
++ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
++ * Right now the APIC is mostly only used for SMP.
++ * 256 vectors is an architectural limit. (we can have
++ * more than 256 devices theoretically, but they will
++ * have to use shared interrupts)
++ * Since vectors 0x00-0x1f are used/reserved for the CPU,
++ * the usable vector space is 0x20-0xff (224 vectors)
++ */
++
++#define RESCHEDULE_VECTOR	0
++#define CALL_FUNCTION_VECTOR	1
++#define NR_IPIS			2
++
++/*
++ * The maximum number of vectors supported by i386 processors
++ * is limited to 256. For processors other than i386, NR_VECTORS
++ * should be changed accordingly.
++ */
++#define NR_VECTORS 256
++
++#define FPU_IRQ			13
++
++#define	FIRST_VM86_IRQ		3
++#define LAST_VM86_IRQ		15
++#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
++
++/*
++ * The flat IRQ space is divided into two regions:
++ *  1. A one-to-one mapping of real physical IRQs. This space is only used
++ *     if we have physical device-access privilege. This region is at the 
++ *     start of the IRQ space so that existing device drivers do not need
++ *     to be modified to translate physical IRQ numbers into our IRQ space.
++ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ *     are bound using the provided bind/unbind functions.
++ */
++
++#define PIRQ_BASE		0
++#define NR_PIRQS		256
++
++#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS		256
++
++#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS		NR_IRQS
++
++#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
++#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
++
++#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
++#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
++
++#endif /* _ASM_IRQ_VECTORS_H */
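With the values defined above (PIRQ_BASE 0, NR_PIRQS 256, hence DYNIRQ_BASE 256), the translation macros behave as in this small illustration, which is not part of the patch:

/* Physical IRQs keep their native numbers; dynamic (event-channel) IRQs
 * start after the 256 physical slots. */
int example_timer_irq  = pirq_to_irq(0);	/* == 0             */
int example_evtchn_irq = dynirq_to_irq(3);	/* == 256 + 3 = 259 */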
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/mach_time.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/mach_time.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/mach_time.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/mach_time.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,122 @@
++/*
++ *  include/asm-i386/mach-default/mach_time.h
++ *
++ *  Machine specific set RTC function for generic.
++ *  Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
++ */
++#ifndef _MACH_TIME_H
++#define _MACH_TIME_H
++
++#include <asm-i386/mc146818rtc.h>
++
++/* for check timing call set_rtc_mmss() 500ms     */
++/* used in arch/i386/time.c::do_timer_interrupt() */
++#define USEC_AFTER	500000
++#define USEC_BEFORE	500000
++
++/*
++ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
++ * called 500 ms after the second nowtime has started, because when
++ * nowtime is written into the registers of the CMOS clock, it will
++ * jump to the next second precisely 500 ms later. Check the Motorola
++ * MC146818A or Dallas DS12887 data sheet for details.
++ *
++ * BUG: This routine does not handle hour overflow properly; it just
++ *      sets the minutes. Usually you'll only notice that after reboot!
++ */
++static inline int mach_set_rtc_mmss(unsigned long nowtime)
++{
++	int retval = 0;
++	int real_seconds, real_minutes, cmos_minutes;
++	unsigned char save_control, save_freq_select;
++
++	save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
++	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
++
++	save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
++	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
++
++	cmos_minutes = CMOS_READ(RTC_MINUTES);
++	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
++		BCD_TO_BIN(cmos_minutes);
++
++	/*
++	 * since we're only adjusting minutes and seconds,
++	 * don't interfere with hour overflow. This avoids
++	 * messing with unknown time zones but requires your
++	 * RTC not to be off by more than 15 minutes
++	 */
++	real_seconds = nowtime % 60;
++	real_minutes = nowtime / 60;
++	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
++		real_minutes += 30;		/* correct for half hour time zone */
++	real_minutes %= 60;
++
++	if (abs(real_minutes - cmos_minutes) < 30) {
++		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++			BIN_TO_BCD(real_seconds);
++			BIN_TO_BCD(real_minutes);
++		}
++		CMOS_WRITE(real_seconds,RTC_SECONDS);
++		CMOS_WRITE(real_minutes,RTC_MINUTES);
++	} else {
++		printk(KERN_WARNING
++		       "set_rtc_mmss: can't update from %d to %d\n",
++		       cmos_minutes, real_minutes);
++		retval = -1;
++	}
++
++	/* The following flags have to be released exactly in this order,
++	 * otherwise the DS12887 (popular MC146818A clone with integrated
++	 * battery and quartz) will not reset the oscillator and will not
++	 * update precisely 500 ms later. You won't find this mentioned in
++	 * the Dallas Semiconductor data sheets, but who believes data
++	 * sheets anyway ...                           -- Markus Kuhn
++	 */
++	CMOS_WRITE(save_control, RTC_CONTROL);
++	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++
++	return retval;
++}
++
++static inline unsigned long mach_get_cmos_time(void)
++{
++	unsigned int year, mon, day, hour, min, sec;
++	int i;
++
++	/* The Linux interpretation of the CMOS clock register contents:
++	 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
++	 * RTC registers show the second which has precisely just started.
++	 * Let's hope other operating systems interpret the RTC the same way.
++	 */
++	/* read RTC exactly on falling edge of update flag */
++	for (i = 0 ; i < 1000000 ; i++)	/* may take up to 1 second... */
++		if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
++			break;
++	for (i = 0 ; i < 1000000 ; i++)	/* must try at least 2.228 ms */
++		if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
++			break;
++	do { /* Isn't this overkill ? UIP above should guarantee consistency */
++		sec = CMOS_READ(RTC_SECONDS);
++		min = CMOS_READ(RTC_MINUTES);
++		hour = CMOS_READ(RTC_HOURS);
++		day = CMOS_READ(RTC_DAY_OF_MONTH);
++		mon = CMOS_READ(RTC_MONTH);
++		year = CMOS_READ(RTC_YEAR);
++	} while (sec != CMOS_READ(RTC_SECONDS));
++	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
++	  {
++	    BCD_TO_BIN(sec);
++	    BCD_TO_BIN(min);
++	    BCD_TO_BIN(hour);
++	    BCD_TO_BIN(day);
++	    BCD_TO_BIN(mon);
++	    BCD_TO_BIN(year);
++	  }
++	if ((year += 1900) < 1970)
++		year += 100;
++
++	return mktime(year, mon, day, hour, min, sec);
++}
++
++#endif /* !_MACH_TIME_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,48 @@
++/*
++ *  include/asm-i386/mach-default/mach_timer.h
++ *
++ *  Machine specific calibrate_tsc() for generic.
++ *  Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
++ */
++/* ------ Calibrate the TSC ------- 
++ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
++ * Too much 64-bit arithmetic here to do this cleanly in C, and for
++ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
++ * output busy loop as low as possible. We avoid reading the CTC registers
++ * directly because of the awkward 8-bit access mechanism of the 82C54
++ * device.
++ */
++#ifndef _MACH_TIMER_H
++#define _MACH_TIMER_H
++
++#define CALIBRATE_LATCH	(5 * LATCH)
++
++static inline void mach_prepare_counter(void)
++{
++       /* Set the Gate high, disable speaker */
++	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
++
++	/*
++	 * Now let's take care of CTC channel 2
++	 *
++	 * Set the Gate high, program CTC channel 2 for mode 0,
++	 * (interrupt on terminal count mode), binary count,
++	 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
++	 *
++	 * Some devices need a delay here.
++	 */
++	outb(0xb0, 0x43);			/* binary, mode 0, LSB/MSB, Ch 2 */
++	outb_p(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
++	outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
++}
++
++static inline void mach_countup(unsigned long *count_p)
++{
++	unsigned long count = 0;
++	do {
++		count++;
++	} while ((inb_p(0x61) & 0x20) == 0);
++	*count_p = count;
++}
++
++#endif /* !_MACH_TIMER_H */
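A rough sketch of how the two helpers above are typically combined to calibrate the TSC against the PIT channel-2 countdown; rdtscll() is assumed to come from <asm/msr.h>, and the routine below is illustrative, not part of the patch.

#include <asm/msr.h>	/* rdtscll(), assumed available */

static inline unsigned long example_calibrate_tsc_cycles(void)
{
	unsigned long long start, end;
	unsigned long count;

	mach_prepare_counter();	/* gate channel 2 and load CALIBRATE_LATCH */
	rdtscll(start);
	mach_countup(&count);	/* spin until the counter reaches terminal count */
	rdtscll(end);

	/* TSC cycles elapsed while the PIT counted down 5 * LATCH clocks */
	return (unsigned long)(end - start);
}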
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,52 @@
++/**
++ * machine_specific_memory_setup - Hook for machine specific memory setup.
++ *
++ * Description:
++ *	This is included late in kernel/setup.c so that it can make
++ *	use of all of the static functions.
++ **/
++
++static char * __init machine_specific_memory_setup(void)
++{
++	char *who;
++	unsigned long start_pfn, max_pfn;
++
++	who = "Xen";
++
++	start_pfn = 0;
++	max_pfn = xen_start_info->nr_pages;
++
++	e820.nr_map = 0;
++	add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM);
++
++	return who;
++}
++
++void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
++{
++	clear_bit(X86_FEATURE_VME, c->x86_capability);
++	clear_bit(X86_FEATURE_DE, c->x86_capability);
++	clear_bit(X86_FEATURE_PSE, c->x86_capability);
++	clear_bit(X86_FEATURE_PGE, c->x86_capability);
++	clear_bit(X86_FEATURE_SEP, c->x86_capability);
++	if (!(xen_start_info->flags & SIF_PRIVILEGED))
++		clear_bit(X86_FEATURE_MTRR, c->x86_capability);
++}
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++static void __init machine_specific_arch_setup(void)
++{
++	HYPERVISOR_set_callbacks(
++                (unsigned long) hypervisor_callback,
++                (unsigned long) failsafe_callback,
++                (unsigned long) system_call);
++
++#ifdef CONFIG_X86_LOCAL_APIC
++	HYPERVISOR_nmi_op(XENNMI_register_callback, (unsigned long)&nmi);
++#endif
++
++	machine_specific_modify_cpu_capabilities(&boot_cpu_data);
++}
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
++
++#define ARCH_SETUP machine_specific_arch_setup();
++
++static void __init machine_specific_arch_setup(void);
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mmu_context.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mmu_context.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mmu_context.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,132 @@
++#ifndef __X86_64_MMU_CONTEXT_H
++#define __X86_64_MMU_CONTEXT_H
++
++#include <linux/config.h>
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/page.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
++
++/*
++ * possibly do the LDT unload here?
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
++
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
++{
++#if 0 /*  XEN: no lazy tlb */
++	if (read_pda(mmu_state) == TLBSTATE_OK) 
++		write_pda(mmu_state, TLBSTATE_LAZY);
++#endif
++}
++
++#define prepare_arch_switch(rq,next)	__prepare_arch_switch()
++#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
++#define task_running(rq, p)		((rq)->curr == (p))
++
++static inline void __prepare_arch_switch(void)
++{
++	/*
++	 * Save away %es, %ds, %fs and %gs. Must happen before reload
++	 * of cr3/ldt (i.e., not in __switch_to).
++	 */
++	__asm__ __volatile__ (
++		"mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
++		: "=m" (current->thread.es),
++		  "=m" (current->thread.ds),
++		  "=m" (current->thread.fsindex),
++		  "=m" (current->thread.gsindex) );
++
++	if (current->thread.ds)
++		__asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
++
++	if (current->thread.es)
++		__asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
++
++	if (current->thread.fsindex) {
++		__asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
++		current->thread.fs = 0;
++	}
++
++	if (current->thread.gsindex) {
++		load_gs_index(0);
++		current->thread.gs = 0;
++	}
++}
++
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
++			     struct task_struct *tsk)
++{
++	unsigned cpu = smp_processor_id();
++	struct mmuext_op _op[3], *op = _op;
++
++	if (likely(prev != next)) {
++		if (!next->context.pinned)
++			mm_pin(next);
++
++		/* stop flush ipis for the previous mm */
++		clear_bit(cpu, &prev->cpu_vm_mask);
++#if 0  /* XEN: no lazy tlb */
++		write_pda(mmu_state, TLBSTATE_OK);
++		write_pda(active_mm, next);
++#endif
++		set_bit(cpu, &next->cpu_vm_mask);
++
++		/* load_cr3(next->pgd) */
++		per_cpu(cur_pgd, smp_processor_id()) = next->pgd;
++		op->cmd = MMUEXT_NEW_BASEPTR;
++		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++		op++;
++
++		/* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
++		op->cmd = MMUEXT_NEW_USER_BASEPTR;
++		op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
++		op++;
++		
++		if (unlikely(next->context.ldt != prev->context.ldt)) {
++			/* load_LDT_nolock(&next->context, cpu) */
++			op->cmd = MMUEXT_SET_LDT;
++			op->arg1.linear_addr = (unsigned long)next->context.ldt;
++			op->arg2.nr_ents     = next->context.size;
++			op++;
++		}
++
++		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
++	}
++
++#if 0 /* XEN: no lazy tlb */
++	else {
++		write_pda(mmu_state, TLBSTATE_OK);
++		if (read_pda(active_mm) != next)
++			out_of_line_bug();
++		if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
++			/* We were in lazy tlb mode and leave_mm disabled 
++			 * tlb flush IPI delivery. We must reload CR3
++			 * to make sure to use no freed page tables.
++			 */
++                        load_cr3(next->pgd);
++                        xen_new_user_pt(__pa(__user_pgd(next->pgd)));		
++			load_LDT_nolock(&next->context, cpu);
++		}
++	}
++#endif
++}
++
++#define deactivate_mm(tsk,mm)	do { \
++	load_gs_index(0); \
++	asm volatile("movl %0,%%fs"::"r"(0));  \
++} while(0)
++
++#define activate_mm(prev, next) do {		\
++	switch_mm((prev),(next),NULL);		\
++} while (0)
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mmu.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mmu.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mmu.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mmu.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,33 @@
++#ifndef __x86_64_MMU_H
++#define __x86_64_MMU_H
++
++#include <linux/spinlock.h>
++#include <asm/semaphore.h>
++
++/*
++ * The x86_64 doesn't have a mmu context, but
++ * we put the segment information here.
++ *
++ * cpu_vm_mask is used to optimize ldt flushing.
++ */
++typedef struct { 
++	void *ldt;
++	rwlock_t ldtlock; 
++	int size;
++	struct semaphore sem; 
++#ifdef CONFIG_XEN
++	unsigned pinned:1;
++	struct list_head unpinned;
++#endif
++} mm_context_t;
++
++#ifdef CONFIG_XEN
++extern struct list_head mm_unpinned;
++extern spinlock_t mm_unpinned_lock;
++
++/* mm/memory.c:exit_mmap hook */
++extern void _arch_exit_mmap(struct mm_struct *mm);
++#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
++#endif
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/nmi.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/nmi.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/nmi.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/nmi.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,75 @@
++/*
++ *  linux/include/asm-i386/nmi.h
++ */
++#ifndef ASM_NMI_H
++#define ASM_NMI_H
++
++#include <linux/pm.h>
++
++#include <asm-xen/xen-public/nmi.h>
++
++struct pt_regs;
++ 
++typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
++ 
++/** 
++ * set_nmi_callback
++ *
++ * Set a handler for an NMI. Only one handler may be
++ * set. Return 1 if the NMI was handled.
++ */
++void set_nmi_callback(nmi_callback_t callback);
++ 
++/** 
++ * unset_nmi_callback
++ *
++ * Remove the handler previously set.
++ */
++void unset_nmi_callback(void);
++ 
++#ifdef CONFIG_PM
++ 
++/** Replace the PM callback routine for NMI. */
++struct pm_dev * set_nmi_pm_callback(pm_callback callback);
++
++/** Unset the PM callback routine back to the default. */
++void unset_nmi_pm_callback(struct pm_dev * dev);
++
++#else
++
++static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
++{
++	return 0;
++} 
++ 
++static inline void unset_nmi_pm_callback(struct pm_dev * dev)
++{
++}
++
++#endif /* CONFIG_PM */
++ 
++extern void default_do_nmi(struct pt_regs *);
++extern void die_nmi(char *str, struct pt_regs *regs);
++
++static inline unsigned char get_nmi_reason(void)
++{
++        shared_info_t *s = HYPERVISOR_shared_info;
++        unsigned char reason = 0;
++
++        /* construct a value which looks like it came from
++         * port 0x61.
++         */
++        if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++                reason |= 0x40;
++        if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++                reason |= 0x80;
++
++        return reason;
++}
++
++extern int panic_on_timeout;
++extern int unknown_nmi_panic;
++
++extern int check_nmi_watchdog(void);
++ 
++#endif /* ASM_NMI_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/page.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/page.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/page.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/page.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,260 @@
++#ifndef _X86_64_PAGE_H
++#define _X86_64_PAGE_H
++
++#include <linux/config.h>
++/* #include <linux/string.h> */
++#ifndef __ASSEMBLY__
++#include <linux/types.h>
++#endif
++#include <asm-xen/xen-public/xen.h> 
++#include <asm-xen/foreign_page.h>
++
++#define arch_free_page(_page,_order)			\
++({	int foreign = PageForeign(_page);		\
++	if (foreign)					\
++		(PageForeignDestructor(_page))(_page);	\
++	foreign;					\
++})
++#define HAVE_ARCH_FREE_PAGE
++
++#ifdef CONFIG_XEN_SCRUB_PAGES
++#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
++#else
++#define scrub_pages(_p,_n) ((void)0)
++#endif
++
++/* PAGE_SHIFT determines the page size */
++#define PAGE_SHIFT	12
++#ifdef __ASSEMBLY__
++#define PAGE_SIZE	(0x1 << PAGE_SHIFT)
++#else
++#define PAGE_SIZE	(1UL << PAGE_SHIFT)
++#endif
++#define PAGE_MASK	(~(PAGE_SIZE-1))
++#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))
++
++#define THREAD_ORDER 1 
++#ifdef __ASSEMBLY__
++#define THREAD_SIZE  (1 << (PAGE_SHIFT + THREAD_ORDER))
++#else
++#define THREAD_SIZE  (1UL << (PAGE_SHIFT + THREAD_ORDER))
++#endif
++#define CURRENT_MASK (~(THREAD_SIZE-1))
++
++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
++#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++
++#define HPAGE_SHIFT PMD_SHIFT
++#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
++#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
++#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++
++void clear_page(void *);
++void copy_page(void *, void *);
++
++#define clear_user_page(page, vaddr, pg)	clear_page(page)
++#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
++
++#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
++#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
++
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY	(~0UL)
++#define FOREIGN_FRAME(m)	((m) | (1UL<<63))
++extern unsigned long *phys_to_machine_mapping;
++#define pfn_to_mfn(pfn)	\
++(phys_to_machine_mapping[(unsigned int)(pfn)] & ~(1UL << 63))
++#define	phys_to_machine_mapping_valid(pfn) \
++	(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++	unsigned long pfn;
++
++	/*
++	 * The array access can fail (e.g., device space beyond end of RAM).
++	 * In such cases it doesn't matter what we return (we return garbage),
++	 * but we must handle the fault without crashing!
++	 */
++	asm (
++		"1:	movq %1,%0\n"
++		"2:\n"
++		".section __ex_table,\"a\"\n"
++		"	.align 8\n"
++		"	.quad 1b,2b\n"
++		".previous"
++		: "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );
++
++	return pfn;
++}
++
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++	phys_to_machine_mapping[pfn] = mfn;
++}
++
++/* Definitions for machine and pseudophysical addresses. */
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++
++static inline maddr_t phys_to_machine(paddr_t phys)
++{
++	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++	return machine;
++}
++
++static inline paddr_t machine_to_phys(maddr_t machine)
++{
++	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++	return phys;
++}
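A small worked illustration of the two conversion helpers above; the frame numbers are invented for the example and are not part of the patch.

/* Suppose pseudo-physical frame 0x100 is backed by machine frame 0x7abc:
 *
 *   pfn_to_mfn(0x100)          == 0x7abc
 *   phys_to_machine(0x100123)  == 0x7abc123   (page offset 0x123 is preserved)
 *   machine_to_phys(0x7abc123) == 0x100123    (round trip via the M2P table)
 */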
++
++/*
++ * These are used to make use of C type-checking..
++ */
++typedef struct { unsigned long pte; } pte_t;
++typedef struct { unsigned long pmd; } pmd_t;
++typedef struct { unsigned long pud; } pud_t;
++typedef struct { unsigned long pgd; } pgd_t;
++#define PTE_MASK	PHYSICAL_PAGE_MASK
++
++typedef struct { unsigned long pgprot; } pgprot_t;
++
++#define pte_val(x)	(((x).pte & 1) ? machine_to_phys((x).pte) : \
++			 (x).pte)
++#define pte_val_ma(x)	((x).pte)
++
++static inline unsigned long pmd_val(pmd_t x)
++{
++	unsigned long ret = x.pmd;
++	if (ret) ret = machine_to_phys(ret);
++	return ret;
++}
++
++static inline unsigned long pud_val(pud_t x)
++{
++	unsigned long ret = x.pud;
++	if (ret) ret = machine_to_phys(ret);
++	return ret;
++}
++
++static inline unsigned long pgd_val(pgd_t x)
++{
++	unsigned long ret = x.pgd;
++	if (ret) ret = machine_to_phys(ret);
++	return ret;
++}
++
++#define pgprot_val(x)	((x).pgprot)
++
++#define __pte_ma(x)     ((pte_t) { (x) } )
++
++static inline pte_t __pte(unsigned long x)
++{
++	if (x & 1) x = phys_to_machine(x);
++	return ((pte_t) { (x) });
++}
++
++static inline pmd_t __pmd(unsigned long x)
++{
++	if ((x & 1)) x = phys_to_machine(x);
++	return ((pmd_t) { (x) });
++}
++
++static inline pud_t __pud(unsigned long x)
++{
++	if ((x & 1)) x = phys_to_machine(x);
++	return ((pud_t) { (x) });
++}
++
++static inline pgd_t __pgd(unsigned long x)
++{
++	if ((x & 1)) x = phys_to_machine(x);
++	return ((pgd_t) { (x) });
++}
++
++#define __pgprot(x)	((pgprot_t) { (x) } )
++
++#define __START_KERNEL		0xffffffff80100000UL
++#define __START_KERNEL_map	0xffffffff80000000UL
++#define __PAGE_OFFSET           0xffff880000000000UL	
++
++#else
++#define __START_KERNEL		0xffffffff80100000
++#define __START_KERNEL_map	0xffffffff80000000
++#define __PAGE_OFFSET           0xffff880000000000
++#endif /* !__ASSEMBLY__ */
++
++/* to align the pointer to the (next) page boundary */
++#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
++
++/* See Documentation/x86_64/mm.txt for a description of the memory map. */
++#define __PHYSICAL_MASK_SHIFT	46
++#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
++#define __VIRTUAL_MASK_SHIFT	48
++#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
++
++#define KERNEL_TEXT_SIZE  (40UL*1024*1024)
++#define KERNEL_TEXT_START 0xffffffff80000000UL 
++
++#ifndef __ASSEMBLY__
++
++#include <asm/bug.h>
++
++/* Pure 2^n version of get_order */
++extern __inline__ int get_order(unsigned long size)
++{
++	int order;
++
++	size = (size-1) >> (PAGE_SHIFT-1);
++	order = -1;
++	do {
++		size >>= 1;
++		order++;
++	} while (size);
++	return order;
++}
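A few sample values for get_order() above, assuming the 4 KB PAGE_SIZE defined earlier in this header (illustration only):

/*   get_order(1)     == 0   (one page)
 *   get_order(4096)  == 0
 *   get_order(4097)  == 1   (rounds up to two pages)
 *   get_order(16384) == 2   (four pages)
 */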
++
++#endif /* __ASSEMBLY__ */
++
++#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
++
++/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
++   Otherwise you risk miscompilation. */ 
++#define __pa(x)			(((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
++/* __pa_symbol should be used for C visible symbols.
++   This seems to be the official gcc blessed way to do such arithmetic. */ 
++#define __pa_symbol(x)		\
++	({unsigned long v;  \
++	  asm("" : "=r" (v) : "0" (x)); \
++	  __pa(v); })
++
++#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
++#ifndef CONFIG_DISCONTIGMEM
++#define pfn_to_page(pfn)	(mem_map + (pfn))
++#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
++#define pfn_valid(pfn)		((pfn) < max_mapnr)
++#endif
++
++#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
++#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
++
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
++#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
++
++#define VM_DATA_DEFAULT_FLAGS \
++	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
++	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++#define __HAVE_ARCH_GATE_AREA 1	
++
++#endif /* __KERNEL__ */
++
++#endif /* _X86_64_PAGE_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/param.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/param.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/param.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/param.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,22 @@
++#ifndef _ASMx86_64_PARAM_H
++#define _ASMx86_64_PARAM_H
++
++#ifdef __KERNEL__
++# define HZ		100		/* Internal kernel timer frequency */
++# define USER_HZ	100		/* .. some user interfaces are in "ticks" */
++# define CLOCKS_PER_SEC		(USER_HZ)	/* like times() */
++#endif
++
++#ifndef HZ
++#define HZ 100
++#endif
++
++#define EXEC_PAGESIZE	4096
++
++#ifndef NOGROUP
++#define NOGROUP		(-1)
++#endif
++
++#define MAXHOSTNAMELEN	64	/* max length of hostname */
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pci.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/pci.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pci.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/pci.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,167 @@
++#ifndef __x8664_PCI_H
++#define __x8664_PCI_H
++
++#include <linux/config.h>
++#include <asm/io.h>
++
++#ifdef __KERNEL__
++
++#include <linux/mm.h> /* for struct page */
++
++/* Can be used to override the logic in pci_scan_bus for skipping
++   already-configured bus numbers - to be used for buggy BIOSes
++   or architectures with incomplete PCI setup by the loader */
++
++#ifdef CONFIG_PCI
++extern unsigned int pcibios_assign_all_busses(void);
++#else
++#define pcibios_assign_all_busses()	0
++#endif
++#define pcibios_scan_all_fns(a, b)	0
++
++extern int no_iommu, force_iommu;
++
++extern unsigned long pci_mem_start;
++#define PCIBIOS_MIN_IO		0x1000
++#define PCIBIOS_MIN_MEM		(pci_mem_start)
++
++#define PCIBIOS_MIN_CARDBUS_IO	0x4000
++
++void pcibios_config_init(void);
++struct pci_bus * pcibios_scan_root(int bus);
++extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
++extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
++
++void pcibios_set_master(struct pci_dev *dev);
++void pcibios_penalize_isa_irq(int irq);
++struct irq_routing_table *pcibios_get_irq_routing_table(void);
++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <asm/scatterlist.h>
++#include <linux/string.h>
++#include <asm/page.h>
++
++extern int iommu_setup(char *opt);
++
++#ifdef CONFIG_GART_IOMMU
++/* The PCI address space does equal the physical memory
++ * address space.  The networking and block device layers use
++ * this boolean for bounce buffer decisions
++ *
++ * On AMD64 it mostly equals, but we set it to zero to tell some subsystems
++ * that an IOMMU is available.
++ */
++#define PCI_DMA_BUS_IS_PHYS	(no_iommu ? 1 : 0)
++
++/*
++ * x86-64 always supports DAC, but sometimes it is useful to force
++ * devices through the IOMMU to get automatic sg list merging.
++ * Optional right now.
++ */
++extern int iommu_sac_force;
++#define pci_dac_dma_supported(pci_dev, mask)	(!iommu_sac_force)
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
++	dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
++	__u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME)			\
++	((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
++	(((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME)			\
++	((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
++	(((PTR)->LEN_NAME) = (VAL))
++
++#elif defined(CONFIG_SWIOTLB)
++
++#define PCI_DMA_BUS_IS_PHYS	0
++
++#define pci_dac_dma_supported(pci_dev, mask)    1
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
++	dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
++	__u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME)			\
++	((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
++	(((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME)			\
++	((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
++	(((PTR)->LEN_NAME) = (VAL))
++
++#else
++/* No IOMMU */
++
++#define PCI_DMA_BUS_IS_PHYS	1
++#define pci_dac_dma_supported(pci_dev, mask)    1
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
++#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
++#define pci_unmap_len(PTR, LEN_NAME)		(0)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
++
++#endif
++
++#include <asm-generic/pci-dma-compat.h>
++
++static inline dma64_addr_t
++pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
++{
++	return ((dma64_addr_t) page_to_phys(page) +
++		(dma64_addr_t) offset);
++}
++
++static inline struct page *
++pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++	return virt_to_page(__va(dma_addr)); 	
++}
++
++static inline unsigned long
++pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++	return (dma_addr & ~PAGE_MASK);
++}
++
++static inline void
++pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++}
++
++static inline void
++pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++	flush_write_buffers();
++}
++
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++			       enum pci_mmap_state mmap_state, int write_combine);
++
++static inline void pcibios_add_platform_entries(struct pci_dev *dev)
++{
++}
++
++#endif /* __KERNEL__ */
++
++/* generic pci stuff */
++#ifdef CONFIG_PCI
++#include <asm-generic/pci.h>
++#endif
++
++/* On Xen we have to scan all functions since Xen hides bridges from
++ * us.  If a bridge is at fn=0 and that slot has a multifunction
++ * device, we won't find the additional devices without scanning all
++ * functions. */
++#undef pcibios_scan_all_fns
++#define pcibios_scan_all_fns(a, b)	1
++
++#endif /* __x8664_PCI_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pgalloc.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/pgalloc.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/pgalloc.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,199 @@
++#ifndef _X86_64_PGALLOC_H
++#define _X86_64_PGALLOC_H
++
++#include <asm/fixmap.h>
++#include <asm/pda.h>
++#include <linux/threads.h>
++#include <linux/mm.h>
++#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
++
++#include <asm-xen/features.h>
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
++
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++
++static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
++{
++	set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
++}
++
++static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
++{
++	if (unlikely((mm)->context.pinned)) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
++			       pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
++		set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
++	} else {
++		*(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
++	}
++}
++
++static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++	if (unlikely((mm)->context.pinned)) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)pmd,
++			       pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, 
++				       PAGE_KERNEL_RO), 0));
++		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
++	} else {
++		*(pud) =  __pud(_PAGE_TABLE | __pa(pmd));
++	}
++}
++
++/*
++ * We need to use the batch mode here, but pgd_populate() won't
++ * be called frequently.
++ */
++static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++	if (unlikely((mm)->context.pinned)) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)pud,
++			       pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, 
++				       PAGE_KERNEL_RO), 0));
++		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
++		set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
++	} else {
++		*(pgd) =  __pgd(_PAGE_TABLE | __pa(pud));
++		*(__user_pgd(pgd)) = *(pgd);
++	}
++}
++
++extern __inline__ void pmd_free(pmd_t *pmd)
++{
++	pte_t *ptep = virt_to_ptep(pmd);
++
++	if (!pte_write(*ptep)) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			(unsigned long)pmd,
++			pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, PAGE_KERNEL),
++			0));
++	}
++	free_page((unsigned long)pmd);
++}
++
++static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++        pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
++        return pmd;
++}
++
++static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++        pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
++        return pud;
++}
++
++static inline void pud_free(pud_t *pud)
++{
++	pte_t *ptep = virt_to_ptep(pud);
++
++	if (!pte_write(*ptep)) {
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			(unsigned long)pud,
++			pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL),
++			0));
++	}
++	free_page((unsigned long)pud);
++}
++
++static inline pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++        /*
++         * We allocate two contiguous pages for kernel and user.
++         */
++        unsigned boundary;
++	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
++
++	if (!pgd)
++		return NULL;
++	/*
++	 * Copy kernel pointers in from init.
++	 * Could keep a freelist or slab cache of those because the kernel
++	 * part never changes.
++	 */
++	boundary = pgd_index(__PAGE_OFFSET);
++	memset(pgd, 0, boundary * sizeof(pgd_t));
++	memcpy(pgd + boundary,
++	       init_level4_pgt + boundary,
++	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
++
++	memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
++        /*
++         * Set level3_user_pgt for vsyscall area
++         */
++	set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START), 
++                mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
++	return pgd;
++}
++
++static inline void pgd_free(pgd_t *pgd)
++{
++	pte_t *ptep = virt_to_ptep(pgd);
++
++	if (!pte_write(*ptep)) {
++		xen_pgd_unpin(__pa(pgd));
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)pgd,
++			       pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
++			       0));
++	}
++
++	ptep = virt_to_ptep(__user_pgd(pgd));
++
++	if (!pte_write(*ptep)) {
++		xen_pgd_unpin(__pa(__user_pgd(pgd)));
++		BUG_ON(HYPERVISOR_update_va_mapping(
++			       (unsigned long)__user_pgd(pgd),
++			       pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT, 
++				       PAGE_KERNEL),
++			       0));
++	}
++
++	free_pages((unsigned long)pgd, 1);
++}
++
++static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
++        if (pte)
++		make_page_readonly(pte, XENFEAT_writable_page_tables);
++
++	return pte;
++}
++
++static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++	struct page *pte;
++
++	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++	return pte;
++}
++
++/* Should really implement gc for free page table pages. This could be
++   done with a reference count in struct page. */
++
++extern __inline__ void pte_free_kernel(pte_t *pte)
++{
++	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
++        xen_pte_unpin(__pa(pte));
++        make_page_writable(pte, XENFEAT_writable_page_tables);
++	free_page((unsigned long)pte); 
++}
++
++extern void pte_free(struct page *pte);
++
++//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) 
++//#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
++//#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
++
++#define __pte_free_tlb(tlb,x)   pte_free((x))
++#define __pmd_free_tlb(tlb,x)   pmd_free((x))
++#define __pud_free_tlb(tlb,x)   pud_free((x))
++
++#endif /* _X86_64_PGALLOC_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pgtable.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/pgtable.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pgtable.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/pgtable.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,570 @@
++#ifndef _X86_64_PGTABLE_H
++#define _X86_64_PGTABLE_H
++
++/*
++ * This file contains the functions and defines necessary to modify and use
++ * the x86-64 page table tree.
++ */
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <linux/threads.h>
++#include <linux/sched.h>
++#include <asm/pda.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++
++extern pud_t level3_user_pgt[512];
++extern pud_t init_level4_user_pgt[];
++
++extern void xen_init_pt(void);
++
++#define virt_to_ptep(__va)						\
++({									\
++	pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));		\
++	pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));	\
++	pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));	\
++	pte_offset_kernel(__pmd, (unsigned long)(__va));		\
++})
++
++#define arbitrary_virt_to_machine(__va)					\
++({									\
++	maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
++	m | ((unsigned long)(__va) & (PAGE_SIZE-1));			\
++})
++#endif
++
++extern pud_t level3_kernel_pgt[512];
++extern pud_t level3_physmem_pgt[512];
++extern pud_t level3_ident_pgt[512];
++extern pmd_t level2_kernel_pgt[512];
++extern pgd_t init_level4_pgt[];
++extern unsigned long __supported_pte_mask;
++
++#define swapper_pg_dir init_level4_pgt
++
++extern int nonx_setup(char *str);
++extern void paging_init(void);
++extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
++
++extern unsigned long pgkern_mask;
++
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT	39
++#define PTRS_PER_PGD	512
++
++/*
++ * 3rd level page
++ */
++#define PUD_SHIFT	30
++#define PTRS_PER_PUD	512
++
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT	21
++#define PTRS_PER_PMD	512
++
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE	512
++
++#define pte_ERROR(e) \
++	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
++#define pmd_ERROR(e) \
++	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
++#define pud_ERROR(e) \
++	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
++#define pgd_ERROR(e) \
++	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
++
++#define pgd_none(x)	(!pgd_val(x))
++#define pud_none(x)	(!pud_val(x))
++
++#define set_pte_batched(pteptr, pteval) \
++	queue_l1_entry_update(pteptr, (pteval))
++
++extern inline int pud_present(pud_t pud)	{ return !pud_none(pud); }
++
++static inline void set_pte(pte_t *dst, pte_t val)
++{
++	*dst = val;
++}
++
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
++#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
++#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
++
++extern inline void pud_clear (pud_t * pud)
++{
++	set_pud(pud, __pud(0));
++}
++
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++
++extern inline void pgd_clear (pgd_t * pgd)
++{
++        set_pgd(pgd, __pgd(0));
++        set_pgd(__user_pgd(pgd), __pgd(0));
++}
++
++#define pud_page(pud) \
++    ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
++
++/*
++ * A note on implementation of this atomic 'get-and-clear' operation.
++ * This is actually very simple because Xen Linux can only run on a single
++ * processor. Therefore, we cannot race other processors setting the 'accessed'
++ * or 'dirty' bits on a page-table entry.
++ * Even if pages are shared between domains, that is not a problem because
++ * each domain will have separate page tables, with their own versions of
++ * accessed & dirty state.
++ */
++#define ptep_get_and_clear(mm,addr,xp)	__pte_ma(xchg(&(xp)->pte, 0))
++
++#if 0
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
++{
++        pte_t pte = *xp;
++        if (pte.pte)
++                set_pte(xp, __pte_ma(0));
++        return pte;
++}
++#endif
++
++#define pte_same(a, b)		((a).pte == (b).pte)
++
++#define PMD_SIZE	(1UL << PMD_SHIFT)
++#define PMD_MASK	(~(PMD_SIZE-1))
++#define PUD_SIZE	(1UL << PUD_SHIFT)
++#define PUD_MASK	(~(PUD_SIZE-1))
++#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
++#define PGDIR_MASK	(~(PGDIR_SIZE-1))
++
++#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
++#define FIRST_USER_ADDRESS	0
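
Taken together, PGDIR_SHIFT/PUD_SHIFT/PMD_SHIFT and the PTRS_PER_* constants above split a 48-bit virtual address into four 9-bit table indexes plus a 12-bit page offset. The following is a minimal standalone sketch of that decomposition; it is illustrative only and not part of the patch, with DEMO_* constants re-declared locally so it builds as an ordinary user-space program.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

#define DEMO_PAGE_SHIFT   12
#define DEMO_PMD_SHIFT    21
#define DEMO_PUD_SHIFT    30
#define DEMO_PGDIR_SHIFT  39
#define DEMO_PTRS         512          /* entries per table = 2^9 */

int main(void)
{
	unsigned long va  = 0xffffc20000123456UL;    /* arbitrary address */
	unsigned long pgd = (va >> DEMO_PGDIR_SHIFT) & (DEMO_PTRS - 1);
	unsigned long pud = (va >> DEMO_PUD_SHIFT)   & (DEMO_PTRS - 1);
	unsigned long pmd = (va >> DEMO_PMD_SHIFT)   & (DEMO_PTRS - 1);
	unsigned long pte = (va >> DEMO_PAGE_SHIFT)  & (DEMO_PTRS - 1);
	unsigned long off = va & ((1UL << DEMO_PAGE_SHIFT) - 1);

	printf("pgd=%lu pud=%lu pmd=%lu pte=%lu offset=0x%lx\n",
	       pgd, pud, pmd, pte, off);
	return 0;
}

Each level consumes 9 bits (512 entries), so 4 levels plus the 12-bit offset cover the 48-bit address space the constants imply.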
++
++#ifndef __ASSEMBLY__
++#define MAXMEM		 0x3fffffffffffUL
++#define VMALLOC_START    0xffffc20000000000UL
++#define VMALLOC_END      0xffffe1ffffffffffUL
++#define MODULES_VADDR    0xffffffff88000000UL
++#define MODULES_END      0xfffffffffff00000UL
++#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
++
++#define _PAGE_BIT_PRESENT	0
++#define _PAGE_BIT_RW		1
++#define _PAGE_BIT_USER		2
++#define _PAGE_BIT_PWT		3
++#define _PAGE_BIT_PCD		4
++#define _PAGE_BIT_ACCESSED	5
++#define _PAGE_BIT_DIRTY		6
++#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
++#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
++#define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
++
++#define _PAGE_PRESENT	0x001
++#define _PAGE_RW	0x002
++#define _PAGE_USER	0x004
++#define _PAGE_PWT	0x008
++#define _PAGE_PCD	0x010
++#define _PAGE_ACCESSED	0x020
++#define _PAGE_DIRTY	0x040
++#define _PAGE_PSE	0x080	/* 2MB page */
++#define _PAGE_FILE	0x040	/* set:pagecache, unset:swap */
++#define _PAGE_GLOBAL	0x100	/* Global TLB entry */
++
++#define _PAGE_PROTNONE	0x080	/* If not present */
++#define _PAGE_NX        (1UL<<_PAGE_BIT_NX)
++
++#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE	_PAGE_TABLE
++
++#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
++
++#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY PAGE_COPY_NOEXEC
++#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define __PAGE_KERNEL \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
++#define __PAGE_KERNEL_EXEC \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_USER )
++#define __PAGE_KERNEL_NOCACHE \
++	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
++#define __PAGE_KERNEL_RO \
++	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
++#define __PAGE_KERNEL_VSYSCALL \
++	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_USER )
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
++	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD | _PAGE_USER )
++#define __PAGE_KERNEL_LARGE \
++	(__PAGE_KERNEL | _PAGE_PSE | _PAGE_USER )
++
++
++/*
++ * We don't support GLOBAL pages in xenolinux64
++ */
++#define MAKE_GLOBAL(x) __pgprot((x))
++
++#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
++#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
++
++/*         xwr */
++#define __P000	PAGE_NONE
++#define __P001	PAGE_READONLY
++#define __P010	PAGE_COPY
++#define __P011	PAGE_COPY
++#define __P100	PAGE_READONLY_EXEC
++#define __P101	PAGE_READONLY_EXEC
++#define __P110	PAGE_COPY_EXEC
++#define __P111	PAGE_COPY_EXEC
++
++#define __S000	PAGE_NONE
++#define __S001	PAGE_READONLY
++#define __S010	PAGE_SHARED
++#define __S011	PAGE_SHARED
++#define __S100	PAGE_READONLY_EXEC
++#define __S101	PAGE_READONLY_EXEC
++#define __S110	PAGE_SHARED_EXEC
++#define __S111	PAGE_SHARED_EXEC
++
++static inline unsigned long pgd_bad(pgd_t pgd)
++{
++       unsigned long val = pgd_val(pgd);
++       val &= ~PTE_MASK;
++       val &= ~(_PAGE_USER | _PAGE_DIRTY);
++       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
++}
++
++static inline unsigned long pud_bad(pud_t pud) 
++{ 
++       unsigned long val = pud_val(pud);
++       val &= ~PTE_MASK; 
++       val &= ~(_PAGE_USER | _PAGE_DIRTY); 
++       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);      
++} 
++
++#define set_pte_at(_mm,addr,ptep,pteval) do {				\
++	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
++	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
++		set_pte((ptep), (pteval));				\
++} while (0)
++
++#define pte_none(x)	(!(x).pte)
++#define pte_present(x)	((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
++#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
++
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
++
++/*
++ * We detect special mappings in one of two ways:
++ *  1. If the MFN is an I/O page then Xen will set the m2p entry
++ *     to be outside our maximum possible pseudophys range.
++ *  2. If the MFN belongs to a different domain then we will certainly
++ *     not have MFN in our p2m table. Conversely, if the page is ours,
++ *     then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range pointer.
++ *
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ * 
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
++ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ */
++#define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
++#define pte_pfn(_pte)							\
++({									\
++	unsigned long mfn = pte_mfn(_pte);                              \
++	unsigned long pfn = mfn_to_pfn(mfn);                            \
++	if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
++		pfn = max_mapnr; /* special: force !pfn_valid() */	\
++	pfn;								\
++})
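
The pte_pfn() check above is easier to see in isolation: a machine frame is only treated as having a valid local PFN if the m2p and p2m tables agree on it. Below is a small user-space sketch of that round-trip test; it is illustrative only and not part of the patch, and the demo_p2m array merely stands in for the real phys_to_machine_mapping table.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

#define DEMO_MAX_PFN 8
static unsigned long demo_p2m[DEMO_MAX_PFN] = {
	100, 101, 102, 103, 104, 105, 106, 107	/* pfn -> mfn */
};

/* Returns the local PFN, or DEMO_MAX_PFN to force !pfn_valid(). */
static unsigned long demo_mfn_to_local_pfn(unsigned long mfn,
					   unsigned long pfn_from_m2p)
{
	if (pfn_from_m2p >= DEMO_MAX_PFN || demo_p2m[pfn_from_m2p] != mfn)
		return DEMO_MAX_PFN;	/* I/O page or another domain's frame */
	return pfn_from_m2p;
}

int main(void)
{
	/* Our own frame: p2m(m2p(103)) == 103, so PFN 3 is returned. */
	printf("%lu\n", demo_mfn_to_local_pfn(103, 3));
	/* Foreign frame: m2p claims PFN 2 but p2m[2] != 999 -> invalid. */
	printf("%lu\n", demo_mfn_to_local_pfn(999, 2));
	return 0;
}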
++
++#define pte_page(x)	pfn_to_page(pte_pfn(x))
++
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
++{
++	pte_t pte;
++        
++	(pte).pte = (pfn_to_mfn(page_nr) << PAGE_SHIFT);
++	(pte).pte |= pgprot_val(pgprot);
++	(pte).pte &= __supported_pte_mask;
++	return pte;
++}
++
++#define pfn_pte_ma(pfn, prot)	__pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not..
++ */
++#define __pte_val(x)	((x).pte)
++
++static inline int pte_user(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
++extern inline int pte_read(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
++extern inline int pte_exec(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
++extern inline int pte_dirty(pte_t pte)		{ return __pte_val(pte) & _PAGE_DIRTY; }
++extern inline int pte_young(pte_t pte)		{ return __pte_val(pte) & _PAGE_ACCESSED; }
++extern inline int pte_write(pte_t pte)		{ return __pte_val(pte) & _PAGE_RW; }
++static inline int pte_file(pte_t pte)		{ return __pte_val(pte) & _PAGE_FILE; }
++
++extern inline pte_t pte_rdprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_USER; return pte; }
++extern inline pte_t pte_exprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_USER; return pte; }
++extern inline pte_t pte_mkclean(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
++extern inline pte_t pte_mkold(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
++extern inline pte_t pte_wrprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_RW; return pte; }
++extern inline pte_t pte_mkread(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
++extern inline pte_t pte_mkexec(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
++extern inline pte_t pte_mkdirty(pte_t pte)	{ __pte_val(pte) |= _PAGE_DIRTY; return pte; }
++extern inline pte_t pte_mkyoung(pte_t pte)	{ __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
++extern inline pte_t pte_mkwrite(pte_t pte)	{ __pte_val(pte) |= _PAGE_RW; return pte; }
++
++struct vm_area_struct;
++
++static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
++{
++	pte_t pte = *ptep;
++	int ret = pte_dirty(pte);
++	if (ret)
++		set_pte(ptep, pte_mkclean(pte));
++	return ret;
++}
++
++static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
++{
++	pte_t pte = *ptep;
++	int ret = pte_young(pte);
++	if (ret)
++		set_pte(ptep, pte_mkold(pte));
++	return ret;
++}
++
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++	pte_t pte = *ptep;
++	if (pte_write(pte))
++		set_pte(ptep, pte_wrprotect(pte));
++}
++
++/*
++ * Macro to mark a page protection value as "uncacheable".
++ */
++#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
++
++#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) 
++static inline int pmd_large(pmd_t pte) { 
++	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE; 
++} 	
++
++
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
++
++#define page_pte(page) page_pte_prot(page, __pgprot(0))
++
++/*
++ * Level 4 access.
++ * Never use these in the common code.
++ */
++#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
++#define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
++#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
++#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
++
++/* PUD - Level3 access */
++/* to find an entry in a page-table-directory. */
++#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
++#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
++static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
++{ 
++	return pud + pud_index(address);
++} 
++
++/* Find correct pud via the hidden fourth level page level: */
++
++/* This accesses the reference page table of the boot cpu. 
++   Other CPUs get synced lazily via the page fault handler. */
++static inline pud_t *pud_offset_k(unsigned long address)
++{
++	return pud_offset(pgd_offset_k(address), address);
++}
++
++/* PMD  - Level 2 access */
++#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
++#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
++#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
++                                  pmd_index(address))
++#define pmd_none(x)	(!pmd_val(x))
++/* pmd_present doesn't just test the _PAGE_PRESENT bit, since writable
++   page tables (wr.p.t.) can temporarily clear it. */
++#define pmd_present(x)	(pmd_val(x))
++#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
++#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
++#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
++#define pmd_pfn(x)  ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
++
++#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
++#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
++#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
++
++/* PTE - Level 1 access. */
++
++/* page, protection -> pte */
++#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
++#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
++ 
++/* physical address -> PTE */
++static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
++{ 
++	pte_t pte;
++	(pte).pte = physpage | pgprot_val(pgprot); 
++	return pte; 
++}
++ 
++/* Change flags of a PTE */
++extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{ 
++        (pte).pte &= _PAGE_CHG_MASK;
++	(pte).pte |= pgprot_val(newprot);
++	(pte).pte &= __supported_pte_mask;
++       return pte; 
++}
++
++#define pte_index(address) \
++		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
++			pte_index(address))
++
++/* x86-64 always has all page tables mapped. */
++#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
++#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
++#define pte_unmap(pte) /* NOP */
++#define pte_unmap_nested(pte) /* NOP */ 
++
++#define update_mmu_cache(vma,address,pte) do { } while (0)
++
++/* We only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPUs that might be updating the dirty
++ * bit at the same time. */
++#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#if 0
++#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
++	do {								  \
++		if (__dirty) {						  \
++			set_pte(__ptep, __entry);			  \
++			flush_tlb_page(__vma, __address);		  \
++		}							  \
++	} while (0)
++#endif
++#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
++	do {								  \
++		if (__dirty) {						  \
++		        if ( likely((__vma)->vm_mm == current->mm) ) {    \
++			    BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
++			} else {                                          \
++                            xen_l1_entry_update((__ptep), (__entry)); \
++			    flush_tlb_page((__vma), (__address));         \
++			}                                                 \
++		}							  \
++	} while (0)
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x)			(((x).val >> 1) & 0x3f)
++#define __swp_offset(x)			((x).val >> 8)
++#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
++#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
++#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
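
The swap-entry macros above pack the swap type into bits 1..6 and the offset from bit 8 upward, leaving bit 0 (the present bit) clear. A quick standalone check of that packing follows; it is illustrative only and not part of the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

static unsigned long demo_swp_entry(unsigned long type, unsigned long offset)
{
	return (type << 1) | (offset << 8);	/* bit 0 stays clear */
}

int main(void)
{
	unsigned long e = demo_swp_entry(5, 0x1234);

	printf("type=%lu offset=0x%lx present-bit=%lu\n",
	       (e >> 1) & 0x3f, e >> 8, e & 1);
	return 0;
}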
++
++#endif /* !__ASSEMBLY__ */
++
++extern int kern_addr_valid(unsigned long addr); 
++
++#define DOMID_LOCAL (0xFFFFU)
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++                            unsigned long address,
++                            unsigned long mfn,
++                            unsigned long size,
++                            pgprot_t prot,
++                            domid_t  domid);
++
++int direct_kernel_remap_pfn_range(unsigned long address, 
++				  unsigned long mfn,
++				  unsigned long size, 
++				  pgprot_t prot,
++				  domid_t  domid);
++
++int create_lookup_pte_addr(struct mm_struct *mm,
++                           unsigned long address,
++                           uint64_t *ptep);
++
++int touch_pte_range(struct mm_struct *mm,
++                    unsigned long address,
++                    unsigned long size);
++
++#define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
++		direct_remap_pfn_range(vma,vaddr,(paddr)>>PAGE_SHIFT,size,prot,DOMID_IO)
++
++#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
++		direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
++
++#define MK_IOSPACE_PFN(space, pfn)	(pfn)
++#define GET_IOSPACE(pfn)		0
++#define GET_PFN(pfn)			(pfn)
++
++#define HAVE_ARCH_UNMAPPED_AREA
++
++#define pgtable_cache_init()   do { } while (0)
++#define check_pgt_cache()      do { } while (0)
++
++#define PAGE_AGP    PAGE_KERNEL_NOCACHE
++#define HAVE_PAGE_AGP 1
++
++/* fs/proc/kcore.c */
++#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
++#define	kc_offset_to_vaddr(o) \
++   (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
++
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
++
++#endif /* _X86_64_PGTABLE_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/processor.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/processor.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/processor.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/processor.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,486 @@
++/*
++ * include/asm-x86_64/processor.h
++ *
++ * Copyright (C) 1994 Linus Torvalds
++ */
++
++#ifndef __ASM_X86_64_PROCESSOR_H
++#define __ASM_X86_64_PROCESSOR_H
++
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <linux/config.h>
++#include <linux/threads.h>
++#include <asm/msr.h>
++#include <asm/current.h>
++#include <asm/system.h>
++#include <asm/mmsegment.h>
++#include <asm/percpu.h>
++#include <linux/personality.h>
++
++#define TF_MASK		0x00000100
++#define IF_MASK		0x00000200
++#define IOPL_MASK	0x00003000
++#define NT_MASK		0x00004000
++#define VM_MASK		0x00020000
++#define AC_MASK		0x00040000
++#define VIF_MASK	0x00080000	/* virtual interrupt flag */
++#define VIP_MASK	0x00100000	/* virtual interrupt pending */
++#define ID_MASK		0x00200000
++
++#define desc_empty(desc) \
++               (!((desc)->a + (desc)->b))
++
++#define desc_equal(desc1, desc2) \
++               (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
++
++/*
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
++ */
++#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
++
++/*
++ *  CPU type and hardware bug flags. Kept separately for each CPU.
++ */
++
++struct cpuinfo_x86 {
++	__u8	x86;		/* CPU family */
++	__u8	x86_vendor;	/* CPU vendor */
++	__u8	x86_model;
++	__u8	x86_mask;
++	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
++	__u32	x86_capability[NCAPINTS];
++	char	x86_vendor_id[16];
++	char	x86_model_id[64];
++	int 	x86_cache_size;  /* in KB */
++	int	x86_clflush_size;
++	int	x86_cache_alignment;
++	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
++        __u8    x86_virt_bits, x86_phys_bits;
++	__u8	x86_num_cores;
++        __u32   x86_power; 	
++	__u32   extended_cpuid_level;	/* Max extended CPUID function supported */
++	unsigned long loops_per_jiffy;
++} ____cacheline_aligned;
++
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NUM 8
++#define X86_VENDOR_UNKNOWN 0xff
++
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
++#endif
++
++extern char ignore_irq13;
++
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++
++/*
++ * EFLAGS bits
++ */
++#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
++
++/*
++ * Intel CPU features in CR4
++ */
++#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
++#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
++#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
++#define X86_CR4_DE		0x0008	/* enable debugging extensions */
++#define X86_CR4_PSE		0x0010	/* enable page size extensions */
++#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
++#define X86_CR4_MCE		0x0040	/* Machine check enable */
++#define X86_CR4_PGE		0x0080	/* enable global pages */
++#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
++
++/*
++ * Save the cr4 feature set we're using (ie
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPU's that boot up
++ * after us can get the correct flags.
++ */
++extern unsigned long mmu_cr4_features;
++
++static inline void set_in_cr4 (unsigned long mask)
++{
++	mmu_cr4_features |= mask;
++	switch (mask) {
++	case X86_CR4_OSFXSR:
++	case X86_CR4_OSXMMEXCPT:
++		break;
++	default:
++		do {
++			const char *msg = "Xen unsupported cr4 update\n";
++			(void)HYPERVISOR_console_io(
++				CONSOLEIO_write, __builtin_strlen(msg),
++				(char *)msg);
++			BUG();
++		} while (0);
++	}
++}
++
++
++static inline void clear_in_cr4 (unsigned long mask)
++{
++#ifndef CONFIG_XEN
++	mmu_cr4_features &= ~mask;
++	__asm__("movq %%cr4,%%rax\n\t"
++		"andq %0,%%rax\n\t"
++		"movq %%rax,%%cr4\n"
++		: : "irg" (~mask)
++		:"ax");
++#endif
++}
++
++
++#define load_cr3(pgdir) do {				\
++	xen_pt_switch(__pa(pgdir));			\
++	per_cpu(cur_pgd, smp_processor_id()) = pgdir;	\
++} while (/* CONSTCOND */0)
++
++/*
++ * Bus types
++ */
++#define MCA_bus 0
++#define MCA_bus__is_a_macro
++
++
++/*
++ * User space process size: 47 bits minus one guard page.
++ */
++#define TASK_SIZE	(0x800000000000UL - 4096)
++
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmap's.
++ */
++#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
++#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3)
++#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3) 
++#define TASK_UNMAPPED_BASE	\
++	(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)  
++
++/*
++ * Size of io_bitmap.
++ */
++#define IO_BITMAP_BITS  65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#define INVALID_IO_BITMAP_OFFSET 0x8000
++
++struct i387_fxsave_struct {
++	u16	cwd;
++	u16	swd;
++	u16	twd;
++	u16	fop;
++	u64	rip;
++	u64	rdp; 
++	u32	mxcsr;
++	u32	mxcsr_mask;
++	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
++	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 128 bytes */
++	u32	padding[24];
++} __attribute__ ((aligned (16)));
++
++union i387_union {
++	struct i387_fxsave_struct	fxsave;
++};
++
++struct tss_struct {
++	u32 reserved1;
++	u64 rsp0;	
++	u64 rsp1;
++	u64 rsp2;
++	u64 reserved2;
++	u64 ist[7];
++	u32 reserved3;
++	u32 reserved4;
++	u16 reserved5;
++	u16 io_bitmap_base;
++	/*
++	 * The extra 1 is there because the CPU will access an
++	 * additional byte beyond the end of the IO permission
++	 * bitmap. The extra byte must be all 1 bits, and must
++	 * be within the limit. Thus we have:
++	 *
++	 * 128 bytes, the bitmap itself, for ports 0..0x3ff
++	 * 8 bytes, for an extra "long" of ~0UL
++	 */
++	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
++} __attribute__((packed)) ____cacheline_aligned;
++
++extern struct cpuinfo_x86 boot_cpu_data;
++DECLARE_PER_CPU(struct tss_struct,init_tss);
++DECLARE_PER_CPU(pgd_t *, cur_pgd);
++
++#define ARCH_MIN_TASKALIGN	16
++
++struct thread_struct {
++	unsigned long	rsp0;
++	unsigned long	rsp;
++	unsigned long 	userrsp;	/* Copy from PDA */ 
++	unsigned long	fs;
++	unsigned long	gs;
++       	unsigned int	io_pl;
++	unsigned short	es, ds, fsindex, gsindex;	
++/* Hardware debugging registers */
++	unsigned long	debugreg0;  
++	unsigned long	debugreg1;  
++	unsigned long	debugreg2;  
++	unsigned long	debugreg3;  
++	unsigned long	debugreg6;  
++	unsigned long	debugreg7;  
++/* fault info */
++	unsigned long	cr2, trap_no, error_code;
++/* floating point info */
++	union i387_union	i387  __attribute__((aligned(16)));
++/* IO permissions. The bitmap could be moved into the GDT; that would make
++   the switch faster for a limited number of ioperm-using tasks. -AK */
++	int		ioperm;
++	unsigned long	*io_bitmap_ptr;
++	unsigned io_bitmap_max;
++/* cached TLS descriptors. */
++	u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
++} __attribute__((aligned(16)));
++
++#define INIT_THREAD  {}
++
++#define INIT_MMAP \
++{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
++
++#define STACKFAULT_STACK 1
++#define DOUBLEFAULT_STACK 2 
++#define NMI_STACK 3 
++#define DEBUG_STACK 4 
++#define MCE_STACK 5
++#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
++#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
++#define EXCEPTION_STACK_ORDER 0 
++
++#define start_thread(regs,new_rip,new_rsp) do { \
++	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));	 \
++	load_gs_index(0);							\
++	(regs)->rip = (new_rip);						 \
++	(regs)->rsp = (new_rsp);						 \
++	write_pda(oldrsp, (new_rsp));						 \
++	(regs)->cs = __USER_CS;							 \
++	(regs)->ss = __USER_DS;							 \
++	(regs)->eflags = 0x200;							 \
++	set_fs(USER_DS);							 \
++} while(0) 
++
++struct task_struct;
++struct mm_struct;
++
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
++
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
++
++/*
++ * create a kernel thread without removing it from tasklists
++ */
++extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
++
++/*
++ * Return saved PC of a blocked thread.
++ * What is this good for? It will always be the scheduler or ret_from_fork.
++ */
++#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
++
++extern unsigned long get_wchan(struct task_struct *p);
++#define KSTK_EIP(tsk) \
++	(((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
++#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
++
++
++struct microcode_header {
++	unsigned int hdrver;
++	unsigned int rev;
++	unsigned int date;
++	unsigned int sig;
++	unsigned int cksum;
++	unsigned int ldrver;
++	unsigned int pf;
++	unsigned int datasize;
++	unsigned int totalsize;
++	unsigned int reserved[3];
++};
++
++struct microcode {
++	struct microcode_header hdr;
++	unsigned int bits[0];
++};
++
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
++
++/* microcode format is extended from prescott processors */
++struct extended_signature {
++	unsigned int sig;
++	unsigned int pf;
++	unsigned int cksum;
++};
++
++struct extended_sigtable {
++	unsigned int count;
++	unsigned int cksum;
++	unsigned int reserved[3];
++	struct extended_signature sigs[0];
++};
++
++/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
++#define MICROCODE_IOCFREE	_IO('6',0)
++
++
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
++
++/* Opteron nops */
++#define K8_NOP1 ".byte 0x90\n"
++#define K8_NOP2	".byte 0x66,0x90\n" 
++#define K8_NOP3	".byte 0x66,0x66,0x90\n" 
++#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" 
++#define K8_NOP5	K8_NOP3 K8_NOP2 
++#define K8_NOP6	K8_NOP3 K8_NOP3
++#define K8_NOP7	K8_NOP4 K8_NOP3
++#define K8_NOP8	K8_NOP4 K8_NOP4
++
++#define ASM_NOP_MAX 8
++
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++extern inline void rep_nop(void)
++{
++	__asm__ __volatile__("rep;nop": : :"memory");
++}
++
++/* Stop speculative execution */
++extern inline void sync_core(void)
++{ 
++	int tmp;
++	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
++} 
++
++#define cpu_has_fpu 1
++
++#define ARCH_HAS_PREFETCH
++static inline void prefetch(void *x) 
++{ 
++	asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
++} 
++
++#define ARCH_HAS_PREFETCHW 1
++static inline void prefetchw(void *x) 
++{ 
++	alternative_input(ASM_NOP5,
++			  "prefetchw (%1)",
++			  X86_FEATURE_3DNOW,
++			  "r" (x));
++} 
++
++#define ARCH_HAS_SPINLOCK_PREFETCH 1
++
++#define spin_lock_prefetch(x)  prefetchw(x)
++
++#define cpu_relax()   rep_nop()
++
++/*
++ *      NSC/Cyrix CPU configuration register indexes
++ */
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
++
++/*
++ *      NSC/Cyrix CPU indexed register access macros
++ */
++
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++
++#define setCx86(reg, data) do { \
++	outb((reg), 0x22); \
++	outb((data), 0x23); \
++} while (0)
++
++static inline void __monitor(const void *eax, unsigned long ecx,
++		unsigned long edx)
++{
++	/* "monitor %eax,%ecx,%edx;" */
++	asm volatile(
++		".byte 0x0f,0x01,0xc8;"
++		: :"a" (eax), "c" (ecx), "d"(edx));
++}
++
++static inline void __mwait(unsigned long eax, unsigned long ecx)
++{
++	/* "mwait %eax,%ecx;" */
++	asm volatile(
++		".byte 0x0f,0x01,0xc9;"
++		: :"a" (eax), "c" (ecx));
++}
++
++#define stack_current() \
++({								\
++	struct thread_info *ti;					\
++	asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
++	ti->task;					\
++})
++
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++
++extern unsigned long boot_option_idle_override;
++/* Boot loader type from the setup header */
++extern int bootloader_type;
++
++#endif /* __ASM_X86_64_PROCESSOR_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/ptrace.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/ptrace.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/ptrace.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/ptrace.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,124 @@
++#ifndef _X86_64_PTRACE_H
++#define _X86_64_PTRACE_H
++
++#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS) 
++#define R15 0
++#define R14 8
++#define R13 16
++#define R12 24
++#define RBP 32
++#define RBX 40
++/* arguments: interrupts/non-tracing syscalls only save up to here */
++#define R11 48
++#define R10 56	
++#define R9 64
++#define R8 72
++#define RAX 80
++#define RCX 88
++#define RDX 96
++#define RSI 104
++#define RDI 112
++#define ORIG_RAX 120       /* = ERROR */ 
++/* end of arguments */ 	
++/* cpu exception frame or undefined in case of fast syscall. */
++#define RIP 128
++#define CS 136
++#define EFLAGS 144
++#define RSP 152
++#define SS 160
++#define ARGOFFSET R11
++#endif /* __ASSEMBLY__ */
++
++/* top of stack page */ 
++#define FRAME_SIZE 168
++
++#define PTRACE_OLDSETOPTIONS         21
++
++#ifndef __ASSEMBLY__ 
++
++struct pt_regs {
++	unsigned long r15;
++	unsigned long r14;
++	unsigned long r13;
++	unsigned long r12;
++	unsigned long rbp;
++	unsigned long rbx;
++/* arguments: non-interrupts/non-tracing syscalls only save up to here */
++ 	unsigned long r11;
++	unsigned long r10;	
++	unsigned long r9;
++	unsigned long r8;
++	unsigned long rax;
++	unsigned long rcx;
++	unsigned long rdx;
++	unsigned long rsi;
++	unsigned long rdi;
++	unsigned long orig_rax;
++/* end of arguments */ 	
++/* cpu exception frame or undefined */
++	unsigned long rip;
++	unsigned long cs;
++	unsigned long eflags; 
++	unsigned long rsp; 
++	unsigned long ss;
++/* top of stack page */ 
++};
++
++#endif
++
++/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
++#define PTRACE_GETREGS            12
++#define PTRACE_SETREGS            13
++#define PTRACE_GETFPREGS          14
++#define PTRACE_SETFPREGS          15
++#define PTRACE_GETFPXREGS         18
++#define PTRACE_SETFPXREGS         19
++
++/* only useful for accessing 32bit programs */
++#define PTRACE_GET_THREAD_AREA    25
++#define PTRACE_SET_THREAD_AREA    26
++
++#define PTRACE_ARCH_PRCTL	  30	/* arch_prctl for child */
++
++#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 
++#define user_mode(regs) (!!((regs)->cs & 3))
++#define instruction_pointer(regs) ((regs)->rip)
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++extern unsigned long profile_pc(struct pt_regs *regs);
++#else
++#define profile_pc(regs) instruction_pointer(regs)
++#endif
++
++void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
++
++struct task_struct;
++
++extern unsigned long
++convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
++
++enum {
++        EF_CF   = 0x00000001,
++        EF_PF   = 0x00000004,
++        EF_AF   = 0x00000010,
++        EF_ZF   = 0x00000040,
++        EF_SF   = 0x00000080,
++        EF_TF   = 0x00000100,
++        EF_IE   = 0x00000200,
++        EF_DF   = 0x00000400,
++        EF_OF   = 0x00000800,
++        EF_IOPL = 0x00003000,
++        EF_IOPL_RING0 = 0x00000000,
++        EF_IOPL_RING1 = 0x00001000,
++        EF_IOPL_RING2 = 0x00002000,
++        EF_NT   = 0x00004000,   /* nested task */
++        EF_RF   = 0x00010000,   /* resume */
++        EF_VM   = 0x00020000,   /* virtual mode */
++        EF_AC   = 0x00040000,   /* alignment */
++        EF_VIF  = 0x00080000,   /* virtual interrupt */
++        EF_VIP  = 0x00100000,   /* virtual interrupt pending */
++        EF_ID   = 0x00200000,   /* id */
++};
++
++#endif
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/segment.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/segment.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/segment.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/segment.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,46 @@
++#ifndef _ASM_SEGMENT_H
++#define _ASM_SEGMENT_H
++
++#include <asm/cache.h>
++
++#define __KERNEL_CS	0x10
++#define __KERNEL_DS	0x1b
++
++#define __KERNEL32_CS   0x3b
++
++/* 
++ * we cannot use the same code segment descriptor for user and kernel
++ * -- not even in the long flat mode, because of different DPL /kkeil 
++ * The segment offset needs to contain a RPL. Grr. -AK
++ * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) 
++ */
++
++#define __USER32_CS   0x23   /* 4*8+3 */ 
++#define __USER_DS     0x2b   /* 5*8+3 */ 
++#define __USER_CS     0x33   /* 6*8+3 */ 
++#define __USER32_DS	__USER_DS 
++#define __KERNEL16_CS	(GDT_ENTRY_KERNELCS16 * 8)
++#define __KERNEL_COMPAT32_CS   0x8
++
++#define GDT_ENTRY_TLS 1
++#define GDT_ENTRY_TSS 8	/* needs two entries */
++#define GDT_ENTRY_LDT 10 /* needs two entries */
++#define GDT_ENTRY_TLS_MIN 12
++#define GDT_ENTRY_TLS_MAX 14
++#define GDT_ENTRY_KERNELCS16 15
++
++#define GDT_ENTRY_TLS_ENTRIES 3
++
++/* TLS indexes for 64bit - hardcoded in arch_prctl */
++#define FS_TLS 0	
++#define GS_TLS 1	
++
++#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
++#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
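
The hard-coded selector values in the comments above (and the TLS selectors just defined) all follow the usual encoding selector = (GDT index << 3) | RPL. The tiny sketch below reproduces the __USER32_CS/__USER_DS/__USER_CS numbers; it is illustrative only and not part of the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

static unsigned int demo_selector(unsigned int gdt_index, unsigned int rpl)
{
	return (gdt_index << 3) | rpl;
}

int main(void)
{
	printf("__USER32_CS = 0x%x\n", demo_selector(4, 3));	/* 0x23 */
	printf("__USER_DS   = 0x%x\n", demo_selector(5, 3));	/* 0x2b */
	printf("__USER_CS   = 0x%x\n", demo_selector(6, 3));	/* 0x33 */
	return 0;
}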
++
++#define IDT_ENTRIES 256
++#define GDT_ENTRIES 16
++#define GDT_SIZE (GDT_ENTRIES * 8)
++#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) 
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/smp.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/smp.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/smp.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/smp.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,138 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
++
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/config.h>
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/bitops.h>
++extern int disable_apic;
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#include <asm/thread_info.h>
++#endif
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef ASSEMBLY
++
++#include <asm/pda.h>
++
++struct pt_regs;
++
++extern cpumask_t cpu_present_mask;
++extern cpumask_t cpu_possible_map;
++extern cpumask_t cpu_online_map;
++
++/*
++ * Private routines/data
++ */
++ 
++extern void smp_alloc_memory(void);
++extern volatile unsigned long smp_invalidate_needed;
++extern int pic_mode;
++extern int smp_num_siblings;
++extern void smp_flush_tlb(void);
++extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
++extern void smp_send_reschedule(int cpu);
++extern void smp_invalidate_rcv(void);		/* Process an NMI */
++extern void zap_low_mappings(void);
++void smp_stop_cpu(void);
++extern cpumask_t cpu_sibling_map[NR_CPUS];
++extern cpumask_t cpu_core_map[NR_CPUS];
++extern int phys_proc_id[NR_CPUS];
++extern int cpu_core_id[NR_CPUS];
++
++#define SMP_TRAMPOLINE_BASE 0x6000
++
++/*
++ * On x86 all CPUs are mapped 1:1 to the APIC space.
++ * This simplifies scheduling and IPI sending and
++ * compresses data structures.
++ */
++
++static inline int num_booting_cpus(void)
++{
++	return cpus_weight(cpu_possible_map);
++}
++
++#define __smp_processor_id() read_pda(cpunumber)
++
++#ifdef CONFIG_X86_LOCAL_APIC
++extern __inline int hard_smp_processor_id(void)
++{
++	/* we don't want to mark this access volatile - bad code generation */
++	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
++}
++#endif
++
++extern int safe_smp_processor_id(void);
++
++#endif /* !ASSEMBLY */
++
++#define NO_PROC_ID		0xFF		/* No processor magic marker */
++
++#endif
++
++#ifndef ASSEMBLY
++/*
++ * Some lowlevel functions might want to know about
++ * the real APIC ID <-> CPU # mapping.
++ */
++extern u8 x86_cpu_to_apicid[NR_CPUS];	/* physical ID */
++extern u8 x86_cpu_to_log_apicid[NR_CPUS];
++extern u8 bios_cpu_apicid[];
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
++{
++	return cpus_addr(cpumask)[0];
++}
++
++static inline int cpu_present_to_apicid(int mps_cpu)
++{
++	if (mps_cpu < NR_CPUS)
++		return (int)bios_cpu_apicid[mps_cpu];
++	else
++		return BAD_APICID;
++}
++#endif
++
++#endif /* !ASSEMBLY */
++
++#ifndef CONFIG_SMP
++#define stack_smp_processor_id() 0
++#define safe_smp_processor_id() 0
++#define cpu_logical_map(x) (x)
++#else
++#include <asm/thread_info.h>
++#define stack_smp_processor_id() \
++({ 								\
++	struct thread_info *ti;					\
++	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
++	ti->cpu;						\
++})
++#endif
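
The andq trick in stack_smp_processor_id() above relies on the kernel stack being THREAD_SIZE-aligned, so masking the stack pointer with CURRENT_MASK (i.e. ~(THREAD_SIZE - 1)) lands on the thread_info at the stack's base. The same align-down arithmetic in plain C is shown below; this is illustrative only, not part of the patch, and the 8 KB size is just an assumed example value.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

#define DEMO_THREAD_SIZE  8192UL		/* assumed: two 4K pages */
#define DEMO_CURRENT_MASK (~(DEMO_THREAD_SIZE - 1))

int main(void)
{
	unsigned long rsp  = 0xffff810012345e70UL;	/* somewhere in a stack */
	unsigned long base = rsp & DEMO_CURRENT_MASK;	/* thread_info address */

	printf("rsp=%#lx -> thread_info at %#lx\n", rsp, base);
	return 0;
}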
++
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_LOCAL_APIC
++static __inline int logical_smp_processor_id(void)
++{
++	/* we don't want to mark this access volatile - bad code generation */
++	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++#endif
++#endif
++
++#endif
++
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/synch_bitops.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/synch_bitops.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/synch_bitops.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,2 @@
++
++#include <asm-i386/synch_bitops.h>
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/system.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/system.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/system.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/system.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,414 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
++
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <asm/segment.h>
++#include <asm/synch_bitops.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/xen-public/arch-x86_64.h>
++
++#ifdef __KERNEL__
++
++#ifdef CONFIG_SMP
++#define LOCK_PREFIX "lock ; "
++#else
++#define LOCK_PREFIX ""
++#endif
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
++#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
++
++/* frame pointer must be last for get_wchan */
++#define SAVE_CONTEXT    "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t" 
++
++#define __EXTRA_CLOBBER  \
++	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
++
++#define switch_to(prev,next,last) \
++	asm volatile(SAVE_CONTEXT						    \
++		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
++		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
++		     "call __switch_to\n\t"					  \
++		     ".globl thread_return\n"					\
++		     "thread_return:\n\t"					    \
++		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
++		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
++		     LOCK "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"		  \
++		     "movq %%rax,%%rdi\n\t" 					  \
++		     "jc   ret_from_fork\n\t"					  \
++		     RESTORE_CONTEXT						    \
++		     : "=a" (last)					  	  \
++		     : [next] "S" (next), [prev] "D" (prev),			  \
++		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
++		       [ti_flags] "i" (offsetof(struct thread_info, flags)),\
++		       [tif_fork] "i" (TIF_FORK),			  \
++		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
++		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
++		     : "memory", "cc" __EXTRA_CLOBBER)
++    
++
++extern void load_gs_index(unsigned);
++
++/*
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong..
++ */
++#define loadsegment(seg,value)	\
++	asm volatile("\n"			\
++		"1:\t"				\
++		"movl %k0,%%" #seg "\n"		\
++		"2:\n"				\
++		".section .fixup,\"ax\"\n"	\
++		"3:\t"				\
++		"movl %1,%%" #seg "\n\t" 	\
++		"jmp 2b\n"			\
++		".previous\n"			\
++		".section __ex_table,\"a\"\n\t"	\
++		".align 8\n\t"			\
++		".quad 1b,3b\n"			\
++		".previous"			\
++		: :"r" (value), "r" (0))
++
++#define set_debug(value,register) \
++                __asm__("movq %0,%%db" #register  \
++		: /* no output */ \
++		:"r" ((unsigned long) value))
++
++
++#ifdef __KERNEL__
++struct alt_instr { 
++	__u8 *instr; 		/* original instruction */
++	__u8 *replacement;
++	__u8  cpuid;		/* cpuid bit set for replacement */
++	__u8  instrlen;		/* length of original instruction */
++	__u8  replacementlen; 	/* length of new instruction, <= instrlen */ 
++	__u8  pad[5];
++}; 
++#endif
++
++/*
++ * Alternative instructions for different CPU types or capabilities.
++ * 
++ * This allows the use of optimized instructions even on generic binary
++ * kernels.
++ * 
++ * The length of oldinstr must be greater than or equal to the length of
++ * newinstr; it can be padded with nops as needed.
++ * 
++ * For non barrier like inlines please define new variants
++ * without volatile and memory clobber.
++ */
++#define alternative(oldinstr, newinstr, feature) 	\
++	asm volatile ("661:\n\t" oldinstr "\n662:\n" 		     \
++		      ".section .altinstructions,\"a\"\n"     	     \
++		      "  .align 8\n"				       \
++		      "  .quad 661b\n"            /* label */          \
++		      "  .quad 663f\n"		  /* new instruction */ \
++		      "  .byte %c0\n"             /* feature bit */    \
++		      "  .byte 662b-661b\n"       /* sourcelen */      \
++		      "  .byte 664f-663f\n"       /* replacementlen */ \
++		      ".previous\n"					\
++		      ".section .altinstr_replacement,\"ax\"\n"		\
++		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
++		      ".previous" :: "i" (feature) : "memory")  
++
++/*
++ * Alternative inline assembly with input.
++ * 
++ * Peculiarities:
++ * No memory clobber here. 
++ * Argument numbers start with 1.
++ * Best is to use constraints that are fixed size (like (%1) ... "r")
++ * If you use variable-sized constraints like "m" or "g" in the
++ * replacement, make sure to pad to the worst-case length.
++ */
++#define alternative_input(oldinstr, newinstr, feature, input...)	\
++	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
++		      ".section .altinstructions,\"a\"\n"		\
++		      "  .align 8\n"					\
++		      "  .quad 661b\n"            /* label */		\
++		      "  .quad 663f\n"		  /* new instruction */	\
++		      "  .byte %c0\n"             /* feature bit */	\
++		      "  .byte 662b-661b\n"       /* sourcelen */	\
++		      "  .byte 664f-663f\n"       /* replacementlen */	\
++		      ".previous\n"					\
++		      ".section .altinstr_replacement,\"ax\"\n"		\
++		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
++		      ".previous" :: "i" (feature), ##input)
++
++/*
++ * Clear and set 'TS' bit respectively
++ */
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
++
++static inline unsigned long read_cr0(void)
++{ 
++	unsigned long cr0;
++	asm volatile("movq %%cr0,%0" : "=r" (cr0));
++	return cr0;
++} 
++
++static inline void write_cr0(unsigned long val) 
++{ 
++	asm volatile("movq %0,%%cr0" :: "r" (val));
++} 
++
++static inline unsigned long read_cr3(void)
++{ 
++	unsigned long cr3;
++	asm("movq %%cr3,%0" : "=r" (cr3));
++	return cr3;
++} 
++
++static inline unsigned long read_cr4(void)
++{ 
++	unsigned long cr4;
++	asm("movq %%cr4,%0" : "=r" (cr4));
++	return cr4;
++} 
++
++static inline void write_cr4(unsigned long val)
++{ 
++	asm volatile("movq %0,%%cr4" :: "r" (val));
++} 
++
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
++
++#define wbinvd() \
++	__asm__ __volatile__ ("wbinvd": : :"memory");
++
++#endif	/* __KERNEL__ */
++
++#define nop() __asm__ __volatile__ ("nop")
++
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++
++#define tas(ptr) (xchg((ptr),1))
++
++#define __xg(x) ((volatile long *)(x))
++
++extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
++{
++	*ptr = val;
++}
++
++#define _set_64bit set_64bit
++
++/*
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
++ * Note 2: xchg has a side effect, so the volatile attribute is necessary,
++ *	  but in general the primitive is invalid; *ptr is an output argument. --ANK
++ */
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++{
++	switch (size) {
++		case 1:
++			__asm__ __volatile__("xchgb %b0,%1"
++				:"=q" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 2:
++			__asm__ __volatile__("xchgw %w0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 4:
++			__asm__ __volatile__("xchgl %k0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++		case 8:
++			__asm__ __volatile__("xchgq %0,%1"
++				:"=r" (x)
++				:"m" (*__xg(ptr)), "0" (x)
++				:"memory");
++			break;
++	}
++	return x;
++}
++
++/*
++ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
++ * store NEW in MEM.  Return the initial value in MEM.  Success is
++ * indicated by comparing RETURN with OLD.
++ */
++
++#define __HAVE_ARCH_CMPXCHG 1
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++				      unsigned long new, int size)
++{
++	unsigned long prev;
++	switch (size) {
++	case 1:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 2:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 4:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	case 8:
++		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
++				     : "=a"(prev)
++				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
++				     : "memory");
++		return prev;
++	}
++	return old;
++}
++
++#define cmpxchg(ptr,o,n)\
++	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++					(unsigned long)(n),sizeof(*(ptr))))
++
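
The __cmpxchg() helper above returns the old memory value; the caller knows the swap succeeded iff that value equals the expected one. The same semantics can be exercised from user space with the GCC __sync builtin, which compiles to a lock-prefixed cmpxchg on x86-64. This sketch is illustrative only and not part of the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned long mem = 42;

	/* Succeeds: mem holds 42, so it becomes 99 and 42 is returned. */
	unsigned long prev = __sync_val_compare_and_swap(&mem, 42UL, 99UL);
	printf("prev=%lu mem=%lu success=%d\n", prev, mem, prev == 42UL);

	/* Fails: mem now holds 99, not 42, so nothing is written. */
	prev = __sync_val_compare_and_swap(&mem, 42UL, 7UL);
	printf("prev=%lu mem=%lu success=%d\n", prev, mem, prev == 42UL);
	return 0;
}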
++#ifdef CONFIG_SMP
++#define smp_mb()	mb()
++#define smp_rmb()	rmb()
++#define smp_wmb()	wmb()
++#define smp_read_barrier_depends()	do {} while(0)
++#else
++#define smp_mb()	barrier()
++#define smp_rmb()	barrier()
++#define smp_wmb()	barrier()
++#define smp_read_barrier_depends()	do {} while(0)
++#endif
++
++    
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ */
++#define mb() 	asm volatile("mfence":::"memory")
++#define rmb()	asm volatile("lfence":::"memory")
++
++#ifdef CONFIG_UNORDERED_IO
++#define wmb()	asm volatile("sfence" ::: "memory")
++#else
++#define wmb()	asm volatile("" ::: "memory")
++#endif
++#define read_barrier_depends()	do {} while(0)
++#define set_mb(var, value) do { xchg(&var, value); } while (0)
++#define set_wmb(var, value) do { var = value; wmb(); } while (0)
++
++#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
++
++
++/* 
++ * The use of 'barrier' in the following macros reflects their role as local-lock
++ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ the following
++ * critical operations are executed. All critical operations must complete
++ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
++ * includes these barriers, for example.
++ */
++
++#define __cli()								\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	_vcpu->evtchn_upcall_mask = 1;					\
++	preempt_enable_no_resched();					\
++	barrier();							\
++} while (0)
++
++#define __sti()								\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	barrier();							\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	_vcpu->evtchn_upcall_mask = 0;					\
++	barrier(); /* unmask then check (avoid races) */		\
++	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
++		force_evtchn_callback();				\
++	preempt_enable();						\
++} while (0)
++
++#define __save_flags(x)							\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	(x) = _vcpu->evtchn_upcall_mask;				\
++	preempt_enable();						\
++} while (0)
++
++#define __restore_flags(x)						\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	barrier();							\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
++		barrier(); /* unmask then check (avoid races) */	\
++		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
++			force_evtchn_callback();			\
++		preempt_enable();					\
++	} else								\
++		preempt_enable_no_resched();				\
++} while (0)
++
++#define safe_halt()		((void)0)
++
++#define __save_and_cli(x)						\
++do {									\
++	vcpu_info_t *_vcpu;						\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	(x) = _vcpu->evtchn_upcall_mask;				\
++	_vcpu->evtchn_upcall_mask = 1;					\
++	preempt_enable_no_resched();					\
++	barrier();							\
++} while (0)
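
The macros above emulate the EFLAGS interrupt flag with the per-vcpu evtchn_upcall_mask: save the current mask, set it to block event delivery, and restore it afterwards (re-checking for pending events on unmask). Stripped of the hypervisor and preemption details, the save/disable/restore pattern reduces to the sketch below; it is illustrative only, not part of the patch, and the demo_* names are made up.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

static volatile int demo_upcall_mask;	/* 1 == "interrupts" disabled */

#define demo_save_and_cli(x)	do { (x) = demo_upcall_mask;	\
				     demo_upcall_mask = 1; } while (0)
#define demo_restore_flags(x)	do { demo_upcall_mask = (x); } while (0)

int main(void)
{
	int flags;

	demo_save_and_cli(flags);
	printf("critical section, mask=%d (saved %d)\n",
	       demo_upcall_mask, flags);
	demo_restore_flags(flags);
	printf("after restore, mask=%d\n", demo_upcall_mask);
	return 0;
}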
++
++void cpu_idle_wait(void);
++
++#define local_irq_save(x)	__save_and_cli(x)
++#define local_irq_restore(x)	__restore_flags(x)
++#define local_save_flags(x)	__save_flags(x)
++#define local_irq_disable()	__cli()
++#define local_irq_enable()	__sti()
++
++/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
++#define irqs_disabled()							\
++({	int ___x;							\
++	vcpu_info_t *_vcpu;						\
++	preempt_disable();						\
++	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
++	___x = (_vcpu->evtchn_upcall_mask != 0);			\
++	preempt_enable_no_resched();					\
++	___x; })
++
++/*
++ * disable hlt during certain critical i/o operations
++ */
++#define HAVE_DISABLE_HLT
++void disable_hlt(void);
++void enable_hlt(void);
++
++#define HAVE_EAT_KEY
++void eat_key(void);
++
++extern unsigned long arch_align_stack(unsigned long sp);
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/timer.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/timer.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/timer.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/timer.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,64 @@
++#ifndef _ASMi386_TIMER_H
++#define _ASMi386_TIMER_H
++#include <linux/init.h>
++
++/**
++ * struct timer_opts - used to define a timer source
++ *
++ * @name: name of the timer.
++ * @init: Probes and initializes the timer. Takes clock= override 
++ *        string as an argument. Returns 0 on success, anything else
++ *        on failure.
++ * @mark_offset: called by the timer interrupt.
++ * @get_offset:  called by gettimeofday(). Returns the number of microseconds
++ *               since the last timer interrupt.
++ * @monotonic_clock: returns the number of nanoseconds since the init of the
++ *                   timer.
++ * @delay: delays this many clock cycles.
++ */
++struct timer_opts {
++	char* name;
++	void (*mark_offset)(void);
++	unsigned long (*get_offset)(void);
++	unsigned long long (*monotonic_clock)(void);
++	void (*delay)(unsigned long);
++};
++
++struct init_timer_opts {
++	int (*init)(char *override);
++	struct timer_opts *opts;
++};
++
++#define TICK_SIZE (tick_nsec / 1000)
++
++extern struct timer_opts* __init select_timer(void);
++extern void clock_fallback(void);
++void setup_pit_timer(void);
++
++/* Modifiers for buggy PIT handling */
++
++extern int pit_latch_buggy;
++
++extern struct timer_opts *cur_timer;
++extern int timer_ack;
++
++/* list of externed timers */
++extern struct timer_opts timer_none;
++extern struct timer_opts timer_pit;
++extern struct init_timer_opts timer_pit_init;
++extern struct init_timer_opts timer_tsc_init;
++#ifdef CONFIG_X86_CYCLONE_TIMER
++extern struct init_timer_opts timer_cyclone_init;
++#endif
++
++extern unsigned long calibrate_tsc(void);
++extern void init_cpu_khz(void);
++#ifdef CONFIG_HPET_TIMER
++extern struct init_timer_opts timer_hpet_init;
++extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
++#endif
++
++#ifdef CONFIG_X86_PM_TIMER
++extern struct init_timer_opts timer_pmtmr_init;
++#endif
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/tlbflush.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/tlbflush.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/tlbflush.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,97 @@
++#ifndef _X8664_TLBFLUSH_H
++#define _X8664_TLBFLUSH_H
++
++#include <linux/config.h>
++#include <linux/mm.h>
++#include <asm/processor.h>
++
++#define __flush_tlb()	xen_tlb_flush()
++
++/*
++ * Global pages have to be flushed a bit differently. Not a real
++ * performance problem because this does not happen often.
++ */
++#define __flush_tlb_global()	xen_tlb_flush()
++
++
++extern unsigned long pgkern_mask;
++
++#define __flush_tlb_all() __flush_tlb_global()
++
++#define __flush_tlb_one(addr)	xen_invlpg((unsigned long)addr)
++
++
++/*
++ * TLB flushing:
++ *
++ *  - flush_tlb() flushes the current mm struct TLBs
++ *  - flush_tlb_all() flushes all processes TLBs
++ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
++ *  - flush_tlb_page(vma, vmaddr) flushes one page
++ *  - flush_tlb_range(vma, start, end) flushes a range of pages
++ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * ..but the x86_64 has somewhat limited tlb flushing capabilities,
++ * and page-granular flushes are available only on i486 and up.
++ */
++
++#ifndef CONFIG_SMP
++
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
++
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++	if (mm == current->active_mm)
++		__flush_tlb();
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++	unsigned long addr)
++{
++	if (vma->vm_mm == current->active_mm)
++		__flush_tlb_one(addr);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++	unsigned long start, unsigned long end)
++{
++	if (vma->vm_mm == current->active_mm)
++		__flush_tlb();
++}
++
++#else
++
++#include <asm/smp.h>
++
++#define local_flush_tlb() \
++	__flush_tlb()
++
++extern void flush_tlb_all(void);
++extern void flush_tlb_current_task(void);
++extern void flush_tlb_mm(struct mm_struct *);
++extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
++
++#define flush_tlb()	flush_tlb_current_task()
++
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++{
++	flush_tlb_mm(vma->vm_mm);
++}
++
++#define TLBSTATE_OK	1
++#define TLBSTATE_LAZY	2
++
++#endif
++
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++				      unsigned long start, unsigned long end)
++{
++	/* x86_64 does not keep any page table caches in TLB */
++}
++
++#endif /* _X8664_TLBFLUSH_H */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/vga.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/vga.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/vga.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/vga.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,20 @@
++/*
++ *	Access to VGA videoram
++ *
++ *	(c) 1998 Martin Mares <mj at ucw.cz>
++ */
++
++#ifndef _LINUX_ASM_VGA_H_
++#define _LINUX_ASM_VGA_H_
++
++/*
++ *	On the PC, we can just recalculate addresses and then
++ *	access the videoram directly without any black magic.
++ */
++
++#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
++
++#define vga_readb(x) (*(x))
++#define vga_writeb(x,y) (*(y) = (x))
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/xor.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/xor.h
+--- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/xor.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/xor.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,328 @@
++/*
++ * x86-64 changes / gcc fixes from Andi Kleen. 
++ * Copyright 2002 Andi Kleen, SuSE Labs.
++ *
++ * This hasn't been optimized for the hammer yet, but there are likely
++ * no advantages to be gotten from x86-64 here anyways.
++ */
++
++typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
++
++/* Doesn't use gcc to save the XMM registers, because there is no easy way to 
++   tell it to do a clts before the register saving. */
++#define XMMS_SAVE do {				\
++	preempt_disable();			\
++	if (!(current_thread_info()->status & TS_USEDFPU))	\
++		clts();				\
++	__asm__ __volatile__ ( 			\
++		"movups %%xmm0,(%1)	;\n\t"	\
++		"movups %%xmm1,0x10(%1)	;\n\t"	\
++		"movups %%xmm2,0x20(%1)	;\n\t"	\
++		"movups %%xmm3,0x30(%1)	;\n\t"	\
++		: "=&r" (cr0)			\
++		: "r" (xmm_save) 		\
++		: "memory");			\
++} while(0)
++
++#define XMMS_RESTORE do {			\
++	asm volatile (				\
++		"sfence			;\n\t"	\
++		"movups (%1),%%xmm0	;\n\t"	\
++		"movups 0x10(%1),%%xmm1	;\n\t"	\
++		"movups 0x20(%1),%%xmm2	;\n\t"	\
++		"movups 0x30(%1),%%xmm3	;\n\t"	\
++		:				\
++		: "r" (cr0), "r" (xmm_save)	\
++		: "memory");			\
++	if (!(current_thread_info()->status & TS_USEDFPU))	\
++		stts();				\
++	preempt_enable();			\
++} while(0)
++
++#define OFFS(x)		"16*("#x")"
++#define PF_OFFS(x)	"256+16*("#x")"
++#define	PF0(x)		"	prefetchnta "PF_OFFS(x)"(%[p1])		;\n"
++#define LD(x,y)		"       movaps   "OFFS(x)"(%[p1]), %%xmm"#y"	;\n"
++#define ST(x,y)		"       movaps %%xmm"#y",   "OFFS(x)"(%[p1])	;\n"
++#define PF1(x)		"	prefetchnta "PF_OFFS(x)"(%[p2])		;\n"
++#define PF2(x)		"	prefetchnta "PF_OFFS(x)"(%[p3])		;\n"
++#define PF3(x)		"	prefetchnta "PF_OFFS(x)"(%[p4])		;\n"
++#define PF4(x)		"	prefetchnta "PF_OFFS(x)"(%[p5])		;\n"
++#define PF5(x)		"	prefetchnta "PF_OFFS(x)"(%[p6])		;\n"
++#define XO1(x,y)	"       xorps   "OFFS(x)"(%[p2]), %%xmm"#y"	;\n"
++#define XO2(x,y)	"       xorps   "OFFS(x)"(%[p3]), %%xmm"#y"	;\n"
++#define XO3(x,y)	"       xorps   "OFFS(x)"(%[p4]), %%xmm"#y"	;\n"
++#define XO4(x,y)	"       xorps   "OFFS(x)"(%[p5]), %%xmm"#y"	;\n"
++#define XO5(x,y)	"       xorps   "OFFS(x)"(%[p6]), %%xmm"#y"	;\n"
++
++
++static void
++xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
++{
++        unsigned int lines = bytes >> 8;
++	unsigned long cr0;
++	xmm_store_t xmm_save[4];
++
++	XMMS_SAVE;
++
++        asm volatile (
++#undef BLOCK
++#define BLOCK(i) \
++		LD(i,0)					\
++			LD(i+1,1)			\
++		PF1(i)					\
++				PF1(i+2)		\
++				LD(i+2,2)		\
++					LD(i+3,3)	\
++		PF0(i+4)				\
++				PF0(i+6)		\
++		XO1(i,0)				\
++			XO1(i+1,1)			\
++				XO1(i+2,2)		\
++					XO1(i+3,3)	\
++		ST(i,0)					\
++			ST(i+1,1)			\
++				ST(i+2,2)		\
++					ST(i+3,3)	\
++
++
++		PF0(0)
++				PF0(2)
++
++	" .align 32			;\n"
++        " 1:                            ;\n"
++
++		BLOCK(0)
++		BLOCK(4)
++		BLOCK(8)
++		BLOCK(12)
++
++        "       addq %[inc], %[p1]           ;\n"
++        "       addq %[inc], %[p2]           ;\n"
++		"		decl %[cnt] ; jnz 1b"
++	: [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
++	: [inc] "r" (256UL) 
++        : "memory");
++
++	XMMS_RESTORE;
++}
++
++static void
++xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++	  unsigned long *p3)
++{
++	unsigned int lines = bytes >> 8;
++	xmm_store_t xmm_save[4];
++	unsigned long cr0;
++
++	XMMS_SAVE;
++
++        __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++		PF1(i)					\
++				PF1(i+2)		\
++		LD(i,0)					\
++			LD(i+1,1)			\
++				LD(i+2,2)		\
++					LD(i+3,3)	\
++		PF2(i)					\
++				PF2(i+2)		\
++		PF0(i+4)				\
++				PF0(i+6)		\
++		XO1(i,0)				\
++			XO1(i+1,1)			\
++				XO1(i+2,2)		\
++					XO1(i+3,3)	\
++		XO2(i,0)				\
++			XO2(i+1,1)			\
++				XO2(i+2,2)		\
++					XO2(i+3,3)	\
++		ST(i,0)					\
++			ST(i+1,1)			\
++				ST(i+2,2)		\
++					ST(i+3,3)	\
++
++
++		PF0(0)
++				PF0(2)
++
++	" .align 32			;\n"
++        " 1:                            ;\n"
++
++		BLOCK(0)
++		BLOCK(4)
++		BLOCK(8)
++		BLOCK(12)
++
++        "       addq %[inc], %[p1]           ;\n"
++        "       addq %[inc], %[p2]          ;\n"
++        "       addq %[inc], %[p3]           ;\n"
++		"		decl %[cnt] ; jnz 1b"
++	: [cnt] "+r" (lines),
++	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
++	: [inc] "r" (256UL)
++	: "memory"); 
++	XMMS_RESTORE;
++}
++
++static void
++xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++	  unsigned long *p3, unsigned long *p4)
++{
++	unsigned int lines = bytes >> 8;
++	xmm_store_t xmm_save[4]; 
++	unsigned long cr0;
++
++	XMMS_SAVE;
++
++        __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++		PF1(i)					\
++				PF1(i+2)		\
++		LD(i,0)					\
++			LD(i+1,1)			\
++				LD(i+2,2)		\
++					LD(i+3,3)	\
++		PF2(i)					\
++				PF2(i+2)		\
++		XO1(i,0)				\
++			XO1(i+1,1)			\
++				XO1(i+2,2)		\
++					XO1(i+3,3)	\
++		PF3(i)					\
++				PF3(i+2)		\
++		PF0(i+4)				\
++				PF0(i+6)		\
++		XO2(i,0)				\
++			XO2(i+1,1)			\
++				XO2(i+2,2)		\
++					XO2(i+3,3)	\
++		XO3(i,0)				\
++			XO3(i+1,1)			\
++				XO3(i+2,2)		\
++					XO3(i+3,3)	\
++		ST(i,0)					\
++			ST(i+1,1)			\
++				ST(i+2,2)		\
++					ST(i+3,3)	\
++
++
++		PF0(0)
++				PF0(2)
++
++	" .align 32			;\n"
++        " 1:                            ;\n"
++
++		BLOCK(0)
++		BLOCK(4)
++		BLOCK(8)
++		BLOCK(12)
++
++        "       addq %[inc], %[p1]           ;\n"
++        "       addq %[inc], %[p2]           ;\n"
++        "       addq %[inc], %[p3]           ;\n"
++        "       addq %[inc], %[p4]           ;\n"
++	"	decl %[cnt] ; jnz 1b"
++	: [cnt] "+c" (lines),
++	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
++	: [inc] "r" (256UL)
++        : "memory" );
++
++	XMMS_RESTORE;
++}
++
++static void
++xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
++{
++        unsigned int lines = bytes >> 8;
++	xmm_store_t xmm_save[4];
++	unsigned long cr0;
++
++	XMMS_SAVE;
++
++        __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++		PF1(i)					\
++				PF1(i+2)		\
++		LD(i,0)					\
++			LD(i+1,1)			\
++				LD(i+2,2)		\
++					LD(i+3,3)	\
++		PF2(i)					\
++				PF2(i+2)		\
++		XO1(i,0)				\
++			XO1(i+1,1)			\
++				XO1(i+2,2)		\
++					XO1(i+3,3)	\
++		PF3(i)					\
++				PF3(i+2)		\
++		XO2(i,0)				\
++			XO2(i+1,1)			\
++				XO2(i+2,2)		\
++					XO2(i+3,3)	\
++		PF4(i)					\
++				PF4(i+2)		\
++		PF0(i+4)				\
++				PF0(i+6)		\
++		XO3(i,0)				\
++			XO3(i+1,1)			\
++				XO3(i+2,2)		\
++					XO3(i+3,3)	\
++		XO4(i,0)				\
++			XO4(i+1,1)			\
++				XO4(i+2,2)		\
++					XO4(i+3,3)	\
++		ST(i,0)					\
++			ST(i+1,1)			\
++				ST(i+2,2)		\
++					ST(i+3,3)	\
++
++
++		PF0(0)
++				PF0(2)
++
++	" .align 32			;\n"
++        " 1:                            ;\n"
++
++		BLOCK(0)
++		BLOCK(4)
++		BLOCK(8)
++		BLOCK(12)
++
++        "       addq %[inc], %[p1]           ;\n"
++        "       addq %[inc], %[p2]           ;\n"
++        "       addq %[inc], %[p3]           ;\n"
++        "       addq %[inc], %[p4]           ;\n"
++        "       addq %[inc], %[p5]           ;\n"
++	"	decl %[cnt] ; jnz 1b"
++	: [cnt] "+c" (lines),
++  	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), 
++	  [p5] "+r" (p5)
++	: [inc] "r" (256UL)
++	: "memory");
++
++	XMMS_RESTORE;
++}
++
++static struct xor_block_template xor_block_sse = {
++        .name = "generic_sse",
++        .do_2 = xor_sse_2,
++        .do_3 = xor_sse_3,
++        .do_4 = xor_sse_4,
++        .do_5 = xor_sse_5,
++};
++
++#undef XOR_TRY_TEMPLATES
++#define XOR_TRY_TEMPLATES				\
++	do {						\
++		xor_speed(&xor_block_sse);	\
++	} while (0)
++
++/* We force the use of the SSE xor block because it can write around L2.
++   We may also be able to load into the L1 only depending on how the cpu
++   deals with a load to a line that is being prefetched.  */
++#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/balloon.h linux-2.6.12-xen/include/asm-xen/balloon.h
+--- pristine-linux-2.6.12/include/asm-xen/balloon.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/balloon.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,70 @@
++/******************************************************************************
++ * balloon.h
++ *
++ * Xen balloon driver - enables returning/claiming memory to/from Xen.
++ *
++ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_BALLOON_H__
++#define __ASM_BALLOON_H__
++
++/*
++ * Inform the balloon driver that it should allow some slop for device-driver
++ * memory activities.
++ */
++extern void
++balloon_update_driver_allowance(
++	long delta);
++
++/* Allocate an empty low-memory page range. */
++extern struct page *
++balloon_alloc_empty_page_range(
++	unsigned long nr_pages);
++
++/* Deallocate an empty page range, adding to the balloon. */
++extern void
++balloon_dealloc_empty_page_range(
++	struct page *page, unsigned long nr_pages);
++
++/*
++ * Prevent the balloon driver from changing the memory reservation during
++ * a driver critical region.
++ */
++extern spinlock_t balloon_lock;
++#define balloon_lock(__flags)   spin_lock_irqsave(&balloon_lock, __flags)
++#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
++
++#endif /* __ASM_BALLOON_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
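A minimal usage sketch for the balloon_lock/balloon_unlock pair declared above; the frame manipulation in the middle is hypothetical.

#include <asm-xen/balloon.h>

/* Keep the balloon driver from changing the memory reservation while a
 * (hypothetical) driver operation manipulates machine frames. */
static void example_critical_region(void)
{
	unsigned long flags;

	balloon_lock(flags);
	/* ... exchange or remap machine frames here ... */
	balloon_unlock(flags);
}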
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/driver_util.h linux-2.6.12-xen/include/asm-xen/driver_util.h
+--- pristine-linux-2.6.12/include/asm-xen/driver_util.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/driver_util.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,26 @@
++
++#ifndef __ASM_XEN_DRIVER_UTIL_H__
++#define __ASM_XEN_DRIVER_UTIL_H__
++
++#include <linux/config.h>
++#include <linux/vmalloc.h>
++
++/* Allocate/destroy a 'vmalloc' VM area. */
++extern struct vm_struct *alloc_vm_area(unsigned long size);
++extern void free_vm_area(struct vm_struct *area);
++
++/* Lock an area so that PTEs are accessible in the current address space. */
++extern void lock_vm_area(struct vm_struct *area);
++extern void unlock_vm_area(struct vm_struct *area);
++
++#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
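A short, hedged sketch of the vm-area helpers above; the one-page size and the mapping step are assumptions.

#include <linux/errno.h>
#include <asm/page.h>
#include <asm-xen/driver_util.h>

static int example_reserve_one_page(void)
{
	/* Reserve one page of kernel virtual address space. */
	struct vm_struct *area = alloc_vm_area(PAGE_SIZE);

	if (area == NULL)
		return -ENOMEM;
	/* ... map a granted or shared frame at area->addr ... */
	free_vm_area(area);
	return 0;
}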
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/evtchn.h linux-2.6.12-xen/include/asm-xen/evtchn.h
+--- pristine-linux-2.6.12/include/asm-xen/evtchn.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/evtchn.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,123 @@
++/******************************************************************************
++ * evtchn.h
++ * 
++ * Communication via Xen event channels.
++ * Also definitions for the device that demuxes notifications to userspace.
++ * 
++ * Copyright (c) 2004-2005, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_EVTCHN_H__
++#define __ASM_EVTCHN_H__
++
++#include <linux/config.h>
++#include <linux/interrupt.h>
++#include <asm/hypervisor.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <asm-xen/xen-public/event_channel.h>
++#include <linux/smp.h>
++
++/*
++ * LOW-LEVEL DEFINITIONS
++ */
++
++/*
++ * Dynamically bind an event source to an IRQ-like callback handler.
++ * On some platforms this may not be implemented via the Linux IRQ subsystem.
++ * The IRQ argument passed to the callback handler is the same as returned
++ * from the bind call. It may not correspond to a Linux IRQ number.
++ * Returns IRQ or negative errno.
++ * UNBIND: Takes IRQ to unbind from; automatically closes the event channel.
++ */
++extern int bind_evtchn_to_irqhandler(
++	unsigned int evtchn,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id);
++extern int bind_virq_to_irqhandler(
++	unsigned int virq,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id);
++extern int bind_ipi_to_irqhandler(
++	unsigned int ipi,
++	unsigned int cpu,
++	irqreturn_t (*handler)(int, void *, struct pt_regs *),
++	unsigned long irqflags,
++	const char *devname,
++	void *dev_id);
++
++/*
++ * Common unbind function for all event sources. Takes IRQ to unbind from.
++ * Automatically closes the underlying event channel (even for bindings
++ * made with bind_evtchn_to_irqhandler()).
++ */
++extern void unbind_from_irqhandler(unsigned int irq, void *dev_id);
++
++extern void irq_resume(void);
++
++/* Entry point for notifications into Linux subsystems. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
++
++/* Entry point for notifications into the userland character device. */
++extern void evtchn_device_upcall(int port);
++
++extern void mask_evtchn(int port);
++extern void unmask_evtchn(int port);
++
++static inline void clear_evtchn(int port)
++{
++	shared_info_t *s = HYPERVISOR_shared_info;
++	synch_clear_bit(port, &s->evtchn_pending[0]);
++}
++
++static inline void notify_remote_via_evtchn(int port)
++{
++	evtchn_op_t op;
++	op.cmd         = EVTCHNOP_send;
++	op.u.send.port = port;
++	(void)HYPERVISOR_event_channel_op(&op);
++}
++
++/*
++ * Unlike notify_remote_via_evtchn(), this is safe to use across
++ * save/restore. Notifications on a broken connection are silently dropped.
++ */
++extern void notify_remote_via_irq(int irq);
++
++#endif /* __ASM_EVTCHN_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
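An illustrative sketch (not part of the patch) of binding an already-allocated event channel through the interface above; the channel number and device name are placeholders.

#include <linux/interrupt.h>
#include <asm-xen/evtchn.h>

static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* consume the notification */
	return IRQ_HANDLED;
}

static int example_bind(unsigned int evtchn)
{
	/* Returns an IRQ-like handle, or a negative errno. */
	int irq = bind_evtchn_to_irqhandler(evtchn, example_interrupt,
					    0, "example", NULL);

	if (irq < 0)
		return irq;
	/* ... when done; this also closes the event channel ... */
	unbind_from_irqhandler(irq, NULL);
	return 0;
}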
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/features.h linux-2.6.12-xen/include/asm-xen/features.h
+--- pristine-linux-2.6.12/include/asm-xen/features.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/features.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,20 @@
++/******************************************************************************
++ * features.h
++ *
++ * Query the features reported by Xen.
++ *
++ * Copyright (c) 2006, Ian Campbell
++ */
++
++#ifndef __ASM_XEN_FEATURES_H__
++#define __ASM_XEN_FEATURES_H__
++
++#include <asm-xen/xen-public/version.h>
++
++extern void setup_xen_features(void);
++
++extern unsigned long xen_features[XENFEAT_NR_SUBMAPS];
++
++#define xen_feature(flag)	(test_bit(flag, xen_features))
++
++#endif /* __ASM_XEN_FEATURES_H__ */
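Typical use is a one-time setup_xen_features() call followed by xen_feature() tests; the flag below is only one example of a XENFEAT_* constant from xen-public/version.h.

#include <asm-xen/features.h>

static void example_check_features(void)
{
	setup_xen_features();
	if (xen_feature(XENFEAT_writable_page_tables)) {
		/* take the directly-writable page-table path */
	}
}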
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/foreign_page.h linux-2.6.12-xen/include/asm-xen/foreign_page.h
+--- pristine-linux-2.6.12/include/asm-xen/foreign_page.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/foreign_page.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,40 @@
++/******************************************************************************
++ * foreign_page.h
++ * 
++ * Provide a "foreign" page type, that is owned by a foreign allocator and 
++ * not the normal buddy allocator in page_alloc.c
++ * 
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __ASM_XEN_FOREIGN_PAGE_H__
++#define __ASM_XEN_FOREIGN_PAGE_H__
++
++#define PG_foreign		PG_arch_1
++
++#define PageForeign(page)	test_bit(PG_foreign, &(page)->flags)
++
++#define SetPageForeign(page, dtor) do {		\
++	set_bit(PG_foreign, &(page)->flags);	\
++	(page)->mapping = (void *)dtor;		\
++} while (0)
++
++#define ClearPageForeign(page) do {		\
++	clear_bit(PG_foreign, &(page)->flags);	\
++	(page)->mapping = NULL;			\
++} while (0)
++
++#define PageForeignDestructor(page)	\
++	( (void (*) (struct page *)) (page)->mapping )
++
++#endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
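A hedged sketch of the intended use: tag a page as owned by a foreign allocator, with a hypothetical destructor that the free path is expected to invoke via PageForeignDestructor().

#include <linux/mm.h>
#include <asm-xen/foreign_page.h>

static void example_page_dtor(struct page *page)
{
	/* hand the page back to its foreign owner */
}

static void example_tag(struct page *page)
{
	/* The free path should now call (PageForeignDestructor(page))(page)
	 * instead of returning the page to the buddy allocator. */
	SetPageForeign(page, example_page_dtor);
}

static void example_untag(struct page *page)
{
	if (PageForeign(page))
		ClearPageForeign(page);
}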
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/gnttab.h linux-2.6.12-xen/include/asm-xen/gnttab.h
+--- pristine-linux-2.6.12/include/asm-xen/gnttab.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/gnttab.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,98 @@
++/******************************************************************************
++ * gnttab.h
++ * 
++ * Two sets of functionality:
++ * 1. Granting foreign access to our memory reservation.
++ * 2. Accessing others' memory reservations via grant references.
++ * (i.e., mechanisms for both sender and recipient of grant references)
++ * 
++ * Copyright (c) 2004-2005, K A Fraser
++ * Copyright (c) 2005, Christopher Clark
++ */
++
++#ifndef __ASM_GNTTAB_H__
++#define __ASM_GNTTAB_H__
++
++#include <linux/config.h>
++#include <asm/hypervisor.h>
++#include <asm-xen/xen-public/grant_table.h>
++
++/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
++#ifdef __ia64__
++#define NR_GRANT_FRAMES 1
++#else
++#define NR_GRANT_FRAMES 4
++#endif
++
++struct gnttab_free_callback {
++	struct gnttab_free_callback *next;
++	void (*fn)(void *);
++	void *arg;
++	u16 count;
++};
++
++int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
++				int readonly);
++
++/*
++ * End access through the given grant reference, iff the grant entry is no
++ * longer in use.  Return 1 if the grant entry was freed, 0 if it is still in
++ * use.
++ */
++int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
++
++/*
++ * Eventually end access through the given grant reference, and once that
++ * access has been ended, free the given page too.  Access will be ended
++ * immediately iff the grant entry is not in use, otherwise it will happen
++ * some time later.  page may be 0, in which case no freeing will occur.
++ */
++void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
++			       unsigned long page);
++
++int gnttab_grant_foreign_transfer(domid_t domid);
++
++unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
++unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
++
++int gnttab_query_foreign_access(grant_ref_t ref);
++
++/*
++ * operations on reserved batches of grant references
++ */
++int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
++
++void gnttab_free_grant_reference(grant_ref_t ref);
++
++void gnttab_free_grant_references(grant_ref_t head);
++
++int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
++
++void gnttab_release_grant_reference(grant_ref_t *private_head,
++				    grant_ref_t release);
++
++void gnttab_request_free_callback(struct gnttab_free_callback *callback,
++				  void (*fn)(void *), void *arg, u16 count);
++
++void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++				     unsigned long frame, int readonly);
++
++void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid);
++
++#ifdef __ia64__
++#define gnttab_map_vaddr(map) __va(map.dev_bus_addr)
++#else
++#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
++#endif
++
++#endif /* __ASM_GNTTAB_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
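A hypothetical sender-side sketch of the grant interface above: grant a backend read-only access to one frame and later revoke it; the domain id and frame number are assumed to come from the driver's setup code.

#include <asm-xen/gnttab.h>

static int example_share_frame(domid_t backend_id, unsigned long frame)
{
	/* Returns a grant reference, or a negative errno. */
	int ref = gnttab_grant_foreign_access(backend_id, frame, 1 /* readonly */);

	if (ref < 0)
		return ref;
	/* ... advertise 'ref' to the backend, e.g. via xenbus ... */
	gnttab_end_foreign_access(ref, 1 /* readonly */, 0 /* nothing to free */);
	return 0;
}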
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/linux-public/evtchn.h linux-2.6.12-xen/include/asm-xen/linux-public/evtchn.h
+--- pristine-linux-2.6.12/include/asm-xen/linux-public/evtchn.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/linux-public/evtchn.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,98 @@
++/******************************************************************************
++ * evtchn.h
++ * 
++ * Interface to /dev/xen/evtchn.
++ * 
++ * Copyright (c) 2003-2005, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_EVTCHN_H__
++#define __LINUX_PUBLIC_EVTCHN_H__
++
++/* /dev/xen/evtchn resides at device number major=10, minor=201 */
++#define EVTCHN_MINOR 201
++
++/*
++ * Bind a fresh port to VIRQ @virq.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_VIRQ				\
++	_IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
++struct ioctl_evtchn_bind_virq {
++	unsigned int virq;
++};
++
++/*
++ * Bind a fresh port to remote <@remote_domain, @remote_port>.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_INTERDOMAIN			\
++	_IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
++struct ioctl_evtchn_bind_interdomain {
++	unsigned int remote_domain, remote_port;
++};
++
++/*
++ * Allocate a fresh port for binding to @remote_domain.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_UNBOUND_PORT			\
++	_IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
++struct ioctl_evtchn_bind_unbound_port {
++	unsigned int remote_domain;
++};
++
++/*
++ * Unbind previously allocated @port.
++ */
++#define IOCTL_EVTCHN_UNBIND				\
++	_IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
++struct ioctl_evtchn_unbind {
++	unsigned int port;
++};
++
++/*
++ * Send a notification to previously allocated @port.
++ */
++#define IOCTL_EVTCHN_NOTIFY				\
++	_IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
++struct ioctl_evtchn_notify {
++	unsigned int port;
++};
++
++/* Clear and reinitialise the event buffer. Clear error condition. */
++#define IOCTL_EVTCHN_RESET				\
++	_IOC(_IOC_NONE, 'E', 5, 0)
++
++#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
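An illustrative userspace fragment (not part of the patch) that binds a VIRQ through the character device; the /dev/xen/evtchn path matches the comment above, everything else is an assumption.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm-xen/linux-public/evtchn.h>

int example_bind_virq(unsigned int virq)
{
	struct ioctl_evtchn_bind_virq bind = { .virq = virq };
	int fd = open("/dev/xen/evtchn", O_RDWR);
	int port;

	if (fd < 0)
		return -1;
	/* Returns the allocated port; pending ports are then read(2) from fd. */
	port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
	close(fd);	/* dropping the fd also drops the binding in this sketch */
	return port;
}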
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/linux-public/privcmd.h linux-2.6.12-xen/include/asm-xen/linux-public/privcmd.h
+--- pristine-linux-2.6.12/include/asm-xen/linux-public/privcmd.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/linux-public/privcmd.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,91 @@
++/******************************************************************************
++ * privcmd.h
++ * 
++ * Interface to /proc/xen/privcmd.
++ * 
++ * Copyright (c) 2003-2005, K A Fraser
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_PRIVCMD_H__
++#define __LINUX_PUBLIC_PRIVCMD_H__
++
++#ifndef __user
++#define __user
++#endif
++
++typedef struct privcmd_hypercall
++{
++	unsigned long op;
++	unsigned long arg[5];
++} privcmd_hypercall_t;
++
++typedef struct privcmd_mmap_entry {
++	unsigned long va;
++	unsigned long mfn;
++	unsigned long npages;
++} privcmd_mmap_entry_t; 
++
++typedef struct privcmd_mmap {
++	int num;
++	domid_t dom; /* target domain */
++	privcmd_mmap_entry_t __user *entry;
++} privcmd_mmap_t; 
++
++typedef struct privcmd_mmapbatch {
++	int num;     /* number of pages to populate */
++	domid_t dom; /* target domain */
++	unsigned long addr;  /* virtual address */
++	unsigned long __user *arr; /* array of mfns - top nibble set on err */
++} privcmd_mmapbatch_t; 
++
++typedef struct privcmd_blkmsg
++{
++	unsigned long op;
++	void         *buf;
++	int           buf_size;
++} privcmd_blkmsg_t;
++
++/*
++ * @cmd: IOCTL_PRIVCMD_HYPERCALL
++ * @arg: &privcmd_hypercall_t
++ * Return: Value returned from execution of the specified hypercall.
++ */
++#define IOCTL_PRIVCMD_HYPERCALL					\
++	_IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
++#define IOCTL_PRIVCMD_MMAP					\
++	_IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
++#define IOCTL_PRIVCMD_MMAPBATCH					\
++	_IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
++
++#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
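A hedged userspace sketch of IOCTL_PRIVCMD_HYPERCALL; the /proc/xen/privcmd path comes from the comment above, and the hypercall number and argument are placeholders.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm-xen/linux-public/privcmd.h>

long example_hypercall(unsigned long op, unsigned long arg0)
{
	privcmd_hypercall_t call = { .op = op, .arg = { arg0, 0, 0, 0, 0 } };
	int fd = open("/proc/xen/privcmd", O_RDWR);
	long ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);	/* hypercall's own return value */
	close(fd);
	return ret;
}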
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/net_driver_util.h linux-2.6.12-xen/include/asm-xen/net_driver_util.h
+--- pristine-linux-2.6.12/include/asm-xen/net_driver_util.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/net_driver_util.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,56 @@
++/*****************************************************************************
++ *
++ * Utility functions for Xen network devices.
++ *
++ * Copyright (c) 2005 XenSource Ltd.
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following
++ * license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject
++ * to the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _ASM_XEN_NET_DRIVER_UTIL_H
++#define _ASM_XEN_NET_DRIVER_UTIL_H
++
++
++#include <asm-xen/xenbus.h>
++
++
++/**
++ * Read the 'mac' node at the given device's node in the store, and parse that
++ * as colon-separated octets, placing the result in the given mac array.  mac must be
++ * a preallocated array of length ETH_ALEN (as declared in linux/if_ether.h).
++ * Return 0 on success, or -errno on error.
++ */
++int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]);
++
++
++#endif /* _ASM_XEN_NET_DRIVER_UTIL_H */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
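As a usage sketch, a frontend probe routine might fetch the MAC address like this (the error message is illustrative):

#include <linux/if_ether.h>
#include <asm-xen/net_driver_util.h>

static int example_read_mac(struct xenbus_device *dev, u8 mac[ETH_ALEN])
{
	int err = xen_net_read_mac(dev, mac);

	if (err)
		xenbus_dev_fatal(dev, err, "reading the 'mac' node");
	return err;
}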
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/tpmfe.h linux-2.6.12-xen/include/asm-xen/tpmfe.h
+--- pristine-linux-2.6.12/include/asm-xen/tpmfe.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/tpmfe.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,33 @@
++#ifndef TPM_FE_H
++#define TPM_FE_H
++
++struct tpmfe_device {
++	/*
++	 * Let upper layer receive data from front-end
++	 */
++	int (*receive)(const u8 *buffer, size_t count, const void *ptr);
++	/*
++	 * Indicate the status of the front-end to the upper
++	 * layer.
++	 */
++	void (*status)(unsigned int flags);
++
++	/*
++	 * This field indicates the maximum size the driver can
++	 * transfer in one chunk. It is filled out by the front-end
++	 * driver and should be propagated to the generic tpm driver
++	 * for allocation of buffers.
++	 */
++	unsigned int max_tx_size;
++};
++
++enum {
++	TPMFE_STATUS_DISCONNECTED = 0x0,
++	TPMFE_STATUS_CONNECTED = 0x1
++};
++
++int tpm_fe_send(const u8 * buf, size_t count, void *ptr);
++int tpm_fe_register_receiver(struct tpmfe_device *);
++void tpm_fe_unregister_receiver(void);
++
++#endif
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xenbus.h linux-2.6.12-xen/include/asm-xen/xenbus.h
+--- pristine-linux-2.6.12/include/asm-xen/xenbus.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xenbus.h	2006-03-05 23:36:32.000000000 +0100
+@@ -0,0 +1,254 @@
++/******************************************************************************
++ * xenbus.h
++ *
++ * Talks to Xen Store to figure out what devices we have.
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 XenSource Ltd.
++ * 
++ * This file may be distributed separately from the Linux kernel, or
++ * incorporated into other software packages, subject to the following license:
++ * 
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ * 
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ * 
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef _ASM_XEN_XENBUS_H
++#define _ASM_XEN_XENBUS_H
++
++#include <linux/device.h>
++#include <linux/notifier.h>
++#include <asm/semaphore.h>
++#include <asm-xen/xen-public/io/xenbus.h>
++#include <asm-xen/xen-public/io/xs_wire.h>
++
++#define XBT_NULL 0
++
++/* Register callback to watch this node. */
++struct xenbus_watch
++{
++	struct list_head list;
++
++	/* Path being watched. */
++	const char *node;
++
++	/* Callback (executed in a process context with no locks held). */
++	void (*callback)(struct xenbus_watch *,
++			 const char **vec, unsigned int len);
++};
++
++
++/* A xenbus device. */
++struct xenbus_device {
++	const char *devicetype;
++	const char *nodename;
++	const char *otherend;
++	int otherend_id;
++	struct xenbus_watch otherend_watch;
++	struct device dev;
++	int has_error;
++	void *data;
++};
++
++static inline struct xenbus_device *to_xenbus_device(struct device *dev)
++{
++	return container_of(dev, struct xenbus_device, dev);
++}
++
++struct xenbus_device_id
++{
++	/* .../device/<device_type>/<identifier> */
++	char devicetype[32]; 	/* General class of device. */
++};
++
++/* A xenbus driver. */
++struct xenbus_driver {
++	char *name;
++	struct module *owner;
++	const struct xenbus_device_id *ids;
++	int (*probe)(struct xenbus_device *dev,
++		     const struct xenbus_device_id *id);
++	void (*otherend_changed)(struct xenbus_device *dev,
++				 XenbusState backend_state);
++	int (*remove)(struct xenbus_device *dev);
++	int (*suspend)(struct xenbus_device *dev);
++	int (*resume)(struct xenbus_device *dev);
++	int (*hotplug)(struct xenbus_device *, char **, int, char *, int);
++	struct device_driver driver;
++	int (*read_otherend_details)(struct xenbus_device *dev);
++};
++
++static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
++{
++	return container_of(drv, struct xenbus_driver, driver);
++}
++
++int xenbus_register_frontend(struct xenbus_driver *drv);
++int xenbus_register_backend(struct xenbus_driver *drv);
++void xenbus_unregister_driver(struct xenbus_driver *drv);
++
++typedef u32 xenbus_transaction_t;
++
++char **xenbus_directory(xenbus_transaction_t t,
++			const char *dir, const char *node, unsigned int *num);
++void *xenbus_read(xenbus_transaction_t t,
++		  const char *dir, const char *node, unsigned int *len);
++int xenbus_write(xenbus_transaction_t t,
++		 const char *dir, const char *node, const char *string);
++int xenbus_mkdir(xenbus_transaction_t t,
++		 const char *dir, const char *node);
++int xenbus_exists(xenbus_transaction_t t,
++		  const char *dir, const char *node);
++int xenbus_rm(xenbus_transaction_t t, const char *dir, const char *node);
++int xenbus_transaction_start(xenbus_transaction_t *t);
++int xenbus_transaction_end(xenbus_transaction_t t, int abort);
++
++/* Single read and scanf: returns -errno or num scanned if > 0. */
++int xenbus_scanf(xenbus_transaction_t t,
++		 const char *dir, const char *node, const char *fmt, ...)
++	__attribute__((format(scanf, 4, 5)));
++
++/* Single printf and write: returns -errno or 0. */
++int xenbus_printf(xenbus_transaction_t t,
++		  const char *dir, const char *node, const char *fmt, ...)
++	__attribute__((format(printf, 4, 5)));
++
++/* Generic read function: NULL-terminated triples of name,
++ * sprintf-style type string, and pointer. Returns 0 or errno.*/
++int xenbus_gather(xenbus_transaction_t t, const char *dir, ...);
++
++/* notifier routines for when the xenstore comes up */
++int register_xenstore_notifier(struct notifier_block *nb);
++void unregister_xenstore_notifier(struct notifier_block *nb);
++
++int register_xenbus_watch(struct xenbus_watch *watch);
++void unregister_xenbus_watch(struct xenbus_watch *watch);
++void xs_suspend(void);
++void xs_resume(void);
++
++/* Used by xenbus_dev to borrow kernel's store connection. */
++void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
++
++/* Called from xen core code. */
++void xenbus_suspend(void);
++void xenbus_resume(void);
++
++#define XENBUS_IS_ERR_READ(str) ({			\
++	if (!IS_ERR(str) && strlen(str) == 0) {		\
++		kfree(str);				\
++		str = ERR_PTR(-ERANGE);			\
++	}						\
++	IS_ERR(str);					\
++})
++
++#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
++
++
++/**
++ * Register a watch on the given path, using the given xenbus_watch structure
++ * for storage, and the given callback function as the callback.  Return 0 on
++ * success, or -errno on error.  On success, the given path will be saved as
++ * watch->node, and remains the caller's to free.  On error, watch->node will
++ * be NULL, the device will switch to XenbusStateClosing, and the error will
++ * be saved in the store.
++ */
++int xenbus_watch_path(struct xenbus_device *dev, const char *path,
++		      struct xenbus_watch *watch, 
++		      void (*callback)(struct xenbus_watch *,
++				       const char **, unsigned int));
++
++
++/**
++ * Register a watch on the given path/path2, using the given xenbus_watch
++ * structure for storage, and the given callback function as the callback.
++ * Return 0 on success, or -errno on error.  On success, the watched path
++ * (path/path2) will be saved as watch->node, and becomes the caller's to
++ * kfree().  On error, watch->node will be NULL, so the caller has nothing to
++ * free, the device will switch to XenbusStateClosing, and the error will be
++ * saved in the store.
++ */
++int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
++		       const char *path2, struct xenbus_watch *watch, 
++		       void (*callback)(struct xenbus_watch *,
++					const char **, unsigned int));
++
++
++/**
++ * Advertise in the store a change of the given driver to the given new_state.
++ * Perform the change inside the given transaction xbt.  xbt may be NULL, in
++ * which case this is performed inside its own transaction.  Return 0 on
++ * success, or -errno on error.  On error, the device will switch to
++ * XenbusStateClosing, and the error will be saved in the store.
++ */
++int xenbus_switch_state(struct xenbus_device *dev,
++			xenbus_transaction_t xbt,
++			XenbusState new_state);
++
++
++/**
++ * Grant access to the given ring_mfn to the peer of the given device.  Return
++ * 0 on success, or -errno on error.  On error, the device will switch to
++ * XenbusStateClosing, and the error will be saved in the store.
++ */
++int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
++
++
++/**
++ * Allocate an event channel for the given xenbus_device, assigning the newly
++ * created local port to *port.  Return 0 on success, or -errno on error.  On
++ * error, the device will switch to XenbusStateClosing, and the error will be
++ * saved in the store.
++ */
++int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
++
++
++/**
++ * Return the state of the driver rooted at the given store path, or
++ * XenbusStateClosed if no state can be read.
++ */
++XenbusState xenbus_read_driver_state(const char *path);
++
++
++/***
++ * Report the given negative errno into the store, along with the given
++ * formatted message.
++ */
++void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
++		      ...);
++
++
++/***
++ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
++ * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
++ * closedown of this driver and its peer.
++ */
++void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
++		      ...);
++
++
++#endif /* _ASM_XEN_XENBUS_H */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
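A skeleton (hypothetical device type "examplefront") showing how a frontend driver would register against the interface above:

#include <linux/module.h>
#include <asm-xen/xenbus.h>

static struct xenbus_device_id examplefront_ids[] = {
	{ "examplefront" },
	{ "" }
};

static int examplefront_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	/* allocate per-device state, read the config with xenbus_scanf(), ... */
	return 0;
}

static void examplefront_otherend_changed(struct xenbus_device *dev,
					  XenbusState backend_state)
{
	if (backend_state == XenbusStateClosing)
		xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
}

static struct xenbus_driver examplefront = {
	.name  = "examplefront",
	.owner = THIS_MODULE,
	.ids   = examplefront_ids,
	.probe = examplefront_probe,
	.otherend_changed = examplefront_otherend_changed,
};

static int __init examplefront_init(void)
{
	return xenbus_register_frontend(&examplefront);
}
module_init(examplefront_init);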
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xencons.h linux-2.6.12-xen/include/asm-xen/xencons.h
+--- pristine-linux-2.6.12/include/asm-xen/xencons.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xencons.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,14 @@
++#ifndef __ASM_XENCONS_H__
++#define __ASM_XENCONS_H__
++
++void xencons_force_flush(void);
++void xencons_resume(void);
++
++/* Interrupt work hooks. Receive data, or kick data out. */
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
++void xencons_tx(void);
++
++int xencons_ring_init(void);
++int xencons_ring_send(const char *data, unsigned len);
++
++#endif /* __ASM_XENCONS_H__ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen_proc.h linux-2.6.12-xen/include/asm-xen/xen_proc.h
+--- pristine-linux-2.6.12/include/asm-xen/xen_proc.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen_proc.h	2006-03-05 23:36:31.000000000 +0100
+@@ -0,0 +1,23 @@
++
++#ifndef __ASM_XEN_PROC_H__
++#define __ASM_XEN_PROC_H__
++
++#include <linux/config.h>
++#include <linux/proc_fs.h>
++
++extern struct proc_dir_entry *create_xen_proc_entry(
++	const char *name, mode_t mode);
++extern void remove_xen_proc_entry(
++	const char *name);
++
++#endif /* __ASM_XEN_PROC_H__ */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
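For illustration, a module might create and remove a /proc/xen entry with the helpers above (name and mode are placeholders):

#include <linux/init.h>
#include <linux/errno.h>
#include <asm-xen/xen_proc.h>

static struct proc_dir_entry *example_entry;

static int __init example_proc_init(void)
{
	example_entry = create_xen_proc_entry("example", 0400);
	return (example_entry == NULL) ? -ENOMEM : 0;
}

static void __exit example_proc_exit(void)
{
	remove_xen_proc_entry("example");
}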
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/acm.h linux-2.6.12-xen/include/asm-xen/xen-public/acm.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/acm.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/acm.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,181 @@
++/*
++ * acm.h: Xen access control module interface definitions
++ *
++ * Reiner Sailer <sailer at watson.ibm.com>
++ * Copyright (c) 2005, International Business Machines Corporation.
++ */
++
++#ifndef _XEN_PUBLIC_ACM_H
++#define _XEN_PUBLIC_ACM_H
++
++#include "xen.h"
++#include "sched_ctl.h"
++
++/* if ACM_DEBUG defined, all hooks should
++ * print a short trace message (comment it out
++ * when not in testing mode )
++ */
++/* #define ACM_DEBUG */
++
++#ifdef ACM_DEBUG
++#  define printkd(fmt, args...) printk(fmt,## args)
++#else
++#  define printkd(fmt, args...)
++#endif
++
++/* default ssid reference value if not supplied */
++#define ACM_DEFAULT_SSID  0x0
++#define ACM_DEFAULT_LOCAL_SSID  0x0
++
++/* Internal ACM ERROR types */
++#define ACM_OK     0
++#define ACM_UNDEF   -1
++#define ACM_INIT_SSID_ERROR  -2
++#define ACM_INIT_SOID_ERROR  -3
++#define ACM_ERROR          -4
++
++/* External ACCESS DECISIONS */
++#define ACM_ACCESS_PERMITTED        0
++#define ACM_ACCESS_DENIED           -111
++#define ACM_NULL_POINTER_ERROR      -200
++
++/* primary policy in lower 4 bits */
++#define ACM_NULL_POLICY 0
++#define ACM_CHINESE_WALL_POLICY 1
++#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
++#define ACM_POLICY_UNDEFINED 15
++
++/* combinations have secondary policy component in higher 4bit */
++#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
++    ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
++
++/* policy: */
++#define ACM_POLICY_NAME(X) \
++ ((X) == (ACM_NULL_POLICY)) ? "NULL policy" :                        \
++    ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL policy" :        \
++    ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT policy" : \
++    ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT policy" : \
++     "UNDEFINED policy"
++
++/* the following policy versions must be increased
++ * whenever the interpretation of the related
++ * policy's data structure changes
++ */
++#define ACM_POLICY_VERSION 1
++#define ACM_CHWALL_VERSION 1
++#define ACM_STE_VERSION  1
++
++/* defines a ssid reference used by xen */
++typedef uint32_t ssidref_t;
++
++/* hooks that are known to domains */
++enum acm_hook_type {NONE=0, SHARING};
++
++/* -------security policy relevant type definitions-------- */
++
++/* type identifier; compares to "equal" or "not equal" */
++typedef uint16_t domaintype_t;
++
++/* CHINESE WALL POLICY DATA STRUCTURES
++ *
++ * current accumulated conflict type set:
++ * When a domain is started and has a type that is in
++ * a conflict set, the conflicting types are incremented in
++ * the aggregate set. When a domain is destroyed, the 
++ * conflicting types to its type are decremented.
++ * If a domain has multiple types, this procedure works over
++ * all those types.
++ *
++ * conflict_aggregate_set[i] holds the number of
++ *   running domains that have a conflict with type i.
++ *
++ * running_types[i] holds the number of running domains
++ *        that include type i in their ssidref-referenced type set
++ *
++ * conflict_sets[i][j] is "0" if type j has no conflict
++ *    with type i and is "1" otherwise.
++ */
++/* high-16 = version, low-16 = check magic */
++#define ACM_MAGIC  0x0001debc
++
++/* each offset in bytes from start of the struct they
++ * are part of */
++
++/* each buffer consists of all policy information for
++ * the respective policy given in the policy code
++ *
++ * acm_policy_buffer, acm_chwall_policy_buffer,
++ * and acm_ste_policy_buffer need to stay 32-bit aligned
++ * because we create binary policies also with external
++ * tools that assume packed representations (e.g. the java tool)
++ */
++struct acm_policy_buffer {
++    uint32_t policy_version; /* ACM_POLICY_VERSION */
++    uint32_t magic;
++    uint32_t len;
++    uint32_t primary_policy_code;
++    uint32_t primary_buffer_offset;
++    uint32_t secondary_policy_code;
++    uint32_t secondary_buffer_offset;
++};
++
++struct acm_chwall_policy_buffer {
++    uint32_t policy_version; /* ACM_CHWALL_VERSION */
++    uint32_t policy_code;
++    uint32_t chwall_max_types;
++    uint32_t chwall_max_ssidrefs;
++    uint32_t chwall_max_conflictsets;
++    uint32_t chwall_ssid_offset;
++    uint32_t chwall_conflict_sets_offset;
++    uint32_t chwall_running_types_offset;
++    uint32_t chwall_conflict_aggregate_offset;
++};
++
++struct acm_ste_policy_buffer {
++    uint32_t policy_version; /* ACM_STE_VERSION */
++    uint32_t policy_code;
++    uint32_t ste_max_types;
++    uint32_t ste_max_ssidrefs;
++    uint32_t ste_ssid_offset;
++};
++
++struct acm_stats_buffer {
++    uint32_t magic;
++    uint32_t len;
++    uint32_t primary_policy_code;
++    uint32_t primary_stats_offset;
++    uint32_t secondary_policy_code;
++    uint32_t secondary_stats_offset;
++};
++
++struct acm_ste_stats_buffer {
++    uint32_t ec_eval_count;
++    uint32_t gt_eval_count;
++    uint32_t ec_denied_count;
++    uint32_t gt_denied_count; 
++    uint32_t ec_cachehit_count;
++    uint32_t gt_cachehit_count;
++};
++
++struct acm_ssid_buffer {
++    uint32_t len;
++    ssidref_t ssidref;
++    uint32_t primary_policy_code;
++    uint32_t primary_max_types;
++    uint32_t primary_types_offset;
++    uint32_t secondary_policy_code;
++    uint32_t secondary_max_types;
++    uint32_t secondary_types_offset;
++};
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/acm_ops.h linux-2.6.12-xen/include/asm-xen/xen-public/acm_ops.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/acm_ops.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/acm_ops.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,96 @@
++/*
++ * acm_ops.h: Xen access control module hypervisor commands
++ *
++ * Reiner Sailer <sailer at watson.ibm.com>
++ * Copyright (c) 2005, International Business Machines Corporation.
++ */
++
++#ifndef __XEN_PUBLIC_ACM_OPS_H__
++#define __XEN_PUBLIC_ACM_OPS_H__
++
++#include "xen.h"
++#include "sched_ctl.h"
++
++/*
++ * Make sure you increment the interface version whenever you modify this file!
++ * This makes sure that old versions of acm tools will stop working in a
++ * well-defined way (rather than crashing the machine, for instance).
++ */
++#define ACM_INTERFACE_VERSION   0xAAAA0005
++
++/************************************************************************/
++
++#define ACM_SETPOLICY         4
++struct acm_setpolicy {
++    /* OUT variables */
++    void *pushcache;
++    uint32_t pushcache_size;
++};
++
++
++#define ACM_GETPOLICY         5
++struct acm_getpolicy {
++    /* OUT variables */
++    void *pullcache;
++    uint32_t pullcache_size;
++};
++
++
++#define ACM_DUMPSTATS         6
++struct acm_dumpstats {
++    void *pullcache;
++    uint32_t pullcache_size;
++};
++
++
++#define ACM_GETSSID           7
++enum get_type {UNSET=0, SSIDREF, DOMAINID};
++struct acm_getssid {
++    enum get_type get_ssid_by;
++    union {
++        domaintype_t domainid;
++        ssidref_t    ssidref;
++    } id;
++    void *ssidbuf;
++    uint32_t ssidbuf_size;
++};
++
++#define ACM_GETDECISION        8
++struct acm_getdecision {
++    enum get_type get_decision_by1; /* in */
++    enum get_type get_decision_by2;
++    union {
++        domaintype_t domainid;
++        ssidref_t    ssidref;
++    } id1;
++    union {
++        domaintype_t domainid;
++        ssidref_t    ssidref;
++    } id2;
++    enum acm_hook_type hook;
++    int acm_decision;           /* out */
++};
++
++struct acm_op {
++    uint32_t cmd;
++    uint32_t interface_version;      /* ACM_INTERFACE_VERSION */
++    union {
++        struct acm_setpolicy setpolicy;
++        struct acm_getpolicy getpolicy;
++        struct acm_dumpstats dumpstats;
++        struct acm_getssid getssid;
++        struct acm_getdecision getdecision;
++    } u;
++};
++
++#endif                          /* __XEN_PUBLIC_ACM_OPS_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/arch-ia64.h linux-2.6.12-xen/include/asm-xen/xen-public/arch-ia64.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/arch-ia64.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/arch-ia64.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,314 @@
++/******************************************************************************
++ * arch-ia64/hypervisor-if.h
++ * 
++ * Guest OS interface to IA64 Xen.
++ */
++
++#ifndef __HYPERVISOR_IF_IA64_H__
++#define __HYPERVISOR_IF_IA64_H__
++
++/* Maximum number of virtual CPUs in multi-processor guests. */
++/* WARNING: before changing this, check that shared_info fits on a page */
++#define MAX_VIRT_CPUS 4
++
++#ifndef __ASSEMBLY__
++
++#define MAX_NR_SECTION  32  /* at most 32 memory holes */
++typedef struct {
++    unsigned long start;  /* start of memory hole */
++    unsigned long end;    /* end of memory hole */
++} mm_section_t;
++
++typedef struct {
++    unsigned long mfn : 56;
++    unsigned long type: 8;
++} pmt_entry_t;
++
++#define GPFN_MEM          (0UL << 56) /* Guest pfn is normal mem */
++#define GPFN_FRAME_BUFFER (1UL << 56) /* VGA framebuffer */
++#define GPFN_LOW_MMIO     (2UL << 56) /* Low MMIO range */
++#define GPFN_PIB          (3UL << 56) /* PIB base */
++#define GPFN_IOSAPIC      (4UL << 56) /* IOSAPIC base */
++#define GPFN_LEGACY_IO    (5UL << 56) /* Legacy I/O base */
++#define GPFN_GFW          (6UL << 56) /* Guest Firmware */
++#define GPFN_HIGH_MMIO    (7UL << 56) /* High MMIO range */
++
++#define GPFN_IO_MASK     (7UL << 56)  /* Guest pfn is I/O type */
++#define GPFN_INV_MASK    (31UL << 59) /* Guest pfn is invalid */
++
++#define INVALID_MFN       (~0UL)
++
++#define MEM_G   (1UL << 30)	
++#define MEM_M   (1UL << 20)	
++
++#define MMIO_START       (3 * MEM_G)
++#define MMIO_SIZE        (512 * MEM_M)
++
++#define VGA_IO_START     0xA0000UL
++#define VGA_IO_SIZE      0x20000
++
++#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
++#define LEGACY_IO_SIZE   (64*MEM_M)  
++
++#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
++#define IO_PAGE_SIZE  PAGE_SIZE
++
++#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
++#define STORE_PAGE_SIZE	 PAGE_SIZE
++
++#define IO_SAPIC_START   0xfec00000UL
++#define IO_SAPIC_SIZE    0x100000
++
++#define PIB_START 0xfee00000UL
++#define PIB_SIZE 0x100000 
++
++#define GFW_START        (4*MEM_G -16*MEM_M)
++#define GFW_SIZE         (16*MEM_M)
++
++/*
++ * NB. This may become a 64-bit count with no shift. If this happens then the 
++ * structure size will still be 8 bytes, so no other alignments will change.
++ */
++typedef struct {
++    unsigned int  tsc_bits;      /* 0: 32 bits read from the CPU's TSC. */
++    unsigned int  tsc_bitshift;  /* 4: 'tsc_bits' uses N:N+31 of TSC.   */
++} tsc_timestamp_t; /* 8 bytes */
++
++struct pt_fpreg {
++    union {
++        unsigned long bits[2];
++        long double __dummy;    /* force 16-byte alignment */
++    } u;
++};
++
++typedef struct cpu_user_regs{
++    /* The following registers are saved by SAVE_MIN: */
++    unsigned long b6;  /* scratch */
++    unsigned long b7;  /* scratch */
++
++    unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
++    unsigned long ar_ssd; /* reserved for future use (scratch) */
++
++    unsigned long r8;  /* scratch (return value register 0) */
++    unsigned long r9;  /* scratch (return value register 1) */
++    unsigned long r10; /* scratch (return value register 2) */
++    unsigned long r11; /* scratch (return value register 3) */
++
++    unsigned long cr_ipsr; /* interrupted task's psr */
++    unsigned long cr_iip;  /* interrupted task's instruction pointer */
++    unsigned long cr_ifs;  /* interrupted task's function state */
++
++    unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
++    unsigned long ar_pfs;  /* prev function state  */
++    unsigned long ar_rsc;  /* RSE configuration */
++    /* The following two are valid only if cr_ipsr.cpl > 0: */
++    unsigned long ar_rnat;  /* RSE NaT */
++    unsigned long ar_bspstore; /* RSE bspstore */
++
++    unsigned long pr;  /* 64 predicate registers (1 bit each) */
++    unsigned long b0;  /* return pointer (bp) */
++    unsigned long loadrs;  /* size of dirty partition << 16 */
++
++    unsigned long r1;  /* the gp pointer */
++    unsigned long r12; /* interrupted task's memory stack pointer */
++    unsigned long r13; /* thread pointer */
++
++    unsigned long ar_fpsr;  /* floating point status (preserved) */
++    unsigned long r15;  /* scratch */
++
++ /* The remaining registers are NOT saved for system calls.  */
++
++    unsigned long r14;  /* scratch */
++    unsigned long r2;  /* scratch */
++    unsigned long r3;  /* scratch */
++    unsigned long r16;  /* scratch */
++    unsigned long r17;  /* scratch */
++    unsigned long r18;  /* scratch */
++    unsigned long r19;  /* scratch */
++    unsigned long r20;  /* scratch */
++    unsigned long r21;  /* scratch */
++    unsigned long r22;  /* scratch */
++    unsigned long r23;  /* scratch */
++    unsigned long r24;  /* scratch */
++    unsigned long r25;  /* scratch */
++    unsigned long r26;  /* scratch */
++    unsigned long r27;  /* scratch */
++    unsigned long r28;  /* scratch */
++    unsigned long r29;  /* scratch */
++    unsigned long r30;  /* scratch */
++    unsigned long r31;  /* scratch */
++    unsigned long ar_ccv;  /* compare/exchange value (scratch) */
++
++    /*
++     * Floating point registers that the kernel considers scratch:
++     */
++    struct pt_fpreg f6;  /* scratch */
++    struct pt_fpreg f7;  /* scratch */
++    struct pt_fpreg f8;  /* scratch */
++    struct pt_fpreg f9;  /* scratch */
++    struct pt_fpreg f10;  /* scratch */
++    struct pt_fpreg f11;  /* scratch */
++    unsigned long r4;  /* preserved */
++    unsigned long r5;  /* preserved */
++    unsigned long r6;  /* preserved */
++    unsigned long r7;  /* preserved */
++    unsigned long eml_unat;    /* used for emulating instruction */
++    unsigned long rfi_pfs;     /* used for emulating rfi */
++
++}cpu_user_regs_t;
++
++typedef union {
++    unsigned long value;
++    struct {
++        int a_int:1;
++        int a_from_int_cr:1;
++        int a_to_int_cr:1;
++        int a_from_psr:1;
++        int a_from_cpuid:1;
++        int a_cover:1;
++        int a_bsw:1;
++        long reserved:57;
++    };
++} vac_t;
++
++typedef union {
++    unsigned long value;
++    struct {
++        int d_vmsw:1;
++        int d_extint:1;
++        int d_ibr_dbr:1;
++        int d_pmc:1;
++        int d_to_pmd:1;
++        int d_itm:1;
++        long reserved:58;
++    };
++} vdc_t;
++
++typedef struct {
++    vac_t   vac;
++    vdc_t   vdc;
++    unsigned long  virt_env_vaddr;
++    unsigned long  reserved1[29];
++    unsigned long  vhpi;
++    unsigned long  reserved2[95];
++    union {
++        unsigned long  vgr[16];
++        unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
++    };
++    union {
++        unsigned long  vbgr[16];
++        unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
++    };
++    unsigned long  vnat;
++    unsigned long  vbnat;
++    unsigned long  vcpuid[5];
++    unsigned long  reserved3[11];
++    unsigned long  vpsr;
++    unsigned long  vpr;
++    unsigned long  reserved4[76];
++    union {
++        unsigned long  vcr[128];
++        struct {
++            unsigned long dcr;  // CR0
++            unsigned long itm;
++            unsigned long iva;
++            unsigned long rsv1[5];
++            unsigned long pta;  // CR8
++            unsigned long rsv2[7];
++            unsigned long ipsr;  // CR16
++            unsigned long isr;
++            unsigned long rsv3;
++            unsigned long iip;
++            unsigned long ifa;
++            unsigned long itir;
++            unsigned long iipa;
++            unsigned long ifs;
++            unsigned long iim;  // CR24
++            unsigned long iha;
++            unsigned long rsv4[38];
++            unsigned long lid;  // CR64
++            unsigned long ivr;
++            unsigned long tpr;
++            unsigned long eoi;
++            unsigned long irr[4];
++            unsigned long itv;  // CR72
++            unsigned long pmv;
++            unsigned long cmcv;
++            unsigned long rsv5[5];
++            unsigned long lrr0;  // CR80
++            unsigned long lrr1;
++            unsigned long rsv6[46];
++        };
++    };
++    union {
++        unsigned long  reserved5[128];
++        struct {
++            unsigned long precover_ifs;
++            unsigned long unat;  // not sure if this is needed until NaT arch is done
++            int interrupt_collection_enabled; // virtual psr.ic
++            int interrupt_delivery_enabled; // virtual psr.i
++            int pending_interruption;
++            int incomplete_regframe; // see SDM vol2 6.8
++            unsigned long delivery_mask[4];
++            int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
++            int banknum; // 0 or 1, which virtual register bank is active
++            unsigned long rrs[8]; // region registers
++            unsigned long krs[8]; // kernel registers
++            unsigned long pkrs[8]; // protection key registers
++            unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
++            // FIXME: tmp[8] temp'ly being used for virtual psr.pp
++        };
++    };
++    unsigned long  reserved6[3456];
++    unsigned long  vmm_avail[128];
++    unsigned long  reserved7[4096];
++} mapped_regs_t;
++
++typedef struct {
++    mapped_regs_t *privregs;
++    int evtchn_vector;
++} arch_vcpu_info_t;
++
++typedef mapped_regs_t vpd_t;
++
++typedef struct {
++    unsigned int flags;
++    unsigned long start_info_pfn;
++} arch_shared_info_t;
++
++typedef struct {
++    unsigned long start;
++    unsigned long size; 
++} arch_initrd_info_t;
++
++#define IA64_COMMAND_LINE_SIZE 512
++typedef struct vcpu_guest_context {
++#define VGCF_FPU_VALID (1<<0)
++#define VGCF_VMX_GUEST (1<<1)
++#define VGCF_IN_KERNEL (1<<2)
++    unsigned long flags;       /* VGCF_* flags */
++    unsigned long pt_base;     /* PMT table base */
++    unsigned long share_io_pg; /* Shared page for I/O emulation */
++    unsigned long sys_pgnr;    /* System pages out of domain memory */
++    unsigned long vm_assist;   /* VMASST_TYPE_* bitmap, now none on IPF */
++
++    cpu_user_regs_t regs;
++    arch_vcpu_info_t vcpu;
++    arch_shared_info_t shared;
++    arch_initrd_info_t initrd;
++    char cmdline[IA64_COMMAND_LINE_SIZE];
++} vcpu_guest_context_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __HYPERVISOR_IF_IA64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/arch-x86_32.h linux-2.6.12-xen/include/asm-xen/xen-public/arch-x86_32.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/arch-x86_32.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/arch-x86_32.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,158 @@
++/******************************************************************************
++ * arch-x86_32.h
++ * 
++ * Guest OS interface to x86 32-bit Xen.
++ * 
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
++#define __XEN_PUBLIC_ARCH_X86_32_H__
++
++/*
++ * SEGMENT DESCRIPTOR TABLES
++ */
++/*
++ * A number of GDT entries are reserved by Xen. These are not situated at the
++ * start of the GDT because some stupid OSes export hard-coded selector values
++ * in their ABI. These hard-coded values are always near the start of the GDT,
++ * so Xen places itself out of the way, at the far end of the GDT.
++ */
++#define FIRST_RESERVED_GDT_PAGE  14
++#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
++#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
++
++/*
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
++ */
++#define FLAT_RING1_CS 0xe019    /* GDT index 259 */
++#define FLAT_RING1_DS 0xe021    /* GDT index 260 */
++#define FLAT_RING1_SS 0xe021    /* GDT index 260 */
++#define FLAT_RING3_CS 0xe02b    /* GDT index 261 */
++#define FLAT_RING3_DS 0xe033    /* GDT index 262 */
++#define FLAT_RING3_SS 0xe033    /* GDT index 262 */
++
++#define FLAT_KERNEL_CS FLAT_RING1_CS
++#define FLAT_KERNEL_DS FLAT_RING1_DS
++#define FLAT_KERNEL_SS FLAT_RING1_SS
++#define FLAT_USER_CS    FLAT_RING3_CS
++#define FLAT_USER_DS    FLAT_RING3_DS
++#define FLAT_USER_SS    FLAT_RING3_SS
++
++/* And the trap vector is... */
++#define TRAP_INSTR "int $0x82"
++
++/*
++ * Virtual addresses beyond this are not modifiable by guest OSes. The 
++ * machine->physical mapping table starts at this address, read-only.
++ */
++#ifdef CONFIG_X86_PAE
++#define __HYPERVISOR_VIRT_START 0xF5800000
++#else
++#define __HYPERVISOR_VIRT_START 0xFC000000
++#endif
++
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#endif
++
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
++#endif
++
++/* Maximum number of virtual CPUs in multi-processor guests. */
++#define MAX_VIRT_CPUS 32
++
++#ifndef __ASSEMBLY__
++
++/*
++ * Send an array of these to HYPERVISOR_set_trap_table()
++ */
++#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
++#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
++#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
++#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
++typedef struct trap_info {
++    uint8_t       vector;  /* exception vector                              */
++    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
++    uint16_t      cs;      /* code selector                                 */
++    unsigned long address; /* code offset                                   */
++} trap_info_t;
++
++typedef struct cpu_user_regs {
++    uint32_t ebx;
++    uint32_t ecx;
++    uint32_t edx;
++    uint32_t esi;
++    uint32_t edi;
++    uint32_t ebp;
++    uint32_t eax;
++    uint16_t error_code;    /* private */
++    uint16_t entry_vector;  /* private */
++    uint32_t eip;
++    uint16_t cs;
++    uint8_t  saved_upcall_mask;
++    uint8_t  _pad0;
++    uint32_t eflags;        /* eflags.IF == !saved_upcall_mask */
++    uint32_t esp;
++    uint16_t ss, _pad1;
++    uint16_t es, _pad2;
++    uint16_t ds, _pad3;
++    uint16_t fs, _pad4;
++    uint16_t gs, _pad5;
++} cpu_user_regs_t;
++
++typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
++
++/*
++ * The following is all CPU context. Note that the fpu_ctxt block is filled 
++ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
++ */
++typedef struct vcpu_guest_context {
++    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
++    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
++#define VGCF_I387_VALID (1<<0)
++#define VGCF_VMX_GUEST  (1<<1)
++#define VGCF_IN_KERNEL  (1<<2)
++    unsigned long flags;                    /* VGCF_* flags                 */
++    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
++    struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
++    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
++    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
++    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
++    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
++    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
++    unsigned long event_callback_cs;        /* CS:EIP of event callback     */
++    unsigned long event_callback_eip;
++    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
++    unsigned long failsafe_callback_eip;
++    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
++} vcpu_guest_context_t;
++
++typedef struct arch_shared_info {
++    unsigned long max_pfn;                  /* max pfn that appears in table */
++    /* Frame containing list of mfns containing list of mfns containing p2m. */
++    unsigned long pfn_to_mfn_frame_list_list; 
++    unsigned long nmi_reason;
++} arch_shared_info_t;
++
++typedef struct {
++    unsigned long cr2;
++    unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
++} arch_vcpu_info_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
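For illustration, a sketch of how a guest kernel might populate one entry of the virtual IDT using the TI_* helpers above; pf_handler is a hypothetical routine, and the entry would normally be one element of an array handed to the set_trap_table hypercall.

    trap_info_t ti;

    ti.vector  = 14;                          /* page fault vector */
    ti.flags   = 0;
    ti.cs      = FLAT_KERNEL_CS;              /* flat ring-1 code selector defined above */
    ti.address = (unsigned long)pf_handler;   /* hypothetical guest fault handler */
    TI_SET_DPL(&ti, 1);                       /* only the kernel (ring 1) may enter via int */
    TI_SET_IF(&ti, 0);                        /* do not mask event delivery on entry */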
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/arch-x86_64.h linux-2.6.12-xen/include/asm-xen/xen-public/arch-x86_64.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/arch-x86_64.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/arch-x86_64.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,243 @@
++/******************************************************************************
++ * arch-x86_64.h
++ * 
++ * Guest OS interface to x86 64-bit Xen.
++ * 
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_64_H__
++#define __XEN_PUBLIC_ARCH_X86_64_H__
++
++/*
++ * SEGMENT DESCRIPTOR TABLES
++ */
++/*
++ * A number of GDT entries are reserved by Xen. These are not situated at the
++ * start of the GDT because some stupid OSes export hard-coded selector values
++ * in their ABI. These hard-coded values are always near the start of the GDT,
++ * so Xen places itself out of the way, at the far end of the GDT.
++ */
++#define FIRST_RESERVED_GDT_PAGE  14
++#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
++#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
++
++/*
++ * 64-bit segment selectors
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
++ */
++
++#define FLAT_RING3_CS32 0xe023  /* GDT index 260 */
++#define FLAT_RING3_CS64 0xe033  /* GDT index 261 */
++#define FLAT_RING3_DS32 0xe02b  /* GDT index 262 */
++#define FLAT_RING3_DS64 0x0000  /* NULL selector */
++#define FLAT_RING3_SS32 0xe02b  /* GDT index 262 */
++#define FLAT_RING3_SS64 0xe02b  /* GDT index 262 */
++
++#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
++#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
++#define FLAT_KERNEL_DS   FLAT_KERNEL_DS64
++#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
++#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
++#define FLAT_KERNEL_CS   FLAT_KERNEL_CS64
++#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
++#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
++#define FLAT_KERNEL_SS   FLAT_KERNEL_SS64
++
++#define FLAT_USER_DS64 FLAT_RING3_DS64
++#define FLAT_USER_DS32 FLAT_RING3_DS32
++#define FLAT_USER_DS   FLAT_USER_DS64
++#define FLAT_USER_CS64 FLAT_RING3_CS64
++#define FLAT_USER_CS32 FLAT_RING3_CS32
++#define FLAT_USER_CS   FLAT_USER_CS64
++#define FLAT_USER_SS64 FLAT_RING3_SS64
++#define FLAT_USER_SS32 FLAT_RING3_SS32
++#define FLAT_USER_SS   FLAT_USER_SS64
++
++/* And the trap vector is... */
++#define TRAP_INSTR "syscall"
++
++#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
++#define __HYPERVISOR_VIRT_END   0xFFFF880000000000
++
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#define HYPERVISOR_VIRT_END   mk_unsigned_long(__HYPERVISOR_VIRT_END)
++#endif
++
++/* Maximum number of virtual CPUs in multi-processor guests. */
++#define MAX_VIRT_CPUS 32
++
++#ifndef __ASSEMBLY__
++
++/* The machine->physical mapping table starts at this address, read-only. */
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
++#endif
++
++/*
++ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
++ *  @which == SEGBASE_*  ;  @base == 64-bit base address
++ * Returns 0 on success.
++ */
++#define SEGBASE_FS          0
++#define SEGBASE_GS_USER     1
++#define SEGBASE_GS_KERNEL   2
++#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
++
++/*
++ * int HYPERVISOR_iret(void)
++ * All arguments are on the kernel stack, in the following format.
++ * Never returns if successful. Current kernel context is lost.
++ * The saved CS is mapped as follows:
++ *   RING0 -> RING3 kernel mode.
++ *   RING1 -> RING3 kernel mode.
++ *   RING2 -> RING3 kernel mode.
++ *   RING3 -> RING3 user mode.
++ * However RING0 indicates that the guest kernel should return to itself
++ * directly with
++ *      orb   $3,1*8(%rsp)
++ *      iretq
++ * If flags contains VGCF_IN_SYSCALL:
++ *   Restore RAX, RIP, RFLAGS, RSP.
++ *   Discard R11, RCX, CS, SS.
++ * Otherwise:
++ *   Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
++ * All other registers are saved on hypercall entry and restored to user.
++ */
++/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
++#define VGCF_IN_SYSCALL (1<<8)
++struct iret_context {
++    /* Top of stack (%rsp at point of hypercall). */
++    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++    /* Bottom of iret stack frame. */
++};
++/*
++ * For compatibility with HYPERVISOR_switch_to_user which is the old
++ * name for HYPERVISOR_iret.
++ */
++struct switch_to_user {
++    /* Top of stack (%rsp at point of hypercall). */
++    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++    /* Bottom of iret stack frame. */
++};
++
++/*
++ * Send an array of these to HYPERVISOR_set_trap_table().
++ * N.B. As in x86/32 mode, the privilege level specifies which modes may enter
++ * a trap via a software interrupt. Since rings 1 and 2 are unavailable, we
++ * allocate privilege levels as follows:
++ *  Level == 0: No one may enter
++ *  Level == 1: Kernel may enter
++ *  Level == 2: Kernel may enter
++ *  Level == 3: Everyone may enter
++ */
++#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
++#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
++#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
++#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
++typedef struct trap_info {
++    uint8_t       vector;  /* exception vector                              */
++    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
++    uint16_t      cs;      /* code selector                                 */
++    unsigned long address; /* code offset                                   */
++} trap_info_t;
++
++#ifdef __GNUC__
++/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
++#define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
++#else
++/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
++#define __DECL_REG(name) uint64_t r ## name
++#endif
++
++typedef struct cpu_user_regs {
++    uint64_t r15;
++    uint64_t r14;
++    uint64_t r13;
++    uint64_t r12;
++    __DECL_REG(bp);
++    __DECL_REG(bx);
++    uint64_t r11;
++    uint64_t r10;
++    uint64_t r9;
++    uint64_t r8;
++    __DECL_REG(ax);
++    __DECL_REG(cx);
++    __DECL_REG(dx);
++    __DECL_REG(si);
++    __DECL_REG(di);
++    uint32_t error_code;    /* private */
++    uint32_t entry_vector;  /* private */
++    __DECL_REG(ip);
++    uint16_t cs, _pad0[1];
++    uint8_t  saved_upcall_mask;
++    uint8_t  _pad1[3];
++    __DECL_REG(flags);      /* rflags.IF == !saved_upcall_mask */
++    __DECL_REG(sp);
++    uint16_t ss, _pad2[3];
++    uint16_t es, _pad3[3];
++    uint16_t ds, _pad4[3];
++    uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.     */
++    uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
++} cpu_user_regs_t;
++
++#undef __DECL_REG
++
++typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
++
++/*
++ * The following is all CPU context. Note that the fpu_ctxt block is filled 
++ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
++ */
++typedef struct vcpu_guest_context {
++    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
++    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
++#define VGCF_I387_VALID (1<<0)
++#define VGCF_VMX_GUEST  (1<<1)
++#define VGCF_IN_KERNEL  (1<<2)
++    unsigned long flags;                    /* VGCF_* flags                 */
++    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
++    struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
++    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
++    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
++    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
++    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
++    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
++    unsigned long event_callback_eip;
++    unsigned long failsafe_callback_eip;
++    unsigned long syscall_callback_eip;
++    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
++    /* Segment base addresses. */
++    uint64_t      fs_base;
++    uint64_t      gs_base_kernel;
++    uint64_t      gs_base_user;
++} vcpu_guest_context_t;
++
++typedef struct arch_shared_info {
++    unsigned long max_pfn;                  /* max pfn that appears in table */
++    /* Frame containing list of mfns containing list of mfns containing p2m. */
++    unsigned long pfn_to_mfn_frame_list_list; 
++    unsigned long nmi_reason;
++} arch_shared_info_t;
++
++typedef struct {
++    unsigned long cr2;
++    unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
++} arch_vcpu_info_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
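A small aside on __DECL_REG: with gcc the anonymous union makes each general register visible under both its r- and e- name, and both names refer to the same 64-bit field. A minimal sketch, gcc-only as the comment above notes:

    cpu_user_regs_t regs;

    regs.rax = 0x1234;          /* canonical 64-bit name */
    regs.eax = regs.rax + 1;    /* 'eax' aliases the very same uint64_t field under gcc */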
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/COPYING linux-2.6.12-xen/include/asm-xen/xen-public/COPYING
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/COPYING	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/COPYING	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,28 @@
++XEN NOTICE
++==========
++
++This copyright applies to all files within this subdirectory. All
++other files in the Xen source distribution are covered by version 2 of
++the GNU General Public License.
++
++ -- Keir Fraser (on behalf of the Xen team)
++
++=====================================================================
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to
++deal in the Software without restriction, including without limitation the
++rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++sell copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
++FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
++DEALINGS IN THE SOFTWARE.
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/dom0_ops.h linux-2.6.12-xen/include/asm-xen/xen-public/dom0_ops.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/dom0_ops.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/dom0_ops.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,488 @@
++/******************************************************************************
++ * dom0_ops.h
++ * 
++ * Process command requests from domain-0 guest OS.
++ * 
++ * Copyright (c) 2002-2003, B Dragovic
++ * Copyright (c) 2002-2004, K Fraser
++ */
++
++
++#ifndef __XEN_PUBLIC_DOM0_OPS_H__
++#define __XEN_PUBLIC_DOM0_OPS_H__
++
++#include "xen.h"
++#include "sched_ctl.h"
++
++/*
++ * Make sure you increment the interface version whenever you modify this file!
++ * This makes sure that old versions of dom0 tools will stop working in a
++ * well-defined way (rather than crashing the machine, for instance).
++ */
++#define DOM0_INTERFACE_VERSION   0x03000000
++
++/************************************************************************/
++
++#define DOM0_GETMEMLIST        2
++typedef struct dom0_getmemlist {
++    /* IN variables. */
++    domid_t       domain;
++    unsigned long max_pfns;
++    void         *buffer;
++    /* OUT variables. */
++    unsigned long num_pfns;
++} dom0_getmemlist_t;
++
++#define DOM0_SCHEDCTL          6
++ /* struct sched_ctl_cmd is from sched_ctl.h   */
++typedef struct sched_ctl_cmd dom0_schedctl_t;
++
++#define DOM0_ADJUSTDOM         7
++/* struct sched_adjdom_cmd is from sched_ctl.h */
++typedef struct sched_adjdom_cmd dom0_adjustdom_t;
++
++#define DOM0_CREATEDOMAIN      8
++typedef struct dom0_createdomain {
++    /* IN parameters */
++    uint32_t ssidref;
++    xen_domain_handle_t handle;
++    /* IN/OUT parameters. */
++    /* Identifier for new domain (auto-allocate if zero is specified). */
++    domid_t domain;
++} dom0_createdomain_t;
++
++#define DOM0_DESTROYDOMAIN     9
++typedef struct dom0_destroydomain {
++    /* IN variables. */
++    domid_t domain;
++} dom0_destroydomain_t;
++
++#define DOM0_PAUSEDOMAIN      10
++typedef struct dom0_pausedomain {
++    /* IN parameters. */
++    domid_t domain;
++} dom0_pausedomain_t;
++
++#define DOM0_UNPAUSEDOMAIN    11
++typedef struct dom0_unpausedomain {
++    /* IN parameters. */
++    domid_t domain;
++} dom0_unpausedomain_t;
++
++#define DOM0_GETDOMAININFO    12
++typedef struct dom0_getdomaininfo {
++    /* IN variables. */
++    domid_t  domain;                  /* NB. IN/OUT variable. */
++    /* OUT variables. */
++#define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
++#define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
++#define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
++#define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
++#define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
++#define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
++#define DOMFLAGS_CPUSHIFT       8
++#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
++#define DOMFLAGS_SHUTDOWNSHIFT 16
++    uint32_t flags;
++    unsigned long tot_pages;
++    unsigned long max_pages;
++    unsigned long shared_info_frame;       /* MFN of shared_info struct */
++    uint64_t cpu_time;
++    uint32_t nr_online_vcpus;     /* Number of VCPUs currently online. */
++    uint32_t max_vcpu_id;         /* Maximum VCPUID in use by this domain. */
++    uint32_t ssidref;
++    xen_domain_handle_t handle;
++} dom0_getdomaininfo_t;
++
++#define DOM0_SETVCPUCONTEXT   13
++typedef struct dom0_setvcpucontext {
++    /* IN variables. */
++    domid_t               domain;
++    uint32_t              vcpu;
++    /* IN/OUT parameters */
++    vcpu_guest_context_t *ctxt;
++} dom0_setvcpucontext_t;
++
++#define DOM0_MSR              15
++typedef struct dom0_msr {
++    /* IN variables. */
++    uint32_t write;
++    cpumap_t cpu_mask;
++    uint32_t msr;
++    uint32_t in1;
++    uint32_t in2;
++    /* OUT variables. */
++    uint32_t out1;
++    uint32_t out2;
++} dom0_msr_t;
++
++/*
++ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
++ * 1 January, 1970 if the current system time was <system_time>.
++ */
++#define DOM0_SETTIME          17
++typedef struct dom0_settime {
++    /* IN variables. */
++    uint32_t secs;
++    uint32_t nsecs;
++    uint64_t system_time;
++} dom0_settime_t;
++
++#define DOM0_GETPAGEFRAMEINFO 18
++#define NOTAB 0         /* normal page */
++#define L1TAB (1<<28)
++#define L2TAB (2<<28)
++#define L3TAB (3<<28)
++#define L4TAB (4<<28)
++#define LPINTAB  (1<<31)
++#define XTAB  (0xf<<28) /* invalid page */
++#define LTAB_MASK XTAB
++#define LTABTYPE_MASK (0x7<<28)
++
++typedef struct dom0_getpageframeinfo {
++    /* IN variables. */
++    unsigned long pfn;     /* Machine page frame number to query.       */
++    domid_t domain;        /* To which domain does the frame belong?    */
++    /* OUT variables. */
++    /* Is the page PINNED to a type? */
++    uint32_t type;              /* see above type defs */
++} dom0_getpageframeinfo_t;
++
++/*
++ * Read console content from Xen buffer ring.
++ */
++#define DOM0_READCONSOLE      19
++typedef struct dom0_readconsole {
++    /* IN variables. */
++    uint32_t clear;        /* Non-zero -> clear after reading. */
++    /* IN/OUT variables. */
++    char    *buffer;       /* In: Buffer start; Out: Used buffer start */
++    uint32_t count;        /* In: Buffer size;  Out: Used buffer size  */
++} dom0_readconsole_t;
++
++/* 
++ * Set which physical cpus a vcpu can execute on.
++ */
++#define DOM0_SETVCPUAFFINITY  20
++typedef struct dom0_setvcpuaffinity {
++    /* IN variables. */
++    domid_t   domain;
++    uint32_t  vcpu;
++    cpumap_t  cpumap;
++} dom0_setvcpuaffinity_t;
++
++/* Get trace buffers machine base address */
++#define DOM0_TBUFCONTROL       21
++typedef struct dom0_tbufcontrol {
++    /* IN variables */
++#define DOM0_TBUF_GET_INFO     0
++#define DOM0_TBUF_SET_CPU_MASK 1
++#define DOM0_TBUF_SET_EVT_MASK 2
++#define DOM0_TBUF_SET_SIZE     3
++#define DOM0_TBUF_ENABLE       4
++#define DOM0_TBUF_DISABLE      5
++    uint32_t      op;
++    /* IN/OUT variables */
++    cpumap_t      cpu_mask;
++    uint32_t      evt_mask;
++    /* OUT variables */
++    unsigned long buffer_mfn;
++    uint32_t size;
++} dom0_tbufcontrol_t;
++
++/*
++ * Get physical information about the host machine
++ */
++#define DOM0_PHYSINFO         22
++typedef struct dom0_physinfo {
++    uint32_t threads_per_core;
++    uint32_t cores_per_socket;
++    uint32_t sockets_per_node;
++    uint32_t nr_nodes;
++    uint32_t cpu_khz;
++    unsigned long total_pages;
++    unsigned long free_pages;
++    uint32_t hw_cap[8];
++} dom0_physinfo_t;
++
++/*
++ * Get the ID of the current scheduler.
++ */
++#define DOM0_SCHED_ID        24
++typedef struct dom0_sched_id {
++    /* OUT variable */
++    uint32_t sched_id;
++} dom0_sched_id_t;
++
++/* 
++ * Control shadow pagetables operation
++ */
++#define DOM0_SHADOW_CONTROL  25
++
++#define DOM0_SHADOW_CONTROL_OP_OFF         0
++#define DOM0_SHADOW_CONTROL_OP_ENABLE_TEST 1
++#define DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY 2
++#define DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE 3
++
++#define DOM0_SHADOW_CONTROL_OP_FLUSH       10     /* table ops */
++#define DOM0_SHADOW_CONTROL_OP_CLEAN       11
++#define DOM0_SHADOW_CONTROL_OP_PEEK        12
++
++typedef struct dom0_shadow_control_stats {
++    uint32_t fault_count;
++    uint32_t dirty_count;
++    uint32_t dirty_net_count;     
++    uint32_t dirty_block_count;     
++} dom0_shadow_control_stats_t;
++
++typedef struct dom0_shadow_control {
++    /* IN variables. */
++    domid_t        domain;
++    uint32_t       op;
++    unsigned long *dirty_bitmap; /* pointer to locked buffer */
++    /* IN/OUT variables. */
++    unsigned long  pages;        /* size of buffer, updated with actual size */
++    /* OUT variables. */
++    dom0_shadow_control_stats_t stats;
++} dom0_shadow_control_t;
++
++#define DOM0_SETDOMAINMAXMEM   28
++typedef struct dom0_setdomainmaxmem {
++    /* IN variables. */
++    domid_t       domain;
++    unsigned long max_memkb;
++} dom0_setdomainmaxmem_t;
++
++#define DOM0_GETPAGEFRAMEINFO2 29   /* batched interface */
++typedef struct dom0_getpageframeinfo2 {
++    /* IN variables. */
++    domid_t        domain;
++    unsigned long  num;
++    /* IN/OUT variables. */
++    unsigned long *array;
++} dom0_getpageframeinfo2_t;
++
++/*
++ * Request memory range (@pfn, @pfn+@nr_pfns-1) to have type @type.
++ * On x86, @type is an architecture-defined MTRR memory type.
++ * On success, returns the MTRR that was used (@reg) and a handle that can
++ * be passed to DOM0_DEL_MEMTYPE to accurately tear down the new setting.
++ * (x86-specific).
++ */
++#define DOM0_ADD_MEMTYPE         31
++typedef struct dom0_add_memtype {
++    /* IN variables. */
++    unsigned long pfn;
++    unsigned long nr_pfns;
++    uint32_t      type;
++    /* OUT variables. */
++    uint32_t      handle;
++    uint32_t      reg;
++} dom0_add_memtype_t;
++
++/*
++ * Tear down an existing memory-range type. If @handle is remembered then it
++ * should be passed in to accurately tear down the correct setting (in case
++ * of overlapping memory regions with differing types). If it is not known
++ * then @handle should be set to zero. In all cases @reg must be set.
++ * (x86-specific).
++ */
++#define DOM0_DEL_MEMTYPE         32
++typedef struct dom0_del_memtype {
++    /* IN variables. */
++    uint32_t handle;
++    uint32_t reg;
++} dom0_del_memtype_t;
++
++/* Read current type of an MTRR (x86-specific). */
++#define DOM0_READ_MEMTYPE        33
++typedef struct dom0_read_memtype {
++    /* IN variables. */
++    uint32_t reg;
++    /* OUT variables. */
++    unsigned long pfn;
++    unsigned long nr_pfns;
++    uint32_t type;
++} dom0_read_memtype_t;
++
++/* Interface for controlling Xen software performance counters. */
++#define DOM0_PERFCCONTROL        34
++/* Sub-operations: */
++#define DOM0_PERFCCONTROL_OP_RESET 1   /* Reset all counters to zero. */
++#define DOM0_PERFCCONTROL_OP_QUERY 2   /* Get perfctr information. */
++typedef struct dom0_perfc_desc {
++    uint8_t      name[80];             /* name of perf counter */
++    uint32_t     nr_vals;              /* number of values for this counter */
++    uint32_t     vals[64];             /* array of values */
++} dom0_perfc_desc_t;
++typedef struct dom0_perfccontrol {
++    /* IN variables. */
++    uint32_t       op;                /*  DOM0_PERFCCONTROL_OP_??? */
++    /* OUT variables. */
++    uint32_t       nr_counters;       /*  number of counters */
++    dom0_perfc_desc_t *desc;          /*  counter information (or NULL) */
++} dom0_perfccontrol_t;
++
++#define DOM0_MICROCODE           35
++typedef struct dom0_microcode {
++    /* IN variables. */
++    void    *data;                    /* Pointer to microcode data */
++    uint32_t length;                  /* Length of microcode data. */
++} dom0_microcode_t;
++
++#define DOM0_IOPORT_PERMISSION   36
++typedef struct dom0_ioport_permission {
++    domid_t  domain;                  /* domain to be affected */
++    uint32_t first_port;              /* first port in range */
++    uint32_t nr_ports;                /* size of port range */
++    uint8_t  allow_access;            /* allow or deny access to range? */
++} dom0_ioport_permission_t;
++
++#define DOM0_GETVCPUCONTEXT      37
++typedef struct dom0_getvcpucontext {
++    /* IN variables. */
++    domid_t  domain;                  /* domain to be affected */
++    uint32_t vcpu;                    /* vcpu # */
++    /* OUT variables. */
++    vcpu_guest_context_t *ctxt;
++} dom0_getvcpucontext_t;
++
++#define DOM0_GETVCPUINFO         43
++typedef struct dom0_getvcpuinfo {
++    /* IN variables. */
++    domid_t  domain;                  /* domain to be affected */
++    uint32_t vcpu;                    /* vcpu # */
++    /* OUT variables. */
++    uint8_t  online;                  /* currently online (not hotplugged)? */
++    uint8_t  blocked;                 /* blocked waiting for an event? */
++    uint8_t  running;                 /* currently scheduled on its CPU? */
++    uint64_t cpu_time;                /* total cpu time consumed (ns) */
++    uint32_t cpu;                     /* current mapping   */
++    cpumap_t cpumap;                  /* allowable mapping */
++} dom0_getvcpuinfo_t;
++
++#define DOM0_GETDOMAININFOLIST   38
++typedef struct dom0_getdomaininfolist {
++    /* IN variables. */
++    domid_t               first_domain;
++    uint32_t              max_domains;
++    dom0_getdomaininfo_t *buffer;
++    /* OUT variables. */
++    uint32_t              num_domains;
++} dom0_getdomaininfolist_t;
++
++#define DOM0_PLATFORM_QUIRK      39  
++#define QUIRK_NOIRQBALANCING  1
++typedef struct dom0_platform_quirk {
++    /* IN variables. */
++    uint32_t quirk_id;
++} dom0_platform_quirk_t;
++
++#define DOM0_PHYSICAL_MEMORY_MAP 40
++typedef struct dom0_physical_memory_map {
++    /* IN variables. */
++    uint32_t max_map_entries;
++    /* OUT variables. */
++    uint32_t nr_map_entries;
++    struct dom0_memory_map_entry {
++        uint64_t start, end;
++        uint32_t flags; /* reserved */
++        uint8_t  is_ram;
++    } *memory_map;
++} dom0_physical_memory_map_t;
++
++#define DOM0_MAX_VCPUS 41
++typedef struct dom0_max_vcpus {
++    domid_t  domain;        /* domain to be affected */
++    uint32_t max;           /* maximum number of vcpus */
++} dom0_max_vcpus_t;
++
++#define DOM0_SETDOMAINHANDLE 44
++typedef struct dom0_setdomainhandle {
++    domid_t domain;
++    xen_domain_handle_t handle;
++} dom0_setdomainhandle_t;
++
++#define DOM0_SETDEBUGGING 45
++typedef struct dom0_setdebugging {
++    domid_t domain;
++    uint8_t enable;
++} dom0_setdebugging_t;
++
++#define DOM0_IRQ_PERMISSION 46
++typedef struct dom0_irq_permission {
++    domid_t domain;          /* domain to be affected */
++    uint8_t pirq;
++    uint8_t allow_access;    /* flag to specify enable/disable of IRQ access */
++} dom0_irq_permission_t;
++
++#define DOM0_IOMEM_PERMISSION 47
++typedef struct dom0_iomem_permission {
++    domid_t  domain;          /* domain to be affected */
++    unsigned long first_pfn;  /* first page (physical page number) in range */
++    unsigned long nr_pfns;    /* number of pages in range (>0) */
++    uint8_t allow_access;     /* allow (!0) or deny (0) access to range? */
++} dom0_iomem_permission_t;
++ 
++#define DOM0_HYPERCALL_INIT   48
++typedef struct dom0_hypercall_init {
++    domid_t  domain;          /* domain to be affected */
++    unsigned long mfn;        /* machine frame to be initialised */
++} dom0_hypercall_init_t;
++ 
++typedef struct dom0_op {
++    uint32_t cmd;
++    uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
++    union {
++        struct dom0_createdomain      createdomain;
++        struct dom0_pausedomain       pausedomain;
++        struct dom0_unpausedomain     unpausedomain;
++        struct dom0_destroydomain     destroydomain;
++        struct dom0_getmemlist        getmemlist;
++        struct sched_ctl_cmd          schedctl;
++        struct sched_adjdom_cmd       adjustdom;
++        struct dom0_setvcpucontext    setvcpucontext;
++        struct dom0_getdomaininfo     getdomaininfo;
++        struct dom0_getpageframeinfo  getpageframeinfo;
++        struct dom0_msr               msr;
++        struct dom0_settime           settime;
++        struct dom0_readconsole       readconsole;
++        struct dom0_setvcpuaffinity   setvcpuaffinity;
++        struct dom0_tbufcontrol       tbufcontrol;
++        struct dom0_physinfo          physinfo;
++        struct dom0_sched_id          sched_id;
++        struct dom0_shadow_control    shadow_control;
++        struct dom0_setdomainmaxmem   setdomainmaxmem;
++        struct dom0_getpageframeinfo2 getpageframeinfo2;
++        struct dom0_add_memtype       add_memtype;
++        struct dom0_del_memtype       del_memtype;
++        struct dom0_read_memtype      read_memtype;
++        struct dom0_perfccontrol      perfccontrol;
++        struct dom0_microcode         microcode;
++        struct dom0_ioport_permission ioport_permission;
++        struct dom0_getvcpucontext    getvcpucontext;
++        struct dom0_getvcpuinfo       getvcpuinfo;
++        struct dom0_getdomaininfolist getdomaininfolist;
++        struct dom0_platform_quirk    platform_quirk;
++        struct dom0_physical_memory_map physical_memory_map;
++        struct dom0_max_vcpus         max_vcpus;
++        struct dom0_setdomainhandle   setdomainhandle;        
++        struct dom0_setdebugging      setdebugging;
++        struct dom0_irq_permission    irq_permission;
++        struct dom0_iomem_permission  iomem_permission;
++        struct dom0_hypercall_init    hypercall_init;
++        uint8_t                  pad[128];
++    } u;
++} dom0_op_t;
++
++#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
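As a hedged example, a dom0 tool might query domain information with the structures above roughly as follows; HYPERVISOR_dom0_op() is assumed to be the usual wrapper from the hypercall headers elsewhere in this patch, not part of this file.

    dom0_op_t op;

    memset(&op, 0, sizeof(op));
    op.cmd = DOM0_GETDOMAININFO;
    op.interface_version = DOM0_INTERFACE_VERSION;
    op.u.getdomaininfo.domain = 0;                 /* ask about domain 0 */
    if (HYPERVISOR_dom0_op(&op) == 0)              /* wrapper assumed, defined elsewhere */
        printk("dom0: %lu pages in use\n", op.u.getdomaininfo.tot_pages);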
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/event_channel.h linux-2.6.12-xen/include/asm-xen/xen-public/event_channel.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/event_channel.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/event_channel.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,203 @@
++/******************************************************************************
++ * event_channel.h
++ * 
++ * Event channels between domains.
++ * 
++ * Copyright (c) 2003-2004, K A Fraser.
++ */
++
++#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
++#define __XEN_PUBLIC_EVENT_CHANNEL_H__
++
++typedef uint32_t evtchn_port_t;
++
++/*
++ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
++ * accepting interdomain bindings from domain <remote_dom>. A fresh port
++ * is allocated in <dom> and returned as <port>.
++ * NOTES:
++ *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
++ *  2. <rdom> may be DOMID_SELF, allowing loopback connections.
++ */
++#define EVTCHNOP_alloc_unbound    6
++typedef struct evtchn_alloc_unbound {
++    /* IN parameters */
++    domid_t dom, remote_dom;
++    /* OUT parameters */
++    evtchn_port_t port;
++} evtchn_alloc_unbound_t;
++
++/*
++ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
++ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
++ * a port that is unbound and marked as accepting bindings from the calling
++ * domain. A fresh port is allocated in the calling domain and returned as
++ * <local_port>.
++ * NOTES:
++ *  2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
++ */
++#define EVTCHNOP_bind_interdomain 0
++typedef struct evtchn_bind_interdomain {
++    /* IN parameters. */
++    domid_t remote_dom;
++    evtchn_port_t remote_port;
++    /* OUT parameters. */
++    evtchn_port_t local_port;
++} evtchn_bind_interdomain_t;
++
++/*
++ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
++ * vcpu.
++ * NOTES:
++ *  1. A virtual IRQ may be bound to at most one event channel per vcpu.
++ *  2. The allocated event channel is bound to the specified vcpu. The binding
++ *     may not be changed.
++ */
++#define EVTCHNOP_bind_virq        1
++typedef struct evtchn_bind_virq {
++    /* IN parameters. */
++    uint32_t virq;
++    uint32_t vcpu;
++    /* OUT parameters. */
++    evtchn_port_t port;
++} evtchn_bind_virq_t;
++
++/*
++ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
++ * NOTES:
++ *  1. A physical IRQ may be bound to at most one event channel per domain.
++ *  2. Only a sufficiently-privileged domain may bind to a physical IRQ.
++ */
++#define EVTCHNOP_bind_pirq        2
++typedef struct evtchn_bind_pirq {
++    /* IN parameters. */
++    uint32_t pirq;
++#define BIND_PIRQ__WILL_SHARE 1
++    uint32_t flags; /* BIND_PIRQ__* */
++    /* OUT parameters. */
++    evtchn_port_t port;
++} evtchn_bind_pirq_t;
++
++/*
++ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
++ * NOTES:
++ *  1. The allocated event channel is bound to the specified vcpu. The binding
++ *     may not be changed.
++ */
++#define EVTCHNOP_bind_ipi         7
++typedef struct evtchn_bind_ipi {
++    uint32_t vcpu;
++    /* OUT parameters. */
++    evtchn_port_t port;
++} evtchn_bind_ipi_t;
++
++/*
++ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
++ * interdomain then the remote end is placed in the unbound state
++ * (EVTCHNSTAT_unbound), awaiting a new connection.
++ */
++#define EVTCHNOP_close            3
++typedef struct evtchn_close {
++    /* IN parameters. */
++    evtchn_port_t port;
++} evtchn_close_t;
++
++/*
++ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
++ * endpoint is <port>.
++ */
++#define EVTCHNOP_send             4
++typedef struct evtchn_send {
++    /* IN parameters. */
++    evtchn_port_t port;
++} evtchn_send_t;
++
++/*
++ * EVTCHNOP_status: Get the current status of the communication channel which
++ * has an endpoint at <dom, port>.
++ * NOTES:
++ *  1. <dom> may be specified as DOMID_SELF.
++ *  2. Only a sufficiently-privileged domain may obtain the status of an event
++ *     channel for which <dom> is not DOMID_SELF.
++ */
++#define EVTCHNOP_status           5
++typedef struct evtchn_status {
++    /* IN parameters */
++    domid_t  dom;
++    evtchn_port_t port;
++    /* OUT parameters */
++#define EVTCHNSTAT_closed       0  /* Channel is not in use.                 */
++#define EVTCHNSTAT_unbound      1  /* Channel is waiting interdom connection.*/
++#define EVTCHNSTAT_interdomain  2  /* Channel is connected to remote domain. */
++#define EVTCHNSTAT_pirq         3  /* Channel is bound to a phys IRQ line.   */
++#define EVTCHNSTAT_virq         4  /* Channel is bound to a virtual IRQ line */
++#define EVTCHNSTAT_ipi          5  /* Channel is bound to a virtual IPI line */
++    uint32_t status;
++    uint32_t vcpu;                 /* VCPU to which this channel is bound.   */
++    union {
++        struct {
++            domid_t dom;
++        } unbound; /* EVTCHNSTAT_unbound */
++        struct {
++            domid_t dom;
++            evtchn_port_t port;
++        } interdomain; /* EVTCHNSTAT_interdomain */
++        uint32_t pirq;      /* EVTCHNSTAT_pirq        */
++        uint32_t virq;      /* EVTCHNSTAT_virq        */
++    } u;
++} evtchn_status_t;
++
++/*
++ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
++ * event is pending.
++ * NOTES:
++ *  1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
++ *     the binding. This binding cannot be changed.
++ *  2. All other channels notify vcpu0 by default. This default is set when
++ *     the channel is allocated (a port that is freed and subsequently reused
++ *     has its binding reset to vcpu0).
++ */
++#define EVTCHNOP_bind_vcpu        8
++typedef struct evtchn_bind_vcpu {
++    /* IN parameters. */
++    evtchn_port_t port;
++    uint32_t vcpu;
++} evtchn_bind_vcpu_t;
++
++/*
++ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
++ * a notification to the appropriate VCPU if an event is pending.
++ */
++#define EVTCHNOP_unmask           9
++typedef struct evtchn_unmask {
++    /* IN parameters. */
++    evtchn_port_t port;
++} evtchn_unmask_t;
++
++typedef struct evtchn_op {
++    uint32_t cmd; /* EVTCHNOP_* */
++    union {
++        evtchn_alloc_unbound_t    alloc_unbound;
++        evtchn_bind_interdomain_t bind_interdomain;
++        evtchn_bind_virq_t        bind_virq;
++        evtchn_bind_pirq_t        bind_pirq;
++        evtchn_bind_ipi_t         bind_ipi;
++        evtchn_close_t            close;
++        evtchn_send_t             send;
++        evtchn_status_t           status;
++        evtchn_bind_vcpu_t        bind_vcpu;
++        evtchn_unmask_t           unmask;
++    } u;
++} evtchn_op_t;
++
++#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
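A short sketch of the common EVTCHNOP_alloc_unbound pattern, allocating a local port that a peer domain may later bind to. DOMID_SELF comes from xen.h, HYPERVISOR_event_channel_op() is assumed from the hypercall headers, and backend_id is a hypothetical peer domain id.

    evtchn_op_t op;

    op.cmd = EVTCHNOP_alloc_unbound;
    op.u.alloc_unbound.dom        = DOMID_SELF;   /* allocate the port in this domain */
    op.u.alloc_unbound.remote_dom = backend_id;   /* hypothetical peer domain id */
    if (HYPERVISOR_event_channel_op(&op) == 0)    /* wrapper assumed, defined elsewhere */
        /* op.u.alloc_unbound.port now names the fresh local port */;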
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/grant_table.h linux-2.6.12-xen/include/asm-xen/xen-public/grant_table.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/grant_table.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/grant_table.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,306 @@
++/******************************************************************************
++ * grant_table.h
++ * 
++ * Interface for granting foreign access to page frames, and receiving
++ * page-ownership transfers.
++ * 
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
++#define __XEN_PUBLIC_GRANT_TABLE_H__
++
++
++/***********************************
++ * GRANT TABLE REPRESENTATION
++ */
++
++/* Some rough guidelines on accessing and updating grant-table entries
++ * in a concurrency-safe manner. For more information, Linux contains a
++ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
++ * 
++ * NB. WMB is a no-op on current-generation x86 processors. However, a
++ *     compiler barrier will still be required.
++ * 
++ * Introducing a valid entry into the grant table:
++ *  1. Write ent->domid.
++ *  2. Write ent->frame:
++ *      GTF_permit_access:   Frame to which access is permitted.
++ *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
++ *                           frame, or zero if none.
++ *  3. Write memory barrier (WMB).
++ *  4. Write ent->flags, inc. valid type.
++ * 
++ * Invalidating an unused GTF_permit_access entry:
++ *  1. flags = ent->flags.
++ *  2. Observe that !(flags & (GTF_reading|GTF_writing)).
++ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
++ *  NB. No need for WMB as reuse of entry is control-dependent on success of
++ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
++ *
++ * Invalidating an in-use GTF_permit_access entry:
++ *  This cannot be done directly. Request assistance from the domain controller
++ *  which can set a timeout on the use of a grant entry and take necessary
++ *  action. (NB. This is not yet implemented!).
++ * 
++ * Invalidating an unused GTF_accept_transfer entry:
++ *  1. flags = ent->flags.
++ *  2. Observe that !(flags & GTF_transfer_committed). [*]
++ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
++ *  NB. No need for WMB as reuse of entry is control-dependent on success of
++ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
++ *  [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
++ *      The guest must /not/ modify the grant entry until the address of the
++ *      transferred frame is written. It is safe for the guest to spin waiting
++ *      for this to occur (detect by observing GTF_transfer_completed in
++ *      ent->flags).
++ *
++ * Invalidating a committed GTF_accept_transfer entry:
++ *  1. Wait for (ent->flags & GTF_transfer_completed).
++ *
++ * Changing a GTF_permit_access from writable to read-only:
++ *  Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
++ * 
++ * Changing a GTF_permit_access from read-only to writable:
++ *  Use SMP-safe bit-setting instruction.
++ */
++
++/*
++ * A grant table comprises a packed array of grant entries in one or more
++ * page frames shared between Xen and a guest.
++ * [XEN]: This field is written by Xen and read by the sharing guest.
++ * [GST]: This field is written by the guest and read by Xen.
++ */
++typedef struct grant_entry {
++    /* GTF_xxx: various type and flag information.  [XEN,GST] */
++    uint16_t flags;
++    /* The domain being granted foreign privileges. [GST] */
++    domid_t  domid;
++    /*
++     * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
++     * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
++     */
++    uint32_t frame;
++} grant_entry_t;
++
++/*
++ * Type of grant entry.
++ *  GTF_invalid: This grant entry grants no privileges.
++ *  GTF_permit_access: Allow @domid to map/access @frame.
++ *  GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
++ *                       to this guest. Xen writes the page number to @frame.
++ */
++#define GTF_invalid         (0U<<0)
++#define GTF_permit_access   (1U<<0)
++#define GTF_accept_transfer (2U<<0)
++#define GTF_type_mask       (3U<<0)
++
++/*
++ * Subflags for GTF_permit_access.
++ *  GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
++ *  GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
++ *  GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
++ */
++#define _GTF_readonly       (2)
++#define GTF_readonly        (1U<<_GTF_readonly)
++#define _GTF_reading        (3)
++#define GTF_reading         (1U<<_GTF_reading)
++#define _GTF_writing        (4)
++#define GTF_writing         (1U<<_GTF_writing)
++
++/*
++ * Subflags for GTF_accept_transfer:
++ *  GTF_transfer_committed: Xen sets this flag to indicate that it is committed
++ *      to transferring ownership of a page frame. When a guest sees this flag
++ *      it must /not/ modify the grant entry until GTF_transfer_completed is
++ *      set by Xen.
++ *  GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
++ *      after reading GTF_transfer_committed. Xen will always write the frame
++ *      address, followed by ORing this flag, in a timely manner.
++ */
++#define _GTF_transfer_committed (2)
++#define GTF_transfer_committed  (1U<<_GTF_transfer_committed)
++#define _GTF_transfer_completed (3)
++#define GTF_transfer_completed  (1U<<_GTF_transfer_completed)
++
++
++/***********************************
++ * GRANT TABLE QUERIES AND USES
++ */
++
++/*
++ * Reference to a grant entry in a specified domain's grant table.
++ */
++typedef uint32_t grant_ref_t;
++
++/*
++ * Handle to track a mapping created via a grant reference.
++ */
++typedef uint32_t grant_handle_t;
++
++/*
++ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
++ * by devices and/or host CPUs. If successful, <handle> is a tracking number
++ * that must be presented later to destroy the mapping(s). On error, <handle>
++ * is a negative status code.
++ * NOTES:
++ *  1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
++ *     via which I/O devices may access the granted frame.
++ *  2. If GNTMAP_host_map is specified then a mapping will be added at
++ *     either a host virtual address in the current address space, or at
++ *     a PTE at the specified machine address.  The type of mapping to
++ *     perform is selected through the GNTMAP_contains_pte flag, and the 
++ *     address is specified in <host_addr>.
++ *  3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
++ *     host mapping is destroyed by other means then it is *NOT* guaranteed
++ *     to be accounted to the correct grant reference!
++ */
++#define GNTTABOP_map_grant_ref        0
++typedef struct gnttab_map_grant_ref {
++    /* IN parameters. */
++    uint64_t host_addr;
++    uint32_t flags;               /* GNTMAP_* */
++    grant_ref_t ref;
++    domid_t  dom;
++    /* OUT parameters. */
++    int16_t  status;              /* GNTST_* */
++    grant_handle_t handle;
++    uint64_t dev_bus_addr;
++} gnttab_map_grant_ref_t;
++
++/*
++ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
++ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
++ * field is ignored. If non-zero, they must refer to a device/host mapping
++ * that is tracked by <handle>.
++ * NOTES:
++ *  1. The call may fail in an undefined manner if either mapping is not
++ *     tracked by <handle>.
++ *  2. After executing a batch of unmaps, it is guaranteed that no stale
++ *     mappings will remain in the device or host TLBs.
++ */
++#define GNTTABOP_unmap_grant_ref      1
++typedef struct gnttab_unmap_grant_ref {
++    /* IN parameters. */
++    uint64_t host_addr;
++    uint64_t dev_bus_addr;
++    grant_handle_t handle;
++    /* OUT parameters. */
++    int16_t  status;              /* GNTST_* */
++} gnttab_unmap_grant_ref_t;
++
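++/*
++ * Illustrative sketch (not part of the interface): mapping a foreign grant
++ * into the caller's address space for CPU access. The hypercall wrapper
++ * name, chosen_vaddr, remote_grant_ref and remote_domid are guest-side
++ * assumptions, not definitions from this header.
++ *
++ *     gnttab_map_grant_ref_t op;
++ *     op.host_addr = chosen_vaddr;       <- virtual address to map at
++ *     op.flags     = GNTMAP_host_map;    <- host CPU mapping; add GNTMAP_readonly for RO
++ *     op.ref       = remote_grant_ref;
++ *     op.dom       = remote_domid;
++ *     (void)HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++ *     if (op.status == GNTST_okay)
++ *         ... use the mapping; remember op.handle for GNTTABOP_unmap_grant_ref ...
++ */
++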
++/*
++ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
++ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
++ * Only <nr_frames> addresses are written, even if the table is larger.
++ * NOTES:
++ *  1. <dom> may be specified as DOMID_SELF.
++ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
++ *  3. Xen may not support more than a single grant-table page per domain.
++ */
++#define GNTTABOP_setup_table          2
++typedef struct gnttab_setup_table {
++    /* IN parameters. */
++    domid_t  dom;
++    uint32_t nr_frames;
++    /* OUT parameters. */
++    int16_t  status;              /* GNTST_* */
++    unsigned long *frame_list;
++} gnttab_setup_table_t;
++
++/*
++ * GNTTABOP_dump_table: Dump the contents of the grant table to the
++ * xen console. Debugging use only.
++ */
++#define GNTTABOP_dump_table           3
++typedef struct gnttab_dump_table {
++    /* IN parameters. */
++    domid_t dom;
++    /* OUT parameters. */
++    int16_t status;               /* GNTST_* */
++} gnttab_dump_table_t;
++
++/*
++ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The
++ * foreign domain has previously registered its interest in the transfer via
++ * <domid, ref>.
++ * 
++ * Note that, even if the transfer fails, the specified page no longer belongs
++ * to the calling domain *unless* the error is GNTST_bad_page.
++ */
++#define GNTTABOP_transfer                4
++typedef struct {
++    /* IN parameters. */
++    unsigned long mfn;
++    domid_t       domid;
++    grant_ref_t   ref;
++    /* OUT parameters. */
++    int16_t       status;
++} gnttab_transfer_t;
++
++/*
++ * Bitfield values for gnttab_map_grant_ref.flags (GNTMAP_*).
++ */
++ /* Map the grant entry for access by I/O devices. */
++#define _GNTMAP_device_map      (0)
++#define GNTMAP_device_map       (1<<_GNTMAP_device_map)
++ /* Map the grant entry for access by host CPUs. */
++#define _GNTMAP_host_map        (1)
++#define GNTMAP_host_map         (1<<_GNTMAP_host_map)
++ /* Accesses to the granted frame will be restricted to read-only access. */
++#define _GNTMAP_readonly        (2)
++#define GNTMAP_readonly         (1<<_GNTMAP_readonly)
++ /*
++  * GNTMAP_host_map subflag:
++  *  0 => The host mapping is usable only by the guest OS.
++  *  1 => The host mapping is usable by guest OS + current application.
++  */
++#define _GNTMAP_application_map (3)
++#define GNTMAP_application_map  (1<<_GNTMAP_application_map)
++
++ /*
++  * GNTMAP_contains_pte subflag:
++  *  0 => This map request contains a host virtual address.
++  *  1 => This map request contains the machine address of the PTE to update.
++  */ 
++#define _GNTMAP_contains_pte    (4)
++#define GNTMAP_contains_pte     (1<<_GNTMAP_contains_pte)
++
++/*
++ * Values for error status returns. All errors are -ve.
++ */
++#define GNTST_okay             (0)  /* Normal return.                        */
++#define GNTST_general_error    (-1) /* General undefined error.              */
++#define GNTST_bad_domain       (-2) /* Unrecognised domain id.               */
++#define GNTST_bad_gntref       (-3) /* Unrecognised or inappropriate gntref. */
++#define GNTST_bad_handle       (-4) /* Unrecognised or inappropriate handle. */
++#define GNTST_bad_virt_addr    (-5) /* Inappropriate virtual address to map. */
++#define GNTST_bad_dev_addr     (-6) /* Inappropriate device address to unmap.*/
++#define GNTST_no_device_space  (-7) /* Out of space in I/O MMU.              */
++#define GNTST_permission_denied (-8) /* Not enough privilege for operation.  */
++#define GNTST_bad_page         (-9) /* Specified page was invalid for op.    */
++
++#define GNTTABOP_error_msgs {                   \
++    "okay",                                     \
++    "undefined error",                          \
++    "unrecognised domain id",                   \
++    "invalid grant reference",                  \
++    "invalid mapping handle",                   \
++    "invalid virtual address",                  \
++    "invalid device address",                   \
++    "no spare translation slot in the I/O MMU", \
++    "permission denied",                        \
++    "bad page"                                  \
++}
++
++#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/hvm_info_table.h linux-2.6.12-xen/include/asm-xen/xen-public/hvm/hvm_info_table.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/hvm_info_table.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/hvm/hvm_info_table.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,24 @@
++/******************************************************************************
++ * hvm/hvm_info_table.h
++ * 
++ * HVM parameter and information table, written into guest memory map.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++
++#define HVM_INFO_PFN         0x09F
++#define HVM_INFO_OFFSET      0x800
++#define HVM_INFO_PADDR       ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
++
++struct hvm_info_table {
++    char        signature[8]; /* "HVM INFO" */
++    uint32_t    length;
++    uint8_t     checksum;
++    uint8_t     acpi_enabled;
++    uint8_t     apic_enabled;
++    uint8_t     pad[1];
++    uint32_t    nr_vcpus;
++};
++
++#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/ioreq.h linux-2.6.12-xen/include/asm-xen/xen-public/hvm/ioreq.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/ioreq.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/hvm/ioreq.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,90 @@
++/*
++ * ioreq.h: I/O request definitions for device models
++ * Copyright (c) 2004, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
++ * Place - Suite 330, Boston, MA 02111-1307 USA.
++ *
++ */
++
++#ifndef _IOREQ_H_
++#define _IOREQ_H_
++
++#define IOREQ_READ      1
++#define IOREQ_WRITE     0
++
++#define STATE_INVALID           0
++#define STATE_IOREQ_READY       1
++#define STATE_IOREQ_INPROCESS   2
++#define STATE_IORESP_READY      3
++#define STATE_IORESP_HOOK       4
++
++#define IOREQ_TYPE_PIO          0 /* pio */
++#define IOREQ_TYPE_COPY         1 /* mmio ops */
++#define IOREQ_TYPE_AND          2
++#define IOREQ_TYPE_OR           3
++#define IOREQ_TYPE_XOR          4
++
++/*
++ * VMExit dispatcher should cooperate with instruction decoder to
++ * prepare this structure and notify service OS and DM by sending
++ * virq
++ */
++typedef struct {
++    uint64_t addr;          /*  physical address            */
++    uint64_t size;          /*  size in bytes               */
++    uint64_t count;         /*  for rep prefixes            */
++    union {
++        uint64_t data;      /*  data                        */
++        void    *pdata;     /*  pointer to data             */
++    } u;
++    uint8_t state:4;
++    uint8_t pdata_valid:1;  /* if 1, use pdata above        */
++    uint8_t dir:1;          /*  1=read, 0=write             */
++    uint8_t df:1;
++    uint8_t type;           /* I/O type                     */
++} ioreq_t;
++
++#define MAX_VECTOR      256
++#define BITS_PER_BYTE   8
++#define INTR_LEN        (MAX_VECTOR/(BITS_PER_BYTE * sizeof(uint64_t)))
++#define INTR_LEN_32     (MAX_VECTOR/(BITS_PER_BYTE * sizeof(uint32_t)))
++
++typedef struct {
++    uint16_t    pic_elcr;
++    uint16_t    pic_irr;
++    uint16_t    pic_last_irr;
++    uint16_t    pic_clear_irr;
++    int         eport; /* Event channel port */
++} global_iodata_t;
++
++typedef struct {
++    ioreq_t     vp_ioreq;
++} vcpu_iodata_t;
++
++typedef struct {
++    global_iodata_t sp_global;
++    vcpu_iodata_t   vcpu_iodata[1];
++} shared_iopage_t;
++
++#endif /* _IOREQ_H_ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/vmx_assist.h linux-2.6.12-xen/include/asm-xen/xen-public/hvm/vmx_assist.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/vmx_assist.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/hvm/vmx_assist.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,97 @@
++/*
++ * vmx_assist.h: Context definitions for the VMXASSIST world switch.
++ *
++ * Leendert van Doorn, leendert at watson.ibm.com
++ * Copyright (c) 2005, International Business Machines Corporation.
++ */
++
++#ifndef _VMX_ASSIST_H_
++#define _VMX_ASSIST_H_
++
++#define VMXASSIST_BASE         0xD0000
++#define VMXASSIST_MAGIC        0x17101966
++#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
++
++#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
++#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
++
++#ifndef __ASSEMBLY__
++
++union vmcs_arbytes {
++    struct arbyte_fields {
++        unsigned int seg_type : 4,
++            s         : 1,
++            dpl       : 2,
++            p         : 1, 
++            reserved0 : 4,
++            avl       : 1,
++            reserved1 : 1,     
++            default_ops_size: 1,
++            g         : 1,
++            null_bit  : 1, 
++            reserved2 : 15;
++    } fields;
++    unsigned int bytes;
++};
++
++/*
++ * World switch state
++ */
++typedef struct vmx_assist_context {
++    uint32_t  eip;        /* execution pointer */
++    uint32_t  esp;        /* stack pointer */
++    uint32_t  eflags;     /* flags register */
++    uint32_t  cr0;
++    uint32_t  cr3;        /* page table directory */
++    uint32_t  cr4;
++    uint32_t  idtr_limit; /* idt */
++    uint32_t  idtr_base;
++    uint32_t  gdtr_limit; /* gdt */
++    uint32_t  gdtr_base;
++    uint32_t  cs_sel;     /* cs selector */
++    uint32_t  cs_limit;
++    uint32_t  cs_base;
++    union vmcs_arbytes cs_arbytes;
++    uint32_t  ds_sel;     /* ds selector */
++    uint32_t  ds_limit;
++    uint32_t  ds_base;
++    union vmcs_arbytes ds_arbytes;
++    uint32_t  es_sel;     /* es selector */
++    uint32_t  es_limit;
++    uint32_t  es_base;
++    union vmcs_arbytes es_arbytes;
++    uint32_t  ss_sel;     /* ss selector */
++    uint32_t  ss_limit;
++    uint32_t  ss_base;
++    union vmcs_arbytes ss_arbytes;
++    uint32_t  fs_sel;     /* fs selector */
++    uint32_t  fs_limit;
++    uint32_t  fs_base;
++    union vmcs_arbytes fs_arbytes;
++    uint32_t  gs_sel;     /* gs selector */
++    uint32_t  gs_limit;
++    uint32_t  gs_base;
++    union vmcs_arbytes gs_arbytes;
++    uint32_t  tr_sel;     /* task selector */
++    uint32_t  tr_limit;
++    uint32_t  tr_base;
++    union vmcs_arbytes tr_arbytes;
++    uint32_t  ldtr_sel;   /* ldtr selector */
++    uint32_t  ldtr_limit;
++    uint32_t  ldtr_base;
++    union vmcs_arbytes ldtr_arbytes;
++} vmx_assist_context_t;
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _VMX_ASSIST_H_ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/blkif.h linux-2.6.12-xen/include/asm-xen/xen-public/io/blkif.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/io/blkif.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/io/blkif.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,85 @@
++/******************************************************************************
++ * blkif.h
++ * 
++ * Unified block-device I/O interface for Xen guest OSes.
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_BLKIF_H__
++#define __XEN_PUBLIC_IO_BLKIF_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++/*
++ * Front->back notifications: When enqueuing a new request, sending a
++ * notification can be made conditional on req_event (i.e., the generic
++ * hold-off mechanism provided by the ring macros). Backends must set
++ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
++ * 
++ * Back->front notifications: When enqueuing a new response, sending a
++ * notification can be made conditional on rsp_event (i.e., the generic
++ * hold-off mechanism provided by the ring macros). Frontends must set
++ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
++ */
++
++#ifndef blkif_vdev_t
++#define blkif_vdev_t   uint16_t
++#endif
++#define blkif_sector_t uint64_t
++
++#define BLKIF_OP_READ      0
++#define BLKIF_OP_WRITE     1
++
++/*
++ * Maximum scatter/gather segments per request.
++ * This is carefully chosen so that sizeof(blkif_sring_t) <= PAGE_SIZE.
++ * NB. This could be 12 if the ring indexes weren't stored in the same page.
++ */
++#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
++
++typedef struct blkif_request {
++    uint8_t        operation;    /* BLKIF_OP_???                         */
++    uint8_t        nr_segments;  /* number of segments                   */
++    blkif_vdev_t   handle;       /* only for read/write requests         */
++    uint64_t       id;           /* private guest value, echoed in resp  */
++    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
++    struct blkif_request_segment {
++        grant_ref_t gref;        /* reference to I/O buffer frame        */
++        /* @first_sect: first sector in frame to transfer (inclusive).   */
++        /* @last_sect: last sector in frame to transfer (inclusive).     */
++        uint8_t     first_sect, last_sect;
++    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++} blkif_request_t;
++
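++/*
++ * Illustrative sketch (not part of the interface): a front end building a
++ * one-segment read request on a ring created with DEFINE_RING_TYPES(blkif,
++ * ...) below. front, vdev, my_id, start_sector, buffer_gref and notify are
++ * placeholders.
++ *
++ *     int notify;
++ *     blkif_request_t *req = RING_GET_REQUEST(&front, front.req_prod_pvt);
++ *     req->operation         = BLKIF_OP_READ;
++ *     req->nr_segments       = 1;
++ *     req->handle            = vdev;
++ *     req->id                = my_id;        <- echoed in the response
++ *     req->sector_number     = start_sector;
++ *     req->seg[0].gref       = buffer_gref;
++ *     req->seg[0].first_sect = 0;
++ *     req->seg[0].last_sect  = 7;            <- 8 x 512-byte sectors = one page
++ *     front.req_prod_pvt++;
++ *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front, notify);
++ */
++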
++typedef struct blkif_response {
++    uint64_t        id;              /* copied from request */
++    uint8_t         operation;       /* copied from request */
++    int16_t         status;          /* BLKIF_RSP_???       */
++} blkif_response_t;
++
++#define BLKIF_RSP_ERROR  -1 /* non-specific 'error' */
++#define BLKIF_RSP_OKAY    0 /* non-specific 'okay'  */
++
++/*
++ * Generate blkif ring structures and types.
++ */
++
++DEFINE_RING_TYPES(blkif, blkif_request_t, blkif_response_t);
++
++#define VDISK_CDROM        0x1
++#define VDISK_REMOVABLE    0x2
++#define VDISK_READONLY     0x4
++
++#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/console.h linux-2.6.12-xen/include/asm-xen/xen-public/io/console.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/io/console.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/io/console.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,33 @@
++/******************************************************************************
++ * console.h
++ * 
++ * Console I/O interface for Xen guest OSes.
++ * 
++ * Copyright (c) 2005, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
++#define __XEN_PUBLIC_IO_CONSOLE_H__
++
++typedef uint32_t XENCONS_RING_IDX;
++
++#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
++
++struct xencons_interface {
++    char in[1024];
++    char out[2048];
++    XENCONS_RING_IDX in_cons, in_prod;
++    XENCONS_RING_IDX out_cons, out_prod;
++};
++
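++/*
++ * Illustrative sketch (not part of the interface): draining the input ring.
++ * intf points at the shared page; rmb()/mb() and handle_char() stand in for
++ * guest-OS barrier primitives and input handling.
++ *
++ *     XENCONS_RING_IDX cons = intf->in_cons, prod = intf->in_prod;
++ *     rmb();                 <- read prod before reading ring contents
++ *     while (cons != prod)
++ *         handle_char(intf->in[MASK_XENCONS_IDX(cons++, intf->in)]);
++ *     mb();                  <- consume contents before publishing new in_cons
++ *     intf->in_cons = cons;
++ */
++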
++#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/netif.h linux-2.6.12-xen/include/asm-xen/xen-public/io/netif.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/io/netif.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/io/netif.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,76 @@
++/******************************************************************************
++ * netif.h
++ * 
++ * Unified network-device I/O interface for Xen guest OSes.
++ * 
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_NETIF_H__
++#define __XEN_PUBLIC_IO_NETIF_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++/*
++ * Note that there is *never* any need to notify the backend when enqueuing
++ * receive requests (netif_rx_request_t). Notifications after enqueuing any
++ * other type of message should be conditional on the appropriate req_event
++ * or rsp_event field in the shared ring.
++ */
++
++/* Protocol checksum field is blank in the packet (hardware offload)? */
++#define _NETTXF_csum_blank (0)
++#define  NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
++
++typedef struct netif_tx_request {
++    grant_ref_t gref;      /* Reference to buffer page */
++    uint16_t offset;       /* Offset within buffer page */
++    uint16_t flags;        /* NETTXF_* */
++    uint16_t id;           /* Echoed in response message. */
++    uint16_t size;         /* Packet size in bytes.       */
++} netif_tx_request_t;
++
++typedef struct netif_tx_response {
++    uint16_t id;
++    int16_t  status;       /* NETIF_RSP_* */
++} netif_tx_response_t;
++
++typedef struct {
++    uint16_t    id;        /* Echoed in response message.        */
++    grant_ref_t gref;      /* Reference to incoming granted frame */
++} netif_rx_request_t;
++
++/* Protocol checksum already validated (e.g., performed by hardware)? */
++#define _NETRXF_csum_valid (0)
++#define  NETRXF_csum_valid (1U<<_NETRXF_csum_valid)
++
++typedef struct {
++    uint16_t id;
++    uint16_t offset;       /* Offset in page of start of received packet  */
++    uint16_t flags;        /* NETRXF_* */
++    int16_t  status;       /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
++} netif_rx_response_t;
++
++/*
++ * Generate netif ring structures and types.
++ */
++
++DEFINE_RING_TYPES(netif_tx, netif_tx_request_t, netif_tx_response_t);
++DEFINE_RING_TYPES(netif_rx, netif_rx_request_t, netif_rx_response_t);
++
++#define NETIF_RSP_DROPPED         -2
++#define NETIF_RSP_ERROR           -1
++#define NETIF_RSP_OKAY             0
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/ring.h linux-2.6.12-xen/include/asm-xen/xen-public/io/ring.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/io/ring.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/io/ring.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,270 @@
++/******************************************************************************
++ * ring.h
++ * 
++ * Shared producer-consumer ring macros.
++ *
++ * Tim Deegan and Andrew Warfield November 2004.
++ */
++
++#ifndef __XEN_PUBLIC_IO_RING_H__
++#define __XEN_PUBLIC_IO_RING_H__
++
++typedef unsigned int RING_IDX;
++
++/* Round a 32-bit unsigned constant down to the nearest power of two. */
++#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
++#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
++#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
++#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
++#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
++
++/*
++ * Calculate size of a shared ring, given the total available space for the
++ * ring and indexes (_sz), and the name tag of the request/response structure.
++ * A ring contains as many entries as will fit, rounded down to the nearest 
++ * power of two (so we can mask with (size-1) to loop around).
++ */
++#define __RING_SIZE(_s, _sz) \
++    (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
++
++/*
++ * Macros to make the correct C datatypes for a new kind of ring.
++ * 
++ * To make a new ring datatype, you need to have two message structures,
++ * let's say request_t, and response_t already defined.
++ *
++ * In a header where you want the ring datatype declared, you then do:
++ *
++ *     DEFINE_RING_TYPES(mytag, request_t, response_t);
++ *
++ * These expand out to give you a set of types, as you can see below.
++ * The most important of these are:
++ *  
++ *     mytag_sring_t      - The shared ring.
++ *     mytag_front_ring_t - The 'front' half of the ring.
++ *     mytag_back_ring_t  - The 'back' half of the ring.
++ *
++ * To initialise a ring in your code you need to know the location and size
++ * of the shared memory area (PAGE_SIZE, for instance). To initialise
++ * the front half:
++ *
++ *     mytag_front_ring_t front_ring;
++ *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
++ *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
++ *
++ * Initialising the back follows similarly (note that only the front
++ * initialises the shared ring):
++ *
++ *     mytag_back_ring_t back_ring;
++ *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
++ */
++         
++#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
++                                                                        \
++/* Shared ring entry */                                                 \
++union __name##_sring_entry {                                            \
++    __req_t req;                                                        \
++    __rsp_t rsp;                                                        \
++};                                                                      \
++                                                                        \
++/* Shared ring page */                                                  \
++struct __name##_sring {                                                 \
++    RING_IDX req_prod, req_event;                                       \
++    RING_IDX rsp_prod, rsp_event;                                       \
++    uint8_t  pad[48];                                                   \
++    union __name##_sring_entry ring[1]; /* variable-length */           \
++};                                                                      \
++                                                                        \
++/* "Front" end's private variables */                                   \
++struct __name##_front_ring {                                            \
++    RING_IDX req_prod_pvt;                                              \
++    RING_IDX rsp_cons;                                                  \
++    unsigned int nr_ents;                                               \
++    struct __name##_sring *sring;                                       \
++};                                                                      \
++                                                                        \
++/* "Back" end's private variables */                                    \
++struct __name##_back_ring {                                             \
++    RING_IDX rsp_prod_pvt;                                              \
++    RING_IDX req_cons;                                                  \
++    unsigned int nr_ents;                                               \
++    struct __name##_sring *sring;                                       \
++};                                                                      \
++                                                                        \
++/* Syntactic sugar */                                                   \
++typedef struct __name##_sring __name##_sring_t;                         \
++typedef struct __name##_front_ring __name##_front_ring_t;               \
++typedef struct __name##_back_ring __name##_back_ring_t
++
++/*
++ * Macros for manipulating rings.  
++ * 
++ * FRONT_RING_whatever works on the "front end" of a ring: here 
++ * requests are pushed on to the ring and responses taken off it.
++ * 
++ * BACK_RING_whatever works on the "back end" of a ring: here 
++ * requests are taken off the ring and responses put on.
++ * 
++ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.  
++ * This is OK in 1-for-1 request-response situations where the 
++ * requestor (front end) never has more than RING_SIZE()-1
++ * outstanding requests.
++ */
++
++/* Initialising empty rings */
++#define SHARED_RING_INIT(_s) do {                                       \
++    (_s)->req_prod  = (_s)->rsp_prod  = 0;                              \
++    (_s)->req_event = (_s)->rsp_event = 1;                              \
++    memset((_s)->pad, 0, sizeof((_s)->pad));                            \
++} while(0)
++
++#define FRONT_RING_INIT(_r, _s, __size) do {                            \
++    (_r)->req_prod_pvt = 0;                                             \
++    (_r)->rsp_cons = 0;                                                 \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++    (_r)->sring = (_s);                                                 \
++} while (0)
++
++#define BACK_RING_INIT(_r, _s, __size) do {                             \
++    (_r)->rsp_prod_pvt = 0;                                             \
++    (_r)->req_cons = 0;                                                 \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++    (_r)->sring = (_s);                                                 \
++} while (0)
++
++/* Initialize to existing shared indexes -- for recovery */
++#define FRONT_RING_ATTACH(_r, _s, __size) do {                          \
++    (_r)->sring = (_s);                                                 \
++    (_r)->req_prod_pvt = (_s)->req_prod;                                \
++    (_r)->rsp_cons = (_s)->rsp_prod;                                    \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++} while (0)
++
++#define BACK_RING_ATTACH(_r, _s, __size) do {                           \
++    (_r)->sring = (_s);                                                 \
++    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                                \
++    (_r)->req_cons = (_s)->req_prod;                                    \
++    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
++} while (0)
++
++/* How big is this ring? */
++#define RING_SIZE(_r)                                                   \
++    ((_r)->nr_ents)
++
++/* Test if there is an empty slot available on the front ring. 
++ * (This is only meaningful from the front.)
++ */
++#define RING_FULL(_r)                                                   \
++    (((_r)->req_prod_pvt - (_r)->rsp_cons) == RING_SIZE(_r))
++
++/* Test if there are outstanding messages to be processed on a ring. */
++#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
++   ( (_r)->rsp_cons != (_r)->sring->rsp_prod )
++   
++#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
++   ( ((_r)->req_cons != (_r)->sring->req_prod ) &&                      \
++     (((_r)->req_cons - (_r)->rsp_prod_pvt) !=                          \
++      RING_SIZE(_r)) )
++      
++/* Direct access to individual ring elements, by index. */
++#define RING_GET_REQUEST(_r, _idx)                                      \
++ (&((_r)->sring->ring[                                                  \
++     ((_idx) & (RING_SIZE(_r) - 1))                                     \
++     ].req))
++
++#define RING_GET_RESPONSE(_r, _idx)                                     \
++ (&((_r)->sring->ring[                                                  \
++     ((_idx) & (RING_SIZE(_r) - 1))                                     \
++     ].rsp))   
++    
++/* Loop termination condition: Would the specified index overflow the ring? */
++#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
++    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
++
++#define RING_PUSH_REQUESTS(_r) do {                                     \
++    wmb(); /* back sees requests /before/ updated producer index */     \
++    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
++} while (0)
++
++#define RING_PUSH_RESPONSES(_r) do {                                    \
++    wmb(); /* front sees responses /before/ updated producer index */   \
++    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
++} while (0)
++
++/*
++ * Notification hold-off (req_event and rsp_event):
++ * 
++ * When queueing requests or responses on a shared ring, it may not always be
++ * necessary to notify the remote end. For example, if requests are in flight
++ * in a backend, the front may be able to queue further requests without
++ * notifying the back (if the back checks for new requests when it queues
++ * responses).
++ * 
++ * When enqueuing requests or responses:
++ * 
++ *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
++ *  is a boolean return value. True indicates that the receiver requires an
++ *  asynchronous notification.
++ * 
++ * After dequeuing requests or responses (before sleeping the connection):
++ * 
++ *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
++ *  The second argument is a boolean return value. True indicates that there
++ *  are pending messages on the ring (i.e., the connection should not be put
++ *  to sleep).
++ *  
++ *  These macros will set the req_event/rsp_event field to trigger a
++ *  notification on the very next message that is enqueued. If you want to
++ *  create batches of work (i.e., only receive a notification after several
++ *  messages have been enqueued) then you will need to create a customised
++ *  version of the FINAL_CHECK macro in your own code, which sets the event
++ *  field appropriately.
++ */
++
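++/*
++ * Illustrative sketch (not part of the interface): producer side of the
++ * hold-off protocol, using the macros defined below, for a ring created with
++ * DEFINE_RING_TYPES(mytag, request_t, response_t). front and
++ * notify_remote_end() are placeholders (the latter would typically send an
++ * event-channel notification).
++ *
++ *     int notify;
++ *     request_t *req = RING_GET_REQUEST(&front, front.req_prod_pvt);
++ *     ... fill in *req ...
++ *     front.req_prod_pvt++;
++ *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front, notify);
++ *     if (notify)
++ *         notify_remote_end();
++ */
++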
++#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
++    RING_IDX __old = (_r)->sring->req_prod;                             \
++    RING_IDX __new = (_r)->req_prod_pvt;                                \
++    wmb(); /* back sees requests /before/ updated producer index */     \
++    (_r)->sring->req_prod = __new;                                      \
++    mb(); /* back sees new requests /before/ we check req_event */      \
++    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
++                 (RING_IDX)(__new - __old));                            \
++} while (0)
++
++#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
++    RING_IDX __old = (_r)->sring->rsp_prod;                             \
++    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
++    wmb(); /* front sees responses /before/ updated producer index */   \
++    (_r)->sring->rsp_prod = __new;                                      \
++    mb(); /* front sees new responses /before/ we check rsp_event */    \
++    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
++                 (RING_IDX)(__new - __old));                            \
++} while (0)
++
++#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
++    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
++    if (_work_to_do) break;                                             \
++    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
++    mb();                                                               \
++    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
++} while (0)
++
++#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
++    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
++    if (_work_to_do) break;                                             \
++    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
++    mb();                                                               \
++    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
++} while (0)
++
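++/*
++ * Illustrative sketch (not part of the interface): consumer side, processing
++ * requests and using the final check before sleeping. Barriers and response
++ * production are elided; back is a mytag_back_ring_t placeholder.
++ *
++ *     int more;
++ *     do {
++ *         while (RING_HAS_UNCONSUMED_REQUESTS(&back)) {
++ *             request_t *req = RING_GET_REQUEST(&back, back.req_cons);
++ *             back.req_cons++;
++ *             ... handle *req and queue a response ...
++ *         }
++ *         RING_FINAL_CHECK_FOR_REQUESTS(&back, more);
++ *     } while (more);
++ */
++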
++#endif /* __XEN_PUBLIC_IO_RING_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/tpmif.h linux-2.6.12-xen/include/asm-xen/xen-public/io/tpmif.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/io/tpmif.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/io/tpmif.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,56 @@
++/******************************************************************************
++ * tpmif.h
++ *
++ * TPM I/O interface for Xen guest OSes.
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb at us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from tools/libxc/xen/io/netif.h
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_TPMIF_H__
++#define __XEN_PUBLIC_IO_TPMIF_H__
++
++#include "../grant_table.h"
++
++typedef struct {
++    unsigned long addr;   /* Machine address of packet.   */
++    grant_ref_t ref;      /* grant table access reference */
++    uint16_t id;          /* Echoed in response message.  */
++    uint16_t size;        /* Packet size in bytes.        */
++} tpmif_tx_request_t;
++
++/*
++ * The TPMIF_TX_RING_SIZE defines the number of pages the
++ * front-end and backend can exchange (= size of array).
++ */
++typedef uint32_t TPMIF_RING_IDX;
++
++#define TPMIF_TX_RING_SIZE 10
++
++/* This structure must fit in a memory page. */
++
++typedef struct {
++    tpmif_tx_request_t req;
++} tpmif_ring_t;
++
++typedef struct {
++    tpmif_ring_t ring[TPMIF_TX_RING_SIZE];
++} tpmif_tx_interface_t;
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/xenbus.h linux-2.6.12-xen/include/asm-xen/xen-public/io/xenbus.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/io/xenbus.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/io/xenbus.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,44 @@
++/*****************************************************************************
++ * xenbus.h
++ *
++ * Xenbus protocol details.
++ *
++ * Copyright (C) 2005 XenSource Ltd.
++ */
++
++#ifndef _XEN_XENBUS_H
++#define _XEN_XENBUS_H
++
++
++/* The state of either end of the Xenbus, i.e. the current communication
++   status of initialisation across the bus.  States here imply nothing about
++   the state of the connection between the driver and the kernel's device
++   layers.  */
++typedef enum
++{
++  XenbusStateUnknown      = 0,
++  XenbusStateInitialising = 1,
++  XenbusStateInitWait     = 2,  /* Finished early initialisation, but waiting
++                                   for information from the peer or hotplug
++				   scripts. */
++  XenbusStateInitialised  = 3,  /* Initialised and waiting for a connection
++				   from the peer. */
++  XenbusStateConnected    = 4,
++  XenbusStateClosing      = 5,  /* The device is being closed due to an error
++				   or an unplug event. */
++  XenbusStateClosed       = 6
++
++} XenbusState;
++
++
++#endif /* _XEN_XENBUS_H */
++
++/*
++ * Local variables:
++ *  c-file-style: "linux"
++ *  indent-tabs-mode: t
++ *  c-indent-level: 8
++ *  c-basic-offset: 8
++ *  tab-width: 8
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/xs_wire.h linux-2.6.12-xen/include/asm-xen/xen-public/io/xs_wire.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/io/xs_wire.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/io/xs_wire.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,97 @@
++/*
++ * Details of the "wire" protocol between Xen Store Daemon and client
++ * library or guest kernel.
++ * Copyright (C) 2005 Rusty Russell IBM Corporation
++ */
++
++#ifndef _XS_WIRE_H
++#define _XS_WIRE_H
++
++enum xsd_sockmsg_type
++{
++    XS_DEBUG,
++    XS_DIRECTORY,
++    XS_READ,
++    XS_GET_PERMS,
++    XS_WATCH,
++    XS_UNWATCH,
++    XS_TRANSACTION_START,
++    XS_TRANSACTION_END,
++    XS_INTRODUCE,
++    XS_RELEASE,
++    XS_GET_DOMAIN_PATH,
++    XS_WRITE,
++    XS_MKDIR,
++    XS_RM,
++    XS_SET_PERMS,
++    XS_WATCH_EVENT,
++    XS_ERROR,
++    XS_IS_DOMAIN_INTRODUCED
++};
++
++#define XS_WRITE_NONE "NONE"
++#define XS_WRITE_CREATE "CREATE"
++#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
++
++/* We hand errors as strings, for portability. */
++struct xsd_errors
++{
++    int errnum;
++    const char *errstring;
++};
++#define XSD_ERROR(x) { x, #x }
++static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
++    XSD_ERROR(EINVAL),
++    XSD_ERROR(EACCES),
++    XSD_ERROR(EEXIST),
++    XSD_ERROR(EISDIR),
++    XSD_ERROR(ENOENT),
++    XSD_ERROR(ENOMEM),
++    XSD_ERROR(ENOSPC),
++    XSD_ERROR(EIO),
++    XSD_ERROR(ENOTEMPTY),
++    XSD_ERROR(ENOSYS),
++    XSD_ERROR(EROFS),
++    XSD_ERROR(EBUSY),
++    XSD_ERROR(EAGAIN),
++    XSD_ERROR(EISCONN),
++};
++
++struct xsd_sockmsg
++{
++    uint32_t type;  /* XS_??? */
++    uint32_t req_id;/* Request identifier, echoed in daemon's response.  */
++    uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
++    uint32_t len;   /* Length of data following this. */
++
++    /* Generally followed by nul-terminated string(s). */
++};
++
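++/*
++ * Illustrative example (not part of the interface): wire framing for an
++ * XS_READ of the key "name". The header is followed immediately by the
++ * nul-terminated path; req_id is chosen by the client and echoed back.
++ *
++ *     type   = XS_READ
++ *     req_id = 7
++ *     tx_id  = 0                  <- not inside a transaction
++ *     len    = 5                  <- strlen("name") + terminating nul
++ *     data   = 'n' 'a' 'm' 'e' '\0'
++ */
++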
++enum xs_watch_type
++{
++    XS_WATCH_PATH = 0,
++    XS_WATCH_TOKEN,
++};
++
++/* Inter-domain shared memory communications. */
++#define XENSTORE_RING_SIZE 1024
++typedef uint32_t XENSTORE_RING_IDX;
++#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
++struct xenstore_domain_interface {
++    char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
++    char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
++    XENSTORE_RING_IDX req_cons, req_prod;
++    XENSTORE_RING_IDX rsp_cons, rsp_prod;
++};
++
++#endif /* _XS_WIRE_H */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/memory.h linux-2.6.12-xen/include/asm-xen/xen-public/memory.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/memory.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/memory.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,127 @@
++/******************************************************************************
++ * memory.h
++ * 
++ * Memory reservation and information.
++ * 
++ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_MEMORY_H__
++#define __XEN_PUBLIC_MEMORY_H__
++
++/*
++ * Increase or decrease the specified domain's memory reservation. Returns a
++ * -ve errcode on failure, or the # extents successfully allocated or freed.
++ * arg == addr of struct xen_memory_reservation.
++ */
++#define XENMEM_increase_reservation 0
++#define XENMEM_decrease_reservation 1
++#define XENMEM_populate_physmap     6
++typedef struct xen_memory_reservation {
++
++    /*
++     * XENMEM_increase_reservation:
++     *   OUT: MFN bases of extents that were allocated
++     * XENMEM_decrease_reservation:
++     *   IN:  MFN bases of extents to free
++     * XENMEM_populate_physmap:
++     *   IN:  PFN bases of extents to populate with memory
++     *   OUT: MFN bases of extents that were allocated
++     *   (NB. This command also updates the mach_to_phys translation table)
++     */
++    unsigned long *extent_start;
++
++    /* Number of extents, and size/alignment of each (2^extent_order pages). */
++    unsigned long  nr_extents;
++    unsigned int   extent_order;
++
++    /*
++     * Maximum # bits addressable by the user of the allocated region (e.g.,
++     * I/O devices often have a 32-bit limitation even in 64-bit systems). If 
++     * zero then the user has no addressing restriction.
++     * This field is not used by XENMEM_decrease_reservation.
++     */
++    unsigned int   address_bits;
++
++    /*
++     * Domain whose reservation is being changed.
++     * Unprivileged domains can specify only DOMID_SELF.
++     */
++    domid_t        domid;
++
++} xen_memory_reservation_t;
++
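++/*
++ * Illustrative sketch (not part of the interface): a guest releasing 16 of
++ * its own pages back to Xen. The hypercall wrapper name and mfn_list (an
++ * array of 16 MFNs owned by this domain) are guest-side assumptions.
++ *
++ *     xen_memory_reservation_t r = {
++ *         .extent_start = mfn_list,
++ *         .nr_extents   = 16,
++ *         .extent_order = 0,          <- each extent is a single page
++ *         .address_bits = 0,          <- unused by decrease_reservation
++ *         .domid        = DOMID_SELF,
++ *     };
++ *     long rc = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &r);
++ *     ... rc == number of extents freed, or a -ve error code ...
++ */
++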
++/*
++ * Returns the maximum machine frame number of mapped RAM in this system.
++ * This command always succeeds (it never returns an error code).
++ * arg == NULL.
++ */
++#define XENMEM_maximum_ram_page     2
++
++/*
++ * Returns the current or maximum memory reservation, in pages, of the
++ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
++ * arg == addr of domid_t.
++ */
++#define XENMEM_current_reservation  3
++#define XENMEM_maximum_reservation  4
++
++/*
++ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
++ * mapping table. Architectures which do not have a m2p table do not implement
++ * this command.
++ * arg == addr of xen_machphys_mfn_list_t.
++ */
++#define XENMEM_machphys_mfn_list    5
++typedef struct xen_machphys_mfn_list {
++    /*
++     * Size of the 'extent_start' array. Fewer entries will be filled if the
++     * machphys table is smaller than max_extents * 2MB.
++     */
++    unsigned int max_extents;
++    
++    /*
++     * Pointer to buffer to fill with list of extent starts. If there are
++     * any large discontiguities in the machine address space, 2MB gaps in
++     * the machphys table will be represented by an MFN base of zero.
++     */
++    unsigned long *extent_start;
++
++    /*
++     * Number of extents written to the above array. This will be smaller
++     * than 'max_extents' if the machphys table is smaller than max_extents * 2MB.
++     */
++    unsigned int nr_extents;
++} xen_machphys_mfn_list_t;
++
++/*
++ * Returns the base and size of the specified reserved 'RAM hole' in the
++ * specified guest's pseudophysical address space.
++ * arg == addr of xen_reserved_phys_area_t.
++ */
++#define XENMEM_reserved_phys_area   7
++typedef struct xen_reserved_phys_area {
++    /* Which request to report about? */
++    domid_t domid;
++
++    /*
++     * Which reserved area to report? Out-of-range request reports
++     * -ESRCH. Currently no architecture will have more than one reserved area.
++     */
++    unsigned int idx;
++
++    /* Base and size of the specified reserved area. */
++    unsigned long first_pfn, nr_pfns;
++} xen_reserved_phys_area_t;
++
++#endif /* __XEN_PUBLIC_MEMORY_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/nmi.h linux-2.6.12-xen/include/asm-xen/xen-public/nmi.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/nmi.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/nmi.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,54 @@
++/******************************************************************************
++ * nmi.h
++ * 
++ * NMI callback registration and reason codes.
++ * 
++ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_NMI_H__
++#define __XEN_PUBLIC_NMI_H__
++
++/*
++ * NMI reason codes:
++ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
++ */
++ /* I/O-check error reported via ISA port 0x61, bit 6. */
++#define _XEN_NMIREASON_io_error     0
++#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
++ /* Parity error reported via ISA port 0x61, bit 7. */
++#define _XEN_NMIREASON_parity_error 1
++#define XEN_NMIREASON_parity_error  (1UL << _XEN_NMIREASON_parity_error)
++ /* Unknown hardware-generated NMI. */
++#define _XEN_NMIREASON_unknown      2
++#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)
++
++/*
++ * long nmi_op(unsigned int cmd, void *arg)
++ * NB. All ops return zero on success, else a negative error code.
++ */
++
++/*
++ * Register NMI callback for this (calling) VCPU. Currently this only makes
++ * sense for domain 0, vcpu 0. All other callers will receive EINVAL.
++ * arg == address of callback function.
++ */
++#define XENNMI_register_callback   0
++
++/*
++ * Deregister NMI callback for this (calling) VCPU.
++ * arg == NULL.
++ */
++#define XENNMI_unregister_callback 1
++
++#endif /* __XEN_PUBLIC_NMI_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/physdev.h linux-2.6.12-xen/include/asm-xen/xen-public/physdev.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/physdev.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/physdev.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,70 @@
++
++#ifndef __XEN_PUBLIC_PHYSDEV_H__
++#define __XEN_PUBLIC_PHYSDEV_H__
++
++/* Commands to HYPERVISOR_physdev_op() */
++#define PHYSDEVOP_IRQ_UNMASK_NOTIFY     4
++#define PHYSDEVOP_IRQ_STATUS_QUERY      5
++#define PHYSDEVOP_SET_IOPL              6
++#define PHYSDEVOP_SET_IOBITMAP          7
++#define PHYSDEVOP_APIC_READ             8
++#define PHYSDEVOP_APIC_WRITE            9
++#define PHYSDEVOP_ASSIGN_VECTOR         10
++
++typedef struct physdevop_irq_status_query {
++    /* IN */
++    uint32_t irq;
++    /* OUT */
++/* Need to call PHYSDEVOP_IRQ_UNMASK_NOTIFY when the IRQ has been serviced? */
++#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY (1<<0)
++    uint32_t flags;
++} physdevop_irq_status_query_t;
++
++typedef struct physdevop_set_iopl {
++    /* IN */
++    uint32_t iopl;
++} physdevop_set_iopl_t;
++
++typedef struct physdevop_set_iobitmap {
++    /* IN */
++    uint8_t *bitmap;
++    uint32_t nr_ports;
++} physdevop_set_iobitmap_t;
++
++typedef struct physdevop_apic {
++    /* IN */
++    uint32_t apic;
++    uint32_t offset;
++    /* IN or OUT */
++    uint32_t value;
++} physdevop_apic_t; 
++
++typedef struct physdevop_irq {
++    /* IN */
++    uint32_t irq;
++    /* OUT */
++    uint32_t vector;
++} physdevop_irq_t; 
++
++typedef struct physdev_op {
++    uint32_t cmd;
++    union {
++        physdevop_irq_status_query_t      irq_status_query;
++        physdevop_set_iopl_t              set_iopl;
++        physdevop_set_iobitmap_t          set_iobitmap;
++        physdevop_apic_t                  apic_op;
++        physdevop_irq_t                   irq_op;
++    } u;
++} physdev_op_t;
++
++#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/sched_ctl.h linux-2.6.12-xen/include/asm-xen/xen-public/sched_ctl.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/sched_ctl.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/sched_ctl.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,68 @@
++/******************************************************************************
++ * Generic scheduler control interface.
++ *
++ * Mark Williamson, (C) 2004 Intel Research Cambridge
++ */
++
++#ifndef __XEN_PUBLIC_SCHED_CTL_H__
++#define __XEN_PUBLIC_SCHED_CTL_H__
++
++/* Scheduler types. */
++#define SCHED_BVT      0
++#define SCHED_SEDF     4
++
++/* Set or get info? */
++#define SCHED_INFO_PUT 0
++#define SCHED_INFO_GET 1
++
++/*
++ * Generic scheduler control command - used to adjust system-wide scheduler
++ * parameters
++ */
++struct sched_ctl_cmd {
++    uint32_t sched_id;
++    uint32_t direction;
++    union {
++        struct bvt_ctl {
++            uint32_t ctx_allow;
++        } bvt;
++    } u;
++};
++
++struct sched_adjdom_cmd {
++    uint32_t sched_id;
++    uint32_t direction;
++    domid_t  domain;
++    union {
++        struct bvt_adjdom
++        {
++            uint32_t mcu_adv;      /* mcu advance: inverse of weight */
++            uint32_t warpback;     /* warp? */
++            int32_t  warpvalue;    /* warp value */
++            int64_t  warpl;        /* warp limit */
++            int64_t  warpu;        /* unwarp time requirement */
++        } bvt;
++        
++        struct sedf_adjdom
++        {
++            uint64_t period;
++            uint64_t slice;
++            uint64_t latency;
++            uint32_t extratime;
++            uint32_t weight;
++        } sedf;
++
++    } u;
++};
++
++#endif /* __XEN_PUBLIC_SCHED_CTL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/sched.h linux-2.6.12-xen/include/asm-xen/xen-public/sched.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/sched.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/sched.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,60 @@
++/******************************************************************************
++ * sched.h
++ * 
++ * Scheduler state interactions
++ * 
++ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_SCHED_H__
++#define __XEN_PUBLIC_SCHED_H__
++
++/*
++ * Prototype for this hypercall is:
++ *  int sched_op(int cmd, unsigned long arg)
++ * @cmd == SCHEDOP_??? (scheduler operation).
++ * @arg == Operation-specific extra argument(s).
++ */
++
++/*
++ * Voluntarily yield the CPU.
++ * @arg == 0.
++ */
++#define SCHEDOP_yield       0
++
++/*
++ * Block execution of this VCPU until an event is received for processing.
++ * If called with event upcalls masked, this operation will atomically
++ * reenable event delivery and check for pending events before blocking the
++ * VCPU. This avoids a "wakeup waiting" race.
++ * @arg == 0.
++ */
++#define SCHEDOP_block       1
++
++/*
++ * Halt execution of this domain (all VCPUs) and notify the system controller.
++ * @arg == SHUTDOWN_??? (reason for shutdown).
++ */
++#define SCHEDOP_shutdown    2
++
++/*
++ * Reason codes for SCHEDOP_shutdown. These may be interpreted by controller
++ * software to determine the appropriate action. For the most part, Xen does
++ * not care about the shutdown code.
++ */
++#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
++#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
++#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
++#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
++
++#endif /* __XEN_PUBLIC_SCHED_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/trace.h linux-2.6.12-xen/include/asm-xen/xen-public/trace.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/trace.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/trace.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,90 @@
++/******************************************************************************
++ * include/public/trace.h
++ * 
++ * Mark Williamson, (C) 2004 Intel Research Cambridge
++ * Copyright (C) 2005 Bin Ren
++ */
++
++#ifndef __XEN_PUBLIC_TRACE_H__
++#define __XEN_PUBLIC_TRACE_H__
++
++/* Trace classes */
++#define TRC_CLS_SHIFT 16
++#define TRC_GEN     0x0001f000    /* General trace            */
++#define TRC_SCHED   0x0002f000    /* Xen Scheduler trace      */
++#define TRC_DOM0OP  0x0004f000    /* Xen DOM0 operation trace */
++#define TRC_VMX     0x0008f000    /* Xen VMX trace            */
++#define TRC_MEM     0x000af000    /* Xen memory trace         */
++#define TRC_ALL     0xfffff000
++
++/* Trace subclasses */
++#define TRC_SUBCLS_SHIFT 12
++/* trace subclasses for VMX */
++#define TRC_VMXEXIT  0x00081000   /* VMX exit trace            */
++#define TRC_VMXTIMER 0x00082000   /* VMX timer trace           */
++#define TRC_VMXINT   0x00084000   /* VMX interrupt trace       */
++#define TRC_VMXIO    0x00088000   /* VMX io emulation trace  */
++#define TRC_VMEXIT_HANDLER    0x00090000   /* VMX handler trace  */
++
++/* Trace events per class */
++
++#define TRC_SCHED_DOM_ADD       (TRC_SCHED +  1)
++#define TRC_SCHED_DOM_REM       (TRC_SCHED +  2)
++#define TRC_SCHED_SLEEP         (TRC_SCHED +  3)
++#define TRC_SCHED_WAKE          (TRC_SCHED +  4)
++#define TRC_SCHED_YIELD         (TRC_SCHED +  5)
++#define TRC_SCHED_BLOCK         (TRC_SCHED +  6)
++#define TRC_SCHED_SHUTDOWN      (TRC_SCHED +  7)
++#define TRC_SCHED_CTL           (TRC_SCHED +  8)
++#define TRC_SCHED_ADJDOM        (TRC_SCHED +  9)
++#define TRC_SCHED_SWITCH        (TRC_SCHED + 10)
++#define TRC_SCHED_S_TIMER_FN    (TRC_SCHED + 11)
++#define TRC_SCHED_T_TIMER_FN    (TRC_SCHED + 12)
++#define TRC_SCHED_DOM_TIMER_FN  (TRC_SCHED + 13)
++#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14)
++#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15)
++
++#define TRC_MEM_PAGE_GRANT_MAP      (TRC_MEM + 1)
++#define TRC_MEM_PAGE_GRANT_UNMAP    (TRC_MEM + 2)
++#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
++
++/* trace events per subclass */
++#define TRC_VMX_VMEXIT          (TRC_VMXEXIT + 1)
++#define TRC_VMX_VECTOR          (TRC_VMXEXIT + 2)
++
++#define TRC_VMX_TIMER_INTR      (TRC_VMXTIMER + 1)
++
++#define TRC_VMX_INT             (TRC_VMXINT + 1)
++
++#define TRC_VMEXIT              (TRC_VMEXIT_HANDLER + 1)
++#define TRC_VMENTRY             (TRC_VMEXIT_HANDLER + 2)
++
++
++/* This structure represents a single trace buffer record. */
++struct t_rec {
++    uint64_t cycles;          /* cycle counter timestamp */
++    uint32_t event;           /* event ID                */
++    unsigned long data[5];    /* event data items        */
++};
++
++/*
++ * This structure contains the metadata for a single trace buffer.  The cons
++ * and prod fields index into the array of struct t_rec's that follows it.
++ */
++struct t_buf {
++    uint32_t cons;      /* Next item to be consumed by control tools. */
++    uint32_t prod;      /* Next item to be produced by Xen.           */
++    /* 'nr_recs' records follow immediately after the meta-data header.    */
++};
++
++#endif /* __XEN_PUBLIC_TRACE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
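
Since t_buf carries only the cons/prod indices and the records follow immediately after it, a control tool that has mapped a trace buffer can drain it with a simple loop. A sketch, assuming the caller already knows nr_recs (it is not part of this header) and that the indices are used modulo nr_recs:

#include <asm-xen/xen-public/trace.h>

/* Sketch of a trace-buffer consumer: 'buf' points at a mapped t_buf with
 * 'nr_recs' struct t_rec entries immediately after the metadata header. */
static void drain_trace_buffer(struct t_buf *buf, uint32_t nr_recs,
                               void (*handle)(struct t_rec *rec))
{
    struct t_rec *recs = (struct t_rec *)(buf + 1);

    while (buf->cons != buf->prod) {
        handle(&recs[buf->cons % nr_recs]);
        buf->cons++;   /* cons is owned by the tools; advancing it frees the slot */
    }
}
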
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/vcpu.h linux-2.6.12-xen/include/asm-xen/xen-public/vcpu.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/vcpu.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/vcpu.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,64 @@
++/******************************************************************************
++ * vcpu.h
++ * 
++ * VCPU initialisation, query, and hotplug.
++ * 
++ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_VCPU_H__
++#define __XEN_PUBLIC_VCPU_H__
++
++/*
++ * Prototype for this hypercall is:
++ *  int vcpu_op(int cmd, int vcpuid, void *extra_args)
++ * @cmd        == VCPUOP_??? (VCPU operation).
++ * @vcpuid     == VCPU to operate on.
++ * @extra_args == Operation-specific extra arguments (NULL if none).
++ */
++
++/*
++ * Initialise a VCPU. Each VCPU can be initialised only once. A 
++ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
++ * 
++ * @extra_arg == pointer to vcpu_guest_context structure containing initial
++ *               state for the VCPU.
++ */
++#define VCPUOP_initialise           0
++
++/*
++ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
++ * if the VCPU has not been initialised (VCPUOP_initialise).
++ */
++#define VCPUOP_up                   1
++
++/*
++ * Bring down a VCPU (i.e., make it non-runnable).
++ * There are a few caveats that callers should observe:
++ *  1. This operation may return, and VCPUOP_is_up may return false, before the
++ *     VCPU stops running (i.e., the command is asynchronous). It is a good
++ *     idea to ensure that the VCPU has entered a non-critical loop before
++ *     bringing it down. Alternatively, this operation is guaranteed
++ *     synchronous if invoked by the VCPU itself.
++ *  2. After a VCPU is initialised, there is currently no way to drop all its
++ *     references to domain memory. Even a VCPU that is down still holds
++ *     memory references via its pagetable base pointer and GDT. It is good
++ *     practice to move a VCPU onto an 'idle' or default page table, LDT and
++ *     GDT before bringing it down.
++ */
++#define VCPUOP_down                 2
++
++/* Returns 1 if the given VCPU is up. */
++#define VCPUOP_is_up                3
++
++#endif /* __XEN_PUBLIC_VCPU_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
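
Combined with the vcpu_op prototype documented at the top of the file, the intended start-of-day sequence for a secondary VCPU is initialise, then up, with is_up available as a status check. A sketch, assuming the guest's HYPERVISOR_vcpu_op() hypercall wrapper and an already-filled vcpu_guest_context (the structure named in the VCPUOP_initialise comment, defined in the arch headers):

#include <asm-xen/xen-public/vcpu.h>

/* Sketch: initialise and bring up VCPU 'vcpuid' with initial state 'ctxt'.
 * HYPERVISOR_vcpu_op(cmd, vcpuid, extra_args) is assumed to be the guest's
 * wrapper around the vcpu_op hypercall described above. */
static int bring_up_vcpu(int vcpuid, vcpu_guest_context_t *ctxt)
{
    int rc;

    /* Only the first initialise of a given VCPU can succeed. */
    rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpuid, ctxt);
    if (rc != 0)
        return rc;

    /* VCPUOP_up fails if the VCPU has not been initialised. */
    rc = HYPERVISOR_vcpu_op(VCPUOP_up, vcpuid, NULL);
    if (rc != 0)
        return rc;

    /* VCPUOP_is_up can be used later to check the VCPU's state:
     * it returns 1 once the VCPU is runnable. */
    return 0;
}
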
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/version.h linux-2.6.12-xen/include/asm-xen/xen-public/version.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/version.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/version.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,63 @@
++/******************************************************************************
++ * version.h
++ * 
++ * Xen version, type, and compile information.
++ * 
++ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh at gmail.com>
++ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_VERSION_H__
++#define __XEN_PUBLIC_VERSION_H__
++
++/* NB. All ops return zero on success, except XENVER_version. */
++
++/* arg == NULL; returns major:minor (16:16). */
++#define XENVER_version      0
++
++/* arg == xen_extraversion_t. */
++#define XENVER_extraversion 1
++typedef char xen_extraversion_t[16];
++
++/* arg == xen_compile_info_t. */
++#define XENVER_compile_info 2
++typedef struct xen_compile_info {
++    char compiler[64];
++    char compile_by[16];
++    char compile_domain[32];
++    char compile_date[32];
++} xen_compile_info_t;
++
++#define XENVER_capabilities 3
++typedef char xen_capabilities_info_t[1024];
++
++#define XENVER_changeset 4
++typedef char xen_changeset_info_t[64];
++
++#define XENVER_platform_parameters 5
++typedef struct xen_platform_parameters {
++    unsigned long virt_start;
++} xen_platform_parameters_t;
++
++#define XENVER_get_features 6
++typedef struct xen_feature_info {
++    unsigned int submap_idx;    /* IN: which 32-bit submap to return */
++    uint32_t     submap;        /* OUT: 32-bit submap */
++} xen_feature_info_t;
++
++#define XENFEAT_writable_page_tables       0
++#define XENFEAT_writable_descriptor_tables 1
++
++#define XENFEAT_NR_SUBMAPS 1
++
++#endif /* __XEN_PUBLIC_VERSION_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
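
These commands are issued through the xen_version hypercall (__HYPERVISOR_xen_version in xen.h below). A sketch of reporting the running hypervisor version from a guest, assuming the usual HYPERVISOR_xen_version() wrapper; note that XENVER_version is the one op that returns data rather than zero:

#include <linux/kernel.h>
#include <asm-xen/xen-public/version.h>

/* Sketch: report the hypervisor version as "major.minor<extra>".
 * HYPERVISOR_xen_version(cmd, arg) is assumed to be the guest's wrapper
 * around the xen_version hypercall. */
static void report_xen_version(void)
{
    xen_extraversion_t extra;
    int ver = HYPERVISOR_xen_version(XENVER_version, NULL);

    /* XENVER_version packs major:minor into the return value as 16:16. */
    int major = ver >> 16;
    int minor = ver & 0xFFFF;

    HYPERVISOR_xen_version(XENVER_extraversion, extra);
    printk(KERN_INFO "Running on Xen %d.%d%s\n", major, minor, extra);
}
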
+diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/xen.h linux-2.6.12-xen/include/asm-xen/xen-public/xen.h
+--- pristine-linux-2.6.12/include/asm-xen/xen-public/xen.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/asm-xen/xen-public/xen.h	2006-03-05 23:36:51.000000000 +0100
+@@ -0,0 +1,447 @@
++/******************************************************************************
++ * xen.h
++ * 
++ * Guest OS interface to Xen.
++ * 
++ * Copyright (c) 2004, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_XEN_H__
++#define __XEN_PUBLIC_XEN_H__
++
++#if defined(__i386__)
++#include "arch-x86_32.h"
++#elif defined(__x86_64__)
++#include "arch-x86_64.h"
++#elif defined(__ia64__)
++#include "arch-ia64.h"
++#else
++#error "Unsupported architecture"
++#endif
++
++/*
++ * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
++ */
++
++/*
++ * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
++ *         EAX = return value
++ *         (argument registers may be clobbered on return)
++ * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6. 
++ *         RAX = return value
++ *         (argument registers not clobbered on return; RCX, R11 are)
++ */
++#define __HYPERVISOR_set_trap_table        0
++#define __HYPERVISOR_mmu_update            1
++#define __HYPERVISOR_set_gdt               2
++#define __HYPERVISOR_stack_switch          3
++#define __HYPERVISOR_set_callbacks         4
++#define __HYPERVISOR_fpu_taskswitch        5
++#define __HYPERVISOR_sched_op              6
++#define __HYPERVISOR_dom0_op               7
++#define __HYPERVISOR_set_debugreg          8
++#define __HYPERVISOR_get_debugreg          9
++#define __HYPERVISOR_update_descriptor    10
++#define __HYPERVISOR_memory_op            12
++#define __HYPERVISOR_multicall            13
++#define __HYPERVISOR_update_va_mapping    14
++#define __HYPERVISOR_set_timer_op         15
++#define __HYPERVISOR_event_channel_op     16
++#define __HYPERVISOR_xen_version          17
++#define __HYPERVISOR_console_io           18
++#define __HYPERVISOR_physdev_op           19
++#define __HYPERVISOR_grant_table_op       20
++#define __HYPERVISOR_vm_assist            21
++#define __HYPERVISOR_update_va_mapping_otherdomain 22
++#define __HYPERVISOR_iret                 23 /* x86 only */
++#define __HYPERVISOR_switch_vm86          23 /* x86/32 only (obsolete name) */
++#define __HYPERVISOR_switch_to_user       23 /* x86/64 only (obsolete name) */
++#define __HYPERVISOR_vcpu_op              24
++#define __HYPERVISOR_set_segment_base     25 /* x86/64 only */
++#define __HYPERVISOR_mmuext_op            26
++#define __HYPERVISOR_acm_op               27
++#define __HYPERVISOR_nmi_op               28
++
++/* 
++ * VIRTUAL INTERRUPTS
++ * 
++ * Virtual interrupts that a guest OS may receive from Xen.
++ */
++#define VIRQ_TIMER      0  /* Timebase update, and/or requested timeout.  */
++#define VIRQ_DEBUG      1  /* Request guest to dump debug info.           */
++#define VIRQ_CONSOLE    2  /* (DOM0) Bytes received on emergency console. */
++#define VIRQ_DOM_EXC    3  /* (DOM0) Exceptional event for some domain.   */
++#define VIRQ_DEBUGGER   6  /* (DOM0) A domain has paused for debugging.   */
++#define NR_VIRQS        8
++
++/*
++ * MMU-UPDATE REQUESTS
++ * 
++ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
++ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
++ * Where the FD has some effect, it is described below.
++ * ptr[1:0] specifies the appropriate MMU_* command.
++ * 
++ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
++ * Updates an entry in a page table. If updating an L1 table, and the new
++ * table entry is valid/present, the mapped frame must belong to the FD, if
++ * an FD has been specified. If attempting to map an I/O page then the
++ * caller assumes the privilege of the FD.
++ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
++ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
++ * ptr[:2]  -- Machine address of the page-table entry to modify.
++ * val      -- Value to write.
++ * 
++ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
++ * Updates an entry in the machine->pseudo-physical mapping table.
++ * ptr[:2]  -- Machine address within the frame whose mapping to modify.
++ *             The frame must belong to the FD, if one is specified.
++ * val      -- Value to write into the mapping entry.
++ */
++#define MMU_NORMAL_PT_UPDATE     0 /* checked '*ptr = val'. ptr is MA.       */
++#define MMU_MACHPHYS_UPDATE      1 /* ptr = MA of frame to modify entry for  */
++
++/*
++ * MMU EXTENDED OPERATIONS
++ * 
++ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
++ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
++ * Where the FD has some effect, it is described below.
++ * 
++ * cmd: MMUEXT_(UN)PIN_*_TABLE
++ * mfn: Machine frame number to be (un)pinned as a p.t. page.
++ *      The frame must belong to the FD, if one is specified.
++ * 
++ * cmd: MMUEXT_NEW_BASEPTR
++ * mfn: Machine frame number of new page-table base to install in MMU.
++ * 
++ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
++ * mfn: Machine frame number of new page-table base to install in MMU
++ *      when in user space.
++ * 
++ * cmd: MMUEXT_TLB_FLUSH_LOCAL
++ * No additional arguments. Flushes local TLB.
++ * 
++ * cmd: MMUEXT_INVLPG_LOCAL
++ * linear_addr: Linear address to be flushed from the local TLB.
++ * 
++ * cmd: MMUEXT_TLB_FLUSH_MULTI
++ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ * 
++ * cmd: MMUEXT_INVLPG_MULTI
++ * linear_addr: Linear address to be flushed.
++ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
++ * 
++ * cmd: MMUEXT_TLB_FLUSH_ALL
++ * No additional arguments. Flushes all VCPUs' TLBs.
++ * 
++ * cmd: MMUEXT_INVLPG_ALL
++ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
++ * 
++ * cmd: MMUEXT_FLUSH_CACHE
++ * No additional arguments. Writes back and flushes cache contents.
++ * 
++ * cmd: MMUEXT_SET_LDT
++ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
++ * nr_ents: Number of entries in LDT.
++ */
++#define MMUEXT_PIN_L1_TABLE      0
++#define MMUEXT_PIN_L2_TABLE      1
++#define MMUEXT_PIN_L3_TABLE      2
++#define MMUEXT_PIN_L4_TABLE      3
++#define MMUEXT_UNPIN_TABLE       4
++#define MMUEXT_NEW_BASEPTR       5
++#define MMUEXT_TLB_FLUSH_LOCAL   6
++#define MMUEXT_INVLPG_LOCAL      7
++#define MMUEXT_TLB_FLUSH_MULTI   8
++#define MMUEXT_INVLPG_MULTI      9
++#define MMUEXT_TLB_FLUSH_ALL    10
++#define MMUEXT_INVLPG_ALL       11
++#define MMUEXT_FLUSH_CACHE      12
++#define MMUEXT_SET_LDT          13
++#define MMUEXT_NEW_USER_BASEPTR 15
++
++#ifndef __ASSEMBLY__
++struct mmuext_op {
++    unsigned int cmd;
++    union {
++        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
++        unsigned long mfn;
++        /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
++        unsigned long linear_addr;
++    } arg1;
++    union {
++        /* SET_LDT */
++        unsigned int nr_ents;
++        /* TLB_FLUSH_MULTI, INVLPG_MULTI */
++        void *vcpumask;
++    } arg2;
++};
++#endif
++
++/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
++/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap.   */
++/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer.         */
++#define UVMF_NONE               (0UL<<0) /* No flushing at all.   */
++#define UVMF_TLB_FLUSH          (1UL<<0) /* Flush entire TLB(s).  */
++#define UVMF_INVLPG             (2UL<<0) /* Flush only one entry. */
++#define UVMF_FLUSHTYPE_MASK     (3UL<<0)
++#define UVMF_MULTI              (0UL<<2) /* Flush subset of TLBs. */
++#define UVMF_LOCAL              (0UL<<2) /* Flush local TLB.      */
++#define UVMF_ALL                (1UL<<2) /* Flush all TLBs.       */
++
++/*
++ * Commands to HYPERVISOR_console_io().
++ */
++#define CONSOLEIO_write         0
++#define CONSOLEIO_read          1
++
++/*
++ * Commands to HYPERVISOR_vm_assist().
++ */
++#define VMASST_CMD_enable                0
++#define VMASST_CMD_disable               1
++#define VMASST_TYPE_4gb_segments         0
++#define VMASST_TYPE_4gb_segments_notify  1
++#define VMASST_TYPE_writable_pagetables  2
++#define MAX_VMASST_TYPE 2
++
++#ifndef __ASSEMBLY__
++
++typedef uint16_t domid_t;
++
++/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
++#define DOMID_FIRST_RESERVED (0x7FF0U)
++
++/* DOMID_SELF is used in certain contexts to refer to oneself. */
++#define DOMID_SELF (0x7FF0U)
++
++/*
++ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
++ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
++ * is useful to ensure that no mappings to the OS's own heap are accidentally
++ * installed. (e.g., in Linux this could cause havoc as reference counts
++ * aren't adjusted on the I/O-mapping code path).
++ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
++ * be specified by any calling domain.
++ */
++#define DOMID_IO   (0x7FF1U)
++
++/*
++ * DOMID_XEN is used to allow privileged domains to map restricted parts of
++ * Xen's heap space (e.g., the machine_to_phys table).
++ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
++ * the caller is privileged.
++ */
++#define DOMID_XEN  (0x7FF2U)
++
++/*
++ * Send an array of these to HYPERVISOR_mmu_update().
++ * NB. The fields are natural pointer/address size for this architecture.
++ */
++typedef struct mmu_update {
++    uint64_t ptr;       /* Machine address of PTE. */
++    uint64_t val;       /* New contents of PTE.    */
++} mmu_update_t;
++
++/*
++ * Send an array of these to HYPERVISOR_multicall().
++ * NB. The fields are natural register size for this architecture.
++ */
++typedef struct multicall_entry {
++    unsigned long op, result;
++    unsigned long args[6];
++} multicall_entry_t;
++
++/*
++ * Event channel endpoints per domain:
++ *  1024 if a long is 32 bits; 4096 if a long is 64 bits.
++ */
++#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
++
++typedef struct vcpu_time_info {
++    /*
++     * Updates to the following values are preceded and followed by an
++     * increment of 'version'. The guest can therefore detect updates by
++     * looking for changes to 'version'. If the least-significant bit of
++     * the version number is set then an update is in progress and the guest
++     * must wait to read a consistent set of values.
++     * The correct way to interact with the version number is similar to
++     * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
++     */
++    uint32_t version;
++    uint32_t pad0;
++    uint64_t tsc_timestamp;   /* TSC at last update of time vals.  */
++    uint64_t system_time;     /* Time, in nanosecs, since boot.    */
++    /*
++     * Current system time:
++     *   system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
++     * CPU frequency (Hz):
++     *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
++     */
++    uint32_t tsc_to_system_mul;
++    int8_t   tsc_shift;
++    int8_t   pad1[3];
++} vcpu_time_info_t; /* 32 bytes */
++
++typedef struct vcpu_info {
++    /*
++     * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
++     * a pending notification for a particular VCPU. It is then cleared 
++     * by the guest OS /before/ checking for pending work, thus avoiding
++     * a set-and-check race. Note that the mask is only accessed by Xen
++     * on the CPU that is currently hosting the VCPU. This means that the
++     * pending and mask flags can be updated by the guest without special
++     * synchronisation (i.e., no need for the x86 LOCK prefix).
++     * This may seem suboptimal because if the pending flag is set by
++     * a different CPU then an IPI may be scheduled even when the mask
++     * is set. However, note:
++     *  1. The task of 'interrupt holdoff' is covered by the per-event-
++     *     channel mask bits. A 'noisy' event that is continually being
++     *     triggered can be masked at source at this very precise
++     *     granularity.
++     *  2. The main purpose of the per-VCPU mask is therefore to restrict
++     *     reentrant execution: whether for concurrency control, or to
++     *     prevent unbounded stack usage. Whatever the purpose, we expect
++     *     that the mask will be asserted only for short periods at a time,
++     *     and so the likelihood of a 'spurious' IPI is suitably small.
++     * The mask is read before making an event upcall to the guest: a
++     * non-zero mask therefore guarantees that the VCPU will not receive
++     * an upcall activation. The mask is cleared when the VCPU requests
++     * to block: this avoids wakeup-waiting races.
++     */
++    uint8_t evtchn_upcall_pending;
++    uint8_t evtchn_upcall_mask;
++    unsigned long evtchn_pending_sel;
++    arch_vcpu_info_t arch;
++    vcpu_time_info_t time;
++} vcpu_info_t; /* 64 bytes (x86) */
++
++/*
++ * Xen/kernel shared data -- pointer provided in start_info.
++ * NB. We expect that this struct is smaller than a page.
++ */
++typedef struct shared_info {
++    vcpu_info_t vcpu_info[MAX_VIRT_CPUS];
++
++    /*
++     * A domain can create "event channels" on which it can send and receive
++     * asynchronous event notifications. There are three classes of event that
++     * are delivered by this mechanism:
++     *  1. Bi-directional inter- and intra-domain connections. Domains must
++     *     arrange out-of-band to set up a connection (usually by allocating
++     *     an unbound 'listener' port and advertising that via a storage service
++     *     such as xenstore).
++     *  2. Physical interrupts. A domain with suitable hardware-access
++     *     privileges can bind an event-channel port to a physical interrupt
++     *     source.
++     *  3. Virtual interrupts ('events'). A domain can bind an event-channel
++     *     port to a virtual interrupt source, such as the virtual-timer
++     *     device or the emergency console.
++     * 
++     * Event channels are addressed by a "port index". Each channel is
++     * associated with two bits of information:
++     *  1. PENDING -- notifies the domain that there is a pending notification
++     *     to be processed. This bit is cleared by the guest.
++     *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
++     *     will cause an asynchronous upcall to be scheduled. This bit is only
++     *     updated by the guest. It is read-only within Xen. If a channel
++     *     becomes pending while the channel is masked then the 'edge' is lost
++     *     (i.e., when the channel is unmasked, the guest must manually handle
++     *     pending notifications as no upcall will be scheduled by Xen).
++     * 
++     * To expedite scanning of pending notifications, any 0->1 pending
++     * transition on an unmasked channel causes a corresponding bit in a
++     * per-vcpu selector word to be set. Each bit in the selector covers a
++     * 'C long' in the PENDING bitfield array.
++     */
++    unsigned long evtchn_pending[sizeof(unsigned long) * 8];
++    unsigned long evtchn_mask[sizeof(unsigned long) * 8];
++
++    /*
++     * Wallclock time: updated only by control software. Guests should base
++     * their gettimeofday() syscall on this wallclock-base value.
++     */
++    uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
++    uint32_t wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970.  */
++    uint32_t wc_nsec;         /* Nsecs 00:00:00 UTC, Jan 1, 1970.  */
++
++    arch_shared_info_t arch;
++
++} shared_info_t;
++
++/*
++ * Start-of-day memory layout for the initial domain (DOM0):
++ *  1. The domain is started within a contiguous virtual-memory region.
++ *  2. The contiguous region begins and ends on an aligned 4MB boundary.
++ *  3. The region start corresponds to the load address of the OS image.
++ *     If the load address is not 4MB aligned then the address is rounded down.
++ *  4. This is the order of bootstrap elements in the initial virtual region:
++ *      a. relocated kernel image
++ *      b. initial ram disk              [mod_start, mod_len]
++ *      c. list of allocated page frames [mfn_list, nr_pages]
++ *      d. bootstrap page tables         [pt_base, CR3 (x86)]
++ *      e. start_info_t structure        [register ESI (x86)]
++ *      f. bootstrap stack               [register ESP (x86)]
++ *  5. Bootstrap elements are packed together, but each is 4kB-aligned.
++ *  6. The initial ram disk may be omitted.
++ *  7. The list of page frames forms a contiguous 'pseudo-physical' memory
++ *     layout for the domain. In particular, the bootstrap virtual-memory
++ *     region is a 1:1 mapping to the first section of the pseudo-physical map.
++ *  8. All bootstrap elements are mapped read-writable for the guest OS. The
++ *     only exception is the bootstrap page table, which is mapped read-only.
++ *  9. There is guaranteed to be at least 512kB padding after the final
++ *     bootstrap element. If necessary, the bootstrap virtual region is
++ *     extended by an extra 4MB to ensure this.
++ */
++
++#define MAX_GUEST_CMDLINE 1024
++typedef struct start_info {
++    /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
++    char magic[32];             /* "xen-<version>-<platform>".            */
++    unsigned long nr_pages;     /* Total pages allocated to this domain.  */
++    unsigned long shared_info;  /* MACHINE address of shared info struct. */
++    uint32_t flags;             /* SIF_xxx flags.                         */
++    unsigned long store_mfn;    /* MACHINE page number of shared page.    */
++    uint32_t store_evtchn;      /* Event channel for store communication. */
++    unsigned long console_mfn;  /* MACHINE address of console page.       */
++    uint32_t console_evtchn;    /* Event channel for console messages.    */
++    /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
++    unsigned long pt_base;      /* VIRTUAL address of page directory.     */
++    unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames.       */
++    unsigned long mfn_list;     /* VIRTUAL address of page-frame list.    */
++    unsigned long mod_start;    /* VIRTUAL address of pre-loaded module.  */
++    unsigned long mod_len;      /* Size (bytes) of pre-loaded module.     */
++    int8_t cmd_line[MAX_GUEST_CMDLINE];
++} start_info_t;
++
++/* These flags are passed in the 'flags' field of start_info_t. */
++#define SIF_PRIVILEGED    (1<<0)  /* Is the domain privileged? */
++#define SIF_INITDOMAIN    (1<<1)  /* Is this the initial control domain? */
++
++typedef uint64_t cpumap_t;
++
++typedef uint8_t xen_domain_handle_t[16];
++
++/* Turn a plain number into a C unsigned long constant. */
++#define __mk_unsigned_long(x) x ## UL
++#define mk_unsigned_long(x) __mk_unsigned_long(x)
++
++#else /* __ASSEMBLY__ */
++
++/* In assembly code we cannot use C numeric constant suffixes. */
++#define mk_unsigned_long(x) x
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_XEN_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
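
The version-counter protocol that vcpu_time_info_t describes (and that the wc_* fields in shared_info reuse) is the familiar seqlock read pattern: retry while the counter is odd or has changed. A sketch of taking a consistent snapshot of one VCPU's time fields, assuming Linux's rmb() barrier from the kernel headers; the conversion to nanoseconds then follows the formula given in the structure's comment:

#include <asm/system.h>
#include <asm-xen/xen-public/xen.h>

/* Sketch: snapshot the fields needed to compute system time for one VCPU,
 * using the documented version protocol (odd => update in progress). */
static void snapshot_time_info(vcpu_time_info_t *info,
                               uint64_t *tsc_timestamp, uint64_t *system_time,
                               uint32_t *mul, int8_t *shift)
{
    uint32_t ver;

    do {
        ver = info->version;
        rmb();                       /* read version before the payload */
        *tsc_timestamp = info->tsc_timestamp;
        *system_time   = info->system_time;
        *mul           = info->tsc_to_system_mul;
        *shift         = info->tsc_shift;
        rmb();                       /* read payload before re-checking */
    } while ((ver & 1) || (ver != info->version));

    /* Current system time is then, per the comment in the structure:
     *   system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
     * with tsc_to_system_mul applied as a fixed-point multiplier and a
     * negative tsc_shift meaning a right shift. */
}
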
+diff -Nurp pristine-linux-2.6.12/include/linux/autoconf.h linux-2.6.12-xen/include/linux/autoconf.h
+--- pristine-linux-2.6.12/include/linux/autoconf.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/include/linux/autoconf.h	2006-03-05 23:55:06.826653101 +0100
+@@ -0,0 +1,2967 @@
++/*
++ * Automatically generated C config: don't edit
++ * Linux kernel version: 2.6.12.6-xen
++ * Sun Mar  5 23:55:06 2006
++ */
++#define AUTOCONF_INCLUDED
++#define CONFIG_XEN 1
++#define CONFIG_ARCH_XEN 1
++#define CONFIG_NO_IDLE_HZ 1
++
++/*
++ * XEN
++ */
++#define CONFIG_XEN_PRIVILEGED_GUEST 1
++#define CONFIG_XEN_PHYSDEV_ACCESS 1
++#define CONFIG_XEN_BLKDEV_BACKEND 1
++#undef CONFIG_XEN_BLKDEV_TAP_BE
++#define CONFIG_XEN_NETDEV_BACKEND 1
++#undef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++#undef CONFIG_XEN_TPMDEV_FRONTEND
++#undef CONFIG_XEN_TPMDEV_BACKEND
++#define CONFIG_XEN_BLKDEV_FRONTEND 1
++#define CONFIG_XEN_NETDEV_FRONTEND 1
++#undef CONFIG_XEN_BLKDEV_TAP
++#undef CONFIG_XEN_SHADOW_MODE
++#define CONFIG_XEN_SCRUB_PAGES 1
++#define CONFIG_XEN_X86 1
++#undef CONFIG_XEN_X86_64
++#define CONFIG_HAVE_ARCH_ALLOC_SKB 1
++#define CONFIG_HAVE_ARCH_DEV_ALLOC_SKB 1
++
++/*
++ * Code maturity level options
++ */
++#define CONFIG_EXPERIMENTAL 1
++#undef CONFIG_CLEAN_COMPILE
++#define CONFIG_BROKEN 1
++#define CONFIG_BROKEN_ON_SMP 1
++#define CONFIG_LOCK_KERNEL 1
++#define CONFIG_INIT_ENV_ARG_LIMIT 32
++
++/*
++ * General setup
++ */
++#define CONFIG_LOCALVERSION ""
++#define CONFIG_SWAP 1
++#define CONFIG_SYSVIPC 1
++#define CONFIG_POSIX_MQUEUE 1
++#define CONFIG_BSD_PROCESS_ACCT 1
++#undef CONFIG_BSD_PROCESS_ACCT_V3
++#define CONFIG_SYSCTL 1
++#undef CONFIG_AUDIT
++#define CONFIG_HOTPLUG 1
++#define CONFIG_KOBJECT_UEVENT 1
++#define CONFIG_IKCONFIG 1
++#define CONFIG_IKCONFIG_PROC 1
++#undef CONFIG_CPUSETS
++#define CONFIG_EMBEDDED 1
++#define CONFIG_KALLSYMS 1
++#undef CONFIG_KALLSYMS_ALL
++#undef CONFIG_KALLSYMS_EXTRA_PASS
++#define CONFIG_PRINTK 1
++#define CONFIG_BUG 1
++#define CONFIG_BASE_FULL 1
++#define CONFIG_FUTEX 1
++#define CONFIG_EPOLL 1
++#undef CONFIG_CC_OPTIMIZE_FOR_SIZE
++#define CONFIG_SHMEM 1
++#define CONFIG_CC_ALIGN_FUNCTIONS 0
++#define CONFIG_CC_ALIGN_LABELS 0
++#define CONFIG_CC_ALIGN_LOOPS 0
++#define CONFIG_CC_ALIGN_JUMPS 0
++#undef CONFIG_TINY_SHMEM
++#define CONFIG_BASE_SMALL 0
++
++/*
++ * Loadable module support
++ */
++#define CONFIG_MODULES 1
++#define CONFIG_MODULE_UNLOAD 1
++#define CONFIG_MODULE_FORCE_UNLOAD 1
++#define CONFIG_OBSOLETE_MODPARM 1
++#define CONFIG_MODVERSIONS 1
++#undef CONFIG_MODULE_SRCVERSION_ALL
++#define CONFIG_KMOD 1
++#define CONFIG_STOP_MACHINE 1
++
++/*
++ * X86 Processor Configuration
++ */
++#define CONFIG_XENARCH "i386"
++#define CONFIG_X86 1
++#define CONFIG_MMU 1
++#define CONFIG_UID16 1
++#define CONFIG_GENERIC_ISA_DMA 1
++#define CONFIG_GENERIC_IOMAP 1
++#undef CONFIG_M386
++#undef CONFIG_M486
++#undef CONFIG_M586
++#undef CONFIG_M586TSC
++#undef CONFIG_M586MMX
++#define CONFIG_M686 1
++#undef CONFIG_MPENTIUMII
++#undef CONFIG_MPENTIUMIII
++#undef CONFIG_MPENTIUMM
++#undef CONFIG_MPENTIUM4
++#undef CONFIG_MK6
++#undef CONFIG_MK7
++#undef CONFIG_MK8
++#undef CONFIG_MCRUSOE
++#undef CONFIG_MEFFICEON
++#undef CONFIG_MWINCHIPC6
++#undef CONFIG_MWINCHIP2
++#undef CONFIG_MWINCHIP3D
++#undef CONFIG_MGEODEGX1
++#undef CONFIG_MCYRIXIII
++#undef CONFIG_MVIAC3_2
++#undef CONFIG_X86_GENERIC
++#define CONFIG_X86_CMPXCHG 1
++#define CONFIG_X86_XADD 1
++#define CONFIG_X86_L1_CACHE_SHIFT 5
++#define CONFIG_RWSEM_XCHGADD_ALGORITHM 1
++#define CONFIG_GENERIC_CALIBRATE_DELAY 1
++#define CONFIG_X86_PPRO_FENCE 1
++#define CONFIG_X86_WP_WORKS_OK 1
++#define CONFIG_X86_INVLPG 1
++#define CONFIG_X86_BSWAP 1
++#define CONFIG_X86_POPAD_OK 1
++#define CONFIG_X86_GOOD_APIC 1
++#define CONFIG_X86_USE_PPRO_CHECKSUM 1
++#undef CONFIG_HPET_TIMER
++#undef CONFIG_HPET_EMULATE_RTC
++#define CONFIG_SMP 1
++#define CONFIG_SMP_ALTERNATIVES 1
++#define CONFIG_NR_CPUS 8
++#undef CONFIG_SCHED_SMT
++#undef CONFIG_X86_REBOOTFIXUPS
++#define CONFIG_MICROCODE 1
++#define CONFIG_X86_CPUID_MODULE 1
++#define CONFIG_SWIOTLB 1
++
++/*
++ * Firmware Drivers
++ */
++#define CONFIG_EDD_MODULE 1
++#undef CONFIG_NOHIGHMEM
++#define CONFIG_HIGHMEM4G 1
++#undef CONFIG_HIGHMEM64G
++#define CONFIG_HIGHMEM 1
++#define CONFIG_MTRR 1
++#define CONFIG_HAVE_DEC_LOCK 1
++#undef CONFIG_REGPARM
++#define CONFIG_X86_LOCAL_APIC 1
++#define CONFIG_X86_IO_APIC 1
++#define CONFIG_HOTPLUG_CPU 1
++
++/*
++ * Bus options (PCI, PCMCIA, EISA, MCA, ISA)
++ */
++#define CONFIG_PCI 1
++#undef CONFIG_PCI_GOMMCONFIG
++#undef CONFIG_PCI_GODIRECT
++#define CONFIG_PCI_GOANY 1
++#define CONFIG_PCI_DIRECT 1
++#define CONFIG_PCI_MMCONFIG 1
++#undef CONFIG_PCIEPORTBUS
++#undef CONFIG_PCI_MSI
++#undef CONFIG_PCI_LEGACY_PROC
++#define CONFIG_PCI_NAMES 1
++#undef CONFIG_PCI_DEBUG
++#define CONFIG_ISA_DMA_API 1
++#define CONFIG_ISA 1
++#undef CONFIG_EISA
++#undef CONFIG_MCA
++#define CONFIG_SCx200_MODULE 1
++
++/*
++ * PCCARD (PCMCIA/CardBus) support
++ */
++#define CONFIG_PCCARD_MODULE 1
++#undef CONFIG_PCMCIA_DEBUG
++#define CONFIG_PCMCIA_MODULE 1
++#define CONFIG_CARDBUS 1
++
++/*
++ * PC-card bridges
++ */
++#define CONFIG_YENTA_MODULE 1
++#define CONFIG_PD6729_MODULE 1
++#define CONFIG_I82092_MODULE 1
++#define CONFIG_I82365_MODULE 1
++#define CONFIG_TCIC_MODULE 1
++#define CONFIG_PCMCIA_PROBE 1
++#define CONFIG_PCCARD_NONSTATIC_MODULE 1
++
++/*
++ * PCI Hotplug Support
++ */
++#define CONFIG_HOTPLUG_PCI_MODULE 1
++#define CONFIG_HOTPLUG_PCI_FAKE_MODULE 1
++#undef CONFIG_HOTPLUG_PCI_ACPI
++#define CONFIG_HOTPLUG_PCI_CPCI 1
++#define CONFIG_HOTPLUG_PCI_CPCI_ZT5550_MODULE 1
++#define CONFIG_HOTPLUG_PCI_CPCI_GENERIC_MODULE 1
++#define CONFIG_HOTPLUG_PCI_SHPC_MODULE 1
++#undef CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE
++#define CONFIG_GENERIC_HARDIRQS 1
++#define CONFIG_GENERIC_IRQ_PROBE 1
++#define CONFIG_X86_SMP 1
++#define CONFIG_X86_BIOS_REBOOT 1
++#define CONFIG_X86_TRAMPOLINE 1
++#define CONFIG_SECCOMP 1
++#undef CONFIG_EARLY_PRINTK
++
++/*
++ * Executable file formats
++ */
++#define CONFIG_BINFMT_ELF 1
++#define CONFIG_BINFMT_AOUT_MODULE 1
++#define CONFIG_BINFMT_MISC_MODULE 1
++
++/*
++ * Device Drivers
++ */
++
++/*
++ * Generic Driver Options
++ */
++#define CONFIG_STANDALONE 1
++#define CONFIG_PREVENT_FIRMWARE_BUILD 1
++#define CONFIG_FW_LOADER_MODULE 1
++#undef CONFIG_DEBUG_DRIVER
++
++/*
++ * Memory Technology Devices (MTD)
++ */
++#define CONFIG_MTD_MODULE 1
++#undef CONFIG_MTD_DEBUG
++#define CONFIG_MTD_CONCAT_MODULE 1
++#define CONFIG_MTD_PARTITIONS 1
++#define CONFIG_MTD_REDBOOT_PARTS_MODULE 1
++#define CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK -1
++#undef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
++#undef CONFIG_MTD_REDBOOT_PARTS_READONLY
++#undef CONFIG_MTD_CMDLINE_PARTS
++
++/*
++ * User Modules And Translation Layers
++ */
++#define CONFIG_MTD_CHAR_MODULE 1
++#define CONFIG_MTD_BLOCK_MODULE 1
++#define CONFIG_MTD_BLOCK_RO_MODULE 1
++#define CONFIG_FTL_MODULE 1
++#define CONFIG_NFTL_MODULE 1
++#define CONFIG_NFTL_RW 1
++#define CONFIG_INFTL_MODULE 1
++
++/*
++ * RAM/ROM/Flash chip drivers
++ */
++#define CONFIG_MTD_CFI_MODULE 1
++#define CONFIG_MTD_JEDECPROBE_MODULE 1
++#define CONFIG_MTD_GEN_PROBE_MODULE 1
++#undef CONFIG_MTD_CFI_ADV_OPTIONS
++#define CONFIG_MTD_MAP_BANK_WIDTH_1 1
++#define CONFIG_MTD_MAP_BANK_WIDTH_2 1
++#define CONFIG_MTD_MAP_BANK_WIDTH_4 1
++#undef CONFIG_MTD_MAP_BANK_WIDTH_8
++#undef CONFIG_MTD_MAP_BANK_WIDTH_16
++#undef CONFIG_MTD_MAP_BANK_WIDTH_32
++#define CONFIG_MTD_CFI_I1 1
++#define CONFIG_MTD_CFI_I2 1
++#undef CONFIG_MTD_CFI_I4
++#undef CONFIG_MTD_CFI_I8
++#define CONFIG_MTD_CFI_INTELEXT_MODULE 1
++#define CONFIG_MTD_CFI_AMDSTD_MODULE 1
++#define CONFIG_MTD_CFI_AMDSTD_RETRY 0
++#define CONFIG_MTD_CFI_STAA_MODULE 1
++#define CONFIG_MTD_CFI_UTIL_MODULE 1
++#define CONFIG_MTD_RAM_MODULE 1
++#define CONFIG_MTD_ROM_MODULE 1
++#define CONFIG_MTD_ABSENT_MODULE 1
++#undef CONFIG_MTD_OBSOLETE_CHIPS
++
++/*
++ * Mapping drivers for chip access
++ */
++#define CONFIG_MTD_COMPLEX_MAPPINGS 1
++#define CONFIG_MTD_PHYSMAP_MODULE 1
++#define CONFIG_MTD_PHYSMAP_START 0x8000000
++#define CONFIG_MTD_PHYSMAP_LEN 0x4000000
++#define CONFIG_MTD_PHYSMAP_BANKWIDTH 2
++#define CONFIG_MTD_PNC2000_MODULE 1
++#define CONFIG_MTD_SC520CDP_MODULE 1
++#define CONFIG_MTD_NETSC520_MODULE 1
++#define CONFIG_MTD_TS5500_MODULE 1
++#define CONFIG_MTD_SBC_GXX_MODULE 1
++#define CONFIG_MTD_ELAN_104NC_MODULE 1
++#define CONFIG_MTD_SCx200_DOCFLASH_MODULE 1
++#undef CONFIG_MTD_AMD76XROM
++#undef CONFIG_MTD_ICHXROM
++#undef CONFIG_MTD_SCB2_FLASH
++#define CONFIG_MTD_NETtel_MODULE 1
++#define CONFIG_MTD_DILNETPC_MODULE 1
++#define CONFIG_MTD_DILNETPC_BOOTSIZE 0x80000
++#undef CONFIG_MTD_L440GX
++#define CONFIG_MTD_PCI_MODULE 1
++#define CONFIG_MTD_PCMCIA_MODULE 1
++
++/*
++ * Self-contained MTD device drivers
++ */
++#define CONFIG_MTD_PMC551_MODULE 1
++#undef CONFIG_MTD_PMC551_BUGFIX
++#undef CONFIG_MTD_PMC551_DEBUG
++#define CONFIG_MTD_SLRAM_MODULE 1
++#define CONFIG_MTD_PHRAM_MODULE 1
++#define CONFIG_MTD_MTDRAM_MODULE 1
++#define CONFIG_MTDRAM_TOTAL_SIZE 4096
++#define CONFIG_MTDRAM_ERASE_SIZE 128
++#define CONFIG_MTD_BLKMTD_MODULE 1
++#undef CONFIG_MTD_BLOCK2MTD
++
++/*
++ * Disk-On-Chip Device Drivers
++ */
++#define CONFIG_MTD_DOC2000_MODULE 1
++#define CONFIG_MTD_DOC2001_MODULE 1
++#define CONFIG_MTD_DOC2001PLUS_MODULE 1
++#define CONFIG_MTD_DOCPROBE_MODULE 1
++#define CONFIG_MTD_DOCECC_MODULE 1
++#undef CONFIG_MTD_DOCPROBE_ADVANCED
++#define CONFIG_MTD_DOCPROBE_ADDRESS 0x0
++
++/*
++ * NAND Flash Device Drivers
++ */
++#define CONFIG_MTD_NAND_MODULE 1
++#undef CONFIG_MTD_NAND_VERIFY_WRITE
++#define CONFIG_MTD_NAND_IDS_MODULE 1
++#define CONFIG_MTD_NAND_DISKONCHIP_MODULE 1
++#undef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED
++#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0x0
++#undef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
++#undef CONFIG_MTD_NAND_NANDSIM
++
++/*
++ * Parallel port support
++ */
++#define CONFIG_PARPORT_MODULE 1
++#define CONFIG_PARPORT_PC_MODULE 1
++#define CONFIG_PARPORT_SERIAL_MODULE 1
++#define CONFIG_PARPORT_PC_FIFO 1
++#undef CONFIG_PARPORT_PC_SUPERIO
++#define CONFIG_PARPORT_PC_PCMCIA_MODULE 1
++#define CONFIG_PARPORT_NOT_PC 1
++#undef CONFIG_PARPORT_GSC
++#define CONFIG_PARPORT_1284 1
++
++/*
++ * Plug and Play support
++ */
++#define CONFIG_PNP 1
++#undef CONFIG_PNP_DEBUG
++
++/*
++ * Protocols
++ */
++#define CONFIG_ISAPNP 1
++#undef CONFIG_PNPBIOS
++#undef CONFIG_PNPACPI
++
++/*
++ * Block devices
++ */
++#define CONFIG_BLK_DEV_FD_MODULE 1
++#define CONFIG_BLK_DEV_XD_MODULE 1
++#define CONFIG_PARIDE_MODULE 1
++#define CONFIG_PARIDE_PARPORT_MODULE 1
++
++/*
++ * Parallel IDE high-level drivers
++ */
++#define CONFIG_PARIDE_PD_MODULE 1
++#define CONFIG_PARIDE_PCD_MODULE 1
++#define CONFIG_PARIDE_PF_MODULE 1
++#define CONFIG_PARIDE_PT_MODULE 1
++#define CONFIG_PARIDE_PG_MODULE 1
++
++/*
++ * Parallel IDE protocol modules
++ */
++#define CONFIG_PARIDE_ATEN_MODULE 1
++#define CONFIG_PARIDE_BPCK_MODULE 1
++#define CONFIG_PARIDE_BPCK6_MODULE 1
++#define CONFIG_PARIDE_COMM_MODULE 1
++#define CONFIG_PARIDE_DSTR_MODULE 1
++#define CONFIG_PARIDE_FIT2_MODULE 1
++#define CONFIG_PARIDE_FIT3_MODULE 1
++#define CONFIG_PARIDE_EPAT_MODULE 1
++#undef CONFIG_PARIDE_EPATC8
++#define CONFIG_PARIDE_EPIA_MODULE 1
++#define CONFIG_PARIDE_FRIQ_MODULE 1
++#define CONFIG_PARIDE_FRPW_MODULE 1
++#define CONFIG_PARIDE_KBIC_MODULE 1
++#define CONFIG_PARIDE_KTTI_MODULE 1
++#define CONFIG_PARIDE_ON20_MODULE 1
++#define CONFIG_PARIDE_ON26_MODULE 1
++#define CONFIG_BLK_CPQ_DA_MODULE 1
++#define CONFIG_BLK_CPQ_CISS_DA_MODULE 1
++#define CONFIG_CISS_SCSI_TAPE 1
++#define CONFIG_BLK_DEV_DAC960_MODULE 1
++#define CONFIG_BLK_DEV_UMEM_MODULE 1
++#undef CONFIG_BLK_DEV_COW_COMMON
++#define CONFIG_BLK_DEV_LOOP_MODULE 1
++#define CONFIG_BLK_DEV_CRYPTOLOOP_MODULE 1
++#define CONFIG_BLK_DEV_NBD_MODULE 1
++#define CONFIG_BLK_DEV_SX8_MODULE 1
++#undef CONFIG_BLK_DEV_UB
++#define CONFIG_BLK_DEV_RAM 1
++#define CONFIG_BLK_DEV_RAM_COUNT 16
++#define CONFIG_BLK_DEV_RAM_SIZE 16384
++#define CONFIG_BLK_DEV_INITRD 1
++#define CONFIG_INITRAMFS_SOURCE ""
++#define CONFIG_LBD 1
++#define CONFIG_CDROM_PKTCDVD_MODULE 1
++#define CONFIG_CDROM_PKTCDVD_BUFFERS 8
++#undef CONFIG_CDROM_PKTCDVD_WCACHE
++
++/*
++ * IO Schedulers
++ */
++#define CONFIG_IOSCHED_NOOP 1
++#define CONFIG_IOSCHED_AS 1
++#define CONFIG_IOSCHED_DEADLINE 1
++#define CONFIG_IOSCHED_CFQ 1
++#define CONFIG_ATA_OVER_ETH_MODULE 1
++
++/*
++ * ATA/ATAPI/MFM/RLL support
++ */
++#define CONFIG_IDE 1
++#define CONFIG_BLK_DEV_IDE 1
++
++/*
++ * Please see Documentation/ide.txt for help/info on IDE drives
++ */
++#undef CONFIG_BLK_DEV_IDE_SATA
++#undef CONFIG_BLK_DEV_HD_IDE
++#define CONFIG_BLK_DEV_IDEDISK 1
++#define CONFIG_IDEDISK_MULTI_MODE 1
++#define CONFIG_BLK_DEV_IDECS_MODULE 1
++#define CONFIG_BLK_DEV_IDECD 1
++#define CONFIG_BLK_DEV_IDETAPE_MODULE 1
++#define CONFIG_BLK_DEV_IDEFLOPPY 1
++#define CONFIG_BLK_DEV_IDESCSI_MODULE 1
++#undef CONFIG_IDE_TASK_IOCTL
++
++/*
++ * IDE chipset support/bugfixes
++ */
++#define CONFIG_IDE_GENERIC 1
++#define CONFIG_BLK_DEV_CMD640 1
++#define CONFIG_BLK_DEV_CMD640_ENHANCED 1
++#define CONFIG_BLK_DEV_IDEPNP 1
++#define CONFIG_BLK_DEV_IDEPCI 1
++#define CONFIG_IDEPCI_SHARE_IRQ 1
++#undef CONFIG_BLK_DEV_OFFBOARD
++#define CONFIG_BLK_DEV_GENERIC 1
++#define CONFIG_BLK_DEV_OPTI621_MODULE 1
++#define CONFIG_BLK_DEV_RZ1000 1
++#define CONFIG_BLK_DEV_IDEDMA_PCI 1
++#undef CONFIG_BLK_DEV_IDEDMA_FORCED
++#define CONFIG_IDEDMA_PCI_AUTO 1
++#undef CONFIG_IDEDMA_ONLYDISK
++#define CONFIG_BLK_DEV_AEC62XX 1
++#define CONFIG_BLK_DEV_ALI15X3 1
++#undef CONFIG_WDC_ALI15X3
++#define CONFIG_BLK_DEV_AMD74XX 1
++#define CONFIG_BLK_DEV_ATIIXP 1
++#define CONFIG_BLK_DEV_CMD64X 1
++#define CONFIG_BLK_DEV_TRIFLEX 1
++#define CONFIG_BLK_DEV_CY82C693 1
++#define CONFIG_BLK_DEV_CS5520 1
++#define CONFIG_BLK_DEV_CS5530 1
++#define CONFIG_BLK_DEV_HPT34X 1
++#undef CONFIG_HPT34X_AUTODMA
++#define CONFIG_BLK_DEV_HPT366 1
++#define CONFIG_BLK_DEV_SC1200_MODULE 1
++#define CONFIG_BLK_DEV_PIIX 1
++#define CONFIG_BLK_DEV_NS87415_MODULE 1
++#define CONFIG_BLK_DEV_PDC202XX_OLD 1
++#define CONFIG_PDC202XX_BURST 1
++#define CONFIG_BLK_DEV_PDC202XX_NEW 1
++#define CONFIG_PDC202XX_FORCE 1
++#define CONFIG_BLK_DEV_SVWKS 1
++#define CONFIG_BLK_DEV_SIIMAGE 1
++#define CONFIG_BLK_DEV_SIS5513 1
++#define CONFIG_BLK_DEV_SLC90E66 1
++#define CONFIG_BLK_DEV_TRM290_MODULE 1
++#define CONFIG_BLK_DEV_VIA82CXXX 1
++#undef CONFIG_IDE_ARM
++#undef CONFIG_IDE_CHIPSETS
++#define CONFIG_BLK_DEV_IDEDMA 1
++#undef CONFIG_IDEDMA_IVB
++#define CONFIG_IDEDMA_AUTO 1
++#undef CONFIG_BLK_DEV_HD
++
++/*
++ * SCSI device support
++ */
++#define CONFIG_SCSI_MODULE 1
++#define CONFIG_SCSI_PROC_FS 1
++
++/*
++ * SCSI support type (disk, tape, CD-ROM)
++ */
++#define CONFIG_BLK_DEV_SD_MODULE 1
++#define CONFIG_CHR_DEV_ST_MODULE 1
++#define CONFIG_CHR_DEV_OSST_MODULE 1
++#define CONFIG_BLK_DEV_SR_MODULE 1
++#undef CONFIG_BLK_DEV_SR_VENDOR
++#define CONFIG_CHR_DEV_SG_MODULE 1
++
++/*
++ * Some SCSI devices (e.g. CD jukebox) support multiple LUNs
++ */
++#define CONFIG_SCSI_MULTI_LUN 1
++#define CONFIG_SCSI_CONSTANTS 1
++#define CONFIG_SCSI_LOGGING 1
++
++/*
++ * SCSI Transport Attributes
++ */
++#define CONFIG_SCSI_SPI_ATTRS_MODULE 1
++#define CONFIG_SCSI_FC_ATTRS_MODULE 1
++#undef CONFIG_SCSI_ISCSI_ATTRS
++
++/*
++ * SCSI low-level drivers
++ */
++#define CONFIG_BLK_DEV_3W_XXXX_RAID_MODULE 1
++#define CONFIG_SCSI_3W_9XXX_MODULE 1
++#undef CONFIG_SCSI_7000FASST
++#define CONFIG_SCSI_ACARD_MODULE 1
++#define CONFIG_SCSI_AHA152X_MODULE 1
++#undef CONFIG_SCSI_AHA1542
++#define CONFIG_SCSI_AACRAID_MODULE 1
++#define CONFIG_SCSI_AIC7XXX_MODULE 1
++#define CONFIG_AIC7XXX_CMDS_PER_DEVICE 8
++#define CONFIG_AIC7XXX_RESET_DELAY_MS 15000
++#define CONFIG_AIC7XXX_DEBUG_ENABLE 1
++#define CONFIG_AIC7XXX_DEBUG_MASK 0
++#define CONFIG_AIC7XXX_REG_PRETTY_PRINT 1
++#define CONFIG_SCSI_AIC7XXX_OLD_MODULE 1
++#define CONFIG_SCSI_AIC79XX_MODULE 1
++#define CONFIG_AIC79XX_CMDS_PER_DEVICE 32
++#define CONFIG_AIC79XX_RESET_DELAY_MS 15000
++#define CONFIG_AIC79XX_ENABLE_RD_STRM 1
++#define CONFIG_AIC79XX_DEBUG_ENABLE 1
++#define CONFIG_AIC79XX_DEBUG_MASK 0
++#define CONFIG_AIC79XX_REG_PRETTY_PRINT 1
++#define CONFIG_SCSI_DPT_I2O_MODULE 1
++#define CONFIG_SCSI_ADVANSYS_MODULE 1
++#define CONFIG_SCSI_IN2000_MODULE 1
++#define CONFIG_MEGARAID_NEWGEN 1
++#define CONFIG_MEGARAID_MM_MODULE 1
++#define CONFIG_MEGARAID_MAILBOX_MODULE 1
++#define CONFIG_SCSI_SATA 1
++#define CONFIG_SCSI_SATA_AHCI_MODULE 1
++#define CONFIG_SCSI_SATA_SVW_MODULE 1
++#define CONFIG_SCSI_ATA_PIIX_MODULE 1
++#define CONFIG_SCSI_SATA_NV_MODULE 1
++#define CONFIG_SCSI_SATA_PROMISE_MODULE 1
++#undef CONFIG_SCSI_SATA_QSTOR
++#define CONFIG_SCSI_SATA_SX4_MODULE 1
++#define CONFIG_SCSI_SATA_SIL_MODULE 1
++#define CONFIG_SCSI_SATA_SIS_MODULE 1
++#define CONFIG_SCSI_SATA_ULI_MODULE 1
++#define CONFIG_SCSI_SATA_VIA_MODULE 1
++#define CONFIG_SCSI_SATA_VITESSE_MODULE 1
++#define CONFIG_SCSI_BUSLOGIC_MODULE 1
++#undef CONFIG_SCSI_OMIT_FLASHPOINT
++#undef CONFIG_SCSI_CPQFCTS
++#define CONFIG_SCSI_DMX3191D_MODULE 1
++#define CONFIG_SCSI_DTC3280_MODULE 1
++#define CONFIG_SCSI_EATA_MODULE 1
++#define CONFIG_SCSI_EATA_TAGGED_QUEUE 1
++#define CONFIG_SCSI_EATA_LINKED_COMMANDS 1
++#define CONFIG_SCSI_EATA_MAX_TAGS 16
++#define CONFIG_SCSI_EATA_PIO_MODULE 1
++#define CONFIG_SCSI_FUTURE_DOMAIN_MODULE 1
++#define CONFIG_SCSI_GDTH_MODULE 1
++#define CONFIG_SCSI_GENERIC_NCR5380_MODULE 1
++#define CONFIG_SCSI_GENERIC_NCR5380_MMIO_MODULE 1
++#define CONFIG_SCSI_GENERIC_NCR53C400 1
++#define CONFIG_SCSI_IPS_MODULE 1
++#undef CONFIG_SCSI_INITIO
++#undef CONFIG_SCSI_INIA100
++#define CONFIG_SCSI_PPA_MODULE 1
++#define CONFIG_SCSI_IMM_MODULE 1
++#undef CONFIG_SCSI_IZIP_EPP16
++#undef CONFIG_SCSI_IZIP_SLOW_CTR
++#define CONFIG_SCSI_NCR53C406A_MODULE 1
++#define CONFIG_SCSI_SYM53C8XX_2_MODULE 1
++#define CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE 1
++#define CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS 16
++#define CONFIG_SCSI_SYM53C8XX_MAX_TAGS 64
++#undef CONFIG_SCSI_SYM53C8XX_IOMAPPED
++#define CONFIG_SCSI_IPR_MODULE 1
++#undef CONFIG_SCSI_IPR_TRACE
++#undef CONFIG_SCSI_IPR_DUMP
++#define CONFIG_SCSI_PAS16_MODULE 1
++#undef CONFIG_SCSI_PCI2000
++#undef CONFIG_SCSI_PCI2220I
++#define CONFIG_SCSI_PSI240I_MODULE 1
++#define CONFIG_SCSI_QLOGIC_FAS_MODULE 1
++#define CONFIG_SCSI_QLOGIC_ISP_MODULE 1
++#define CONFIG_SCSI_QLOGIC_FC_MODULE 1
++#define CONFIG_SCSI_QLOGIC_FC_FIRMWARE 1
++#define CONFIG_SCSI_QLOGIC_1280_MODULE 1
++#define CONFIG_SCSI_QLOGIC_1280_1040 1
++#define CONFIG_SCSI_QLA2XXX_MODULE 1
++#define CONFIG_SCSI_QLA21XX_MODULE 1
++#define CONFIG_SCSI_QLA22XX_MODULE 1
++#define CONFIG_SCSI_QLA2300_MODULE 1
++#define CONFIG_SCSI_QLA2322_MODULE 1
++#define CONFIG_SCSI_QLA6312_MODULE 1
++#define CONFIG_SCSI_LPFC_MODULE 1
++#undef CONFIG_SCSI_SEAGATE
++#define CONFIG_SCSI_SYM53C416_MODULE 1
++#define CONFIG_SCSI_DC395x_MODULE 1
++#define CONFIG_SCSI_DC390T_MODULE 1
++#define CONFIG_SCSI_T128_MODULE 1
++#define CONFIG_SCSI_U14_34F_MODULE 1
++#define CONFIG_SCSI_U14_34F_TAGGED_QUEUE 1
++#define CONFIG_SCSI_U14_34F_LINKED_COMMANDS 1
++#define CONFIG_SCSI_U14_34F_MAX_TAGS 8
++#undef CONFIG_SCSI_ULTRASTOR
++#define CONFIG_SCSI_NSP32_MODULE 1
++#define CONFIG_SCSI_DEBUG_MODULE 1
++
++/*
++ * PCMCIA SCSI adapter support
++ */
++#define CONFIG_PCMCIA_AHA152X_MODULE 1
++#define CONFIG_PCMCIA_FDOMAIN_MODULE 1
++#define CONFIG_PCMCIA_NINJA_SCSI_MODULE 1
++#define CONFIG_PCMCIA_QLOGIC_MODULE 1
++#define CONFIG_PCMCIA_SYM53C500_MODULE 1
++
++/*
++ * Old CD-ROM drivers (not SCSI, not IDE)
++ */
++#define CONFIG_CD_NO_IDESCSI 1
++#define CONFIG_AZTCD_MODULE 1
++#define CONFIG_GSCD_MODULE 1
++#undef CONFIG_SBPCD
++#define CONFIG_MCDX_MODULE 1
++#define CONFIG_OPTCD_MODULE 1
++#undef CONFIG_CM206
++#define CONFIG_SJCD_MODULE 1
++#define CONFIG_ISP16_CDI_MODULE 1
++#define CONFIG_CDU31A_MODULE 1
++#define CONFIG_CDU535_MODULE 1
++
++/*
++ * Multi-device support (RAID and LVM)
++ */
++#define CONFIG_MD 1
++#define CONFIG_BLK_DEV_MD_MODULE 1
++#define CONFIG_MD_LINEAR_MODULE 1
++#define CONFIG_MD_RAID0_MODULE 1
++#define CONFIG_MD_RAID1_MODULE 1
++#define CONFIG_MD_RAID10_MODULE 1
++#define CONFIG_MD_RAID5_MODULE 1
++#define CONFIG_MD_RAID6_MODULE 1
++#define CONFIG_MD_MULTIPATH_MODULE 1
++#define CONFIG_MD_FAULTY_MODULE 1
++#define CONFIG_BLK_DEV_DM_MODULE 1
++#define CONFIG_DM_CRYPT_MODULE 1
++#define CONFIG_DM_SNAPSHOT_MODULE 1
++#define CONFIG_DM_MIRROR_MODULE 1
++#define CONFIG_DM_ZERO_MODULE 1
++#define CONFIG_DM_MULTIPATH_MODULE 1
++#define CONFIG_DM_MULTIPATH_EMC_MODULE 1
++
++/*
++ * Fusion MPT device support
++ */
++#define CONFIG_FUSION_MODULE 1
++#define CONFIG_FUSION_MAX_SGE 40
++#define CONFIG_FUSION_CTL_MODULE 1
++#define CONFIG_FUSION_LAN_MODULE 1
++
++/*
++ * IEEE 1394 (FireWire) support
++ */
++#define CONFIG_IEEE1394_MODULE 1
++
++/*
++ * Subsystem Options
++ */
++#undef CONFIG_IEEE1394_VERBOSEDEBUG
++#undef CONFIG_IEEE1394_OUI_DB
++#define CONFIG_IEEE1394_EXTRA_CONFIG_ROMS 1
++#define CONFIG_IEEE1394_CONFIG_ROM_IP1394 1
++
++/*
++ * Device Drivers
++ */
++#define CONFIG_IEEE1394_PCILYNX_MODULE 1
++#define CONFIG_IEEE1394_OHCI1394_MODULE 1
++
++/*
++ * Protocol Drivers
++ */
++#define CONFIG_IEEE1394_VIDEO1394_MODULE 1
++#define CONFIG_IEEE1394_SBP2_MODULE 1
++#undef CONFIG_IEEE1394_SBP2_PHYS_DMA
++#define CONFIG_IEEE1394_ETH1394_MODULE 1
++#define CONFIG_IEEE1394_DV1394_MODULE 1
++#define CONFIG_IEEE1394_RAWIO_MODULE 1
++#define CONFIG_IEEE1394_CMP_MODULE 1
++#define CONFIG_IEEE1394_AMDTP_MODULE 1
++
++/*
++ * I2O device support
++ */
++#define CONFIG_I2O_MODULE 1
++#define CONFIG_I2O_CONFIG_MODULE 1
++#define CONFIG_I2O_BLOCK_MODULE 1
++#define CONFIG_I2O_SCSI_MODULE 1
++#define CONFIG_I2O_PROC_MODULE 1
++
++/*
++ * Networking support
++ */
++#define CONFIG_NET 1
++
++/*
++ * Networking options
++ */
++#define CONFIG_PACKET_MODULE 1
++#define CONFIG_PACKET_MMAP 1
++#define CONFIG_UNIX_MODULE 1
++#define CONFIG_NET_KEY_MODULE 1
++#define CONFIG_INET 1
++#define CONFIG_IP_MULTICAST 1
++#define CONFIG_IP_ADVANCED_ROUTER 1
++#define CONFIG_IP_MULTIPLE_TABLES 1
++#define CONFIG_IP_ROUTE_FWMARK 1
++#define CONFIG_IP_ROUTE_MULTIPATH 1
++#undef CONFIG_IP_ROUTE_MULTIPATH_CACHED
++#define CONFIG_IP_ROUTE_VERBOSE 1
++#undef CONFIG_IP_PNP
++#define CONFIG_NET_IPIP_MODULE 1
++#define CONFIG_NET_IPGRE_MODULE 1
++#define CONFIG_NET_IPGRE_BROADCAST 1
++#define CONFIG_IP_MROUTE 1
++#define CONFIG_IP_PIMSM_V1 1
++#define CONFIG_IP_PIMSM_V2 1
++#undef CONFIG_ARPD
++#define CONFIG_SYN_COOKIES 1
++#define CONFIG_INET_AH_MODULE 1
++#define CONFIG_INET_ESP_MODULE 1
++#define CONFIG_INET_IPCOMP_MODULE 1
++#define CONFIG_INET_TUNNEL_MODULE 1
++#define CONFIG_IP_TCPDIAG_MODULE 1
++#define CONFIG_IP_TCPDIAG_IPV6 1
++
++/*
++ * IP: Virtual Server Configuration
++ */
++#define CONFIG_IP_VS_MODULE 1
++#undef CONFIG_IP_VS_DEBUG
++#define CONFIG_IP_VS_TAB_BITS 12
++
++/*
++ * IPVS transport protocol load balancing support
++ */
++#define CONFIG_IP_VS_PROTO_TCP 1
++#define CONFIG_IP_VS_PROTO_UDP 1
++#define CONFIG_IP_VS_PROTO_ESP 1
++#define CONFIG_IP_VS_PROTO_AH 1
++
++/*
++ * IPVS scheduler
++ */
++#define CONFIG_IP_VS_RR_MODULE 1
++#define CONFIG_IP_VS_WRR_MODULE 1
++#define CONFIG_IP_VS_LC_MODULE 1
++#define CONFIG_IP_VS_WLC_MODULE 1
++#define CONFIG_IP_VS_LBLC_MODULE 1
++#define CONFIG_IP_VS_LBLCR_MODULE 1
++#define CONFIG_IP_VS_DH_MODULE 1
++#define CONFIG_IP_VS_SH_MODULE 1
++#define CONFIG_IP_VS_SED_MODULE 1
++#define CONFIG_IP_VS_NQ_MODULE 1
++
++/*
++ * IPVS application helper
++ */
++#define CONFIG_IP_VS_FTP_MODULE 1
++#define CONFIG_IPV6_MODULE 1
++#define CONFIG_IPV6_PRIVACY 1
++#define CONFIG_INET6_AH_MODULE 1
++#define CONFIG_INET6_ESP_MODULE 1
++#define CONFIG_INET6_IPCOMP_MODULE 1
++#define CONFIG_INET6_TUNNEL_MODULE 1
++#define CONFIG_IPV6_TUNNEL_MODULE 1
++#define CONFIG_NETFILTER 1
++#undef CONFIG_NETFILTER_DEBUG
++#define CONFIG_BRIDGE_NETFILTER 1
++
++/*
++ * IP: Netfilter Configuration
++ */
++#define CONFIG_IP_NF_CONNTRACK_MODULE 1
++#define CONFIG_IP_NF_CT_ACCT 1
++#define CONFIG_IP_NF_CONNTRACK_MARK 1
++#define CONFIG_IP_NF_CT_PROTO_SCTP_MODULE 1
++#define CONFIG_IP_NF_FTP_MODULE 1
++#define CONFIG_IP_NF_IRC_MODULE 1
++#define CONFIG_IP_NF_TFTP_MODULE 1
++#define CONFIG_IP_NF_AMANDA_MODULE 1
++#define CONFIG_IP_NF_QUEUE_MODULE 1
++#define CONFIG_IP_NF_IPTABLES_MODULE 1
++#define CONFIG_IP_NF_MATCH_LIMIT_MODULE 1
++#define CONFIG_IP_NF_MATCH_IPRANGE_MODULE 1
++#define CONFIG_IP_NF_MATCH_MAC_MODULE 1
++#define CONFIG_IP_NF_MATCH_PKTTYPE_MODULE 1
++#define CONFIG_IP_NF_MATCH_MARK_MODULE 1
++#define CONFIG_IP_NF_MATCH_MULTIPORT_MODULE 1
++#define CONFIG_IP_NF_MATCH_TOS_MODULE 1
++#define CONFIG_IP_NF_MATCH_RECENT_MODULE 1
++#define CONFIG_IP_NF_MATCH_ECN_MODULE 1
++#define CONFIG_IP_NF_MATCH_DSCP_MODULE 1
++#define CONFIG_IP_NF_MATCH_AH_ESP_MODULE 1
++#define CONFIG_IP_NF_MATCH_LENGTH_MODULE 1
++#define CONFIG_IP_NF_MATCH_TTL_MODULE 1
++#define CONFIG_IP_NF_MATCH_TCPMSS_MODULE 1
++#define CONFIG_IP_NF_MATCH_HELPER_MODULE 1
++#define CONFIG_IP_NF_MATCH_STATE_MODULE 1
++#define CONFIG_IP_NF_MATCH_CONNTRACK_MODULE 1
++#define CONFIG_IP_NF_MATCH_OWNER_MODULE 1
++#define CONFIG_IP_NF_MATCH_PHYSDEV_MODULE 1
++#define CONFIG_IP_NF_MATCH_ADDRTYPE_MODULE 1
++#define CONFIG_IP_NF_MATCH_REALM_MODULE 1
++#define CONFIG_IP_NF_MATCH_SCTP_MODULE 1
++#define CONFIG_IP_NF_MATCH_COMMENT_MODULE 1
++#define CONFIG_IP_NF_MATCH_CONNMARK_MODULE 1
++#define CONFIG_IP_NF_MATCH_HASHLIMIT_MODULE 1
++#define CONFIG_IP_NF_FILTER_MODULE 1
++#define CONFIG_IP_NF_TARGET_REJECT_MODULE 1
++#define CONFIG_IP_NF_TARGET_LOG_MODULE 1
++#define CONFIG_IP_NF_TARGET_ULOG_MODULE 1
++#define CONFIG_IP_NF_TARGET_TCPMSS_MODULE 1
++#define CONFIG_IP_NF_NAT_MODULE 1
++#define CONFIG_IP_NF_NAT_NEEDED 1
++#define CONFIG_IP_NF_TARGET_MASQUERADE_MODULE 1
++#define CONFIG_IP_NF_TARGET_REDIRECT_MODULE 1
++#define CONFIG_IP_NF_TARGET_NETMAP_MODULE 1
++#define CONFIG_IP_NF_TARGET_SAME_MODULE 1
++#define CONFIG_IP_NF_NAT_SNMP_BASIC_MODULE 1
++#define CONFIG_IP_NF_NAT_IRC_MODULE 1
++#define CONFIG_IP_NF_NAT_FTP_MODULE 1
++#define CONFIG_IP_NF_NAT_TFTP_MODULE 1
++#define CONFIG_IP_NF_NAT_AMANDA_MODULE 1
++#define CONFIG_IP_NF_MANGLE_MODULE 1
++#define CONFIG_IP_NF_TARGET_TOS_MODULE 1
++#define CONFIG_IP_NF_TARGET_ECN_MODULE 1
++#define CONFIG_IP_NF_TARGET_DSCP_MODULE 1
++#define CONFIG_IP_NF_TARGET_MARK_MODULE 1
++#define CONFIG_IP_NF_TARGET_CLASSIFY_MODULE 1
++#define CONFIG_IP_NF_TARGET_CONNMARK_MODULE 1
++#define CONFIG_IP_NF_TARGET_CLUSTERIP_MODULE 1
++#define CONFIG_IP_NF_RAW_MODULE 1
++#define CONFIG_IP_NF_TARGET_NOTRACK_MODULE 1
++#define CONFIG_IP_NF_ARPTABLES_MODULE 1
++#define CONFIG_IP_NF_ARPFILTER_MODULE 1
++#define CONFIG_IP_NF_ARP_MANGLE_MODULE 1
++
++/*
++ * IPv6: Netfilter Configuration (EXPERIMENTAL)
++ */
++#define CONFIG_IP6_NF_QUEUE_MODULE 1
++#define CONFIG_IP6_NF_IPTABLES_MODULE 1
++#define CONFIG_IP6_NF_MATCH_LIMIT_MODULE 1
++#define CONFIG_IP6_NF_MATCH_MAC_MODULE 1
++#define CONFIG_IP6_NF_MATCH_RT_MODULE 1
++#define CONFIG_IP6_NF_MATCH_OPTS_MODULE 1
++#define CONFIG_IP6_NF_MATCH_FRAG_MODULE 1
++#define CONFIG_IP6_NF_MATCH_HL_MODULE 1
++#define CONFIG_IP6_NF_MATCH_MULTIPORT_MODULE 1
++#define CONFIG_IP6_NF_MATCH_OWNER_MODULE 1
++#define CONFIG_IP6_NF_MATCH_MARK_MODULE 1
++#define CONFIG_IP6_NF_MATCH_IPV6HEADER_MODULE 1
++#define CONFIG_IP6_NF_MATCH_AHESP_MODULE 1
++#define CONFIG_IP6_NF_MATCH_LENGTH_MODULE 1
++#define CONFIG_IP6_NF_MATCH_EUI64_MODULE 1
++#define CONFIG_IP6_NF_MATCH_PHYSDEV_MODULE 1
++#define CONFIG_IP6_NF_FILTER_MODULE 1
++#define CONFIG_IP6_NF_TARGET_LOG_MODULE 1
++#define CONFIG_IP6_NF_MANGLE_MODULE 1
++#define CONFIG_IP6_NF_TARGET_MARK_MODULE 1
++#define CONFIG_IP6_NF_RAW_MODULE 1
++
++/*
++ * DECnet: Netfilter Configuration
++ */
++#define CONFIG_DECNET_NF_GRABULATOR_MODULE 1
++
++/*
++ * Bridge: Netfilter Configuration
++ */
++#define CONFIG_BRIDGE_NF_EBTABLES_MODULE 1
++#define CONFIG_BRIDGE_EBT_BROUTE_MODULE 1
++#define CONFIG_BRIDGE_EBT_T_FILTER_MODULE 1
++#define CONFIG_BRIDGE_EBT_T_NAT_MODULE 1
++#define CONFIG_BRIDGE_EBT_802_3_MODULE 1
++#define CONFIG_BRIDGE_EBT_AMONG_MODULE 1
++#define CONFIG_BRIDGE_EBT_ARP_MODULE 1
++#define CONFIG_BRIDGE_EBT_IP_MODULE 1
++#define CONFIG_BRIDGE_EBT_LIMIT_MODULE 1
++#define CONFIG_BRIDGE_EBT_MARK_MODULE 1
++#define CONFIG_BRIDGE_EBT_PKTTYPE_MODULE 1
++#define CONFIG_BRIDGE_EBT_STP_MODULE 1
++#define CONFIG_BRIDGE_EBT_VLAN_MODULE 1
++#define CONFIG_BRIDGE_EBT_ARPREPLY_MODULE 1
++#define CONFIG_BRIDGE_EBT_DNAT_MODULE 1
++#define CONFIG_BRIDGE_EBT_MARK_T_MODULE 1
++#define CONFIG_BRIDGE_EBT_REDIRECT_MODULE 1
++#define CONFIG_BRIDGE_EBT_SNAT_MODULE 1
++#define CONFIG_BRIDGE_EBT_LOG_MODULE 1
++#undef CONFIG_BRIDGE_EBT_ULOG
++#define CONFIG_XFRM 1
++#define CONFIG_XFRM_USER_MODULE 1
++
++/*
++ * SCTP Configuration (EXPERIMENTAL)
++ */
++#define CONFIG_IP_SCTP_MODULE 1
++#undef CONFIG_SCTP_DBG_MSG
++#undef CONFIG_SCTP_DBG_OBJCNT
++#undef CONFIG_SCTP_HMAC_NONE
++#undef CONFIG_SCTP_HMAC_SHA1
++#define CONFIG_SCTP_HMAC_MD5 1
++#define CONFIG_ATM 1
++#define CONFIG_ATM_CLIP 1
++#undef CONFIG_ATM_CLIP_NO_ICMP
++#define CONFIG_ATM_LANE_MODULE 1
++#define CONFIG_ATM_MPOA_MODULE 1
++#define CONFIG_ATM_BR2684_MODULE 1
++#undef CONFIG_ATM_BR2684_IPFILTER
++#define CONFIG_BRIDGE_MODULE 1
++#define CONFIG_VLAN_8021Q_MODULE 1
++#define CONFIG_DECNET_MODULE 1
++#undef CONFIG_DECNET_ROUTER
++#define CONFIG_LLC 1
++#define CONFIG_LLC2_MODULE 1
++#define CONFIG_IPX_MODULE 1
++#undef CONFIG_IPX_INTERN
++#define CONFIG_ATALK_MODULE 1
++#define CONFIG_DEV_APPLETALK 1
++#define CONFIG_LTPC_MODULE 1
++#define CONFIG_COPS_MODULE 1
++#define CONFIG_COPS_DAYNA 1
++#define CONFIG_COPS_TANGENT 1
++#define CONFIG_IPDDP_MODULE 1
++#define CONFIG_IPDDP_ENCAP 1
++#define CONFIG_IPDDP_DECAP 1
++#define CONFIG_X25_MODULE 1
++#define CONFIG_LAPB_MODULE 1
++#undef CONFIG_NET_DIVERT
++#define CONFIG_ECONET_MODULE 1
++#define CONFIG_ECONET_AUNUDP 1
++#define CONFIG_ECONET_NATIVE 1
++#define CONFIG_WAN_ROUTER_MODULE 1
++
++/*
++ * QoS and/or fair queueing
++ */
++#define CONFIG_NET_SCHED 1
++#define CONFIG_NET_SCH_CLK_JIFFIES 1
++#undef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
++#undef CONFIG_NET_SCH_CLK_CPU
++#define CONFIG_NET_SCH_CBQ_MODULE 1
++#define CONFIG_NET_SCH_HTB_MODULE 1
++#define CONFIG_NET_SCH_HFSC_MODULE 1
++#define CONFIG_NET_SCH_ATM_MODULE 1
++#define CONFIG_NET_SCH_PRIO_MODULE 1
++#define CONFIG_NET_SCH_RED_MODULE 1
++#define CONFIG_NET_SCH_SFQ_MODULE 1
++#define CONFIG_NET_SCH_TEQL_MODULE 1
++#define CONFIG_NET_SCH_TBF_MODULE 1
++#define CONFIG_NET_SCH_GRED_MODULE 1
++#define CONFIG_NET_SCH_DSMARK_MODULE 1
++#define CONFIG_NET_SCH_NETEM_MODULE 1
++#define CONFIG_NET_SCH_INGRESS_MODULE 1
++#define CONFIG_NET_QOS 1
++#define CONFIG_NET_ESTIMATOR 1
++#define CONFIG_NET_CLS 1
++#define CONFIG_NET_CLS_BASIC_MODULE 1
++#define CONFIG_NET_CLS_TCINDEX_MODULE 1
++#define CONFIG_NET_CLS_ROUTE4_MODULE 1
++#define CONFIG_NET_CLS_ROUTE 1
++#define CONFIG_NET_CLS_FW_MODULE 1
++#define CONFIG_NET_CLS_U32_MODULE 1
++#undef CONFIG_CLS_U32_PERF
++#undef CONFIG_NET_CLS_IND
++#undef CONFIG_CLS_U32_MARK
++#define CONFIG_NET_CLS_RSVP_MODULE 1
++#define CONFIG_NET_CLS_RSVP6_MODULE 1
++#define CONFIG_NET_EMATCH 1
++#define CONFIG_NET_EMATCH_STACK 32
++#define CONFIG_NET_EMATCH_CMP_MODULE 1
++#define CONFIG_NET_EMATCH_NBYTE_MODULE 1
++#define CONFIG_NET_EMATCH_U32_MODULE 1
++#define CONFIG_NET_EMATCH_META_MODULE 1
++#undef CONFIG_NET_CLS_ACT
++#define CONFIG_NET_CLS_POLICE 1
++
++/*
++ * Network testing
++ */
++#define CONFIG_NET_PKTGEN_MODULE 1
++#define CONFIG_NETPOLL 1
++#undef CONFIG_NETPOLL_RX
++#undef CONFIG_NETPOLL_TRAP
++#define CONFIG_NET_POLL_CONTROLLER 1
++#define CONFIG_HAMRADIO 1
++
++/*
++ * Packet Radio protocols
++ */
++#define CONFIG_AX25_MODULE 1
++#undef CONFIG_AX25_DAMA_SLAVE
++#define CONFIG_NETROM_MODULE 1
++#define CONFIG_ROSE_MODULE 1
++
++/*
++ * AX.25 network device drivers
++ */
++#define CONFIG_MKISS_MODULE 1
++#define CONFIG_6PACK_MODULE 1
++#define CONFIG_BPQETHER_MODULE 1
++#undef CONFIG_DMASCC
++#define CONFIG_SCC_MODULE 1
++#undef CONFIG_SCC_DELAY
++#undef CONFIG_SCC_TRXECHO
++#define CONFIG_BAYCOM_SER_FDX_MODULE 1
++#define CONFIG_BAYCOM_SER_HDX_MODULE 1
++#define CONFIG_BAYCOM_PAR_MODULE 1
++#define CONFIG_BAYCOM_EPP_MODULE 1
++#define CONFIG_YAM_MODULE 1
++#define CONFIG_IRDA_MODULE 1
++
++/*
++ * IrDA protocols
++ */
++#define CONFIG_IRLAN_MODULE 1
++#define CONFIG_IRNET_MODULE 1
++#define CONFIG_IRCOMM_MODULE 1
++#undef CONFIG_IRDA_ULTRA
++
++/*
++ * IrDA options
++ */
++#define CONFIG_IRDA_CACHE_LAST_LSAP 1
++#define CONFIG_IRDA_FAST_RR 1
++#define CONFIG_IRDA_DEBUG 1
++
++/*
++ * Infrared-port device drivers
++ */
++
++/*
++ * SIR device drivers
++ */
++#define CONFIG_IRTTY_SIR_MODULE 1
++
++/*
++ * Dongle support
++ */
++#define CONFIG_DONGLE 1
++#define CONFIG_ESI_DONGLE_MODULE 1
++#define CONFIG_ACTISYS_DONGLE_MODULE 1
++#define CONFIG_TEKRAM_DONGLE_MODULE 1
++#define CONFIG_LITELINK_DONGLE_MODULE 1
++#define CONFIG_MA600_DONGLE_MODULE 1
++#define CONFIG_GIRBIL_DONGLE_MODULE 1
++#define CONFIG_MCP2120_DONGLE_MODULE 1
++#define CONFIG_OLD_BELKIN_DONGLE_MODULE 1
++#define CONFIG_ACT200L_DONGLE_MODULE 1
++
++/*
++ * Old SIR device drivers
++ */
++#define CONFIG_IRPORT_SIR_MODULE 1
++
++/*
++ * Old Serial dongle support
++ */
++#undef CONFIG_DONGLE_OLD
++
++/*
++ * FIR device drivers
++ */
++#define CONFIG_USB_IRDA_MODULE 1
++#define CONFIG_SIGMATEL_FIR_MODULE 1
++#define CONFIG_NSC_FIR_MODULE 1
++#define CONFIG_WINBOND_FIR_MODULE 1
++#undef CONFIG_TOSHIBA_FIR
++#define CONFIG_SMC_IRCC_FIR_MODULE 1
++#define CONFIG_ALI_FIR_MODULE 1
++#define CONFIG_VLSI_FIR_MODULE 1
++#define CONFIG_VIA_FIR_MODULE 1
++#define CONFIG_BT_MODULE 1
++#define CONFIG_BT_L2CAP_MODULE 1
++#define CONFIG_BT_SCO_MODULE 1
++#define CONFIG_BT_RFCOMM_MODULE 1
++#define CONFIG_BT_RFCOMM_TTY 1
++#define CONFIG_BT_BNEP_MODULE 1
++#define CONFIG_BT_BNEP_MC_FILTER 1
++#define CONFIG_BT_BNEP_PROTO_FILTER 1
++#define CONFIG_BT_CMTP_MODULE 1
++#define CONFIG_BT_HIDP_MODULE 1
++
++/*
++ * Bluetooth device drivers
++ */
++#define CONFIG_BT_HCIUSB_MODULE 1
++#define CONFIG_BT_HCIUSB_SCO 1
++#define CONFIG_BT_HCIUART_MODULE 1
++#define CONFIG_BT_HCIUART_H4 1
++#define CONFIG_BT_HCIUART_BCSP 1
++#undef CONFIG_BT_HCIUART_BCSP_TXCRC
++#define CONFIG_BT_HCIBCM203X_MODULE 1
++#undef CONFIG_BT_HCIBPA10X
++#define CONFIG_BT_HCIBFUSB_MODULE 1
++#define CONFIG_BT_HCIDTL1_MODULE 1
++#define CONFIG_BT_HCIBT3C_MODULE 1
++#define CONFIG_BT_HCIBLUECARD_MODULE 1
++#define CONFIG_BT_HCIBTUART_MODULE 1
++#define CONFIG_BT_HCIVHCI_MODULE 1
++#define CONFIG_NETDEVICES 1
++#define CONFIG_DUMMY_MODULE 1
++#define CONFIG_BONDING_MODULE 1
++#define CONFIG_EQUALIZER_MODULE 1
++#define CONFIG_TUN_MODULE 1
++#define CONFIG_NET_SB1000_MODULE 1
++
++/*
++ * ARCnet devices
++ */
++#define CONFIG_ARCNET_MODULE 1
++#define CONFIG_ARCNET_1201_MODULE 1
++#define CONFIG_ARCNET_1051_MODULE 1
++#define CONFIG_ARCNET_RAW_MODULE 1
++#undef CONFIG_ARCNET_CAP
++#define CONFIG_ARCNET_COM90xx_MODULE 1
++#define CONFIG_ARCNET_COM90xxIO_MODULE 1
++#define CONFIG_ARCNET_RIM_I_MODULE 1
++#define CONFIG_ARCNET_COM20020_MODULE 1
++#define CONFIG_ARCNET_COM20020_ISA_MODULE 1
++#define CONFIG_ARCNET_COM20020_PCI_MODULE 1
++
++/*
++ * Ethernet (10 or 100Mbit)
++ */
++#define CONFIG_NET_ETHERNET 1
++#define CONFIG_MII_MODULE 1
++#define CONFIG_HAPPYMEAL_MODULE 1
++#define CONFIG_SUNGEM_MODULE 1
++#define CONFIG_NET_VENDOR_3COM 1
++#define CONFIG_EL1_MODULE 1
++#define CONFIG_EL2_MODULE 1
++#undef CONFIG_ELPLUS
++#define CONFIG_EL16_MODULE 1
++#define CONFIG_EL3_MODULE 1
++#undef CONFIG_3C515
++#define CONFIG_VORTEX_MODULE 1
++#define CONFIG_TYPHOON_MODULE 1
++#undef CONFIG_LANCE
++#define CONFIG_NET_VENDOR_SMC 1
++#define CONFIG_WD80x3_MODULE 1
++#define CONFIG_ULTRA_MODULE 1
++#define CONFIG_SMC9194_MODULE 1
++#define CONFIG_NET_VENDOR_RACAL 1
++#define CONFIG_NI5010_MODULE 1
++#define CONFIG_NI52_MODULE 1
++#undef CONFIG_NI65
++
++/*
++ * Tulip family network device support
++ */
++#define CONFIG_NET_TULIP 1
++#define CONFIG_DE2104X_MODULE 1
++#define CONFIG_TULIP_MODULE 1
++#undef CONFIG_TULIP_MWI
++#undef CONFIG_TULIP_MMIO
++#undef CONFIG_TULIP_NAPI
++#define CONFIG_DE4X5_MODULE 1
++#define CONFIG_WINBOND_840_MODULE 1
++#define CONFIG_DM9102_MODULE 1
++#define CONFIG_PCMCIA_XIRCOM_MODULE 1
++#undef CONFIG_PCMCIA_XIRTULIP
++#define CONFIG_AT1700_MODULE 1
++#define CONFIG_DEPCA_MODULE 1
++#define CONFIG_HP100_MODULE 1
++#define CONFIG_NET_ISA 1
++#define CONFIG_E2100_MODULE 1
++#define CONFIG_EWRK3_MODULE 1
++#define CONFIG_EEXPRESS_MODULE 1
++#define CONFIG_EEXPRESS_PRO_MODULE 1
++#define CONFIG_HPLAN_PLUS_MODULE 1
++#define CONFIG_HPLAN_MODULE 1
++#define CONFIG_LP486E_MODULE 1
++#define CONFIG_ETH16I_MODULE 1
++#define CONFIG_NE2000_MODULE 1
++#define CONFIG_ZNET_MODULE 1
++#define CONFIG_SEEQ8005_MODULE 1
++#define CONFIG_NET_PCI 1
++#define CONFIG_PCNET32_MODULE 1
++#define CONFIG_AMD8111_ETH_MODULE 1
++#undef CONFIG_AMD8111E_NAPI
++#define CONFIG_ADAPTEC_STARFIRE_MODULE 1
++#undef CONFIG_ADAPTEC_STARFIRE_NAPI
++#define CONFIG_AC3200_MODULE 1
++#define CONFIG_APRICOT_MODULE 1
++#define CONFIG_B44_MODULE 1
++#define CONFIG_FORCEDETH_MODULE 1
++#define CONFIG_CS89x0_MODULE 1
++#undef CONFIG_DGRS
++#define CONFIG_EEPRO100_MODULE 1
++#define CONFIG_E100_MODULE 1
++#define CONFIG_FEALNX_MODULE 1
++#define CONFIG_NATSEMI_MODULE 1
++#define CONFIG_NE2K_PCI_MODULE 1
++#define CONFIG_8139CP_MODULE 1
++#define CONFIG_8139TOO_MODULE 1
++#define CONFIG_8139TOO_PIO 1
++#define CONFIG_8139TOO_TUNE_TWISTER 1
++#define CONFIG_8139TOO_8129 1
++#undef CONFIG_8139_OLD_RX_RESET
++#define CONFIG_SIS900_MODULE 1
++#define CONFIG_EPIC100_MODULE 1
++#define CONFIG_SUNDANCE_MODULE 1
++#undef CONFIG_SUNDANCE_MMIO
++#define CONFIG_TLAN_MODULE 1
++#define CONFIG_VIA_RHINE_MODULE 1
++#undef CONFIG_VIA_RHINE_MMIO
++#define CONFIG_NET_POCKET 1
++#define CONFIG_ATP_MODULE 1
++#define CONFIG_DE600_MODULE 1
++#define CONFIG_DE620_MODULE 1
++
++/*
++ * Ethernet (1000 Mbit)
++ */
++#undef CONFIG_ACENIC
++#define CONFIG_DL2K_MODULE 1
++#define CONFIG_E1000_MODULE 1
++#undef CONFIG_E1000_NAPI
++#define CONFIG_NS83820_MODULE 1
++#define CONFIG_HAMACHI_MODULE 1
++#define CONFIG_YELLOWFIN_MODULE 1
++#define CONFIG_R8169_MODULE 1
++#undef CONFIG_R8169_NAPI
++#undef CONFIG_R8169_VLAN
++#define CONFIG_SK98LIN_MODULE 1
++#define CONFIG_VIA_VELOCITY_MODULE 1
++#define CONFIG_TIGON3_MODULE 1
++#define CONFIG_BNX2_MODULE 1
++
++/*
++ * Ethernet (10000 Mbit)
++ */
++#define CONFIG_IXGB_MODULE 1
++#undef CONFIG_IXGB_NAPI
++#define CONFIG_S2IO_MODULE 1
++#undef CONFIG_S2IO_NAPI
++#undef CONFIG_2BUFF_MODE
++
++/*
++ * Token Ring devices
++ */
++#define CONFIG_TR 1
++#define CONFIG_IBMTR_MODULE 1
++#define CONFIG_IBMOL_MODULE 1
++#define CONFIG_IBMLS_MODULE 1
++#define CONFIG_3C359_MODULE 1
++#define CONFIG_TMS380TR_MODULE 1
++#define CONFIG_TMSPCI_MODULE 1
++#define CONFIG_SKISA_MODULE 1
++#define CONFIG_PROTEON_MODULE 1
++#define CONFIG_ABYSS_MODULE 1
++#undef CONFIG_SMCTR
++
++/*
++ * Wireless LAN (non-hamradio)
++ */
++#define CONFIG_NET_RADIO 1
++
++/*
++ * Obsolete Wireless cards support (pre-802.11)
++ */
++#define CONFIG_STRIP_MODULE 1
++#define CONFIG_ARLAN_MODULE 1
++#define CONFIG_WAVELAN_MODULE 1
++#define CONFIG_PCMCIA_WAVELAN_MODULE 1
++#define CONFIG_PCMCIA_NETWAVE_MODULE 1
++
++/*
++ * Wireless 802.11 Frequency Hopping cards support
++ */
++#define CONFIG_PCMCIA_RAYCS_MODULE 1
++
++/*
++ * Wireless 802.11b ISA/PCI cards support
++ */
++#define CONFIG_AIRO_MODULE 1
++#define CONFIG_HERMES_MODULE 1
++#define CONFIG_PLX_HERMES_MODULE 1
++#define CONFIG_TMD_HERMES_MODULE 1
++#define CONFIG_PCI_HERMES_MODULE 1
++#define CONFIG_ATMEL_MODULE 1
++#define CONFIG_PCI_ATMEL_MODULE 1
++
++/*
++ * Wireless 802.11b Pcmcia/Cardbus cards support
++ */
++#define CONFIG_PCMCIA_HERMES_MODULE 1
++#define CONFIG_AIRO_CS_MODULE 1
++#define CONFIG_PCMCIA_ATMEL_MODULE 1
++#define CONFIG_PCMCIA_WL3501_MODULE 1
++
++/*
++ * Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
++ */
++#define CONFIG_PRISM54_MODULE 1
++#define CONFIG_NET_WIRELESS 1
++
++/*
++ * PCMCIA network device support
++ */
++#define CONFIG_NET_PCMCIA 1
++#define CONFIG_PCMCIA_3C589_MODULE 1
++#define CONFIG_PCMCIA_3C574_MODULE 1
++#define CONFIG_PCMCIA_FMVJ18X_MODULE 1
++#define CONFIG_PCMCIA_PCNET_MODULE 1
++#define CONFIG_PCMCIA_NMCLAN_MODULE 1
++#define CONFIG_PCMCIA_SMC91C92_MODULE 1
++#define CONFIG_PCMCIA_XIRC2PS_MODULE 1
++#define CONFIG_PCMCIA_AXNET_MODULE 1
++#define CONFIG_ARCNET_COM20020_CS_MODULE 1
++#define CONFIG_PCMCIA_IBMTR_MODULE 1
++
++/*
++ * Wan interfaces
++ */
++#define CONFIG_WAN 1
++#define CONFIG_HOSTESS_SV11_MODULE 1
++#define CONFIG_COSA_MODULE 1
++#define CONFIG_DSCC4_MODULE 1
++#define CONFIG_DSCC4_PCISYNC 1
++#define CONFIG_DSCC4_PCI_RST 1
++#define CONFIG_LANMEDIA_MODULE 1
++#define CONFIG_SEALEVEL_4021_MODULE 1
++#define CONFIG_SYNCLINK_SYNCPPP_MODULE 1
++#define CONFIG_HDLC_MODULE 1
++#define CONFIG_HDLC_RAW 1
++#define CONFIG_HDLC_RAW_ETH 1
++#define CONFIG_HDLC_CISCO 1
++#define CONFIG_HDLC_FR 1
++#define CONFIG_HDLC_PPP 1
++#define CONFIG_HDLC_X25 1
++#define CONFIG_PCI200SYN_MODULE 1
++#define CONFIG_WANXL_MODULE 1
++#define CONFIG_PC300_MODULE 1
++#define CONFIG_PC300_MLPPP 1
++#define CONFIG_N2_MODULE 1
++#define CONFIG_C101_MODULE 1
++#define CONFIG_FARSYNC_MODULE 1
++#define CONFIG_DLCI_MODULE 1
++#define CONFIG_DLCI_COUNT 24
++#define CONFIG_DLCI_MAX 8
++#define CONFIG_SDLA_MODULE 1
++#define CONFIG_WAN_ROUTER_DRIVERS 1
++#undef CONFIG_VENDOR_SANGOMA
++#define CONFIG_CYCLADES_SYNC_MODULE 1
++#define CONFIG_CYCLOMX_X25 1
++#define CONFIG_LAPBETHER_MODULE 1
++#define CONFIG_X25_ASY_MODULE 1
++#define CONFIG_SBNI_MODULE 1
++#undef CONFIG_SBNI_MULTILINE
++
++/*
++ * ATM drivers
++ */
++#define CONFIG_ATM_TCP_MODULE 1
++#define CONFIG_ATM_LANAI_MODULE 1
++#define CONFIG_ATM_ENI_MODULE 1
++#undef CONFIG_ATM_ENI_DEBUG
++#undef CONFIG_ATM_ENI_TUNE_BURST
++#define CONFIG_ATM_FIRESTREAM_MODULE 1
++#define CONFIG_ATM_ZATM_MODULE 1
++#undef CONFIG_ATM_ZATM_DEBUG
++#define CONFIG_ATM_NICSTAR_MODULE 1
++#undef CONFIG_ATM_NICSTAR_USE_SUNI
++#undef CONFIG_ATM_NICSTAR_USE_IDT77105
++#define CONFIG_ATM_IDT77252_MODULE 1
++#undef CONFIG_ATM_IDT77252_DEBUG
++#undef CONFIG_ATM_IDT77252_RCV_ALL
++#define CONFIG_ATM_IDT77252_USE_SUNI 1
++#define CONFIG_ATM_AMBASSADOR_MODULE 1
++#undef CONFIG_ATM_AMBASSADOR_DEBUG
++#define CONFIG_ATM_HORIZON_MODULE 1
++#undef CONFIG_ATM_HORIZON_DEBUG
++#define CONFIG_ATM_IA_MODULE 1
++#undef CONFIG_ATM_IA_DEBUG
++#define CONFIG_ATM_FORE200E_MAYBE_MODULE 1
++#define CONFIG_ATM_FORE200E_PCA 1
++#define CONFIG_ATM_FORE200E_PCA_DEFAULT_FW 1
++#undef CONFIG_ATM_FORE200E_USE_TASKLET
++#define CONFIG_ATM_FORE200E_TX_RETRY 16
++#define CONFIG_ATM_FORE200E_DEBUG 0
++#define CONFIG_ATM_FORE200E_MODULE 1
++#define CONFIG_ATM_HE_MODULE 1
++#define CONFIG_ATM_HE_USE_SUNI 1
++#define CONFIG_FDDI 1
++#define CONFIG_DEFXX_MODULE 1
++#define CONFIG_SKFP_MODULE 1
++#define CONFIG_HIPPI 1
++#define CONFIG_ROADRUNNER_MODULE 1
++#undef CONFIG_ROADRUNNER_LARGE_RINGS
++#define CONFIG_PLIP_MODULE 1
++#define CONFIG_PPP_MODULE 1
++#define CONFIG_PPP_MULTILINK 1
++#define CONFIG_PPP_FILTER 1
++#define CONFIG_PPP_ASYNC_MODULE 1
++#define CONFIG_PPP_SYNC_TTY_MODULE 1
++#define CONFIG_PPP_DEFLATE_MODULE 1
++#define CONFIG_PPP_BSDCOMP_MODULE 1
++#define CONFIG_PPPOE_MODULE 1
++#define CONFIG_PPPOATM_MODULE 1
++#define CONFIG_SLIP_MODULE 1
++#define CONFIG_SLIP_COMPRESSED 1
++#define CONFIG_SLIP_SMART 1
++#define CONFIG_SLIP_MODE_SLIP6 1
++#define CONFIG_NET_FC 1
++#define CONFIG_SHAPER_MODULE 1
++#define CONFIG_NETCONSOLE_MODULE 1
++
++/*
++ * ISDN subsystem
++ */
++#define CONFIG_ISDN_MODULE 1
++
++/*
++ * Old ISDN4Linux
++ */
++#define CONFIG_ISDN_I4L_MODULE 1
++#define CONFIG_ISDN_PPP 1
++#define CONFIG_ISDN_PPP_VJ 1
++#define CONFIG_ISDN_MPP 1
++#define CONFIG_IPPP_FILTER 1
++#define CONFIG_ISDN_PPP_BSDCOMP_MODULE 1
++#define CONFIG_ISDN_AUDIO 1
++#define CONFIG_ISDN_TTY_FAX 1
++#define CONFIG_ISDN_X25 1
++
++/*
++ * ISDN feature submodules
++ */
++#undef CONFIG_ISDN_DRV_LOOP
++#undef CONFIG_ISDN_DIVERSION
++
++/*
++ * ISDN4Linux hardware drivers
++ */
++
++/*
++ * Passive cards
++ */
++#define CONFIG_ISDN_DRV_HISAX_MODULE 1
++
++/*
++ * D-channel protocol features
++ */
++#define CONFIG_HISAX_EURO 1
++#define CONFIG_DE_AOC 1
++#undef CONFIG_HISAX_NO_SENDCOMPLETE
++#undef CONFIG_HISAX_NO_LLC
++#undef CONFIG_HISAX_NO_KEYPAD
++#define CONFIG_HISAX_1TR6 1
++#define CONFIG_HISAX_NI1 1
++#define CONFIG_HISAX_MAX_CARDS 8
++
++/*
++ * HiSax supported cards
++ */
++#define CONFIG_HISAX_16_0 1
++#define CONFIG_HISAX_16_3 1
++#define CONFIG_HISAX_TELESPCI 1
++#define CONFIG_HISAX_S0BOX 1
++#define CONFIG_HISAX_AVM_A1 1
++#define CONFIG_HISAX_FRITZPCI 1
++#define CONFIG_HISAX_AVM_A1_PCMCIA 1
++#define CONFIG_HISAX_ELSA 1
++#define CONFIG_HISAX_IX1MICROR2 1
++#define CONFIG_HISAX_DIEHLDIVA 1
++#define CONFIG_HISAX_ASUSCOM 1
++#define CONFIG_HISAX_TELEINT 1
++#define CONFIG_HISAX_HFCS 1
++#define CONFIG_HISAX_SEDLBAUER 1
++#define CONFIG_HISAX_SPORTSTER 1
++#define CONFIG_HISAX_MIC 1
++#define CONFIG_HISAX_NETJET 1
++#define CONFIG_HISAX_NETJET_U 1
++#define CONFIG_HISAX_NICCY 1
++#define CONFIG_HISAX_ISURF 1
++#define CONFIG_HISAX_HSTSAPHIR 1
++#define CONFIG_HISAX_BKM_A4T 1
++#define CONFIG_HISAX_SCT_QUADRO 1
++#define CONFIG_HISAX_GAZEL 1
++#define CONFIG_HISAX_HFC_PCI 1
++#define CONFIG_HISAX_W6692 1
++#define CONFIG_HISAX_HFC_SX 1
++#define CONFIG_HISAX_ENTERNOW_PCI 1
++#undef CONFIG_HISAX_DEBUG
++
++/*
++ * HiSax PCMCIA card service modules
++ */
++#define CONFIG_HISAX_SEDLBAUER_CS_MODULE 1
++#define CONFIG_HISAX_ELSA_CS_MODULE 1
++#define CONFIG_HISAX_AVM_A1_CS_MODULE 1
++#define CONFIG_HISAX_TELES_CS_MODULE 1
++
++/*
++ * HiSax sub driver modules
++ */
++#define CONFIG_HISAX_ST5481_MODULE 1
++#define CONFIG_HISAX_HFCUSB_MODULE 1
++#define CONFIG_HISAX_HFC4S8S_MODULE 1
++#define CONFIG_HISAX_FRITZ_PCIPNP_MODULE 1
++#define CONFIG_HISAX_HDLC 1
++
++/*
++ * Active cards
++ */
++#define CONFIG_ISDN_DRV_ICN_MODULE 1
++#define CONFIG_ISDN_DRV_PCBIT_MODULE 1
++#define CONFIG_ISDN_DRV_SC_MODULE 1
++#define CONFIG_ISDN_DRV_ACT2000_MODULE 1
++#undef CONFIG_HYSDN
++
++/*
++ * CAPI subsystem
++ */
++#define CONFIG_ISDN_CAPI_MODULE 1
++#define CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON 1
++#define CONFIG_ISDN_CAPI_MIDDLEWARE 1
++#define CONFIG_ISDN_CAPI_CAPI20_MODULE 1
++#define CONFIG_ISDN_CAPI_CAPIFS_BOOL 1
++#define CONFIG_ISDN_CAPI_CAPIFS_MODULE 1
++#define CONFIG_ISDN_CAPI_CAPIDRV_MODULE 1
++
++/*
++ * CAPI hardware drivers
++ */
++
++/*
++ * Active AVM cards
++ */
++#define CONFIG_CAPI_AVM 1
++#define CONFIG_ISDN_DRV_AVMB1_B1ISA_MODULE 1
++#define CONFIG_ISDN_DRV_AVMB1_B1PCI_MODULE 1
++#define CONFIG_ISDN_DRV_AVMB1_B1PCIV4 1
++#define CONFIG_ISDN_DRV_AVMB1_T1ISA_MODULE 1
++#define CONFIG_ISDN_DRV_AVMB1_B1PCMCIA_MODULE 1
++#define CONFIG_ISDN_DRV_AVMB1_AVM_CS_MODULE 1
++#define CONFIG_ISDN_DRV_AVMB1_T1PCI_MODULE 1
++#define CONFIG_ISDN_DRV_AVMB1_C4_MODULE 1
++
++/*
++ * Active Eicon DIVA Server cards
++ */
++#define CONFIG_CAPI_EICON 1
++#define CONFIG_ISDN_DIVAS_MODULE 1
++#define CONFIG_ISDN_DIVAS_BRIPCI 1
++#define CONFIG_ISDN_DIVAS_PRIPCI 1
++#define CONFIG_ISDN_DIVAS_DIVACAPI_MODULE 1
++#define CONFIG_ISDN_DIVAS_USERIDI_MODULE 1
++#define CONFIG_ISDN_DIVAS_MAINT_MODULE 1
++
++/*
++ * Telephony Support
++ */
++#define CONFIG_PHONE_MODULE 1
++#define CONFIG_PHONE_IXJ_MODULE 1
++#define CONFIG_PHONE_IXJ_PCMCIA_MODULE 1
++
++/*
++ * Input device support
++ */
++#define CONFIG_INPUT 1
++
++/*
++ * Userland interfaces
++ */
++#define CONFIG_INPUT_MOUSEDEV 1
++#define CONFIG_INPUT_MOUSEDEV_PSAUX 1
++#define CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024
++#define CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768
++#define CONFIG_INPUT_JOYDEV_MODULE 1
++#define CONFIG_INPUT_TSDEV_MODULE 1
++#define CONFIG_INPUT_TSDEV_SCREEN_X 240
++#define CONFIG_INPUT_TSDEV_SCREEN_Y 320
++#define CONFIG_INPUT_EVDEV_MODULE 1
++#define CONFIG_INPUT_EVBUG_MODULE 1
++
++/*
++ * Input Device Drivers
++ */
++#define CONFIG_INPUT_KEYBOARD 1
++#define CONFIG_KEYBOARD_ATKBD 1
++#define CONFIG_KEYBOARD_SUNKBD_MODULE 1
++#define CONFIG_KEYBOARD_LKKBD_MODULE 1
++#define CONFIG_KEYBOARD_XTKBD_MODULE 1
++#define CONFIG_KEYBOARD_NEWTON_MODULE 1
++#define CONFIG_INPUT_MOUSE 1
++#define CONFIG_MOUSE_PS2 1
++#define CONFIG_MOUSE_SERIAL_MODULE 1
++#define CONFIG_MOUSE_INPORT_MODULE 1
++#undef CONFIG_MOUSE_ATIXL
++#define CONFIG_MOUSE_LOGIBM_MODULE 1
++#define CONFIG_MOUSE_PC110PAD_MODULE 1
++#define CONFIG_MOUSE_VSXXXAA_MODULE 1
++#define CONFIG_INPUT_JOYSTICK 1
++#define CONFIG_JOYSTICK_ANALOG_MODULE 1
++#define CONFIG_JOYSTICK_A3D_MODULE 1
++#define CONFIG_JOYSTICK_ADI_MODULE 1
++#define CONFIG_JOYSTICK_COBRA_MODULE 1
++#define CONFIG_JOYSTICK_GF2K_MODULE 1
++#define CONFIG_JOYSTICK_GRIP_MODULE 1
++#define CONFIG_JOYSTICK_GRIP_MP_MODULE 1
++#define CONFIG_JOYSTICK_GUILLEMOT_MODULE 1
++#define CONFIG_JOYSTICK_INTERACT_MODULE 1
++#define CONFIG_JOYSTICK_SIDEWINDER_MODULE 1
++#define CONFIG_JOYSTICK_TMDC_MODULE 1
++#define CONFIG_JOYSTICK_IFORCE_MODULE 1
++#define CONFIG_JOYSTICK_IFORCE_USB 1
++#define CONFIG_JOYSTICK_IFORCE_232 1
++#define CONFIG_JOYSTICK_WARRIOR_MODULE 1
++#define CONFIG_JOYSTICK_MAGELLAN_MODULE 1
++#define CONFIG_JOYSTICK_SPACEORB_MODULE 1
++#define CONFIG_JOYSTICK_SPACEBALL_MODULE 1
++#define CONFIG_JOYSTICK_STINGER_MODULE 1
++#define CONFIG_JOYSTICK_TWIDJOY_MODULE 1
++#define CONFIG_JOYSTICK_DB9_MODULE 1
++#define CONFIG_JOYSTICK_GAMECON_MODULE 1
++#define CONFIG_JOYSTICK_TURBOGRAFX_MODULE 1
++#define CONFIG_JOYSTICK_JOYDUMP_MODULE 1
++#define CONFIG_INPUT_TOUCHSCREEN 1
++#define CONFIG_TOUCHSCREEN_GUNZE_MODULE 1
++#define CONFIG_TOUCHSCREEN_ELO_MODULE 1
++#define CONFIG_TOUCHSCREEN_MTOUCH_MODULE 1
++#define CONFIG_TOUCHSCREEN_MK712_MODULE 1
++#define CONFIG_INPUT_MISC 1
++#define CONFIG_INPUT_PCSPKR_MODULE 1
++#define CONFIG_INPUT_UINPUT_MODULE 1
++
++/*
++ * Hardware I/O ports
++ */
++#define CONFIG_SERIO 1
++#define CONFIG_SERIO_I8042 1
++#define CONFIG_SERIO_SERPORT_MODULE 1
++#define CONFIG_SERIO_CT82C710_MODULE 1
++#define CONFIG_SERIO_PARKBD_MODULE 1
++#define CONFIG_SERIO_PCIPS2_MODULE 1
++#define CONFIG_SERIO_LIBPS2 1
++#define CONFIG_SERIO_RAW_MODULE 1
++#define CONFIG_GAMEPORT_MODULE 1
++#define CONFIG_GAMEPORT_NS558_MODULE 1
++#define CONFIG_GAMEPORT_L4_MODULE 1
++#define CONFIG_GAMEPORT_EMU10K1_MODULE 1
++#define CONFIG_GAMEPORT_VORTEX_MODULE 1
++#define CONFIG_GAMEPORT_FM801_MODULE 1
++#undef CONFIG_GAMEPORT_CS461X
++
++/*
++ * Character devices
++ */
++#define CONFIG_VT 1
++#define CONFIG_VT_CONSOLE 1
++#define CONFIG_HW_CONSOLE 1
++#undef CONFIG_SERIAL_NONSTANDARD
++
++/*
++ * Serial drivers
++ */
++#define CONFIG_SERIAL_8250_MODULE 1
++#undef CONFIG_SERIAL_8250_CS
++#undef CONFIG_SERIAL_8250_ACPI
++#define CONFIG_SERIAL_8250_NR_UARTS 4
++#undef CONFIG_SERIAL_8250_EXTENDED
++
++/*
++ * Non-8250 serial port support
++ */
++#define CONFIG_SERIAL_CORE_MODULE 1
++#define CONFIG_SERIAL_JSM_MODULE 1
++#define CONFIG_UNIX98_PTYS 1
++#define CONFIG_LEGACY_PTYS 1
++#define CONFIG_LEGACY_PTY_COUNT 256
++#define CONFIG_PRINTER_MODULE 1
++#undef CONFIG_LP_CONSOLE
++#define CONFIG_PPDEV_MODULE 1
++#define CONFIG_TIPAR_MODULE 1
++
++/*
++ * IPMI
++ */
++#define CONFIG_IPMI_HANDLER_MODULE 1
++#undef CONFIG_IPMI_PANIC_EVENT
++#define CONFIG_IPMI_DEVICE_INTERFACE_MODULE 1
++#define CONFIG_IPMI_SI_MODULE 1
++#define CONFIG_IPMI_WATCHDOG_MODULE 1
++#define CONFIG_IPMI_POWEROFF_MODULE 1
++
++/*
++ * Watchdog Cards
++ */
++#define CONFIG_WATCHDOG 1
++#undef CONFIG_WATCHDOG_NOWAYOUT
++
++/*
++ * Watchdog Device Drivers
++ */
++#define CONFIG_SOFT_WATCHDOG_MODULE 1
++#define CONFIG_ACQUIRE_WDT_MODULE 1
++#define CONFIG_ADVANTECH_WDT_MODULE 1
++#define CONFIG_ALIM1535_WDT_MODULE 1
++#define CONFIG_ALIM7101_WDT_MODULE 1
++#define CONFIG_SC520_WDT_MODULE 1
++#define CONFIG_EUROTECH_WDT_MODULE 1
++#define CONFIG_IB700_WDT_MODULE 1
++#define CONFIG_WAFER_WDT_MODULE 1
++#define CONFIG_I8XX_TCO_MODULE 1
++#define CONFIG_SC1200_WDT_MODULE 1
++#define CONFIG_SCx200_WDT_MODULE 1
++#define CONFIG_60XX_WDT_MODULE 1
++#define CONFIG_CPU5_WDT_MODULE 1
++#define CONFIG_W83627HF_WDT_MODULE 1
++#define CONFIG_W83877F_WDT_MODULE 1
++#define CONFIG_MACHZ_WDT_MODULE 1
++
++/*
++ * ISA-based Watchdog Cards
++ */
++#define CONFIG_PCWATCHDOG_MODULE 1
++#define CONFIG_MIXCOMWD_MODULE 1
++#define CONFIG_WDT_MODULE 1
++#define CONFIG_WDT_501 1
++
++/*
++ * PCI-based Watchdog Cards
++ */
++#define CONFIG_PCIPCWATCHDOG_MODULE 1
++#define CONFIG_WDTPCI_MODULE 1
++#define CONFIG_WDT_501_PCI 1
++
++/*
++ * USB-based Watchdog Cards
++ */
++#define CONFIG_USBPCWATCHDOG_MODULE 1
++#define CONFIG_HW_RANDOM_MODULE 1
++#define CONFIG_NVRAM_MODULE 1
++#define CONFIG_RTC_MODULE 1
++#define CONFIG_GEN_RTC_MODULE 1
++#define CONFIG_GEN_RTC_X 1
++#define CONFIG_DTLK_MODULE 1
++#define CONFIG_R3964_MODULE 1
++#define CONFIG_APPLICOM_MODULE 1
++#define CONFIG_SONYPI_MODULE 1
++
++/*
++ * Ftape, the floppy tape device driver
++ */
++#undef CONFIG_FTAPE
++#define CONFIG_AGP_MODULE 1
++#define CONFIG_AGP_ALI_MODULE 1
++#define CONFIG_AGP_ATI_MODULE 1
++#define CONFIG_AGP_AMD_MODULE 1
++#define CONFIG_AGP_AMD64_MODULE 1
++#define CONFIG_AGP_INTEL_MODULE 1
++#define CONFIG_AGP_NVIDIA_MODULE 1
++#define CONFIG_AGP_SIS_MODULE 1
++#define CONFIG_AGP_SWORKS_MODULE 1
++#define CONFIG_AGP_VIA_MODULE 1
++#define CONFIG_AGP_EFFICEON_MODULE 1
++#define CONFIG_DRM_MODULE 1
++#define CONFIG_DRM_TDFX_MODULE 1
++#undef CONFIG_DRM_GAMMA
++#define CONFIG_DRM_R128_MODULE 1
++#define CONFIG_DRM_RADEON_MODULE 1
++#define CONFIG_DRM_I810_MODULE 1
++#define CONFIG_DRM_I830_MODULE 1
++#define CONFIG_DRM_I915_MODULE 1
++#define CONFIG_DRM_MGA_MODULE 1
++#define CONFIG_DRM_SIS_MODULE 1
++
++/*
++ * PCMCIA character devices
++ */
++#define CONFIG_SYNCLINK_CS_MODULE 1
++#define CONFIG_MWAVE_MODULE 1
++#define CONFIG_SCx200_GPIO_MODULE 1
++#define CONFIG_RAW_DRIVER_MODULE 1
++#undef CONFIG_HPET
++#define CONFIG_MAX_RAW_DEVS 256
++#define CONFIG_HANGCHECK_TIMER_MODULE 1
++
++/*
++ * TPM devices
++ */
++#undef CONFIG_TCG_TPM
++
++/*
++ * I2C support
++ */
++#define CONFIG_I2C_MODULE 1
++#define CONFIG_I2C_CHARDEV_MODULE 1
++
++/*
++ * I2C Algorithms
++ */
++#define CONFIG_I2C_ALGOBIT_MODULE 1
++#define CONFIG_I2C_ALGOPCF_MODULE 1
++#define CONFIG_I2C_ALGOPCA_MODULE 1
++
++/*
++ * I2C Hardware Bus support
++ */
++#define CONFIG_I2C_ALI1535_MODULE 1
++#define CONFIG_I2C_ALI1563_MODULE 1
++#define CONFIG_I2C_ALI15X3_MODULE 1
++#define CONFIG_I2C_AMD756_MODULE 1
++#define CONFIG_I2C_AMD756_S4882_MODULE 1
++#define CONFIG_I2C_AMD8111_MODULE 1
++#define CONFIG_I2C_ELEKTOR_MODULE 1
++#define CONFIG_I2C_I801_MODULE 1
++#define CONFIG_I2C_I810_MODULE 1
++#define CONFIG_I2C_PIIX4_MODULE 1
++#define CONFIG_I2C_ISA_MODULE 1
++#define CONFIG_I2C_NFORCE2_MODULE 1
++#define CONFIG_I2C_PARPORT_MODULE 1
++#define CONFIG_I2C_PARPORT_LIGHT_MODULE 1
++#define CONFIG_I2C_PROSAVAGE_MODULE 1
++#define CONFIG_I2C_SAVAGE4_MODULE 1
++#define CONFIG_SCx200_I2C_MODULE 1
++#define CONFIG_SCx200_I2C_SCL 12
++#define CONFIG_SCx200_I2C_SDA 13
++#define CONFIG_SCx200_ACB_MODULE 1
++#define CONFIG_I2C_SIS5595_MODULE 1
++#define CONFIG_I2C_SIS630_MODULE 1
++#define CONFIG_I2C_SIS96X_MODULE 1
++#define CONFIG_I2C_STUB_MODULE 1
++#define CONFIG_I2C_VIA_MODULE 1
++#define CONFIG_I2C_VIAPRO_MODULE 1
++#define CONFIG_I2C_VOODOO3_MODULE 1
++#define CONFIG_I2C_PCA_ISA_MODULE 1
++
++/*
++ * Hardware Sensors Chip support
++ */
++#define CONFIG_I2C_SENSOR_MODULE 1
++#define CONFIG_SENSORS_ADM1021_MODULE 1
++#define CONFIG_SENSORS_ADM1025_MODULE 1
++#define CONFIG_SENSORS_ADM1026_MODULE 1
++#define CONFIG_SENSORS_ADM1031_MODULE 1
++#define CONFIG_SENSORS_ASB100_MODULE 1
++#define CONFIG_SENSORS_DS1621_MODULE 1
++#define CONFIG_SENSORS_FSCHER_MODULE 1
++#define CONFIG_SENSORS_FSCPOS_MODULE 1
++#define CONFIG_SENSORS_GL518SM_MODULE 1
++#define CONFIG_SENSORS_GL520SM_MODULE 1
++#define CONFIG_SENSORS_IT87_MODULE 1
++#define CONFIG_SENSORS_LM63_MODULE 1
++#define CONFIG_SENSORS_LM75_MODULE 1
++#define CONFIG_SENSORS_LM77_MODULE 1
++#define CONFIG_SENSORS_LM78_MODULE 1
++#define CONFIG_SENSORS_LM80_MODULE 1
++#define CONFIG_SENSORS_LM83_MODULE 1
++#define CONFIG_SENSORS_LM85_MODULE 1
++#define CONFIG_SENSORS_LM87_MODULE 1
++#define CONFIG_SENSORS_LM90_MODULE 1
++#define CONFIG_SENSORS_LM92_MODULE 1
++#define CONFIG_SENSORS_MAX1619_MODULE 1
++#define CONFIG_SENSORS_PC87360_MODULE 1
++#undef CONFIG_SENSORS_SMSC47B397
++#define CONFIG_SENSORS_SIS5595_MODULE 1
++#define CONFIG_SENSORS_SMSC47M1_MODULE 1
++#define CONFIG_SENSORS_VIA686A_MODULE 1
++#define CONFIG_SENSORS_W83781D_MODULE 1
++#define CONFIG_SENSORS_W83L785TS_MODULE 1
++#define CONFIG_SENSORS_W83627HF_MODULE 1
++
++/*
++ * Other I2C Chip support
++ */
++#define CONFIG_SENSORS_DS1337_MODULE 1
++#define CONFIG_SENSORS_EEPROM_MODULE 1
++#define CONFIG_SENSORS_PCF8574_MODULE 1
++#define CONFIG_SENSORS_PCF8591_MODULE 1
++#define CONFIG_SENSORS_RTC8564_MODULE 1
++#undef CONFIG_I2C_DEBUG_CORE
++#undef CONFIG_I2C_DEBUG_ALGO
++#undef CONFIG_I2C_DEBUG_BUS
++#undef CONFIG_I2C_DEBUG_CHIP
++
++/*
++ * Dallas's 1-wire bus
++ */
++#define CONFIG_W1_MODULE 1
++#define CONFIG_W1_MATROX_MODULE 1
++#define CONFIG_W1_DS9490_MODULE 1
++#define CONFIG_W1_DS9490_BRIDGE_MODULE 1
++#define CONFIG_W1_THERM_MODULE 1
++#define CONFIG_W1_SMEM_MODULE 1
++
++/*
++ * Misc devices
++ */
++#define CONFIG_IBM_ASM_MODULE 1
++
++/*
++ * Multimedia devices
++ */
++#define CONFIG_VIDEO_DEV_MODULE 1
++
++/*
++ * Video For Linux
++ */
++
++/*
++ * Video Adapters
++ */
++#define CONFIG_VIDEO_BT848_MODULE 1
++#define CONFIG_VIDEO_PMS_MODULE 1
++#define CONFIG_VIDEO_BWQCAM_MODULE 1
++#define CONFIG_VIDEO_CQCAM_MODULE 1
++#define CONFIG_VIDEO_W9966_MODULE 1
++#define CONFIG_VIDEO_CPIA_MODULE 1
++#define CONFIG_VIDEO_CPIA_PP_MODULE 1
++#define CONFIG_VIDEO_CPIA_USB_MODULE 1
++#define CONFIG_VIDEO_SAA5246A_MODULE 1
++#define CONFIG_VIDEO_SAA5249_MODULE 1
++#define CONFIG_TUNER_3036_MODULE 1
++#define CONFIG_VIDEO_STRADIS_MODULE 1
++#define CONFIG_VIDEO_ZORAN_MODULE 1
++#define CONFIG_VIDEO_ZORAN_BUZ_MODULE 1
++#define CONFIG_VIDEO_ZORAN_DC10_MODULE 1
++#define CONFIG_VIDEO_ZORAN_DC30_MODULE 1
++#define CONFIG_VIDEO_ZORAN_LML33_MODULE 1
++#define CONFIG_VIDEO_ZORAN_LML33R10_MODULE 1
++#undef CONFIG_VIDEO_ZR36120
++#define CONFIG_VIDEO_MEYE_MODULE 1
++#undef CONFIG_VIDEO_SAA7134
++#define CONFIG_VIDEO_MXB_MODULE 1
++#define CONFIG_VIDEO_DPC_MODULE 1
++#define CONFIG_VIDEO_HEXIUM_ORION_MODULE 1
++#define CONFIG_VIDEO_HEXIUM_GEMINI_MODULE 1
++#define CONFIG_VIDEO_CX88_MODULE 1
++#undef CONFIG_VIDEO_CX88_DVB
++#define CONFIG_VIDEO_OVCAMCHIP_MODULE 1
++
++/*
++ * Radio Adapters
++ */
++#define CONFIG_RADIO_CADET_MODULE 1
++#define CONFIG_RADIO_RTRACK_MODULE 1
++#define CONFIG_RADIO_RTRACK2_MODULE 1
++#define CONFIG_RADIO_AZTECH_MODULE 1
++#define CONFIG_RADIO_GEMTEK_MODULE 1
++#define CONFIG_RADIO_GEMTEK_PCI_MODULE 1
++#define CONFIG_RADIO_MAXIRADIO_MODULE 1
++#define CONFIG_RADIO_MAESTRO_MODULE 1
++#define CONFIG_RADIO_MIROPCM20_MODULE 1
++#define CONFIG_RADIO_MIROPCM20_RDS_MODULE 1
++#define CONFIG_RADIO_SF16FMI_MODULE 1
++#define CONFIG_RADIO_SF16FMR2_MODULE 1
++#define CONFIG_RADIO_TERRATEC_MODULE 1
++#define CONFIG_RADIO_TRUST_MODULE 1
++#define CONFIG_RADIO_TYPHOON_MODULE 1
++#define CONFIG_RADIO_TYPHOON_PROC_FS 1
++#define CONFIG_RADIO_ZOLTRIX_MODULE 1
++
++/*
++ * Digital Video Broadcasting Devices
++ */
++#define CONFIG_DVB 1
++#define CONFIG_DVB_CORE_MODULE 1
++
++/*
++ * Supported SAA7146 based PCI Adapters
++ */
++#define CONFIG_DVB_AV7110_MODULE 1
++#undef CONFIG_DVB_AV7110_OSD
++#define CONFIG_DVB_BUDGET_MODULE 1
++#define CONFIG_DVB_BUDGET_CI_MODULE 1
++#define CONFIG_DVB_BUDGET_AV_MODULE 1
++#define CONFIG_DVB_BUDGET_PATCH_MODULE 1
++
++/*
++ * Supported USB Adapters
++ */
++#define CONFIG_DVB_TTUSB_BUDGET_MODULE 1
++#define CONFIG_DVB_TTUSB_DEC_MODULE 1
++#define CONFIG_DVB_DIBUSB_MODULE 1
++#define CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES 1
++#undef CONFIG_DVB_DIBCOM_DEBUG
++#define CONFIG_DVB_CINERGYT2_MODULE 1
++#undef CONFIG_DVB_CINERGYT2_TUNING
++
++/*
++ * Supported FlexCopII (B2C2) Adapters
++ */
++#define CONFIG_DVB_B2C2_FLEXCOP_MODULE 1
++#define CONFIG_DVB_B2C2_FLEXCOP_PCI_MODULE 1
++#define CONFIG_DVB_B2C2_FLEXCOP_USB_MODULE 1
++#undef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
++#define CONFIG_DVB_B2C2_SKYSTAR_MODULE 1
++
++/*
++ * Supported BT878 Adapters
++ */
++#define CONFIG_DVB_BT8XX_MODULE 1
++
++/*
++ * Supported DVB Frontends
++ */
++
++/*
++ * Customise DVB Frontends
++ */
++
++/*
++ * DVB-S (satellite) frontends
++ */
++#define CONFIG_DVB_STV0299_MODULE 1
++#define CONFIG_DVB_CX24110_MODULE 1
++#define CONFIG_DVB_TDA8083_MODULE 1
++#define CONFIG_DVB_TDA80XX_MODULE 1
++#define CONFIG_DVB_MT312_MODULE 1
++#define CONFIG_DVB_VES1X93_MODULE 1
++
++/*
++ * DVB-T (terrestrial) frontends
++ */
++#define CONFIG_DVB_SP8870_MODULE 1
++#define CONFIG_DVB_SP887X_MODULE 1
++#define CONFIG_DVB_CX22700_MODULE 1
++#define CONFIG_DVB_CX22702_MODULE 1
++#define CONFIG_DVB_L64781_MODULE 1
++#define CONFIG_DVB_TDA1004X_MODULE 1
++#define CONFIG_DVB_NXT6000_MODULE 1
++#define CONFIG_DVB_MT352_MODULE 1
++#define CONFIG_DVB_DIB3000MB_MODULE 1
++#define CONFIG_DVB_DIB3000MC_MODULE 1
++
++/*
++ * DVB-C (cable) frontends
++ */
++#define CONFIG_DVB_ATMEL_AT76C651_MODULE 1
++#define CONFIG_DVB_VES1820_MODULE 1
++#define CONFIG_DVB_TDA10021_MODULE 1
++#define CONFIG_DVB_STV0297_MODULE 1
++
++/*
++ * ATSC (North American/Korean Terrestrial DTV) frontends
++ */
++#define CONFIG_DVB_NXT2002_MODULE 1
++#define CONFIG_DVB_OR51211_MODULE 1
++#define CONFIG_DVB_OR51132_MODULE 1
++#define CONFIG_VIDEO_SAA7146_MODULE 1
++#define CONFIG_VIDEO_SAA7146_VV_MODULE 1
++#define CONFIG_VIDEO_VIDEOBUF_MODULE 1
++#define CONFIG_VIDEO_TUNER_MODULE 1
++#define CONFIG_VIDEO_BUF_MODULE 1
++#define CONFIG_VIDEO_BTCX_MODULE 1
++#define CONFIG_VIDEO_IR_MODULE 1
++#define CONFIG_VIDEO_TVEEPROM_MODULE 1
++
++/*
++ * Graphics support
++ */
++#define CONFIG_FB 1
++#define CONFIG_FB_CFB_FILLRECT_MODULE 1
++#define CONFIG_FB_CFB_COPYAREA_MODULE 1
++#define CONFIG_FB_CFB_IMAGEBLIT_MODULE 1
++#define CONFIG_FB_SOFT_CURSOR_MODULE 1
++#undef CONFIG_FB_MACMODES
++#define CONFIG_FB_MODE_HELPERS 1
++#define CONFIG_FB_TILEBLITTING 1
++#define CONFIG_FB_CIRRUS_MODULE 1
++#define CONFIG_FB_PM2_MODULE 1
++#define CONFIG_FB_PM2_FIFO_DISCONNECT 1
++#define CONFIG_FB_CYBER2000_MODULE 1
++#undef CONFIG_FB_ASILIANT
++#undef CONFIG_FB_IMSTT
++#define CONFIG_FB_VGA16_MODULE 1
++#undef CONFIG_FB_VESA
++#define CONFIG_VIDEO_SELECT 1
++#define CONFIG_FB_HGA_MODULE 1
++#undef CONFIG_FB_HGA_ACCEL
++#define CONFIG_FB_NVIDIA_MODULE 1
++#define CONFIG_FB_NVIDIA_I2C 1
++#define CONFIG_FB_RIVA_MODULE 1
++#define CONFIG_FB_RIVA_I2C 1
++#define CONFIG_FB_RIVA_DEBUG 1
++#define CONFIG_FB_I810_MODULE 1
++#undef CONFIG_FB_I810_GTF
++#define CONFIG_FB_INTEL_MODULE 1
++#undef CONFIG_FB_INTEL_DEBUG
++#define CONFIG_FB_MATROX_MODULE 1
++#define CONFIG_FB_MATROX_MILLENIUM 1
++#define CONFIG_FB_MATROX_MYSTIQUE 1
++#undef CONFIG_FB_MATROX_G
++#define CONFIG_FB_MATROX_I2C_MODULE 1
++#define CONFIG_FB_MATROX_MULTIHEAD 1
++#define CONFIG_FB_RADEON_OLD_MODULE 1
++#define CONFIG_FB_RADEON_MODULE 1
++#define CONFIG_FB_RADEON_I2C 1
++#undef CONFIG_FB_RADEON_DEBUG
++#define CONFIG_FB_ATY128_MODULE 1
++#define CONFIG_FB_ATY_MODULE 1
++#define CONFIG_FB_ATY_CT 1
++#define CONFIG_FB_ATY_GENERIC_LCD 1
++#define CONFIG_FB_ATY_XL_INIT 1
++#define CONFIG_FB_ATY_GX 1
++#define CONFIG_FB_SAVAGE_MODULE 1
++#define CONFIG_FB_SAVAGE_I2C 1
++#define CONFIG_FB_SAVAGE_ACCEL 1
++#define CONFIG_FB_SIS_MODULE 1
++#define CONFIG_FB_SIS_300 1
++#define CONFIG_FB_SIS_315 1
++#define CONFIG_FB_NEOMAGIC_MODULE 1
++#define CONFIG_FB_KYRO_MODULE 1
++#define CONFIG_FB_3DFX_MODULE 1
++#undef CONFIG_FB_3DFX_ACCEL
++#define CONFIG_FB_VOODOO1_MODULE 1
++#define CONFIG_FB_TRIDENT_MODULE 1
++#undef CONFIG_FB_TRIDENT_ACCEL
++#undef CONFIG_FB_PM3
++#define CONFIG_FB_GEODE 1
++#define CONFIG_FB_GEODE_GX1_MODULE 1
++#define CONFIG_FB_S1D13XXX_MODULE 1
++#define CONFIG_FB_VIRTUAL_MODULE 1
++
++/*
++ * Console display driver support
++ */
++#define CONFIG_VGA_CONSOLE 1
++#define CONFIG_MDA_CONSOLE_MODULE 1
++#define CONFIG_DUMMY_CONSOLE 1
++#define CONFIG_FRAMEBUFFER_CONSOLE_MODULE 1
++#undef CONFIG_FONTS
++#define CONFIG_FONT_8x8 1
++#define CONFIG_FONT_8x16 1
++
++/*
++ * Logo configuration
++ */
++#undef CONFIG_LOGO
++#undef CONFIG_BACKLIGHT_LCD_SUPPORT
++
++/*
++ * Sound
++ */
++#define CONFIG_SOUND_MODULE 1
++
++/*
++ * Advanced Linux Sound Architecture
++ */
++#define CONFIG_SND_MODULE 1
++#define CONFIG_SND_TIMER_MODULE 1
++#define CONFIG_SND_PCM_MODULE 1
++#define CONFIG_SND_HWDEP_MODULE 1
++#define CONFIG_SND_RAWMIDI_MODULE 1
++#define CONFIG_SND_SEQUENCER_MODULE 1
++#define CONFIG_SND_SEQ_DUMMY_MODULE 1
++#define CONFIG_SND_OSSEMUL 1
++#define CONFIG_SND_MIXER_OSS_MODULE 1
++#define CONFIG_SND_PCM_OSS_MODULE 1
++#define CONFIG_SND_SEQUENCER_OSS 1
++#define CONFIG_SND_RTCTIMER_MODULE 1
++#undef CONFIG_SND_VERBOSE_PRINTK
++#undef CONFIG_SND_DEBUG
++#define CONFIG_SND_GENERIC_PM 1
++
++/*
++ * Generic devices
++ */
++#define CONFIG_SND_MPU401_UART_MODULE 1
++#define CONFIG_SND_OPL3_LIB_MODULE 1
++#define CONFIG_SND_OPL4_LIB_MODULE 1
++#define CONFIG_SND_VX_LIB_MODULE 1
++#define CONFIG_SND_DUMMY_MODULE 1
++#define CONFIG_SND_VIRMIDI_MODULE 1
++#define CONFIG_SND_MTPAV_MODULE 1
++#define CONFIG_SND_SERIAL_U16550_MODULE 1
++#define CONFIG_SND_MPU401_MODULE 1
++
++/*
++ * ISA devices
++ */
++#define CONFIG_SND_AD1848_LIB_MODULE 1
++#define CONFIG_SND_CS4231_LIB_MODULE 1
++#define CONFIG_SND_AD1816A_MODULE 1
++#define CONFIG_SND_AD1848_MODULE 1
++#define CONFIG_SND_CS4231_MODULE 1
++#define CONFIG_SND_CS4232_MODULE 1
++#define CONFIG_SND_CS4236_MODULE 1
++#define CONFIG_SND_ES968_MODULE 1
++#define CONFIG_SND_ES1688_MODULE 1
++#define CONFIG_SND_ES18XX_MODULE 1
++#define CONFIG_SND_GUS_SYNTH_MODULE 1
++#define CONFIG_SND_GUSCLASSIC_MODULE 1
++#define CONFIG_SND_GUSEXTREME_MODULE 1
++#define CONFIG_SND_GUSMAX_MODULE 1
++#define CONFIG_SND_INTERWAVE_MODULE 1
++#define CONFIG_SND_INTERWAVE_STB_MODULE 1
++#define CONFIG_SND_OPTI92X_AD1848_MODULE 1
++#define CONFIG_SND_OPTI92X_CS4231_MODULE 1
++#define CONFIG_SND_OPTI93X_MODULE 1
++#define CONFIG_SND_SB8_MODULE 1
++#define CONFIG_SND_SB16_MODULE 1
++#define CONFIG_SND_SBAWE_MODULE 1
++#define CONFIG_SND_SB16_CSP 1
++#define CONFIG_SND_WAVEFRONT_MODULE 1
++#define CONFIG_SND_ALS100_MODULE 1
++#define CONFIG_SND_AZT2320_MODULE 1
++#define CONFIG_SND_CMI8330_MODULE 1
++#define CONFIG_SND_DT019X_MODULE 1
++#define CONFIG_SND_OPL3SA2_MODULE 1
++#define CONFIG_SND_SGALAXY_MODULE 1
++#define CONFIG_SND_SSCAPE_MODULE 1
++
++/*
++ * PCI devices
++ */
++#define CONFIG_SND_AC97_CODEC_MODULE 1
++#define CONFIG_SND_ALI5451_MODULE 1
++#define CONFIG_SND_ATIIXP_MODULE 1
++#define CONFIG_SND_ATIIXP_MODEM_MODULE 1
++#define CONFIG_SND_AU8810_MODULE 1
++#define CONFIG_SND_AU8820_MODULE 1
++#define CONFIG_SND_AU8830_MODULE 1
++#define CONFIG_SND_AZT3328_MODULE 1
++#define CONFIG_SND_BT87X_MODULE 1
++#undef CONFIG_SND_BT87X_OVERCLOCK
++#define CONFIG_SND_CS46XX_MODULE 1
++#define CONFIG_SND_CS46XX_NEW_DSP 1
++#define CONFIG_SND_CS4281_MODULE 1
++#define CONFIG_SND_EMU10K1_MODULE 1
++#undef CONFIG_SND_EMU10K1X
++#undef CONFIG_SND_CA0106
++#define CONFIG_SND_KORG1212_MODULE 1
++#define CONFIG_SND_MIXART_MODULE 1
++#define CONFIG_SND_NM256_MODULE 1
++#define CONFIG_SND_RME32_MODULE 1
++#define CONFIG_SND_RME96_MODULE 1
++#define CONFIG_SND_RME9652_MODULE 1
++#define CONFIG_SND_HDSP_MODULE 1
++#define CONFIG_SND_TRIDENT_MODULE 1
++#define CONFIG_SND_YMFPCI_MODULE 1
++#define CONFIG_SND_ALS4000_MODULE 1
++#define CONFIG_SND_CMIPCI_MODULE 1
++#define CONFIG_SND_ENS1370_MODULE 1
++#define CONFIG_SND_ENS1371_MODULE 1
++#define CONFIG_SND_ES1938_MODULE 1
++#define CONFIG_SND_ES1968_MODULE 1
++#define CONFIG_SND_MAESTRO3_MODULE 1
++#define CONFIG_SND_FM801_MODULE 1
++#define CONFIG_SND_FM801_TEA575X_MODULE 1
++#define CONFIG_SND_ICE1712_MODULE 1
++#define CONFIG_SND_ICE1724_MODULE 1
++#define CONFIG_SND_INTEL8X0_MODULE 1
++#define CONFIG_SND_INTEL8X0M_MODULE 1
++#define CONFIG_SND_SONICVIBES_MODULE 1
++#define CONFIG_SND_VIA82XX_MODULE 1
++#undef CONFIG_SND_VIA82XX_MODEM
++#define CONFIG_SND_VX222_MODULE 1
++#define CONFIG_SND_HDA_INTEL_MODULE 1
++
++/*
++ * USB devices
++ */
++#define CONFIG_SND_USB_AUDIO_MODULE 1
++#define CONFIG_SND_USB_USX2Y_MODULE 1
++
++/*
++ * PCMCIA devices
++ */
++#define CONFIG_SND_VXPOCKET_MODULE 1
++#define CONFIG_SND_VXP440_MODULE 1
++#define CONFIG_SND_PDAUDIOCF_MODULE 1
++
++/*
++ * Open Sound System
++ */
++#define CONFIG_SOUND_PRIME_MODULE 1
++#define CONFIG_SOUND_BT878_MODULE 1
++#define CONFIG_SOUND_CMPCI_MODULE 1
++#undef CONFIG_SOUND_CMPCI_FM
++#undef CONFIG_SOUND_CMPCI_MIDI
++#define CONFIG_SOUND_CMPCI_JOYSTICK 1
++#define CONFIG_SOUND_EMU10K1_MODULE 1
++#define CONFIG_MIDI_EMU10K1 1
++#define CONFIG_SOUND_FUSION_MODULE 1
++#define CONFIG_SOUND_CS4281_MODULE 1
++#define CONFIG_SOUND_ES1370_MODULE 1
++#define CONFIG_SOUND_ES1371_MODULE 1
++#define CONFIG_SOUND_ESSSOLO1_MODULE 1
++#define CONFIG_SOUND_MAESTRO_MODULE 1
++#define CONFIG_SOUND_MAESTRO3_MODULE 1
++#define CONFIG_SOUND_ICH_MODULE 1
++#define CONFIG_SOUND_SONICVIBES_MODULE 1
++#define CONFIG_SOUND_TRIDENT_MODULE 1
++#undef CONFIG_SOUND_MSNDCLAS
++#undef CONFIG_SOUND_MSNDPIN
++#define CONFIG_SOUND_VIA82CXXX_MODULE 1
++#define CONFIG_MIDI_VIA82CXXX 1
++#define CONFIG_SOUND_OSS_MODULE 1
++#undef CONFIG_SOUND_TRACEINIT
++#undef CONFIG_SOUND_DMAP
++#undef CONFIG_SOUND_AD1816
++#define CONFIG_SOUND_AD1889_MODULE 1
++#define CONFIG_SOUND_SGALAXY_MODULE 1
++#define CONFIG_SOUND_ADLIB_MODULE 1
++#define CONFIG_SOUND_ACI_MIXER_MODULE 1
++#define CONFIG_SOUND_CS4232_MODULE 1
++#define CONFIG_SOUND_SSCAPE_MODULE 1
++#define CONFIG_SOUND_GUS_MODULE 1
++#define CONFIG_SOUND_GUS16 1
++#define CONFIG_SOUND_GUSMAX 1
++#define CONFIG_SOUND_VMIDI_MODULE 1
++#define CONFIG_SOUND_TRIX_MODULE 1
++#define CONFIG_SOUND_MSS_MODULE 1
++#define CONFIG_SOUND_MPU401_MODULE 1
++#define CONFIG_SOUND_NM256_MODULE 1
++#define CONFIG_SOUND_MAD16_MODULE 1
++#define CONFIG_MAD16_OLDCARD 1
++#define CONFIG_SOUND_PAS_MODULE 1
++#define CONFIG_SOUND_PSS_MODULE 1
++#define CONFIG_PSS_MIXER 1
++#define CONFIG_SOUND_SB_MODULE 1
++#undef CONFIG_SOUND_AWE32_SYNTH
++#define CONFIG_SOUND_WAVEFRONT_MODULE 1
++#define CONFIG_SOUND_MAUI_MODULE 1
++#define CONFIG_SOUND_YM3812_MODULE 1
++#define CONFIG_SOUND_OPL3SA1_MODULE 1
++#define CONFIG_SOUND_OPL3SA2_MODULE 1
++#define CONFIG_SOUND_YMFPCI_MODULE 1
++#undef CONFIG_SOUND_YMFPCI_LEGACY
++#define CONFIG_SOUND_UART6850_MODULE 1
++#define CONFIG_SOUND_AEDSP16_MODULE 1
++#define CONFIG_SC6600 1
++#define CONFIG_SC6600_JOY 1
++#define CONFIG_SC6600_CDROM 4
++#define CONFIG_SC6600_CDROMBASE 0x0
++#undef CONFIG_AEDSP16_MSS
++#undef CONFIG_AEDSP16_SBPRO
++#undef CONFIG_AEDSP16_MPU401
++#define CONFIG_SOUND_TVMIXER_MODULE 1
++#define CONFIG_SOUND_KAHLUA_MODULE 1
++#define CONFIG_SOUND_ALI5455_MODULE 1
++#define CONFIG_SOUND_FORTE_MODULE 1
++#define CONFIG_SOUND_RME96XX_MODULE 1
++#define CONFIG_SOUND_AD1980_MODULE 1
++
++/*
++ * USB support
++ */
++#define CONFIG_USB_ARCH_HAS_HCD 1
++#define CONFIG_USB_ARCH_HAS_OHCI 1
++#define CONFIG_USB 1
++#undef CONFIG_USB_DEBUG
++
++/*
++ * Miscellaneous USB options
++ */
++#define CONFIG_USB_DEVICEFS 1
++#define CONFIG_USB_BANDWIDTH 1
++#undef CONFIG_USB_DYNAMIC_MINORS
++#undef CONFIG_USB_OTG
++
++/*
++ * USB Host Controller Drivers
++ */
++#define CONFIG_USB_EHCI_HCD 1
++#define CONFIG_USB_EHCI_SPLIT_ISO 1
++#define CONFIG_USB_EHCI_ROOT_HUB_TT 1
++#define CONFIG_USB_OHCI_HCD_MODULE 1
++#undef CONFIG_USB_OHCI_BIG_ENDIAN
++#define CONFIG_USB_OHCI_LITTLE_ENDIAN 1
++#define CONFIG_USB_UHCI_HCD_MODULE 1
++#define CONFIG_USB_SL811_HCD_MODULE 1
++#define CONFIG_USB_SL811_CS_MODULE 1
++
++/*
++ * USB Device Class drivers
++ */
++#define CONFIG_USB_AUDIO_MODULE 1
++
++/*
++ * USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
++ */
++#define CONFIG_USB_MIDI_MODULE 1
++#define CONFIG_USB_ACM_MODULE 1
++#define CONFIG_USB_PRINTER_MODULE 1
++
++/*
++ * NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
++ */
++#define CONFIG_USB_STORAGE_MODULE 1
++#undef CONFIG_USB_STORAGE_DEBUG
++#define CONFIG_USB_STORAGE_DATAFAB 1
++#define CONFIG_USB_STORAGE_FREECOM 1
++#define CONFIG_USB_STORAGE_ISD200 1
++#define CONFIG_USB_STORAGE_DPCM 1
++#define CONFIG_USB_STORAGE_USBAT 1
++#define CONFIG_USB_STORAGE_SDDR09 1
++#define CONFIG_USB_STORAGE_SDDR55 1
++#define CONFIG_USB_STORAGE_JUMPSHOT 1
++
++/*
++ * USB Input Devices
++ */
++#define CONFIG_USB_HID_MODULE 1
++#define CONFIG_USB_HIDINPUT 1
++#undef CONFIG_HID_FF
++#define CONFIG_USB_HIDDEV 1
++
++/*
++ * USB HID Boot Protocol drivers
++ */
++#define CONFIG_USB_KBD_MODULE 1
++#define CONFIG_USB_MOUSE_MODULE 1
++#define CONFIG_USB_AIPTEK_MODULE 1
++#define CONFIG_USB_WACOM_MODULE 1
++#define CONFIG_USB_KBTAB_MODULE 1
++#define CONFIG_USB_POWERMATE_MODULE 1
++#define CONFIG_USB_MTOUCH_MODULE 1
++#define CONFIG_USB_EGALAX_MODULE 1
++#define CONFIG_USB_XPAD_MODULE 1
++#define CONFIG_USB_ATI_REMOTE_MODULE 1
++
++/*
++ * USB Imaging devices
++ */
++#define CONFIG_USB_MDC800_MODULE 1
++#define CONFIG_USB_MICROTEK_MODULE 1
++
++/*
++ * USB Multimedia devices
++ */
++#undef CONFIG_USB_DABUSB
++#define CONFIG_USB_VICAM_MODULE 1
++#define CONFIG_USB_DSBR_MODULE 1
++#define CONFIG_USB_IBMCAM_MODULE 1
++#define CONFIG_USB_KONICAWC_MODULE 1
++#define CONFIG_USB_OV511_MODULE 1
++#define CONFIG_USB_SE401_MODULE 1
++#define CONFIG_USB_SN9C102_MODULE 1
++#define CONFIG_USB_STV680_MODULE 1
++#define CONFIG_USB_W9968CF_MODULE 1
++#define CONFIG_USB_PWC_MODULE 1
++
++/*
++ * USB Network Adapters
++ */
++#define CONFIG_USB_CATC_MODULE 1
++#define CONFIG_USB_KAWETH_MODULE 1
++#define CONFIG_USB_PEGASUS_MODULE 1
++#define CONFIG_USB_RTL8150_MODULE 1
++#define CONFIG_USB_USBNET_MODULE 1
++
++/*
++ * USB Host-to-Host Cables
++ */
++#define CONFIG_USB_ALI_M5632 1
++#define CONFIG_USB_AN2720 1
++#define CONFIG_USB_BELKIN 1
++#define CONFIG_USB_GENESYS 1
++#define CONFIG_USB_NET1080 1
++#define CONFIG_USB_PL2301 1
++#define CONFIG_USB_KC2190 1
++
++/*
++ * Intelligent USB Devices/Gadgets
++ */
++#define CONFIG_USB_ARMLINUX 1
++#define CONFIG_USB_EPSON2888 1
++#define CONFIG_USB_ZAURUS 1
++#define CONFIG_USB_CDCETHER 1
++
++/*
++ * USB Network Adapters
++ */
++#define CONFIG_USB_AX8817X 1
++#define CONFIG_USB_ZD1201_MODULE 1
++#define CONFIG_USB_MON_MODULE 1
++
++/*
++ * USB port drivers
++ */
++#define CONFIG_USB_USS720_MODULE 1
++
++/*
++ * USB Serial Converter support
++ */
++#define CONFIG_USB_SERIAL_MODULE 1
++#define CONFIG_USB_SERIAL_GENERIC 1
++#define CONFIG_USB_SERIAL_AIRPRIME_MODULE 1
++#define CONFIG_USB_SERIAL_BELKIN_MODULE 1
++#define CONFIG_USB_SERIAL_WHITEHEAT_MODULE 1
++#define CONFIG_USB_SERIAL_DIGI_ACCELEPORT_MODULE 1
++#define CONFIG_USB_SERIAL_CP2101_MODULE 1
++#define CONFIG_USB_SERIAL_CYPRESS_M8_MODULE 1
++#define CONFIG_USB_SERIAL_EMPEG_MODULE 1
++#define CONFIG_USB_SERIAL_FTDI_SIO_MODULE 1
++#define CONFIG_USB_SERIAL_VISOR_MODULE 1
++#define CONFIG_USB_SERIAL_IPAQ_MODULE 1
++#define CONFIG_USB_SERIAL_IR_MODULE 1
++#define CONFIG_USB_SERIAL_EDGEPORT_MODULE 1
++#define CONFIG_USB_SERIAL_EDGEPORT_TI_MODULE 1
++#undef CONFIG_USB_SERIAL_GARMIN
++#define CONFIG_USB_SERIAL_IPW_MODULE 1
++#define CONFIG_USB_SERIAL_KEYSPAN_PDA_MODULE 1
++#define CONFIG_USB_SERIAL_KEYSPAN_MODULE 1
++#undef CONFIG_USB_SERIAL_KEYSPAN_MPR
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA28
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA28X
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA28XA
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA28XB
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA19
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA18X
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA19W
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA19QW
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA19QI
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA49W
++#undef CONFIG_USB_SERIAL_KEYSPAN_USA49WLC
++#define CONFIG_USB_SERIAL_KLSI_MODULE 1
++#define CONFIG_USB_SERIAL_KOBIL_SCT_MODULE 1
++#define CONFIG_USB_SERIAL_MCT_U232_MODULE 1
++#define CONFIG_USB_SERIAL_PL2303_MODULE 1
++#define CONFIG_USB_SERIAL_HP4X_MODULE 1
++#define CONFIG_USB_SERIAL_SAFE_MODULE 1
++#undef CONFIG_USB_SERIAL_SAFE_PADDED
++#undef CONFIG_USB_SERIAL_TI
++#define CONFIG_USB_SERIAL_CYBERJACK_MODULE 1
++#define CONFIG_USB_SERIAL_XIRCOM_MODULE 1
++#define CONFIG_USB_SERIAL_OPTION_MODULE 1
++#define CONFIG_USB_SERIAL_OMNINET_MODULE 1
++#define CONFIG_USB_EZUSB 1
++
++/*
++ * USB Miscellaneous drivers
++ */
++#undef CONFIG_USB_EMI62
++#undef CONFIG_USB_EMI26
++#define CONFIG_USB_AUERSWALD_MODULE 1
++#define CONFIG_USB_RIO500_MODULE 1
++#define CONFIG_USB_LEGOTOWER_MODULE 1
++#define CONFIG_USB_LCD_MODULE 1
++#define CONFIG_USB_LED_MODULE 1
++#define CONFIG_USB_CYTHERM_MODULE 1
++#define CONFIG_USB_PHIDGETKIT_MODULE 1
++#define CONFIG_USB_PHIDGETSERVO_MODULE 1
++#undef CONFIG_USB_IDMOUSE
++#define CONFIG_USB_SISUSBVGA_MODULE 1
++#define CONFIG_USB_TEST_MODULE 1
++
++/*
++ * USB ATM/DSL drivers
++ */
++#define CONFIG_USB_ATM_MODULE 1
++#define CONFIG_USB_SPEEDTOUCH_MODULE 1
++
++/*
++ * USB Gadget Support
++ */
++#define CONFIG_USB_GADGET_MODULE 1
++#undef CONFIG_USB_GADGET_DEBUG_FILES
++#define CONFIG_USB_GADGET_NET2280 1
++#define CONFIG_USB_NET2280_MODULE 1
++#undef CONFIG_USB_GADGET_PXA2XX
++#undef CONFIG_USB_GADGET_GOKU
++#undef CONFIG_USB_GADGET_LH7A40X
++#undef CONFIG_USB_GADGET_OMAP
++#undef CONFIG_USB_GADGET_DUMMY_HCD
++#define CONFIG_USB_GADGET_DUALSPEED 1
++#define CONFIG_USB_ZERO_MODULE 1
++#define CONFIG_USB_ETH_MODULE 1
++#define CONFIG_USB_ETH_RNDIS 1
++#define CONFIG_USB_GADGETFS_MODULE 1
++#define CONFIG_USB_FILE_STORAGE_MODULE 1
++#undef CONFIG_USB_FILE_STORAGE_TEST
++#define CONFIG_USB_G_SERIAL_MODULE 1
++
++/*
++ * MMC/SD Card support
++ */
++#undef CONFIG_MMC
++
++/*
++ * InfiniBand support
++ */
++#undef CONFIG_INFINIBAND
++
++/*
++ * Power management options
++ */
++
++/*
++ * ACPI (Advanced Configuration and Power Interface) Support
++ */
++#define CONFIG_ACPI 1
++#define CONFIG_ACPI_BOOT 1
++#define CONFIG_ACPI_INTERPRETER 1
++#define CONFIG_ACPI_AC_MODULE 1
++#define CONFIG_ACPI_BATTERY_MODULE 1
++#define CONFIG_ACPI_BUTTON_MODULE 1
++#define CONFIG_ACPI_VIDEO_MODULE 1
++#define CONFIG_ACPI_FAN_MODULE 1
++#define CONFIG_ACPI_PROCESSOR_MODULE 1
++#undef CONFIG_ACPI_HOTPLUG_CPU
++#define CONFIG_ACPI_THERMAL_MODULE 1
++#define CONFIG_ACPI_ASUS_MODULE 1
++#define CONFIG_ACPI_IBM_MODULE 1
++#define CONFIG_ACPI_TOSHIBA_MODULE 1
++#define CONFIG_ACPI_BLACKLIST_YEAR 0
++#undef CONFIG_ACPI_DEBUG
++#define CONFIG_ACPI_BUS 1
++#define CONFIG_ACPI_EC 1
++#define CONFIG_ACPI_POWER 1
++#define CONFIG_ACPI_PCI 1
++#define CONFIG_ACPI_SYSTEM 1
++#undef CONFIG_X86_PM_TIMER
++#undef CONFIG_ACPI_CONTAINER
++
++/*
++ * File systems
++ */
++#define CONFIG_EXT2_FS 1
++#define CONFIG_EXT2_FS_XATTR 1
++#define CONFIG_EXT2_FS_POSIX_ACL 1
++#define CONFIG_EXT2_FS_SECURITY 1
++#define CONFIG_EXT3_FS_MODULE 1
++#define CONFIG_EXT3_FS_XATTR 1
++#define CONFIG_EXT3_FS_POSIX_ACL 1
++#define CONFIG_EXT3_FS_SECURITY 1
++#define CONFIG_JBD_MODULE 1
++#undef CONFIG_JBD_DEBUG
++#define CONFIG_FS_MBCACHE 1
++#define CONFIG_REISERFS_FS_MODULE 1
++#undef CONFIG_REISERFS_CHECK
++#undef CONFIG_REISERFS_PROC_INFO
++#undef CONFIG_REISERFS_FS_XATTR
++#define CONFIG_JFS_FS_MODULE 1
++#define CONFIG_JFS_POSIX_ACL 1
++#undef CONFIG_JFS_SECURITY
++#undef CONFIG_JFS_DEBUG
++#define CONFIG_JFS_STATISTICS 1
++#define CONFIG_FS_POSIX_ACL 1
++
++/*
++ * XFS support
++ */
++#define CONFIG_XFS_FS_MODULE 1
++#define CONFIG_XFS_EXPORT 1
++#define CONFIG_XFS_RT 1
++#define CONFIG_XFS_QUOTA 1
++#define CONFIG_XFS_SECURITY 1
++#define CONFIG_XFS_POSIX_ACL 1
++#define CONFIG_MINIX_FS_MODULE 1
++#define CONFIG_ROMFS_FS_MODULE 1
++#define CONFIG_QUOTA 1
++#define CONFIG_QFMT_V1_MODULE 1
++#define CONFIG_QFMT_V2_MODULE 1
++#define CONFIG_QUOTACTL 1
++#define CONFIG_DNOTIFY 1
++#define CONFIG_AUTOFS_FS_MODULE 1
++#define CONFIG_AUTOFS4_FS_MODULE 1
++
++/*
++ * CD-ROM/DVD Filesystems
++ */
++#define CONFIG_ISO9660_FS_MODULE 1
++#define CONFIG_JOLIET 1
++#define CONFIG_ZISOFS 1
++#define CONFIG_ZISOFS_FS_MODULE 1
++#define CONFIG_UDF_FS_MODULE 1
++#define CONFIG_UDF_NLS 1
++
++/*
++ * DOS/FAT/NT Filesystems
++ */
++#define CONFIG_FAT_FS_MODULE 1
++#define CONFIG_MSDOS_FS_MODULE 1
++#define CONFIG_VFAT_FS_MODULE 1
++#define CONFIG_FAT_DEFAULT_CODEPAGE 437
++#define CONFIG_FAT_DEFAULT_IOCHARSET "iso8859-1"
++#define CONFIG_NTFS_FS_MODULE 1
++#undef CONFIG_NTFS_DEBUG
++#undef CONFIG_NTFS_RW
++
++/*
++ * Pseudo filesystems
++ */
++#define CONFIG_PROC_FS 1
++#define CONFIG_PROC_KCORE 1
++#define CONFIG_SYSFS 1
++#undef CONFIG_DEVFS_FS
++#define CONFIG_DEVPTS_FS_XATTR 1
++#define CONFIG_DEVPTS_FS_SECURITY 1
++#define CONFIG_TMPFS 1
++#define CONFIG_TMPFS_XATTR 1
++#define CONFIG_TMPFS_SECURITY 1
++#undef CONFIG_HUGETLBFS
++#undef CONFIG_HUGETLB_PAGE
++#define CONFIG_RAMFS 1
++
++/*
++ * Miscellaneous filesystems
++ */
++#define CONFIG_ADFS_FS_MODULE 1
++#undef CONFIG_ADFS_FS_RW
++#define CONFIG_AFFS_FS_MODULE 1
++#define CONFIG_HFS_FS_MODULE 1
++#define CONFIG_HFSPLUS_FS_MODULE 1
++#define CONFIG_BEFS_FS_MODULE 1
++#undef CONFIG_BEFS_DEBUG
++#define CONFIG_BFS_FS_MODULE 1
++#define CONFIG_EFS_FS_MODULE 1
++#define CONFIG_JFFS_FS_MODULE 1
++#define CONFIG_JFFS_FS_VERBOSE 0
++#define CONFIG_JFFS_PROC_FS 1
++#define CONFIG_JFFS2_FS_MODULE 1
++#define CONFIG_JFFS2_FS_DEBUG 0
++#undef CONFIG_JFFS2_FS_NAND
++#undef CONFIG_JFFS2_FS_NOR_ECC
++#undef CONFIG_JFFS2_COMPRESSION_OPTIONS
++#define CONFIG_JFFS2_ZLIB 1
++#define CONFIG_JFFS2_RTIME 1
++#undef CONFIG_JFFS2_RUBIN
++#define CONFIG_CRAMFS 1
++#define CONFIG_VXFS_FS_MODULE 1
++#define CONFIG_HPFS_FS_MODULE 1
++#define CONFIG_QNX4FS_FS_MODULE 1
++#undef CONFIG_QNX4FS_RW
++#define CONFIG_SYSV_FS_MODULE 1
++#define CONFIG_UFS_FS_MODULE 1
++#undef CONFIG_UFS_FS_WRITE
++
++/*
++ * Network File Systems
++ */
++#define CONFIG_NFS_FS_MODULE 1
++#define CONFIG_NFS_V3 1
++#define CONFIG_NFS_V4 1
++#define CONFIG_NFS_DIRECTIO 1
++#define CONFIG_NFSD_MODULE 1
++#define CONFIG_NFSD_V3 1
++#define CONFIG_NFSD_V4 1
++#define CONFIG_NFSD_TCP 1
++#define CONFIG_LOCKD_MODULE 1
++#define CONFIG_LOCKD_V4 1
++#define CONFIG_EXPORTFS_MODULE 1
++#define CONFIG_SUNRPC_MODULE 1
++#define CONFIG_SUNRPC_GSS_MODULE 1
++#define CONFIG_RPCSEC_GSS_KRB5_MODULE 1
++#define CONFIG_RPCSEC_GSS_SPKM3_MODULE 1
++#define CONFIG_SMB_FS_MODULE 1
++#undef CONFIG_SMB_NLS_DEFAULT
++#define CONFIG_CIFS_MODULE 1
++#undef CONFIG_CIFS_STATS
++#undef CONFIG_CIFS_XATTR
++#undef CONFIG_CIFS_EXPERIMENTAL
++#define CONFIG_NCP_FS_MODULE 1
++#define CONFIG_NCPFS_PACKET_SIGNING 1
++#define CONFIG_NCPFS_IOCTL_LOCKING 1
++#define CONFIG_NCPFS_STRONG 1
++#define CONFIG_NCPFS_NFS_NS 1
++#define CONFIG_NCPFS_OS2_NS 1
++#undef CONFIG_NCPFS_SMALLDOS
++#define CONFIG_NCPFS_NLS 1
++#define CONFIG_NCPFS_EXTRAS 1
++#define CONFIG_CODA_FS_MODULE 1
++#undef CONFIG_CODA_FS_OLD_API
++#define CONFIG_AFS_FS_MODULE 1
++#define CONFIG_RXRPC_MODULE 1
++
++/*
++ * Partition Types
++ */
++#define CONFIG_PARTITION_ADVANCED 1
++#define CONFIG_ACORN_PARTITION 1
++#define CONFIG_ACORN_PARTITION_CUMANA 1
++#undef CONFIG_ACORN_PARTITION_EESOX
++#define CONFIG_ACORN_PARTITION_ICS 1
++#undef CONFIG_ACORN_PARTITION_ADFS
++#undef CONFIG_ACORN_PARTITION_POWERTEC
++#define CONFIG_ACORN_PARTITION_RISCIX 1
++#define CONFIG_OSF_PARTITION 1
++#define CONFIG_AMIGA_PARTITION 1
++#define CONFIG_ATARI_PARTITION 1
++#define CONFIG_MAC_PARTITION 1
++#define CONFIG_MSDOS_PARTITION 1
++#define CONFIG_BSD_DISKLABEL 1
++#define CONFIG_MINIX_SUBPARTITION 1
++#define CONFIG_SOLARIS_X86_PARTITION 1
++#define CONFIG_UNIXWARE_DISKLABEL 1
++#define CONFIG_LDM_PARTITION 1
++#undef CONFIG_LDM_DEBUG
++#define CONFIG_SGI_PARTITION 1
++#define CONFIG_ULTRIX_PARTITION 1
++#define CONFIG_SUN_PARTITION 1
++#define CONFIG_EFI_PARTITION 1
++
++/*
++ * Native Language Support
++ */
++#define CONFIG_NLS 1
++#define CONFIG_NLS_DEFAULT "cp437"
++#define CONFIG_NLS_CODEPAGE_437_MODULE 1
++#define CONFIG_NLS_CODEPAGE_737_MODULE 1
++#define CONFIG_NLS_CODEPAGE_775_MODULE 1
++#define CONFIG_NLS_CODEPAGE_850_MODULE 1
++#define CONFIG_NLS_CODEPAGE_852_MODULE 1
++#define CONFIG_NLS_CODEPAGE_855_MODULE 1
++#define CONFIG_NLS_CODEPAGE_857_MODULE 1
++#define CONFIG_NLS_CODEPAGE_860_MODULE 1
++#define CONFIG_NLS_CODEPAGE_861_MODULE 1
++#define CONFIG_NLS_CODEPAGE_862_MODULE 1
++#define CONFIG_NLS_CODEPAGE_863_MODULE 1
++#define CONFIG_NLS_CODEPAGE_864_MODULE 1
++#define CONFIG_NLS_CODEPAGE_865_MODULE 1
++#define CONFIG_NLS_CODEPAGE_866_MODULE 1
++#define CONFIG_NLS_CODEPAGE_869_MODULE 1
++#define CONFIG_NLS_CODEPAGE_936_MODULE 1
++#define CONFIG_NLS_CODEPAGE_950_MODULE 1
++#define CONFIG_NLS_CODEPAGE_932_MODULE 1
++#define CONFIG_NLS_CODEPAGE_949_MODULE 1
++#define CONFIG_NLS_CODEPAGE_874_MODULE 1
++#define CONFIG_NLS_ISO8859_8_MODULE 1
++#define CONFIG_NLS_CODEPAGE_1250_MODULE 1
++#define CONFIG_NLS_CODEPAGE_1251_MODULE 1
++#define CONFIG_NLS_ASCII_MODULE 1
++#define CONFIG_NLS_ISO8859_1_MODULE 1
++#define CONFIG_NLS_ISO8859_2_MODULE 1
++#define CONFIG_NLS_ISO8859_3_MODULE 1
++#define CONFIG_NLS_ISO8859_4_MODULE 1
++#define CONFIG_NLS_ISO8859_5_MODULE 1
++#define CONFIG_NLS_ISO8859_6_MODULE 1
++#define CONFIG_NLS_ISO8859_7_MODULE 1
++#define CONFIG_NLS_ISO8859_9_MODULE 1
++#define CONFIG_NLS_ISO8859_13_MODULE 1
++#define CONFIG_NLS_ISO8859_14_MODULE 1
++#define CONFIG_NLS_ISO8859_15_MODULE 1
++#define CONFIG_NLS_KOI8_R_MODULE 1
++#define CONFIG_NLS_KOI8_U_MODULE 1
++#define CONFIG_NLS_UTF8_MODULE 1
++
++/*
++ * Security options
++ */
++#define CONFIG_KEYS 1
++#undef CONFIG_KEYS_DEBUG_PROC_KEYS
++#define CONFIG_SECURITY 1
++#undef CONFIG_SECURITY_NETWORK
++#define CONFIG_SECURITY_CAPABILITIES 1
++#define CONFIG_SECURITY_ROOTPLUG_MODULE 1
++#define CONFIG_SECURITY_SECLVL_MODULE 1
++#define CONFIG_SECURITY_SELINUX 1
++#define CONFIG_SECURITY_SELINUX_BOOTPARAM 1
++#define CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE 0
++#define CONFIG_SECURITY_SELINUX_DISABLE 1
++#define CONFIG_SECURITY_SELINUX_DEVELOP 1
++#define CONFIG_SECURITY_SELINUX_AVC_STATS 1
++#define CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE 1
++
++/*
++ * Cryptographic options
++ */
++#define CONFIG_CRYPTO 1
++#define CONFIG_CRYPTO_HMAC 1
++#define CONFIG_CRYPTO_NULL_MODULE 1
++#define CONFIG_CRYPTO_MD4_MODULE 1
++#define CONFIG_CRYPTO_MD5 1
++#define CONFIG_CRYPTO_SHA1_MODULE 1
++#define CONFIG_CRYPTO_SHA256_MODULE 1
++#define CONFIG_CRYPTO_SHA512_MODULE 1
++#define CONFIG_CRYPTO_WP512_MODULE 1
++#define CONFIG_CRYPTO_TGR192_MODULE 1
++#define CONFIG_CRYPTO_DES_MODULE 1
++#define CONFIG_CRYPTO_BLOWFISH_MODULE 1
++#define CONFIG_CRYPTO_TWOFISH_MODULE 1
++#define CONFIG_CRYPTO_SERPENT_MODULE 1
++#define CONFIG_CRYPTO_AES_586_MODULE 1
++#define CONFIG_CRYPTO_CAST5_MODULE 1
++#define CONFIG_CRYPTO_CAST6_MODULE 1
++#define CONFIG_CRYPTO_TEA_MODULE 1
++#define CONFIG_CRYPTO_ARC4_MODULE 1
++#define CONFIG_CRYPTO_KHAZAD_MODULE 1
++#define CONFIG_CRYPTO_ANUBIS_MODULE 1
++#define CONFIG_CRYPTO_DEFLATE_MODULE 1
++#define CONFIG_CRYPTO_MICHAEL_MIC_MODULE 1
++#define CONFIG_CRYPTO_CRC32C_MODULE 1
++#define CONFIG_CRYPTO_TEST_MODULE 1
++
++/*
++ * Hardware crypto devices
++ */
++#undef CONFIG_CRYPTO_DEV_PADLOCK
++
++/*
++ * Library routines
++ */
++#define CONFIG_CRC_CCITT_MODULE 1
++#define CONFIG_CRC32 1
++#define CONFIG_LIBCRC32C_MODULE 1
++#define CONFIG_ZLIB_INFLATE 1
++#define CONFIG_ZLIB_DEFLATE_MODULE 1
++#define CONFIG_REED_SOLOMON_MODULE 1
++#define CONFIG_REED_SOLOMON_DEC16 1
++
++/*
++ * Kernel hacking
++ */
++#undef CONFIG_PRINTK_TIME
++#define CONFIG_DEBUG_KERNEL 1
++#define CONFIG_MAGIC_SYSRQ 1
++#define CONFIG_LOG_BUF_SHIFT 14
++#undef CONFIG_SCHEDSTATS
++#undef CONFIG_DEBUG_SLAB
++#undef CONFIG_DEBUG_SPINLOCK
++#undef CONFIG_DEBUG_SPINLOCK_SLEEP
++#undef CONFIG_DEBUG_KOBJECT
++#undef CONFIG_DEBUG_HIGHMEM
++#undef CONFIG_DEBUG_BUGVERBOSE
++#undef CONFIG_DEBUG_INFO
++#undef CONFIG_DEBUG_FS
++#undef CONFIG_FRAME_POINTER
++#undef CONFIG_DEBUG_STACKOVERFLOW
++#undef CONFIG_KPROBES
++#undef CONFIG_DEBUG_STACK_USAGE
++#undef CONFIG_DEBUG_PAGEALLOC
++#undef CONFIG_4KSTACKS
++#define CONFIG_X86_FIND_SMP_CONFIG 1
++#define CONFIG_X86_MPPARSE 1
+diff -Nurp pristine-linux-2.6.12/include/linux/gfp.h linux-2.6.12-xen/include/linux/gfp.h
+--- pristine-linux-2.6.12/include/linux/gfp.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/linux/gfp.h	2006-03-05 23:36:31.000000000 +0100
+@@ -77,8 +77,12 @@ struct vm_area_struct;
+  * optimized to &contig_page_data at compile-time.
+  */
+ 
++/*
++ * If arch_free_page returns non-zero then the generic free_page code can
++ * immediately bail: the arch-specific function has done all the work.
++ */
+ #ifndef HAVE_ARCH_FREE_PAGE
+-static inline void arch_free_page(struct page *page, int order) { }
++#define arch_free_page(page, order) 0
+ #endif
+ 
+ extern struct page *
+diff -Nurp pristine-linux-2.6.12/include/linux/highmem.h linux-2.6.12-xen/include/linux/highmem.h
+--- pristine-linux-2.6.12/include/linux/highmem.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/linux/highmem.h	2006-03-05 23:36:31.000000000 +0100
+@@ -13,10 +13,12 @@
+ 
+ /* declarations for linux/mm/highmem.c */
+ unsigned int nr_free_highpages(void);
++void kmap_flush_unused(void);
+ 
+ #else /* CONFIG_HIGHMEM */
+ 
+ static inline unsigned int nr_free_highpages(void) { return 0; }
++static inline void kmap_flush_unused(void) { }
+ 
+ static inline void *kmap(struct page *page)
+ {
+diff -Nurp pristine-linux-2.6.12/include/linux/if_shaper.h linux-2.6.12-xen/include/linux/if_shaper.h
+--- pristine-linux-2.6.12/include/linux/if_shaper.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/linux/if_shaper.h	2006-03-05 23:54:36.927062367 +0100
+@@ -23,7 +23,7 @@ struct shaper
+ 	__u32 shapeclock;
+ 	unsigned long recovery;	/* Time we can next clock a packet out on
+ 				   an empty queue */
+-	struct semaphore sem;
++	spinlock_t lock;
+         struct net_device_stats stats;
+ 	struct net_device *dev;
+ 	int  (*hard_start_xmit) (struct sk_buff *skb,
+diff -Nurp pristine-linux-2.6.12/include/linux/init.h linux-2.6.12-xen/include/linux/init.h
+--- pristine-linux-2.6.12/include/linux/init.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/linux/init.h	2006-03-05 23:54:36.948059273 +0100
+@@ -229,6 +229,18 @@ void __init parse_early_param(void);
+ #define __devexitdata __exitdata
+ #endif
+ 
++#ifdef CONFIG_HOTPLUG_CPU
++#define __cpuinit
++#define __cpuinitdata
++#define __cpuexit
++#define __cpuexitdata
++#else
++#define __cpuinit	__init
++#define __cpuinitdata __initdata
++#define __cpuexit __exit
++#define __cpuexitdata	__exitdata
++#endif
++
+ /* Functions marked as __devexit may be discarded at kernel link time, depending
+    on config options.  Newer versions of binutils detect references from
+    retained sections to discarded sections and flag an error.  Pointers to
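+
The init.h hunk introduces the __cpuinit/__cpuexit family: with CONFIG_HOTPLUG_CPU the annotations are empty (the code must stay resident so CPUs can come and go at run time), otherwise they decay to __init/__exit and the text is discarded after boot. A small sketch of the intended use; the function name is illustrative:

    #include <linux/init.h>

    /* Kept in memory on CONFIG_HOTPLUG_CPU kernels, freed after boot
     * otherwise.
     */
    static int __cpuinit example_prepare_cpu(unsigned int cpu)
    {
            /* per-CPU bring-up work would go here */
            return 0;
    }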
+diff -Nurp pristine-linux-2.6.12/include/linux/irq.h linux-2.6.12-xen/include/linux/irq.h
+--- pristine-linux-2.6.12/include/linux/irq.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/linux/irq.h	2006-03-05 23:36:31.000000000 +0100
+@@ -74,6 +74,7 @@ extern irq_desc_t irq_desc [NR_IRQS];
+ #include <asm/hw_irq.h> /* the arch dependent stuff */
+ 
+ extern int setup_irq(unsigned int irq, struct irqaction * new);
++extern int teardown_irq(unsigned int irq, struct irqaction * old);
+ 
+ #ifdef CONFIG_GENERIC_HARDIRQS
+ extern cpumask_t irq_affinity[NR_IRQS];
+diff -Nurp pristine-linux-2.6.12/include/linux/mm.h linux-2.6.12-xen/include/linux/mm.h
+--- pristine-linux-2.6.12/include/linux/mm.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/linux/mm.h	2006-03-05 23:36:31.000000000 +0100
+@@ -161,6 +161,7 @@ extern unsigned int kobjsize(const void 
+ #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
+ #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
+ #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
++#define VM_FOREIGN	0x02000000	/* Has pages belonging to another VM */
+ 
+ #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
+ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+@@ -816,6 +817,12 @@ extern int check_user_page_readable(stru
+ int remap_pfn_range(struct vm_area_struct *, unsigned long,
+ 		unsigned long, unsigned long, pgprot_t);
+ 
++typedef int (*pte_fn_t)(pte_t *pte, struct page *pte_page, unsigned long addr, 
++                        void *data);
++extern int generic_page_range(struct mm_struct *mm, unsigned long address, 
++                              unsigned long size, pte_fn_t fn, void *data);
++
++
+ #ifdef CONFIG_PROC_FS
+ void __vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+ #else
+diff -Nurp pristine-linux-2.6.12/include/linux/skbuff.h linux-2.6.12-xen/include/linux/skbuff.h
+--- pristine-linux-2.6.12/include/linux/skbuff.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/linux/skbuff.h	2006-03-05 23:36:31.000000000 +0100
+@@ -177,6 +177,8 @@ struct skb_shared_info {
+  *	@local_df: allow local fragmentation
+  *	@cloned: Head may be cloned (check refcnt to be sure)
+  *	@nohdr: Payload reference only, must not modify header
++ *	@proto_csum_valid: Protocol csum validated since arriving at localhost
++ *	@proto_csum_blank: Protocol csum must be added before leaving localhost
+  *	@pkt_type: Packet class
+  *	@ip_summed: Driver fed us an IP checksum
+  *	@priority: Packet queueing priority
+@@ -252,6 +254,8 @@ struct sk_buff {
+ 	unsigned char		local_df,
+ 				cloned:1,
+ 				nohdr:1,
++				proto_csum_valid:1,
++				proto_csum_blank:1,
+ 				pkt_type,
+ 				ip_summed;
+ 	__u32			priority;
+diff -Nurp pristine-linux-2.6.12/include/linux/zlib.h linux-2.6.12-xen/include/linux/zlib.h
+--- pristine-linux-2.6.12/include/linux/zlib.h	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/include/linux/zlib.h	2006-03-05 23:54:36.929062072 +0100
+@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
+    stream state was inconsistent (such as zalloc or state being NULL).
+ */
+ 
++static inline unsigned long deflateBound(unsigned long s)
++{
++	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
++}
++
+ extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
+ /*
+      Dynamically update the compression level and compression strategy.  The
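+
The zlib.h hunk adds an inline deflateBound() that, unlike userspace zlib's two-argument deflateBound(strm, sourceLen), takes only the source length and returns a worst-case size for the deflated output (source length plus block and header overhead). A standalone sketch of how a caller might size a destination buffer with the same formula; the 4096-byte figure is only an example:

    #include <stdio.h>

    /* Same arithmetic as the inline helper added above. */
    static unsigned long deflate_bound(unsigned long s)
    {
            return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
    }

    int main(void)
    {
            /* worst-case output size when compressing one 4096-byte page */
            printf("bound(4096) = %lu\n", deflate_bound(4096));
            return 0;
    }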
+diff -Nurp pristine-linux-2.6.12/kernel/cpu.c linux-2.6.12-xen/kernel/cpu.c
+--- pristine-linux-2.6.12/kernel/cpu.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/kernel/cpu.c	2006-03-05 23:54:37.123033488 +0100
+@@ -63,19 +63,15 @@ static int take_cpu_down(void *unused)
+ {
+ 	int err;
+ 
+-	/* Take offline: makes arch_cpu_down somewhat easier. */
+-	cpu_clear(smp_processor_id(), cpu_online_map);
+-
+ 	/* Ensure this CPU doesn't handle any more interrupts. */
+ 	err = __cpu_disable();
+ 	if (err < 0)
+-		cpu_set(smp_processor_id(), cpu_online_map);
+-	else
+-		/* Force idle task to run as soon as we yield: it should
+-		   immediately notice cpu is offline and die quickly. */
+-		sched_idle_next();
++		return err;
+ 
+-	return err;
++	/* Force idle task to run as soon as we yield: it should
++	   immediately notice cpu is offline and die quickly. */
++	sched_idle_next();
++	return 0;
+ }
+ 
+ int cpu_down(unsigned int cpu)
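+
With the kernel/cpu.c hunk above, take_cpu_down() no longer toggles cpu_online_map itself; the architecture's __cpu_disable() is now expected to take the CPU offline and simply return an error on failure, with nothing for the generic code to undo. A hedged sketch of what an arch implementation might then look like; the body is illustrative and not taken from any port in this patch:

    #include <linux/smp.h>
    #include <linux/cpumask.h>

    int __cpu_disable(void)
    {
            unsigned int cpu = smp_processor_id();

            if (cpu == 0)
                    return -EBUSY;  /* e.g. refuse to offline the boot CPU */

            cpu_clear(cpu, cpu_online_map); /* the arch now owns this step */
            /* mask local interrupts, migrate IRQs away, etc. */
            return 0;
    }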
+diff -Nurp pristine-linux-2.6.12/kernel/irq/manage.c linux-2.6.12-xen/kernel/irq/manage.c
+--- pristine-linux-2.6.12/kernel/irq/manage.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/kernel/irq/manage.c	2006-03-05 23:36:31.000000000 +0100
+@@ -146,9 +146,14 @@ int can_request_irq(unsigned int irq, un
+ 	return !action;
+ }
+ 
+-/*
+- * Internal function to register an irqaction - typically used to
+- * allocate special interrupts that are part of the architecture.
++/**
++ *	setup_irq - register an irqaction structure
++ *	@irq: Interrupt to register
++ *	@irqaction: The irqaction structure to be registered
++ *
++ *	Normally called by request_irq, this function can be used
++ *	directly to allocate special interrupts that are part of the
++ *	architecture.
+  */
+ int setup_irq(unsigned int irq, struct irqaction * new)
+ {
+@@ -217,28 +222,27 @@ int setup_irq(unsigned int irq, struct i
+ 	return 0;
+ }
+ 
+-/**
+- *	free_irq - free an interrupt
+- *	@irq: Interrupt line to free
+- *	@dev_id: Device identity to free
+- *
+- *	Remove an interrupt handler. The handler is removed and if the
+- *	interrupt line is no longer in use by any driver it is disabled.
+- *	On a shared IRQ the caller must ensure the interrupt is disabled
+- *	on the card it drives before calling this function. The function
+- *	does not return until any executing interrupts for this IRQ
+- *	have completed.
++/*
++ *	teardown_irq - unregister an irqaction
++ *	@irq: Interrupt line being freed
++ *	@old: Pointer to the irqaction that is to be unregistered
++ *
++ *	This function is called by free_irq and does the actual
++ *	business of unregistering the handler. It exists as a 
++ *	separate function to enable handlers to be unregistered 

++ *	for irqactions that have been allocated statically at 
++ *	boot time.
+  *
+  *	This function must not be called from interrupt context.
+  */
+-void free_irq(unsigned int irq, void *dev_id)
++int teardown_irq(unsigned int irq, struct irqaction * old)
+ {
+ 	struct irq_desc *desc;
+ 	struct irqaction **p;
+ 	unsigned long flags;
+ 
+ 	if (irq >= NR_IRQS)
+-		return;
++		return -ENOENT;
+ 
+ 	desc = irq_desc + irq;
+ 	spin_lock_irqsave(&desc->lock,flags);
+@@ -250,7 +254,7 @@ void free_irq(unsigned int irq, void *de
+ 			struct irqaction **pp = p;
+ 
+ 			p = &action->next;
+-			if (action->dev_id != dev_id)
++			if (action != old)
+ 				continue;
+ 
+ 			/* Found it - now remove it from the list of entries */
+@@ -267,13 +271,52 @@ void free_irq(unsigned int irq, void *de
+ 
+ 			/* Make sure it's not being used on another CPU */
+ 			synchronize_irq(irq);
+-			kfree(action);
+-			return;
++			return 0;
+ 		}
+-		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
++		printk(KERN_ERR "Trying to teardown free IRQ%d\n",irq);
+ 		spin_unlock_irqrestore(&desc->lock,flags);
++		return -ENOENT;
++	}
++}
++
++/**
++ *	free_irq - free an interrupt
++ *	@irq: Interrupt line to free
++ *	@dev_id: Device identity to free
++ *
++ *	Remove an interrupt handler. The handler is removed and if the
++ *	interrupt line is no longer in use by any driver it is disabled.
++ *	On a shared IRQ the caller must ensure the interrupt is disabled
++ *	on the card it drives before calling this function. The function
++ *	does not return until any executing interrupts for this IRQ
++ *	have completed.
++ *
++ *	This function must not be called from interrupt context.
++ */
++void free_irq(unsigned int irq, void *dev_id)
++{
++	struct irq_desc *desc;
++	struct irqaction *action;
++	unsigned long flags;
++
++	if (irq >= NR_IRQS)
++		return;
++
++	desc = irq_desc + irq;
++	spin_lock_irqsave(&desc->lock,flags);
++	for (action = desc->action; action != NULL; action = action->next) {
++		if (action->dev_id != dev_id)
++			continue;
++
++		spin_unlock_irqrestore(&desc->lock,flags);
++
++		if (teardown_irq(irq, action) == 0)
++			kfree(action);
+ 		return;
+ 	}
++	printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
++	spin_unlock_irqrestore(&desc->lock,flags);
++	return;
+ }
+ 
+ EXPORT_SYMBOL(free_irq);
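+
The manage.c changes above split the list manipulation out of free_irq() into teardown_irq(), precisely so that irqactions that were never kmalloc()ed can be unregistered without being kfree()d. A hedged sketch of that use case; the IRQ number and handler are illustrative:

    #include <linux/init.h>
    #include <linux/irq.h>
    #include <linux/interrupt.h>

    static irqreturn_t example_handler(int irq, void *dev_id,
                                       struct pt_regs *regs)
    {
            return IRQ_HANDLED;
    }

    /* Statically allocated, so free_irq() (which kfree()s the action) is
     * the wrong tool; teardown_irq() only unhooks it from the descriptor.
     */
    static struct irqaction example_action = {
            .handler = example_handler,
            .name    = "example",
    };

    static int __init example_register(void)
    {
            return setup_irq(7, &example_action); /* IRQ 7 chosen arbitrarily */
    }

    static void example_unregister(void)
    {
            teardown_irq(7, &example_action);
    }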
+diff -Nurp pristine-linux-2.6.12/kernel/module.c linux-2.6.12-xen/kernel/module.c
+--- pristine-linux-2.6.12/kernel/module.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/kernel/module.c	2006-03-05 23:54:36.930061925 +0100
+@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
+ /* Created by linker magic */
+ extern char __per_cpu_start[], __per_cpu_end[];
+ 
+-static void *percpu_modalloc(unsigned long size, unsigned long align)
++static void *percpu_modalloc(unsigned long size, unsigned long align,
++			     const char *name)
+ {
+ 	unsigned long extra;
+ 	unsigned int i;
+ 	void *ptr;
+ 
+-	BUG_ON(align > SMP_CACHE_BYTES);
++	if (align > SMP_CACHE_BYTES) {
++		printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
++		       name, align, SMP_CACHE_BYTES);
++		align = SMP_CACHE_BYTES;
++	}
+ 
+ 	ptr = __per_cpu_start;
+ 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
+@@ -347,7 +352,8 @@ static int percpu_modinit(void)
+ }	
+ __initcall(percpu_modinit);
+ #else /* ... !CONFIG_SMP */
+-static inline void *percpu_modalloc(unsigned long size, unsigned long align)
++static inline void *percpu_modalloc(unsigned long size, unsigned long align,
++				    const char *name)
+ {
+ 	return NULL;
+ }
+@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
+ 	if (pcpuindex) {
+ 		/* We have a special allocation for this section. */
+ 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
+-					 sechdrs[pcpuindex].sh_addralign);
++					 sechdrs[pcpuindex].sh_addralign,
++					 mod->name);
+ 		if (!percpu) {
+ 			err = -ENOMEM;
+ 			goto free_mod;
+diff -Nurp pristine-linux-2.6.12/kernel/rcupdate.c linux-2.6.12-xen/kernel/rcupdate.c
+--- pristine-linux-2.6.12/kernel/rcupdate.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/kernel/rcupdate.c	2006-03-05 23:54:37.185024353 +0100
+@@ -202,8 +202,11 @@ static void rcu_start_batch(struct rcu_c
+  */
+ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
+ {
++	cpumask_t mask;
++
+ 	cpu_clear(cpu, rsp->cpumask);
+-	if (cpus_empty(rsp->cpumask)) {
++	cpus_andnot(mask, rsp->cpumask, nohz_cpu_mask);
++	if (cpus_empty(mask)) {
+ 		/* batch completed ! */
+ 		rcp->completed = rcp->cur;
+ 		rcu_start_batch(rcp, rsp, 0);
+diff -Nurp pristine-linux-2.6.12/kernel/signal.c linux-2.6.12-xen/kernel/signal.c
+--- pristine-linux-2.6.12/kernel/signal.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/kernel/signal.c	2006-03-05 23:54:36.931061778 +0100
+@@ -686,7 +686,7 @@ static void handle_stop_signal(int sig, 
+ {
+ 	struct task_struct *t;
+ 
+-	if (p->flags & SIGNAL_GROUP_EXIT)
++	if (p->signal->flags & SIGNAL_GROUP_EXIT)
+ 		/*
+ 		 * The process is in the middle of dying already.
+ 		 */
+diff -Nurp pristine-linux-2.6.12/lib/inflate.c linux-2.6.12-xen/lib/inflate.c
+--- pristine-linux-2.6.12/lib/inflate.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/lib/inflate.c	2006-03-05 23:54:36.932061630 +0100
+@@ -326,7 +326,7 @@ DEBG("huft1 ");
+   {
+     *t = (struct huft *)NULL;
+     *m = 0;
+-    return 0;
++    return 2;
+   }
+ 
+ DEBG("huft2 ");
+@@ -374,6 +374,7 @@ DEBG("huft5 ");
+     if ((j = *p++) != 0)
+       v[x[j]++] = i;
+   } while (++i < n);
++  n = x[g];                   /* set n to length of v */
+ 
+ DEBG("h6 ");
+ 
+@@ -410,12 +411,13 @@ DEBG1("1 ");
+ DEBG1("2 ");
+           f -= a + 1;           /* deduct codes from patterns left */
+           xp = c + k;
+-          while (++j < z)       /* try smaller tables up to z bits */
+-          {
+-            if ((f <<= 1) <= *++xp)
+-              break;            /* enough codes to use up j bits */
+-            f -= *xp;           /* else deduct codes from patterns */
+-          }
++          if (j < z)
++            while (++j < z)       /* try smaller tables up to z bits */
++            {
++              if ((f <<= 1) <= *++xp)
++                break;            /* enough codes to use up j bits */
++              f -= *xp;           /* else deduct codes from patterns */
++            }
+         }
+ DEBG1("3 ");
+         z = 1 << j;             /* table entries for j-bit table */
+diff -Nurp pristine-linux-2.6.12/Makefile linux-2.6.12-xen/Makefile
+--- pristine-linux-2.6.12/Makefile	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/Makefile	2006-03-05 23:55:06.837651478 +0100
+@@ -2,6 +2,7 @@ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 12
+ EXTRAVERSION =
++XENGUEST = -xen
+ NAME=Woozy Numbat
+ 
+ # *DOCUMENTATION*
+@@ -1149,7 +1150,7 @@ endif # KBUILD_EXTMOD
+ #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
+ #Adding $(srctree) adds about 20M on i386 to the size of the output file!
+ 
+-ifeq ($(KBUILD_OUTPUT),)
++ifeq ($(src),$(obj))
+ __srctree =
+ else
+ __srctree = $(srctree)/
+diff -Nurp pristine-linux-2.6.12/mm/highmem.c linux-2.6.12-xen/mm/highmem.c
+--- pristine-linux-2.6.12/mm/highmem.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/mm/highmem.c	2006-03-05 23:36:29.000000000 +0100
+@@ -148,6 +148,15 @@ start:
+ 	return vaddr;
+ }
+ 
++void kmap_flush_unused(void)
++{
++	spin_lock(&kmap_lock);
++	flush_all_zero_pkmaps();
++	spin_unlock(&kmap_lock);
++}
++
++EXPORT_SYMBOL(kmap_flush_unused);
++
+ void fastcall *kmap_high(struct page *page)
+ {
+ 	unsigned long vaddr;
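+
The mm/highmem.c hunk exports kmap_flush_unused(), which walks the pkmap array under kmap_lock and tears down every mapping whose reference count has dropped to zero, instead of waiting for the array to wrap. A trivial hedged sketch of a caller; the surrounding function is illustrative:

    #include <linux/highmem.h>

    /* Force stale kernel-virtual aliases of highmem pages to be removed,
     * e.g. before changing who owns the underlying frames.  On
     * !CONFIG_HIGHMEM kernels the new inline stub makes this a no-op.
     */
    static void example_drop_stale_kmaps(void)
    {
            kmap_flush_unused();
    }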
+diff -Nurp pristine-linux-2.6.12/mm/memory.c linux-2.6.12-xen/mm/memory.c
+--- pristine-linux-2.6.12/mm/memory.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/mm/memory.c	2006-03-05 23:36:29.000000000 +0100
+@@ -940,6 +940,24 @@ int get_user_pages(struct task_struct *t
+ 			continue;
+ 		}
+ 
++                if (vma && (vma->vm_flags & VM_FOREIGN))
++                {
++                    struct page **map = vma->vm_private_data;
++                    int offset = (start - vma->vm_start) >> PAGE_SHIFT;
++
++                    if (map[offset] != NULL) {
++                        if (pages) {
++                            pages[i] = map[offset];
++                        } 
++                        if (vmas) 
++                            vmas[i] = vma;
++                        i++;
++                        start += PAGE_SIZE;
++                        len--;
++                        continue;
++                    } 
++                }
++
+ 		if (!vma || (vma->vm_flags & VM_IO)
+ 				|| !(flags & vma->vm_flags))
+ 			return i ? : -EFAULT;
+@@ -1195,6 +1213,104 @@ int remap_pfn_range(struct vm_area_struc
+ }
+ EXPORT_SYMBOL(remap_pfn_range);
+ 
++static inline int generic_pte_range(struct mm_struct *mm,
++                                    pmd_t *pmd, 
++                                    unsigned long addr, 
++                                    unsigned long end,
++                                    pte_fn_t fn, void *data)
++{
++	pte_t *pte;
++        int err;
++        struct page *pte_page;
++
++        pte = (mm == &init_mm) ? 
++                pte_alloc_kernel(mm, pmd, addr) :
++                pte_alloc_map(mm, pmd, addr);
++        if (!pte)
++                return -ENOMEM;
++
++        pte_page = pmd_page(*pmd);
++
++        do {
++                err = fn(pte, pte_page, addr, data);
++		if (err)
++                        break;
++        } while (pte++, addr += PAGE_SIZE, addr != end);
++
++        if (mm != &init_mm)
++                pte_unmap(pte-1);
++        return err;
++
++}
++
++static inline int generic_pmd_range(struct mm_struct *mm,
++                                    pud_t *pud, 
++                                    unsigned long addr, 
++                                    unsigned long end,
++                                    pte_fn_t fn, void *data)
++{
++	pmd_t *pmd;
++	unsigned long next;
++        int err;
++
++	pmd = pmd_alloc(mm, pud, addr);
++	if (!pmd)
++		return -ENOMEM;
++	do {
++		next = pmd_addr_end(addr, end);
++                err = generic_pte_range(mm, pmd, addr, next, fn, data);
++                if (err)
++                    break;
++	} while (pmd++, addr = next, addr != end);
++	return err;
++}
++
++static inline int generic_pud_range(struct mm_struct *mm, pgd_t *pgd, 
++                                    unsigned long addr,
++                                    unsigned long end,
++                                    pte_fn_t fn, void *data)
++{
++	pud_t *pud;
++	unsigned long next;
++        int err;
++
++	pud = pud_alloc(mm, pgd, addr);
++	if (!pud)
++		return -ENOMEM;
++	do {
++		next = pud_addr_end(addr, end);
++		err = generic_pmd_range(mm, pud, addr, next, fn, data);
++                if (err)
++			break;
++	} while (pud++, addr = next, addr != end);
++	return err;
++}
++
++/*
++ * Scan a region of virtual memory, filling in page tables as necessary
++ * and calling a provided function on each leaf page table.
++ */
++int generic_page_range(struct mm_struct *mm, unsigned long addr, 
++                  unsigned long size, pte_fn_t fn, void *data)
++{
++	pgd_t *pgd;
++	unsigned long next;
++	unsigned long end = addr + size;
++	int err;
++
++	BUG_ON(addr >= end);
++	pgd = pgd_offset(mm, addr);
++	spin_lock(&mm->page_table_lock);
++	do {
++		next = pgd_addr_end(addr, end);
++		err = generic_pud_range(mm, pgd, addr, next, fn, data);
++		if (err)
++			break;
++	} while (pgd++, addr = next, addr != end);
++	spin_unlock(&mm->page_table_lock);
++	return err;
++}
++
+ /*
+  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
+  * servicing faults for write access.  In the normal case, do always want
+@@ -1249,20 +1365,15 @@ static int do_wp_page(struct mm_struct *
+ 	struct page *old_page, *new_page;
+ 	unsigned long pfn = pte_pfn(pte);
+ 	pte_t entry;
++	struct page invalid_page;
+ 
+ 	if (unlikely(!pfn_valid(pfn))) {
+-		/*
+-		 * This should really halt the system so it can be debugged or
+-		 * at least the kernel stops what it's doing before it corrupts
+-		 * data, but for the moment just pretend this is OOM.
+-		 */
+-		pte_unmap(page_table);
+-		printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
+-				address);
+-		spin_unlock(&mm->page_table_lock);
+-		return VM_FAULT_OOM;
++		/* This can happen with /dev/mem (PROT_WRITE, MAP_PRIVATE). */
++		invalid_page.flags = (1<<PG_reserved) | (1<<PG_locked);
++		old_page = &invalid_page;
++	} else {
++		old_page = pfn_to_page(pfn);
+ 	}
+-	old_page = pfn_to_page(pfn);
+ 
+ 	if (!TestSetPageLocked(old_page)) {
+ 		int reuse = can_share_swap_page(old_page);
+@@ -1298,7 +1409,13 @@ static int do_wp_page(struct mm_struct *
+ 		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ 		if (!new_page)
+ 			goto no_new_page;
+-		copy_user_highpage(new_page, old_page, address);
++		if (old_page == &invalid_page) {
++			char *vto = kmap_atomic(new_page, KM_USER1);
++			copy_page(vto, (void *)(address & PAGE_MASK));
++			kunmap_atomic(vto, KM_USER1);
++		} else {
++			copy_user_highpage(new_page, old_page, address);
++		}
+ 	}
+ 	/*
+ 	 * Re-check the pte - we dropped the lock
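+
The new generic_page_range() above allocates any missing page-table levels for [addr, addr+size) and, with mm->page_table_lock held, calls the supplied pte_fn_t once per PTE slot, stopping at the first non-zero return. A hedged usage sketch; the callback body and the PAGE_KERNEL protection are illustrative:

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    static int set_pte_cb(pte_t *pte, struct page *pte_page,
                          unsigned long addr, void *data)
    {
            struct page *page = data;

            set_pte(pte, mk_pte(page, PAGE_KERNEL)); /* example action */
            return 0;                   /* non-zero would abort the walk */
    }

    static int example_map_one_page(struct mm_struct *mm, unsigned long addr,
                                    struct page *page)
    {
            return generic_page_range(mm, addr, PAGE_SIZE, set_pte_cb, page);
    }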
+diff -Nurp pristine-linux-2.6.12/mm/mempolicy.c linux-2.6.12-xen/mm/mempolicy.c
+--- pristine-linux-2.6.12/mm/mempolicy.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/mm/mempolicy.c	2006-03-05 23:54:36.934061336 +0100
+@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
+ 	struct mempolicy *new;
+ 	DECLARE_BITMAP(nodes, MAX_NUMNODES);
+ 
+-	if (mode > MPOL_MAX)
++	if (mode < 0 || mode > MPOL_MAX)
+ 		return -EINVAL;
+ 	err = get_nodes(nodes, nmask, maxnode, mode);
+ 	if (err)
+diff -Nurp pristine-linux-2.6.12/mm/mmap.c linux-2.6.12-xen/mm/mmap.c
+--- pristine-linux-2.6.12/mm/mmap.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/mm/mmap.c	2006-03-05 23:36:29.000000000 +0100
+@@ -1904,6 +1904,10 @@ void exit_mmap(struct mm_struct *mm)
+ 	unsigned long nr_accounted = 0;
+ 	unsigned long end;
+ 
++#ifdef arch_exit_mmap
++	arch_exit_mmap(mm);
++#endif
++
+ 	lru_add_drain();
+ 
+ 	spin_lock(&mm->page_table_lock);
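+
The mmap.c hunk gives architectures a chance to run code at the start of exit_mmap(), but only if they define arch_exit_mmap. A minimal sketch of how an arch header could opt in; the helper name and body are illustrative:

    /* In an architecture's pgtable/mmu header (illustrative): */
    static inline void example_arch_exit_mmap(struct mm_struct *mm)
    {
            /* e.g. unpin page tables or notify a hypervisor before the
             * generic teardown of the address space runs */
    }
    #define arch_exit_mmap(mm) example_arch_exit_mmap(mm)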
+diff -Nurp pristine-linux-2.6.12/mm/page_alloc.c linux-2.6.12-xen/mm/page_alloc.c
+--- pristine-linux-2.6.12/mm/page_alloc.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/mm/page_alloc.c	2006-03-05 23:36:29.000000000 +0100
+@@ -368,7 +368,8 @@ void __free_pages_ok(struct page *page, 
+ 	LIST_HEAD(list);
+ 	int i;
+ 
+-	arch_free_page(page, order);
++	if (arch_free_page(page, order))
++		return;
+ 
+ 	mod_page_state(pgfree, 1 << order);
+ 
+@@ -608,7 +609,8 @@ static void fastcall free_hot_cold_page(
+ 	struct per_cpu_pages *pcp;
+ 	unsigned long flags;
+ 
+-	arch_free_page(page, 0);
++	if (arch_free_page(page, 0))
++		return;
+ 
+ 	kernel_map_pages(page, 1, 0);
+ 	inc_page_state(pgfree);
+diff -Nurp pristine-linux-2.6.12/net/8021q/vlan.c linux-2.6.12-xen/net/8021q/vlan.c
+--- pristine-linux-2.6.12/net/8021q/vlan.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/8021q/vlan.c	2006-03-05 23:54:36.935061188 +0100
+@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
+ 			if (!vlandev)
+ 				continue;
+ 
++			if (netif_carrier_ok(dev)) {
++				if (!netif_carrier_ok(vlandev))
++					netif_carrier_on(vlandev);
++			} else {
++				if (netif_carrier_ok(vlandev))
++					netif_carrier_off(vlandev);
++			}
++
+ 			if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
+ 				vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK) 
+ 					| flgs;
+diff -Nurp pristine-linux-2.6.12/net/core/dev.c linux-2.6.12-xen/net/core/dev.c
+--- pristine-linux-2.6.12/net/core/dev.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/core/dev.c	2006-03-05 23:36:29.000000000 +0100
+@@ -115,6 +115,11 @@
+ #endif	/* CONFIG_NET_RADIO */
+ #include <asm/current.h>
+ 
++#include <net/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++
++
+ /* This define, if set, will randomly drop a packet when congestion
+  * is more than moderate.  It helps fairness in the multi-interface
+  * case when one of them is a hog, but it kills performance for the
+@@ -1261,6 +1266,35 @@ int dev_queue_xmit(struct sk_buff *skb)
+ 	    __skb_linearize(skb, GFP_ATOMIC))
+ 		goto out_kfree_skb;
+ 
++	/* If a checksum-deferred packet is forwarded to a device that needs a
++	 * checksum, correct the pointers and force checksumming.
++	 */
++	if (skb->proto_csum_blank) {
++		if (skb->protocol != htons(ETH_P_IP))
++			goto out_kfree_skb;
++		skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
++		if (skb->h.raw >= skb->tail)
++			goto out_kfree_skb;
++		switch (skb->nh.iph->protocol) {
++		case IPPROTO_TCP:
++			skb->csum = offsetof(struct tcphdr, check);
++			break;
++		case IPPROTO_UDP:
++			skb->csum = offsetof(struct udphdr, check);
++			break;
++		default:
++			if (net_ratelimit())
++				printk(KERN_ERR "Attempting to checksum a non-"
++				       "TCP/UDP packet, dropping a protocol"
++				       " %d packet", skb->nh.iph->protocol);
++			rc = -EPROTO;
++			goto out_kfree_skb;
++		}
++		if ((skb->h.raw + skb->csum + 2) > skb->tail)
++			goto out_kfree_skb;
++		skb->ip_summed = CHECKSUM_HW;
++	}
++
+ 	/* If packet is not checksummed and device does not support
+ 	 * checksumming for this protocol, complete checksumming here.
+ 	 */
+@@ -1680,6 +1714,17 @@ int netif_receive_skb(struct sk_buff *sk
+ 	}
+ #endif
+ 
++	switch (skb->ip_summed) {
++	case CHECKSUM_UNNECESSARY:
++		skb->proto_csum_valid = 1;
++		break;
++	case CHECKSUM_HW:
++		/* XXX Implement me. */
++	default:
++		skb->proto_csum_valid = 0;
++		break;
++	}
++
+ 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ 		if (!ptype->dev || ptype->dev == skb->dev) {
+ 			if (pt_prev) 
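+
The dev.c hunks complete the checksum-offload story for the new skb bits from the skbuff.h hunk: on transmit, a packet still marked proto_csum_blank has its transport header located, skb->csum set to the offset of the TCP/UDP checksum field, and ip_summed forced to CHECKSUM_HW so the normal path or the hardware fills the sum in; on receive, proto_csum_valid records whether the protocol checksum has already been verified. A hedged sketch of how a purely illustrative virtual NIC receive path might hand up such a packet:

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    static void example_rx(struct net_device *dev, struct sk_buff *skb)
    {
            skb->dev = dev;
            skb->protocol = eth_type_trans(skb, dev);
            /* The sender never computed the protocol checksum; if this
             * packet is later forwarded, dev_queue_xmit() converts the
             * flag into CHECKSUM_HW as shown above.
             */
            skb->proto_csum_blank = 1;
            netif_rx(skb);
    }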
+diff -Nurp pristine-linux-2.6.12/net/core/skbuff.c linux-2.6.12-xen/net/core/skbuff.c
+--- pristine-linux-2.6.12/net/core/skbuff.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/core/skbuff.c	2006-03-05 23:36:29.000000000 +0100
+@@ -129,6 +129,7 @@ void skb_under_panic(struct sk_buff *skb
+  *	Buffers may only be allocated from interrupts using a @gfp_mask of
+  *	%GFP_ATOMIC.
+  */
++#ifndef CONFIG_HAVE_ARCH_ALLOC_SKB
+ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
+ {
+ 	struct sk_buff *skb;
+@@ -166,6 +167,7 @@ nodata:
+ 	skb = NULL;
+ 	goto out;
+ }
++#endif /* !CONFIG_HAVE_ARCH_ALLOC_SKB */
+ 
+ /**
+  *	alloc_skb_from_cache	-	allocate a network buffer
+@@ -353,6 +355,8 @@ struct sk_buff *skb_clone(struct sk_buff
+ 	C(local_df);
+ 	n->cloned = 1;
+ 	n->nohdr = 0;
++	C(proto_csum_valid);
++	C(proto_csum_blank);
+ 	C(pkt_type);
+ 	C(ip_summed);
+ 	C(priority);
+diff -Nurp pristine-linux-2.6.12/net/ipv4/icmp.c linux-2.6.12-xen/net/ipv4/icmp.c
+--- pristine-linux-2.6.12/net/ipv4/icmp.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv4/icmp.c	2006-03-05 23:54:36.936061041 +0100
+@@ -349,12 +349,12 @@ static void icmp_push_reply(struct icmp_
+ {
+ 	struct sk_buff *skb;
+ 
+-	ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
+-		       icmp_param->data_len+icmp_param->head_len,
+-		       icmp_param->head_len,
+-		       ipc, rt, MSG_DONTWAIT);
+-
+-	if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
++	if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
++		           icmp_param->data_len+icmp_param->head_len,
++		           icmp_param->head_len,
++		           ipc, rt, MSG_DONTWAIT) < 0)
++		ip_flush_pending_frames(icmp_socket->sk);
++	else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
+ 		struct icmphdr *icmph = skb->h.icmph;
+ 		unsigned int csum = 0;
+ 		struct sk_buff *skb1;
+diff -Nurp pristine-linux-2.6.12/net/ipv4/ip_output.c linux-2.6.12-xen/net/ipv4/ip_output.c
+--- pristine-linux-2.6.12/net/ipv4/ip_output.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv4/ip_output.c	2006-03-05 23:54:36.937060894 +0100
+@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
+ #ifdef CONFIG_NETFILTER_DEBUG
+ 	nf_debug_ip_loopback_xmit(newskb);
+ #endif
+-	nf_reset(newskb);
+ 	netif_rx(newskb);
+ 	return 0;
+ }
+@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
+ 	nf_debug_ip_finish_output2(skb);
+ #endif /*CONFIG_NETFILTER_DEBUG*/
+ 
+-	nf_reset(skb);
+-
+ 	if (hh) {
+ 		int hh_alen;
+ 
+diff -Nurp pristine-linux-2.6.12/net/ipv4/ip_sockglue.c linux-2.6.12-xen/net/ipv4/ip_sockglue.c
+--- pristine-linux-2.6.12/net/ipv4/ip_sockglue.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv4/ip_sockglue.c	2006-03-05 23:54:36.937060894 +0100
+@@ -848,6 +848,9 @@ mc_msf_out:
+  
+ 		case IP_IPSEC_POLICY:
+ 		case IP_XFRM_POLICY:
++			err = -EPERM;
++			if (!capable(CAP_NET_ADMIN))
++				break;
+ 			err = xfrm_user_policy(sk, optname, optval, optlen);
+ 			break;
+ 
+diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_core.c linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_core.c
+--- pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_core.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_core.c	2006-03-05 23:54:36.938060746 +0100
+@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
+ 		schedule();
+ 		goto i_see_dead_people;
+ 	}
++	/* wait until all references to ip_conntrack_untracked are dropped */
++	while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
++		schedule();
+ 
+ 	kmem_cache_destroy(ip_conntrack_cachep);
+ 	kmem_cache_destroy(ip_conntrack_expect_cachep);
+diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_proto_udp.c linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_proto_udp.c
+--- pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_proto_udp.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_proto_udp.c	2006-03-05 23:54:37.128032752 +0100
+@@ -120,6 +120,7 @@ static int udp_error(struct sk_buff *skb
+ 	 * and moreover root might send raw packets.
+ 	 * FIXME: Source route IP option packets --RR */
+ 	if (hooknum == NF_IP_PRE_ROUTING
++	    && skb->ip_summed != CHECKSUM_UNNECESSARY
+ 	    && csum_tcpudp_magic(iph->saddr, iph->daddr, udplen, IPPROTO_UDP,
+ 			         skb->ip_summed == CHECKSUM_HW ? skb->csum
+ 			      	 : skb_checksum(skb, iph->ihl*4, udplen, 0))) {
+diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_standalone.c linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_standalone.c
+--- pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_standalone.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_standalone.c	2006-03-05 23:54:36.939060599 +0100
+@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
+ 				        const struct net_device *out,
+ 				        int (*okfn)(struct sk_buff *))
+ {
++#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
++	/* Previously seen (loopback)?  Ignore.  Do this before
++           fragment check. */
++	if ((*pskb)->nfct)
++		return NF_ACCEPT;
++#endif
++
+ 	/* Gather fragments. */
+ 	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+ 		*pskb = ip_ct_gather_frags(*pskb,
+diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_tcp.c linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c
+--- pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_tcp.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-03-05 23:54:37.129032604 +0100
+@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
+ 		 enum ip_nat_manip_type maniptype,
+ 		 const struct ip_conntrack *conntrack)
+ {
+-	static u_int16_t port, *portptr;
++	static u_int16_t port;
++	u_int16_t *portptr;
+ 	unsigned int range_size, min, i;
+ 
+ 	if (maniptype == IP_NAT_MANIP_SRC)
+@@ -127,10 +128,16 @@ tcp_manip_pkt(struct sk_buff **pskb,
+ 	if (hdrsize < sizeof(*hdr))
+ 		return 1;
+ 
+-	hdr->check = ip_nat_cheat_check(~oldip, newip,
++	if ((*pskb)->proto_csum_blank) {
++		hdr->check = ip_nat_cheat_check(oldip, ~newip,
++				ip_nat_cheat_check(oldport ^ 0xFFFF,
++					newport, hdr->check));
++	} else { 
++		hdr->check = ip_nat_cheat_check(~oldip, newip,
+ 					ip_nat_cheat_check(oldport ^ 0xFFFF,
+ 							   newport,
+ 							   hdr->check));
++	}
+ 	return 1;
+ }
+ 
+diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_tcp.c.orig linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c.orig
+--- pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_tcp.c.orig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c.orig	2006-03-05 23:54:36.939060599 +0100
+@@ -0,0 +1,179 @@
++/* (C) 1999-2001 Paul `Rusty' Russell
++ * (C) 2002-2004 Netfilter Core Team <coreteam at netfilter.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/netfilter.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/if.h>
++#include <linux/netfilter_ipv4/ip_nat.h>
++#include <linux/netfilter_ipv4/ip_nat_rule.h>
++#include <linux/netfilter_ipv4/ip_nat_protocol.h>
++#include <linux/netfilter_ipv4/ip_nat_core.h>
++
++static int
++tcp_in_range(const struct ip_conntrack_tuple *tuple,
++	     enum ip_nat_manip_type maniptype,
++	     const union ip_conntrack_manip_proto *min,
++	     const union ip_conntrack_manip_proto *max)
++{
++	u_int16_t port;
++
++	if (maniptype == IP_NAT_MANIP_SRC)
++		port = tuple->src.u.tcp.port;
++	else
++		port = tuple->dst.u.tcp.port;
++
++	return ntohs(port) >= ntohs(min->tcp.port)
++		&& ntohs(port) <= ntohs(max->tcp.port);
++}
++
++static int
++tcp_unique_tuple(struct ip_conntrack_tuple *tuple,
++		 const struct ip_nat_range *range,
++		 enum ip_nat_manip_type maniptype,
++		 const struct ip_conntrack *conntrack)
++{
++	static u_int16_t port;
++	u_int16_t *portptr;
++	unsigned int range_size, min, i;
++
++	if (maniptype == IP_NAT_MANIP_SRC)
++		portptr = &tuple->src.u.tcp.port;
++	else
++		portptr = &tuple->dst.u.tcp.port;
++
++	/* If no range specified... */
++	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
++		/* If it's dst rewrite, can't change port */
++		if (maniptype == IP_NAT_MANIP_DST)
++			return 0;
++
++		/* Map privileged onto privileged. */
++		if (ntohs(*portptr) < 1024) {
++			/* Loose convention: >> 512 is credential passing */
++			if (ntohs(*portptr)<512) {
++				min = 1;
++				range_size = 511 - min + 1;
++			} else {
++				min = 600;
++				range_size = 1023 - min + 1;
++			}
++		} else {
++			min = 1024;
++			range_size = 65535 - 1024 + 1;
++		}
++	} else {
++		min = ntohs(range->min.tcp.port);
++		range_size = ntohs(range->max.tcp.port) - min + 1;
++	}
++
++	for (i = 0; i < range_size; i++, port++) {
++		*portptr = htons(min + port % range_size);
++		if (!ip_nat_used_tuple(tuple, conntrack)) {
++			return 1;
++		}
++	}
++	return 0;
++}
++
++static int
++tcp_manip_pkt(struct sk_buff **pskb,
++	      unsigned int iphdroff,
++	      const struct ip_conntrack_tuple *tuple,
++	      enum ip_nat_manip_type maniptype)
++{
++	struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
++	struct tcphdr *hdr;
++	unsigned int hdroff = iphdroff + iph->ihl*4;
++	u32 oldip, newip;
++	u16 *portptr, newport, oldport;
++	int hdrsize = 8; /* TCP connection tracking guarantees this much */
++
++	/* this could be a inner header returned in icmp packet; in such
++	   cases we cannot update the checksum field since it is outside of
++	   the 8 bytes of transport layer headers we are guaranteed */
++	if ((*pskb)->len >= hdroff + sizeof(struct tcphdr))
++		hdrsize = sizeof(struct tcphdr);
++
++	if (!skb_ip_make_writable(pskb, hdroff + hdrsize))
++		return 0;
++
++	iph = (struct iphdr *)((*pskb)->data + iphdroff);
++	hdr = (struct tcphdr *)((*pskb)->data + hdroff);
++
++	if (maniptype == IP_NAT_MANIP_SRC) {
++		/* Get rid of src ip and src pt */
++		oldip = iph->saddr;
++		newip = tuple->src.ip;
++		newport = tuple->src.u.tcp.port;
++		portptr = &hdr->source;
++	} else {
++		/* Get rid of dst ip and dst pt */
++		oldip = iph->daddr;
++		newip = tuple->dst.ip;
++		newport = tuple->dst.u.tcp.port;
++		portptr = &hdr->dest;
++	}
++
++	oldport = *portptr;
++	*portptr = newport;
++
++	if (hdrsize < sizeof(*hdr))
++		return 1;
++
++	hdr->check = ip_nat_cheat_check(~oldip, newip,
++					ip_nat_cheat_check(oldport ^ 0xFFFF,
++							   newport,
++							   hdr->check));
++	return 1;
++}
++
++static unsigned int
++tcp_print(char *buffer,
++	  const struct ip_conntrack_tuple *match,
++	  const struct ip_conntrack_tuple *mask)
++{
++	unsigned int len = 0;
++
++	if (mask->src.u.tcp.port)
++		len += sprintf(buffer + len, "srcpt=%u ",
++			       ntohs(match->src.u.tcp.port));
++
++
++	if (mask->dst.u.tcp.port)
++		len += sprintf(buffer + len, "dstpt=%u ",
++			       ntohs(match->dst.u.tcp.port));
++
++	return len;
++}
++
++static unsigned int
++tcp_print_range(char *buffer, const struct ip_nat_range *range)
++{
++	if (range->min.tcp.port != 0 || range->max.tcp.port != 0xFFFF) {
++		if (range->min.tcp.port == range->max.tcp.port)
++			return sprintf(buffer, "port %u ",
++				       ntohs(range->min.tcp.port));
++		else
++			return sprintf(buffer, "ports %u-%u ",
++				       ntohs(range->min.tcp.port),
++				       ntohs(range->max.tcp.port));
++	}
++	else return 0;
++}
++
++struct ip_nat_protocol ip_nat_protocol_tcp
++= { "TCP", IPPROTO_TCP,
++    tcp_manip_pkt,
++    tcp_in_range,
++    tcp_unique_tuple,
++    tcp_print,
++    tcp_print_range
++};
+diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_udp.c linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_udp.c
+--- pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_udp.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-03-05 23:54:37.128032752 +0100
+@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
+ 		 enum ip_nat_manip_type maniptype,
+ 		 const struct ip_conntrack *conntrack)
+ {
+-	static u_int16_t port, *portptr;
++	static u_int16_t port;
++	u_int16_t *portptr;
+ 	unsigned int range_size, min, i;
+ 
+ 	if (maniptype == IP_NAT_MANIP_SRC)
+@@ -112,11 +113,19 @@ udp_manip_pkt(struct sk_buff **pskb,
+ 		newport = tuple->dst.u.udp.port;
+ 		portptr = &hdr->dest;
+ 	}
+-	if (hdr->check) /* 0 is a special case meaning no checksum */
+-		hdr->check = ip_nat_cheat_check(~oldip, newip,
++	
++	if (hdr->check) { /* 0 is a special case meaning no checksum */
++		if ((*pskb)->proto_csum_blank) {
++			hdr->check = ip_nat_cheat_check(oldip, ~newip, 
++					ip_nat_cheat_check(*portptr ^ 0xFFFF, 
++						newport, hdr->check));
++		} else {
++			hdr->check = ip_nat_cheat_check(~oldip, newip,
+ 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
+ 							   newport,
+ 							   hdr->check));
++		}
++	}
+ 	*portptr = newport;
+ 	return 1;
+ }
+diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_udp.c.orig linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_udp.c.orig
+--- pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_udp.c.orig	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_udp.c.orig	2006-03-05 23:54:36.940060452 +0100
+@@ -0,0 +1,166 @@
++/* (C) 1999-2001 Paul `Rusty' Russell
++ * (C) 2002-2004 Netfilter Core Team <coreteam at netfilter.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/netfilter.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/if.h>
++
++#include <linux/netfilter_ipv4/ip_nat.h>
++#include <linux/netfilter_ipv4/ip_nat_core.h>
++#include <linux/netfilter_ipv4/ip_nat_rule.h>
++#include <linux/netfilter_ipv4/ip_nat_protocol.h>
++
++static int
++udp_in_range(const struct ip_conntrack_tuple *tuple,
++	     enum ip_nat_manip_type maniptype,
++	     const union ip_conntrack_manip_proto *min,
++	     const union ip_conntrack_manip_proto *max)
++{
++	u_int16_t port;
++
++	if (maniptype == IP_NAT_MANIP_SRC)
++		port = tuple->src.u.udp.port;
++	else
++		port = tuple->dst.u.udp.port;
++
++	return ntohs(port) >= ntohs(min->udp.port)
++		&& ntohs(port) <= ntohs(max->udp.port);
++}
++
++static int
++udp_unique_tuple(struct ip_conntrack_tuple *tuple,
++		 const struct ip_nat_range *range,
++		 enum ip_nat_manip_type maniptype,
++		 const struct ip_conntrack *conntrack)
++{
++	static u_int16_t port;
++	u_int16_t *portptr;
++	unsigned int range_size, min, i;
++
++	if (maniptype == IP_NAT_MANIP_SRC)
++		portptr = &tuple->src.u.udp.port;
++	else
++		portptr = &tuple->dst.u.udp.port;
++
++	/* If no range specified... */
++	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
++		/* If it's dst rewrite, can't change port */
++		if (maniptype == IP_NAT_MANIP_DST)
++			return 0;
++
++		if (ntohs(*portptr) < 1024) {
++			/* Loose convention: >> 512 is credential passing */
++			if (ntohs(*portptr)<512) {
++				min = 1;
++				range_size = 511 - min + 1;
++			} else {
++				min = 600;
++				range_size = 1023 - min + 1;
++			}
++		} else {
++			min = 1024;
++			range_size = 65535 - 1024 + 1;
++		}
++	} else {
++		min = ntohs(range->min.udp.port);
++		range_size = ntohs(range->max.udp.port) - min + 1;
++	}
++
++	for (i = 0; i < range_size; i++, port++) {
++		*portptr = htons(min + port % range_size);
++		if (!ip_nat_used_tuple(tuple, conntrack))
++			return 1;
++	}
++	return 0;
++}
++
++static int
++udp_manip_pkt(struct sk_buff **pskb,
++	      unsigned int iphdroff,
++	      const struct ip_conntrack_tuple *tuple,
++	      enum ip_nat_manip_type maniptype)
++{
++	struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
++	struct udphdr *hdr;
++	unsigned int hdroff = iphdroff + iph->ihl*4;
++	u32 oldip, newip;
++	u16 *portptr, newport;
++
++	if (!skb_ip_make_writable(pskb, hdroff + sizeof(*hdr)))
++		return 0;
++
++	iph = (struct iphdr *)((*pskb)->data + iphdroff);
++	hdr = (struct udphdr *)((*pskb)->data + hdroff);
++
++	if (maniptype == IP_NAT_MANIP_SRC) {
++		/* Get rid of src ip and src pt */
++		oldip = iph->saddr;
++		newip = tuple->src.ip;
++		newport = tuple->src.u.udp.port;
++		portptr = &hdr->source;
++	} else {
++		/* Get rid of dst ip and dst pt */
++		oldip = iph->daddr;
++		newip = tuple->dst.ip;
++		newport = tuple->dst.u.udp.port;
++		portptr = &hdr->dest;
++	}
++	if (hdr->check) /* 0 is a special case meaning no checksum */
++		hdr->check = ip_nat_cheat_check(~oldip, newip,
++					ip_nat_cheat_check(*portptr ^ 0xFFFF,
++							   newport,
++							   hdr->check));
++	*portptr = newport;
++	return 1;
++}
++
++static unsigned int
++udp_print(char *buffer,
++	  const struct ip_conntrack_tuple *match,
++	  const struct ip_conntrack_tuple *mask)
++{
++	unsigned int len = 0;
++
++	if (mask->src.u.udp.port)
++		len += sprintf(buffer + len, "srcpt=%u ",
++			       ntohs(match->src.u.udp.port));
++
++
++	if (mask->dst.u.udp.port)
++		len += sprintf(buffer + len, "dstpt=%u ",
++			       ntohs(match->dst.u.udp.port));
++
++	return len;
++}
++
++static unsigned int
++udp_print_range(char *buffer, const struct ip_nat_range *range)
++{
++	if (range->min.udp.port != 0 || range->max.udp.port != 0xFFFF) {
++		if (range->min.udp.port == range->max.udp.port)
++			return sprintf(buffer, "port %u ",
++				       ntohs(range->min.udp.port));
++		else
++			return sprintf(buffer, "ports %u-%u ",
++				       ntohs(range->min.udp.port),
++				       ntohs(range->max.udp.port));
++	}
++	else return 0;
++}
++
++struct ip_nat_protocol ip_nat_protocol_udp
++= { "UDP", IPPROTO_UDP,
++    udp_manip_pkt,
++    udp_in_range,
++    udp_unique_tuple,
++    udp_print,
++    udp_print_range
++};
+diff -Nurp pristine-linux-2.6.12/net/ipv6/ip6_input.c linux-2.6.12-xen/net/ipv6/ip6_input.c
+--- pristine-linux-2.6.12/net/ipv6/ip6_input.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv6/ip6_input.c	2006-03-05 23:54:36.940060452 +0100
+@@ -198,12 +198,13 @@ resubmit:
+ 		if (!raw_sk) {
+ 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ 				IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
+-				icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
++				icmpv6_send(skb, ICMPV6_PARAMPROB,
++				            ICMPV6_UNK_NEXTHDR, nhoff,
++				            skb->dev);
+ 			}
+-		} else {
++		} else
+ 			IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
+-			kfree_skb(skb);
+-		}
++		kfree_skb(skb);
+ 	}
+ 	rcu_read_unlock();
+ 	return 0;
+diff -Nurp pristine-linux-2.6.12/net/ipv6/ipv6_sockglue.c linux-2.6.12-xen/net/ipv6/ipv6_sockglue.c
+--- pristine-linux-2.6.12/net/ipv6/ipv6_sockglue.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv6/ipv6_sockglue.c	2006-03-05 23:54:36.941060304 +0100
+@@ -503,6 +503,9 @@ done:
+ 		break;
+ 	case IPV6_IPSEC_POLICY:
+ 	case IPV6_XFRM_POLICY:
++		retv = -EPERM;
++		if (!capable(CAP_NET_ADMIN))
++			break;
+ 		retv = xfrm_user_policy(sk, optname, optval, optlen);
+ 		break;
+ 
+diff -Nurp pristine-linux-2.6.12/net/ipv6/netfilter/ip6_queue.c linux-2.6.12-xen/net/ipv6/netfilter/ip6_queue.c
+--- pristine-linux-2.6.12/net/ipv6/netfilter/ip6_queue.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/ipv6/netfilter/ip6_queue.c	2006-03-05 23:54:36.941060304 +0100
+@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
+ static void
+ ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
+ {
++	local_bh_disable();
+ 	nf_reinject(entry->skb, entry->info, verdict);
++	local_bh_enable();
+ 	kfree(entry);
+ }
+ 
+diff -Nurp pristine-linux-2.6.12/net/netlink/af_netlink.c linux-2.6.12-xen/net/netlink/af_netlink.c
+--- pristine-linux-2.6.12/net/netlink/af_netlink.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/netlink/af_netlink.c	2006-03-05 23:54:36.942060157 +0100
+@@ -315,8 +315,8 @@ err:
+ static void netlink_remove(struct sock *sk)
+ {
+ 	netlink_table_grab();
+-	nl_table[sk->sk_protocol].hash.entries--;
+-	sk_del_node_init(sk);
++	if (sk_del_node_init(sk))
++		nl_table[sk->sk_protocol].hash.entries--;
+ 	if (nlk_sk(sk)->groups)
+ 		__sk_del_bind_node(sk);
+ 	netlink_table_ungrab();
+@@ -429,7 +429,12 @@ retry:
+ 	err = netlink_insert(sk, pid);
+ 	if (err == -EADDRINUSE)
+ 		goto retry;
+-	return 0;
++
++	/* If 2 threads race to autobind, that is fine.  */
++	if (err == -EBUSY)
++		err = 0;
++
++	return err;
+ }
+ 
+ static inline int netlink_capable(struct socket *sock, unsigned int flag) 
+diff -Nurp pristine-linux-2.6.12/net/packet/af_packet.c linux-2.6.12-xen/net/packet/af_packet.c
+--- pristine-linux-2.6.12/net/packet/af_packet.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/packet/af_packet.c	2006-03-05 23:54:36.943060010 +0100
+@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
+ 	dst_release(skb->dst);
+ 	skb->dst = NULL;
+ 
++	/* drop conntrack reference */
++	nf_reset(skb);
++
+ 	spkt = (struct sockaddr_pkt*)skb->cb;
+ 
+ 	skb_push(skb, skb->data-skb->mac.raw);
+@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
+ 	dst_release(skb->dst);
+ 	skb->dst = NULL;
+ 
++	/* drop conntrack reference */
++	nf_reset(skb);
++
+ 	spin_lock(&sk->sk_receive_queue.lock);
+ 	po->stats.tp_packets++;
+ 	__skb_queue_tail(&sk->sk_receive_queue, skb);
+diff -Nurp pristine-linux-2.6.12/net/xfrm/xfrm_user.c linux-2.6.12-xen/net/xfrm/xfrm_user.c
+--- pristine-linux-2.6.12/net/xfrm/xfrm_user.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/net/xfrm/xfrm_user.c	2006-03-05 23:54:36.944059862 +0100
+@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
+ 	if (nr > XFRM_MAX_DEPTH)
+ 		return NULL;
+ 
++	if (p->dir > XFRM_POLICY_OUT)
++		return NULL;
++
+ 	xp = xfrm_policy_alloc(GFP_KERNEL);
+ 	if (xp == NULL) {
+ 		*dir = -ENOBUFS;
+Binary files pristine-linux-2.6.12/scripts/basic/docproc and linux-2.6.12-xen/scripts/basic/docproc differ.
+diff -Nurp pristine-linux-2.6.12/scripts/basic/.docproc.cmd linux-2.6.12-xen/scripts/basic/.docproc.cmd
+--- pristine-linux-2.6.12/scripts/basic/.docproc.cmd	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/basic/.docproc.cmd	2006-03-05 23:55:01.675413196 +0100
+@@ -0,0 +1,68 @@
++cmd_scripts/basic/docproc := gcc -Wp,-MD,scripts/basic/.docproc.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer        -o scripts/basic/docproc scripts/basic/docproc.c
++
++deps_scripts/basic/docproc := \
++  scripts/basic/docproc.c \
++  /usr/include/stdio.h \
++  /usr/include/features.h \
++  /usr/include/sys/cdefs.h \
++  /usr/include/gnu/stubs.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
++  /usr/include/bits/types.h \
++  /usr/include/bits/wordsize.h \
++  /usr/include/bits/typesizes.h \
++  /usr/include/libio.h \
++  /usr/include/_G_config.h \
++  /usr/include/wchar.h \
++  /usr/include/bits/wchar.h \
++  /usr/include/gconv.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
++  /usr/include/bits/stdio_lim.h \
++  /usr/include/bits/sys_errlist.h \
++  /usr/include/bits/stdio.h \
++  /usr/include/stdlib.h \
++  /usr/include/sys/types.h \
++  /usr/include/time.h \
++  /usr/include/endian.h \
++  /usr/include/bits/endian.h \
++  /usr/include/sys/select.h \
++  /usr/include/bits/select.h \
++  /usr/include/bits/sigset.h \
++  /usr/include/bits/time.h \
++  /usr/include/sys/sysmacros.h \
++  /usr/include/bits/pthreadtypes.h \
++  /usr/include/bits/sched.h \
++  /usr/include/alloca.h \
++  /usr/include/string.h \
++  /usr/include/bits/string.h \
++  /usr/include/bits/string2.h \
++  /usr/include/ctype.h \
++  /usr/include/unistd.h \
++  /usr/include/bits/posix_opt.h \
++  /usr/include/bits/confname.h \
++  /usr/include/getopt.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/limits.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/syslimits.h \
++  /usr/include/limits.h \
++  /usr/include/bits/posix1_lim.h \
++  /usr/include/bits/local_lim.h \
++  /usr/include/linux/limits.h \
++  /usr/include/bits/posix2_lim.h \
++  /usr/include/sys/wait.h \
++  /usr/include/signal.h \
++  /usr/include/bits/signum.h \
++  /usr/include/bits/siginfo.h \
++  /usr/include/bits/sigaction.h \
++  /usr/include/bits/sigcontext.h \
++  /usr/include/asm/sigcontext.h \
++  /usr/include/asm-i486/sigcontext.h \
++  /usr/include/linux/compiler.h \
++  /usr/include/bits/sigstack.h \
++  /usr/include/bits/sigthread.h \
++  /usr/include/sys/resource.h \
++  /usr/include/bits/resource.h \
++  /usr/include/bits/waitflags.h \
++  /usr/include/bits/waitstatus.h \
++
++scripts/basic/docproc: $(deps_scripts/basic/docproc)
++
++$(deps_scripts/basic/docproc):
+Binary files pristine-linux-2.6.12/scripts/basic/fixdep and linux-2.6.12-xen/scripts/basic/fixdep differ.
+diff -Nurp pristine-linux-2.6.12/scripts/basic/.fixdep.cmd linux-2.6.12-xen/scripts/basic/.fixdep.cmd
+--- pristine-linux-2.6.12/scripts/basic/.fixdep.cmd	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/basic/.fixdep.cmd	2006-03-05 23:55:01.077501411 +0100
+@@ -0,0 +1,78 @@
++cmd_scripts/basic/fixdep := gcc -Wp,-MD,scripts/basic/.fixdep.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer        -o scripts/basic/fixdep scripts/basic/fixdep.c
++
++deps_scripts/basic/fixdep := \
++  scripts/basic/fixdep.c \
++    $(wildcard include/config/his/driver.h) \
++    $(wildcard include/config/my/option.h) \
++    $(wildcard include/config/.h) \
++    $(wildcard include/config/foo.h) \
++    $(wildcard include/config/boom.h) \
++  /usr/include/sys/types.h \
++  /usr/include/features.h \
++  /usr/include/sys/cdefs.h \
++  /usr/include/gnu/stubs.h \
++  /usr/include/bits/types.h \
++  /usr/include/bits/wordsize.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
++  /usr/include/bits/typesizes.h \
++  /usr/include/time.h \
++  /usr/include/endian.h \
++  /usr/include/bits/endian.h \
++  /usr/include/sys/select.h \
++  /usr/include/bits/select.h \
++  /usr/include/bits/sigset.h \
++  /usr/include/bits/time.h \
++  /usr/include/sys/sysmacros.h \
++  /usr/include/bits/pthreadtypes.h \
++  /usr/include/bits/sched.h \
++  /usr/include/sys/stat.h \
++  /usr/include/bits/stat.h \
++  /usr/include/sys/mman.h \
++  /usr/include/bits/mman.h \
++  /usr/include/unistd.h \
++  /usr/include/bits/posix_opt.h \
++  /usr/include/bits/confname.h \
++  /usr/include/getopt.h \
++  /usr/include/fcntl.h \
++  /usr/include/bits/fcntl.h \
++  /usr/include/string.h \
++  /usr/include/bits/string.h \
++  /usr/include/bits/string2.h \
++  /usr/include/stdlib.h \
++  /usr/include/alloca.h \
++  /usr/include/stdio.h \
++  /usr/include/libio.h \
++  /usr/include/_G_config.h \
++  /usr/include/wchar.h \
++  /usr/include/bits/wchar.h \
++  /usr/include/gconv.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
++  /usr/include/bits/stdio_lim.h \
++  /usr/include/bits/sys_errlist.h \
++  /usr/include/bits/stdio.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/limits.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/syslimits.h \
++  /usr/include/limits.h \
++  /usr/include/bits/posix1_lim.h \
++  /usr/include/bits/local_lim.h \
++  /usr/include/linux/limits.h \
++  /usr/include/bits/posix2_lim.h \
++  /usr/include/ctype.h \
++  /usr/include/arpa/inet.h \
++  /usr/include/netinet/in.h \
++  /usr/include/stdint.h \
++  /usr/include/sys/socket.h \
++  /usr/include/sys/uio.h \
++  /usr/include/bits/uio.h \
++  /usr/include/bits/socket.h \
++  /usr/include/bits/sockaddr.h \
++  /usr/include/asm/socket.h \
++  /usr/include/asm-i486/socket.h \
++  /usr/include/asm/sockios.h \
++  /usr/include/asm-i486/sockios.h \
++  /usr/include/bits/in.h \
++  /usr/include/bits/byteswap.h \
++
++scripts/basic/fixdep: $(deps_scripts/basic/fixdep)
++
++$(deps_scripts/basic/fixdep):
+Binary files pristine-linux-2.6.12/scripts/basic/split-include and linux-2.6.12-xen/scripts/basic/split-include differ.
+diff -Nurp pristine-linux-2.6.12/scripts/basic/.split-include.cmd linux-2.6.12-xen/scripts/basic/.split-include.cmd
+--- pristine-linux-2.6.12/scripts/basic/.split-include.cmd	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/basic/.split-include.cmd	2006-03-05 23:55:01.338462909 +0100
+@@ -0,0 +1,58 @@
++cmd_scripts/basic/split-include := gcc -Wp,-MD,scripts/basic/.split-include.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer        -o scripts/basic/split-include scripts/basic/split-include.c
++
++deps_scripts/basic/split-include := \
++  scripts/basic/split-include.c \
++    $(wildcard include/config/.h) \
++  /usr/include/sys/stat.h \
++  /usr/include/features.h \
++  /usr/include/sys/cdefs.h \
++  /usr/include/gnu/stubs.h \
++  /usr/include/bits/types.h \
++  /usr/include/bits/wordsize.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
++  /usr/include/bits/typesizes.h \
++  /usr/include/time.h \
++  /usr/include/bits/stat.h \
++  /usr/include/sys/types.h \
++  /usr/include/endian.h \
++  /usr/include/bits/endian.h \
++  /usr/include/sys/select.h \
++  /usr/include/bits/select.h \
++  /usr/include/bits/sigset.h \
++  /usr/include/bits/time.h \
++  /usr/include/sys/sysmacros.h \
++  /usr/include/bits/pthreadtypes.h \
++  /usr/include/bits/sched.h \
++  /usr/include/ctype.h \
++  /usr/include/errno.h \
++  /usr/include/bits/errno.h \
++  /usr/include/linux/errno.h \
++  /usr/include/asm/errno.h \
++  /usr/include/asm-i486/errno.h \
++  /usr/include/asm-generic/errno.h \
++  /usr/include/asm-generic/errno-base.h \
++  /usr/include/fcntl.h \
++  /usr/include/bits/fcntl.h \
++  /usr/include/stdio.h \
++  /usr/include/libio.h \
++  /usr/include/_G_config.h \
++  /usr/include/wchar.h \
++  /usr/include/bits/wchar.h \
++  /usr/include/gconv.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
++  /usr/include/bits/stdio_lim.h \
++  /usr/include/bits/sys_errlist.h \
++  /usr/include/bits/stdio.h \
++  /usr/include/stdlib.h \
++  /usr/include/alloca.h \
++  /usr/include/string.h \
++  /usr/include/bits/string.h \
++  /usr/include/bits/string2.h \
++  /usr/include/unistd.h \
++  /usr/include/bits/posix_opt.h \
++  /usr/include/bits/confname.h \
++  /usr/include/getopt.h \
++
++scripts/basic/split-include: $(deps_scripts/basic/split-include)
++
++$(deps_scripts/basic/split-include):
+Binary files pristine-linux-2.6.12/scripts/kconfig/conf and linux-2.6.12-xen/scripts/kconfig/conf differ.
+diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.conf.cmd linux-2.6.12-xen/scripts/kconfig/.conf.cmd
+--- pristine-linux-2.6.12/scripts/kconfig/.conf.cmd	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/kconfig/.conf.cmd	2006-03-05 23:55:05.913787813 +0100
+@@ -0,0 +1 @@
++cmd_scripts/kconfig/conf := gcc  -o scripts/kconfig/conf scripts/kconfig/conf.o scripts/kconfig/zconf.tab.o  
+Binary files pristine-linux-2.6.12/scripts/kconfig/conf.o and linux-2.6.12-xen/scripts/kconfig/conf.o differ.
+diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.conf.o.cmd linux-2.6.12-xen/scripts/kconfig/.conf.o.cmd
+--- pristine-linux-2.6.12/scripts/kconfig/.conf.o.cmd	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/kconfig/.conf.o.cmd	2006-03-05 23:55:02.088352272 +0100
+@@ -0,0 +1,55 @@
++cmd_scripts/kconfig/conf.o := gcc -Wp,-MD,scripts/kconfig/.conf.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer       -c -o scripts/kconfig/conf.o scripts/kconfig/conf.c
++
++deps_scripts/kconfig/conf.o := \
++  scripts/kconfig/conf.c \
++  /usr/include/ctype.h \
++  /usr/include/features.h \
++  /usr/include/sys/cdefs.h \
++  /usr/include/gnu/stubs.h \
++  /usr/include/bits/types.h \
++  /usr/include/bits/wordsize.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
++  /usr/include/bits/typesizes.h \
++  /usr/include/endian.h \
++  /usr/include/bits/endian.h \
++  /usr/include/stdlib.h \
++  /usr/include/sys/types.h \
++  /usr/include/time.h \
++  /usr/include/sys/select.h \
++  /usr/include/bits/select.h \
++  /usr/include/bits/sigset.h \
++  /usr/include/bits/time.h \
++  /usr/include/sys/sysmacros.h \
++  /usr/include/bits/pthreadtypes.h \
++  /usr/include/bits/sched.h \
++  /usr/include/alloca.h \
++  /usr/include/string.h \
++  /usr/include/bits/string.h \
++  /usr/include/bits/string2.h \
++  /usr/include/unistd.h \
++  /usr/include/bits/posix_opt.h \
++  /usr/include/bits/confname.h \
++  /usr/include/getopt.h \
++  /usr/include/sys/stat.h \
++  /usr/include/bits/stat.h \
++  scripts/kconfig/lkc.h \
++  scripts/kconfig/expr.h \
++  /usr/include/stdio.h \
++  /usr/include/libio.h \
++  /usr/include/_G_config.h \
++  /usr/include/wchar.h \
++  /usr/include/bits/wchar.h \
++  /usr/include/gconv.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
++  /usr/include/bits/stdio_lim.h \
++  /usr/include/bits/sys_errlist.h \
++  /usr/include/bits/stdio.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdbool.h \
++  /usr/include/libintl.h \
++  /usr/include/locale.h \
++  /usr/include/bits/locale.h \
++  scripts/kconfig/lkc_proto.h \
++
++scripts/kconfig/conf.o: $(deps_scripts/kconfig/conf.o)
++
++$(deps_scripts/kconfig/conf.o):
+Binary files pristine-linux-2.6.12/scripts/kconfig/kxgettext.o and linux-2.6.12-xen/scripts/kconfig/kxgettext.o differ.
+diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.kxgettext.o.cmd linux-2.6.12-xen/scripts/kconfig/.kxgettext.o.cmd
+--- pristine-linux-2.6.12/scripts/kconfig/.kxgettext.o.cmd	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/kconfig/.kxgettext.o.cmd	2006-03-05 23:55:02.230331324 +0100
+@@ -0,0 +1,48 @@
++cmd_scripts/kconfig/kxgettext.o := gcc -Wp,-MD,scripts/kconfig/.kxgettext.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer       -c -o scripts/kconfig/kxgettext.o scripts/kconfig/kxgettext.c
++
++deps_scripts/kconfig/kxgettext.o := \
++  scripts/kconfig/kxgettext.c \
++  /usr/include/stdlib.h \
++  /usr/include/features.h \
++  /usr/include/sys/cdefs.h \
++  /usr/include/gnu/stubs.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
++  /usr/include/sys/types.h \
++  /usr/include/bits/types.h \
++  /usr/include/bits/wordsize.h \
++  /usr/include/bits/typesizes.h \
++  /usr/include/time.h \
++  /usr/include/endian.h \
++  /usr/include/bits/endian.h \
++  /usr/include/sys/select.h \
++  /usr/include/bits/select.h \
++  /usr/include/bits/sigset.h \
++  /usr/include/bits/time.h \
++  /usr/include/sys/sysmacros.h \
++  /usr/include/bits/pthreadtypes.h \
++  /usr/include/bits/sched.h \
++  /usr/include/alloca.h \
++  /usr/include/string.h \
++  /usr/include/bits/string.h \
++  /usr/include/bits/string2.h \
++  scripts/kconfig/lkc.h \
++  scripts/kconfig/expr.h \
++  /usr/include/stdio.h \
++  /usr/include/libio.h \
++  /usr/include/_G_config.h \
++  /usr/include/wchar.h \
++  /usr/include/bits/wchar.h \
++  /usr/include/gconv.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
++  /usr/include/bits/stdio_lim.h \
++  /usr/include/bits/sys_errlist.h \
++  /usr/include/bits/stdio.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdbool.h \
++  /usr/include/libintl.h \
++  /usr/include/locale.h \
++  /usr/include/bits/locale.h \
++  scripts/kconfig/lkc_proto.h \
++
++scripts/kconfig/kxgettext.o: $(deps_scripts/kconfig/kxgettext.o)
++
++$(deps_scripts/kconfig/kxgettext.o):
+diff -Nurp pristine-linux-2.6.12/scripts/kconfig/lex.zconf.c linux-2.6.12-xen/scripts/kconfig/lex.zconf.c
+--- pristine-linux-2.6.12/scripts/kconfig/lex.zconf.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/kconfig/lex.zconf.c	2006-03-05 23:54:56.409190071 +0100
+@@ -0,0 +1,3688 @@
++
++#line 3 "lex.zconf.c"
++
++#define  YY_INT_ALIGNED short int
++
++/* A lexical scanner generated by flex */
++
++#define FLEX_SCANNER
++#define YY_FLEX_MAJOR_VERSION 2
++#define YY_FLEX_MINOR_VERSION 5
++#define YY_FLEX_SUBMINOR_VERSION 31
++#if YY_FLEX_SUBMINOR_VERSION > 0
++#define FLEX_BETA
++#endif
++
++/* First, we deal with  platform-specific or compiler-specific issues. */
++
++/* begin standard C headers. */
++#include <stdio.h>
++#include <string.h>
++#include <errno.h>
++#include <stdlib.h>
++
++/* end standard C headers. */
++
++/* flex integer type definitions */
++
++#ifndef FLEXINT_H
++#define FLEXINT_H
++
++/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
++
++#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L
++#include <inttypes.h>
++typedef int8_t flex_int8_t;
++typedef uint8_t flex_uint8_t;
++typedef int16_t flex_int16_t;
++typedef uint16_t flex_uint16_t;
++typedef int32_t flex_int32_t;
++typedef uint32_t flex_uint32_t;
++#else
++typedef signed char flex_int8_t;
++typedef short int flex_int16_t;
++typedef int flex_int32_t;
++typedef unsigned char flex_uint8_t; 
++typedef unsigned short int flex_uint16_t;
++typedef unsigned int flex_uint32_t;
++#endif /* ! C99 */
++
++/* Limits of integral types. */
++#ifndef INT8_MIN
++#define INT8_MIN               (-128)
++#endif
++#ifndef INT16_MIN
++#define INT16_MIN              (-32767-1)
++#endif
++#ifndef INT32_MIN
++#define INT32_MIN              (-2147483647-1)
++#endif
++#ifndef INT8_MAX
++#define INT8_MAX               (127)
++#endif
++#ifndef INT16_MAX
++#define INT16_MAX              (32767)
++#endif
++#ifndef INT32_MAX
++#define INT32_MAX              (2147483647)
++#endif
++#ifndef UINT8_MAX
++#define UINT8_MAX              (255U)
++#endif
++#ifndef UINT16_MAX
++#define UINT16_MAX             (65535U)
++#endif
++#ifndef UINT32_MAX
++#define UINT32_MAX             (4294967295U)
++#endif
++
++#endif /* ! FLEXINT_H */
++
++#ifdef __cplusplus
++
++/* The "const" storage-class-modifier is valid. */
++#define YY_USE_CONST
++
++#else	/* ! __cplusplus */
++
++#if __STDC__
++
++#define YY_USE_CONST
++
++#endif	/* __STDC__ */
++#endif	/* ! __cplusplus */
++
++#ifdef YY_USE_CONST
++#define yyconst const
++#else
++#define yyconst
++#endif
++
++/* Returned upon end-of-file. */
++#define YY_NULL 0
++
++/* Promotes a possibly negative, possibly signed char to an unsigned
++ * integer for use as an array index.  If the signed char is negative,
++ * we want to instead treat it as an 8-bit unsigned char, hence the
++ * double cast.
++ */
++#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
++
++/* Enter a start condition.  This macro really ought to take a parameter,
++ * but we do it the disgusting crufty way forced on us by the ()-less
++ * definition of BEGIN.
++ */
++#define BEGIN (yy_start) = 1 + 2 *
++
++/* Translate the current start state into a value that can be later handed
++ * to BEGIN to return to the state.  The YYSTATE alias is for lex
++ * compatibility.
++ */
++#define YY_START (((yy_start) - 1) / 2)
++#define YYSTATE YY_START
++
++/* Action number for EOF rule of a given start state. */
++#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
++
++/* Special action meaning "start processing a new file". */
++#define YY_NEW_FILE zconfrestart(zconfin  )
++
++#define YY_END_OF_BUFFER_CHAR 0
++
++/* Size of default input buffer. */
++#ifndef YY_BUF_SIZE
++#define YY_BUF_SIZE 16384
++#endif
++
++#ifndef YY_TYPEDEF_YY_BUFFER_STATE
++#define YY_TYPEDEF_YY_BUFFER_STATE
++typedef struct yy_buffer_state *YY_BUFFER_STATE;
++#endif
++
++extern int zconfleng;
++
++extern FILE *zconfin, *zconfout;
++
++#define EOB_ACT_CONTINUE_SCAN 0
++#define EOB_ACT_END_OF_FILE 1
++#define EOB_ACT_LAST_MATCH 2
++
++    #define YY_LESS_LINENO(n)
++    
++/* Return all but the first "n" matched characters back to the input stream. */
++#define yyless(n) \
++	do \
++		{ \
++		/* Undo effects of setting up zconftext. */ \
++        int yyless_macro_arg = (n); \
++        YY_LESS_LINENO(yyless_macro_arg);\
++		*yy_cp = (yy_hold_char); \
++		YY_RESTORE_YY_MORE_OFFSET \
++		(yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
++		YY_DO_BEFORE_ACTION; /* set up zconftext again */ \
++		} \
++	while ( 0 )
++
++#define unput(c) yyunput( c, (yytext_ptr)  )
++
++/* The following is because we cannot portably get our hands on size_t
++ * (without autoconf's help, which isn't available because we want
++ * flex-generated scanners to compile on their own).
++ */
++
++#ifndef YY_TYPEDEF_YY_SIZE_T
++#define YY_TYPEDEF_YY_SIZE_T
++typedef unsigned int yy_size_t;
++#endif
++
++#ifndef YY_STRUCT_YY_BUFFER_STATE
++#define YY_STRUCT_YY_BUFFER_STATE
++struct yy_buffer_state
++	{
++	FILE *yy_input_file;
++
++	char *yy_ch_buf;		/* input buffer */
++	char *yy_buf_pos;		/* current position in input buffer */
++
++	/* Size of input buffer in bytes, not including room for EOB
++	 * characters.
++	 */
++	yy_size_t yy_buf_size;
++
++	/* Number of characters read into yy_ch_buf, not including EOB
++	 * characters.
++	 */
++	int yy_n_chars;
++
++	/* Whether we "own" the buffer - i.e., we know we created it,
++	 * and can realloc() it to grow it, and should free() it to
++	 * delete it.
++	 */
++	int yy_is_our_buffer;
++
++	/* Whether this is an "interactive" input source; if so, and
++	 * if we're using stdio for input, then we want to use getc()
++	 * instead of fread(), to make sure we stop fetching input after
++	 * each newline.
++	 */
++	int yy_is_interactive;
++
++	/* Whether we're considered to be at the beginning of a line.
++	 * If so, '^' rules will be active on the next match, otherwise
++	 * not.
++	 */
++	int yy_at_bol;
++
++    int yy_bs_lineno; /**< The line count. */
++    int yy_bs_column; /**< The column count. */
++    
++	/* Whether to try to fill the input buffer when we reach the
++	 * end of it.
++	 */
++	int yy_fill_buffer;
++
++	int yy_buffer_status;
++
++#define YY_BUFFER_NEW 0
++#define YY_BUFFER_NORMAL 1
++	/* When an EOF's been seen but there's still some text to process
++	 * then we mark the buffer as YY_EOF_PENDING, to indicate that we
++	 * shouldn't try reading from the input source any more.  We might
++	 * still have a bunch of tokens to match, though, because of
++	 * possible backing-up.
++	 *
++	 * When we actually see the EOF, we change the status to "new"
++	 * (via zconfrestart()), so that the user can continue scanning by
++	 * just pointing zconfin at a new input file.
++	 */
++#define YY_BUFFER_EOF_PENDING 2
++
++	};
++#endif /* !YY_STRUCT_YY_BUFFER_STATE */
++
++/* Stack of input buffers. */
++static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
++static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
++static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
++
++/* We provide macros for accessing buffer states in case in the
++ * future we want to put the buffer states in a more general
++ * "scanner state".
++ *
++ * Returns the top of the stack, or NULL.
++ */
++#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
++                          ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
++                          : NULL)
++
++/* Same as previous macro, but useful when we know that the buffer stack is not
++ * NULL or when we need an lvalue. For internal use only.
++ */
++#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
++
++/* yy_hold_char holds the character lost when zconftext is formed. */
++static char yy_hold_char;
++static int yy_n_chars;		/* number of characters read into yy_ch_buf */
++int zconfleng;
++
++/* Points to current character in buffer. */
++static char *yy_c_buf_p = (char *) 0;
++static int yy_init = 1;		/* whether we need to initialize */
++static int yy_start = 0;	/* start state number */
++
++/* Flag which is used to allow zconfwrap()'s to do buffer switches
++ * instead of setting up a fresh zconfin.  A bit of a hack ...
++ */
++static int yy_did_buffer_switch_on_eof;
++
++void zconfrestart (FILE *input_file  );
++void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer  );
++YY_BUFFER_STATE zconf_create_buffer (FILE *file,int size  );
++void zconf_delete_buffer (YY_BUFFER_STATE b  );
++void zconf_flush_buffer (YY_BUFFER_STATE b  );
++void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer  );
++void zconfpop_buffer_state (void );
++
++static void zconfensure_buffer_stack (void );
++static void zconf_load_buffer_state (void );
++static void zconf_init_buffer (YY_BUFFER_STATE b,FILE *file  );
++
++#define YY_FLUSH_BUFFER zconf_flush_buffer(YY_CURRENT_BUFFER )
++
++YY_BUFFER_STATE zconf_scan_buffer (char *base,yy_size_t size  );
++YY_BUFFER_STATE zconf_scan_string (yyconst char *yy_str  );
++YY_BUFFER_STATE zconf_scan_bytes (yyconst char *bytes,int len  );
++
++void *zconfalloc (yy_size_t  );
++void *zconfrealloc (void *,yy_size_t  );
++void zconffree (void *  );
++
++#define yy_new_buffer zconf_create_buffer
++
++#define yy_set_interactive(is_interactive) \
++	{ \
++	if ( ! YY_CURRENT_BUFFER ){ \
++        zconfensure_buffer_stack (); \
++		YY_CURRENT_BUFFER_LVALUE =    \
++            zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
++	} \
++	YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
++	}
++
++#define yy_set_bol(at_bol) \
++	{ \
++	if ( ! YY_CURRENT_BUFFER ){\
++        zconfensure_buffer_stack (); \
++		YY_CURRENT_BUFFER_LVALUE =    \
++            zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
++	} \
++	YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
++	}
++
++#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
++
++/* Begin user sect3 */
++
++#define zconfwrap(n) 1
++#define YY_SKIP_YYWRAP
++
++typedef unsigned char YY_CHAR;
++
++FILE *zconfin = (FILE *) 0, *zconfout = (FILE *) 0;
++
++typedef int yy_state_type;
++
++extern int zconflineno;
++
++int zconflineno = 1;
++
++extern char *zconftext;
++#define yytext_ptr zconftext
++static yyconst flex_int16_t yy_nxt[][38] =
++    {
++    {
++        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
++        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
++        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
++        0,    0,    0,    0,    0,    0,    0,    0
++    },
++
++    {
++       11,   12,   13,   14,   12,   12,   15,   12,   12,   12,
++       12,   12,   12,   12,   12,   12,   12,   12,   12,   12,
++       12,   12,   12,   12,   12,   12,   12,   12,   12,   12,
++       12,   12,   12,   12,   12,   12,   12,   12
++    },
++
++    {
++       11,   12,   13,   14,   12,   12,   15,   12,   12,   12,
++       12,   12,   12,   12,   12,   12,   12,   12,   12,   12,
++
++       12,   12,   12,   12,   12,   12,   12,   12,   12,   12,
++       12,   12,   12,   12,   12,   12,   12,   12
++    },
++
++    {
++       11,   16,   16,   17,   16,   16,   16,   16,   16,   16,
++       16,   16,   16,   18,   16,   16,   18,   18,   19,   20,
++       21,   22,   18,   18,   23,   24,   18,   25,   18,   26,
++       27,   18,   28,   29,   30,   18,   18,   16
++    },
++
++    {
++       11,   16,   16,   17,   16,   16,   16,   16,   16,   16,
++       16,   16,   16,   18,   16,   16,   18,   18,   19,   20,
++       21,   22,   18,   18,   23,   24,   18,   25,   18,   26,
++       27,   18,   28,   29,   30,   18,   18,   16
++
++    },
++
++    {
++       11,   31,   32,   33,   31,   31,   31,   31,   31,   31,
++       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
++       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
++       31,   31,   31,   31,   31,   31,   31,   31
++    },
++
++    {
++       11,   31,   32,   33,   31,   31,   31,   31,   31,   31,
++       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
++       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
++       31,   31,   31,   31,   31,   31,   31,   31
++    },
++
++    {
++       11,   34,   34,   35,   34,   36,   34,   34,   36,   34,
++       34,   34,   34,   34,   34,   37,   34,   34,   34,   34,
++
++       34,   34,   34,   34,   34,   34,   34,   34,   34,   34,
++       34,   34,   34,   34,   34,   34,   34,   34
++    },
++
++    {
++       11,   34,   34,   35,   34,   36,   34,   34,   36,   34,
++       34,   34,   34,   34,   34,   37,   34,   34,   34,   34,
++       34,   34,   34,   34,   34,   34,   34,   34,   34,   34,
++       34,   34,   34,   34,   34,   34,   34,   34
++    },
++
++    {
++       11,   38,   38,   39,   40,   41,   42,   43,   41,   44,
++       45,   46,   47,   47,   48,   49,   47,   47,   47,   47,
++       47,   47,   47,   47,   47,   50,   47,   47,   47,   51,
++       47,   47,   47,   47,   47,   47,   47,   52
++
++    },
++
++    {
++       11,   38,   38,   39,   40,   41,   42,   43,   41,   44,
++       45,   46,   47,   47,   48,   49,   47,   47,   47,   47,
++       47,   47,   47,   47,   47,   50,   47,   47,   47,   51,
++       47,   47,   47,   47,   47,   47,   47,   52
++    },
++
++    {
++      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,
++      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,
++      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,
++      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11
++    },
++
++    {
++       11,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,
++      -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,
++
++      -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,
++      -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12
++    },
++
++    {
++       11,  -13,   53,   54,  -13,  -13,   55,  -13,  -13,  -13,
++      -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,
++      -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,
++      -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13
++    },
++
++    {
++       11,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,
++      -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,
++      -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,
++      -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14
++
++    },
++
++    {
++       11,   56,   56,   57,   56,   56,   56,   56,   56,   56,
++       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
++       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
++       56,   56,   56,   56,   56,   56,   56,   56
++    },
++
++    {
++       11,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,
++      -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,
++      -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,
++      -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16
++    },
++
++    {
++       11,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,
++      -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,
++
++      -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,
++      -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17
++    },
++
++    {
++       11,  -18,  -18,  -18,  -18,  -18,  -18,  -18,  -18,  -18,
++      -18,  -18,  -18,   58,  -18,  -18,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -18
++    },
++
++    {
++       11,  -19,  -19,  -19,  -19,  -19,  -19,  -19,  -19,  -19,
++      -19,  -19,  -19,   58,  -19,  -19,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   59,
++       58,   58,   58,   58,   58,   58,   58,  -19
++
++    },
++
++    {
++       11,  -20,  -20,  -20,  -20,  -20,  -20,  -20,  -20,  -20,
++      -20,  -20,  -20,   58,  -20,  -20,   58,   58,   58,   58,
++       58,   58,   58,   58,   60,   58,   58,   58,   58,   61,
++       58,   58,   58,   58,   58,   58,   58,  -20
++    },
++
++    {
++       11,  -21,  -21,  -21,  -21,  -21,  -21,  -21,  -21,  -21,
++      -21,  -21,  -21,   58,  -21,  -21,   58,   58,   58,   58,
++       58,   62,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -21
++    },
++
++    {
++       11,  -22,  -22,  -22,  -22,  -22,  -22,  -22,  -22,  -22,
++      -22,  -22,  -22,   58,  -22,  -22,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   63,   58,
++       58,   58,   58,   58,   58,   58,   58,  -22
++    },
++
++    {
++       11,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,
++      -23,  -23,  -23,   58,  -23,  -23,   58,   58,   58,   58,
++       58,   64,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -23
++    },
++
++    {
++       11,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,
++      -24,  -24,  -24,   58,  -24,  -24,   58,   58,   58,   58,
++       58,   58,   65,   58,   58,   58,   58,   58,   66,   58,
++       58,   58,   58,   58,   58,   58,   58,  -24
++
++    },
++
++    {
++       11,  -25,  -25,  -25,  -25,  -25,  -25,  -25,  -25,  -25,
++      -25,  -25,  -25,   58,  -25,  -25,   58,   67,   58,   58,
++       58,   68,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -25
++    },
++
++    {
++       11,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,
++      -26,  -26,  -26,   58,  -26,  -26,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       69,   58,   58,   58,   58,   58,   58,  -26
++    },
++
++    {
++       11,  -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27,
++      -27,  -27,  -27,   58,  -27,  -27,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   70,   58,   58,   58,   58,  -27
++    },
++
++    {
++       11,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,
++      -28,  -28,  -28,   58,  -28,  -28,   58,   71,   58,   58,
++       58,   72,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -28
++    },
++
++    {
++       11,  -29,  -29,  -29,  -29,  -29,  -29,  -29,  -29,  -29,
++      -29,  -29,  -29,   58,  -29,  -29,   58,   58,   58,   58,
++       58,   73,   58,   58,   58,   58,   58,   58,   58,   74,
++       58,   58,   58,   58,   75,   58,   58,  -29
++
++    },
++
++    {
++       11,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,
++      -30,  -30,  -30,   58,  -30,  -30,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   76,   58,   58,   58,   58,  -30
++    },
++
++    {
++       11,   77,   77,  -31,   77,   77,   77,   77,   77,   77,
++       77,   77,   77,   77,   77,   77,   77,   77,   77,   77,
++       77,   77,   77,   77,   77,   77,   77,   77,   77,   77,
++       77,   77,   77,   77,   77,   77,   77,   77
++    },
++
++    {
++       11,  -32,   78,   79,  -32,  -32,  -32,  -32,  -32,  -32,
++      -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,
++
++      -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,
++      -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32
++    },
++
++    {
++       11,   80,  -33,  -33,   80,   80,   80,   80,   80,   80,
++       80,   80,   80,   80,   80,   80,   80,   80,   80,   80,
++       80,   80,   80,   80,   80,   80,   80,   80,   80,   80,
++       80,   80,   80,   80,   80,   80,   80,   80
++    },
++
++    {
++       11,   81,   81,   82,   81,  -34,   81,   81,  -34,   81,
++       81,   81,   81,   81,   81,  -34,   81,   81,   81,   81,
++       81,   81,   81,   81,   81,   81,   81,   81,   81,   81,
++       81,   81,   81,   81,   81,   81,   81,   81
++
++    },
++
++    {
++       11,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,
++      -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,
++      -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,
++      -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35
++    },
++
++    {
++       11,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,
++      -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,
++      -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,
++      -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36
++    },
++
++    {
++       11,   83,   83,   84,   83,   83,   83,   83,   83,   83,
++       83,   83,   83,   83,   83,   83,   83,   83,   83,   83,
++
++       83,   83,   83,   83,   83,   83,   83,   83,   83,   83,
++       83,   83,   83,   83,   83,   83,   83,   83
++    },
++
++    {
++       11,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,
++      -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,
++      -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,
++      -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38
++    },
++
++    {
++       11,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,
++      -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,
++      -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,
++      -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39
++
++    },
++
++    {
++       11,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,
++      -40,  -40,  -40,  -40,   85,  -40,  -40,  -40,  -40,  -40,
++      -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,
++      -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40
++    },
++
++    {
++       11,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,
++      -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,
++      -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,
++      -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41
++    },
++
++    {
++       11,   86,   86,  -42,   86,   86,   86,   86,   86,   86,
++       86,   86,   86,   86,   86,   86,   86,   86,   86,   86,
++
++       86,   86,   86,   86,   86,   86,   86,   86,   86,   86,
++       86,   86,   86,   86,   86,   86,   86,   86
++    },
++
++    {
++       11,  -43,  -43,  -43,  -43,  -43,  -43,   87,  -43,  -43,
++      -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,
++      -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,
++      -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43
++    },
++
++    {
++       11,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,
++      -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,
++      -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,
++      -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44
++
++    },
++
++    {
++       11,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,
++      -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,
++      -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,
++      -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45
++    },
++
++    {
++       11,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,
++      -46,   88,   89,   89,  -46,  -46,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,  -46
++    },
++
++    {
++       11,  -47,  -47,  -47,  -47,  -47,  -47,  -47,  -47,  -47,
++      -47,   89,   89,   89,  -47,  -47,   89,   89,   89,   89,
++
++       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,  -47
++    },
++
++    {
++       11,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,
++      -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,
++      -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,
++      -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48
++    },
++
++    {
++       11,  -49,  -49,   90,  -49,  -49,  -49,  -49,  -49,  -49,
++      -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,
++      -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,
++      -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49
++
++    },
++
++    {
++       11,  -50,  -50,  -50,  -50,  -50,  -50,  -50,  -50,  -50,
++      -50,   89,   89,   89,  -50,  -50,   89,   89,   89,   89,
++       89,   89,   91,   89,   89,   89,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,  -50
++    },
++
++    {
++       11,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,
++      -51,   89,   89,   89,  -51,  -51,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,   89,   92,   89,
++       89,   89,   89,   89,   89,   89,   89,  -51
++    },
++
++    {
++       11,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,
++      -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,
++
++      -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,
++      -52,  -52,  -52,  -52,  -52,  -52,  -52,   93
++    },
++
++    {
++       11,  -53,   53,   54,  -53,  -53,   55,  -53,  -53,  -53,
++      -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,
++      -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,
++      -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53
++    },
++
++    {
++       11,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,
++      -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,
++      -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,
++      -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54
++
++    },
++
++    {
++       11,   56,   56,   57,   56,   56,   56,   56,   56,   56,
++       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
++       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
++       56,   56,   56,   56,   56,   56,   56,   56
++    },
++
++    {
++       11,   56,   56,   57,   56,   56,   56,   56,   56,   56,
++       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
++       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
++       56,   56,   56,   56,   56,   56,   56,   56
++    },
++
++    {
++       11,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,
++      -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,
++
++      -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,
++      -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57
++    },
++
++    {
++       11,  -58,  -58,  -58,  -58,  -58,  -58,  -58,  -58,  -58,
++      -58,  -58,  -58,   58,  -58,  -58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -58
++    },
++
++    {
++       11,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,
++      -59,  -59,  -59,   58,  -59,  -59,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   94,
++       58,   58,   58,   58,   58,   58,   58,  -59
++
++    },
++
++    {
++       11,  -60,  -60,  -60,  -60,  -60,  -60,  -60,  -60,  -60,
++      -60,  -60,  -60,   58,  -60,  -60,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   95,
++       58,   58,   58,   58,   58,   58,   58,  -60
++    },
++
++    {
++       11,  -61,  -61,  -61,  -61,  -61,  -61,  -61,  -61,  -61,
++      -61,  -61,  -61,   58,  -61,  -61,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   96,   97,   58,
++       58,   58,   58,   58,   58,   58,   58,  -61
++    },
++
++    {
++       11,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,
++      -62,  -62,  -62,   58,  -62,  -62,   58,   58,   58,   58,
++
++       58,   58,   98,   58,   58,   58,   58,   58,   58,   58,
++       99,   58,   58,   58,   58,   58,   58,  -62
++    },
++
++    {
++       11,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,
++      -63,  -63,  -63,   58,  -63,  -63,   58,  100,   58,   58,
++      101,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -63
++    },
++
++    {
++       11,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,
++      -64,  -64,  -64,   58,  -64,  -64,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,  102,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,  103,  -64
++
++    },
++
++    {
++       11,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,
++      -65,  -65,  -65,   58,  -65,  -65,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -65
++    },
++
++    {
++       11,  -66,  -66,  -66,  -66,  -66,  -66,  -66,  -66,  -66,
++      -66,  -66,  -66,   58,  -66,  -66,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  104,   58,   58,  -66
++    },
++
++    {
++       11,  -67,  -67,  -67,  -67,  -67,  -67,  -67,  -67,  -67,
++      -67,  -67,  -67,   58,  -67,  -67,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,  105,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -67
++    },
++
++    {
++       11,  -68,  -68,  -68,  -68,  -68,  -68,  -68,  -68,  -68,
++      -68,  -68,  -68,   58,  -68,  -68,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,  106,   58,
++       58,   58,   58,   58,   58,   58,   58,  -68
++    },
++
++    {
++       11,  -69,  -69,  -69,  -69,  -69,  -69,  -69,  -69,  -69,
++      -69,  -69,  -69,   58,  -69,  -69,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  107,   58,   58,  -69
++
++    },
++
++    {
++       11,  -70,  -70,  -70,  -70,  -70,  -70,  -70,  -70,  -70,
++      -70,  -70,  -70,   58,  -70,  -70,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,  108,
++       58,   58,   58,   58,   58,   58,   58,  -70
++    },
++
++    {
++       11,  -71,  -71,  -71,  -71,  -71,  -71,  -71,  -71,  -71,
++      -71,  -71,  -71,   58,  -71,  -71,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,  109,   58,
++       58,   58,   58,   58,   58,   58,   58,  -71
++    },
++
++    {
++       11,  -72,  -72,  -72,  -72,  -72,  -72,  -72,  -72,  -72,
++      -72,  -72,  -72,   58,  -72,  -72,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,  110,   58,   58,   58,   58,   58,  -72
++    },
++
++    {
++       11,  -73,  -73,  -73,  -73,  -73,  -73,  -73,  -73,  -73,
++      -73,  -73,  -73,   58,  -73,  -73,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,  111,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -73
++    },
++
++    {
++       11,  -74,  -74,  -74,  -74,  -74,  -74,  -74,  -74,  -74,
++      -74,  -74,  -74,   58,  -74,  -74,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  112,   58,  -74
++
++    },
++
++    {
++       11,  -75,  -75,  -75,  -75,  -75,  -75,  -75,  -75,  -75,
++      -75,  -75,  -75,   58,  -75,  -75,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,  113,   58,   58,   58,   58,  -75
++    },
++
++    {
++       11,  -76,  -76,  -76,  -76,  -76,  -76,  -76,  -76,  -76,
++      -76,  -76,  -76,   58,  -76,  -76,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  114,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -76
++    },
++
++    {
++       11,   77,   77,  -77,   77,   77,   77,   77,   77,   77,
++       77,   77,   77,   77,   77,   77,   77,   77,   77,   77,
++
++       77,   77,   77,   77,   77,   77,   77,   77,   77,   77,
++       77,   77,   77,   77,   77,   77,   77,   77
++    },
++
++    {
++       11,  -78,   78,   79,  -78,  -78,  -78,  -78,  -78,  -78,
++      -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,
++      -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,
++      -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78
++    },
++
++    {
++       11,   80,  -79,  -79,   80,   80,   80,   80,   80,   80,
++       80,   80,   80,   80,   80,   80,   80,   80,   80,   80,
++       80,   80,   80,   80,   80,   80,   80,   80,   80,   80,
++       80,   80,   80,   80,   80,   80,   80,   80
++
++    },
++
++    {
++       11,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,
++      -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,
++      -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,
++      -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80
++    },
++
++    {
++       11,   81,   81,   82,   81,  -81,   81,   81,  -81,   81,
++       81,   81,   81,   81,   81,  -81,   81,   81,   81,   81,
++       81,   81,   81,   81,   81,   81,   81,   81,   81,   81,
++       81,   81,   81,   81,   81,   81,   81,   81
++    },
++
++    {
++       11,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,
++      -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,
++
++      -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,
++      -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82
++    },
++
++    {
++       11,  -83,  -83,   84,  -83,  -83,  -83,  -83,  -83,  -83,
++      -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,
++      -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,
++      -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83
++    },
++
++    {
++       11,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,
++      -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,
++      -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,
++      -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84
++
++    },
++
++    {
++       11,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,
++      -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,
++      -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,
++      -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85
++    },
++
++    {
++       11,   86,   86,  -86,   86,   86,   86,   86,   86,   86,
++       86,   86,   86,   86,   86,   86,   86,   86,   86,   86,
++       86,   86,   86,   86,   86,   86,   86,   86,   86,   86,
++       86,   86,   86,   86,   86,   86,   86,   86
++    },
++
++    {
++       11,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,
++      -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,
++
++      -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,
++      -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87
++    },
++
++    {
++       11,  -88,  -88,  -88,  -88,  -88,  -88,  -88,  -88,  -88,
++      -88,  115,   89,   89,  -88,  -88,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,  -88
++    },
++
++    {
++       11,  -89,  -89,  -89,  -89,  -89,  -89,  -89,  -89,  -89,
++      -89,   89,   89,   89,  -89,  -89,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,  -89
++
++    },
++
++    {
++       11,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,
++      -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,
++      -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,
++      -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90
++    },
++
++    {
++       11,  -91,  -91,  -91,  -91,  -91,  -91,  -91,  -91,  -91,
++      -91,   89,   89,   89,  -91,  -91,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,  -91
++    },
++
++    {
++       11,  -92,  -92,  -92,  -92,  -92,  -92,  -92,  -92,  -92,
++      -92,   89,   89,   89,  -92,  -92,   89,   89,   89,   89,
++
++       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,  -92
++    },
++
++    {
++       11,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,
++      -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,
++      -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,
++      -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93
++    },
++
++    {
++       11,  -94,  -94,  -94,  -94,  -94,  -94,  -94,  -94,  -94,
++      -94,  -94,  -94,   58,  -94,  -94,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,  116,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -94
++
++    },
++
++    {
++       11,  -95,  -95,  -95,  -95,  -95,  -95,  -95,  -95,  -95,
++      -95,  -95,  -95,   58,  -95,  -95,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  117,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -95
++    },
++
++    {
++       11,  -96,  -96,  -96,  -96,  -96,  -96,  -96,  -96,  -96,
++      -96,  -96,  -96,   58,  -96,  -96,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  118,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -96
++    },
++
++    {
++       11,  -97,  -97,  -97,  -97,  -97,  -97,  -97,  -97,  -97,
++      -97,  -97,  -97,   58,  -97,  -97,   58,   58,   58,   58,
++
++       58,   58,  119,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -97
++    },
++
++    {
++       11,  -98,  -98,  -98,  -98,  -98,  -98,  -98,  -98,  -98,
++      -98,  -98,  -98,   58,  -98,  -98,  120,  121,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -98
++    },
++
++    {
++       11,  -99,  -99,  -99,  -99,  -99,  -99,  -99,  -99,  -99,
++      -99,  -99,  -99,   58,  -99,  -99,   58,   58,   58,   58,
++       58,  122,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  -99
++
++    },
++
++    {
++       11, -100, -100, -100, -100, -100, -100, -100, -100, -100,
++     -100, -100, -100,   58, -100, -100,   58,   58,  123,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -100
++    },
++
++    {
++       11, -101, -101, -101, -101, -101, -101, -101, -101, -101,
++     -101, -101, -101,   58, -101, -101,   58,   58,   58,  124,
++       58,   58,   58,   58,   58,  125,   58,  126,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -101
++    },
++
++    {
++       11, -102, -102, -102, -102, -102, -102, -102, -102, -102,
++     -102, -102, -102,   58, -102, -102,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++      127,   58,   58,   58,   58,   58,   58, -102
++    },
++
++    {
++       11, -103, -103, -103, -103, -103, -103, -103, -103, -103,
++     -103, -103, -103,   58, -103, -103,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -103
++    },
++
++    {
++       11, -104, -104, -104, -104, -104, -104, -104, -104, -104,
++     -104, -104, -104,   58, -104, -104,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -104
++
++    },
++
++    {
++       11, -105, -105, -105, -105, -105, -105, -105, -105, -105,
++     -105, -105, -105,   58, -105, -105,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,  128,   58,
++       58,   58,   58,   58,   58,   58,   58, -105
++    },
++
++    {
++       11, -106, -106, -106, -106, -106, -106, -106, -106, -106,
++     -106, -106, -106,   58, -106, -106,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  129,   58, -106
++    },
++
++    {
++       11, -107, -107, -107, -107, -107, -107, -107, -107, -107,
++     -107, -107, -107,   58, -107, -107,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,  130,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -107
++    },
++
++    {
++       11, -108, -108, -108, -108, -108, -108, -108, -108, -108,
++     -108, -108, -108,   58, -108, -108,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  131,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -108
++    },
++
++    {
++       11, -109, -109, -109, -109, -109, -109, -109, -109, -109,
++     -109, -109, -109,   58, -109, -109,   58,   58,   58,   58,
++       58,   58,   58,  132,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -109
++
++    },
++
++    {
++       11, -110, -110, -110, -110, -110, -110, -110, -110, -110,
++     -110, -110, -110,   58, -110, -110,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  133,   58, -110
++    },
++
++    {
++       11, -111, -111, -111, -111, -111, -111, -111, -111, -111,
++     -111, -111, -111,   58, -111, -111,   58,   58,   58,   58,
++       58,  134,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -111
++    },
++
++    {
++       11, -112, -112, -112, -112, -112, -112, -112, -112, -112,
++     -112, -112, -112,   58, -112, -112,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,  135,   58,   58,   58,   58, -112
++    },
++
++    {
++       11, -113, -113, -113, -113, -113, -113, -113, -113, -113,
++     -113, -113, -113,   58, -113, -113,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  136,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -113
++    },
++
++    {
++       11, -114, -114, -114, -114, -114, -114, -114, -114, -114,
++     -114, -114, -114,   58, -114, -114,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,  137,   58,   58,   58, -114
++
++    },
++
++    {
++       11, -115, -115, -115, -115, -115, -115, -115, -115, -115,
++     -115,   89,   89,   89, -115, -115,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
++       89,   89,   89,   89,   89,   89,   89, -115
++    },
++
++    {
++       11, -116, -116, -116, -116, -116, -116, -116, -116, -116,
++     -116, -116, -116,   58, -116, -116,   58,   58,   58,   58,
++       58,  138,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -116
++    },
++
++    {
++       11, -117, -117, -117, -117, -117, -117, -117, -117, -117,
++     -117, -117, -117,   58, -117, -117,   58,   58,   58,  139,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -117
++    },
++
++    {
++       11, -118, -118, -118, -118, -118, -118, -118, -118, -118,
++     -118, -118, -118,   58, -118, -118,   58,   58,   58,   58,
++       58,  140,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -118
++    },
++
++    {
++       11, -119, -119, -119, -119, -119, -119, -119, -119, -119,
++     -119, -119, -119,   58, -119, -119,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  141,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -119
++
++    },
++
++    {
++       11, -120, -120, -120, -120, -120, -120, -120, -120, -120,
++     -120, -120, -120,   58, -120, -120,   58,   58,  142,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  143,   58,   58, -120
++    },
++
++    {
++       11, -121, -121, -121, -121, -121, -121, -121, -121, -121,
++     -121, -121, -121,   58, -121, -121,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  144,   58, -121
++    },
++
++    {
++       11, -122, -122, -122, -122, -122, -122, -122, -122, -122,
++     -122, -122, -122,   58, -122, -122,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,  145,   58,
++       58,   58,   58,   58,   58,   58,   58, -122
++    },
++
++    {
++       11, -123, -123, -123, -123, -123, -123, -123, -123, -123,
++     -123, -123, -123,   58, -123, -123,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,  146,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -123
++    },
++
++    {
++       11, -124, -124, -124, -124, -124, -124, -124, -124, -124,
++     -124, -124, -124,   58, -124, -124,   58,   58,   58,   58,
++       58,   58,   58,   58,  147,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -124
++
++    },
++
++    {
++       11, -125, -125, -125, -125, -125, -125, -125, -125, -125,
++     -125, -125, -125,   58, -125, -125,   58,   58,   58,   58,
++       58,   58,  148,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -125
++    },
++
++    {
++       11, -126, -126, -126, -126, -126, -126, -126, -126, -126,
++     -126, -126, -126,   58, -126, -126,   58,   58,   58,   58,
++       58,  149,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -126
++    },
++
++    {
++       11, -127, -127, -127, -127, -127, -127, -127, -127, -127,
++     -127, -127, -127,   58, -127, -127,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -127
++    },
++
++    {
++       11, -128, -128, -128, -128, -128, -128, -128, -128, -128,
++     -128, -128, -128,   58, -128, -128,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,  150,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -128
++    },
++
++    {
++       11, -129, -129, -129, -129, -129, -129, -129, -129, -129,
++     -129, -129, -129,   58, -129, -129,   58,   58,   58,  151,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -129
++
++    },
++
++    {
++       11, -130, -130, -130, -130, -130, -130, -130, -130, -130,
++     -130, -130, -130,   58, -130, -130,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,  152,
++       58,   58,   58,   58,   58,   58,   58, -130
++    },
++
++    {
++       11, -131, -131, -131, -131, -131, -131, -131, -131, -131,
++     -131, -131, -131,   58, -131, -131,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++      153,   58,   58,   58,   58,   58,   58, -131
++    },
++
++    {
++       11, -132, -132, -132, -132, -132, -132, -132, -132, -132,
++     -132, -132, -132,   58, -132, -132,   58,   58,   58,   58,
++
++       58,  154,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -132
++    },
++
++    {
++       11, -133, -133, -133, -133, -133, -133, -133, -133, -133,
++     -133, -133, -133,   58, -133, -133,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  155,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -133
++    },
++
++    {
++       11, -134, -134, -134, -134, -134, -134, -134, -134, -134,
++     -134, -134, -134,   58, -134, -134,   58,   58,   58,  156,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -134
++
++    },
++
++    {
++       11, -135, -135, -135, -135, -135, -135, -135, -135, -135,
++     -135, -135, -135,   58, -135, -135,   58,   58,   58,  157,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -135
++    },
++
++    {
++       11, -136, -136, -136, -136, -136, -136, -136, -136, -136,
++     -136, -136, -136,   58, -136, -136,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,  158,   58,
++       58,   58,   58,   58,   58,   58,   58, -136
++    },
++
++    {
++       11, -137, -137, -137, -137, -137, -137, -137, -137, -137,
++     -137, -137, -137,   58, -137, -137,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  159,   58,   58, -137
++    },
++
++    {
++       11, -138, -138, -138, -138, -138, -138, -138, -138, -138,
++     -138, -138, -138,   58, -138, -138,   58,  160,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -138
++    },
++
++    {
++       11, -139, -139, -139, -139, -139, -139, -139, -139, -139,
++     -139, -139, -139,   58, -139, -139,   58,   58,   58,   58,
++       58,  161,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -139
++
++    },
++
++    {
++       11, -140, -140, -140, -140, -140, -140, -140, -140, -140,
++     -140, -140, -140,   58, -140, -140,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,  162,   58,
++       58,   58,   58,   58,   58,   58,   58, -140
++    },
++
++    {
++       11, -141, -141, -141, -141, -141, -141, -141, -141, -141,
++     -141, -141, -141,   58, -141, -141,   58,   58,   58,   58,
++       58,   58,   58,  163,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -141
++    },
++
++    {
++       11, -142, -142, -142, -142, -142, -142, -142, -142, -142,
++     -142, -142, -142,   58, -142, -142,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,  164,
++       58,   58,   58,   58,   58,   58,   58, -142
++    },
++
++    {
++       11, -143, -143, -143, -143, -143, -143, -143, -143, -143,
++     -143, -143, -143,   58, -143, -143,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,  165,   58,   58,   58,   58, -143
++    },
++
++    {
++       11, -144, -144, -144, -144, -144, -144, -144, -144, -144,
++     -144, -144, -144,   58, -144, -144,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,  166,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -144
++
++    },
++
++    {
++       11, -145, -145, -145, -145, -145, -145, -145, -145, -145,
++     -145, -145, -145,   58, -145, -145,   58,   58,   58,   58,
++      167,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -145
++    },
++
++    {
++       11, -146, -146, -146, -146, -146, -146, -146, -146, -146,
++     -146, -146, -146,   58, -146, -146,   58,   58,   58,   58,
++       58,  168,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -146
++    },
++
++    {
++       11, -147, -147, -147, -147, -147, -147, -147, -147, -147,
++     -147, -147, -147,   58, -147, -147,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,  169,
++       58,   58,   58,   58,   58,   58,   58, -147
++    },
++
++    {
++       11, -148, -148, -148, -148, -148, -148, -148, -148, -148,
++     -148, -148, -148,   58, -148, -148,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -148
++    },
++
++    {
++       11, -149, -149, -149, -149, -149, -149, -149, -149, -149,
++     -149, -149, -149,   58, -149, -149,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,  170,   58,
++       58,   58,   58,   58,   58,   58,   58, -149
++
++    },
++
++    {
++       11, -150, -150, -150, -150, -150, -150, -150, -150, -150,
++     -150, -150, -150,   58, -150, -150,   58,   58,   58,   58,
++       58,  171,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -150
++    },
++
++    {
++       11, -151, -151, -151, -151, -151, -151, -151, -151, -151,
++     -151, -151, -151,   58, -151, -151,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,  172,
++       58,   58,   58,   58,   58,   58,   58, -151
++    },
++
++    {
++       11, -152, -152, -152, -152, -152, -152, -152, -152, -152,
++     -152, -152, -152,   58, -152, -152,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,  173,   58,
++       58,   58,   58,   58,   58,   58,   58, -152
++    },
++
++    {
++       11, -153, -153, -153, -153, -153, -153, -153, -153, -153,
++     -153, -153, -153,   58, -153, -153,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  174,   58,   58, -153
++    },
++
++    {
++       11, -154, -154, -154, -154, -154, -154, -154, -154, -154,
++     -154, -154, -154,   58, -154, -154,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -154
++
++    },
++
++    {
++       11, -155, -155, -155, -155, -155, -155, -155, -155, -155,
++     -155, -155, -155,   58, -155, -155,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,  175,   58,   58,   58,   58, -155
++    },
++
++    {
++       11, -156, -156, -156, -156, -156, -156, -156, -156, -156,
++     -156, -156, -156,   58, -156, -156,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  176,   58,   58, -156
++    },
++
++    {
++       11, -157, -157, -157, -157, -157, -157, -157, -157, -157,
++     -157, -157, -157,   58, -157, -157,   58,   58,   58,   58,
++
++       58,  177,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -157
++    },
++
++    {
++       11, -158, -158, -158, -158, -158, -158, -158, -158, -158,
++     -158, -158, -158,   58, -158, -158,   58,   58,   58,   58,
++       58,   58,   58,  178,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -158
++    },
++
++    {
++       11, -159, -159, -159, -159, -159, -159, -159, -159, -159,
++     -159, -159, -159,   58, -159, -159,   58,  179,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -159
++
++    },
++
++    {
++       11, -160, -160, -160, -160, -160, -160, -160, -160, -160,
++     -160, -160, -160,   58, -160, -160,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,  180,   58,
++       58,   58,   58,   58,   58,   58,   58, -160
++    },
++
++    {
++       11, -161, -161, -161, -161, -161, -161, -161, -161, -161,
++     -161, -161, -161,   58, -161, -161,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -161
++    },
++
++    {
++       11, -162, -162, -162, -162, -162, -162, -162, -162, -162,
++     -162, -162, -162,   58, -162, -162,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  181,   58,   58, -162
++    },
++
++    {
++       11, -163, -163, -163, -163, -163, -163, -163, -163, -163,
++     -163, -163, -163,   58, -163, -163,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -163
++    },
++
++    {
++       11, -164, -164, -164, -164, -164, -164, -164, -164, -164,
++     -164, -164, -164,   58, -164, -164,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,  182,
++       58,   58,   58,   58,   58,   58,   58, -164
++
++    },
++
++    {
++       11, -165, -165, -165, -165, -165, -165, -165, -165, -165,
++     -165, -165, -165,   58, -165, -165,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  183,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -165
++    },
++
++    {
++       11, -166, -166, -166, -166, -166, -166, -166, -166, -166,
++     -166, -166, -166,   58, -166, -166,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  184,   58,   58, -166
++    },
++
++    {
++       11, -167, -167, -167, -167, -167, -167, -167, -167, -167,
++     -167, -167, -167,   58, -167, -167,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,  185,   58,   58,   58, -167
++    },
++
++    {
++       11, -168, -168, -168, -168, -168, -168, -168, -168, -168,
++     -168, -168, -168,   58, -168, -168,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -168
++    },
++
++    {
++       11, -169, -169, -169, -169, -169, -169, -169, -169, -169,
++     -169, -169, -169,   58, -169, -169,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  186,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -169
++
++    },
++
++    {
++       11, -170, -170, -170, -170, -170, -170, -170, -170, -170,
++     -170, -170, -170,   58, -170, -170,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  187,   58, -170
++    },
++
++    {
++       11, -171, -171, -171, -171, -171, -171, -171, -171, -171,
++     -171, -171, -171,   58, -171, -171,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,  188,   58,
++       58,   58,   58,   58,   58,   58,   58, -171
++    },
++
++    {
++       11, -172, -172, -172, -172, -172, -172, -172, -172, -172,
++     -172, -172, -172,   58, -172, -172,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,  189,   58,
++       58,   58,   58,   58,   58,   58,   58, -172
++    },
++
++    {
++       11, -173, -173, -173, -173, -173, -173, -173, -173, -173,
++     -173, -173, -173,   58, -173, -173,   58,  190,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -173
++    },
++
++    {
++       11, -174, -174, -174, -174, -174, -174, -174, -174, -174,
++     -174, -174, -174,   58, -174, -174,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -174
++
++    },
++
++    {
++       11, -175, -175, -175, -175, -175, -175, -175, -175, -175,
++     -175, -175, -175,   58, -175, -175,   58,   58,   58,   58,
++       58,  191,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -175
++    },
++
++    {
++       11, -176, -176, -176, -176, -176, -176, -176, -176, -176,
++     -176, -176, -176,   58, -176, -176,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -176
++    },
++
++    {
++       11, -177, -177, -177, -177, -177, -177, -177, -177, -177,
++     -177, -177, -177,   58, -177, -177,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -177
++    },
++
++    {
++       11, -178, -178, -178, -178, -178, -178, -178, -178, -178,
++     -178, -178, -178,   58, -178, -178,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -178
++    },
++
++    {
++       11, -179, -179, -179, -179, -179, -179, -179, -179, -179,
++     -179, -179, -179,   58, -179, -179,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  192,   58,   58, -179
++
++    },
++
++    {
++       11, -180, -180, -180, -180, -180, -180, -180, -180, -180,
++     -180, -180, -180,   58, -180, -180,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -180
++    },
++
++    {
++       11, -181, -181, -181, -181, -181, -181, -181, -181, -181,
++     -181, -181, -181,   58, -181, -181,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -181
++    },
++
++    {
++       11, -182, -182, -182, -182, -182, -182, -182, -182, -182,
++     -182, -182, -182,   58, -182, -182,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,  193,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -182
++    },
++
++    {
++       11, -183, -183, -183, -183, -183, -183, -183, -183, -183,
++     -183, -183, -183,   58, -183, -183,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,  194,   58,   58,   58, -183
++    },
++
++    {
++       11, -184, -184, -184, -184, -184, -184, -184, -184, -184,
++     -184, -184, -184,   58, -184, -184,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -184
++
++    },
++
++    {
++       11, -185, -185, -185, -185, -185, -185, -185, -185, -185,
++     -185, -185, -185,   58, -185, -185,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -185
++    },
++
++    {
++       11, -186, -186, -186, -186, -186, -186, -186, -186, -186,
++     -186, -186, -186,   58, -186, -186,   58,   58,   58,  195,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -186
++    },
++
++    {
++       11, -187, -187, -187, -187, -187, -187, -187, -187, -187,
++     -187, -187, -187,   58, -187, -187,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -187
++    },
++
++    {
++       11, -188, -188, -188, -188, -188, -188, -188, -188, -188,
++     -188, -188, -188,   58, -188, -188,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,  196,   58, -188
++    },
++
++    {
++       11, -189, -189, -189, -189, -189, -189, -189, -189, -189,
++     -189, -189, -189,   58, -189, -189,   58,   58,   58,   58,
++       58,   58,  197,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -189
++
++    },
++
++    {
++       11, -190, -190, -190, -190, -190, -190, -190, -190, -190,
++     -190, -190, -190,   58, -190, -190,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,  198,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -190
++    },
++
++    {
++       11, -191, -191, -191, -191, -191, -191, -191, -191, -191,
++     -191, -191, -191,   58, -191, -191,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,  199,   58,   58,   58, -191
++    },
++
++    {
++       11, -192, -192, -192, -192, -192, -192, -192, -192, -192,
++     -192, -192, -192,   58, -192, -192,   58,   58,   58,   58,
++
++       58,  200,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -192
++    },
++
++    {
++       11, -193, -193, -193, -193, -193, -193, -193, -193, -193,
++     -193, -193, -193,   58, -193, -193,   58,   58,   58,   58,
++       58,  201,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -193
++    },
++
++    {
++       11, -194, -194, -194, -194, -194, -194, -194, -194, -194,
++     -194, -194, -194,   58, -194, -194,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  202,   58,   58, -194
++
++    },
++
++    {
++       11, -195, -195, -195, -195, -195, -195, -195, -195, -195,
++     -195, -195, -195,   58, -195, -195,   58,   58,   58,   58,
++       58,  203,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -195
++    },
++
++    {
++       11, -196, -196, -196, -196, -196, -196, -196, -196, -196,
++     -196, -196, -196,   58, -196, -196,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -196
++    },
++
++    {
++       11, -197, -197, -197, -197, -197, -197, -197, -197, -197,
++     -197, -197, -197,   58, -197, -197,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,  204,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -197
++    },
++
++    {
++       11, -198, -198, -198, -198, -198, -198, -198, -198, -198,
++     -198, -198, -198,   58, -198, -198,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -198
++    },
++
++    {
++       11, -199, -199, -199, -199, -199, -199, -199, -199, -199,
++     -199, -199, -199,   58, -199, -199,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -199
++
++    },
++
++    {
++       11, -200, -200, -200, -200, -200, -200, -200, -200, -200,
++     -200, -200, -200,   58, -200, -200,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -200
++    },
++
++    {
++       11, -201, -201, -201, -201, -201, -201, -201, -201, -201,
++     -201, -201, -201,   58, -201, -201,   58,  205,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -201
++    },
++
++    {
++       11, -202, -202, -202, -202, -202, -202, -202, -202, -202,
++     -202, -202, -202,   58, -202, -202,   58,  206,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -202
++    },
++
++    {
++       11, -203, -203, -203, -203, -203, -203, -203, -203, -203,
++     -203, -203, -203,   58, -203, -203,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -203
++    },
++
++    {
++       11, -204, -204, -204, -204, -204, -204, -204, -204, -204,
++     -204, -204, -204,   58, -204, -204,   58,   58,   58,   58,
++       58,   58,   58,  207,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -204
++
++    },
++
++    {
++       11, -205, -205, -205, -205, -205, -205, -205, -205, -205,
++     -205, -205, -205,   58, -205, -205,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,  208,   58,
++       58,   58,   58,   58,   58,   58,   58, -205
++    },
++
++    {
++       11, -206, -206, -206, -206, -206, -206, -206, -206, -206,
++     -206, -206, -206,   58, -206, -206,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,  209,   58,   58, -206
++    },
++
++    {
++       11, -207, -207, -207, -207, -207, -207, -207, -207, -207,
++     -207, -207, -207,   58, -207, -207,   58,   58,   58,   58,
++
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -207
++    },
++
++    {
++       11, -208, -208, -208, -208, -208, -208, -208, -208, -208,
++     -208, -208, -208,   58, -208, -208,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -208
++    },
++
++    {
++       11, -209, -209, -209, -209, -209, -209, -209, -209, -209,
++     -209, -209, -209,   58, -209, -209,   58,   58,   58,   58,
++       58,  210,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -209
++
++    },
++
++    {
++       11, -210, -210, -210, -210, -210, -210, -210, -210, -210,
++     -210, -210, -210,   58, -210, -210,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
++       58,   58,   58,   58,   58,   58,   58, -210
++    },
++
++    } ;
++
++static yy_state_type yy_get_previous_state (void );
++static yy_state_type yy_try_NUL_trans (yy_state_type current_state  );
++static int yy_get_next_buffer (void );
++static void yy_fatal_error (yyconst char msg[]  );
++
++/* Done after the current pattern has been matched and before the
++ * corresponding action - sets up zconftext.
++ */
++#define YY_DO_BEFORE_ACTION \
++	(yytext_ptr) = yy_bp; \
++	zconfleng = (size_t) (yy_cp - yy_bp); \
++	(yy_hold_char) = *yy_cp; \
++	*yy_cp = '\0'; \
++	(yy_c_buf_p) = yy_cp;
++
++#define YY_NUM_RULES 64
++#define YY_END_OF_BUFFER 65
++/* This struct is not used in this scanner,
++   but its presence is necessary. */
++struct yy_trans_info
++	{
++	flex_int32_t yy_verify;
++	flex_int32_t yy_nxt;
++	};
++static yyconst flex_int16_t yy_accept[211] =
++    {   0,
++        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
++       65,    5,    4,    3,    2,   36,   37,   35,   35,   35,
++       35,   35,   35,   35,   35,   35,   35,   35,   35,   35,
++       63,   60,   62,   55,   59,   58,   57,   53,   48,   42,
++       47,   51,   53,   40,   41,   50,   50,   43,   53,   50,
++       50,   53,    4,    3,    2,    2,    1,   35,   35,   35,
++       35,   35,   35,   35,   16,   35,   35,   35,   35,   35,
++       35,   35,   35,   35,   35,   35,   63,   60,   62,   61,
++       55,   54,   57,   56,   44,   51,   38,   50,   50,   52,
++       45,   46,   39,   35,   35,   35,   35,   35,   35,   35,
++
++       35,   35,   30,   29,   35,   35,   35,   35,   35,   35,
++       35,   35,   35,   35,   49,   25,   35,   35,   35,   35,
++       35,   35,   35,   35,   35,   35,   15,   35,    7,   35,
++       35,   35,   35,   35,   35,   35,   35,   35,   35,   35,
++       35,   35,   35,   35,   35,   35,   35,   17,   35,   35,
++       35,   35,   35,   34,   35,   35,   35,   35,   35,   35,
++       10,   35,   13,   35,   35,   35,   35,   33,   35,   35,
++       35,   35,   35,   22,   35,   32,    9,   31,   35,   26,
++       12,   35,   35,   21,   18,   35,    8,   35,   35,   35,
++       35,   35,   27,   35,   35,    6,   35,   20,   19,   23,
++
++       35,   35,   11,   35,   35,   35,   14,   28,   35,   24
++    } ;
++
++static yyconst flex_int32_t yy_ec[256] =
++    {   0,
++        1,    1,    1,    1,    1,    1,    1,    1,    2,    3,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    2,    4,    5,    6,    1,    1,    7,    8,    9,
++       10,    1,    1,    1,   11,   12,   12,   13,   13,   13,
++       13,   13,   13,   13,   13,   13,   13,    1,    1,    1,
++       14,    1,    1,    1,   13,   13,   13,   13,   13,   13,
++       13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
++       13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
++        1,   15,    1,    1,   16,    1,   17,   18,   19,   20,
++
++       21,   22,   23,   24,   25,   13,   13,   26,   27,   28,
++       29,   30,   31,   32,   33,   34,   35,   13,   13,   36,
++       13,   13,    1,   37,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
++        1,    1,    1,    1,    1
++    } ;
++
++extern int zconf_flex_debug;
++int zconf_flex_debug = 0;
++
++/* The intent behind this definition is that it'll catch
++ * any uses of REJECT which flex missed.
++ */
++#define REJECT reject_used_but_not_detected
++#define yymore() yymore_used_but_not_detected
++#define YY_MORE_ADJ 0
++#define YY_RESTORE_YY_MORE_OFFSET
++char *zconftext;
++
++/*
++ * Copyright (C) 2002 Roman Zippel <zippel at linux-m68k.org>
++ * Released under the terms of the GNU GPL v2.0.
++ */
++
++#include <limits.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <unistd.h>
++
++#define LKC_DIRECT_LINK
++#include "lkc.h"
++
++#define START_STRSIZE	16
++
++char *text;
++static char *text_ptr;
++static int text_size, text_asize;
++
++struct buffer {
++        struct buffer *parent;
++        YY_BUFFER_STATE state;
++};
++
++struct buffer *current_buf;
++
++static int last_ts, first_ts;
++
++static void zconf_endhelp(void);
++static struct buffer *zconf_endfile(void);
++
++void new_string(void)
++{
++	text = malloc(START_STRSIZE);
++	text_asize = START_STRSIZE;
++	text_ptr = text;
++	text_size = 0;
++	*text_ptr = 0;
++}
++
++void append_string(const char *str, int size)
++{
++	int new_size = text_size + size + 1;
++	if (new_size > text_asize) {
++		text = realloc(text, new_size);
++		text_asize = new_size;
++		text_ptr = text + text_size;
++	}
++	memcpy(text_ptr, str, size);
++	text_ptr += size;
++	text_size += size;
++	*text_ptr = 0;
++}
++
++void alloc_string(const char *str, int size)
++{
++	text = malloc(size + 1);
++	memcpy(text, str, size);
++	text[size] = 0;
++}
++
++#define INITIAL 0
++#define COMMAND 1
++#define HELP 2
++#define STRING 3
++#define PARAM 4
++
++/* Special case for "unistd.h", since it is non-ANSI. We include it way
++ * down here because we want the user's section 1 to have been scanned first.
++ * The user has a chance to override it with an option.
++ */
++#include <unistd.h>
++
++#ifndef YY_EXTRA_TYPE
++#define YY_EXTRA_TYPE void *
++#endif
++
++/* Macros after this point can all be overridden by user definitions in
++ * section 1.
++ */
++
++#ifndef YY_SKIP_YYWRAP
++#ifdef __cplusplus
++extern "C" int zconfwrap (void );
++#else
++extern int zconfwrap (void );
++#endif
++#endif
++
++    static void yyunput (int c,char *buf_ptr  );
++    
++#ifndef yytext_ptr
++static void yy_flex_strncpy (char *,yyconst char *,int );
++#endif
++
++#ifdef YY_NEED_STRLEN
++static int yy_flex_strlen (yyconst char * );
++#endif
++
++#ifndef YY_NO_INPUT
++
++#ifdef __cplusplus
++static int yyinput (void );
++#else
++static int input (void );
++#endif
++
++#endif
++
++/* Amount of stuff to slurp up with each read. */
++#ifndef YY_READ_BUF_SIZE
++#define YY_READ_BUF_SIZE 8192
++#endif
++
++/* Copy whatever the last rule matched to the standard output. */
++#ifndef ECHO
++/* This used to be an fputs(), but since the string might contain NUL's,
++ * we now use fwrite().
++ */
++#define ECHO (void) fwrite( zconftext, zconfleng, 1, zconfout )
++#endif
++
++/* Gets input and stuffs it into "buf".  number of characters read, or YY_NULL,
++ * is returned in "result".
++ */
++#ifndef YY_INPUT
++#define YY_INPUT(buf,result,max_size) \
++	errno=0; \
++	while ( (result = read( fileno(zconfin), (char *) buf, max_size )) < 0 ) \
++	{ \
++		if( errno != EINTR) \
++		{ \
++			YY_FATAL_ERROR( "input in flex scanner failed" ); \
++			break; \
++		} \
++		errno=0; \
++		clearerr(zconfin); \
++	}\
++\
++
++#endif
++
++/* No semi-colon after return; correct usage is to write "yyterminate();" -
++ * we don't want an extra ';' after the "return" because that will cause
++ * some compilers to complain about unreachable statements.
++ */
++#ifndef yyterminate
++#define yyterminate() return YY_NULL
++#endif
++
++/* Number of entries by which start-condition stack grows. */
++#ifndef YY_START_STACK_INCR
++#define YY_START_STACK_INCR 25
++#endif
++
++/* Report a fatal error. */
++#ifndef YY_FATAL_ERROR
++#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
++#endif
++
++/* end tables serialization structures and prototypes */
++
++/* Default declaration of generated scanner - a define so the user can
++ * easily add parameters.
++ */
++#ifndef YY_DECL
++#define YY_DECL_IS_OURS 1
++
++extern int zconflex (void);
++
++#define YY_DECL int zconflex (void)
++#endif /* !YY_DECL */
++
++/* Code executed at the beginning of each rule, after zconftext and zconfleng
++ * have been set up.
++ */
++#ifndef YY_USER_ACTION
++#define YY_USER_ACTION
++#endif
++
++/* Code executed at the end of each rule. */
++#ifndef YY_BREAK
++#define YY_BREAK break;
++#endif
++
++#define YY_RULE_SETUP \
++	YY_USER_ACTION
++
++/** The main scanner function which does all the work.
++ */
++YY_DECL
++{
++	register yy_state_type yy_current_state;
++	register char *yy_cp, *yy_bp;
++	register int yy_act;
++    
++	int str = 0;
++	int ts, i;
++
++	if ( (yy_init) )
++		{
++		(yy_init) = 0;
++
++#ifdef YY_USER_INIT
++		YY_USER_INIT;
++#endif
++
++		if ( ! (yy_start) )
++			(yy_start) = 1;	/* first start state */
++
++		if ( ! zconfin )
++			zconfin = stdin;
++
++		if ( ! zconfout )
++			zconfout = stdout;
++
++		if ( ! YY_CURRENT_BUFFER ) {
++			zconfensure_buffer_stack ();
++			YY_CURRENT_BUFFER_LVALUE =
++				zconf_create_buffer(zconfin,YY_BUF_SIZE );
++		}
++
++		zconf_load_buffer_state( );
++		}
++
++	while ( 1 )		/* loops until end-of-file is reached */
++		{
++		yy_cp = (yy_c_buf_p);
++
++		/* Support of zconftext. */
++		*yy_cp = (yy_hold_char);
++
++		/* yy_bp points to the position in yy_ch_buf of the start of
++		 * the current run.
++		 */
++		yy_bp = yy_cp;
++
++		yy_current_state = (yy_start);
++yy_match:
++		while ( (yy_current_state = yy_nxt[yy_current_state][ yy_ec[YY_SC_TO_UI(*yy_cp)]  ]) > 0 )
++			++yy_cp;
++
++		yy_current_state = -yy_current_state;
++
++yy_find_action:
++		yy_act = yy_accept[yy_current_state];
++
++		YY_DO_BEFORE_ACTION;
++
++do_action:	/* This label is used only to access EOF actions. */
++
++		switch ( yy_act )
++	{ /* beginning of action switch */
++case 1:
++/* rule 1 can match eol */
++YY_RULE_SETUP
++current_file->lineno++;
++	YY_BREAK
++case 2:
++YY_RULE_SETUP
++
++	YY_BREAK
++case 3:
++/* rule 3 can match eol */
++YY_RULE_SETUP
++current_file->lineno++; return T_EOL;
++	YY_BREAK
++case 4:
++YY_RULE_SETUP
++{
++	BEGIN(COMMAND);
++}
++	YY_BREAK
++case 5:
++YY_RULE_SETUP
++{
++	unput(zconftext[0]);
++	BEGIN(COMMAND);
++}
++	YY_BREAK
++
++case 6:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_MAINMENU;
++	YY_BREAK
++case 7:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_MENU;
++	YY_BREAK
++case 8:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_ENDMENU;
++	YY_BREAK
++case 9:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_SOURCE;
++	YY_BREAK
++case 10:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_CHOICE;
++	YY_BREAK
++case 11:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_ENDCHOICE;
++	YY_BREAK
++case 12:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_COMMENT;
++	YY_BREAK
++case 13:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_CONFIG;
++	YY_BREAK
++case 14:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_MENUCONFIG;
++	YY_BREAK
++case 15:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_HELP;
++	YY_BREAK
++case 16:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_IF;
++	YY_BREAK
++case 17:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_ENDIF;
++	YY_BREAK
++case 18:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_DEPENDS;
++	YY_BREAK
++case 19:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_REQUIRES;
++	YY_BREAK
++case 20:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_OPTIONAL;
++	YY_BREAK
++case 21:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_DEFAULT;
++	YY_BREAK
++case 22:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_PROMPT;
++	YY_BREAK
++case 23:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_TRISTATE;
++	YY_BREAK
++case 24:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_DEF_TRISTATE;
++	YY_BREAK
++case 25:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_BOOLEAN;
++	YY_BREAK
++case 26:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_BOOLEAN;
++	YY_BREAK
++case 27:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_DEF_BOOLEAN;
++	YY_BREAK
++case 28:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_DEF_BOOLEAN;
++	YY_BREAK
++case 29:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_INT;
++	YY_BREAK
++case 30:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_HEX;
++	YY_BREAK
++case 31:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_STRING;
++	YY_BREAK
++case 32:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_SELECT;
++	YY_BREAK
++case 33:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_SELECT;
++	YY_BREAK
++case 34:
++YY_RULE_SETUP
++BEGIN(PARAM); return T_RANGE;
++	YY_BREAK
++case 35:
++YY_RULE_SETUP
++{
++		alloc_string(zconftext, zconfleng);
++		zconflval.string = text;
++		return T_WORD;
++	}
++	YY_BREAK
++case 36:
++YY_RULE_SETUP
++
++	YY_BREAK
++case 37:
++/* rule 37 can match eol */
++YY_RULE_SETUP
++current_file->lineno++; BEGIN(INITIAL);
++	YY_BREAK
++
++case 38:
++YY_RULE_SETUP
++return T_AND;
++	YY_BREAK
++case 39:
++YY_RULE_SETUP
++return T_OR;
++	YY_BREAK
++case 40:
++YY_RULE_SETUP
++return T_OPEN_PAREN;
++	YY_BREAK
++case 41:
++YY_RULE_SETUP
++return T_CLOSE_PAREN;
++	YY_BREAK
++case 42:
++YY_RULE_SETUP
++return T_NOT;
++	YY_BREAK
++case 43:
++YY_RULE_SETUP
++return T_EQUAL;
++	YY_BREAK
++case 44:
++YY_RULE_SETUP
++return T_UNEQUAL;
++	YY_BREAK
++case 45:
++YY_RULE_SETUP
++return T_IF;
++	YY_BREAK
++case 46:
++YY_RULE_SETUP
++return T_ON;
++	YY_BREAK
++case 47:
++YY_RULE_SETUP
++{
++		str = zconftext[0];
++		new_string();
++		BEGIN(STRING);
++	}
++	YY_BREAK
++case 48:
++/* rule 48 can match eol */
++YY_RULE_SETUP
++BEGIN(INITIAL); current_file->lineno++; return T_EOL;
++	YY_BREAK
++case 49:
++YY_RULE_SETUP
++/* ignore */
++	YY_BREAK
++case 50:
++YY_RULE_SETUP
++{
++		alloc_string(zconftext, zconfleng);
++		zconflval.string = text;
++		return T_WORD;
++	}
++	YY_BREAK
++case 51:
++YY_RULE_SETUP
++/* comment */
++	YY_BREAK
++case 52:
++/* rule 52 can match eol */
++YY_RULE_SETUP
++current_file->lineno++;
++	YY_BREAK
++case 53:
++YY_RULE_SETUP
++
++	YY_BREAK
++case YY_STATE_EOF(PARAM):
++{
++		BEGIN(INITIAL);
++	}
++	YY_BREAK
++
++case 54:
++/* rule 54 can match eol */
++*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
++(yy_c_buf_p) = yy_cp -= 1;
++YY_DO_BEFORE_ACTION; /* set up zconftext again */
++YY_RULE_SETUP
++{
++		append_string(zconftext, zconfleng);
++		zconflval.string = text;
++		return T_WORD_QUOTE;
++	}
++	YY_BREAK
++case 55:
++YY_RULE_SETUP
++{
++		append_string(zconftext, zconfleng);
++	}
++	YY_BREAK
++case 56:
++/* rule 56 can match eol */
++*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
++(yy_c_buf_p) = yy_cp -= 1;
++YY_DO_BEFORE_ACTION; /* set up zconftext again */
++YY_RULE_SETUP
++{
++		append_string(zconftext + 1, zconfleng - 1);
++		zconflval.string = text;
++		return T_WORD_QUOTE;
++	}
++	YY_BREAK
++case 57:
++YY_RULE_SETUP
++{
++		append_string(zconftext + 1, zconfleng - 1);
++	}
++	YY_BREAK
++case 58:
++YY_RULE_SETUP
++{
++		if (str == zconftext[0]) {
++			BEGIN(PARAM);
++			zconflval.string = text;
++			return T_WORD_QUOTE;
++		} else
++			append_string(zconftext, 1);
++	}
++	YY_BREAK
++case 59:
++/* rule 59 can match eol */
++YY_RULE_SETUP
++{
++		printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno());
++		current_file->lineno++;
++		BEGIN(INITIAL);
++		return T_EOL;
++	}
++	YY_BREAK
++case YY_STATE_EOF(STRING):
++{
++		BEGIN(INITIAL);
++	}
++	YY_BREAK
++
++case 60:
++YY_RULE_SETUP
++{
++		ts = 0;
++		for (i = 0; i < zconfleng; i++) {
++			if (zconftext[i] == '\t')
++				ts = (ts & ~7) + 8;
++			else
++				ts++;
++		}
++		last_ts = ts;
++		if (first_ts) {
++			if (ts < first_ts) {
++				zconf_endhelp();
++				return T_HELPTEXT;
++			}
++			ts -= first_ts;
++			while (ts > 8) {
++				append_string("        ", 8);
++				ts -= 8;
++			}
++			append_string("        ", ts);
++		}
++	}
++	YY_BREAK
++case 61:
++/* rule 61 can match eol */
++*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
++(yy_c_buf_p) = yy_cp -= 1;
++YY_DO_BEFORE_ACTION; /* set up zconftext again */
++YY_RULE_SETUP
++{
++		current_file->lineno++;
++		zconf_endhelp();
++		return T_HELPTEXT;
++	}
++	YY_BREAK
++case 62:
++/* rule 62 can match eol */
++YY_RULE_SETUP
++{
++		current_file->lineno++;
++		append_string("\n", 1);
++	}
++	YY_BREAK
++case 63:
++YY_RULE_SETUP
++{
++		append_string(zconftext, zconfleng);
++		if (!first_ts)
++			first_ts = last_ts;
++	}
++	YY_BREAK
++case YY_STATE_EOF(HELP):
++{
++		zconf_endhelp();
++		return T_HELPTEXT;
++	}
++	YY_BREAK
++
++case YY_STATE_EOF(INITIAL):
++case YY_STATE_EOF(COMMAND):
++{
++	if (current_buf) {
++		zconf_endfile();
++		return T_EOF;
++	}
++	fclose(zconfin);
++	yyterminate();
++}
++	YY_BREAK
++case 64:
++YY_RULE_SETUP
++YY_FATAL_ERROR( "flex scanner jammed" );
++	YY_BREAK
++
++	case YY_END_OF_BUFFER:
++		{
++		/* Amount of text matched not including the EOB char. */
++		int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
++
++		/* Undo the effects of YY_DO_BEFORE_ACTION. */
++		*yy_cp = (yy_hold_char);
++		YY_RESTORE_YY_MORE_OFFSET
++
++		if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
++			{
++			/* We're scanning a new file or input source.  It's
++			 * possible that this happened because the user
++			 * just pointed zconfin at a new source and called
++			 * zconflex().  If so, then we have to assure
++			 * consistency between YY_CURRENT_BUFFER and our
++			 * globals.  Here is the right place to do so, because
++			 * this is the first action (other than possibly a
++			 * back-up) that will match for the new input source.
++			 */
++			(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
++			YY_CURRENT_BUFFER_LVALUE->yy_input_file = zconfin;
++			YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
++			}
++
++		/* Note that here we test for yy_c_buf_p "<=" to the position
++		 * of the first EOB in the buffer, since yy_c_buf_p will
++		 * already have been incremented past the NUL character
++		 * (since all states make transitions on EOB to the
++		 * end-of-buffer state).  Contrast this with the test
++		 * in input().
++		 */
++		if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
++			{ /* This was really a NUL. */
++			yy_state_type yy_next_state;
++
++			(yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
++
++			yy_current_state = yy_get_previous_state(  );
++
++			/* Okay, we're now positioned to make the NUL
++			 * transition.  We couldn't have
++			 * yy_get_previous_state() go ahead and do it
++			 * for us because it doesn't know how to deal
++			 * with the possibility of jamming (and we don't
++			 * want to build jamming into it because then it
++			 * will run more slowly).
++			 */
++
++			yy_next_state = yy_try_NUL_trans( yy_current_state );
++
++			yy_bp = (yytext_ptr) + YY_MORE_ADJ;
++
++			if ( yy_next_state )
++				{
++				/* Consume the NUL. */
++				yy_cp = ++(yy_c_buf_p);
++				yy_current_state = yy_next_state;
++				goto yy_match;
++				}
++
++			else
++				{
++				yy_cp = (yy_c_buf_p);
++				goto yy_find_action;
++				}
++			}
++
++		else switch ( yy_get_next_buffer(  ) )
++			{
++			case EOB_ACT_END_OF_FILE:
++				{
++				(yy_did_buffer_switch_on_eof) = 0;
++
++				if ( zconfwrap( ) )
++					{
++					/* Note: because we've taken care in
++					 * yy_get_next_buffer() to have set up
++					 * zconftext, we can now set up
++					 * yy_c_buf_p so that if some total
++					 * hoser (like flex itself) wants to
++					 * call the scanner after we return the
++					 * YY_NULL, it'll still work - another
++					 * YY_NULL will get returned.
++					 */
++					(yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
++
++					yy_act = YY_STATE_EOF(YY_START);
++					goto do_action;
++					}
++
++				else
++					{
++					if ( ! (yy_did_buffer_switch_on_eof) )
++						YY_NEW_FILE;
++					}
++				break;
++				}
++
++			case EOB_ACT_CONTINUE_SCAN:
++				(yy_c_buf_p) =
++					(yytext_ptr) + yy_amount_of_matched_text;
++
++				yy_current_state = yy_get_previous_state(  );
++
++				yy_cp = (yy_c_buf_p);
++				yy_bp = (yytext_ptr) + YY_MORE_ADJ;
++				goto yy_match;
++
++			case EOB_ACT_LAST_MATCH:
++				(yy_c_buf_p) =
++				&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
++
++				yy_current_state = yy_get_previous_state(  );
++
++				yy_cp = (yy_c_buf_p);
++				yy_bp = (yytext_ptr) + YY_MORE_ADJ;
++				goto yy_find_action;
++			}
++		break;
++		}
++
++	default:
++		YY_FATAL_ERROR(
++			"fatal flex scanner internal error--no action found" );
++	} /* end of action switch */
++		} /* end of scanning one token */
++} /* end of zconflex */
++
++/* yy_get_next_buffer - try to read in a new buffer
++ *
++ * Returns a code representing an action:
++ *	EOB_ACT_LAST_MATCH -
++ *	EOB_ACT_CONTINUE_SCAN - continue scanning from current position
++ *	EOB_ACT_END_OF_FILE - end of file
++ */
++static int yy_get_next_buffer (void)
++{
++    	register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
++	register char *source = (yytext_ptr);
++	register int number_to_move, i;
++	int ret_val;
++
++	if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
++		YY_FATAL_ERROR(
++		"fatal flex scanner internal error--end of buffer missed" );
++
++	if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
++		{ /* Don't try to fill the buffer, so this is an EOF. */
++		if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
++			{
++			/* We matched a single character, the EOB, so
++			 * treat this as a final EOF.
++			 */
++			return EOB_ACT_END_OF_FILE;
++			}
++
++		else
++			{
++			/* We matched some text prior to the EOB, first
++			 * process it.
++			 */
++			return EOB_ACT_LAST_MATCH;
++			}
++		}
++
++	/* Try to read more data. */
++
++	/* First move last chars to start of buffer. */
++	number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1;
++
++	for ( i = 0; i < number_to_move; ++i )
++		*(dest++) = *(source++);
++
++	if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
++		/* don't do the read, it's not guaranteed to return an EOF,
++		 * just force an EOF
++		 */
++		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
++
++	else
++		{
++			size_t num_to_read =
++			YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
++
++		while ( num_to_read <= 0 )
++			{ /* Not enough room in the buffer - grow it. */
++
++			/* just a shorter name for the current buffer */
++			YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
++
++			int yy_c_buf_p_offset =
++				(int) ((yy_c_buf_p) - b->yy_ch_buf);
++
++			if ( b->yy_is_our_buffer )
++				{
++				int new_size = b->yy_buf_size * 2;
++
++				if ( new_size <= 0 )
++					b->yy_buf_size += b->yy_buf_size / 8;
++				else
++					b->yy_buf_size *= 2;
++
++				b->yy_ch_buf = (char *)
++					/* Include room in for 2 EOB chars. */
++					zconfrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2  );
++				}
++			else
++				/* Can't grow it, we don't own it. */
++				b->yy_ch_buf = 0;
++
++			if ( ! b->yy_ch_buf )
++				YY_FATAL_ERROR(
++				"fatal error - scanner input buffer overflow" );
++
++			(yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
++
++			num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
++						number_to_move - 1;
++
++			}
++
++		if ( num_to_read > YY_READ_BUF_SIZE )
++			num_to_read = YY_READ_BUF_SIZE;
++
++		/* Read in more data. */
++		YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
++			(yy_n_chars), num_to_read );
++
++		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
++		}
++
++	if ( (yy_n_chars) == 0 )
++		{
++		if ( number_to_move == YY_MORE_ADJ )
++			{
++			ret_val = EOB_ACT_END_OF_FILE;
++			zconfrestart(zconfin  );
++			}
++
++		else
++			{
++			ret_val = EOB_ACT_LAST_MATCH;
++			YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
++				YY_BUFFER_EOF_PENDING;
++			}
++		}
++
++	else
++		ret_val = EOB_ACT_CONTINUE_SCAN;
++
++	(yy_n_chars) += number_to_move;
++	YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
++	YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
++
++	(yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
++
++	return ret_val;
++}
++
++/* yy_get_previous_state - get the state just before the EOB char was reached */
++
++    static yy_state_type yy_get_previous_state (void)
++{
++	register yy_state_type yy_current_state;
++	register char *yy_cp;
++    
++	yy_current_state = (yy_start);
++
++	for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
++		{
++		yy_current_state = yy_nxt[yy_current_state][(*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1)];
++		}
++
++	return yy_current_state;
++}
++
++/* yy_try_NUL_trans - try to make a transition on the NUL character
++ *
++ * synopsis
++ *	next_state = yy_try_NUL_trans( current_state );
++ */
++    static yy_state_type yy_try_NUL_trans  (yy_state_type yy_current_state )
++{
++	register int yy_is_jam;
++    
++	yy_current_state = yy_nxt[yy_current_state][1];
++	yy_is_jam = (yy_current_state <= 0);
++
++	return yy_is_jam ? 0 : yy_current_state;
++}
++
++    static void yyunput (int c, register char * yy_bp )
++{
++	register char *yy_cp;
++    
++    yy_cp = (yy_c_buf_p);
++
++	/* undo effects of setting up zconftext */
++	*yy_cp = (yy_hold_char);
++
++	if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
++		{ /* need to shift things up to make room */
++		/* +2 for EOB chars. */
++		register int number_to_move = (yy_n_chars) + 2;
++		register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
++					YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
++		register char *source =
++				&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];
++
++		while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
++			*--dest = *--source;
++
++		yy_cp += (int) (dest - source);
++		yy_bp += (int) (dest - source);
++		YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
++			(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_buf_size;
++
++		if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
++			YY_FATAL_ERROR( "flex scanner push-back overflow" );
++		}
++
++	*--yy_cp = (char) c;
++
++	(yytext_ptr) = yy_bp;
++	(yy_hold_char) = *yy_cp;
++	(yy_c_buf_p) = yy_cp;
++}
++
++#ifndef YY_NO_INPUT
++#ifdef __cplusplus
++    static int yyinput (void)
++#else
++    static int input  (void)
++#endif
++
++{
++	int c;
++    
++	*(yy_c_buf_p) = (yy_hold_char);
++
++	if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
++		{
++		/* yy_c_buf_p now points to the character we want to return.
++		 * If this occurs *before* the EOB characters, then it's a
++		 * valid NUL; if not, then we've hit the end of the buffer.
++		 */
++		if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
++			/* This was really a NUL. */
++			*(yy_c_buf_p) = '\0';
++
++		else
++			{ /* need more input */
++			int offset = (yy_c_buf_p) - (yytext_ptr);
++			++(yy_c_buf_p);
++
++			switch ( yy_get_next_buffer(  ) )
++				{
++				case EOB_ACT_LAST_MATCH:
++					/* This happens because yy_g_n_b()
++					 * sees that we've accumulated a
++					 * token and flags that we need to
++					 * try matching the token before
++					 * proceeding.  But for input(),
++					 * there's no matching to consider.
++					 * So convert the EOB_ACT_LAST_MATCH
++					 * to EOB_ACT_END_OF_FILE.
++					 */
++
++					/* Reset buffer status. */
++					zconfrestart(zconfin );
++
++					/*FALLTHROUGH*/
++
++				case EOB_ACT_END_OF_FILE:
++					{
++					if ( zconfwrap( ) )
++						return EOF;
++
++					if ( ! (yy_did_buffer_switch_on_eof) )
++						YY_NEW_FILE;
++#ifdef __cplusplus
++					return yyinput();
++#else
++					return input();
++#endif
++					}
++
++				case EOB_ACT_CONTINUE_SCAN:
++					(yy_c_buf_p) = (yytext_ptr) + offset;
++					break;
++				}
++			}
++		}
++
++	c = *(unsigned char *) (yy_c_buf_p);	/* cast for 8-bit char's */
++	*(yy_c_buf_p) = '\0';	/* preserve zconftext */
++	(yy_hold_char) = *++(yy_c_buf_p);
++
++	return c;
++}
++#endif	/* ifndef YY_NO_INPUT */
++
++/** Immediately switch to a different input stream.
++ * @param input_file A readable stream.
++ * 
++ * @note This function does not reset the start condition to @c INITIAL .
++ */
++    void zconfrestart  (FILE * input_file )
++{
++    
++	if ( ! YY_CURRENT_BUFFER ){
++        zconfensure_buffer_stack ();
++		YY_CURRENT_BUFFER_LVALUE =
++            zconf_create_buffer(zconfin,YY_BUF_SIZE );
++	}
++
++	zconf_init_buffer(YY_CURRENT_BUFFER,input_file );
++	zconf_load_buffer_state( );
++}
++
++/** Switch to a different input buffer.
++ * @param new_buffer The new input buffer.
++ * 
++ */
++    void zconf_switch_to_buffer  (YY_BUFFER_STATE  new_buffer )
++{
++    
++	/* TODO. We should be able to replace this entire function body
++	 * with
++	 *		zconfpop_buffer_state();
++	 *		zconfpush_buffer_state(new_buffer);
++     */
++	zconfensure_buffer_stack ();
++	if ( YY_CURRENT_BUFFER == new_buffer )
++		return;
++
++	if ( YY_CURRENT_BUFFER )
++		{
++		/* Flush out information for old buffer. */
++		*(yy_c_buf_p) = (yy_hold_char);
++		YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
++		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
++		}
++
++	YY_CURRENT_BUFFER_LVALUE = new_buffer;
++	zconf_load_buffer_state( );
++
++	/* We don't actually know whether we did this switch during
++	 * EOF (zconfwrap()) processing, but the only time this flag
++	 * is looked at is after zconfwrap() is called, so it's safe
++	 * to go ahead and always set it.
++	 */
++	(yy_did_buffer_switch_on_eof) = 1;
++}
++
++static void zconf_load_buffer_state  (void)
++{
++    	(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
++	(yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
++	zconfin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
++	(yy_hold_char) = *(yy_c_buf_p);
++}
++
++/** Allocate and initialize an input buffer state.
++ * @param file A readable stream.
++ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
++ * 
++ * @return the allocated buffer state.
++ */
++    YY_BUFFER_STATE zconf_create_buffer  (FILE * file, int  size )
++{
++	YY_BUFFER_STATE b;
++    
++	b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state )  );
++	if ( ! b )
++		YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
++
++	b->yy_buf_size = size;
++
++	/* yy_ch_buf has to be 2 characters longer than the size given because
++	 * we need to put in 2 end-of-buffer characters.
++	 */
++	b->yy_ch_buf = (char *) zconfalloc(b->yy_buf_size + 2  );
++	if ( ! b->yy_ch_buf )
++		YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
++
++	b->yy_is_our_buffer = 1;
++
++	zconf_init_buffer(b,file );
++
++	return b;
++}
++
++/** Destroy the buffer.
++ * @param b a buffer created with zconf_create_buffer()
++ * 
++ */
++    void zconf_delete_buffer (YY_BUFFER_STATE  b )
++{
++    
++	if ( ! b )
++		return;
++
++	if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
++		YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
++
++	if ( b->yy_is_our_buffer )
++		zconffree((void *) b->yy_ch_buf  );
++
++	zconffree((void *) b  );
++}
++
++/* Initializes or reinitializes a buffer.
++ * This function is sometimes called more than once on the same buffer,
++ * such as during a zconfrestart() or at EOF.
++ */
++    static void zconf_init_buffer  (YY_BUFFER_STATE  b, FILE * file )
++
++{
++	int oerrno = errno;
++    
++	zconf_flush_buffer(b );
++
++	b->yy_input_file = file;
++	b->yy_fill_buffer = 1;
++
++    /* If b is the current buffer, then zconf_init_buffer was _probably_
++     * called from zconfrestart() or through yy_get_next_buffer.
++     * In that case, we don't want to reset the lineno or column.
++     */
++    if (b != YY_CURRENT_BUFFER){
++        b->yy_bs_lineno = 1;
++        b->yy_bs_column = 0;
++    }
++
++        b->yy_is_interactive = 0;
++    
++	errno = oerrno;
++}
++
++/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
++ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
++ * 
++ */
++    void zconf_flush_buffer (YY_BUFFER_STATE  b )
++{
++    	if ( ! b )
++		return;
++
++	b->yy_n_chars = 0;
++
++	/* We always need two end-of-buffer characters.  The first causes
++	 * a transition to the end-of-buffer state.  The second causes
++	 * a jam in that state.
++	 */
++	b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
++	b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
++
++	b->yy_buf_pos = &b->yy_ch_buf[0];
++
++	b->yy_at_bol = 1;
++	b->yy_buffer_status = YY_BUFFER_NEW;
++
++	if ( b == YY_CURRENT_BUFFER )
++		zconf_load_buffer_state( );
++}
++
++/** Pushes the new state onto the stack. The new state becomes
++ *  the current state. This function will allocate the stack
++ *  if necessary.
++ *  @param new_buffer The new state.
++ *  
++ */
++void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer )
++{
++    	if (new_buffer == NULL)
++		return;
++
++	zconfensure_buffer_stack();
++
++	/* This block is copied from zconf_switch_to_buffer. */
++	if ( YY_CURRENT_BUFFER )
++		{
++		/* Flush out information for old buffer. */
++		*(yy_c_buf_p) = (yy_hold_char);
++		YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
++		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
++		}
++
++	/* Only push if top exists. Otherwise, replace top. */
++	if (YY_CURRENT_BUFFER)
++		(yy_buffer_stack_top)++;
++	YY_CURRENT_BUFFER_LVALUE = new_buffer;
++
++	/* copied from zconf_switch_to_buffer. */
++	zconf_load_buffer_state( );
++	(yy_did_buffer_switch_on_eof) = 1;
++}
++
++/** Removes and deletes the top of the stack, if present.
++ *  The next element becomes the new top.
++ *  
++ */
++void zconfpop_buffer_state (void)
++{
++    	if (!YY_CURRENT_BUFFER)
++		return;
++
++	zconf_delete_buffer(YY_CURRENT_BUFFER );
++	YY_CURRENT_BUFFER_LVALUE = NULL;
++	if ((yy_buffer_stack_top) > 0)
++		--(yy_buffer_stack_top);
++
++	if (YY_CURRENT_BUFFER) {
++		zconf_load_buffer_state( );
++		(yy_did_buffer_switch_on_eof) = 1;
++	}
++}
++
++/* Allocates the stack if it does not exist.
++ *  Guarantees space for at least one push.
++ */
++static void zconfensure_buffer_stack (void)
++{
++	int num_to_alloc;
++    
++	if (!(yy_buffer_stack)) {
++
++		/* First allocation is just for 2 elements, since we don't know if this
++		 * scanner will even need a stack. We use 2 instead of 1 to avoid an
++		 * immediate realloc on the next call.
++         */
++		num_to_alloc = 1;
++		(yy_buffer_stack) = (struct yy_buffer_state**)zconfalloc
++								(num_to_alloc * sizeof(struct yy_buffer_state*)
++								);
++		
++		memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
++				
++		(yy_buffer_stack_max) = num_to_alloc;
++		(yy_buffer_stack_top) = 0;
++		return;
++	}
++
++	if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
++
++		/* Increase the buffer to prepare for a possible push. */
++		int grow_size = 8 /* arbitrary grow size */;
++
++		num_to_alloc = (yy_buffer_stack_max) + grow_size;
++		(yy_buffer_stack) = (struct yy_buffer_state**)zconfrealloc
++								((yy_buffer_stack),
++								num_to_alloc * sizeof(struct yy_buffer_state*)
++								);
++
++		/* zero only the new slots.*/
++		memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
++		(yy_buffer_stack_max) = num_to_alloc;
++	}
++}
++
++/** Setup the input buffer state to scan directly from a user-specified character buffer.
++ * @param base the character buffer
++ * @param size the size in bytes of the character buffer
++ * 
++ * @return the newly allocated buffer state object. 
++ */
++YY_BUFFER_STATE zconf_scan_buffer  (char * base, yy_size_t  size )
++{
++	YY_BUFFER_STATE b;
++    
++	if ( size < 2 ||
++	     base[size-2] != YY_END_OF_BUFFER_CHAR ||
++	     base[size-1] != YY_END_OF_BUFFER_CHAR )
++		/* They forgot to leave room for the EOB's. */
++		return 0;
++
++	b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state )  );
++	if ( ! b )
++		YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_buffer()" );
++
++	b->yy_buf_size = size - 2;	/* "- 2" to take care of EOB's */
++	b->yy_buf_pos = b->yy_ch_buf = base;
++	b->yy_is_our_buffer = 0;
++	b->yy_input_file = 0;
++	b->yy_n_chars = b->yy_buf_size;
++	b->yy_is_interactive = 0;
++	b->yy_at_bol = 1;
++	b->yy_fill_buffer = 0;
++	b->yy_buffer_status = YY_BUFFER_NEW;
++
++	zconf_switch_to_buffer(b  );
++
++	return b;
++}
++
++/** Setup the input buffer state to scan a string. The next call to zconflex() will
++ * scan from a @e copy of @a str.
++ * @param str a NUL-terminated string to scan
++ * 
++ * @return the newly allocated buffer state object.
++ * @note If you want to scan bytes that may contain NUL values, then use
++ *       zconf_scan_bytes() instead.
++ */
++YY_BUFFER_STATE zconf_scan_string (yyconst char * str )
++{
++    
++	return zconf_scan_bytes(str,strlen(str) );
++}
++
++/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will
++ * scan from a @e copy of @a bytes.
++ * @param bytes the byte buffer to scan
++ * @param len the number of bytes in the buffer pointed to by @a bytes.
++ * 
++ * @return the newly allocated buffer state object.
++ */
++YY_BUFFER_STATE zconf_scan_bytes  (yyconst char * bytes, int  len )
++{
++	YY_BUFFER_STATE b;
++	char *buf;
++	yy_size_t n;
++	int i;
++    
++	/* Get memory for full buffer, including space for trailing EOB's. */
++	n = len + 2;
++	buf = (char *) zconfalloc(n  );
++	if ( ! buf )
++		YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_bytes()" );
++
++	for ( i = 0; i < len; ++i )
++		buf[i] = bytes[i];
++
++	buf[len] = buf[len+1] = YY_END_OF_BUFFER_CHAR;
++
++	b = zconf_scan_buffer(buf,n );
++	if ( ! b )
++		YY_FATAL_ERROR( "bad buffer in zconf_scan_bytes()" );
++
++	/* It's okay to grow etc. this buffer, and we should throw it
++	 * away when we're done.
++	 */
++	b->yy_is_our_buffer = 1;
++
++	return b;
++}
++
++#ifndef YY_EXIT_FAILURE
++#define YY_EXIT_FAILURE 2
++#endif
++
++static void yy_fatal_error (yyconst char* msg )
++{
++    	(void) fprintf( stderr, "%s\n", msg );
++	exit( YY_EXIT_FAILURE );
++}
++
++/* Redefine yyless() so it works in section 3 code. */
++
++#undef yyless
++#define yyless(n) \
++	do \
++		{ \
++		/* Undo effects of setting up zconftext. */ \
++        int yyless_macro_arg = (n); \
++        YY_LESS_LINENO(yyless_macro_arg);\
++		zconftext[zconfleng] = (yy_hold_char); \
++		(yy_c_buf_p) = zconftext + yyless_macro_arg; \
++		(yy_hold_char) = *(yy_c_buf_p); \
++		*(yy_c_buf_p) = '\0'; \
++		zconfleng = yyless_macro_arg; \
++		} \
++	while ( 0 )
++
++/* Accessor  methods (get/set functions) to struct members. */
++
++/** Get the current line number.
++ * 
++ */
++int zconfget_lineno  (void)
++{
++        
++    return zconflineno;
++}
++
++/** Get the input stream.
++ * 
++ */
++FILE *zconfget_in  (void)
++{
++        return zconfin;
++}
++
++/** Get the output stream.
++ * 
++ */
++FILE *zconfget_out  (void)
++{
++        return zconfout;
++}
++
++/** Get the length of the current token.
++ * 
++ */
++int zconfget_leng  (void)
++{
++        return zconfleng;
++}
++
++/** Get the current token.
++ * 
++ */
++
++char *zconfget_text  (void)
++{
++        return zconftext;
++}
++
++/** Set the current line number.
++ * @param line_number
++ * 
++ */
++void zconfset_lineno (int  line_number )
++{
++    
++    zconflineno = line_number;
++}
++
++/** Set the input stream. This does not discard the current
++ * input buffer.
++ * @param in_str A readable stream.
++ * 
++ * @see zconf_switch_to_buffer
++ */
++void zconfset_in (FILE *  in_str )
++{
++        zconfin = in_str ;
++}
++
++void zconfset_out (FILE *  out_str )
++{
++        zconfout = out_str ;
++}
++
++int zconfget_debug  (void)
++{
++        return zconf_flex_debug;
++}
++
++void zconfset_debug (int  bdebug )
++{
++        zconf_flex_debug = bdebug ;
++}
++
++/* zconflex_destroy is for both reentrant and non-reentrant scanners. */
++int zconflex_destroy  (void)
++{
++    
++    /* Pop the buffer stack, destroying each element. */
++	while(YY_CURRENT_BUFFER){
++		zconf_delete_buffer(YY_CURRENT_BUFFER  );
++		YY_CURRENT_BUFFER_LVALUE = NULL;
++		zconfpop_buffer_state();
++	}
++
++	/* Destroy the stack itself. */
++	zconffree((yy_buffer_stack) );
++	(yy_buffer_stack) = NULL;
++
++    return 0;
++}
++
++/*
++ * Internal utility routines.
++ */
++
++#ifndef yytext_ptr
++static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
++{
++	register int i;
++    	for ( i = 0; i < n; ++i )
++		s1[i] = s2[i];
++}
++#endif
++
++#ifdef YY_NEED_STRLEN
++static int yy_flex_strlen (yyconst char * s )
++{
++	register int n;
++    	for ( n = 0; s[n]; ++n )
++		;
++
++	return n;
++}
++#endif
++
++void *zconfalloc (yy_size_t  size )
++{
++	return (void *) malloc( size );
++}
++
++void *zconfrealloc  (void * ptr, yy_size_t  size )
++{
++	/* The cast to (char *) in the following accommodates both
++	 * implementations that use char* generic pointers, and those
++	 * that use void* generic pointers.  It works with the latter
++	 * because both ANSI C and C++ allow castless assignment from
++	 * any pointer type to void*, and deal with argument conversions
++	 * as though doing an assignment.
++	 */
++	return (void *) realloc( (char *) ptr, size );
++}
++
++void zconffree (void * ptr )
++{
++	free( (char *) ptr );	/* see zconfrealloc() for (char *) cast */
++}
++
++#define YYTABLES_NAME "yytables"
++
++#undef YY_NEW_FILE
++#undef YY_FLUSH_BUFFER
++#undef yy_set_bol
++#undef yy_new_buffer
++#undef yy_set_interactive
++#undef yytext_ptr
++#undef YY_DO_BEFORE_ACTION
++
++#ifdef YY_DECL_IS_OURS
++#undef YY_DECL_IS_OURS
++#undef YY_DECL
++#endif
++
++void zconf_starthelp(void)
++{
++	new_string();
++	last_ts = first_ts = 0;
++	BEGIN(HELP);
++}
++
++static void zconf_endhelp(void)
++{
++	zconflval.string = text;
++	BEGIN(INITIAL);
++}
++
++/*
++ * Try to open specified file with following names:
++ * ./name
++ * $(srctree)/name
++ * The latter is used when srctree is separate from objtree
++ * when compiling the kernel.
++ * Return NULL if file is not found.
++ */
++FILE *zconf_fopen(const char *name)
++{
++	char *env, fullname[PATH_MAX+1];
++	FILE *f;
++
++	f = fopen(name, "r");
++	if (!f && name[0] != '/') {
++		env = getenv(SRCTREE);
++		if (env) {
++			sprintf(fullname, "%s/%s", env, name);
++			f = fopen(fullname, "r");
++		}
++	}
++	return f;
++}
++
++void zconf_initscan(const char *name)
++{
++	zconfin = zconf_fopen(name);
++	if (!zconfin) {
++		printf("can't find file %s\n", name);
++		exit(1);
++	}
++
++	current_buf = malloc(sizeof(*current_buf));
++	memset(current_buf, 0, sizeof(*current_buf));
++
++	current_file = file_lookup(name);
++	current_file->lineno = 1;
++	current_file->flags = FILE_BUSY;
++}
++
++void zconf_nextfile(const char *name)
++{
++	struct file *file = file_lookup(name);
++	struct buffer *buf = malloc(sizeof(*buf));
++	memset(buf, 0, sizeof(*buf));
++
++	current_buf->state = YY_CURRENT_BUFFER;
++	zconfin = zconf_fopen(name);
++	if (!zconfin) {
++		printf("%s:%d: can't open file \"%s\"\n", zconf_curname(), zconf_lineno(), name);
++		exit(1);
++	}
++	zconf_switch_to_buffer(zconf_create_buffer(zconfin,YY_BUF_SIZE));
++	buf->parent = current_buf;
++	current_buf = buf;
++
++	if (file->flags & FILE_BUSY) {
++		printf("recursive scan (%s)?\n", name);
++		exit(1);
++	}
++	if (file->flags & FILE_SCANNED) {
++		printf("file %s already scanned?\n", name);
++		exit(1);
++	}
++	file->flags |= FILE_BUSY;
++	file->lineno = 1;
++	file->parent = current_file;
++	current_file = file;
++}
++
++static struct buffer *zconf_endfile(void)
++{
++	struct buffer *parent;
++
++	current_file->flags |= FILE_SCANNED;
++	current_file->flags &= ~FILE_BUSY;
++	current_file = current_file->parent;
++
++	parent = current_buf->parent;
++	if (parent) {
++		fclose(zconfin);
++		zconf_delete_buffer(YY_CURRENT_BUFFER);
++		zconf_switch_to_buffer(parent->state);
++	}
++	free(current_buf);
++	current_buf = parent;
++
++	return parent;
++}
++
++int zconf_lineno(void)
++{
++	if (current_buf)
++		return current_file->lineno - 1;
++	else
++		return 0;
++}
++
++char *zconf_curname(void)
++{
++	if (current_buf)
++		return current_file->name;
++	else
++		return "<none>";
++}
++
+Binary files pristine-linux-2.6.12/scripts/kconfig/mconf.o and linux-2.6.12-xen/scripts/kconfig/mconf.o differ.
+diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.mconf.o.cmd linux-2.6.12-xen/scripts/kconfig/.mconf.o.cmd
+--- pristine-linux-2.6.12/scripts/kconfig/.mconf.o.cmd	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/kconfig/.mconf.o.cmd	2006-03-05 23:55:02.729257710 +0100
+@@ -0,0 +1,96 @@
++cmd_scripts/kconfig/mconf.o := gcc -Wp,-MD,scripts/kconfig/.mconf.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer       -c -o scripts/kconfig/mconf.o scripts/kconfig/mconf.c
++
++deps_scripts/kconfig/mconf.o := \
++  scripts/kconfig/mconf.c \
++    $(wildcard include/config/mode.h) \
++    $(wildcard include/config/.h) \
++  /usr/include/sys/ioctl.h \
++  /usr/include/features.h \
++  /usr/include/sys/cdefs.h \
++  /usr/include/gnu/stubs.h \
++  /usr/include/bits/ioctls.h \
++  /usr/include/asm/ioctls.h \
++  /usr/include/asm-i486/ioctls.h \
++  /usr/include/asm/ioctl.h \
++  /usr/include/asm-i486/ioctl.h \
++  /usr/include/bits/ioctl-types.h \
++  /usr/include/sys/ttydefaults.h \
++  /usr/include/sys/wait.h \
++  /usr/include/signal.h \
++  /usr/include/bits/sigset.h \
++  /usr/include/bits/types.h \
++  /usr/include/bits/wordsize.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
++  /usr/include/bits/typesizes.h \
++  /usr/include/bits/signum.h \
++  /usr/include/time.h \
++  /usr/include/bits/siginfo.h \
++  /usr/include/bits/sigaction.h \
++  /usr/include/bits/sigcontext.h \
++  /usr/include/asm/sigcontext.h \
++  /usr/include/asm-i486/sigcontext.h \
++  /usr/include/linux/compiler.h \
++  /usr/include/bits/sigstack.h \
++  /usr/include/bits/pthreadtypes.h \
++  /usr/include/bits/sched.h \
++  /usr/include/bits/sigthread.h \
++  /usr/include/sys/resource.h \
++  /usr/include/bits/resource.h \
++  /usr/include/bits/time.h \
++  /usr/include/bits/waitflags.h \
++  /usr/include/bits/waitstatus.h \
++  /usr/include/endian.h \
++  /usr/include/bits/endian.h \
++  /usr/include/ctype.h \
++  /usr/include/errno.h \
++  /usr/include/bits/errno.h \
++  /usr/include/linux/errno.h \
++  /usr/include/asm/errno.h \
++  /usr/include/asm-i486/errno.h \
++  /usr/include/asm-generic/errno.h \
++  /usr/include/asm-generic/errno-base.h \
++  /usr/include/fcntl.h \
++  /usr/include/bits/fcntl.h \
++  /usr/include/sys/types.h \
++  /usr/include/sys/select.h \
++  /usr/include/bits/select.h \
++  /usr/include/sys/sysmacros.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/limits.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/syslimits.h \
++  /usr/include/limits.h \
++  /usr/include/bits/posix1_lim.h \
++  /usr/include/bits/local_lim.h \
++  /usr/include/linux/limits.h \
++  /usr/include/bits/posix2_lim.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
++  /usr/include/stdlib.h \
++  /usr/include/alloca.h \
++  /usr/include/string.h \
++  /usr/include/bits/string.h \
++  /usr/include/bits/string2.h \
++  /usr/include/termios.h \
++  /usr/include/bits/termios.h \
++  /usr/include/unistd.h \
++  /usr/include/bits/posix_opt.h \
++  /usr/include/bits/confname.h \
++  /usr/include/getopt.h \
++  scripts/kconfig/lkc.h \
++  scripts/kconfig/expr.h \
++  /usr/include/stdio.h \
++  /usr/include/libio.h \
++  /usr/include/_G_config.h \
++  /usr/include/wchar.h \
++  /usr/include/bits/wchar.h \
++  /usr/include/gconv.h \
++  /usr/include/bits/stdio_lim.h \
++  /usr/include/bits/sys_errlist.h \
++  /usr/include/bits/stdio.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdbool.h \
++  /usr/include/libintl.h \
++  /usr/include/locale.h \
++  /usr/include/bits/locale.h \
++  scripts/kconfig/lkc_proto.h \
++
++scripts/kconfig/mconf.o: $(deps_scripts/kconfig/mconf.o)
++
++$(deps_scripts/kconfig/mconf.o):
+diff -Nurp pristine-linux-2.6.12/scripts/kconfig/zconf.tab.c linux-2.6.12-xen/scripts/kconfig/zconf.tab.c
+--- pristine-linux-2.6.12/scripts/kconfig/zconf.tab.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/kconfig/zconf.tab.c	2006-03-05 23:54:56.401191251 +0100
+@@ -0,0 +1,2130 @@
++/* A Bison parser, made by GNU Bison 1.875a.  */
++
++/* Skeleton parser for Yacc-like parsing with Bison,
++   Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 2, or (at your option)
++   any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place - Suite 330,
++   Boston, MA 02111-1307, USA.  */
++
++/* As a special exception, when this file is copied by Bison into a
++   Bison output file, you may use that output file without restriction.
++   This special exception was added by the Free Software Foundation
++   in version 1.24 of Bison.  */
++
++/* Written by Richard Stallman by simplifying the original so called
++   ``semantic'' parser.  */
++
++/* All symbols defined below should begin with yy or YY, to avoid
++   infringing on user name space.  This should be done even for local
++   variables, as they might otherwise be expanded by user macros.
++   There are some unavoidable exceptions within include files to
++   define necessary library symbols; they are noted "INFRINGES ON
++   USER NAME SPACE" below.  */
++
++/* Identify Bison output.  */
++#define YYBISON 1
++
++/* Skeleton name.  */
++#define YYSKELETON_NAME "yacc.c"
++
++/* Pure parsers.  */
++#define YYPURE 0
++
++/* Using locations.  */
++#define YYLSP_NEEDED 0
++
++/* If NAME_PREFIX is specified substitute the variables and functions
++   names.  */
++#define yyparse zconfparse
++#define yylex   zconflex
++#define yyerror zconferror
++#define yylval  zconflval
++#define yychar  zconfchar
++#define yydebug zconfdebug
++#define yynerrs zconfnerrs
++
++
++/* Tokens.  */
++#ifndef YYTOKENTYPE
++# define YYTOKENTYPE
++   /* Put the tokens into the symbol table, so that GDB and other debuggers
++      know about them.  */
++   enum yytokentype {
++     T_MAINMENU = 258,
++     T_MENU = 259,
++     T_ENDMENU = 260,
++     T_SOURCE = 261,
++     T_CHOICE = 262,
++     T_ENDCHOICE = 263,
++     T_COMMENT = 264,
++     T_CONFIG = 265,
++     T_MENUCONFIG = 266,
++     T_HELP = 267,
++     T_HELPTEXT = 268,
++     T_IF = 269,
++     T_ENDIF = 270,
++     T_DEPENDS = 271,
++     T_REQUIRES = 272,
++     T_OPTIONAL = 273,
++     T_PROMPT = 274,
++     T_DEFAULT = 275,
++     T_TRISTATE = 276,
++     T_DEF_TRISTATE = 277,
++     T_BOOLEAN = 278,
++     T_DEF_BOOLEAN = 279,
++     T_STRING = 280,
++     T_INT = 281,
++     T_HEX = 282,
++     T_WORD = 283,
++     T_WORD_QUOTE = 284,
++     T_UNEQUAL = 285,
++     T_EOF = 286,
++     T_EOL = 287,
++     T_CLOSE_PAREN = 288,
++     T_OPEN_PAREN = 289,
++     T_ON = 290,
++     T_SELECT = 291,
++     T_RANGE = 292,
++     T_OR = 293,
++     T_AND = 294,
++     T_EQUAL = 295,
++     T_NOT = 296
++   };
++#endif
++#define T_MAINMENU 258
++#define T_MENU 259
++#define T_ENDMENU 260
++#define T_SOURCE 261
++#define T_CHOICE 262
++#define T_ENDCHOICE 263
++#define T_COMMENT 264
++#define T_CONFIG 265
++#define T_MENUCONFIG 266
++#define T_HELP 267
++#define T_HELPTEXT 268
++#define T_IF 269
++#define T_ENDIF 270
++#define T_DEPENDS 271
++#define T_REQUIRES 272
++#define T_OPTIONAL 273
++#define T_PROMPT 274
++#define T_DEFAULT 275
++#define T_TRISTATE 276
++#define T_DEF_TRISTATE 277
++#define T_BOOLEAN 278
++#define T_DEF_BOOLEAN 279
++#define T_STRING 280
++#define T_INT 281
++#define T_HEX 282
++#define T_WORD 283
++#define T_WORD_QUOTE 284
++#define T_UNEQUAL 285
++#define T_EOF 286
++#define T_EOL 287
++#define T_CLOSE_PAREN 288
++#define T_OPEN_PAREN 289
++#define T_ON 290
++#define T_SELECT 291
++#define T_RANGE 292
++#define T_OR 293
++#define T_AND 294
++#define T_EQUAL 295
++#define T_NOT 296
++
++
++
++
++/* Copy the first part of user declarations.  */
++
++
++/*
++ * Copyright (C) 2002 Roman Zippel <zippel at linux-m68k.org>
++ * Released under the terms of the GNU GPL v2.0.
++ */
++
++#include <ctype.h>
++#include <stdarg.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <stdbool.h>
++
++#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt)
++
++#define PRINTD		0x0001
++#define DEBUG_PARSE	0x0002
++
++int cdebug = PRINTD;
++
++extern int zconflex(void);
++static void zconfprint(const char *err, ...);
++static void zconferror(const char *err);
++static bool zconf_endtoken(int token, int starttoken, int endtoken);
++
++struct symbol *symbol_hash[257];
++
++static struct menu *current_menu, *current_entry;
++
++#define YYERROR_VERBOSE
++
++
++/* Enabling traces.  */
++#ifndef YYDEBUG
++# define YYDEBUG 0
++#endif
++
++/* Enabling verbose error messages.  */
++#ifdef YYERROR_VERBOSE
++# undef YYERROR_VERBOSE
++# define YYERROR_VERBOSE 1
++#else
++# define YYERROR_VERBOSE 0
++#endif
++
++#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED)
++
++typedef union YYSTYPE {
++	int token;
++	char *string;
++	struct symbol *symbol;
++	struct expr *expr;
++	struct menu *menu;
++} YYSTYPE;
++/* Line 191 of yacc.c.  */
++
++# define yystype YYSTYPE /* obsolescent; will be withdrawn */
++# define YYSTYPE_IS_DECLARED 1
++# define YYSTYPE_IS_TRIVIAL 1
++#endif
++
++
++
++/* Copy the second part of user declarations.  */
++
++
++#define LKC_DIRECT_LINK
++#include "lkc.h"
++
++
++/* Line 214 of yacc.c.  */
++
++
++#if ! defined (yyoverflow) || YYERROR_VERBOSE
++
++/* The parser invokes alloca or malloc; define the necessary symbols.  */
++
++# if YYSTACK_USE_ALLOCA
++#  define YYSTACK_ALLOC alloca
++# else
++#  ifndef YYSTACK_USE_ALLOCA
++#   if defined (alloca) || defined (_ALLOCA_H)
++#    define YYSTACK_ALLOC alloca
++#   else
++#    ifdef __GNUC__
++#     define YYSTACK_ALLOC __builtin_alloca
++#    endif
++#   endif
++#  endif
++# endif
++
++# ifdef YYSTACK_ALLOC
++   /* Pacify GCC's `empty if-body' warning. */
++#  define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
++# else
++#  if defined (__STDC__) || defined (__cplusplus)
++#   include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
++#   define YYSIZE_T size_t
++#  endif
++#  define YYSTACK_ALLOC malloc
++#  define YYSTACK_FREE free
++# endif
++#endif /* ! defined (yyoverflow) || YYERROR_VERBOSE */
++
++
++#if (! defined (yyoverflow) \
++     && (! defined (__cplusplus) \
++	 || (YYSTYPE_IS_TRIVIAL)))
++
++/* A type that is properly aligned for any stack member.  */
++union yyalloc
++{
++  short yyss;
++  YYSTYPE yyvs;
++  };
++
++/* The size of the maximum gap between one aligned stack and the next.  */
++# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
++
++/* The size of an array large enough to hold all stacks, each with
++   N elements.  */
++# define YYSTACK_BYTES(N) \
++     ((N) * (sizeof (short) + sizeof (YYSTYPE))				\
++      + YYSTACK_GAP_MAXIMUM)
++
++/* Copy COUNT objects from FROM to TO.  The source and destination do
++   not overlap.  */
++# ifndef YYCOPY
++#  if 1 < __GNUC__
++#   define YYCOPY(To, From, Count) \
++      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
++#  else
++#   define YYCOPY(To, From, Count)		\
++      do					\
++	{					\
++	  register YYSIZE_T yyi;		\
++	  for (yyi = 0; yyi < (Count); yyi++)	\
++	    (To)[yyi] = (From)[yyi];		\
++	}					\
++      while (0)
++#  endif
++# endif
++
++/* Relocate STACK from its old location to the new one.  The
++   local variables YYSIZE and YYSTACKSIZE give the old and new number of
++   elements in the stack, and YYPTR gives the new location of the
++   stack.  Advance YYPTR to a properly aligned location for the next
++   stack.  */
++# define YYSTACK_RELOCATE(Stack)					\
++    do									\
++      {									\
++	YYSIZE_T yynewbytes;						\
++	YYCOPY (&yyptr->Stack, Stack, yysize);				\
++	Stack = &yyptr->Stack;						\
++	yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
++	yyptr += yynewbytes / sizeof (*yyptr);				\
++      }									\
++    while (0)
++
++#endif
++
++#if defined (__STDC__) || defined (__cplusplus)
++   typedef signed char yysigned_char;
++#else
++   typedef short yysigned_char;
++#endif
++
++/* YYFINAL -- State number of the termination state. */
++#define YYFINAL  2
++/* YYLAST -- Last index in YYTABLE.  */
++#define YYLAST   201
++
++/* YYNTOKENS -- Number of terminals. */
++#define YYNTOKENS  42
++/* YYNNTS -- Number of nonterminals. */
++#define YYNNTS  41
++/* YYNRULES -- Number of rules. */
++#define YYNRULES  104
++/* YYNRULES -- Number of states. */
++#define YYNSTATES  182
++
++/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
++#define YYUNDEFTOK  2
++#define YYMAXUTOK   296
++
++#define YYTRANSLATE(YYX) 						\
++  ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
++
++/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX.  */
++static const unsigned char yytranslate[] =
++{
++       0,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
++       2,     2,     2,     2,     2,     2,     1,     2,     3,     4,
++       5,     6,     7,     8,     9,    10,    11,    12,    13,    14,
++      15,    16,    17,    18,    19,    20,    21,    22,    23,    24,
++      25,    26,    27,    28,    29,    30,    31,    32,    33,    34,
++      35,    36,    37,    38,    39,    40,    41
++};
++
++#if YYDEBUG
++/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
++   YYRHS.  */
++static const unsigned short yyprhs[] =
++{
++       0,     0,     3,     4,     7,     9,    11,    13,    17,    19,
++      21,    23,    26,    28,    30,    32,    34,    36,    38,    42,
++      45,    49,    52,    53,    56,    59,    62,    65,    69,    74,
++      78,    83,    87,    91,    95,   100,   105,   110,   116,   119,
++     122,   124,   128,   131,   132,   135,   138,   141,   144,   149,
++     153,   157,   160,   165,   166,   169,   173,   175,   179,   182,
++     183,   186,   189,   192,   196,   199,   201,   205,   208,   209,
++     212,   215,   218,   222,   226,   228,   232,   235,   238,   241,
++     242,   245,   248,   253,   257,   261,   262,   265,   267,   269,
++     272,   275,   278,   280,   282,   283,   286,   288,   292,   296,
++     300,   303,   307,   311,   313
++};
++
++/* YYRHS -- A `-1'-separated list of the rules' RHS. */
++static const yysigned_char yyrhs[] =
++{
++      43,     0,    -1,    -1,    43,    44,    -1,    45,    -1,    55,
++      -1,    66,    -1,     3,    77,    79,    -1,     5,    -1,    15,
++      -1,     8,    -1,     1,    79,    -1,    61,    -1,    71,    -1,
++      47,    -1,    49,    -1,    69,    -1,    79,    -1,    10,    28,
++      32,    -1,    46,    50,    -1,    11,    28,    32,    -1,    48,
++      50,    -1,    -1,    50,    51,    -1,    50,    75,    -1,    50,
++      73,    -1,    50,    32,    -1,    21,    76,    32,    -1,    22,
++      81,    80,    32,    -1,    23,    76,    32,    -1,    24,    81,
++      80,    32,    -1,    26,    76,    32,    -1,    27,    76,    32,
++      -1,    25,    76,    32,    -1,    19,    77,    80,    32,    -1,
++      20,    81,    80,    32,    -1,    36,    28,    80,    32,    -1,
++      37,    82,    82,    80,    32,    -1,     7,    32,    -1,    52,
++      56,    -1,    78,    -1,    53,    58,    54,    -1,    53,    58,
++      -1,    -1,    56,    57,    -1,    56,    75,    -1,    56,    73,
++      -1,    56,    32,    -1,    19,    77,    80,    32,    -1,    21,
++      76,    32,    -1,    23,    76,    32,    -1,    18,    32,    -1,
++      20,    28,    80,    32,    -1,    -1,    58,    45,    -1,    14,
++      81,    32,    -1,    78,    -1,    59,    62,    60,    -1,    59,
++      62,    -1,    -1,    62,    45,    -1,    62,    66,    -1,    62,
++      55,    -1,     4,    77,    32,    -1,    63,    74,    -1,    78,
++      -1,    64,    67,    65,    -1,    64,    67,    -1,    -1,    67,
++      45,    -1,    67,    66,    -1,    67,    55,    -1,    67,     1,
++      32,    -1,     6,    77,    32,    -1,    68,    -1,     9,    77,
++      32,    -1,    70,    74,    -1,    12,    32,    -1,    72,    13,
++      -1,    -1,    74,    75,    -1,    74,    32,    -1,    16,    35,
++      81,    32,    -1,    16,    81,    32,    -1,    17,    81,    32,
++      -1,    -1,    77,    80,    -1,    28,    -1,    29,    -1,     5,
++      79,    -1,     8,    79,    -1,    15,    79,    -1,    32,    -1,
++      31,    -1,    -1,    14,    81,    -1,    82,    -1,    82,    40,
++      82,    -1,    82,    30,    82,    -1,    34,    81,    33,    -1,
++      41,    81,    -1,    81,    38,    81,    -1,    81,    39,    81,
++      -1,    28,    -1,    29,    -1
++};
++
++/* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
++static const unsigned short yyrline[] =
++{
++       0,    94,    94,    95,    98,    99,   100,   101,   102,   103,
++     104,   105,   109,   110,   111,   112,   113,   114,   120,   128,
++     134,   142,   152,   154,   155,   156,   157,   160,   166,   173,
++     179,   186,   192,   198,   204,   210,   216,   222,   230,   239,
++     245,   254,   255,   261,   263,   264,   265,   266,   269,   275,
++     281,   287,   293,   299,   301,   306,   315,   324,   325,   331,
++     333,   334,   335,   340,   347,   353,   362,   363,   369,   371,
++     372,   373,   374,   377,   383,   390,   397,   404,   410,   417,
++     418,   419,   422,   427,   432,   440,   442,   447,   448,   451,
++     452,   453,   457,   457,   459,   460,   463,   464,   465,   466,
++     467,   468,   469,   472,   473
++};
++#endif
++
++#if YYDEBUG || YYERROR_VERBOSE
++/* YYTNME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
++   First, the terminals, then, starting at YYNTOKENS, nonterminals. */
++static const char *const yytname[] =
++{
++  "$end", "error", "$undefined", "T_MAINMENU", "T_MENU", "T_ENDMENU", 
++  "T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG", 
++  "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS", 
++  "T_REQUIRES", "T_OPTIONAL", "T_PROMPT", "T_DEFAULT", "T_TRISTATE", 
++  "T_DEF_TRISTATE", "T_BOOLEAN", "T_DEF_BOOLEAN", "T_STRING", "T_INT", 
++  "T_HEX", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL", "T_EOF", "T_EOL", 
++  "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_ON", "T_SELECT", "T_RANGE", "T_OR", 
++  "T_AND", "T_EQUAL", "T_NOT", "$accept", "input", "block", 
++  "common_block", "config_entry_start", "config_stmt", 
++  "menuconfig_entry_start", "menuconfig_stmt", "config_option_list", 
++  "config_option", "choice", "choice_entry", "choice_end", "choice_stmt", 
++  "choice_option_list", "choice_option", "choice_block", "if", "if_end", 
++  "if_stmt", "if_block", "menu", "menu_entry", "menu_end", "menu_stmt", 
++  "menu_block", "source", "source_stmt", "comment", "comment_stmt", 
++  "help_start", "help", "depends_list", "depends", "prompt_stmt_opt", 
++  "prompt", "end", "nl_or_eof", "if_expr", "expr", "symbol", 0
++};
++#endif
++
++# ifdef YYPRINT
++/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
++   token YYLEX-NUM.  */
++static const unsigned short yytoknum[] =
++{
++       0,   256,   257,   258,   259,   260,   261,   262,   263,   264,
++     265,   266,   267,   268,   269,   270,   271,   272,   273,   274,
++     275,   276,   277,   278,   279,   280,   281,   282,   283,   284,
++     285,   286,   287,   288,   289,   290,   291,   292,   293,   294,
++     295,   296
++};
++# endif
++
++/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */
++static const unsigned char yyr1[] =
++{
++       0,    42,    43,    43,    44,    44,    44,    44,    44,    44,
++      44,    44,    45,    45,    45,    45,    45,    45,    46,    47,
++      48,    49,    50,    50,    50,    50,    50,    51,    51,    51,
++      51,    51,    51,    51,    51,    51,    51,    51,    52,    53,
++      54,    55,    55,    56,    56,    56,    56,    56,    57,    57,
++      57,    57,    57,    58,    58,    59,    60,    61,    61,    62,
++      62,    62,    62,    63,    64,    65,    66,    66,    67,    67,
++      67,    67,    67,    68,    69,    70,    71,    72,    73,    74,
++      74,    74,    75,    75,    75,    76,    76,    77,    77,    78,
++      78,    78,    79,    79,    80,    80,    81,    81,    81,    81,
++      81,    81,    81,    82,    82
++};
++
++/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
++static const unsigned char yyr2[] =
++{
++       0,     2,     0,     2,     1,     1,     1,     3,     1,     1,
++       1,     2,     1,     1,     1,     1,     1,     1,     3,     2,
++       3,     2,     0,     2,     2,     2,     2,     3,     4,     3,
++       4,     3,     3,     3,     4,     4,     4,     5,     2,     2,
++       1,     3,     2,     0,     2,     2,     2,     2,     4,     3,
++       3,     2,     4,     0,     2,     3,     1,     3,     2,     0,
++       2,     2,     2,     3,     2,     1,     3,     2,     0,     2,
++       2,     2,     3,     3,     1,     3,     2,     2,     2,     0,
++       2,     2,     4,     3,     3,     0,     2,     1,     1,     2,
++       2,     2,     1,     1,     0,     2,     1,     3,     3,     3,
++       2,     3,     3,     1,     1
++};
++
++/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
++   STATE-NUM when YYTABLE doesn't specify something else to do.  Zero
++   means the default is an error.  */
++static const unsigned char yydefact[] =
++{
++       2,     0,     1,     0,     0,     0,     8,     0,     0,    10,
++       0,     0,     0,     0,     9,    93,    92,     3,     4,    22,
++      14,    22,    15,    43,    53,     5,    59,    12,    79,    68,
++       6,    74,    16,    79,    13,    17,    11,    87,    88,     0,
++       0,     0,    38,     0,     0,     0,   103,   104,     0,     0,
++       0,    96,    19,    21,    39,    42,    58,    64,     0,    76,
++       7,    63,    73,    75,    18,    20,     0,   100,    55,     0,
++       0,     0,     0,     0,     0,     0,     0,     0,    85,     0,
++      85,     0,    85,    85,    85,    26,     0,     0,    23,     0,
++      25,    24,     0,     0,     0,    85,    85,    47,    44,    46,
++      45,     0,     0,     0,    54,    41,    40,    60,    62,    57,
++      61,    56,    81,    80,     0,    69,    71,    66,    70,    65,
++      99,   101,   102,    98,    97,    77,     0,     0,     0,    94,
++      94,     0,    94,    94,     0,    94,     0,     0,     0,    94,
++       0,    78,    51,    94,    94,     0,     0,    89,    90,    91,
++      72,     0,    83,    84,     0,     0,     0,    27,    86,     0,
++      29,     0,    33,    31,    32,     0,    94,     0,     0,    49,
++      50,    82,    95,    34,    35,    28,    30,    36,     0,    48,
++      52,    37
++};
++
++/* YYDEFGOTO[NTERM-NUM]. */
++static const short yydefgoto[] =
++{
++      -1,     1,    17,    18,    19,    20,    21,    22,    52,    88,
++      23,    24,   105,    25,    54,    98,    55,    26,   109,    27,
++      56,    28,    29,   117,    30,    58,    31,    32,    33,    34,
++      89,    90,    57,    91,   131,   132,   106,    35,   155,    50,
++      51
++};
++
++/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
++   STATE-NUM.  */
++#define YYPACT_NINF -99
++static const short yypact[] =
++{
++     -99,    48,   -99,    38,    46,    46,   -99,    46,   -29,   -99,
++      46,   -17,    -3,   -11,   -99,   -99,   -99,   -99,   -99,   -99,
++     -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,
++     -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,    38,
++      12,    15,   -99,    18,    51,    62,   -99,   -99,   -11,   -11,
++       4,   -24,   138,   138,   160,   121,   110,    -4,    81,    -4,
++     -99,   -99,   -99,   -99,   -99,   -99,   -19,   -99,   -99,   -11,
++     -11,    70,    70,    73,    32,   -11,    46,   -11,    46,   -11,
++      46,   -11,    46,    46,    46,   -99,    36,    70,   -99,    95,
++     -99,   -99,    96,    46,   106,    46,    46,   -99,   -99,   -99,
++     -99,    38,    38,    38,   -99,   -99,   -99,   -99,   -99,   -99,
++     -99,   -99,   -99,   -99,   112,   -99,   -99,   -99,   -99,   -99,
++     -99,   117,   -99,   -99,   -99,   -99,   -11,    33,    65,   131,
++       1,   119,   131,     1,   136,     1,   153,   154,   155,   131,
++      70,   -99,   -99,   131,   131,   156,   157,   -99,   -99,   -99,
++     -99,   101,   -99,   -99,   -11,   158,   159,   -99,   -99,   161,
++     -99,   162,   -99,   -99,   -99,   163,   131,   164,   165,   -99,
++     -99,   -99,    99,   -99,   -99,   -99,   -99,   -99,   166,   -99,
++     -99,   -99
++};
++
++/* YYPGOTO[NTERM-NUM].  */
++static const short yypgoto[] =
++{
++     -99,   -99,   -99,   111,   -99,   -99,   -99,   -99,   178,   -99,
++     -99,   -99,   -99,    91,   -99,   -99,   -99,   -99,   -99,   -99,
++     -99,   -99,   -99,   -99,   115,   -99,   -99,   -99,   -99,   -99,
++     -99,   146,   168,    89,    27,     0,   126,    -1,   -98,   -48,
++     -63
++};
++
++/* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
++   positive, shift that token.  If negative, reduce the rule which
++   number is the opposite.  If zero, do what YYDEFACT says.
++   If YYTABLE_NINF, syntax error.  */
++#define YYTABLE_NINF -68
++static const short yytable[] =
++{
++      66,    67,    36,    42,    39,    40,    71,    41,   123,   124,
++      43,    44,    74,    75,   120,   154,    72,    46,    47,    69,
++      70,   121,   122,    48,   140,    45,   127,   128,   112,   130,
++      49,   133,   156,   135,   158,   159,    68,   161,    60,    69,
++      70,   165,    69,    70,    61,   167,   168,    62,     2,     3,
++      63,     4,     5,     6,     7,     8,     9,    10,    11,    12,
++      46,    47,    13,    14,   139,   152,    48,   126,   178,    15,
++      16,    69,    70,    49,    37,    38,   129,   166,   151,    15,
++      16,   -67,   114,    64,   -67,     5,   101,     7,     8,   102,
++      10,    11,    12,   143,    65,    13,   103,   153,    46,    47,
++     147,   148,   149,    69,    70,   125,   172,   134,   141,   136,
++     137,   138,    15,    16,     5,   101,     7,     8,   102,    10,
++      11,    12,   145,   146,    13,   103,   101,     7,   142,   102,
++      10,    11,    12,   171,   144,    13,   103,    69,    70,    69,
++      70,    15,    16,   100,   150,   154,   113,   108,   113,   116,
++      73,   157,    15,    16,    74,    75,    70,    76,    77,    78,
++      79,    80,    81,    82,    83,    84,   104,   107,   160,   115,
++      85,   110,    73,   118,    86,    87,    74,    75,    92,    93,
++      94,    95,   111,    96,   119,   162,   163,   164,   169,   170,
++     173,   174,    97,   175,   176,   177,   179,   180,   181,    53,
++      99,    59
++};
++
++static const unsigned char yycheck[] =
++{
++      48,    49,     3,    32,     4,     5,    30,     7,    71,    72,
++      10,    28,    16,    17,    33,    14,    40,    28,    29,    38,
++      39,    69,    70,    34,    87,    28,    74,    75,    32,    77,
++      41,    79,   130,    81,   132,   133,    32,   135,    39,    38,
++      39,   139,    38,    39,    32,   143,   144,    32,     0,     1,
++      32,     3,     4,     5,     6,     7,     8,     9,    10,    11,
++      28,    29,    14,    15,    28,    32,    34,    35,   166,    31,
++      32,    38,    39,    41,    28,    29,    76,   140,   126,    31,
++      32,     0,     1,    32,     3,     4,     5,     6,     7,     8,
++       9,    10,    11,    93,    32,    14,    15,    32,    28,    29,
++     101,   102,   103,    38,    39,    32,   154,    80,    13,    82,
++      83,    84,    31,    32,     4,     5,     6,     7,     8,     9,
++      10,    11,    95,    96,    14,    15,     5,     6,    32,     8,
++       9,    10,    11,    32,    28,    14,    15,    38,    39,    38,
++      39,    31,    32,    54,    32,    14,    57,    56,    59,    58,
++      12,    32,    31,    32,    16,    17,    39,    19,    20,    21,
++      22,    23,    24,    25,    26,    27,    55,    56,    32,    58,
++      32,    56,    12,    58,    36,    37,    16,    17,    18,    19,
++      20,    21,    56,    23,    58,    32,    32,    32,    32,    32,
++      32,    32,    32,    32,    32,    32,    32,    32,    32,    21,
++      54,    33
++};
++
++/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
++   symbol of state STATE-NUM.  */
++static const unsigned char yystos[] =
++{
++       0,    43,     0,     1,     3,     4,     5,     6,     7,     8,
++       9,    10,    11,    14,    15,    31,    32,    44,    45,    46,
++      47,    48,    49,    52,    53,    55,    59,    61,    63,    64,
++      66,    68,    69,    70,    71,    79,    79,    28,    29,    77,
++      77,    77,    32,    77,    28,    28,    28,    29,    34,    41,
++      81,    82,    50,    50,    56,    58,    62,    74,    67,    74,
++      79,    32,    32,    32,    32,    32,    81,    81,    32,    38,
++      39,    30,    40,    12,    16,    17,    19,    20,    21,    22,
++      23,    24,    25,    26,    27,    32,    36,    37,    51,    72,
++      73,    75,    18,    19,    20,    21,    23,    32,    57,    73,
++      75,     5,     8,    15,    45,    54,    78,    45,    55,    60,
++      66,    78,    32,    75,     1,    45,    55,    65,    66,    78,
++      33,    81,    81,    82,    82,    32,    35,    81,    81,    77,
++      81,    76,    77,    81,    76,    81,    76,    76,    76,    28,
++      82,    13,    32,    77,    28,    76,    76,    79,    79,    79,
++      32,    81,    32,    32,    14,    80,    80,    32,    80,    80,
++      32,    80,    32,    32,    32,    80,    82,    80,    80,    32,
++      32,    32,    81,    32,    32,    32,    32,    32,    80,    32,
++      32,    32
++};
++
++#if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__)
++# define YYSIZE_T __SIZE_TYPE__
++#endif
++#if ! defined (YYSIZE_T) && defined (size_t)
++# define YYSIZE_T size_t
++#endif
++#if ! defined (YYSIZE_T)
++# if defined (__STDC__) || defined (__cplusplus)
++#  include <stddef.h> /* INFRINGES ON USER NAME SPACE */
++#  define YYSIZE_T size_t
++# endif
++#endif
++#if ! defined (YYSIZE_T)
++# define YYSIZE_T unsigned int
++#endif
++
++#define yyerrok		(yyerrstatus = 0)
++#define yyclearin	(yychar = YYEMPTY)
++#define YYEMPTY		(-2)
++#define YYEOF		0
++
++#define YYACCEPT	goto yyacceptlab
++#define YYABORT		goto yyabortlab
++#define YYERROR		goto yyerrlab1
++
++
++/* Like YYERROR except do call yyerror.  This remains here temporarily
++   to ease the transition to the new meaning of YYERROR, for GCC.
++   Once GCC version 2 has supplanted version 1, this can go.  */
++
++#define YYFAIL		goto yyerrlab
++
++#define YYRECOVERING()  (!!yyerrstatus)
++
++#define YYBACKUP(Token, Value)					\
++do								\
++  if (yychar == YYEMPTY && yylen == 1)				\
++    {								\
++      yychar = (Token);						\
++      yylval = (Value);						\
++      yytoken = YYTRANSLATE (yychar);				\
++      YYPOPSTACK;						\
++      goto yybackup;						\
++    }								\
++  else								\
++    { 								\
++      yyerror ("syntax error: cannot back up");\
++      YYERROR;							\
++    }								\
++while (0)
++
++#define YYTERROR	1
++#define YYERRCODE	256
++
++/* YYLLOC_DEFAULT -- Compute the default location (before the actions
++   are run).  */
++
++#ifndef YYLLOC_DEFAULT
++# define YYLLOC_DEFAULT(Current, Rhs, N)         \
++  Current.first_line   = Rhs[1].first_line;      \
++  Current.first_column = Rhs[1].first_column;    \
++  Current.last_line    = Rhs[N].last_line;       \
++  Current.last_column  = Rhs[N].last_column;
++#endif
++
++/* YYLEX -- calling `yylex' with the right arguments.  */
++
++#ifdef YYLEX_PARAM
++# define YYLEX yylex (YYLEX_PARAM)
++#else
++# define YYLEX yylex ()
++#endif
++
++/* Enable debugging if requested.  */
++#if YYDEBUG
++
++# ifndef YYFPRINTF
++#  include <stdio.h> /* INFRINGES ON USER NAME SPACE */
++#  define YYFPRINTF fprintf
++# endif
++
++# define YYDPRINTF(Args)			\
++do {						\
++  if (yydebug)					\
++    YYFPRINTF Args;				\
++} while (0)
++
++# define YYDSYMPRINT(Args)			\
++do {						\
++  if (yydebug)					\
++    yysymprint Args;				\
++} while (0)
++
++# define YYDSYMPRINTF(Title, Token, Value, Location)		\
++do {								\
++  if (yydebug)							\
++    {								\
++      YYFPRINTF (stderr, "%s ", Title);				\
++      yysymprint (stderr, 					\
++                  Token, Value);	\
++      YYFPRINTF (stderr, "\n");					\
++    }								\
++} while (0)
++
++/*------------------------------------------------------------------.
++| yy_stack_print -- Print the state stack from its BOTTOM up to its |
++| TOP (included).                                                   |
++`------------------------------------------------------------------*/
++
++#if defined (__STDC__) || defined (__cplusplus)
++static void
++yy_stack_print (short *bottom, short *top)
++#else
++static void
++yy_stack_print (bottom, top)
++    short *bottom;
++    short *top;
++#endif
++{
++  YYFPRINTF (stderr, "Stack now");
++  for (/* Nothing. */; bottom <= top; ++bottom)
++    YYFPRINTF (stderr, " %d", *bottom);
++  YYFPRINTF (stderr, "\n");
++}
++
++# define YY_STACK_PRINT(Bottom, Top)				\
++do {								\
++  if (yydebug)							\
++    yy_stack_print ((Bottom), (Top));				\
++} while (0)
++
++
++/*------------------------------------------------.
++| Report that the YYRULE is going to be reduced.  |
++`------------------------------------------------*/
++
++#if defined (__STDC__) || defined (__cplusplus)
++static void
++yy_reduce_print (int yyrule)
++#else
++static void
++yy_reduce_print (yyrule)
++    int yyrule;
++#endif
++{
++  int yyi;
++  unsigned int yylineno = yyrline[yyrule];
++  YYFPRINTF (stderr, "Reducing stack by rule %d (line %u), ",
++             yyrule - 1, yylineno);
++  /* Print the symbols being reduced, and their result.  */
++  for (yyi = yyprhs[yyrule]; 0 <= yyrhs[yyi]; yyi++)
++    YYFPRINTF (stderr, "%s ", yytname [yyrhs[yyi]]);
++  YYFPRINTF (stderr, "-> %s\n", yytname [yyr1[yyrule]]);
++}
++
++# define YY_REDUCE_PRINT(Rule)		\
++do {					\
++  if (yydebug)				\
++    yy_reduce_print (Rule);		\
++} while (0)
++
++/* Nonzero means print parse trace.  It is left uninitialized so that
++   multiple parsers can coexist.  */
++int yydebug;
++#else /* !YYDEBUG */
++# define YYDPRINTF(Args)
++# define YYDSYMPRINT(Args)
++# define YYDSYMPRINTF(Title, Token, Value, Location)
++# define YY_STACK_PRINT(Bottom, Top)
++# define YY_REDUCE_PRINT(Rule)
++#endif /* !YYDEBUG */
++
++
++/* YYINITDEPTH -- initial size of the parser's stacks.  */
++#ifndef	YYINITDEPTH
++# define YYINITDEPTH 200
++#endif
++
++/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
++   if the built-in stack extension method is used).
++
++   Do not make this value too large; the results are undefined if
++   SIZE_MAX < YYSTACK_BYTES (YYMAXDEPTH)
++   evaluated with infinite-precision integer arithmetic.  */
++
++#if YYMAXDEPTH == 0
++# undef YYMAXDEPTH
++#endif
++
++#ifndef YYMAXDEPTH
++# define YYMAXDEPTH 10000
++#endif
++
++
++
++#if YYERROR_VERBOSE
++
++# ifndef yystrlen
++#  if defined (__GLIBC__) && defined (_STRING_H)
++#   define yystrlen strlen
++#  else
++/* Return the length of YYSTR.  */
++static YYSIZE_T
++#   if defined (__STDC__) || defined (__cplusplus)
++yystrlen (const char *yystr)
++#   else
++yystrlen (yystr)
++     const char *yystr;
++#   endif
++{
++  register const char *yys = yystr;
++
++  while (*yys++ != '\0')
++    continue;
++
++  return yys - yystr - 1;
++}
++#  endif
++# endif
++
++# ifndef yystpcpy
++#  if defined (__GLIBC__) && defined (_STRING_H) && defined (_GNU_SOURCE)
++#   define yystpcpy stpcpy
++#  else
++/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
++   YYDEST.  */
++static char *
++#   if defined (__STDC__) || defined (__cplusplus)
++yystpcpy (char *yydest, const char *yysrc)
++#   else
++yystpcpy (yydest, yysrc)
++     char *yydest;
++     const char *yysrc;
++#   endif
++{
++  register char *yyd = yydest;
++  register const char *yys = yysrc;
++
++  while ((*yyd++ = *yys++) != '\0')
++    continue;
++
++  return yyd - 1;
++}
++#  endif
++# endif
++
++#endif /* !YYERROR_VERBOSE */
++
++
++
++#if YYDEBUG
++/*--------------------------------.
++| Print this symbol on YYOUTPUT.  |
++`--------------------------------*/
++
++#if defined (__STDC__) || defined (__cplusplus)
++static void
++yysymprint (FILE *yyoutput, int yytype, YYSTYPE *yyvaluep)
++#else
++static void
++yysymprint (yyoutput, yytype, yyvaluep)
++    FILE *yyoutput;
++    int yytype;
++    YYSTYPE *yyvaluep;
++#endif
++{
++  /* Pacify ``unused variable'' warnings.  */
++  (void) yyvaluep;
++
++  if (yytype < YYNTOKENS)
++    {
++      YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
++# ifdef YYPRINT
++      YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
++# endif
++    }
++  else
++    YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
++
++  switch (yytype)
++    {
++      default:
++        break;
++    }
++  YYFPRINTF (yyoutput, ")");
++}
++
++#endif /* ! YYDEBUG */
++/*-----------------------------------------------.
++| Release the memory associated to this symbol.  |
++`-----------------------------------------------*/
++
++#if defined (__STDC__) || defined (__cplusplus)
++static void
++yydestruct (int yytype, YYSTYPE *yyvaluep)
++#else
++static void
++yydestruct (yytype, yyvaluep)
++    int yytype;
++    YYSTYPE *yyvaluep;
++#endif
++{
++  /* Pacify ``unused variable'' warnings.  */
++  (void) yyvaluep;
++
++  switch (yytype)
++    {
++
++      default:
++        break;
++    }
++}
++
++
++/* Prevent warnings from -Wmissing-prototypes.  */
++
++#ifdef YYPARSE_PARAM
++# if defined (__STDC__) || defined (__cplusplus)
++int yyparse (void *YYPARSE_PARAM);
++# else
++int yyparse ();
++# endif
++#else /* ! YYPARSE_PARAM */
++#if defined (__STDC__) || defined (__cplusplus)
++int yyparse (void);
++#else
++int yyparse ();
++#endif
++#endif /* ! YYPARSE_PARAM */
++
++
++
++/* The lookahead symbol.  */
++int yychar;
++
++/* The semantic value of the lookahead symbol.  */
++YYSTYPE yylval;
++
++/* Number of syntax errors so far.  */
++int yynerrs;
++
++
++
++/*----------.
++| yyparse.  |
++`----------*/
++
++#ifdef YYPARSE_PARAM
++# if defined (__STDC__) || defined (__cplusplus)
++int yyparse (void *YYPARSE_PARAM)
++# else
++int yyparse (YYPARSE_PARAM)
++  void *YYPARSE_PARAM;
++# endif
++#else /* ! YYPARSE_PARAM */
++#if defined (__STDC__) || defined (__cplusplus)
++int
++yyparse (void)
++#else
++int
++yyparse ()
++
++#endif
++#endif
++{
++  
++  register int yystate;
++  register int yyn;
++  int yyresult;
++  /* Number of tokens to shift before error messages enabled.  */
++  int yyerrstatus;
++  /* Lookahead token as an internal (translated) token number.  */
++  int yytoken = 0;
++
++  /* Three stacks and their tools:
++     `yyss': related to states,
++     `yyvs': related to semantic values,
++     `yyls': related to locations.
++
++     Refer to the stacks thru separate pointers, to allow yyoverflow
++     to reallocate them elsewhere.  */
++
++  /* The state stack.  */
++  short	yyssa[YYINITDEPTH];
++  short *yyss = yyssa;
++  register short *yyssp;
++
++  /* The semantic value stack.  */
++  YYSTYPE yyvsa[YYINITDEPTH];
++  YYSTYPE *yyvs = yyvsa;
++  register YYSTYPE *yyvsp;
++
++
++
++#define YYPOPSTACK   (yyvsp--, yyssp--)
++
++  YYSIZE_T yystacksize = YYINITDEPTH;
++
++  /* The variables used to return semantic value and location from the
++     action routines.  */
++  YYSTYPE yyval;
++
++
++  /* When reducing, the number of symbols on the RHS of the reduced
++     rule.  */
++  int yylen;
++
++  YYDPRINTF ((stderr, "Starting parse\n"));
++
++  yystate = 0;
++  yyerrstatus = 0;
++  yynerrs = 0;
++  yychar = YYEMPTY;		/* Cause a token to be read.  */
++
++  /* Initialize stack pointers.
++     Waste one element of value and location stack
++     so that they stay on the same level as the state stack.
++     The wasted elements are never initialized.  */
++
++  yyssp = yyss;
++  yyvsp = yyvs;
++
++  goto yysetstate;
++
++/*------------------------------------------------------------.
++| yynewstate -- Push a new state, which is found in yystate.  |
++`------------------------------------------------------------*/
++ yynewstate:
++  /* In all cases, when you get here, the value and location stacks
++     have just been pushed. so pushing a state here evens the stacks.
++     */
++  yyssp++;
++
++ yysetstate:
++  *yyssp = yystate;
++
++  if (yyss + yystacksize - 1 <= yyssp)
++    {
++      /* Get the current used size of the three stacks, in elements.  */
++      YYSIZE_T yysize = yyssp - yyss + 1;
++
++#ifdef yyoverflow
++      {
++	/* Give user a chance to reallocate the stack. Use copies of
++	   these so that the &'s don't force the real ones into
++	   memory.  */
++	YYSTYPE *yyvs1 = yyvs;
++	short *yyss1 = yyss;
++
++
++	/* Each stack pointer address is followed by the size of the
++	   data in use in that stack, in bytes.  This used to be a
++	   conditional around just the two extra args, but that might
++	   be undefined if yyoverflow is a macro.  */
++	yyoverflow ("parser stack overflow",
++		    &yyss1, yysize * sizeof (*yyssp),
++		    &yyvs1, yysize * sizeof (*yyvsp),
++
++		    &yystacksize);
++
++	yyss = yyss1;
++	yyvs = yyvs1;
++      }
++#else /* no yyoverflow */
++# ifndef YYSTACK_RELOCATE
++      goto yyoverflowlab;
++# else
++      /* Extend the stack our own way.  */
++      if (YYMAXDEPTH <= yystacksize)
++	goto yyoverflowlab;
++      yystacksize *= 2;
++      if (YYMAXDEPTH < yystacksize)
++	yystacksize = YYMAXDEPTH;
++
++      {
++	short *yyss1 = yyss;
++	union yyalloc *yyptr =
++	  (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
++	if (! yyptr)
++	  goto yyoverflowlab;
++	YYSTACK_RELOCATE (yyss);
++	YYSTACK_RELOCATE (yyvs);
++
++#  undef YYSTACK_RELOCATE
++	if (yyss1 != yyssa)
++	  YYSTACK_FREE (yyss1);
++      }
++# endif
++#endif /* no yyoverflow */
++
++      yyssp = yyss + yysize - 1;
++      yyvsp = yyvs + yysize - 1;
++
++
++      YYDPRINTF ((stderr, "Stack size increased to %lu\n",
++		  (unsigned long int) yystacksize));
++
++      if (yyss + yystacksize - 1 <= yyssp)
++	YYABORT;
++    }
++
++  YYDPRINTF ((stderr, "Entering state %d\n", yystate));
++
++  goto yybackup;
++
++/*-----------.
++| yybackup.  |
++`-----------*/
++yybackup:
++
++/* Do appropriate processing given the current state.  */
++/* Read a lookahead token if we need one and don't already have one.  */
++/* yyresume: */
++
++  /* First try to decide what to do without reference to lookahead token.  */
++
++  yyn = yypact[yystate];
++  if (yyn == YYPACT_NINF)
++    goto yydefault;
++
++  /* Not known => get a lookahead token if don't already have one.  */
++
++  /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol.  */
++  if (yychar == YYEMPTY)
++    {
++      YYDPRINTF ((stderr, "Reading a token: "));
++      yychar = YYLEX;
++    }
++
++  if (yychar <= YYEOF)
++    {
++      yychar = yytoken = YYEOF;
++      YYDPRINTF ((stderr, "Now at end of input.\n"));
++    }
++  else
++    {
++      yytoken = YYTRANSLATE (yychar);
++      YYDSYMPRINTF ("Next token is", yytoken, &yylval, &yylloc);
++    }
++
++  /* If the proper action on seeing token YYTOKEN is to reduce or to
++     detect an error, take that action.  */
++  yyn += yytoken;
++  if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
++    goto yydefault;
++  yyn = yytable[yyn];
++  if (yyn <= 0)
++    {
++      if (yyn == 0 || yyn == YYTABLE_NINF)
++	goto yyerrlab;
++      yyn = -yyn;
++      goto yyreduce;
++    }
++
++  if (yyn == YYFINAL)
++    YYACCEPT;
++
++  /* Shift the lookahead token.  */
++  YYDPRINTF ((stderr, "Shifting token %s, ", yytname[yytoken]));
++
++  /* Discard the token being shifted unless it is eof.  */
++  if (yychar != YYEOF)
++    yychar = YYEMPTY;
++
++  *++yyvsp = yylval;
++
++
++  /* Count tokens shifted since error; after three, turn off error
++     status.  */
++  if (yyerrstatus)
++    yyerrstatus--;
++
++  yystate = yyn;
++  goto yynewstate;
++
++
++/*-----------------------------------------------------------.
++| yydefault -- do the default action for the current state.  |
++`-----------------------------------------------------------*/
++yydefault:
++  yyn = yydefact[yystate];
++  if (yyn == 0)
++    goto yyerrlab;
++  goto yyreduce;
++
++
++/*-----------------------------.
++| yyreduce -- Do a reduction.  |
++`-----------------------------*/
++yyreduce:
++  /* yyn is the number of a rule to reduce with.  */
++  yylen = yyr2[yyn];
++
++  /* If YYLEN is nonzero, implement the default value of the action:
++     `$$ = $1'.
++
++     Otherwise, the following line sets YYVAL to garbage.
++     This behavior is undocumented and Bison
++     users should not rely upon it.  Assigning to YYVAL
++     unconditionally makes the parser a bit smaller, and it avoids a
++     GCC warning that YYVAL may be used uninitialized.  */
++  yyval = yyvsp[1-yylen];
++
++
++  YY_REDUCE_PRINT (yyn);
++  switch (yyn)
++    {
++        case 8:
++
++    { zconfprint("unexpected 'endmenu' statement"); ;}
++    break;
++
++  case 9:
++
++    { zconfprint("unexpected 'endif' statement"); ;}
++    break;
++
++  case 10:
++
++    { zconfprint("unexpected 'endchoice' statement"); ;}
++    break;
++
++  case 11:
++
++    { zconfprint("syntax error"); yyerrok; ;}
++    break;
++
++  case 18:
++
++    {
++	struct symbol *sym = sym_lookup(yyvsp[-1].string, 0);
++	sym->flags |= SYMBOL_OPTIONAL;
++	menu_add_entry(sym);
++	printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), yyvsp[-1].string);
++;}
++    break;
++
++  case 19:
++
++    {
++	menu_end_entry();
++	printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 20:
++
++    {
++	struct symbol *sym = sym_lookup(yyvsp[-1].string, 0);
++	sym->flags |= SYMBOL_OPTIONAL;
++	menu_add_entry(sym);
++	printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), yyvsp[-1].string);
++;}
++    break;
++
++  case 21:
++
++    {
++	if (current_entry->prompt)
++		current_entry->prompt->type = P_MENU;
++	else
++		zconfprint("warning: menuconfig statement without prompt");
++	menu_end_entry();
++	printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 27:
++
++    {
++	menu_set_type(S_TRISTATE);
++	printd(DEBUG_PARSE, "%s:%d:tristate\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 28:
++
++    {
++	menu_add_expr(P_DEFAULT, yyvsp[-2].expr, yyvsp[-1].expr);
++	menu_set_type(S_TRISTATE);
++	printd(DEBUG_PARSE, "%s:%d:def_boolean\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 29:
++
++    {
++	menu_set_type(S_BOOLEAN);
++	printd(DEBUG_PARSE, "%s:%d:boolean\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 30:
++
++    {
++	menu_add_expr(P_DEFAULT, yyvsp[-2].expr, yyvsp[-1].expr);
++	menu_set_type(S_BOOLEAN);
++	printd(DEBUG_PARSE, "%s:%d:def_boolean\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 31:
++
++    {
++	menu_set_type(S_INT);
++	printd(DEBUG_PARSE, "%s:%d:int\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 32:
++
++    {
++	menu_set_type(S_HEX);
++	printd(DEBUG_PARSE, "%s:%d:hex\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 33:
++
++    {
++	menu_set_type(S_STRING);
++	printd(DEBUG_PARSE, "%s:%d:string\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 34:
++
++    {
++	menu_add_prompt(P_PROMPT, yyvsp[-2].string, yyvsp[-1].expr);
++	printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 35:
++
++    {
++	menu_add_expr(P_DEFAULT, yyvsp[-2].expr, yyvsp[-1].expr);
++	printd(DEBUG_PARSE, "%s:%d:default\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 36:
++
++    {
++	menu_add_symbol(P_SELECT, sym_lookup(yyvsp[-2].string, 0), yyvsp[-1].expr);
++	printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 37:
++
++    {
++	menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,yyvsp[-3].symbol, yyvsp[-2].symbol), yyvsp[-1].expr);
++	printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 38:
++
++    {
++	struct symbol *sym = sym_lookup(NULL, 0);
++	sym->flags |= SYMBOL_CHOICE;
++	menu_add_entry(sym);
++	menu_add_expr(P_CHOICE, NULL, NULL);
++	printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 39:
++
++    {
++	menu_end_entry();
++	menu_add_menu();
++;}
++    break;
++
++  case 40:
++
++    {
++	if (zconf_endtoken(yyvsp[0].token, T_CHOICE, T_ENDCHOICE)) {
++		menu_end_menu();
++		printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno());
++	}
++;}
++    break;
++
++  case 42:
++
++    {
++	printf("%s:%d: missing 'endchoice' for this 'choice' statement\n", current_menu->file->name, current_menu->lineno);
++	zconfnerrs++;
++;}
++    break;
++
++  case 48:
++
++    {
++	menu_add_prompt(P_PROMPT, yyvsp[-2].string, yyvsp[-1].expr);
++	printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 49:
++
++    {
++	menu_set_type(S_TRISTATE);
++	printd(DEBUG_PARSE, "%s:%d:tristate\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 50:
++
++    {
++	menu_set_type(S_BOOLEAN);
++	printd(DEBUG_PARSE, "%s:%d:boolean\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 51:
++
++    {
++	current_entry->sym->flags |= SYMBOL_OPTIONAL;
++	printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 52:
++
++    {
++	menu_add_symbol(P_DEFAULT, sym_lookup(yyvsp[-2].string, 0), yyvsp[-1].expr);
++	printd(DEBUG_PARSE, "%s:%d:default\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 55:
++
++    {
++	printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
++	menu_add_entry(NULL);
++	menu_add_dep(yyvsp[-1].expr);
++	menu_end_entry();
++	menu_add_menu();
++;}
++    break;
++
++  case 56:
++
++    {
++	if (zconf_endtoken(yyvsp[0].token, T_IF, T_ENDIF)) {
++		menu_end_menu();
++		printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno());
++	}
++;}
++    break;
++
++  case 58:
++
++    {
++	printf("%s:%d: missing 'endif' for this 'if' statement\n", current_menu->file->name, current_menu->lineno);
++	zconfnerrs++;
++;}
++    break;
++
++  case 63:
++
++    {
++	menu_add_entry(NULL);
++	menu_add_prop(P_MENU, yyvsp[-1].string, NULL, NULL);
++	printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 64:
++
++    {
++	menu_end_entry();
++	menu_add_menu();
++;}
++    break;
++
++  case 65:
++
++    {
++	if (zconf_endtoken(yyvsp[0].token, T_MENU, T_ENDMENU)) {
++		menu_end_menu();
++		printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno());
++	}
++;}
++    break;
++
++  case 67:
++
++    {
++	printf("%s:%d: missing 'endmenu' for this 'menu' statement\n", current_menu->file->name, current_menu->lineno);
++	zconfnerrs++;
++;}
++    break;
++
++  case 72:
++
++    { zconfprint("invalid menu option"); yyerrok; ;}
++    break;
++
++  case 73:
++
++    {
++	yyval.string = yyvsp[-1].string;
++	printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), yyvsp[-1].string);
++;}
++    break;
++
++  case 74:
++
++    {
++	zconf_nextfile(yyvsp[0].string);
++;}
++    break;
++
++  case 75:
++
++    {
++	menu_add_entry(NULL);
++	menu_add_prop(P_COMMENT, yyvsp[-1].string, NULL, NULL);
++	printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 76:
++
++    {
++	menu_end_entry();
++;}
++    break;
++
++  case 77:
++
++    {
++	printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
++	zconf_starthelp();
++;}
++    break;
++
++  case 78:
++
++    {
++	current_entry->sym->help = yyvsp[0].string;
++;}
++    break;
++
++  case 82:
++
++    {
++	menu_add_dep(yyvsp[-1].expr);
++	printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 83:
++
++    {
++	menu_add_dep(yyvsp[-1].expr);
++	printd(DEBUG_PARSE, "%s:%d:depends\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 84:
++
++    {
++	menu_add_dep(yyvsp[-1].expr);
++	printd(DEBUG_PARSE, "%s:%d:requires\n", zconf_curname(), zconf_lineno());
++;}
++    break;
++
++  case 86:
++
++    {
++	menu_add_prop(P_PROMPT, yyvsp[-1].string, NULL, yyvsp[0].expr);
++;}
++    break;
++
++  case 89:
++
++    { yyval.token = T_ENDMENU; ;}
++    break;
++
++  case 90:
++
++    { yyval.token = T_ENDCHOICE; ;}
++    break;
++
++  case 91:
++
++    { yyval.token = T_ENDIF; ;}
++    break;
++
++  case 94:
++
++    { yyval.expr = NULL; ;}
++    break;
++
++  case 95:
++
++    { yyval.expr = yyvsp[0].expr; ;}
++    break;
++
++  case 96:
++
++    { yyval.expr = expr_alloc_symbol(yyvsp[0].symbol); ;}
++    break;
++
++  case 97:
++
++    { yyval.expr = expr_alloc_comp(E_EQUAL, yyvsp[-2].symbol, yyvsp[0].symbol); ;}
++    break;
++
++  case 98:
++
++    { yyval.expr = expr_alloc_comp(E_UNEQUAL, yyvsp[-2].symbol, yyvsp[0].symbol); ;}
++    break;
++
++  case 99:
++
++    { yyval.expr = yyvsp[-1].expr; ;}
++    break;
++
++  case 100:
++
++    { yyval.expr = expr_alloc_one(E_NOT, yyvsp[0].expr); ;}
++    break;
++
++  case 101:
++
++    { yyval.expr = expr_alloc_two(E_OR, yyvsp[-2].expr, yyvsp[0].expr); ;}
++    break;
++
++  case 102:
++
++    { yyval.expr = expr_alloc_two(E_AND, yyvsp[-2].expr, yyvsp[0].expr); ;}
++    break;
++
++  case 103:
++
++    { yyval.symbol = sym_lookup(yyvsp[0].string, 0); free(yyvsp[0].string); ;}
++    break;
++
++  case 104:
++
++    { yyval.symbol = sym_lookup(yyvsp[0].string, 1); free(yyvsp[0].string); ;}
++    break;
++
++
++    }
++
++/* Line 999 of yacc.c.  */
++
++
++  yyvsp -= yylen;
++  yyssp -= yylen;
++
++
++  YY_STACK_PRINT (yyss, yyssp);
++
++  *++yyvsp = yyval;
++
++
++  /* Now `shift' the result of the reduction.  Determine what state
++     that goes to, based on the state we popped back to and the rule
++     number reduced by.  */
++
++  yyn = yyr1[yyn];
++
++  yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
++  if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
++    yystate = yytable[yystate];
++  else
++    yystate = yydefgoto[yyn - YYNTOKENS];
++
++  goto yynewstate;
++
++
++/*------------------------------------.
++| yyerrlab -- here on detecting error |
++`------------------------------------*/
++yyerrlab:
++  /* If not already recovering from an error, report this error.  */
++  if (!yyerrstatus)
++    {
++      ++yynerrs;
++#if YYERROR_VERBOSE
++      yyn = yypact[yystate];
++
++      if (YYPACT_NINF < yyn && yyn < YYLAST)
++	{
++	  YYSIZE_T yysize = 0;
++	  int yytype = YYTRANSLATE (yychar);
++	  char *yymsg;
++	  int yyx, yycount;
++
++	  yycount = 0;
++	  /* Start YYX at -YYN if negative to avoid negative indexes in
++	     YYCHECK.  */
++	  for (yyx = yyn < 0 ? -yyn : 0;
++	       yyx < (int) (sizeof (yytname) / sizeof (char *)); yyx++)
++	    if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
++	      yysize += yystrlen (yytname[yyx]) + 15, yycount++;
++	  yysize += yystrlen ("syntax error, unexpected ") + 1;
++	  yysize += yystrlen (yytname[yytype]);
++	  yymsg = (char *) YYSTACK_ALLOC (yysize);
++	  if (yymsg != 0)
++	    {
++	      char *yyp = yystpcpy (yymsg, "syntax error, unexpected ");
++	      yyp = yystpcpy (yyp, yytname[yytype]);
++
++	      if (yycount < 5)
++		{
++		  yycount = 0;
++		  for (yyx = yyn < 0 ? -yyn : 0;
++		       yyx < (int) (sizeof (yytname) / sizeof (char *));
++		       yyx++)
++		    if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
++		      {
++			const char *yyq = ! yycount ? ", expecting " : " or ";
++			yyp = yystpcpy (yyp, yyq);
++			yyp = yystpcpy (yyp, yytname[yyx]);
++			yycount++;
++		      }
++		}
++	      yyerror (yymsg);
++	      YYSTACK_FREE (yymsg);
++	    }
++	  else
++	    yyerror ("syntax error; also virtual memory exhausted");
++	}
++      else
++#endif /* YYERROR_VERBOSE */
++	yyerror ("syntax error");
++    }
++
++
++
++  if (yyerrstatus == 3)
++    {
++      /* If just tried and failed to reuse lookahead token after an
++	 error, discard it.  */
++
++      /* Return failure if at end of input.  */
++      if (yychar == YYEOF)
++        {
++	  /* Pop the error token.  */
++          YYPOPSTACK;
++	  /* Pop the rest of the stack.  */
++	  while (yyss < yyssp)
++	    {
++	      YYDSYMPRINTF ("Error: popping", yystos[*yyssp], yyvsp, yylsp);
++	      yydestruct (yystos[*yyssp], yyvsp);
++	      YYPOPSTACK;
++	    }
++	  YYABORT;
++        }
++
++      YYDSYMPRINTF ("Error: discarding", yytoken, &yylval, &yylloc);
++      yydestruct (yytoken, &yylval);
++      yychar = YYEMPTY;
++
++    }
++
++  /* Else will try to reuse lookahead token after shifting the error
++     token.  */
++  goto yyerrlab1;
++
++
++/*----------------------------------------------------.
++| yyerrlab1 -- error raised explicitly by an action.  |
++`----------------------------------------------------*/
++yyerrlab1:
++  yyerrstatus = 3;	/* Each real token shifted decrements this.  */
++
++  for (;;)
++    {
++      yyn = yypact[yystate];
++      if (yyn != YYPACT_NINF)
++	{
++	  yyn += YYTERROR;
++	  if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
++	    {
++	      yyn = yytable[yyn];
++	      if (0 < yyn)
++		break;
++	    }
++	}
++
++      /* Pop the current state because it cannot handle the error token.  */
++      if (yyssp == yyss)
++	YYABORT;
++
++      YYDSYMPRINTF ("Error: popping", yystos[*yyssp], yyvsp, yylsp);
++      yydestruct (yystos[yystate], yyvsp);
++      yyvsp--;
++      yystate = *--yyssp;
++
++      YY_STACK_PRINT (yyss, yyssp);
++    }
++
++  if (yyn == YYFINAL)
++    YYACCEPT;
++
++  YYDPRINTF ((stderr, "Shifting error token, "));
++
++  *++yyvsp = yylval;
++
++
++  yystate = yyn;
++  goto yynewstate;
++
++
++/*-------------------------------------.
++| yyacceptlab -- YYACCEPT comes here.  |
++`-------------------------------------*/
++yyacceptlab:
++  yyresult = 0;
++  goto yyreturn;
++
++/*-----------------------------------.
++| yyabortlab -- YYABORT comes here.  |
++`-----------------------------------*/
++yyabortlab:
++  yyresult = 1;
++  goto yyreturn;
++
++#ifndef yyoverflow
++/*----------------------------------------------.
++| yyoverflowlab -- parser overflow comes here.  |
++`----------------------------------------------*/
++yyoverflowlab:
++  yyerror ("parser stack overflow");
++  yyresult = 2;
++  /* Fall through.  */
++#endif
++
++yyreturn:
++#ifndef yyoverflow
++  if (yyss != yyssa)
++    YYSTACK_FREE (yyss);
++#endif
++  return yyresult;
++}
++
++
++
++
++
++void conf_parse(const char *name)
++{
++	struct symbol *sym;
++	int i;
++
++	zconf_initscan(name);
++
++	sym_init();
++	menu_init();
++	modules_sym = sym_lookup("MODULES", 0);
++	rootmenu.prompt = menu_add_prop(P_MENU, "Linux Kernel Configuration", NULL, NULL);
++
++	//zconfdebug = 1;
++	zconfparse();
++	if (zconfnerrs)
++		exit(1);
++	menu_finalize(&rootmenu);
++	for_all_symbols(i, sym) {
++                if (!(sym->flags & SYMBOL_CHECKED) && sym_check_deps(sym))
++                        printf("\n");
++		else
++			sym->flags |= SYMBOL_CHECK_DONE;
++        }
++
++	sym_change_count = 1;
++}
++
++const char *zconf_tokenname(int token)
++{
++	switch (token) {
++	case T_MENU:		return "menu";
++	case T_ENDMENU:		return "endmenu";
++	case T_CHOICE:		return "choice";
++	case T_ENDCHOICE:	return "endchoice";
++	case T_IF:		return "if";
++	case T_ENDIF:		return "endif";
++	}
++	return "<token>";
++}
++
++static bool zconf_endtoken(int token, int starttoken, int endtoken)
++{
++	if (token != endtoken) {
++		zconfprint("unexpected '%s' within %s block", zconf_tokenname(token), zconf_tokenname(starttoken));
++		zconfnerrs++;
++		return false;
++	}
++	if (current_menu->file != current_file) {
++		zconfprint("'%s' in different file than '%s'", zconf_tokenname(token), zconf_tokenname(starttoken));
++		zconfprint("location of the '%s'", zconf_tokenname(starttoken));
++		zconfnerrs++;
++		return false;
++	}
++	return true;
++}
++
++static void zconfprint(const char *err, ...)
++{
++	va_list ap;
++
++	fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno() + 1);
++	va_start(ap, err);
++	vfprintf(stderr, err, ap);
++	va_end(ap);
++	fprintf(stderr, "\n");
++}
++
++static void zconferror(const char *err)
++{
++	fprintf(stderr, "%s:%d: %s\n", zconf_curname(), zconf_lineno() + 1, err);
++}
++
++void print_quoted_string(FILE *out, const char *str)
++{
++	const char *p;
++	int len;
++
++	putc('"', out);
++	while ((p = strchr(str, '"'))) {
++		len = p - str;
++		if (len)
++			fprintf(out, "%.*s", len, str);
++		fputs("\\\"", out);
++		str = p + 1;
++	}
++	fputs(str, out);
++	putc('"', out);
++}
++
++void print_symbol(FILE *out, struct menu *menu)
++{
++	struct symbol *sym = menu->sym;
++	struct property *prop;
++
++	if (sym_is_choice(sym))
++		fprintf(out, "choice\n");
++	else
++		fprintf(out, "config %s\n", sym->name);
++	switch (sym->type) {
++	case S_BOOLEAN:
++		fputs("  boolean\n", out);
++		break;
++	case S_TRISTATE:
++		fputs("  tristate\n", out);
++		break;
++	case S_STRING:
++		fputs("  string\n", out);
++		break;
++	case S_INT:
++		fputs("  integer\n", out);
++		break;
++	case S_HEX:
++		fputs("  hex\n", out);
++		break;
++	default:
++		fputs("  ???\n", out);
++		break;
++	}
++	for (prop = sym->prop; prop; prop = prop->next) {
++		if (prop->menu != menu)
++			continue;
++		switch (prop->type) {
++		case P_PROMPT:
++			fputs("  prompt ", out);
++			print_quoted_string(out, prop->text);
++			if (!expr_is_yes(prop->visible.expr)) {
++				fputs(" if ", out);
++				expr_fprint(prop->visible.expr, out);
++			}
++			fputc('\n', out);
++			break;
++		case P_DEFAULT:
++			fputs( "  default ", out);
++			expr_fprint(prop->expr, out);
++			if (!expr_is_yes(prop->visible.expr)) {
++				fputs(" if ", out);
++				expr_fprint(prop->visible.expr, out);
++			}
++			fputc('\n', out);
++			break;
++		case P_CHOICE:
++			fputs("  #choice value\n", out);
++			break;
++		default:
++			fprintf(out, "  unknown prop %d!\n", prop->type);
++			break;
++		}
++	}
++	if (sym->help) {
++		int len = strlen(sym->help);
++		while (sym->help[--len] == '\n')
++			sym->help[len] = 0;
++		fprintf(out, "  help\n%s\n", sym->help);
++	}
++	fputc('\n', out);
++}
++
++void zconfdump(FILE *out)
++{
++	struct property *prop;
++	struct symbol *sym;
++	struct menu *menu;
++
++	menu = rootmenu.list;
++	while (menu) {
++		if ((sym = menu->sym))
++			print_symbol(out, menu);
++		else if ((prop = menu->prompt)) {
++			switch (prop->type) {
++			case P_COMMENT:
++				fputs("\ncomment ", out);
++				print_quoted_string(out, prop->text);
++				fputs("\n", out);
++				break;
++			case P_MENU:
++				fputs("\nmenu ", out);
++				print_quoted_string(out, prop->text);
++				fputs("\n", out);
++				break;
++			default:
++				;
++			}
++			if (!expr_is_yes(prop->visible.expr)) {
++				fputs("  depends ", out);
++				expr_fprint(prop->visible.expr, out);
++				fputc('\n', out);
++			}
++			fputs("\n", out);
++		}
++
++		if (menu->list)
++			menu = menu->list;
++		else if (menu->next)
++			menu = menu->next;
++		else while ((menu = menu->parent)) {
++			if (menu->prompt && menu->prompt->type == P_MENU)
++				fputs("\nendmenu\n", out);
++			if (menu->next) {
++				menu = menu->next;
++				break;
++			}
++		}
++	}
++}
++
++#include "lex.zconf.c"
++#include "util.c"
++#include "confdata.c"
++#include "expr.c"
++#include "symbol.c"
++#include "menu.c"
++
++
+diff -Nurp pristine-linux-2.6.12/scripts/kconfig/zconf.tab.h linux-2.6.12-xen/scripts/kconfig/zconf.tab.h
+--- pristine-linux-2.6.12/scripts/kconfig/zconf.tab.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/kconfig/zconf.tab.h	2006-03-05 23:54:55.070387542 +0100
+@@ -0,0 +1,125 @@
++/* A Bison parser, made from zconf.y, by GNU bison 1.75.  */
++
++/* Skeleton parser for Yacc-like parsing with Bison,
++   Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002 Free Software Foundation, Inc.
++
++   This program is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 2, or (at your option)
++   any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 59 Temple Place - Suite 330,
++   Boston, MA 02111-1307, USA.  */
++
++/* As a special exception, when this file is copied by Bison into a
++   Bison output file, you may use that output file without restriction.
++   This special exception was added by the Free Software Foundation
++   in version 1.24 of Bison.  */
++
++#ifndef BISON_ZCONF_TAB_H
++# define BISON_ZCONF_TAB_H
++
++/* Tokens.  */
++#ifndef YYTOKENTYPE
++# define YYTOKENTYPE
++   /* Put the tokens into the symbol table, so that GDB and other debuggers
++      know about them.  */
++   enum yytokentype {
++     T_MAINMENU = 258,
++     T_MENU = 259,
++     T_ENDMENU = 260,
++     T_SOURCE = 261,
++     T_CHOICE = 262,
++     T_ENDCHOICE = 263,
++     T_COMMENT = 264,
++     T_CONFIG = 265,
++     T_HELP = 266,
++     T_HELPTEXT = 267,
++     T_IF = 268,
++     T_ENDIF = 269,
++     T_DEPENDS = 270,
++     T_REQUIRES = 271,
++     T_OPTIONAL = 272,
++     T_PROMPT = 273,
++     T_DEFAULT = 274,
++     T_TRISTATE = 275,
++     T_BOOLEAN = 276,
++     T_INT = 277,
++     T_HEX = 278,
++     T_WORD = 279,
++     T_STRING = 280,
++     T_UNEQUAL = 281,
++     T_EOF = 282,
++     T_EOL = 283,
++     T_CLOSE_PAREN = 284,
++     T_OPEN_PAREN = 285,
++     T_ON = 286,
++     T_OR = 287,
++     T_AND = 288,
++     T_EQUAL = 289,
++     T_NOT = 290
++   };
++#endif
++#define T_MAINMENU 258
++#define T_MENU 259
++#define T_ENDMENU 260
++#define T_SOURCE 261
++#define T_CHOICE 262
++#define T_ENDCHOICE 263
++#define T_COMMENT 264
++#define T_CONFIG 265
++#define T_HELP 266
++#define T_HELPTEXT 267
++#define T_IF 268
++#define T_ENDIF 269
++#define T_DEPENDS 270
++#define T_REQUIRES 271
++#define T_OPTIONAL 272
++#define T_PROMPT 273
++#define T_DEFAULT 274
++#define T_TRISTATE 275
++#define T_BOOLEAN 276
++#define T_INT 277
++#define T_HEX 278
++#define T_WORD 279
++#define T_STRING 280
++#define T_UNEQUAL 281
++#define T_EOF 282
++#define T_EOL 283
++#define T_CLOSE_PAREN 284
++#define T_OPEN_PAREN 285
++#define T_ON 286
++#define T_OR 287
++#define T_AND 288
++#define T_EQUAL 289
++#define T_NOT 290
++
++
++
++
++#ifndef YYSTYPE
++#line 33 "zconf.y"
++typedef union {
++	int token;
++	char *string;
++	struct symbol *symbol;
++	struct expr *expr;
++	struct menu *menu;
++} yystype;
++/* Line 1281 of /usr/share/bison/yacc.c.  */
++#line 118 "zconf.tab.h"
++# define YYSTYPE yystype
++#endif
++
++extern YYSTYPE zconflval;
++
++
++#endif /* not BISON_ZCONF_TAB_H */
++
+Binary files pristine-linux-2.6.12/scripts/kconfig/zconf.tab.o and linux-2.6.12-xen/scripts/kconfig/zconf.tab.o differ.
+diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.zconf.tab.o.cmd linux-2.6.12-xen/scripts/kconfig/.zconf.tab.o.cmd
+--- pristine-linux-2.6.12/scripts/kconfig/.zconf.tab.o.cmd	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.12-xen/scripts/kconfig/.zconf.tab.o.cmd	2006-03-05 23:55:05.829800206 +0100
+@@ -0,0 +1,80 @@
++cmd_scripts/kconfig/zconf.tab.o := gcc -Wp,-MD,scripts/kconfig/.zconf.tab.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer      -Iscripts/kconfig -c -o scripts/kconfig/zconf.tab.o scripts/kconfig/zconf.tab.c
++
++deps_scripts/kconfig/zconf.tab.o := \
++  scripts/kconfig/zconf.tab.c \
++  /usr/include/ctype.h \
++  /usr/include/features.h \
++  /usr/include/sys/cdefs.h \
++  /usr/include/gnu/stubs.h \
++  /usr/include/bits/types.h \
++  /usr/include/bits/wordsize.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
++  /usr/include/bits/typesizes.h \
++  /usr/include/endian.h \
++  /usr/include/bits/endian.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
++  /usr/include/stdio.h \
++  /usr/include/libio.h \
++  /usr/include/_G_config.h \
++  /usr/include/wchar.h \
++  /usr/include/bits/wchar.h \
++  /usr/include/gconv.h \
++  /usr/include/bits/stdio_lim.h \
++  /usr/include/bits/sys_errlist.h \
++  /usr/include/bits/stdio.h \
++  /usr/include/stdlib.h \
++  /usr/include/sys/types.h \
++  /usr/include/time.h \
++  /usr/include/sys/select.h \
++  /usr/include/bits/select.h \
++  /usr/include/bits/sigset.h \
++  /usr/include/bits/time.h \
++  /usr/include/sys/sysmacros.h \
++  /usr/include/bits/pthreadtypes.h \
++  /usr/include/bits/sched.h \
++  /usr/include/alloca.h \
++  /usr/include/string.h \
++  /usr/include/bits/string.h \
++  /usr/include/bits/string2.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdbool.h \
++  scripts/kconfig/lkc.h \
++  scripts/kconfig/expr.h \
++  /usr/include/libintl.h \
++  /usr/include/locale.h \
++  /usr/include/bits/locale.h \
++  scripts/kconfig/lkc_proto.h \
++  scripts/kconfig/lex.zconf.c \
++  /usr/include/errno.h \
++  /usr/include/bits/errno.h \
++  /usr/include/linux/errno.h \
++  /usr/include/asm/errno.h \
++  /usr/include/asm-i486/errno.h \
++  /usr/include/asm-generic/errno.h \
++  /usr/include/asm-generic/errno-base.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/limits.h \
++  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/syslimits.h \
++  /usr/include/limits.h \
++  /usr/include/bits/posix1_lim.h \
++  /usr/include/bits/local_lim.h \
++  /usr/include/linux/limits.h \
++  /usr/include/bits/posix2_lim.h \
++  /usr/include/unistd.h \
++  /usr/include/bits/posix_opt.h \
++  /usr/include/bits/confname.h \
++  /usr/include/getopt.h \
++  scripts/kconfig/util.c \
++  scripts/kconfig/confdata.c \
++    $(wildcard include/config/.h) \
++    $(wildcard include/config/notimestamp.h) \
++  /usr/include/sys/stat.h \
++  /usr/include/bits/stat.h \
++  scripts/kconfig/expr.c \
++  scripts/kconfig/symbol.c \
++  /usr/include/regex.h \
++  /usr/include/sys/utsname.h \
++  /usr/include/bits/utsname.h \
++  scripts/kconfig/menu.c \
++
++scripts/kconfig/zconf.tab.o: $(deps_scripts/kconfig/zconf.tab.o)
++
++$(deps_scripts/kconfig/zconf.tab.o):
+diff -Nurp pristine-linux-2.6.12/security/keys/keyring.c linux-2.6.12-xen/security/keys/keyring.c
+--- pristine-linux-2.6.12/security/keys/keyring.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/security/keys/keyring.c	2006-03-05 23:54:36.945059715 +0100
+@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
+ 
+ 	if (keyring->description) {
+ 		write_lock(&keyring_name_lock);
+-		list_del(&keyring->type_data.link);
++
++		if (keyring->type_data.link.next != NULL &&
++		    !list_empty(&keyring->type_data.link))
++			list_del(&keyring->type_data.link);
++
+ 		write_unlock(&keyring_name_lock);
+ 	}
+ 
+diff -Nurp pristine-linux-2.6.12/security/keys/process_keys.c linux-2.6.12-xen/security/keys/process_keys.c
+--- pristine-linux-2.6.12/security/keys/process_keys.c	2005-06-17 21:48:29.000000000 +0200
++++ linux-2.6.12-xen/security/keys/process_keys.c	2006-03-05 23:54:36.946059568 +0100
+@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
+ 		keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
+ 		if (IS_ERR(keyring)) {
+ 			ret = PTR_ERR(keyring);
+-			goto error;
++			goto error2;
+ 		}
+ 	}
+ 	else if (IS_ERR(keyring)) {

Deleted: branches/20060307-fern/patches/linux-2.6.12-xen.patch
===================================================================
--- branches/20060307-fern/patches/linux-2.6.12-xen.patch	2006-03-07 11:23:14 UTC (rev 86)
+++ branches/20060307-fern/patches/linux-2.6.12-xen.patch	2006-03-07 11:26:46 UTC (rev 87)
@@ -1,110893 +0,0 @@
-diff -Nurp pristine-linux-2.6.12/arch/i386/Kconfig linux-2.6.12-xen/arch/i386/Kconfig
---- pristine-linux-2.6.12/arch/i386/Kconfig	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/Kconfig	2006-02-25 00:12:33.828985656 +0100
-@@ -487,6 +487,19 @@ config SMP
- 
- 	  If you don't know what to do here, say N.
- 
-+config SMP_ALTERNATIVES
-+	bool "SMP alternatives support (EXPERIMENTAL)"
-+	depends on SMP && EXPERIMENTAL
-+	help
-+	  Try to reduce the overhead of running an SMP kernel on a uniprocessor
-+	  host slightly by replacing certain key instruction sequences
-+	  according to whether we currently have more than one CPU available.
-+	  This should provide a noticeable boost to performance when
-+	  running SMP kernels on UP machines, and have negligible impact
-+	  when running on an true SMP host.
-+
-+          If unsure, say N.
-+	  
- config NR_CPUS
- 	int "Maximum number of CPUs (2-255)"
- 	range 2 255
-@@ -1226,6 +1239,15 @@ config SCx200
- 	  This support is also available as a module.  If compiled as a
- 	  module, it will be called scx200.
- 
-+config HOTPLUG_CPU
-+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
-+	depends on SMP && HOTPLUG && EXPERIMENTAL
-+	---help---
-+	  Say Y here to experiment with turning CPUs off and on.  CPUs
-+	  can be controlled through /sys/devices/system/cpu.
-+
-+	  Say N.
-+
- source "drivers/pcmcia/Kconfig"
- 
- source "drivers/pci/hotplug/Kconfig"
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/apic.c linux-2.6.12-xen/arch/i386/kernel/apic.c
---- pristine-linux-2.6.12/arch/i386/kernel/apic.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/apic.c	2006-02-25 00:12:33.792991082 +0100
-@@ -26,6 +26,7 @@
- #include <linux/mc146818rtc.h>
- #include <linux/kernel_stat.h>
- #include <linux/sysdev.h>
-+#include <linux/cpu.h>
- 
- #include <asm/atomic.h>
- #include <asm/smp.h>
-@@ -1048,7 +1049,7 @@ void __init setup_secondary_APIC_clock(v
- 	setup_APIC_timer(calibration_result);
- }
- 
--void __init disable_APIC_timer(void)
-+void __devinit disable_APIC_timer(void)
- {
- 	if (using_apic_timer) {
- 		unsigned long v;
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/cpu/cpufreq/powernow-k8.c linux-2.6.12-xen/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
---- pristine-linux-2.6.12/arch/i386/kernel/cpu/cpufreq/powernow-k8.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/cpu/cpufreq/powernow-k8.c	2006-02-25 00:12:30.020559696 +0100
-@@ -44,7 +44,7 @@
- 
- #define PFX "powernow-k8: "
- #define BFX PFX "BIOS error: "
--#define VERSION "version 1.40.2"
-+#define VERSION "version 1.40.4"
- #include "powernow-k8.h"
- 
- /* serialize freq changes  */
-@@ -978,7 +978,7 @@ static int __init powernowk8_cpu_init(st
- {
- 	struct powernow_k8_data *data;
- 	cpumask_t oldmask = CPU_MASK_ALL;
--	int rc;
-+	int rc, i;
- 
- 	if (!check_supported_cpu(pol->cpu))
- 		return -ENODEV;
-@@ -1064,7 +1064,9 @@ static int __init powernowk8_cpu_init(st
- 	printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
- 	       data->currfid, data->currvid);
- 
--	powernow_data[pol->cpu] = data;
-+	for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
-+		powernow_data[i] = data;
-+	}
- 
- 	return 0;
- 
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/io_apic.c linux-2.6.12-xen/arch/i386/kernel/io_apic.c
---- pristine-linux-2.6.12/arch/i386/kernel/io_apic.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/io_apic.c	2006-02-25 00:12:33.793990931 +0100
-@@ -576,9 +576,11 @@ static int balanced_irq(void *unused)
- 		try_to_freeze(PF_FREEZE);
- 		if (time_after(jiffies,
- 				prev_balance_time+balanced_irq_interval)) {
-+			preempt_disable();
- 			do_irq_balance();
- 			prev_balance_time = jiffies;
- 			time_remaining = balanced_irq_interval;
-+			preempt_enable();
- 		}
- 	}
- 	return 0;
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/irq.c linux-2.6.12-xen/arch/i386/kernel/irq.c
---- pristine-linux-2.6.12/arch/i386/kernel/irq.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/irq.c	2006-02-25 00:12:33.793990931 +0100
-@@ -15,6 +15,9 @@
- #include <linux/seq_file.h>
- #include <linux/interrupt.h>
- #include <linux/kernel_stat.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/delay.h>
- 
- DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp;
- EXPORT_PER_CPU_SYMBOL(irq_stat);
-@@ -210,9 +213,8 @@ int show_interrupts(struct seq_file *p, 
- 
- 	if (i == 0) {
- 		seq_printf(p, "           ");
--		for (j=0; j<NR_CPUS; j++)
--			if (cpu_online(j))
--				seq_printf(p, "CPU%d       ",j);
-+		for_each_cpu(j)
-+			seq_printf(p, "CPU%d       ",j);
- 		seq_putc(p, '\n');
- 	}
- 
-@@ -225,9 +227,8 @@ int show_interrupts(struct seq_file *p, 
- #ifndef CONFIG_SMP
- 		seq_printf(p, "%10u ", kstat_irqs(i));
- #else
--		for (j = 0; j < NR_CPUS; j++)
--			if (cpu_online(j))
--				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-+		for_each_cpu(j)
-+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
- #endif
- 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
- 		seq_printf(p, "  %s", action->name);
-@@ -240,16 +241,13 @@ skip:
- 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- 	} else if (i == NR_IRQS) {
- 		seq_printf(p, "NMI: ");
--		for (j = 0; j < NR_CPUS; j++)
--			if (cpu_online(j))
--				seq_printf(p, "%10u ", nmi_count(j));
-+		for_each_cpu(j)
-+ 			seq_printf(p, "%10u ", nmi_count(j));
- 		seq_putc(p, '\n');
- #ifdef CONFIG_X86_LOCAL_APIC
- 		seq_printf(p, "LOC: ");
--		for (j = 0; j < NR_CPUS; j++)
--			if (cpu_online(j))
--				seq_printf(p, "%10u ",
--					per_cpu(irq_stat,j).apic_timer_irqs);
-+		for_each_cpu(j)
-+			seq_printf(p, "%10u ", per_cpu(irq_stat,j).apic_timer_irqs);
- 		seq_putc(p, '\n');
- #endif
- 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-@@ -259,3 +257,45 @@ skip:
- 	}
- 	return 0;
- }
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+#include <mach_apic.h>
-+
-+void fixup_irqs(cpumask_t map)
-+{
-+	unsigned int irq;
-+	static int warned;
-+
-+	for (irq = 0; irq < NR_IRQS; irq++) {
-+		cpumask_t mask;
-+		if (irq == 2)
-+			continue;
-+
-+		cpus_and(mask, irq_affinity[irq], map);
-+		if (any_online_cpu(mask) == NR_CPUS) {
-+			printk("Breaking affinity for irq %i\n", irq);
-+			mask = map;
-+		}
-+		if (irq_desc[irq].handler->set_affinity)
-+			irq_desc[irq].handler->set_affinity(irq, mask);
-+		else if (irq_desc[irq].action && !(warned++))
-+			printk("Cannot set affinity for irq %i\n", irq);
-+	}
-+
-+#if 0
-+	barrier();
-+	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
-+	   [note the nop - the interrupt-enable boundary on x86 is two
-+	   instructions from sti] - to flush out pending hardirqs and
-+	   IPIs. After this point nothing is supposed to reach this CPU." */
-+	__asm__ __volatile__("sti; nop; cli");
-+	barrier();
-+#else
-+	/* That doesn't seem sufficient.  Give it 1ms. */
-+	local_irq_enable();
-+	mdelay(1);
-+	local_irq_disable();
-+#endif
-+}
-+#endif
-+
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/Makefile linux-2.6.12-xen/arch/i386/kernel/Makefile
---- pristine-linux-2.6.12/arch/i386/kernel/Makefile	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/Makefile	2006-02-25 00:12:33.829985506 +0100
-@@ -33,6 +33,7 @@ obj-$(CONFIG_ACPI_SRAT) 	+= srat.o
- obj-$(CONFIG_HPET_TIMER) 	+= time_hpet.o
- obj-$(CONFIG_EFI) 		+= efi.o efi_stub.o
- obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-+obj-$(CONFIG_SMP_ALTERNATIVES)  += smpalts.o
- 
- EXTRA_AFLAGS   := -traditional
- 
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/msr.c linux-2.6.12-xen/arch/i386/kernel/msr.c
---- pristine-linux-2.6.12/arch/i386/kernel/msr.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/msr.c	2006-02-25 00:12:33.809988520 +0100
-@@ -260,7 +260,7 @@ static struct file_operations msr_fops =
- 	.open = msr_open,
- };
- 
--static int msr_class_simple_device_add(int i)
-+static int __devinit msr_class_simple_device_add(int i)
- {
- 	int err = 0;
- 	struct class_device *class_err;
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/process.c linux-2.6.12-xen/arch/i386/kernel/process.c
---- pristine-linux-2.6.12/arch/i386/kernel/process.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/process.c	2006-02-25 00:12:33.810988369 +0100
-@@ -13,6 +13,7 @@
- 
- #include <stdarg.h>
- 
-+#include <linux/cpu.h>
- #include <linux/errno.h>
- #include <linux/sched.h>
- #include <linux/fs.h>
-@@ -54,6 +55,9 @@
- #include <linux/irq.h>
- #include <linux/err.h>
- 
-+#include <asm/tlbflush.h>
-+#include <asm/cpu.h>
-+
- asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
- 
- static int hlt_counter;
-@@ -138,6 +142,34 @@ static void poll_idle (void)
- 	}
- }
- 
-+#ifdef CONFIG_HOTPLUG_CPU
-+#include <asm/nmi.h>
-+/* We don't actually take CPU down, just spin without interrupts. */
-+static inline void play_dead(void)
-+{
-+	/* Ack it */
-+	__get_cpu_var(cpu_state) = CPU_DEAD;
-+
-+	/* We shouldn't have to disable interrupts while dead, but
-+	 * some interrupts just don't seem to go away, and this makes
-+	 * it "work" for testing purposes. */
-+	/* Death loop */
-+	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
-+		cpu_relax();
-+
-+	local_irq_disable();
-+	__flush_tlb_all();
-+	cpu_set(smp_processor_id(), cpu_online_map);
-+	enable_APIC_timer();
-+	local_irq_enable();
-+}
-+#else
-+static inline void play_dead(void)
-+{
-+	BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
- /*
-  * The idle thread. There's no useful work to be
-  * done, so just try to conserve power and have a
-@@ -160,6 +192,9 @@ void cpu_idle (void)
- 			if (!idle)
- 				idle = default_idle;
- 
-+			if (cpu_is_offline(cpu))
-+				play_dead();
-+
- 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
- 			idle();
- 		}
-@@ -827,6 +862,8 @@ asmlinkage int sys_get_thread_area(struc
- 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- 		return -EINVAL;
- 
-+	memset(&info, 0, sizeof(info));
-+
- 	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
- 
- 	info.entry_number = idx;
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/smpalts.c linux-2.6.12-xen/arch/i386/kernel/smpalts.c
---- pristine-linux-2.6.12/arch/i386/kernel/smpalts.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/i386/kernel/smpalts.c	2006-02-25 00:12:33.830985355 +0100
-@@ -0,0 +1,85 @@
-+#include <linux/kernel.h>
-+#include <asm/system.h>
-+#include <asm/smp_alt.h>
-+#include <asm/processor.h>
-+#include <asm/string.h>
-+
-+struct smp_replacement_record {
-+	unsigned char targ_size;
-+	unsigned char smp1_size;
-+	unsigned char smp2_size;
-+	unsigned char up_size;
-+	unsigned char feature;
-+	unsigned char data[0];
-+};
-+
-+struct smp_alternative_record {
-+	void *targ_start;
-+	struct smp_replacement_record *repl;
-+};
-+
-+extern struct smp_alternative_record __start_smp_alternatives_table,
-+  __stop_smp_alternatives_table;
-+extern unsigned long __init_begin, __init_end;
-+
-+void prepare_for_smp(void)
-+{
-+	struct smp_alternative_record *r;
-+	printk(KERN_INFO "Enabling SMP...\n");
-+	for (r = &__start_smp_alternatives_table;
-+	     r != &__stop_smp_alternatives_table;
-+	     r++) {
-+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
-+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
-+		BUG_ON(r->repl->targ_size < r->repl->up_size);
-+               if (system_state == SYSTEM_RUNNING &&
-+                   r->targ_start >= (void *)&__init_begin &&
-+                   r->targ_start < (void *)&__init_end)
-+                       continue;
-+		if (r->repl->feature != (unsigned char)-1 &&
-+		    boot_cpu_has(r->repl->feature)) {
-+			memcpy(r->targ_start,
-+			       r->repl->data + r->repl->smp1_size,
-+			       r->repl->smp2_size);
-+			memset(r->targ_start + r->repl->smp2_size,
-+			       0x90,
-+			       r->repl->targ_size - r->repl->smp2_size);
-+		} else {
-+			memcpy(r->targ_start,
-+			       r->repl->data,
-+			       r->repl->smp1_size);
-+			memset(r->targ_start + r->repl->smp1_size,
-+			       0x90,
-+			       r->repl->targ_size - r->repl->smp1_size);
-+		}
-+	}
-+	/* Paranoia */
-+	asm volatile ("jmp 1f\n1:");
-+	mb();
-+}
-+
-+void unprepare_for_smp(void)
-+{
-+	struct smp_alternative_record *r;
-+	printk(KERN_INFO "Disabling SMP...\n");
-+	for (r = &__start_smp_alternatives_table;
-+	     r != &__stop_smp_alternatives_table;
-+	     r++) {
-+		BUG_ON(r->repl->targ_size < r->repl->smp1_size);
-+		BUG_ON(r->repl->targ_size < r->repl->smp2_size);
-+		BUG_ON(r->repl->targ_size < r->repl->up_size);
-+               if (system_state == SYSTEM_RUNNING &&
-+                   r->targ_start >= (void *)&__init_begin &&
-+                   r->targ_start < (void *)&__init_end)
-+                       continue;
-+		memcpy(r->targ_start,
-+		       r->repl->data + r->repl->smp1_size + r->repl->smp2_size,
-+		       r->repl->up_size);
-+		memset(r->targ_start + r->repl->up_size,
-+		       0x90,
-+		       r->repl->targ_size - r->repl->up_size);
-+	}
-+	/* Paranoia */
-+	asm volatile ("jmp 1f\n1:");
-+	mb();
-+}
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/smpboot.c linux-2.6.12-xen/arch/i386/kernel/smpboot.c
---- pristine-linux-2.6.12/arch/i386/kernel/smpboot.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/smpboot.c	2006-02-25 00:12:33.831985204 +0100
-@@ -44,6 +44,9 @@
- #include <linux/smp_lock.h>
- #include <linux/irq.h>
- #include <linux/bootmem.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/percpu.h>
- 
- #include <linux/delay.h>
- #include <linux/mc146818rtc.h>
-@@ -90,6 +93,9 @@ static int trampoline_exec;
- 
- static void map_cpu_to_logical_apicid(void);
- 
-+/* State of each CPU. */
-+DEFINE_PER_CPU(int, cpu_state) = { 0 };
-+
- /*
-  * Currently trivial. Write the real->protected mode
-  * bootstrap into the page concerned. The caller
-@@ -1001,6 +1007,11 @@ static void __init smp_boot_cpus(unsigne
- 		if (max_cpus <= cpucount+1)
- 			continue;
- 
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+		if (kicked == 1)
-+			prepare_for_smp();
-+#endif
-+
- 		if (do_boot_cpu(apicid))
- 			printk("CPU #%d not responding - cannot use it.\n",
- 								apicid);
-@@ -1107,6 +1118,9 @@ static void __init smp_boot_cpus(unsigne
-    who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
- void __init smp_prepare_cpus(unsigned int max_cpus)
- {
-+	smp_commenced_mask = cpumask_of_cpu(0);
-+	cpu_callin_map = cpumask_of_cpu(0);
-+	mb();
- 	smp_boot_cpus(max_cpus);
- }
- 
-@@ -1116,20 +1130,104 @@ void __devinit smp_prepare_boot_cpu(void
- 	cpu_set(smp_processor_id(), cpu_callout_map);
- }
- 
--int __devinit __cpu_up(unsigned int cpu)
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/* must be called with the cpucontrol mutex held */
-+static int __devinit cpu_enable(unsigned int cpu)
- {
--	/* This only works at boot for x86.  See "rewrite" above. */
--	if (cpu_isset(cpu, smp_commenced_mask)) {
--		local_irq_enable();
--		return -ENOSYS;
-+	/* get the target out of its holding state */
-+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-+	wmb();
-+
-+	/* wait for the processor to ack it. timeout? */
-+	while (!cpu_online(cpu))
-+		cpu_relax();
-+
-+	fixup_irqs(cpu_online_map);
-+	/* counter the disable in fixup_irqs() */
-+	local_irq_enable();
-+	return 0;
-+}
-+
-+int __cpu_disable(void)
-+{
-+	cpumask_t map = cpu_online_map;
-+	int cpu = smp_processor_id();
-+
-+	/*
-+	 * Perhaps use cpufreq to drop frequency, but that could go
-+	 * into generic code.
-+ 	 *
-+	 * We won't take down the boot processor on i386 due to some
-+	 * interrupts only being able to be serviced by the BSP.
-+	 * Especially so if we're not using an IOAPIC	-zwane
-+	 */
-+	if (cpu == 0)
-+		return -EBUSY;
-+
-+	/* We enable the timer again on the exit path of the death loop */
-+	disable_APIC_timer();
-+	/* Allow any queued timer interrupts to get serviced */
-+	local_irq_enable();
-+	mdelay(1);
-+	local_irq_disable();
-+
-+	cpu_clear(cpu, map);
-+	fixup_irqs(map);
-+	/* It's now safe to remove this processor from the online map */
-+	cpu_clear(cpu, cpu_online_map);
-+	return 0;
-+}
-+
-+void __cpu_die(unsigned int cpu)
-+{
-+	/* We don't do anything here: idle task is faking death itself. */
-+	unsigned int i;
-+
-+	for (i = 0; i < 10; i++) {
-+		/* They ack this in play_dead by setting CPU_DEAD */
-+		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
-+			return;
-+		current->state = TASK_UNINTERRUPTIBLE;
-+		schedule_timeout(HZ/10);
- 	}
-+ 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-+}
-+#else /* ... !CONFIG_HOTPLUG_CPU */
-+int __cpu_disable(void)
-+{
-+	return -ENOSYS;
-+}
- 
-+void __cpu_die(unsigned int cpu)
-+{
-+	/* We said "no" in __cpu_disable */
-+	BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+int __devinit __cpu_up(unsigned int cpu)
-+{
- 	/* In case one didn't come up */
- 	if (!cpu_isset(cpu, cpu_callin_map)) {
-+		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
- 		local_irq_enable();
- 		return -EIO;
- 	}
- 
-+#ifdef CONFIG_HOTPLUG_CPU
-+	/* Already up, and in cpu_quiescent now? */
-+	if (cpu_isset(cpu, smp_commenced_mask)) {
-+		cpu_enable(cpu);
-+		return 0;
-+	}
-+#endif
-+
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+	if (num_online_cpus() == 1)
-+		prepare_for_smp();
-+#endif
-+
- 	local_irq_enable();
- 	/* Unleash the CPU! */
- 	cpu_set(cpu, smp_commenced_mask);
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/smpboot.c.orig linux-2.6.12-xen/arch/i386/kernel/smpboot.c.orig
---- pristine-linux-2.6.12/arch/i386/kernel/smpboot.c.orig	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/i386/kernel/smpboot.c.orig	2006-02-25 00:12:33.811988218 +0100
-@@ -0,0 +1,1260 @@
-+/*
-+ *	x86 SMP booting functions
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Much of the core SMP work is based on previous work by Thomas Radke, to
-+ *	whom a great many thanks are extended.
-+ *
-+ *	Thanks to Intel for making available several different Pentium,
-+ *	Pentium Pro and Pentium-II/Xeon MP machines.
-+ *	Original development of Linux SMP code supported by Caldera.
-+ *
-+ *	This code is released under the GNU General Public License version 2 or
-+ *	later.
-+ *
-+ *	Fixes
-+ *		Felix Koop	:	NR_CPUS used properly
-+ *		Jose Renau	:	Handle single CPU case.
-+ *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
-+ *		Greg Wright	:	Fix for kernel stacks panic.
-+ *		Erich Boleyn	:	MP v1.4 and additional changes.
-+ *	Matthias Sattler	:	Changes for 2.1 kernel map.
-+ *	Michel Lespinasse	:	Changes for 2.1 kernel map.
-+ *	Michael Chastain	:	Change trampoline.S to gnu as.
-+ *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
-+ *		Ingo Molnar	:	Added APIC timers, based on code
-+ *					from Jose Renau
-+ *		Ingo Molnar	:	various cleanups and rewrites
-+ *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
-+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
-+ *		Martin J. Bligh	: 	Added support for multi-quad systems
-+ *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
-+*		Rusty Russell	:	Hacked into shape for new "hotplug" boot process. */
-+
-+#include <linux/module.h>
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/smp_lock.h>
-+#include <linux/irq.h>
-+#include <linux/bootmem.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/percpu.h>
-+
-+#include <linux/delay.h>
-+#include <linux/mc146818rtc.h>
-+#include <asm/tlbflush.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+
-+#include <mach_apic.h>
-+#include <mach_wakecpu.h>
-+#include <smpboot_hooks.h>
-+
-+/* Set if we find a B stepping CPU */
-+static int __initdata smp_b_stepping;
-+
-+/* Number of siblings per CPU package */
-+int smp_num_siblings = 1;
-+int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
-+EXPORT_SYMBOL(phys_proc_id);
-+int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
-+EXPORT_SYMBOL(cpu_core_id);
-+
-+/* bitmap of online cpus */
-+cpumask_t cpu_online_map;
-+
-+cpumask_t cpu_callin_map;
-+cpumask_t cpu_callout_map;
-+static cpumask_t smp_commenced_mask;
-+
-+/* Per CPU bogomips and other parameters */
-+struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-+
-+u8 x86_cpu_to_apicid[NR_CPUS] =
-+			{ [0 ... NR_CPUS-1] = 0xff };
-+EXPORT_SYMBOL(x86_cpu_to_apicid);
-+
-+/*
-+ * Trampoline 80x86 program as an array.
-+ */
-+
-+extern unsigned char trampoline_data [];
-+extern unsigned char trampoline_end  [];
-+static unsigned char *trampoline_base;
-+static int trampoline_exec;
-+
-+static void map_cpu_to_logical_apicid(void);
-+
-+/* State of each CPU. */
-+DEFINE_PER_CPU(int, cpu_state) = { 0 };
-+
-+/*
-+ * Currently trivial. Write the real->protected mode
-+ * bootstrap into the page concerned. The caller
-+ * has made sure it's suitably aligned.
-+ */
-+
-+static unsigned long __init setup_trampoline(void)
-+{
-+	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
-+	return virt_to_phys(trampoline_base);
-+}
-+
-+/*
-+ * We are called very early to get the low memory for the
-+ * SMP bootup trampoline page.
-+ */
-+void __init smp_alloc_memory(void)
-+{
-+	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
-+	/*
-+	 * Has to be in very low memory so we can execute
-+	 * real-mode AP code.
-+	 */
-+	if (__pa(trampoline_base) >= 0x9F000)
-+		BUG();
-+	/*
-+	 * Make the SMP trampoline executable:
-+	 */
-+	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
-+}
-+
-+/*
-+ * The bootstrap kernel entry code has set these up. Save them for
-+ * a given CPU
-+ */
-+
-+static void __init smp_store_cpu_info(int id)
-+{
-+	struct cpuinfo_x86 *c = cpu_data + id;
-+
-+	*c = boot_cpu_data;
-+	if (id!=0)
-+		identify_cpu(c);
-+	/*
-+	 * Mask B, Pentium, but not Pentium MMX
-+	 */
-+	if (c->x86_vendor == X86_VENDOR_INTEL &&
-+	    c->x86 == 5 &&
-+	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
-+	    c->x86_model <= 3)
-+		/*
-+		 * Remember we have B step Pentia with bugs
-+		 */
-+		smp_b_stepping = 1;
-+
-+	/*
-+	 * Certain Athlons might work (for various values of 'work') in SMP
-+	 * but they are not certified as MP capable.
-+	 */
-+	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-+
-+		/* Athlon 660/661 is valid. */	
-+		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
-+			goto valid_k7;
-+
-+		/* Duron 670 is valid */
-+		if ((c->x86_model==7) && (c->x86_mask==0))
-+			goto valid_k7;
-+
-+		/*
-+		 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
-+		 * It's worth noting that the A5 stepping (662) of some Athlon XP's
-+		 * have the MP bit set.
-+		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
-+		 */
-+		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
-+		    ((c->x86_model==7) && (c->x86_mask>=1)) ||
-+		     (c->x86_model> 7))
-+			if (cpu_has_mp)
-+				goto valid_k7;
-+
-+		/* If we get here, it's not a certified SMP capable AMD system. */
-+		tainted |= TAINT_UNSAFE_SMP;
-+	}
-+
-+valid_k7:
-+	;
-+}
-+
-+/*
-+ * TSC synchronization.
-+ *
-+ * We first check whether all CPUs have their TSC's synchronized,
-+ * then we print a warning if not, and always resync.
-+ */
-+
-+static atomic_t tsc_start_flag = ATOMIC_INIT(0);
-+static atomic_t tsc_count_start = ATOMIC_INIT(0);
-+static atomic_t tsc_count_stop = ATOMIC_INIT(0);
-+static unsigned long long tsc_values[NR_CPUS];
-+
-+#define NR_LOOPS 5
-+
-+static void __init synchronize_tsc_bp (void)
-+{
-+	int i;
-+	unsigned long long t0;
-+	unsigned long long sum, avg;
-+	long long delta;
-+	unsigned long one_usec;
-+	int buggy = 0;
-+
-+	printk(KERN_INFO "checking TSC synchronization across %u CPUs: ", num_booting_cpus());
-+
-+	/* convert from kcyc/sec to cyc/usec */
-+	one_usec = cpu_khz / 1000;
-+
-+	atomic_set(&tsc_start_flag, 1);
-+	wmb();
-+
-+	/*
-+	 * We loop a few times to get a primed instruction cache,
-+	 * then the last pass is more or less synchronized and
-+	 * the BP and APs set their cycle counters to zero all at
-+	 * once. This reduces the chance of having random offsets
-+	 * between the processors, and guarantees that the maximum
-+	 * delay between the cycle counters is never bigger than
-+	 * the latency of information-passing (cachelines) between
-+	 * two CPUs.
-+	 */
-+	for (i = 0; i < NR_LOOPS; i++) {
-+		/*
-+		 * all APs synchronize but they loop on '== num_cpus'
-+		 */
-+		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
-+			mb();
-+		atomic_set(&tsc_count_stop, 0);
-+		wmb();
-+		/*
-+		 * this lets the APs save their current TSC:
-+		 */
-+		atomic_inc(&tsc_count_start);
-+
-+		rdtscll(tsc_values[smp_processor_id()]);
-+		/*
-+		 * We clear the TSC in the last loop:
-+		 */
-+		if (i == NR_LOOPS-1)
-+			write_tsc(0, 0);
-+
-+		/*
-+		 * Wait for all APs to leave the synchronization point:
-+		 */
-+		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
-+			mb();
-+		atomic_set(&tsc_count_start, 0);
-+		wmb();
-+		atomic_inc(&tsc_count_stop);
-+	}
-+
-+	sum = 0;
-+	for (i = 0; i < NR_CPUS; i++) {
-+		if (cpu_isset(i, cpu_callout_map)) {
-+			t0 = tsc_values[i];
-+			sum += t0;
-+		}
-+	}
-+	avg = sum;
-+	do_div(avg, num_booting_cpus());
-+
-+	sum = 0;
-+	for (i = 0; i < NR_CPUS; i++) {
-+		if (!cpu_isset(i, cpu_callout_map))
-+			continue;
-+		delta = tsc_values[i] - avg;
-+		if (delta < 0)
-+			delta = -delta;
-+		/*
-+		 * We report bigger than 2 microseconds clock differences.
-+		 */
-+		if (delta > 2*one_usec) {
-+			long realdelta;
-+			if (!buggy) {
-+				buggy = 1;
-+				printk("\n");
-+			}
-+			realdelta = delta;
-+			do_div(realdelta, one_usec);
-+			if (tsc_values[i] < avg)
-+				realdelta = -realdelta;
-+
-+			printk(KERN_INFO "CPU#%d had %ld usecs TSC skew, fixed it up.\n", i, realdelta);
-+		}
-+
-+		sum += delta;
-+	}
-+	if (!buggy)
-+		printk("passed.\n");
-+}
-+
-+static void __init synchronize_tsc_ap (void)
-+{
-+	int i;
-+
-+	/*
-+	 * Not every cpu is online at the time
-+	 * this gets called, so we first wait for the BP to
-+	 * finish SMP initialization:
-+	 */
-+	while (!atomic_read(&tsc_start_flag)) mb();
-+
-+	for (i = 0; i < NR_LOOPS; i++) {
-+		atomic_inc(&tsc_count_start);
-+		while (atomic_read(&tsc_count_start) != num_booting_cpus())
-+			mb();
-+
-+		rdtscll(tsc_values[smp_processor_id()]);
-+		if (i == NR_LOOPS-1)
-+			write_tsc(0, 0);
-+
-+		atomic_inc(&tsc_count_stop);
-+		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
-+	}
-+}
-+#undef NR_LOOPS
-+
-+extern void calibrate_delay(void);
-+
-+static atomic_t init_deasserted;
-+
-+static void __init smp_callin(void)
-+{
-+	int cpuid, phys_id;
-+	unsigned long timeout;
-+
-+	/*
-+	 * If waken up by an INIT in an 82489DX configuration
-+	 * we may get here before an INIT-deassert IPI reaches
-+	 * our local APIC.  We have to wait for the IPI or we'll
-+	 * lock up on an APIC access.
-+	 */
-+	wait_for_init_deassert(&init_deasserted);
-+
-+	/*
-+	 * (This works even if the APIC is not enabled.)
-+	 */
-+	phys_id = GET_APIC_ID(apic_read(APIC_ID));
-+	cpuid = smp_processor_id();
-+	if (cpu_isset(cpuid, cpu_callin_map)) {
-+		printk("huh, phys CPU#%d, CPU#%d already present??\n",
-+					phys_id, cpuid);
-+		BUG();
-+	}
-+	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
-+
-+	/*
-+	 * STARTUP IPIs are fragile beasts as they might sometimes
-+	 * trigger some glue motherboard logic. Complete APIC bus
-+	 * silence for 1 second, this overestimates the time the
-+	 * boot CPU is spending to send the up to 2 STARTUP IPIs
-+	 * by a factor of two. This should be enough.
-+	 */
-+
-+	/*
-+	 * Waiting 2s total for startup (udelay is not yet working)
-+	 */
-+	timeout = jiffies + 2*HZ;
-+	while (time_before(jiffies, timeout)) {
-+		/*
-+		 * Has the boot CPU finished it's STARTUP sequence?
-+		 */
-+		if (cpu_isset(cpuid, cpu_callout_map))
-+			break;
-+		rep_nop();
-+	}
-+
-+	if (!time_before(jiffies, timeout)) {
-+		printk("BUG: CPU%d started up but did not get a callout!\n",
-+			cpuid);
-+		BUG();
-+	}
-+
-+	/*
-+	 * the boot CPU has finished the init stage and is spinning
-+	 * on callin_map until we finish. We are free to set up this
-+	 * CPU, first the APIC. (this is probably redundant on most
-+	 * boards)
-+	 */
-+
-+	Dprintk("CALLIN, before setup_local_APIC().\n");
-+	smp_callin_clear_local_apic();
-+	setup_local_APIC();
-+	map_cpu_to_logical_apicid();
-+
-+	/*
-+	 * Get our bogomips.
-+	 */
-+	calibrate_delay();
-+	Dprintk("Stack at about %p\n",&cpuid);
-+
-+	/*
-+	 * Save our processor parameters
-+	 */
-+ 	smp_store_cpu_info(cpuid);
-+
-+	disable_APIC_timer();
-+
-+	/*
-+	 * Allow the master to continue.
-+	 */
-+	cpu_set(cpuid, cpu_callin_map);
-+
-+	/*
-+	 *      Synchronize the TSC with the BP
-+	 */
-+	if (cpu_has_tsc && cpu_khz)
-+		synchronize_tsc_ap();
-+}
-+
-+static int cpucount;
-+
-+/*
-+ * Activate a secondary processor.
-+ */
-+static void __init start_secondary(void *unused)
-+{
-+	/*
-+	 * Dont put anything before smp_callin(), SMP
-+	 * booting is too fragile that we want to limit the
-+	 * things done here to the most necessary things.
-+	 */
-+	cpu_init();
-+	smp_callin();
-+	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
-+		rep_nop();
-+	setup_secondary_APIC_clock();
-+	if (nmi_watchdog == NMI_IO_APIC) {
-+		disable_8259A_irq(0);
-+		enable_NMI_through_LVT0(NULL);
-+		enable_8259A_irq(0);
-+	}
-+	enable_APIC_timer();
-+	/*
-+	 * low-memory mappings have been cleared, flush them from
-+	 * the local TLBs too.
-+	 */
-+	local_flush_tlb();
-+	cpu_set(smp_processor_id(), cpu_online_map);
-+
-+	/* We can take interrupts now: we're officially "up". */
-+	local_irq_enable();
-+
-+	wmb();
-+	cpu_idle();
-+}
-+
-+/*
-+ * Everything has been set up for the secondary
-+ * CPUs - they just need to reload everything
-+ * from the task structure
-+ * This function must not return.
-+ */
-+void __init initialize_secondary(void)
-+{
-+	/*
-+	 * We don't actually need to load the full TSS,
-+	 * basically just the stack pointer and the eip.
-+	 */
-+
-+	asm volatile(
-+		"movl %0,%%esp\n\t"
-+		"jmp *%1"
-+		:
-+		:"r" (current->thread.esp),"r" (current->thread.eip));
-+}
-+
-+extern struct {
-+	void * esp;
-+	unsigned short ss;
-+} stack_start;
-+
-+#ifdef CONFIG_NUMA
-+
-+/* which logical CPUs are on which nodes */
-+cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
-+				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
-+/* which node each logical CPU is on */
-+int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
-+EXPORT_SYMBOL(cpu_2_node);
-+
-+/* set up a mapping between cpu and node. */
-+static inline void map_cpu_to_node(int cpu, int node)
-+{
-+	printk("Mapping cpu %d to node %d\n", cpu, node);
-+	cpu_set(cpu, node_2_cpu_mask[node]);
-+	cpu_2_node[cpu] = node;
-+}
-+
-+/* undo a mapping between cpu and node. */
-+static inline void unmap_cpu_to_node(int cpu)
-+{
-+	int node;
-+
-+	printk("Unmapping cpu %d from all nodes\n", cpu);
-+	for (node = 0; node < MAX_NUMNODES; node ++)
-+		cpu_clear(cpu, node_2_cpu_mask[node]);
-+	cpu_2_node[cpu] = 0;
-+}
-+#else /* !CONFIG_NUMA */
-+
-+#define map_cpu_to_node(cpu, node)	({})
-+#define unmap_cpu_to_node(cpu)	({})
-+
-+#endif /* CONFIG_NUMA */
-+
-+u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+static void map_cpu_to_logical_apicid(void)
-+{
-+	int cpu = smp_processor_id();
-+	int apicid = logical_smp_processor_id();
-+
-+	cpu_2_logical_apicid[cpu] = apicid;
-+	map_cpu_to_node(cpu, apicid_to_node(apicid));
-+}
-+
-+static void unmap_cpu_to_logical_apicid(int cpu)
-+{
-+	cpu_2_logical_apicid[cpu] = BAD_APICID;
-+	unmap_cpu_to_node(cpu);
-+}
-+
-+#if APIC_DEBUG
-+static inline void __inquire_remote_apic(int apicid)
-+{
-+	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
-+	char *names[] = { "ID", "VERSION", "SPIV" };
-+	int timeout, status;
-+
-+	printk("Inquiring remote APIC #%d...\n", apicid);
-+
-+	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
-+		printk("... APIC #%d %s: ", apicid, names[i]);
-+
-+		/*
-+		 * Wait for idle.
-+		 */
-+		apic_wait_icr_idle();
-+
-+		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-+		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
-+
-+		timeout = 0;
-+		do {
-+			udelay(100);
-+			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
-+		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);
-+
-+		switch (status) {
-+		case APIC_ICR_RR_VALID:
-+			status = apic_read(APIC_RRR);
-+			printk("%08x\n", status);
-+			break;
-+		default:
-+			printk("failed\n");
-+		}
-+	}
-+}
-+#endif
-+
-+#ifdef WAKE_SECONDARY_VIA_NMI
-+/* 
-+ * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
-+ * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
-+ * won't ... remember to clear down the APIC, etc later.
-+ */
-+static int __init
-+wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
-+{
-+	unsigned long send_status = 0, accept_status = 0;
-+	int timeout, maxlvt;
-+
-+	/* Target chip */
-+	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
-+
-+	/* Boot on the stack */
-+	/* Kick the second */
-+	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
-+
-+	Dprintk("Waiting for send to finish...\n");
-+	timeout = 0;
-+	do {
-+		Dprintk("+");
-+		udelay(100);
-+		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-+	} while (send_status && (timeout++ < 1000));
-+
-+	/*
-+	 * Give the other CPU some time to accept the IPI.
-+	 */
-+	udelay(200);
-+	/*
-+	 * Due to the Pentium erratum 3AP.
-+	 */
-+	maxlvt = get_maxlvt();
-+	if (maxlvt > 3) {
-+		apic_read_around(APIC_SPIV);
-+		apic_write(APIC_ESR, 0);
-+	}
-+	accept_status = (apic_read(APIC_ESR) & 0xEF);
-+	Dprintk("NMI sent.\n");
-+
-+	if (send_status)
-+		printk("APIC never delivered???\n");
-+	if (accept_status)
-+		printk("APIC delivery error (%lx).\n", accept_status);
-+
-+	return (send_status | accept_status);
-+}
-+#endif	/* WAKE_SECONDARY_VIA_NMI */
-+
-+#ifdef WAKE_SECONDARY_VIA_INIT
-+static int __init
-+wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
-+{
-+	unsigned long send_status = 0, accept_status = 0;
-+	int maxlvt, timeout, num_starts, j;
-+
-+	/*
-+	 * Be paranoid about clearing APIC errors.
-+	 */
-+	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
-+		apic_read_around(APIC_SPIV);
-+		apic_write(APIC_ESR, 0);
-+		apic_read(APIC_ESR);
-+	}
-+
-+	Dprintk("Asserting INIT.\n");
-+
-+	/*
-+	 * Turn INIT on target chip
-+	 */
-+	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-+
-+	/*
-+	 * Send IPI
-+	 */
-+	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
-+				| APIC_DM_INIT);
-+
-+	Dprintk("Waiting for send to finish...\n");
-+	timeout = 0;
-+	do {
-+		Dprintk("+");
-+		udelay(100);
-+		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-+	} while (send_status && (timeout++ < 1000));
-+
-+	mdelay(10);
-+
-+	Dprintk("Deasserting INIT.\n");
-+
-+	/* Target chip */
-+	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-+
-+	/* Send IPI */
-+	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
-+
-+	Dprintk("Waiting for send to finish...\n");
-+	timeout = 0;
-+	do {
-+		Dprintk("+");
-+		udelay(100);
-+		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-+	} while (send_status && (timeout++ < 1000));
-+
-+	atomic_set(&init_deasserted, 1);
-+
-+	/*
-+	 * Should we send STARTUP IPIs ?
-+	 *
-+	 * Determine this based on the APIC version.
-+	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
-+	 */
-+	if (APIC_INTEGRATED(apic_version[phys_apicid]))
-+		num_starts = 2;
-+	else
-+		num_starts = 0;
-+
-+	/*
-+	 * Run STARTUP IPI loop.
-+	 */
-+	Dprintk("#startup loops: %d.\n", num_starts);
-+
-+	maxlvt = get_maxlvt();
-+
-+	for (j = 1; j <= num_starts; j++) {
-+		Dprintk("Sending STARTUP #%d.\n",j);
-+		apic_read_around(APIC_SPIV);
-+		apic_write(APIC_ESR, 0);
-+		apic_read(APIC_ESR);
-+		Dprintk("After apic_write.\n");
-+
-+		/*
-+		 * STARTUP IPI
-+		 */
-+
-+		/* Target chip */
-+		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
-+
-+		/* Boot on the stack */
-+		/* Kick the second */
-+		apic_write_around(APIC_ICR, APIC_DM_STARTUP
-+					| (start_eip >> 12));
-+
-+		/*
-+		 * Give the other CPU some time to accept the IPI.
-+		 */
-+		udelay(300);
-+
-+		Dprintk("Startup point 1.\n");
-+
-+		Dprintk("Waiting for send to finish...\n");
-+		timeout = 0;
-+		do {
-+			Dprintk("+");
-+			udelay(100);
-+			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-+		} while (send_status && (timeout++ < 1000));
-+
-+		/*
-+		 * Give the other CPU some time to accept the IPI.
-+		 */
-+		udelay(200);
-+		/*
-+		 * Due to the Pentium erratum 3AP.
-+		 */
-+		if (maxlvt > 3) {
-+			apic_read_around(APIC_SPIV);
-+			apic_write(APIC_ESR, 0);
-+		}
-+		accept_status = (apic_read(APIC_ESR) & 0xEF);
-+		if (send_status || accept_status)
-+			break;
-+	}
-+	Dprintk("After Startup.\n");
-+
-+	if (send_status)
-+		printk("APIC never delivered???\n");
-+	if (accept_status)
-+		printk("APIC delivery error (%lx).\n", accept_status);
-+
-+	return (send_status | accept_status);
-+}
-+#endif	/* WAKE_SECONDARY_VIA_INIT */
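The delivery-status wait loops in the NMI and INIT/STARTUP paths above all follow the same bounded busy-wait on the APIC ICR busy bit. A minimal sketch of that pattern, factored into a single helper for reference (the helper name is an assumption of this sketch, not something the patch defines):

static int wait_icr_idle_bounded(void)	/* name is an assumption */
{
	int timeout = 0;
	unsigned long send_status;

	do {
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/* non-zero means the IPI was never accepted within ~100ms */
	return send_status ? -1 : 0;
}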
-+
-+extern cpumask_t cpu_initialized;
-+
-+static int __init do_boot_cpu(int apicid)
-+/*
-+ * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
-+ * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
-+ * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
-+ */
-+{
-+	struct task_struct *idle;
-+	unsigned long boot_error;
-+	int timeout, cpu;
-+	unsigned long start_eip;
-+	unsigned short nmi_high = 0, nmi_low = 0;
-+
-+	cpu = ++cpucount;
-+	/*
-+	 * We can't use kernel_thread() since we must avoid
-+	 * rescheduling the child.
-+	 */
-+	idle = fork_idle(cpu);
-+	if (IS_ERR(idle))
-+		panic("failed fork for CPU %d", cpu);
-+	idle->thread.eip = (unsigned long) start_secondary;
-+	/* start_eip had better be page-aligned! */
-+	start_eip = setup_trampoline();
-+
-+	/* So we see what's up   */
-+	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
-+	/* Stack for startup_32 can be just as for start_secondary onwards */
-+	stack_start.esp = (void *) idle->thread.esp;
-+
-+	irq_ctx_init(cpu);
-+
-+	/*
-+	 * This grunge runs the startup process for
-+	 * the targeted processor.
-+	 */
-+
-+	atomic_set(&init_deasserted, 0);
-+
-+	Dprintk("Setting warm reset code and vector.\n");
-+
-+	store_NMI_vector(&nmi_high, &nmi_low);
-+
-+	smpboot_setup_warm_reset_vector(start_eip);
-+
-+	/*
-+	 * Starting actual IPI sequence...
-+	 */
-+	boot_error = wakeup_secondary_cpu(apicid, start_eip);
-+
-+	if (!boot_error) {
-+		/*
-+		 * allow APs to start initializing.
-+		 */
-+		Dprintk("Before Callout %d.\n", cpu);
-+		cpu_set(cpu, cpu_callout_map);
-+		Dprintk("After Callout %d.\n", cpu);
-+
-+		/*
-+		 * Wait 5s total for a response
-+		 */
-+		for (timeout = 0; timeout < 50000; timeout++) {
-+			if (cpu_isset(cpu, cpu_callin_map))
-+				break;	/* It has booted */
-+			udelay(100);
-+		}
-+
-+		if (cpu_isset(cpu, cpu_callin_map)) {
-+			/* number CPUs logically, starting from 1 (BSP is 0) */
-+			Dprintk("OK.\n");
-+			printk("CPU%d: ", cpu);
-+			print_cpu_info(&cpu_data[cpu]);
-+			Dprintk("CPU has booted.\n");
-+		} else {
-+			boot_error= 1;
-+			if (*((volatile unsigned char *)trampoline_base)
-+					== 0xA5)
-+				/* trampoline started but...? */
-+				printk("Stuck ??\n");
-+			else
-+				/* trampoline code not run */
-+				printk("Not responding.\n");
-+			inquire_remote_apic(apicid);
-+		}
-+	}
-+	x86_cpu_to_apicid[cpu] = apicid;
-+	if (boot_error) {
-+		/* Try to put things back the way they were before ... */
-+		unmap_cpu_to_logical_apicid(cpu);
-+		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
-+		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-+		cpucount--;
-+	}
-+
-+	/* mark "stuck" area as not stuck */
-+	*((volatile unsigned long *)trampoline_base) = 0;
-+
-+	return boot_error;
-+}
-+
-+static void smp_tune_scheduling (void)
-+{
-+	unsigned long cachesize;       /* kB   */
-+	unsigned long bandwidth = 350; /* MB/s */
-+	/*
-+	 * Rough estimation for SMP scheduling, this is the number of
-+	 * cycles it takes for a fully memory-limited process to flush
-+	 * the SMP-local cache.
-+	 *
-+	 * (For a P5 this pretty much means we will choose another idle
-+	 *  CPU almost always at wakeup time (this is due to the small
-+	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
-+	 *  the cache size)
-+	 */
-+
-+	if (!cpu_khz) {
-+		/*
-+		 * this basically disables processor-affinity
-+		 * scheduling on SMP without a TSC.
-+		 */
-+		return;
-+	} else {
-+		cachesize = boot_cpu_data.x86_cache_size;
-+		if (cachesize == -1) {
-+			cachesize = 16; /* Pentiums, 2x8kB cache */
-+			bandwidth = 100;
-+		}
-+	}
-+}
-+
-+/*
-+ * Cycle through the processors sending APIC IPIs to boot each.
-+ */
-+
-+static int boot_cpu_logical_apicid;
-+/* Where the IO area was mapped on multiquad, always 0 otherwise */
-+void *xquad_portio;
-+
-+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-+EXPORT_SYMBOL(cpu_core_map);
-+
-+static void __init smp_boot_cpus(unsigned int max_cpus)
-+{
-+	int apicid, cpu, bit, kicked;
-+	unsigned long bogosum = 0;
-+
-+	/*
-+	 * Setup boot CPU information
-+	 */
-+	smp_store_cpu_info(0); /* Final full version of the data */
-+	printk("CPU%d: ", 0);
-+	print_cpu_info(&cpu_data[0]);
-+
-+	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
-+	boot_cpu_logical_apicid = logical_smp_processor_id();
-+	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
-+
-+	current_thread_info()->cpu = 0;
-+	smp_tune_scheduling();
-+	cpus_clear(cpu_sibling_map[0]);
-+	cpu_set(0, cpu_sibling_map[0]);
-+
-+	cpus_clear(cpu_core_map[0]);
-+	cpu_set(0, cpu_core_map[0]);
-+
-+	/*
-+	 * If we couldn't find an SMP configuration at boot time,
-+	 * get out of here now!
-+	 */
-+	if (!smp_found_config && !acpi_lapic) {
-+		printk(KERN_NOTICE "SMP motherboard not detected.\n");
-+		smpboot_clear_io_apic_irqs();
-+		phys_cpu_present_map = physid_mask_of_physid(0);
-+		if (APIC_init_uniprocessor())
-+			printk(KERN_NOTICE "Local APIC not detected."
-+					   " Using dummy APIC emulation.\n");
-+		map_cpu_to_logical_apicid();
-+		cpu_set(0, cpu_sibling_map[0]);
-+		cpu_set(0, cpu_core_map[0]);
-+		return;
-+	}
-+
-+	/*
-+	 * Should not be necessary because the MP table should list the boot
-+	 * CPU too, but we do it for the sake of robustness anyway.
-+	 * Makes no sense to do this check in clustered apic mode, so skip it
-+	 */
-+	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
-+		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
-+				boot_cpu_physical_apicid);
-+		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
-+	}
-+
-+	/*
-+	 * If we couldn't find a local APIC, then get out of here now!
-+	 */
-+	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
-+		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
-+			boot_cpu_physical_apicid);
-+		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
-+		smpboot_clear_io_apic_irqs();
-+		phys_cpu_present_map = physid_mask_of_physid(0);
-+		cpu_set(0, cpu_sibling_map[0]);
-+		cpu_set(0, cpu_core_map[0]);
-+		return;
-+	}
-+
-+	verify_local_APIC();
-+
-+	/*
-+	 * If SMP should be disabled, then really disable it!
-+	 */
-+	if (!max_cpus) {
-+		smp_found_config = 0;
-+		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
-+		smpboot_clear_io_apic_irqs();
-+		phys_cpu_present_map = physid_mask_of_physid(0);
-+		cpu_set(0, cpu_sibling_map[0]);
-+		cpu_set(0, cpu_core_map[0]);
-+		return;
-+	}
-+
-+	connect_bsp_APIC();
-+	setup_local_APIC();
-+	map_cpu_to_logical_apicid();
-+
-+
-+	setup_portio_remap();
-+
-+	/*
-+	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
-+	 *
-+	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
-+	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
-+	 * clustered apic ID.
-+	 */
-+	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
-+
-+	kicked = 1;
-+	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
-+		apicid = cpu_present_to_apicid(bit);
-+		/*
-+		 * Don't even attempt to start the boot CPU!
-+		 */
-+		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
-+			continue;
-+
-+		if (!check_apicid_present(bit))
-+			continue;
-+		if (max_cpus <= cpucount+1)
-+			continue;
-+
-+		if (do_boot_cpu(apicid))
-+			printk("CPU #%d not responding - cannot use it.\n",
-+								apicid);
-+		else
-+			++kicked;
-+	}
-+
-+	/*
-+	 * Cleanup possible dangling ends...
-+	 */
-+	smpboot_restore_warm_reset_vector();
-+
-+	/*
-+	 * Allow the user to impress friends.
-+	 */
-+	Dprintk("Before bogomips.\n");
-+	for (cpu = 0; cpu < NR_CPUS; cpu++)
-+		if (cpu_isset(cpu, cpu_callout_map))
-+			bogosum += cpu_data[cpu].loops_per_jiffy;
-+	printk(KERN_INFO
-+		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
-+		cpucount+1,
-+		bogosum/(500000/HZ),
-+		(bogosum/(5000/HZ))%100);
-+	
-+	Dprintk("Before bogocount - setting activated=1.\n");
-+
-+	if (smp_b_stepping)
-+		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
-+
-+	/*
-+	 * Don't taint if we are running SMP kernel on a single non-MP
-+	 * approved Athlon
-+	 */
-+	if (tainted & TAINT_UNSAFE_SMP) {
-+		if (cpucount)
-+			printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
-+		else
-+			tainted &= ~TAINT_UNSAFE_SMP;
-+	}
-+
-+	Dprintk("Boot done.\n");
-+
-+	/*
-+	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
-+	 * efficiently.
-+	 */
-+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+		cpus_clear(cpu_sibling_map[cpu]);
-+		cpus_clear(cpu_core_map[cpu]);
-+	}
-+
-+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+		struct cpuinfo_x86 *c = cpu_data + cpu;
-+		int siblings = 0;
-+		int i;
-+		if (!cpu_isset(cpu, cpu_callout_map))
-+			continue;
-+
-+		if (smp_num_siblings > 1) {
-+			for (i = 0; i < NR_CPUS; i++) {
-+				if (!cpu_isset(i, cpu_callout_map))
-+					continue;
-+				if (cpu_core_id[cpu] == cpu_core_id[i]) {
-+					siblings++;
-+					cpu_set(i, cpu_sibling_map[cpu]);
-+				}
-+			}
-+		} else {
-+			siblings++;
-+			cpu_set(cpu, cpu_sibling_map[cpu]);
-+		}
-+
-+		if (siblings != smp_num_siblings) {
-+			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
-+			smp_num_siblings = siblings;
-+		}
-+
-+		if (c->x86_num_cores > 1) {
-+			for (i = 0; i < NR_CPUS; i++) {
-+				if (!cpu_isset(i, cpu_callout_map))
-+					continue;
-+				if (phys_proc_id[cpu] == phys_proc_id[i]) {
-+					cpu_set(i, cpu_core_map[cpu]);
-+				}
-+			}
-+		} else {
-+			cpu_core_map[cpu] = cpu_sibling_map[cpu];
-+		}
-+	}
-+
-+	smpboot_setup_io_apic();
-+
-+	setup_boot_APIC_clock();
-+
-+	/*
-+	 * Synchronize the TSC with the AP
-+	 */
-+	if (cpu_has_tsc && cpucount && cpu_khz)
-+		synchronize_tsc_bp();
-+}
-+
-+/* These are wrappers to interface to the new boot process.  Someone
-+   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
-+void __init smp_prepare_cpus(unsigned int max_cpus)
-+{
-+	smp_commenced_mask = cpumask_of_cpu(0);
-+	cpu_callin_map = cpumask_of_cpu(0);
-+	mb();
-+	smp_boot_cpus(max_cpus);
-+}
-+
-+void __devinit smp_prepare_boot_cpu(void)
-+{
-+	cpu_set(smp_processor_id(), cpu_online_map);
-+	cpu_set(smp_processor_id(), cpu_callout_map);
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/* must be called with the cpucontrol mutex held */
-+static int __devinit cpu_enable(unsigned int cpu)
-+{
-+	/* get the target out of its holding state */
-+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
-+	wmb();
-+
-+	/* wait for the processor to ack it. timeout? */
-+	while (!cpu_online(cpu))
-+		cpu_relax();
-+
-+	fixup_irqs(cpu_online_map);
-+	/* counter the disable in fixup_irqs() */
-+	local_irq_enable();
-+	return 0;
-+}
-+
-+int __cpu_disable(void)
-+{
-+	cpumask_t map = cpu_online_map;
-+	int cpu = smp_processor_id();
-+
-+	/*
-+	 * Perhaps use cpufreq to drop frequency, but that could go
-+	 * into generic code.
-+ 	 *
-+	 * We won't take down the boot processor on i386 because some
-+	 * interrupts can only be serviced by the BSP, especially when
-+	 * we're not using an IOAPIC.	-zwane
-+	 */
-+	if (cpu == 0)
-+		return -EBUSY;
-+
-+	/* We enable the timer again on the exit path of the death loop */
-+	disable_APIC_timer();
-+	/* Allow any queued timer interrupts to get serviced */
-+	local_irq_enable();
-+	mdelay(1);
-+	local_irq_disable();
-+
-+	cpu_clear(cpu, map);
-+	fixup_irqs(map);
-+	/* It's now safe to remove this processor from the online map */
-+	cpu_clear(cpu, cpu_online_map);
-+	return 0;
-+}
-+
-+void __cpu_die(unsigned int cpu)
-+{
-+	/* We don't do anything here: idle task is faking death itself. */
-+	unsigned int i;
-+
-+	for (i = 0; i < 10; i++) {
-+		/* They ack this in play_dead by setting CPU_DEAD */
-+		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
-+			return;
-+		current->state = TASK_UNINTERRUPTIBLE;
-+		schedule_timeout(HZ/10);
-+	}
-+ 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-+}
-+#else /* ... !CONFIG_HOTPLUG_CPU */
-+int __cpu_disable(void)
-+{
-+	return -ENOSYS;
-+}
-+
-+void __cpu_die(unsigned int cpu)
-+{
-+	/* We said "no" in __cpu_disable */
-+	BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+int __devinit __cpu_up(unsigned int cpu)
-+{
-+	/* In case one didn't come up */
-+	if (!cpu_isset(cpu, cpu_callin_map)) {
-+		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
-+		local_irq_enable();
-+		return -EIO;
-+	}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+	/* Already up, and in cpu_quiescent now? */
-+	if (cpu_isset(cpu, smp_commenced_mask)) {
-+		cpu_enable(cpu);
-+		return 0;
-+	}
-+#endif
-+
-+	local_irq_enable();
-+	/* Unleash the CPU! */
-+	cpu_set(cpu, smp_commenced_mask);
-+	while (!cpu_isset(cpu, cpu_online_map))
-+		mb();
-+	return 0;
-+}
-+
-+void __init smp_cpus_done(unsigned int max_cpus)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+	setup_ioapic_dest();
-+#endif
-+	zap_low_mappings();
-+	/*
-+	 * Disable executability of the SMP trampoline:
-+	 */
-+	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
-+}
-+
-+void __init smp_intr_init(void)
-+{
-+	/*
-+	 * IRQ0 must be given a fixed assignment and initialized,
-+	 * because it's used before the IO-APIC is set up.
-+	 */
-+	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
-+
-+	/*
-+	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
-+	 * IPI, driven by wakeup.
-+	 */
-+	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
-+
-+	/* IPI for invalidation */
-+	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
-+
-+	/* IPI for generic function call */
-+	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
-+}
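For reference, __cpu_up() above only completes because the application processor performs the matching half of the handshake in start_secondary(), which is not part of this hunk. A hedged outline of that AP-side ordering, assumed here rather than quoted from the kernel:

/* Assumed AP-side ordering; the real code lives in start_secondary(). */
static void example_ap_side(int cpu)
{
	cpu_set(cpu, cpu_callin_map);		/* 1: tell the BSP we booted            */
	while (!cpu_isset(cpu, smp_commenced_mask))
		cpu_relax();			/* 2: wait for __cpu_up() to unleash us */
	cpu_set(cpu, cpu_online_map);		/* 3: __cpu_up() spins until this       */
}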
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/smp.c linux-2.6.12-xen/arch/i386/kernel/smp.c
---- pristine-linux-2.6.12/arch/i386/kernel/smp.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/smp.c	2006-02-25 00:12:33.811988218 +0100
-@@ -19,6 +19,7 @@
- #include <linux/mc146818rtc.h>
- #include <linux/cache.h>
- #include <linux/interrupt.h>
-+#include <linux/cpu.h>
- 
- #include <asm/mtrr.h>
- #include <asm/tlbflush.h>
-@@ -163,7 +164,7 @@ void send_IPI_mask_bitmask(cpumask_t cpu
- 	unsigned long flags;
- 
- 	local_irq_save(flags);
--		
-+	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
- 	/*
- 	 * Wait for idle.
- 	 */
-@@ -345,21 +346,21 @@ out:
- static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
- 						unsigned long va)
- {
--	cpumask_t tmp;
- 	/*
- 	 * A couple of (to be removed) sanity checks:
- 	 *
--	 * - we do not send IPIs to not-yet booted CPUs.
- 	 * - current CPU must not be in mask
- 	 * - mask must exist :)
- 	 */
- 	BUG_ON(cpus_empty(cpumask));
--
--	cpus_and(tmp, cpumask, cpu_online_map);
--	BUG_ON(!cpus_equal(cpumask, tmp));
- 	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
- 	BUG_ON(!mm);
- 
-+	/* If a CPU which we ran on has gone down, OK. */
-+	cpus_and(cpumask, cpumask, cpu_online_map);
-+	if (cpus_empty(cpumask))
-+		return;
-+
- 	/*
- 	 * i'm not happy about this global shared spinlock in the
- 	 * MM hot path, but we'll see how contended it is.
-@@ -474,6 +475,7 @@ void flush_tlb_all(void)
-  */
- void smp_send_reschedule(int cpu)
- {
-+	WARN_ON(cpu_is_offline(cpu));
- 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
- }
- 
-@@ -514,10 +516,16 @@ int smp_call_function (void (*func) (voi
-  */
- {
- 	struct call_data_struct data;
--	int cpus = num_online_cpus()-1;
-+	int cpus;
- 
--	if (!cpus)
-+	/* Holding any lock stops cpus from going down. */
-+	spin_lock(&call_lock);
-+	cpus = num_online_cpus()-1;
-+
-+	if (!cpus) {
-+		spin_unlock(&call_lock);
- 		return 0;
-+	}
- 
- 	/* Can deadlock when called with interrupts disabled */
- 	WARN_ON(irqs_disabled());
-@@ -529,7 +537,6 @@ int smp_call_function (void (*func) (voi
- 	if (wait)
- 		atomic_set(&data.finished, 0);
- 
--	spin_lock(&call_lock);
- 	call_data = &data;
- 	mb();
- 	
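The reordering above exists so that num_online_cpus() is sampled while call_lock is already held, which keeps a CPU from going offline between the count and the IPI broadcast. A small illustrative caller, assuming a hypothetical remote_noop() handler, to show how the function is used after this change:

static void remote_noop(void *info)
{
	/* runs on every other online CPU */
}

static void example_broadcast(void)
{
	/* retry=0, wait=1: block until all remote CPUs have run remote_noop() */
	smp_call_function(remote_noop, NULL, 0, 1);
}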
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/traps.c linux-2.6.12-xen/arch/i386/kernel/traps.c
---- pristine-linux-2.6.12/arch/i386/kernel/traps.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/traps.c	2006-02-25 00:12:33.817987314 +0100
-@@ -521,18 +521,11 @@ static void mem_parity_error(unsigned ch
- 
- static void io_check_error(unsigned char reason, struct pt_regs * regs)
- {
--	unsigned long i;
--
- 	printk("NMI: IOCK error (debug interrupt?)\n");
- 	show_registers(regs);
- 
- 	/* Re-enable the IOCK line, wait for a few seconds */
--	reason = (reason & 0xf) | 8;
--	outb(reason, 0x61);
--	i = 2000;
--	while (--i) udelay(1000);
--	reason &= ~8;
--	outb(reason, 0x61);
-+	clear_io_check_error(reason);
- }
- 
- static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-@@ -624,6 +617,14 @@ fastcall void do_nmi(struct pt_regs * re
- 	nmi_enter();
- 
- 	cpu = smp_processor_id();
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+	if (!cpu_online(cpu)) {
-+		nmi_exit();
-+		return;
-+	}
-+#endif
-+
- 	++nmi_count(cpu);
- 
- 	if (!nmi_callback(regs, cpu))
-diff -Nurp pristine-linux-2.6.12/arch/i386/kernel/vmlinux.lds.S linux-2.6.12-xen/arch/i386/kernel/vmlinux.lds.S
---- pristine-linux-2.6.12/arch/i386/kernel/vmlinux.lds.S	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/kernel/vmlinux.lds.S	2006-02-25 00:12:33.831985204 +0100
-@@ -30,6 +30,13 @@ SECTIONS
-   __ex_table : { *(__ex_table) }
-   __stop___ex_table = .;
- 
-+  . = ALIGN(16);
-+  __start_smp_alternatives_table = .;
-+  __smp_alternatives : { *(__smp_alternatives) }
-+  __stop_smp_alternatives_table = .;
-+
-+  __smp_replacements : { *(__smp_replacements) }
-+
-   RODATA
- 
-   /* writeable */
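The linker-script additions only reserve the __smp_alternatives and __smp_replacements sections; the record format lives elsewhere in the patch. As a sketch only, with a hypothetical entry layout, this is how code typically walks a table bounded by the generated __start_/__stop_ symbols:

struct smp_alt_entry {				/* hypothetical layout */
	void *orig;				/* instruction(s) to patch */
	void *repl;				/* replacement sequence    */
	unsigned char len;			/* length of the sequence  */
};

extern struct smp_alt_entry __start_smp_alternatives_table[];
extern struct smp_alt_entry __stop_smp_alternatives_table[];

static int count_smp_alternatives(void)
{
	struct smp_alt_entry *e;
	int n = 0;

	for (e = __start_smp_alternatives_table;
	     e < __stop_smp_alternatives_table; e++)
		n++;		/* real code would patch the code at e->orig here */
	return n;
}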
-diff -Nurp pristine-linux-2.6.12/arch/i386/mm/pageattr.c linux-2.6.12-xen/arch/i386/mm/pageattr.c
---- pristine-linux-2.6.12/arch/i386/mm/pageattr.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/mm/pageattr.c	2006-02-25 00:12:33.823986410 +0100
-@@ -75,7 +75,7 @@ static void set_pmd_pte(pte_t *kpte, uns
- 	unsigned long flags;
- 
- 	set_pte_atomic(kpte, pte); 	/* change init_mm */
--	if (PTRS_PER_PMD > 1)
-+	if (HAVE_SHARED_KERNEL_PMD)
- 		return;
- 
- 	spin_lock_irqsave(&pgd_lock, flags);
-diff -Nurp pristine-linux-2.6.12/arch/i386/mm/pgtable.c linux-2.6.12-xen/arch/i386/mm/pgtable.c
---- pristine-linux-2.6.12/arch/i386/mm/pgtable.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/i386/mm/pgtable.c	2006-02-25 00:12:33.824986259 +0100
-@@ -199,19 +199,20 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
- {
- 	unsigned long flags;
- 
--	if (PTRS_PER_PMD == 1)
-+	if (PTRS_PER_PMD > 1) {
-+		if (HAVE_SHARED_KERNEL_PMD)
-+			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+			       swapper_pg_dir + USER_PTRS_PER_PGD,
-+			       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-+	} else {
- 		spin_lock_irqsave(&pgd_lock, flags);
--
--	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
--			swapper_pg_dir + USER_PTRS_PER_PGD,
--			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
--
--	if (PTRS_PER_PMD > 1)
--		return;
--
--	pgd_list_add(pgd);
--	spin_unlock_irqrestore(&pgd_lock, flags);
--	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+		memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+		       swapper_pg_dir + USER_PTRS_PER_PGD,
-+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-+		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+		pgd_list_add(pgd);
-+		spin_unlock_irqrestore(&pgd_lock, flags);
-+	}
- }
- 
- /* never called when PTRS_PER_PMD > 1 */
-@@ -238,6 +239,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
- 			goto out_oom;
- 		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
- 	}
-+
-+	if (!HAVE_SHARED_KERNEL_PMD) {
-+		unsigned long flags;
-+
-+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+			if (!pmd)
-+				goto out_oom;
-+			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
-+		}
-+
-+		spin_lock_irqsave(&pgd_lock, flags);
-+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
-+			pgd_t *kpgd = pgd_offset_k(v);
-+			pud_t *kpud = pud_offset(kpgd, v);
-+			pmd_t *kpmd = pmd_offset(kpud, v);
-+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+			memcpy(pmd, kpmd, PAGE_SIZE);
-+		}
-+		pgd_list_add(pgd);
-+		spin_unlock_irqrestore(&pgd_lock, flags);
-+	}
-+
- 	return pgd;
- 
- out_oom:
-@@ -252,9 +277,23 @@ void pgd_free(pgd_t *pgd)
- 	int i;
- 
- 	/* in the PAE case user pgd entries are overwritten before usage */
--	if (PTRS_PER_PMD > 1)
--		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
--			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-+	if (PTRS_PER_PMD > 1) {
-+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+			kmem_cache_free(pmd_cache, pmd);
-+		}
-+		if (!HAVE_SHARED_KERNEL_PMD) {
-+			unsigned long flags;
-+			spin_lock_irqsave(&pgd_lock, flags);
-+			pgd_list_del(pgd);
-+			spin_unlock_irqrestore(&pgd_lock, flags);
-+			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+				kmem_cache_free(pmd_cache, pmd);
-+			}
-+		}
-+	}
- 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
- 	kmem_cache_free(pgd_cache, pgd);
- }
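With PAE and an unshared kernel PMD, every page directory built by pgd_alloc() above now carries a private copy of the kernel mappings, and pgd_free() tears those extra PMD pages down again. A minimal usage sketch, with the surrounding mm handling omitted for brevity:

static int example_pgd_lifecycle(struct mm_struct *mm)
{
	pgd_t *pgd = pgd_alloc(mm);	/* copies the kernel PMDs when unshared */

	if (!pgd)
		return -ENOMEM;
	/* ... install pgd in mm and use it ... */
	pgd_free(pgd);			/* also frees the private kernel PMDs */
	return 0;
}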
-diff -Nurp pristine-linux-2.6.12/arch/ia64/hp/sim/Makefile linux-2.6.12-xen/arch/ia64/hp/sim/Makefile
---- pristine-linux-2.6.12/arch/ia64/hp/sim/Makefile	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ia64/hp/sim/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -14,3 +14,5 @@ obj-$(CONFIG_HP_SIMETH)	+= simeth.o
- obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
- obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o
- obj-$(CONFIG_HP_SIMSCSI) += simscsi.o
-+obj-$(CONFIG_XEN) += simserial.o
-+obj-$(CONFIG_XEN) += hpsim_console.o
-diff -Nurp pristine-linux-2.6.12/arch/ia64/Kconfig linux-2.6.12-xen/arch/ia64/Kconfig
---- pristine-linux-2.6.12/arch/ia64/Kconfig	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ia64/Kconfig	2006-02-16 23:44:08.000000000 +0100
-@@ -46,6 +46,53 @@ config GENERIC_IOMAP
- 	bool
- 	default y
- 
-+config XEN
-+	bool
-+	default y
-+	help
-+	  Enable Xen hypervisor support.  Resulting kernel runs
-+	  both as a guest OS on Xen and natively on hardware.
-+
-+config ARCH_XEN
-+	bool
-+	default y
-+	help
-+	  TEMP ONLY. Needs to be on for drivers/xen to build.
-+
-+config XEN_PRIVILEGED_GUEST
-+	bool "Privileged Guest"
-+	default n
-+	help
-+	  Used in drivers/xen/privcmd.c.  Should go away?
-+
-+config XEN_PHYSDEV_ACCESS
-+	depends on XEN
-+	bool
-+	default y
-+
-+config XEN_BLKDEV_GRANT
-+	depends on XEN
-+	bool
-+	default y
-+
-+config XEN_BLKDEV_FRONTEND
-+	depends on XEN
-+	bool
-+	default y
-+
-+config XEN_VT
-+	bool "Override for turning on CONFIG_VT for domU"
-+	default y
-+	help
-+	  Hack that allows CONFIG_VT to be turned off for domU kernels
-+
-+config VT
-+	bool
-+	default y if XEN && XEN_VT
-+	default n if XEN && !XEN_VT
-+	help
-+	  Hack to turn off CONFIG_VT for domU
-+
- config SCHED_NO_NO_OMIT_FRAME_POINTER
- 	bool
- 	default y
-diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/entry.S linux-2.6.12-xen/arch/ia64/kernel/entry.S
---- pristine-linux-2.6.12/arch/ia64/kernel/entry.S	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ia64/kernel/entry.S	2006-02-16 23:44:08.000000000 +0100
-@@ -181,7 +181,7 @@ END(sys_clone)
-  *	called.  The code starting at .map relies on this.  The rest of the code
-  *	doesn't care about the interrupt masking status.
-  */
--GLOBAL_ENTRY(ia64_switch_to)
-+GLOBAL_ENTRY(__ia64_switch_to)
- 	.prologue
- 	alloc r16=ar.pfs,1,0,0,0
- 	DO_SAVE_SWITCH_STACK
-@@ -235,7 +235,7 @@ GLOBAL_ENTRY(ia64_switch_to)
- 	;;
- 	itr.d dtr[r25]=r23		// wire in new mapping...
- 	br.cond.sptk .done
--END(ia64_switch_to)
-+END(__ia64_switch_to)
- 
- /*
-  * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
-@@ -376,7 +376,7 @@ END(save_switch_stack)
-  *	- b7 holds address to return to
-  *	- must not touch r8-r11
-  */
--ENTRY(load_switch_stack)
-+GLOBAL_ENTRY(load_switch_stack)
- 	.prologue
- 	.altrp b7
- 
-@@ -500,7 +500,7 @@ END(clone)
- 	 * because some system calls (such as ia64_execve) directly
- 	 * manipulate ar.pfs.
- 	 */
--GLOBAL_ENTRY(ia64_trace_syscall)
-+GLOBAL_ENTRY(__ia64_trace_syscall)
- 	PT_REGS_UNWIND_INFO(0)
- 	/*
- 	 * We need to preserve the scratch registers f6-f11 in case the system
-@@ -570,7 +570,7 @@ strace_error:
- (p6)	mov r10=-1
- (p6)	mov r8=r9
- 	br.cond.sptk .strace_save_retval
--END(ia64_trace_syscall)
-+END(__ia64_trace_syscall)
- 
- 	/*
- 	 * When traced and returning from sigreturn, we invoke syscall_trace but then
-@@ -623,8 +623,11 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
- 	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
- 	mov r10=r0				// clear error indication in r10
- (p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
-+	;;
-+	// don't fall through, ia64_leave_syscall may be #define'd
-+	br.cond.sptk.few ia64_leave_syscall
-+	;;
- END(ia64_ret_from_syscall)
--	// fall through
- /*
-  * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
-  *	need to switch to bank 0 and doesn't restore the scratch registers.
-@@ -669,7 +672,7 @@ END(ia64_ret_from_syscall)
-  *	      ar.csd: cleared
-  *	      ar.ssd: cleared
-  */
--ENTRY(ia64_leave_syscall)
-+GLOBAL_ENTRY(__ia64_leave_syscall)
- 	PT_REGS_UNWIND_INFO(0)
- 	/*
- 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
-@@ -770,7 +773,7 @@ ENTRY(ia64_leave_syscall)
- 	mov.m ar.ccv=r0		// clear ar.ccv
- (pNonSys) br.cond.dpnt.many dont_preserve_current_frame
- 	br.cond.sptk.many rbs_switch
--END(ia64_leave_syscall)
-+END(__ia64_leave_syscall)
- 
- #ifdef CONFIG_IA32_SUPPORT
- GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
-@@ -782,10 +785,13 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
- 	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
- 	.mem.offset 8,0
- 	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
-+	;;
-+	// don't fall through, ia64_leave_kernel may be #define'd
-+	br.cond.sptk.few ia64_leave_kernel
-+	;;
- END(ia64_ret_from_ia32_execve)
--	// fall through
- #endif /* CONFIG_IA32_SUPPORT */
--GLOBAL_ENTRY(ia64_leave_kernel)
-+GLOBAL_ENTRY(__ia64_leave_kernel)
- 	PT_REGS_UNWIND_INFO(0)
- 	/*
- 	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
-@@ -1131,7 +1137,7 @@ skip_rbs_switch:
- 	ld8 r10=[r3]
- 	br.cond.sptk.many .work_processed_syscall	// re-check
- 
--END(ia64_leave_kernel)
-+END(__ia64_leave_kernel)
- 
- ENTRY(handle_syscall_error)
- 	/*
-@@ -1171,7 +1177,7 @@ END(ia64_invoke_schedule_tail)
- 	 * be set up by the caller.  We declare 8 input registers so the system call
- 	 * args get preserved, in case we need to restart a system call.
- 	 */
--ENTRY(notify_resume_user)
-+GLOBAL_ENTRY(notify_resume_user)
- 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- 	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
- 	mov r9=ar.unat
-@@ -1259,7 +1265,7 @@ ENTRY(sys_rt_sigreturn)
- 	adds sp=16,sp
- 	;;
- 	ld8 r9=[sp]				// load new ar.unat
--	mov.sptk b7=r8,ia64_leave_kernel
-+	mov.sptk b7=r8,__ia64_leave_kernel
- 	;;
- 	mov ar.unat=r9
- 	br.many b7
-diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/head.S linux-2.6.12-xen/arch/ia64/kernel/head.S
---- pristine-linux-2.6.12/arch/ia64/kernel/head.S	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ia64/kernel/head.S	2006-02-16 23:44:08.000000000 +0100
-@@ -370,6 +370,10 @@ start_ap:
- 
- 	// This is executed by the bootstrap processor (bsp) only:
- 
-+#ifdef CONFIG_XEN
-+	br.call.sptk.many rp=early_xen_setup
-+	;;
-+#endif
- #ifdef CONFIG_IA64_FW_EMU
- 	// initialize PAL & SAL emulator:
- 	br.call.sptk.many rp=sys_fw_init
-diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/pal.S linux-2.6.12-xen/arch/ia64/kernel/pal.S
---- pristine-linux-2.6.12/arch/ia64/kernel/pal.S	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ia64/kernel/pal.S	2006-02-16 23:44:08.000000000 +0100
-@@ -16,6 +16,7 @@
- #include <asm/processor.h>
- 
- 	.data
-+	.globl pal_entry_point
- pal_entry_point:
- 	data8 ia64_pal_default_handler
- 	.text
-@@ -53,7 +54,7 @@ END(ia64_pal_default_handler)
-  * in4	       1 ==> clear psr.ic,  0 ==> don't clear psr.ic
-  *
-  */
--GLOBAL_ENTRY(ia64_pal_call_static)
-+GLOBAL_ENTRY(__ia64_pal_call_static)
- 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
- 	alloc loc1 = ar.pfs,5,5,0,0
- 	movl loc2 = pal_entry_point
-@@ -90,7 +91,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
- 	;;
- 	srlz.d				// seralize restoration of psr.l
- 	br.ret.sptk.many b0
--END(ia64_pal_call_static)
-+END(__ia64_pal_call_static)
- 
- /*
-  * Make a PAL call using the stacked registers calling convention.
-diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/ptrace.c linux-2.6.12-xen/arch/ia64/kernel/ptrace.c
---- pristine-linux-2.6.12/arch/ia64/kernel/ptrace.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ia64/kernel/ptrace.c	2006-02-25 00:12:30.022559394 +0100
-@@ -945,6 +945,13 @@ access_uarea (struct task_struct *child,
- 				*data = (pt->cr_ipsr & IPSR_MASK);
- 			return 0;
- 
-+		      case PT_AR_RSC:
-+			if (write_access)
-+				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
-+			else
-+				*data = pt->ar_rsc;
-+			return 0;
-+
- 		      case PT_AR_RNAT:
- 			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
- 			rnat_addr = (long) ia64_rse_rnat_addr((long *)
-@@ -996,9 +1003,6 @@ access_uarea (struct task_struct *child,
- 		      case PT_AR_BSPSTORE:
- 			ptr = pt_reg_addr(pt, ar_bspstore);
- 			break;
--		      case PT_AR_RSC:
--			ptr = pt_reg_addr(pt, ar_rsc);
--			break;
- 		      case PT_AR_UNAT:
- 			ptr = pt_reg_addr(pt, ar_unat);
- 			break;
-@@ -1234,7 +1238,7 @@ ptrace_getregs (struct task_struct *chil
- static long
- ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
- {
--	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
-+	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
- 	struct unw_frame_info info;
- 	struct switch_stack *sw;
- 	struct ia64_fpreg fpval;
-@@ -1267,7 +1271,7 @@ ptrace_setregs (struct task_struct *chil
- 	/* app regs */
- 
- 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
--	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
-+	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
- 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
- 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
- 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
-@@ -1365,6 +1369,7 @@ ptrace_setregs (struct task_struct *chil
- 	retval |= __get_user(nat_bits, &ppr->nat);
- 
- 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
-+	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
- 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
- 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
- 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
-diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/setup.c linux-2.6.12-xen/arch/ia64/kernel/setup.c
---- pristine-linux-2.6.12/arch/ia64/kernel/setup.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ia64/kernel/setup.c	2006-02-16 23:44:08.000000000 +0100
-@@ -273,6 +273,9 @@ io_port_init (void)
- static inline int __init
- early_console_setup (char *cmdline)
- {
-+#ifdef CONFIG_XEN
-+	if (!early_xen_console_setup(cmdline)) return 0;
-+#endif
- #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
- 	{
- 		extern int sn_serial_console_early_setup(void);
-diff -Nurp pristine-linux-2.6.12/arch/ia64/kernel/signal.c linux-2.6.12-xen/arch/ia64/kernel/signal.c
---- pristine-linux-2.6.12/arch/ia64/kernel/signal.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ia64/kernel/signal.c	2006-02-25 00:12:30.022559394 +0100
-@@ -94,7 +94,7 @@ sys_sigaltstack (const stack_t __user *u
- static long
- restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
- {
--	unsigned long ip, flags, nat, um, cfm;
-+	unsigned long ip, flags, nat, um, cfm, rsc;
- 	long err;
- 
- 	/* Always make any pending restarted system calls return -EINTR */
-@@ -106,7 +106,7 @@ restore_sigcontext (struct sigcontext __
- 	err |= __get_user(ip, &sc->sc_ip);			/* instruction pointer */
- 	err |= __get_user(cfm, &sc->sc_cfm);
- 	err |= __get_user(um, &sc->sc_um);			/* user mask */
--	err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
-+	err |= __get_user(rsc, &sc->sc_ar_rsc);
- 	err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
- 	err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
- 	err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
-@@ -119,6 +119,7 @@ restore_sigcontext (struct sigcontext __
- 	err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8);	/* r15 */
- 
- 	scr->pt.cr_ifs = cfm | (1UL << 63);
-+	scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */
- 
- 	/* establish new instruction pointer: */
- 	scr->pt.cr_iip = ip & ~0x3UL;
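Both the ptrace path and the sigreturn path above apply the same fix: any ar.rsc value coming from user space is OR-ed with (3 << 2) so that the register stack privilege-level field stays at user level. A tiny illustrative helper for that idiom (the helper itself is not part of the patch):

static inline unsigned long force_rsc_pl3(unsigned long rsc)
{
	/* bits 2-3 of ar.rsc are the register stack privilege level */
	return rsc | (3UL << 2);
}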
-diff -Nurp pristine-linux-2.6.12/arch/ia64/Makefile linux-2.6.12-xen/arch/ia64/Makefile
---- pristine-linux-2.6.12/arch/ia64/Makefile	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ia64/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -11,6 +11,9 @@
- NM := $(CROSS_COMPILE)nm -B
- READELF := $(CROSS_COMPILE)readelf
- 
-+# following is temporary pending xen directory restructuring
-+NOSTDINC_FLAGS += -Iinclude/asm-xen
-+
- export AWK
- 
- CHECKFLAGS	+= -m64 -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
-@@ -57,9 +60,15 @@ core-$(CONFIG_IA64_GENERIC) 	+= arch/ia6
- core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
- core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
- core-$(CONFIG_IA64_SGI_SN2)	+= arch/ia64/sn/
-+core-$(CONFIG_XEN)		+= arch/ia64/xen/
- 
- drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
-+ifneq ($(CONFIG_XEN),y)
- drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
-+endif
-+ifneq ($(CONFIG_IA64_GENERIC),y)
-+drivers-$(CONFIG_XEN)		+= arch/ia64/hp/sim/
-+endif
- drivers-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/
- drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
- drivers-$(CONFIG_IA64_GENERIC)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
-@@ -83,6 +92,8 @@ archclean:
- 	$(Q)$(MAKE) $(clean)=$(boot)
- 
- CLEAN_FILES += include/asm-ia64/.offsets.h.stamp vmlinux.gz bootloader
-+#CLEAN_FILES += include/asm-xen/xen-public include/asm-ia64/xen/asm-xsi-offsets.h
-+#CLEAN_FILES += include/asm-xen/linux-public include/asm-xen/asm-ia64/hypervisor.h
- 
- MRPROPER_FILES += include/asm-ia64/offsets.h
- 
-@@ -95,11 +106,27 @@ include/asm-ia64/offsets.h: arch/ia64/ke
- 
- arch/ia64/kernel/asm-offsets.s: include/asm-ia64/.offsets.h.stamp
- 
-+#XEN_PATH ?= $(srctree)/../xen-ia64-unstable.hg/
- include/asm-ia64/.offsets.h.stamp:
- 	mkdir -p include/asm-ia64
- 	[ -s include/asm-ia64/offsets.h ] \
- 	 || echo "#define IA64_TASK_SIZE 0" > include/asm-ia64/offsets.h
- 	touch $@
-+	[ -e include/asm-xen/asm ] \
-+	 || ln -s asm-ia64 include/asm-xen/asm
-+#	[ -e include/asm-xen/xen-public ] \
-+#	 || ln -s $(XEN_PATH)/xen/include/public \
-+#		include/asm-xen/xen-public
-+#	[ -e include/asm-ia64/xen/asm-xsi-offsets.h ] \
-+#	 || ln -s $(XEN_PATH)/xen/include/asm-ia64/asm-xsi-offsets.h \
-+#		include/asm-ia64/xen/asm-xsi-offsets.h
-+#	[ -e include/asm-xen/linux-public ] \
-+#	 || ln -s $(XEN_PATH)/linux-2.6-xen-sparse/include/asm-xen/linux-public \
-+		include/asm-xen/linux-public
-+	[ -e include/asm-xen/asm-ia64/hypervisor.h ] \
-+	 || ln -s $(XEN_PATH)/linux-2.6-xen-sparse/include/asm-xen/asm-ia64/hypervisor.h \
-+		include/asm-xen/asm-ia64/hypervisor.h
-+
- 
- boot:	lib/lib.a vmlinux
- 	$(Q)$(MAKE) $(build)=$(boot) $@
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/coreMakefile linux-2.6.12-xen/arch/ia64/xen/drivers/coreMakefile
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/coreMakefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/coreMakefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,24 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+CPPFLAGS_vmlinux.lds += -U$(XENARCH)
-+
-+$(obj)/vmlinux.lds.S:
-+	@ln -fsn $(srctree)/arch/$(XENARCH)/kernel/vmlinux.lds.S $@
-+
-+
-+obj-y   := gnttab.o
-+obj-$(CONFIG_PROC_FS) += xen_proc.o
-+
-+ifeq ($(ARCH),ia64)
-+obj-y   += evtchn_ia64.o
-+obj-y   += xenia64_init.o
-+else
-+extra-y += vmlinux.lds
-+obj-y   += reboot.o evtchn.o fixup.o 
-+obj-$(CONFIG_SMP)     += smp.o		# setup_profiling_timer def'd in ia64
-+obj-$(CONFIG_NET)     += skbuff.o	# until networking is up on ia64
-+endif
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/evtchn_ia64.c linux-2.6.12-xen/arch/ia64/xen/drivers/evtchn_ia64.c
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/evtchn_ia64.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/evtchn_ia64.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,266 @@
-+/* NOTE: This file split off from evtchn.c because there was
-+   some discussion that the mechanism is sufficiently different.
-+   It may be possible to merge it back in the future... djm */
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <asm/hw_irq.h>
-+#include <asm-xen/evtchn.h>
-+
-+#define MAX_EVTCHN 1024
-+
-+/* Xen will never allocate port zero for any purpose. */
-+#define VALID_EVTCHN(_chn) (((_chn) != 0) && ((_chn) < MAX_EVTCHN))
-+
-+/* Binding types. Hey, only IRQT_VIRQ and IRQT_EVTCHN are supported now
-+ * for XEN/IA64 - ktian1
-+ */
-+enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
-+
-+/* Constructor for packed IRQ information. */
-+#define mk_irq_info(type, index, evtchn)				\
-+	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
-+/* Convenient shorthand for packed representation of an unbound IRQ. */
-+#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
-+/* Accessor macros for packed IRQ information. */
-+#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
-+#define index_from_irq(irq)  ((u8)(irq_info[irq] >> 16))
-+#define type_from_irq(irq)   ((u8)(irq_info[irq] >> 24))
-+
-+/* Packed IRQ information: binding type, sub-type index, and event channel. */
-+static u32 irq_info[NR_IRQS];
-+
-+/* Note for XEN/IA64: all event channels are bound to a single physical
-+ * irq vector, so in this file the event-channel port and the 'irq'
-+ * number are used interchangeably. - ktian1
-+ */
-+static struct {
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *);
-+	void *dev_id;
-+	char opened;	/* Whether allocated */
-+} evtchns[MAX_EVTCHN];
-+
-+/*
-+ * This lock protects updates to the following mapping and reference-count
-+ * arrays. The lock does not need to be acquired to read the mapping tables.
-+ */
-+static spinlock_t irq_mapping_update_lock;
-+
-+void mask_evtchn(int port)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	synch_set_bit(port, &s->evtchn_mask[0]);
-+}
-+EXPORT_SYMBOL(mask_evtchn);
-+
-+void unmask_evtchn(int port)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	unsigned int cpu = smp_processor_id();
-+	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
-+
-+#if 0	// FIXME: diverged from x86 evtchn.c
-+	/* Slow path (hypercall) if this is a non-local port. */
-+	if (unlikely(cpu != cpu_from_evtchn(port))) {
-+		evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
-+				   .u.unmask.port = port };
-+		(void)HYPERVISOR_event_channel_op(&op);
-+		return;
-+	}
-+#endif
-+
-+	synch_clear_bit(port, &s->evtchn_mask[0]);
-+
-+	/*
-+	 * The following is basically the equivalent of 'hw_resend_irq'. Just
-+	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
-+	 * masked.
-+	 */
-+	if (synch_test_bit(port, &s->evtchn_pending[0]) && 
-+	    !synch_test_and_set_bit(port / BITS_PER_LONG,
-+				    &vcpu_info->evtchn_pending_sel)) {
-+		vcpu_info->evtchn_upcall_pending = 1;
-+		if (!vcpu_info->evtchn_upcall_mask)
-+			force_evtchn_callback();
-+	}
-+}
-+EXPORT_SYMBOL(unmask_evtchn);
-+
-+
-+#define unbound_irq(e) (VALID_EVTCHN(e) && (!evtchns[(e)].opened))
-+int bind_virq_to_irqhandler(
-+	unsigned int virq,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+    evtchn_op_t op;
-+    int evtchn;
-+
-+    spin_lock(&irq_mapping_update_lock);
-+
-+    op.cmd = EVTCHNOP_bind_virq;
-+    op.u.bind_virq.virq = virq;
-+    op.u.bind_virq.vcpu = cpu;
-+    BUG_ON(HYPERVISOR_event_channel_op(&op) != 0 );
-+    evtchn = op.u.bind_virq.port;
-+
-+    if (!unbound_irq(evtchn))
-+	return -EINVAL;
-+
-+    evtchns[evtchn].handler = handler;
-+    evtchns[evtchn].dev_id = dev_id;
-+    evtchns[evtchn].opened = 1;
-+    irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-+
-+    unmask_evtchn(evtchn);
-+    spin_unlock(&irq_mapping_update_lock);
-+    return evtchn;
-+}
-+
-+int bind_evtchn_to_irqhandler(unsigned int evtchn,
-+                   irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+                   unsigned long irqflags, const char * devname, void *dev_id)
-+{
-+    spin_lock(&irq_mapping_update_lock);
-+
-+    if (!unbound_irq(evtchn))
-+	return -EINVAL;
-+
-+    evtchns[evtchn].handler = handler;
-+    evtchns[evtchn].dev_id = dev_id;
-+    evtchns[evtchn].opened = 1;
-+    irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
-+
-+    unmask_evtchn(evtchn);
-+    spin_unlock(&irq_mapping_update_lock);
-+    return evtchn;
-+}
-+
-+int bind_ipi_to_irqhandler(
-+	unsigned int ipi,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+    printk("%s is called which has not been supported now...?\n", __FUNCTION__);
-+    while(1);
-+}
-+
-+void unbind_from_irqhandler(unsigned int irq, void *dev_id)
-+{
-+    evtchn_op_t op;
-+    int evtchn = evtchn_from_irq(irq);
-+
-+    spin_lock(&irq_mapping_update_lock);
-+
-+    if (unbound_irq(irq))
-+        return;
-+
-+    op.cmd = EVTCHNOP_close;
-+    op.u.close.port = evtchn;
-+    BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+
-+    switch (type_from_irq(irq)) {
-+	case IRQT_VIRQ:
-+	    /* Add smp stuff later... */
-+	    break;
-+	case IRQT_IPI:
-+	    /* Add smp stuff later... */
-+	    break;
-+	default:
-+	    break;
-+    }
-+
-+    mask_evtchn(evtchn);
-+    evtchns[evtchn].handler = NULL;
-+    evtchns[evtchn].opened = 0;
-+
-+    spin_unlock(&irq_mapping_update_lock);
-+}
-+
-+void notify_remote_via_irq(int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (!unbound_irq(evtchn))
-+		notify_remote_via_evtchn(evtchn);
-+}
-+
-+irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+    unsigned long  l1, l2;
-+    unsigned int   l1i, l2i, port;
-+    irqreturn_t (*handler)(int, void *, struct pt_regs *);
-+    shared_info_t *s = HYPERVISOR_shared_info;
-+    vcpu_info_t   *vcpu_info = &s->vcpu_info[smp_processor_id()];
-+
-+    vcpu_info->evtchn_upcall_mask = 1;
-+    vcpu_info->evtchn_upcall_pending = 0;
-+
-+    /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
-+    l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-+    while ( l1 != 0 )
-+    {
-+        l1i = __ffs(l1);
-+        l1 &= ~(1UL << l1i);
-+
-+        while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
-+        {
-+            l2i = __ffs(l2);
-+            l2 &= ~(1UL << l2i);
-+
-+            port = (l1i * BITS_PER_LONG) + l2i;
-+            if ( (handler = evtchns[port].handler) != NULL )
-+	    {
-+		clear_evtchn(port);
-+                handler(port, evtchns[port].dev_id, regs);
-+	    }
-+            else
-+	    {
-+                evtchn_device_upcall(port);
-+	    }
-+        }
-+    }
-+    vcpu_info->evtchn_upcall_mask = 0;
-+    return IRQ_HANDLED;
-+}
-+
-+void force_evtchn_callback(void)
-+{
-+	//(void)HYPERVISOR_xen_version(0, NULL);
-+}
-+
-+static struct irqaction evtchn_irqaction = {
-+	.handler =	evtchn_interrupt,
-+	.flags =	SA_INTERRUPT,
-+	.name =		"xen-event-channel"
-+};
-+
-+int evtchn_irq = 0xe9;
-+void __init evtchn_init(void)
-+{
-+    shared_info_t *s = HYPERVISOR_shared_info;
-+    vcpu_info_t   *vcpu_info = &s->vcpu_info[smp_processor_id()];
-+
-+#if 0
-+    int ret;
-+    irq = assign_irq_vector(AUTO_ASSIGN);
-+    ret = request_irq(irq, evtchn_interrupt, 0, "xen-event-channel", NULL);
-+    if (ret < 0)
-+    {
-+	printk("xen-event-channel unable to get irq %d (%d)\n", irq, ret);
-+	return;
-+    }
-+#endif
-+    register_percpu_irq(evtchn_irq, &evtchn_irqaction);
-+
-+    vcpu_info->arch.evtchn_vector = evtchn_irq;
-+    printk("xen-event-channel using irq %d\n", evtchn_irq);
-+
-+    spin_lock_init(&irq_mapping_update_lock);
-+    memset(evtchns, 0, sizeof(evtchns));
-+}
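A hedged usage sketch for the interface defined above: how a driver would attach a handler to a Xen virtual IRQ on ia64. VIRQ_TIMER and the handler body are illustrative choices, not taken from this patch:

static irqreturn_t example_timer_evt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* process the virtual timer tick here */
	return IRQ_HANDLED;
}

static void example_bind(void)
{
	int evtchn = bind_virq_to_irqhandler(VIRQ_TIMER, smp_processor_id(),
					     example_timer_evt, SA_INTERRUPT,
					     "example-timer", NULL);
	if (evtchn < 0)
		printk("example: bind_virq_to_irqhandler failed (%d)\n", evtchn);
}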
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/Makefile linux-2.6.12-xen/arch/ia64/xen/drivers/Makefile
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,20 @@
-+
-+obj-y   += util.o
-+
-+obj-y	+= core/
-+obj-y	+= console/
-+obj-y	+= evtchn/
-+#obj-y	+= balloon/
-+obj-y	+= privcmd/
-+obj-y	+= blkback/
-+#obj-y	+= netback/
-+obj-y	+= blkfront/
-+obj-y	+= xenbus/
-+#obj-y	+= netfront/
-+#obj-$(CONFIG_XEN_PRIVILEGED_GUEST)	+= privcmd/
-+#obj-$(CONFIG_XEN_BLKDEV_BACKEND)	+= blkback/
-+#obj-$(CONFIG_XEN_NETDEV_BACKEND)	+= netback/
-+#obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= blkfront/
-+#obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
-+#obj-$(CONFIG_XEN_BLKDEV_TAP)    	+= blktap/
-+
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/blkback.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/blkback.c.patch
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/blkback.c.patch	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/blkback.c.patch	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,57 @@
-+diff -Naur xen/blkback/blkback.c xen.patched/blkback/blkback.c
-+--- xen/blkback/blkback.c	2005-09-23 10:54:50.000000000 -0600
-++++ xen.patched/blkback/blkback.c	2005-09-23 10:57:51.000000000 -0600
-+@@ -30,10 +30,16 @@
-+ static unsigned long mmap_vstart;
-+ #define MMAP_PAGES						\
-+ 	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-++#ifdef __ia64__
-++static void *pending_vaddrs[MMAP_PAGES];
-++#define MMAP_VADDR(_idx, _i) \
-++	(unsigned long)(pending_vaddrs[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
-++#else
-+ #define MMAP_VADDR(_req,_seg)						\
-+ 	(mmap_vstart +							\
-+ 	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
-+ 	 ((_seg) * PAGE_SIZE))
-++#endif
-+ 
-+ /*
-+  * Each outstanding request that we've passed to the lower device layers has a 
-+@@ -377,9 +383,13 @@
-+ 			goto bad_descriptor;
-+ 		}
-+ 
-++#ifdef __ia64__
-++		MMAP_VADDR(pending_idx,i) = gnttab_map_vaddr(map[i]);
-++#else
-+ 		phys_to_machine_mapping[__pa(MMAP_VADDR(
-+ 			pending_idx, i)) >> PAGE_SHIFT] =
-+ 			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
-++#endif
-+ 
-+ 		pending_handle(pending_idx, i) = map[i].handle;
-+ 	}
-+@@ -500,9 +510,22 @@
-+ 
-+ 	blkif_interface_init();
-+ 
-++#ifdef __ia64__
-++    {
-++	extern unsigned long alloc_empty_foreign_map_page_range(unsigned long pages);
-++	int i;
-++
-++	mmap_vstart =  alloc_empty_foreign_map_page_range(MMAP_PAGES);
-++	printk("Allocated mmap_vstart: 0x%lx\n", mmap_vstart);
-++	for(i = 0; i < MMAP_PAGES; i++)
-++	    pending_vaddrs[i] = mmap_vstart + (i << PAGE_SHIFT);
-++	BUG_ON(mmap_vstart == NULL);
-++    }
-++#else
-+ 	page = balloon_alloc_empty_page_range(MMAP_PAGES);
-+ 	BUG_ON(page == NULL);
-+ 	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-++#endif
-+ 
-+ 	pending_cons = 0;
-+ 	pending_prod = MAX_PENDING_REQS;
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/console.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/console.c.patch
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/console.c.patch	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/console.c.patch	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,18 @@
-+--- xen/console/console.c	2005-11-02 14:13:07.000000000 +0100
-++++ xen.patched/console/console.c	2005-11-02 14:21:20.000000000 +0100
-+@@ -768,9 +771,15 @@
-+ #endif
-+ 
-+ 	if (xen_start_info->flags & SIF_INITDOMAIN) {
-++#ifdef __ia64__
-++		xencons_priv_irq = bind_virq_to_evtchn(VIRQ_CONSOLE);
-++		bind_evtchn_to_irqhandler(xencons_priv_irq,
-++				xencons_priv_interrupt, 0, "console", NULL);
-++#else
-+ 		xencons_priv_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
-+ 		(void)request_irq(xencons_priv_irq,
-+ 				  xencons_priv_interrupt, 0, "console", NULL);
-++#endif
-+ 	} else {
-+ 		xencons_ring_register_receiver(xencons_rx);
-+ 	}
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/devmem.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/devmem.c.patch
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/devmem.c.patch	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/devmem.c.patch	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,3 @@
-+diff -Naur xen/core/devmem.c xen.patched/core/devmem.c
-+--- xen/core/devmem.c	2005-09-23 10:54:50.000000000 -0600
-++++ xen.patched/core/devmem.c	2005-09-23 10:57:51.000000000 -0600
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/gnttab.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/gnttab.c.patch
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/gnttab.c.patch	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/gnttab.c.patch	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,46 @@
-+diff -Naur xen/core/gnttab.c xen.patched/core/gnttab.c
-+--- xen/core/gnttab.c	2005-09-23 10:54:50.000000000 -0600
-++++ xen.patched/core/gnttab.c	2005-09-23 10:57:51.000000000 -0600
-+@@ -346,6 +350,10 @@
-+ 	if ( hypercall.op != __HYPERVISOR_grant_table_op )
-+ 		return -ENOSYS;
-+ 
-++
-++#ifdef __ia64__
-++	ret = HYPERVISOR_grant_table_op(hypercall.arg[0], (void *)hypercall.arg[1], hypercall.arg[2]);
-++#else
-+ 	/* hypercall-invoking asm taken from privcmd.c */
-+ 	__asm__ __volatile__ (
-+ 		"pushl %%ebx; pushl %%ecx; pushl %%edx; "
-+@@ -359,6 +367,7 @@
-+ 		TRAP_INSTR "; "
-+ 		"popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
-+ 		: "=a" (ret) : "0" (&hypercall) : "memory" );
-++#endif
-+ 
-+ 	return ret;
-+ }
-+@@ -423,8 +432,13 @@
-+ 	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
-+ 	BUG_ON(setup.status != 0);
-+ 
-++#ifdef __ia64__
-++	shared = __va(frames[0] << PAGE_SHIFT);
-++	printk("grant table at %p\n", shared);
-++#else
-+ 	for (i = 0; i < NR_GRANT_FRAMES; i++)
-+ 		set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
-++#endif
-+ 
-+ 	return 0;
-+ }
-+@@ -450,7 +466,9 @@
-+ 
-+ 	BUG_ON(gnttab_resume());
-+ 
-++#ifndef __ia64__
-+ 	shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
-++#endif
-+ 
-+ 	for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
-+ 		gnttab_list[i] = i + 1;
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/privcmd.c.patch linux-2.6.12-xen/arch/ia64/xen/drivers/patches/privcmd.c.patch
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/patches/privcmd.c.patch	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/patches/privcmd.c.patch	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,43 @@
-+diff -Naur xen/privcmd/privcmd.c xen.patched/privcmd/privcmd.c
-+--- xen/privcmd/privcmd.c	2005-09-23 10:54:50.000000000 -0600
-++++ xen.patched/privcmd/privcmd.c	2005-09-23 10:57:51.000000000 -0600
-+@@ -180,6 +183,15 @@
-+ 		for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
-+ 			if (get_user(mfn, p))
-+ 				return -EFAULT;
-++#ifdef __ia64__
-++			ret = remap_pfn_range(vma,
-++					      addr&PAGE_MASK,
-++					      mfn,
-++					      1<<PAGE_SHIFT,
-++					      vma->vm_page_prot);
-++			if (ret < 0)
-++			    goto batch_err;
-++#else
-+ 
-+ 			ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
-+ 			if (ret)
-+@@ -190,6 +202,7 @@
-+ 
-+ 			if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
-+ 				put_user(0xF0000000 | mfn, p);
-++#endif
-+ 		}
-+ 
-+ 		ret = 0;
-+@@ -205,6 +218,7 @@
-+ 	break;
-+ #endif
-+ 
-++#ifndef __ia64__
-+ 	case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN: {
-+ 		unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
-+ 		pgd_t *pgd = pgd_offset_k(m2pv);
-+@@ -216,6 +230,7 @@
-+ 			-EFAULT: 0;
-+ 	}
-+ 	break;
-++#endif
-+ 
-+ 	default:
-+ 		ret = -EINVAL;
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/README linux-2.6.12-xen/arch/ia64/xen/drivers/README
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/README	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/README	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+This is a temporary location for source/Makefiles that need to be
-+patched/reworked in drivers/xen to work with xenlinux/ia64.
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/drivers/xenia64_init.c linux-2.6.12-xen/arch/ia64/xen/drivers/xenia64_init.c
---- pristine-linux-2.6.12/arch/ia64/xen/drivers/xenia64_init.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/drivers/xenia64_init.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,55 @@
-+#ifdef __ia64__
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/efi.h>
-+#include <asm/sal.h>
-+#include <asm/hypervisor.h>
-+/* #include <asm-xen/evtchn.h> */
-+#include <linux/vmalloc.h>
-+
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)0xf100000000000000;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
-+
-+static int initialized;
-+start_info_t *xen_start_info;
-+
-+int xen_init(void)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+
-+	if (initialized)
-+		return running_on_xen ? 0 : -1;
-+
-+	if (!running_on_xen)
-+		return -1;
-+
-+	xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
-+	xen_start_info->flags = s->arch.flags;
-+	printk("Running on Xen! start_info_pfn=0x%lx nr_pages=%d flags=0x%x\n",
-+		s->arch.start_info_pfn, xen_start_info->nr_pages,
-+		xen_start_info->flags);
-+
-+	evtchn_init();
-+	initialized = 1;
-+	return 0;
-+}
-+
-+/* We just need a range of legal virtual addresses here, although in the
-+ * end an identity-mapped range is used for the gnttab mapping instead.
-+ */
-+unsigned long alloc_empty_foreign_map_page_range(unsigned long pages)
-+{
-+	struct vm_struct *vma;
-+
-+	if ( (vma = get_vm_area(PAGE_SIZE * pages, VM_ALLOC)) == NULL )
-+		return NULL;
-+
-+	return (unsigned long)vma->addr;
-+}
-+
-+#if 0
-+/* These should be define'd but some drivers use them without
-+ * a convenient arch include */
-+unsigned long mfn_to_pfn(unsigned long mfn) { return mfn; }
-+#endif
-+#endif
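
[xen_init() above locates start_info by turning the pfn published in shared_info into a kernel virtual address with __va(pfn << PAGE_SHIFT). A small sketch of that arithmetic, assuming illustrative DEMO_PAGE_SHIFT/DEMO_PAGE_OFFSET values rather than the real ia64 constants used by __va().]

#include <stdio.h>

/* Illustrative values only; the kernel's __va() uses the real ia64
 * direct-map base and page size. */
#define DEMO_PAGE_SHIFT   14ULL				/* e.g. 16KB pages */
#define DEMO_PAGE_OFFSET  0xe000000000000000ULL		/* assumed direct-map base */

static unsigned long long demo_va(unsigned long long pfn)
{
	return DEMO_PAGE_OFFSET + (pfn << DEMO_PAGE_SHIFT);
}

int main(void)
{
	unsigned long long start_info_pfn = 0x1234;	/* as read from shared_info */

	printf("start_info mapped at 0x%llx\n", demo_va(start_info_pfn));
	return 0;
}
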
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/hypercall.S linux-2.6.12-xen/arch/ia64/xen/hypercall.S
---- pristine-linux-2.6.12/arch/ia64/xen/hypercall.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/hypercall.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,323 @@
-+/*
-+ * Support routines for Xen hypercalls
-+ *
-+ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer at hp.com>
-+ */
-+
-+#include <linux/config.h>
-+#include <asm/processor.h>
-+#include <asm/asmmacro.h>
-+
-+GLOBAL_ENTRY(xen_get_ivr)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov r8=cr.ivr;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	movl r9=XSI_PSR_IC
-+	;;
-+	ld8 r10=[r9]
-+	;;
-+	st8 [r9]=r0
-+	;;
-+	XEN_HYPER_GET_IVR
-+	;;
-+	st8 [r9]=r10
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_get_ivr)
-+
-+GLOBAL_ENTRY(xen_get_tpr)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov r8=cr.tpr;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	movl r9=XSI_PSR_IC
-+	;;
-+	ld8 r10=[r9]
-+	;;
-+	st8 [r9]=r0
-+	;;
-+	XEN_HYPER_GET_TPR
-+	;;
-+	st8 [r9]=r10
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_get_tpr)
-+
-+GLOBAL_ENTRY(xen_set_tpr)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov cr.tpr=r32;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	movl r9=XSI_PSR_IC
-+	mov r8=r32
-+	;;
-+	ld8 r10=[r9]
-+	;;
-+	st8 [r9]=r0
-+	;;
-+	XEN_HYPER_SET_TPR
-+	;;
-+	st8 [r9]=r10
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_set_tpr)
-+
-+GLOBAL_ENTRY(xen_eoi)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov cr.eoi=r0;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	movl r9=XSI_PSR_IC
-+	mov r8=r32
-+	;;
-+	ld8 r10=[r9]
-+	;;
-+	st8 [r9]=r0
-+	;;
-+	XEN_HYPER_EOI
-+	;;
-+	st8 [r9]=r10
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_eoi)
-+
-+GLOBAL_ENTRY(xen_thash)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	thash r8=r32;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	movl r9=XSI_PSR_IC
-+	mov r8=r32
-+	;;
-+	ld8 r10=[r9]
-+	;;
-+	st8 [r9]=r0
-+	;;
-+	XEN_HYPER_THASH
-+	;;
-+	st8 [r9]=r10
-+	;;
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_thash)
-+
-+GLOBAL_ENTRY(xen_set_itm)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov cr.itm=r32;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	movl r9=XSI_PSR_IC
-+	mov r8=r32
-+	;;
-+	ld8 r10=[r9]
-+	;;
-+	st8 [r9]=r0
-+	;;
-+	XEN_HYPER_SET_ITM
-+	;;
-+	st8 [r9]=r10
-+	;;
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_set_itm)
-+
-+GLOBAL_ENTRY(xen_ptcga)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	ptc.ga r32,r33;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	movl r11=XSI_PSR_IC
-+	mov r8=r32
-+	mov r9=r33
-+	;;
-+	ld8 r10=[r11]
-+	;;
-+	st8 [r11]=r0
-+	;;
-+	XEN_HYPER_PTC_GA
-+	;;
-+	st8 [r11]=r10
-+	;;
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_ptcga)
-+
-+GLOBAL_ENTRY(xen_get_rr)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov r8=rr[r32];;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	movl r9=XSI_PSR_IC
-+	mov r8=r32
-+	;;
-+	ld8 r10=[r9]
-+	;;
-+	st8 [r9]=r0
-+	;;
-+	XEN_HYPER_GET_RR
-+	;;
-+	st8 [r9]=r10
-+	;;
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_get_rr)
-+
-+GLOBAL_ENTRY(xen_set_rr)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov rr[r32]=r33;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	movl r11=XSI_PSR_IC
-+	mov r8=r32
-+	mov r9=r33
-+	;;
-+	ld8 r10=[r11]
-+	;;
-+	st8 [r11]=r0
-+	;;
-+	XEN_HYPER_SET_RR
-+	;;
-+	st8 [r11]=r10
-+	;;
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_set_rr)
-+
-+GLOBAL_ENTRY(xen_set_kr)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.ne p7,p0=r8,r0;;
-+(p7)	br.cond.spnt.few 1f;
-+	;;
-+	cmp.eq p7,p0=r8,r0
-+	adds r8=-1,r8;;
-+(p7)	mov ar0=r9
-+(p7)	br.ret.sptk.many rp;;
-+	cmp.eq p7,p0=r8,r0
-+	adds r8=-1,r8;;
-+(p7)	mov ar1=r9
-+(p7)	br.ret.sptk.many rp;;
-+	cmp.eq p7,p0=r8,r0
-+	adds r8=-1,r8;;
-+(p7)	mov ar2=r9
-+(p7)	br.ret.sptk.many rp;;
-+	cmp.eq p7,p0=r8,r0
-+	adds r8=-1,r8;;
-+(p7)	mov ar3=r9
-+(p7)	br.ret.sptk.many rp;;
-+	cmp.eq p7,p0=r8,r0
-+	adds r8=-1,r8;;
-+(p7)	mov ar4=r9
-+(p7)	br.ret.sptk.many rp;;
-+	cmp.eq p7,p0=r8,r0
-+	adds r8=-1,r8;;
-+(p7)	mov ar5=r9
-+(p7)	br.ret.sptk.many rp;;
-+	cmp.eq p7,p0=r8,r0
-+	adds r8=-1,r8;;
-+(p7)	mov ar6=r9
-+(p7)	br.ret.sptk.many rp;;
-+	cmp.eq p7,p0=r8,r0
-+	adds r8=-1,r8;;
-+(p7)	mov ar7=r9
-+(p7)	br.ret.sptk.many rp;;
-+
-+1:	movl r11=XSI_PSR_IC
-+	mov r8=r32
-+	mov r9=r33
-+	;;
-+	ld8 r10=[r11]
-+	;;
-+	st8 [r11]=r0
-+	;;
-+	XEN_HYPER_SET_KR
-+	;;
-+	st8 [r11]=r10
-+	;;
-+	br.ret.sptk.many rp
-+	;;
-+END(xen_set_kr)
-+
-+GLOBAL_ENTRY(xen_fc)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	fc r32;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	ptc.e r96		// this is a "privified" fc r32
-+	;;
-+	br.ret.sptk.many rp
-+END(xen_fc)
-+
-+GLOBAL_ENTRY(xen_get_cpuid)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov r8=cpuid[r32];;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	mov r72=rr[r32]		// this is a "privified" mov r8=cpuid[r32]
-+	;;
-+	br.ret.sptk.many rp
-+END(xen_get_cpuid)
-+
-+GLOBAL_ENTRY(xen_get_pmd)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov r8=pmd[r32];;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	mov r72=pmc[r32] 	// this is a "privified" mov r8=pmd[r32]
-+	;;
-+	br.ret.sptk.many rp
-+END(xen_get_pmd)
-+
-+#ifdef CONFIG_IA32_SUPPORT
-+GLOBAL_ENTRY(xen_get_eflag)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov r8=ar24;;
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	mov ar24=r72		// this is a "privified" mov r8=ar.eflg
-+	;;
-+	br.ret.sptk.many rp
-+END(xen_get_eflag)
-+// some bits aren't set if pl!=0, see SDM vol1 3.1.8
-+GLOBAL_ENTRY(xen_set_eflag)
-+	movl r8=running_on_xen;;
-+	ld4 r8=[r8];;
-+	cmp.eq p7,p0=r8,r0;;
-+(p7)	mov ar24=r32
-+(p7)	br.ret.sptk.many rp
-+	;;
-+	// FIXME: this remains no-op'd because it generates
-+	// a privileged register (general exception) trap rather than
-+	// a privileged operation fault
-+	//mov ar24=r32
-+	;;
-+	br.ret.sptk.many rp
-+END(xen_set_eflag)
-+#endif
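
[Nearly every stub in hypercall.S above follows the same bracket: if running_on_xen is zero, execute the native instruction; otherwise save the word at XSI_PSR_IC, clear it, issue the hyperprivop, and write the saved value back. A minimal C model of that bracket follows; a plain variable stands in for the shared-page word and do_hyperprivop() is a hypothetical placeholder for the hyperprivop itself.]

#include <stdio.h>

static unsigned long vpsr_ic = 1;		/* stands in for XSI_PSR_IC, normally on */
static int running_on_xen = 1;

static void do_hyperprivop(const char *name)	/* placeholder, not a real API */
{
	printf("hyperprivop %s with vpsr.ic=%lu\n", name, vpsr_ic);
}

static void xen_style_op(const char *name)
{
	unsigned long saved;

	if (!running_on_xen) {
		printf("native %s\n", name);	/* the (p7) native path */
		return;
	}
	saved = vpsr_ic;			/* ld8 r10=[r9] */
	vpsr_ic = 0;				/* st8 [r9]=r0  */
	do_hyperprivop(name);			/* XEN_HYPER_... */
	vpsr_ic = saved;			/* st8 [r9]=r10 */
}

int main(void)
{
	xen_style_op("get_ivr");
	xen_style_op("set_tpr");
	return 0;
}
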
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/Makefile linux-2.6.12-xen/arch/ia64/xen/Makefile
---- pristine-linux-2.6.12/arch/ia64/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,5 @@
-+#
-+# Makefile for Xen components
-+#
-+
-+obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o xenconsole.o xen_ksyms.o
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenconsole.c linux-2.6.12-xen/arch/ia64/xen/xenconsole.c
---- pristine-linux-2.6.12/arch/ia64/xen/xenconsole.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/xenconsole.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,19 @@
-+#include <linux/config.h>
-+#include <linux/console.h>
-+
-+int
-+early_xen_console_setup (char *cmdline)
-+{
-+#ifdef CONFIG_XEN
-+#ifndef CONFIG_IA64_HP_SIM
-+	extern int running_on_xen;
-+	if (running_on_xen) {
-+		extern struct console hpsim_cons;
-+		hpsim_cons.flags |= CON_BOOT;
-+		register_console(&hpsim_cons);
-+		return 0;
-+	}
-+#endif
-+#endif
-+	return -1;
-+}
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenentry.S linux-2.6.12-xen/arch/ia64/xen/xenentry.S
---- pristine-linux-2.6.12/arch/ia64/xen/xenentry.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/xenentry.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,850 @@
-+/*
-+ * ia64/xen/entry.S
-+ *
-+ * Alternate kernel routines for Xen.  Heavily leveraged from
-+ *   ia64/kernel/entry.S
-+ *
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ *	Dan Magenheimer <dan.magenheimer at hp.com>
-+ */
-+
-+#include <linux/config.h>
-+
-+#include <asm/asmmacro.h>
-+#include <asm/cache.h>
-+#include <asm/errno.h>
-+#include <asm/kregs.h>
-+#include <asm/offsets.h>
-+#include <asm/pgtable.h>
-+#include <asm/percpu.h>
-+#include <asm/processor.h>
-+#include <asm/thread_info.h>
-+#include <asm/unistd.h>
-+
-+#ifdef CONFIG_XEN
-+#include "xenminstate.h"
-+#else
-+#include "minstate.h"
-+#endif
-+
-+/*
-+ * prev_task <- ia64_switch_to(struct task_struct *next)
-+ *	With Ingo's new scheduler, interrupts are disabled when this routine gets
-+ *	called.  The code starting at .map relies on this.  The rest of the code
-+ *	doesn't care about the interrupt masking status.
-+ */
-+#ifdef CONFIG_XEN
-+GLOBAL_ENTRY(xen_switch_to)
-+	.prologue
-+	alloc r16=ar.pfs,1,0,0,0
-+	movl r22=running_on_xen;;
-+	ld4 r22=[r22];;
-+	cmp.eq p7,p0=r22,r0
-+(p7)	br.cond.sptk.many __ia64_switch_to;;
-+#else
-+GLOBAL_ENTRY(ia64_switch_to)
-+	.prologue
-+	alloc r16=ar.pfs,1,0,0,0
-+#endif
-+	DO_SAVE_SWITCH_STACK
-+	.body
-+
-+	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
-+	movl r25=init_task
-+	mov r27=IA64_KR(CURRENT_STACK)
-+	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
-+	dep r20=0,in0,61,3		// physical address of "next"
-+	;;
-+	st8 [r22]=sp			// save kernel stack pointer of old task
-+	shr.u r26=r20,IA64_GRANULE_SHIFT
-+	cmp.eq p7,p6=r25,in0
-+	;;
-+#ifdef CONFIG_XEN
-+	movl r8=XSI_PSR_IC
-+	;;
-+	st4 [r8]=r0	// force psr.ic off for hyperprivop(s)
-+	;;
-+#endif
-+	/*
-+	 * If we've already mapped this task's page, we can skip doing it again.
-+	 */
-+(p6)	cmp.eq p7,p6=r26,r27
-+(p6)	br.cond.dpnt .map
-+	;;
-+.done:
-+#ifdef CONFIG_XEN
-+	// psr.ic already off
-+	// update "current" application register
-+	mov r8=IA64_KR_CURRENT
-+	mov r9=in0;;
-+	XEN_HYPER_SET_KR
-+	ld8 sp=[r21]			// load kernel stack pointer of new task
-+	movl r27=XSI_PSR_IC
-+	mov r8=1
-+	;;
-+	st4 [r27]=r8			// psr.ic back on
-+	;;
-+#else
-+(p6)	ssm psr.ic			// if we had to map, reenable the psr.ic bit FIRST!!!
-+	;;
-+(p6)	srlz.d
-+	ld8 sp=[r21]			// load kernel stack pointer of new task
-+	mov IA64_KR(CURRENT)=in0	// update "current" application register
-+#endif
-+	mov r8=r13			// return pointer to previously running task
-+	mov r13=in0			// set "current" pointer
-+	;;
-+	DO_LOAD_SWITCH_STACK
-+
-+#ifdef CONFIG_SMP
-+	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
-+#endif
-+	br.ret.sptk.many rp		// boogie on out in new context
-+
-+.map:
-+#ifdef CONFIG_XEN
-+	// psr.ic already off
-+#else
-+	rsm psr.ic			// interrupts (psr.i) are already disabled here
-+#endif
-+	movl r25=PAGE_KERNEL
-+	;;
-+	srlz.d
-+	or r23=r25,r20			// construct PA | page properties
-+	mov r25=IA64_GRANULE_SHIFT<<2
-+	;;
-+#ifdef CONFIG_XEN
-+	movl r8=XSI_ITIR
-+	;;
-+	st8 [r8]=r25
-+	;;
-+	movl r8=XSI_IFA
-+	;;
-+	st8 [r8]=in0			 // VA of next task...
-+	;;
-+	mov r25=IA64_TR_CURRENT_STACK
-+	// remember last page we mapped...
-+	mov r8=IA64_KR_CURRENT_STACK
-+	mov r9=r26;;
-+	XEN_HYPER_SET_KR;;
-+#else
-+	mov cr.itir=r25
-+	mov cr.ifa=in0			// VA of next task...
-+	;;
-+	mov r25=IA64_TR_CURRENT_STACK
-+	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
-+#endif
-+	;;
-+	itr.d dtr[r25]=r23		// wire in new mapping...
-+	br.cond.sptk .done
-+#ifdef CONFIG_XEN
-+END(xen_switch_to)
-+#else
-+END(ia64_switch_to)
-+#endif
-+
-+	/*
-+	 * Invoke a system call, but do some tracing before and after the call.
-+	 * We MUST preserve the current register frame throughout this routine
-+	 * because some system calls (such as ia64_execve) directly
-+	 * manipulate ar.pfs.
-+	 */
-+#ifdef CONFIG_XEN
-+GLOBAL_ENTRY(xen_trace_syscall)
-+	PT_REGS_UNWIND_INFO(0)
-+	movl r16=running_on_xen;;
-+	ld4 r16=[r16];;
-+	cmp.eq p7,p0=r16,r0
-+(p7)	br.cond.sptk.many __ia64_trace_syscall;;
-+#else
-+GLOBAL_ENTRY(ia64_trace_syscall)
-+	PT_REGS_UNWIND_INFO(0)
-+#endif
-+	/*
-+	 * We need to preserve the scratch registers f6-f11 in case the system
-+	 * call is sigreturn.
-+	 */
-+	adds r16=PT(F6)+16,sp
-+	adds r17=PT(F7)+16,sp
-+	;;
-+ 	stf.spill [r16]=f6,32
-+ 	stf.spill [r17]=f7,32
-+	;;
-+ 	stf.spill [r16]=f8,32
-+ 	stf.spill [r17]=f9,32
-+	;;
-+ 	stf.spill [r16]=f10
-+ 	stf.spill [r17]=f11
-+	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
-+	adds r16=PT(F6)+16,sp
-+	adds r17=PT(F7)+16,sp
-+	;;
-+	ldf.fill f6=[r16],32
-+	ldf.fill f7=[r17],32
-+	;;
-+	ldf.fill f8=[r16],32
-+	ldf.fill f9=[r17],32
-+	;;
-+	ldf.fill f10=[r16]
-+	ldf.fill f11=[r17]
-+	// the syscall number may have changed, so re-load it and re-calculate the
-+	// syscall entry-point:
-+	adds r15=PT(R15)+16,sp			// r15 = &pt_regs.r15 (syscall #)
-+	;;
-+	ld8 r15=[r15]
-+	mov r3=NR_syscalls - 1
-+	;;
-+	adds r15=-1024,r15
-+	movl r16=sys_call_table
-+	;;
-+	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
-+	cmp.leu p6,p7=r15,r3
-+	;;
-+(p6)	ld8 r20=[r20]				// load address of syscall entry point
-+(p7)	movl r20=sys_ni_syscall
-+	;;
-+	mov b6=r20
-+	br.call.sptk.many rp=b6			// do the syscall
-+.strace_check_retval:
-+	cmp.lt p6,p0=r8,r0			// syscall failed?
-+	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
-+	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
-+	mov r10=0
-+(p6)	br.cond.sptk strace_error		// syscall failed ->
-+	;;					// avoid RAW on r10
-+.strace_save_retval:
-+.mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
-+.mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
-+	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-+.ret3:	br.cond.sptk .work_pending_syscall_end
-+
-+strace_error:
-+	ld8 r3=[r2]				// load pt_regs.r8
-+	sub r9=0,r8				// negate return value to get errno value
-+	;;
-+	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
-+	adds r3=16,r2				// r3=&pt_regs.r10
-+	;;
-+(p6)	mov r10=-1
-+(p6)	mov r8=r9
-+	br.cond.sptk .strace_save_retval
-+#ifdef CONFIG_XEN
-+END(xen_trace_syscall)
-+#else
-+END(ia64_trace_syscall)
-+#endif
-+
-+/*
-+ * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
-+ *	need to switch to bank 0 and doesn't restore the scratch registers.
-+ *	To avoid leaking kernel bits, the scratch registers are set to
-+ *	the following known-to-be-safe values:
-+ *
-+ *		  r1: restored (global pointer)
-+ *		  r2: cleared
-+ *		  r3: 1 (when returning to user-level)
-+ *	      r8-r11: restored (syscall return value(s))
-+ *		 r12: restored (user-level stack pointer)
-+ *		 r13: restored (user-level thread pointer)
-+ *		 r14: cleared
-+ *		 r15: restored (syscall #)
-+ *	     r16-r17: cleared
-+ *		 r18: user-level b6
-+ *		 r19: cleared
-+ *		 r20: user-level ar.fpsr
-+ *		 r21: user-level b0
-+ *		 r22: cleared
-+ *		 r23: user-level ar.bspstore
-+ *		 r24: user-level ar.rnat
-+ *		 r25: user-level ar.unat
-+ *		 r26: user-level ar.pfs
-+ *		 r27: user-level ar.rsc
-+ *		 r28: user-level ip
-+ *		 r29: user-level psr
-+ *		 r30: user-level cfm
-+ *		 r31: user-level pr
-+ *	      f6-f11: cleared
-+ *		  pr: restored (user-level pr)
-+ *		  b0: restored (user-level rp)
-+ *	          b6: restored
-+ *		  b7: cleared
-+ *	     ar.unat: restored (user-level ar.unat)
-+ *	      ar.pfs: restored (user-level ar.pfs)
-+ *	      ar.rsc: restored (user-level ar.rsc)
-+ *	     ar.rnat: restored (user-level ar.rnat)
-+ *	 ar.bspstore: restored (user-level ar.bspstore)
-+ *	     ar.fpsr: restored (user-level ar.fpsr)
-+ *	      ar.ccv: cleared
-+ *	      ar.csd: cleared
-+ *	      ar.ssd: cleared
-+ */
-+#ifdef CONFIG_XEN
-+GLOBAL_ENTRY(xen_leave_syscall)
-+	PT_REGS_UNWIND_INFO(0)
-+	movl r22=running_on_xen;;
-+	ld4 r22=[r22];;
-+	cmp.eq p7,p0=r22,r0
-+(p7)	br.cond.sptk.many __ia64_leave_syscall;;
-+#else
-+ENTRY(ia64_leave_syscall)
-+	PT_REGS_UNWIND_INFO(0)
-+#endif
-+	/*
-+	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
-+	 * user- or fsys-mode, hence we disable interrupts early on.
-+	 *
-+	 * p6 controls whether current_thread_info()->flags needs to be checked for
-+	 * extra work.  We always check for extra work when returning to user-level.
-+	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
-+	 * is 0.  After extra work processing has been completed, execution
-+	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
-+	 * needs to be redone.
-+	 */
-+#ifdef CONFIG_PREEMPT
-+	rsm psr.i				// disable interrupts
-+	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
-+(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-+	;;
-+	.pred.rel.mutex pUStk,pKStk
-+(pKStk) ld4 r21=[r20]			// r21 <- preempt_count
-+(pUStk)	mov r21=0			// r21 <- 0
-+	;;
-+	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
-+#else /* !CONFIG_PREEMPT */
-+#ifdef CONFIG_XEN
-+	movl r2=XSI_PSR_I
-+	;;
-+(pUStk)	st4 [r2]=r0
-+#else
-+(pUStk)	rsm psr.i
-+#endif
-+	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
-+(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
-+#endif
-+.work_processed_syscall:
-+	adds r2=PT(LOADRS)+16,r12
-+	adds r3=PT(AR_BSPSTORE)+16,r12
-+	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
-+	;;
-+(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
-+	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
-+	mov b7=r0		// clear b7
-+	;;
-+	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
-+	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
-+(p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
-+	;;
-+	mov r16=ar.bsp				// M2  get existing backing store pointer
-+(p6)	cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
-+(p6)	br.cond.spnt .work_pending_syscall
-+	;;
-+	// start restoring the state saved on the kernel stack (struct pt_regs):
-+	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
-+	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
-+	mov f6=f0		// clear f6
-+	;;
-+	invala			// M0|1 invalidate ALAT
-+#ifdef CONFIG_XEN
-+	movl r29=XSI_PSR_IC
-+	;;
-+	st8	[r29]=r0	// note: clears both vpsr.i and vpsr.ic!
-+	;;
-+#else
-+	rsm psr.i | psr.ic	// M2 initiate turning off of interrupt and interruption collection
-+#endif
-+	mov f9=f0		// clear f9
-+
-+	ld8 r29=[r2],16		// load cr.ipsr
-+	ld8 r28=[r3],16			// load cr.iip
-+	mov f8=f0		// clear f8
-+	;;
-+	ld8 r30=[r2],16		// M0|1 load cr.ifs
-+	mov.m ar.ssd=r0		// M2 clear ar.ssd
-+	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
-+	;;
-+	ld8 r25=[r3],16		// M0|1 load ar.unat
-+	mov.m ar.csd=r0		// M2 clear ar.csd
-+	mov r22=r0		// clear r22
-+	;;
-+	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
-+(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
-+	mov f10=f0		// clear f10
-+	;;
-+	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
-+	ld8 r27=[r3],PT(PR)-PT(AR_RSC)	// load ar.rsc
-+	mov f11=f0		// clear f11
-+	;;
-+	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// load ar.rnat (may be garbage)
-+	ld8 r31=[r3],PT(R1)-PT(PR)		// load predicates
-+(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
-+	;;
-+	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// load ar.fpsr
-+	ld8.fill r1=[r3],16	// load r1
-+(pUStk) mov r17=1
-+	;;
-+	srlz.d			// M0  ensure interruption collection is off
-+	ld8.fill r13=[r3],16
-+	mov f7=f0		// clear f7
-+	;;
-+	ld8.fill r12=[r2]	// restore r12 (sp)
-+	ld8.fill r15=[r3]	// restore r15
-+	addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
-+	;;
-+(pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
-+(pUStk) st1 [r14]=r17
-+	mov b6=r18		// I0  restore b6
-+	;;
-+	mov r14=r0		// clear r14
-+	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
-+(pKStk) br.cond.dpnt.many skip_rbs_switch
-+
-+	mov.m ar.ccv=r0		// clear ar.ccv
-+(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
-+	br.cond.sptk.many rbs_switch
-+#ifdef CONFIG_XEN
-+END(xen_leave_syscall)
-+#else
-+END(ia64_leave_syscall)
-+#endif
-+
-+#ifdef CONFIG_XEN
-+GLOBAL_ENTRY(xen_leave_kernel)
-+	PT_REGS_UNWIND_INFO(0)
-+	movl r22=running_on_xen;;
-+	ld4 r22=[r22];;
-+	cmp.eq p7,p0=r22,r0
-+(p7)	br.cond.sptk.many __ia64_leave_kernel;;
-+#else
-+GLOBAL_ENTRY(ia64_leave_kernel)
-+	PT_REGS_UNWIND_INFO(0)
-+#endif
-+	/*
-+	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
-+	 * user- or fsys-mode, hence we disable interrupts early on.
-+	 *
-+	 * p6 controls whether current_thread_info()->flags needs to be checked for
-+	 * extra work.  We always check for extra work when returning to user-level.
-+	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
-+	 * is 0.  After extra work processing has been completed, execution
-+	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
-+	 * needs to be redone.
-+	 */
-+#ifdef CONFIG_PREEMPT
-+	rsm psr.i				// disable interrupts
-+	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
-+(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-+	;;
-+	.pred.rel.mutex pUStk,pKStk
-+(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
-+(pUStk)	mov r21=0			// r21 <- 0
-+	;;
-+	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
-+#else
-+#ifdef CONFIG_XEN
-+(pUStk)	movl r17=XSI_PSR_I
-+	;;
-+(pUStk)	st4 [r17]=r0
-+	;;
-+#else
-+(pUStk)	rsm psr.i
-+#endif
-+	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
-+(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
-+#endif
-+.work_processed_kernel:
-+	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
-+	;;
-+(p6)	ld4 r31=[r17]				// load current_thread_info()->flags
-+	adds r21=PT(PR)+16,r12
-+	;;
-+
-+	lfetch [r21],PT(CR_IPSR)-PT(PR)
-+	adds r2=PT(B6)+16,r12
-+	adds r3=PT(R16)+16,r12
-+	;;
-+	lfetch [r21]
-+	ld8 r28=[r2],8		// load b6
-+	adds r29=PT(R24)+16,r12
-+
-+	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
-+	adds r30=PT(AR_CCV)+16,r12
-+(p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
-+	;;
-+	ld8.fill r24=[r29]
-+	ld8 r15=[r30]		// load ar.ccv
-+(p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
-+	;;
-+	ld8 r29=[r2],16		// load b7
-+	ld8 r30=[r3],16		// load ar.csd
-+(p6)	br.cond.spnt .work_pending
-+	;;
-+	ld8 r31=[r2],16		// load ar.ssd
-+	ld8.fill r8=[r3],16
-+	;;
-+	ld8.fill r9=[r2],16
-+	ld8.fill r10=[r3],PT(R17)-PT(R10)
-+	;;
-+	ld8.fill r11=[r2],PT(R18)-PT(R11)
-+	ld8.fill r17=[r3],16
-+	;;
-+	ld8.fill r18=[r2],16
-+	ld8.fill r19=[r3],16
-+	;;
-+	ld8.fill r20=[r2],16
-+	ld8.fill r21=[r3],16
-+	mov ar.csd=r30
-+	mov ar.ssd=r31
-+	;;
-+#ifdef CONFIG_XEN
-+	movl r22=XSI_PSR_IC
-+	;;
-+	st8 [r22]=r0		// note: clears both vpsr.i and vpsr.ic!
-+	;;
-+#else
-+	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
-+#endif
-+	invala			// invalidate ALAT
-+	;;
-+	ld8.fill r22=[r2],24
-+	ld8.fill r23=[r3],24
-+	mov b6=r28
-+	;;
-+	ld8.fill r25=[r2],16
-+	ld8.fill r26=[r3],16
-+	mov b7=r29
-+	;;
-+	ld8.fill r27=[r2],16
-+	ld8.fill r28=[r3],16
-+	;;
-+	ld8.fill r29=[r2],16
-+	ld8.fill r30=[r3],24
-+	;;
-+	ld8.fill r31=[r2],PT(F9)-PT(R31)
-+	adds r3=PT(F10)-PT(F6),r3
-+	;;
-+	ldf.fill f9=[r2],PT(F6)-PT(F9)
-+	ldf.fill f10=[r3],PT(F8)-PT(F10)
-+	;;
-+	ldf.fill f6=[r2],PT(F7)-PT(F6)
-+	;;
-+	ldf.fill f7=[r2],PT(F11)-PT(F7)
-+	ldf.fill f8=[r3],32
-+	;;
-+	srlz.i			// ensure interruption collection is off
-+	mov ar.ccv=r15
-+	;;
-+	ldf.fill f11=[r2]
-+#ifdef CONFIG_XEN
-+	;;
-+	// r16-r31 all now hold bank1 values
-+	movl r2=XSI_BANK1_R16
-+	movl r3=XSI_BANK1_R16+8
-+	;;
-+	st8.spill [r2]=r16,16
-+	st8.spill [r3]=r17,16
-+	;;
-+	st8.spill [r2]=r18,16
-+	st8.spill [r3]=r19,16
-+	;;
-+	st8.spill [r2]=r20,16
-+	st8.spill [r3]=r21,16
-+	;;
-+	st8.spill [r2]=r22,16
-+	st8.spill [r3]=r23,16
-+	;;
-+	st8.spill [r2]=r24,16
-+	st8.spill [r3]=r25,16
-+	;;
-+	st8.spill [r2]=r26,16
-+	st8.spill [r3]=r27,16
-+	;;
-+	st8.spill [r2]=r28,16
-+	st8.spill [r3]=r29,16
-+	;;
-+	st8.spill [r2]=r30,16
-+	st8.spill [r3]=r31,16
-+	;;
-+	movl r2=XSI_BANKNUM;;
-+	st4 [r2]=r0;
-+#else
-+	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
-+#endif
-+	;;
-+(pUStk)	mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
-+	adds r16=PT(CR_IPSR)+16,r12
-+	adds r17=PT(CR_IIP)+16,r12
-+
-+(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
-+	nop.i 0
-+	nop.i 0
-+	;;
-+	ld8 r29=[r16],16	// load cr.ipsr
-+	ld8 r28=[r17],16	// load cr.iip
-+	;;
-+	ld8 r30=[r16],16	// load cr.ifs
-+	ld8 r25=[r17],16	// load ar.unat
-+	;;
-+	ld8 r26=[r16],16	// load ar.pfs
-+	ld8 r27=[r17],16	// load ar.rsc
-+	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
-+	;;
-+	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
-+	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
-+	;;
-+	ld8 r31=[r16],16	// load predicates
-+	ld8 r21=[r17],16	// load b0
-+	;;
-+	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
-+	ld8.fill r1=[r17],16	// load r1
-+	;;
-+	ld8.fill r12=[r16],16
-+	ld8.fill r13=[r17],16
-+(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
-+	;;
-+	ld8 r20=[r16],16	// ar.fpsr
-+	ld8.fill r15=[r17],16
-+	;;
-+	ld8.fill r14=[r16],16
-+	ld8.fill r2=[r17]
-+(pUStk)	mov r17=1
-+	;;
-+	ld8.fill r3=[r16]
-+(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
-+	shr.u r18=r19,16	// get byte size of existing "dirty" partition
-+	;;
-+	mov r16=ar.bsp		// get existing backing store pointer
-+	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
-+	;;
-+	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
-+(pKStk)	br.cond.dpnt skip_rbs_switch
-+
-+	/*
-+	 * Restore user backing store.
-+	 *
-+	 * NOTE: alloc, loadrs, and cover can't be predicated.
-+	 */
-+(pNonSys) br.cond.dpnt dont_preserve_current_frame
-+
-+rbs_switch:
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_COVER;
-+#else
-+	cover				// add current frame into dirty partition and set cr.ifs
-+#endif
-+	;;
-+	mov r19=ar.bsp			// get new backing store pointer
-+	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
-+	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
-+	;;
-+	sub r19=r19,r16			// calculate total byte size of dirty partition
-+	add r18=64,r18			// don't force in0-in7 into memory...
-+	;;
-+	shl r19=r19,16			// shift size of dirty partition into loadrs position
-+	;;
-+dont_preserve_current_frame:
-+	/*
-+	 * To prevent leaking bits between the kernel and user-space,
-+	 * we must clear the stacked registers in the "invalid" partition here.
-+	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
-+	 * 5 registers/cycle on McKinley).
-+	 */
-+#	define pRecurse	p6
-+#	define pReturn	p7
-+#ifdef CONFIG_ITANIUM
-+#	define Nregs	10
-+#else
-+#	define Nregs	14
-+#endif
-+	alloc loc0=ar.pfs,2,Nregs-2,2,0
-+	shr.u loc1=r18,9		// RNaTslots <= floor(dirtySize / (64*8))
-+	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
-+	;;
-+	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
-+	shladd in0=loc1,3,r17
-+	mov in1=0
-+	;;
-+	TEXT_ALIGN(32)
-+rse_clear_invalid:
-+#ifdef CONFIG_ITANIUM
-+	// cycle 0
-+ { .mii
-+	alloc loc0=ar.pfs,2,Nregs-2,2,0
-+	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
-+	add out0=-Nregs*8,in0
-+}{ .mfb
-+	add out1=1,in1			// increment recursion count
-+	nop.f 0
-+	nop.b 0				// can't do br.call here because of alloc (WAW on CFM)
-+	;;
-+}{ .mfi	// cycle 1
-+	mov loc1=0
-+	nop.f 0
-+	mov loc2=0
-+}{ .mib
-+	mov loc3=0
-+	mov loc4=0
-+(pRecurse) br.call.sptk.many b0=rse_clear_invalid
-+
-+}{ .mfi	// cycle 2
-+	mov loc5=0
-+	nop.f 0
-+	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
-+}{ .mib
-+	mov loc6=0
-+	mov loc7=0
-+(pReturn) br.ret.sptk.many b0
-+}
-+#else /* !CONFIG_ITANIUM */
-+	alloc loc0=ar.pfs,2,Nregs-2,2,0
-+	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
-+	add out0=-Nregs*8,in0
-+	add out1=1,in1			// increment recursion count
-+	mov loc1=0
-+	mov loc2=0
-+	;;
-+	mov loc3=0
-+	mov loc4=0
-+	mov loc5=0
-+	mov loc6=0
-+	mov loc7=0
-+(pRecurse) br.call.sptk.few b0=rse_clear_invalid
-+	;;
-+	mov loc8=0
-+	mov loc9=0
-+	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
-+	mov loc10=0
-+	mov loc11=0
-+(pReturn) br.ret.sptk.many b0
-+#endif /* !CONFIG_ITANIUM */
-+#	undef pRecurse
-+#	undef pReturn
-+	;;
-+	alloc r17=ar.pfs,0,0,0,0	// drop current register frame
-+	;;
-+	loadrs
-+	;;
-+skip_rbs_switch:
-+	mov ar.unat=r25		// M2
-+(pKStk)	extr.u r22=r22,21,1	// I0 extract current value of psr.pp from r22
-+(pLvSys)mov r19=r0		// A  clear r19 for leave_syscall, no-op otherwise
-+	;;
-+(pUStk)	mov ar.bspstore=r23	// M2
-+(pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
-+(pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
-+	;;
-+#ifdef CONFIG_XEN
-+	movl r25=XSI_IPSR
-+	;;
-+	st8[r25]=r29,XSI_IFS-XSI_IPSR
-+	;;
-+#else
-+	mov cr.ipsr=r29		// M2
-+#endif
-+	mov ar.pfs=r26		// I0
-+(pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise
-+
-+#ifdef CONFIG_XEN
-+(p9)	st8 [r25]=r30
-+	;;
-+	adds r25=XSI_IIP-XSI_IFS,r25
-+	;;
-+#else
-+(p9)	mov cr.ifs=r30		// M2
-+#endif
-+	mov b0=r21		// I0
-+(pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise
-+
-+	mov ar.fpsr=r20		// M2
-+#ifdef CONFIG_XEN
-+	st8	[r25]=r28
-+#else
-+	mov cr.iip=r28		// M2
-+#endif
-+	nop 0
-+	;;
-+(pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
-+	nop 0
-+(pLvSys)mov r2=r0
-+
-+	mov ar.rsc=r27		// M2
-+	mov pr=r31,-1		// I0
-+#ifdef CONFIG_XEN
-+	;;
-+	XEN_HYPER_RFI;
-+#else
-+	rfi			// B
-+#endif
-+
-+	/*
-+	 * On entry:
-+	 *	r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
-+	 *	r31 = current->thread_info->flags
-+	 * On exit:
-+	 *	p6 = TRUE if work-pending-check needs to be redone
-+	 */
-+.work_pending_syscall:
-+	add r2=-8,r2
-+	add r3=-8,r3
-+	;;
-+	st8 [r2]=r8
-+	st8 [r3]=r10
-+.work_pending:
-+	tbit.nz p6,p0=r31,TIF_SIGDELAYED		// signal delayed from  MCA/INIT/NMI/PMI context?
-+(p6)	br.cond.sptk.few .sigdelayed
-+	;;
-+	tbit.z p6,p0=r31,TIF_NEED_RESCHED		// current_thread_info()->need_resched==0?
-+(p6)	br.cond.sptk.few .notify
-+#ifdef CONFIG_PREEMPT
-+(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
-+	;;
-+(pKStk) st4 [r20]=r21
-+	ssm psr.i		// enable interrupts
-+#endif
-+	br.call.spnt.many rp=schedule
-+.ret9:	cmp.eq p6,p0=r0,r0				// p6 <- 1
-+#ifdef CONFIG_XEN
-+	movl r2=XSI_PSR_I
-+	;;
-+	st4 [r2]=r0
-+#else
-+	rsm psr.i		// disable interrupts
-+#endif
-+	;;
-+#ifdef CONFIG_PREEMPT
-+(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
-+	;;
-+(pKStk)	st4 [r20]=r0		// preempt_count() <- 0
-+#endif
-+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
-+	br.cond.sptk.many .work_processed_kernel	// re-check
-+
-+.notify:
-+(pUStk)	br.call.spnt.many rp=notify_resume_user
-+.ret10:	cmp.ne p6,p0=r0,r0				// p6 <- 0
-+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
-+	br.cond.sptk.many .work_processed_kernel	// don't re-check
-+
-+// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
-+// it could not be delivered.  Deliver it now.  The signal might be for us and
-+// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
-+// signal.
-+
-+.sigdelayed:
-+	br.call.sptk.many rp=do_sigdelayed
-+	cmp.eq p6,p0=r0,r0				// p6 <- 1, always re-check
-+(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
-+	br.cond.sptk.many .work_processed_kernel	// re-check
-+
-+.work_pending_syscall_end:
-+	adds r2=PT(R8)+16,r12
-+	adds r3=PT(R10)+16,r12
-+	;;
-+	ld8 r8=[r2]
-+	ld8 r10=[r3]
-+	br.cond.sptk.many .work_processed_syscall	// re-check
-+
-+#ifdef CONFIG_XEN
-+END(xen_leave_kernel)
-+#else
-+END(ia64_leave_kernel)
-+#endif
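
[The dont_preserve_current_frame/rse_clear_invalid path above zeroes the invalid part of the register stack Nregs registers per frame, recursing while more than Nregs remain and unwinding via the saved recursion count. A rough C model of that recursion follows; clear_chunk() is a hypothetical stand-in for the "mov locN=0" sequences and the byte count is invented for illustration.]

#include <stdio.h>

#define NREGS 14	/* the non-Itanium-1 value used in the file above */

static void clear_chunk(int depth)
{
	printf("depth %d: clearing %d stacked registers\n", depth, NREGS);
}

static void rse_clear_invalid(long bytes_left, int depth)
{
	clear_chunk(depth);				/* zero this frame's locals */
	if (bytes_left > NREGS * 8)			/* more than Nregs left? (re)curse */
		rse_clear_invalid(bytes_left - NREGS * 8, depth + 1);
	/* otherwise the br.ret chain unwinds the stacked frames */
}

int main(void)
{
	rse_clear_invalid(96 * 8, 0);	/* e.g. 96 stacked registers to clear */
	return 0;
}
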
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenhpski.c linux-2.6.12-xen/arch/ia64/xen/xenhpski.c
---- pristine-linux-2.6.12/arch/ia64/xen/xenhpski.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/xenhpski.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,19 @@
-+
-+extern unsigned long xen_get_cpuid(int);
-+
-+int
-+running_on_sim(void)
-+{
-+	int i;
-+	long cpuid[6];
-+
-+	for (i = 0; i < 5; ++i)
-+		cpuid[i] = xen_get_cpuid(i);
-+	if ((cpuid[0] & 0xff) != 'H') return 0;
-+	if ((cpuid[3] & 0xff) != 0x4) return 0;
-+	if (((cpuid[3] >> 8) & 0xff) != 0x0) return 0;
-+	if (((cpuid[3] >> 16) & 0xff) != 0x0) return 0;
-+	if (((cpuid[3] >> 24) & 0x7) != 0x7) return 0;
-+	return 1;
-+}
-+
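
[running_on_sim() above detects the HP Ski simulator by unpacking byte fields from the CPUID words returned through xen_get_cpuid(). The same byte tests in a standalone sketch, using invented CPUID values purely for illustration.]

#include <stdio.h>

/* Standalone model of the byte tests in running_on_sim() above; the sample
 * CPUID words passed from main() are made up, not real hardware values. */
static int looks_like_ski(unsigned long long cpuid0, unsigned long long cpuid3)
{
	if ((cpuid0 & 0xff) != 'H')		return 0;	/* vendor byte */
	if ((cpuid3 & 0xff) != 0x4)		return 0;
	if (((cpuid3 >> 8) & 0xff) != 0x0)	return 0;
	if (((cpuid3 >> 16) & 0xff) != 0x0)	return 0;
	if (((cpuid3 >> 24) & 0x7) != 0x7)	return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", looks_like_ski('H', 0x07000004ULL));	/* matches */
	printf("%d\n", looks_like_ski('G', 0x07000004ULL));	/* does not */
	return 0;
}
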
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenivt.S linux-2.6.12-xen/arch/ia64/xen/xenivt.S
---- pristine-linux-2.6.12/arch/ia64/xen/xenivt.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/xenivt.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2044 @@
-+/*
-+ * arch/ia64/xen/ivt.S
-+ *
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ *	Dan Magenheimer <dan.magenheimer at hp.com>
-+ */
-+/*
-+ * This file defines the interruption vector table used by the CPU.
-+ * It does not include one entry per possible cause of interruption.
-+ *
-+ * The first 20 entries of the table contain 64 bundles each while the
-+ * remaining 48 entries contain only 16 bundles each.
-+ *
-+ * The 64 bundles are used to allow inlining the whole handler for critical
-+ * interruptions like TLB misses.
-+ *
-+ *  For each entry, the comment is as follows:
-+ *
-+ *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
-+ *  entry offset ----/     /         /                  /          /
-+ *  entry number ---------/         /                  /          /
-+ *  size of the entry -------------/                  /          /
-+ *  vector name -------------------------------------/          /
-+ *  interruptions triggering this vector ----------------------/
-+ *
-+ * The table is 32KB in size and must be aligned on 32KB boundary.
-+ * (The CPU ignores the 15 lower bits of the address)
-+ *
-+ * Table is based upon EAS2.6 (Oct 1999)
-+ */
-+
-+#include <linux/config.h>
-+
-+#include <asm/asmmacro.h>
-+#include <asm/break.h>
-+#include <asm/ia32.h>
-+#include <asm/kregs.h>
-+#include <asm/offsets.h>
-+#include <asm/pgtable.h>
-+#include <asm/processor.h>
-+#include <asm/ptrace.h>
-+#include <asm/system.h>
-+#include <asm/thread_info.h>
-+#include <asm/unistd.h>
-+#include <asm/errno.h>
-+
-+#ifdef CONFIG_XEN
-+#define ia64_ivt xen_ivt
-+#endif
-+
-+#if 1
-+# define PSR_DEFAULT_BITS	psr.ac
-+#else
-+# define PSR_DEFAULT_BITS	0
-+#endif
-+
-+#if 0
-+  /*
-+   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
-+   * needed for something else before enabling this...
-+   */
-+# define DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;mov ar.k2=r16
-+#else
-+# define DBG_FAULT(i)
-+#endif
-+
-+#define MINSTATE_VIRT	/* needed by minstate.h */
-+#include "xenminstate.h"
-+
-+#define FAULT(n)									\
-+	mov r31=pr;									\
-+	mov r19=n;;			/* prepare to save predicates */		\
-+	br.sptk.many dispatch_to_fault_handler
-+
-+	.section .text.ivt,"ax"
-+
-+	.align 32768	// align on 32KB boundary
-+	.global ia64_ivt
-+ia64_ivt:
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
-+ENTRY(vhpt_miss)
-+	DBG_FAULT(0)
-+	/*
-+	 * The VHPT vector is invoked when the TLB entry for the virtual page table
-+	 * is missing.  This happens only as a result of a previous
-+	 * (the "original") TLB miss, which may either be caused by an instruction
-+	 * fetch or a data access (or non-access).
-+	 *
-+	 * What we do here is normal TLB miss handing for the _original_ miss, followed
-+	 * by inserting the TLB entry for the virtual page table page that the VHPT
-+	 * walker was attempting to access.  The latter gets inserted as long
-+	 * as both L1 and L2 have valid mappings for the faulting address.
-+	 * The TLB entry for the original miss gets inserted only if
-+	 * the L3 entry indicates that the page is present.
-+	 *
-+	 * do_page_fault gets invoked in the following cases:
-+	 *	- the faulting virtual address uses unimplemented address bits
-+	 *	- the faulting virtual address has no L1, L2, or L3 mapping
-+	 */
-+#ifdef CONFIG_XEN
-+	movl r16=XSI_IFA
-+	;;
-+	ld8 r16=[r16]
-+#ifdef CONFIG_HUGETLB_PAGE
-+	movl r18=PAGE_SHIFT
-+	movl r25=XSI_ITIR
-+	;;
-+	ld8 r25=[r25]
-+#endif
-+	;;
-+#else
-+	mov r16=cr.ifa				// get address that caused the TLB miss
-+#ifdef CONFIG_HUGETLB_PAGE
-+	movl r18=PAGE_SHIFT
-+	mov r25=cr.itir
-+#endif
-+#endif
-+	;;
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RSM_PSR_DT;
-+#else
-+	rsm psr.dt				// use physical addressing for data
-+#endif
-+	mov r31=pr				// save the predicate registers
-+	mov r19=IA64_KR(PT_BASE)		// get page table base address
-+	shl r21=r16,3				// shift bit 60 into sign bit
-+	shr.u r17=r16,61			// get the region number into r17
-+	;;
-+	shr r22=r21,3
-+#ifdef CONFIG_HUGETLB_PAGE
-+	extr.u r26=r25,2,6
-+	;;
-+	cmp.ne p8,p0=r18,r26
-+	sub r27=r26,r18
-+	;;
-+(p8)	dep r25=r18,r25,2,6
-+(p8)	shr r22=r22,r27
-+#endif
-+	;;
-+	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
-+	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
-+	;;
-+(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
-+
-+	srlz.d
-+	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir
-+
-+	.pred.rel "mutex", p6, p7
-+(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
-+(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
-+	;;
-+(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-+(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
-+	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-+	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
-+	;;
-+	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
-+	;;
-+(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
-+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
-+	;;
-+(p7)	ld8 r20=[r17]				// fetch the L2 entry (may be 0)
-+	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
-+	;;
-+(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L2 entry NULL?
-+	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
-+	;;
-+#ifdef CONFIG_XEN
-+(p7)	ld8 r18=[r21]				// read the L3 PTE
-+	movl r19=XSI_ISR
-+	;;
-+	ld8 r19=[r19]
-+	;;
-+(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
-+	movl r22=XSI_IHA
-+	;;
-+	ld8 r22=[r22]
-+	;;
-+#else
-+(p7)	ld8 r18=[r21]				// read the L3 PTE
-+	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
-+	;;
-+(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
-+	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
-+	;;					// avoid RAW on p7
-+#endif
-+(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
-+	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
-+	;;
-+#ifdef CONFIG_XEN
-+	mov r24=r8
-+	mov r8=r18
-+	;;
-+(p10)	XEN_HYPER_ITC_D
-+	;;
-+(p11)	XEN_HYPER_ITC_I
-+	;;
-+	mov r8=r24
-+	;;
-+(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
-+	;;
-+	movl r24=XSI_IFA
-+	;;
-+	st8 [r24]=r22
-+	;;
-+#else
-+(p10)	itc.i r18				// insert the instruction TLB entry
-+(p11)	itc.d r18				// insert the data TLB entry
-+(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
-+	mov cr.ifa=r22
-+#endif
-+
-+#ifdef CONFIG_HUGETLB_PAGE
-+(p8)	mov cr.itir=r25				// change to default page-size for VHPT
-+#endif
-+
-+	/*
-+	 * Now compute and insert the TLB entry for the virtual page table.  We never
-+	 * execute in a page table page so there is no need to set the exception deferral
-+	 * bit.
-+	 */
-+	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
-+	;;
-+#ifdef CONFIG_XEN
-+(p7)	mov r25=r8
-+(p7)	mov r8=r24
-+	;;
-+(p7)	XEN_HYPER_ITC_D
-+	;;
-+(p7)	mov r8=r25
-+	;;
-+#else
-+(p7)	itc.d r24
-+#endif
-+	;;
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+	 * cannot possibly affect the following loads:
-+	 */
-+	dv_serialize_data
-+
-+	/*
-+	 * Re-check L2 and L3 pagetable.  If they changed, we may have received a ptc.g
-+	 * between reading the pagetable and the "itc".  If so, flush the entry we
-+	 * inserted and retry.
-+	 */
-+	ld8 r25=[r21]				// read L3 PTE again
-+	ld8 r26=[r17]				// read L2 entry again
-+	;;
-+	cmp.ne p6,p7=r26,r20			// did L2 entry change
-+	mov r27=PAGE_SHIFT<<2
-+	;;
-+(p6)	ptc.l r22,r27				// purge PTE page translation
-+(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L3 PTE change
-+	;;
-+(p6)	ptc.l r16,r27				// purge translation
-+#endif
-+
-+	mov pr=r31,-1				// restore predicate registers
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RFI;
-+#else
-+	rfi
-+#endif
-+END(vhpt_miss)
-+
-+	.org ia64_ivt+0x400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
-+ENTRY(itlb_miss)
-+	DBG_FAULT(1)
-+	/*
-+	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
-+	 * page table.  If a nested TLB miss occurs, we switch into physical
-+	 * mode, walk the page table, and then re-execute the L3 PTE read
-+	 * and go on normally after that.
-+	 */
-+#ifdef CONFIG_XEN
-+	movl r16=XSI_IFA
-+	;;
-+	ld8 r16=[r16]
-+#else
-+	mov r16=cr.ifa				// get virtual address
-+#endif
-+	mov r29=b0				// save b0
-+	mov r31=pr				// save predicates
-+.itlb_fault:
-+#ifdef CONFIG_XEN
-+	movl r17=XSI_IHA
-+	;;
-+	ld8 r17=[r17]				// get virtual address of L3 PTE
-+#else
-+	mov r17=cr.iha				// get virtual address of L3 PTE
-+#endif
-+	movl r30=1f				// load nested fault continuation point
-+	;;
-+1:	ld8 r18=[r17]				// read L3 PTE
-+	;;
-+	mov b0=r29
-+	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
-+(p6)	br.cond.spnt page_fault
-+	;;
-+#ifdef CONFIG_XEN
-+	mov r19=r8
-+	mov r8=r18
-+	;;
-+	XEN_HYPER_ITC_I
-+	;;
-+	mov r8=r19
-+#else
-+	itc.i r18
-+#endif
-+	;;
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+	 * cannot possibly affect the following loads:
-+	 */
-+	dv_serialize_data
-+
-+	ld8 r19=[r17]				// read L3 PTE again and see if same
-+	mov r20=PAGE_SHIFT<<2			// setup page size for purge
-+	;;
-+	cmp.ne p7,p0=r18,r19
-+	;;
-+(p7)	ptc.l r16,r20
-+#endif
-+	mov pr=r31,-1
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RFI;
-+#else
-+	rfi
-+#endif
-+END(itlb_miss)
-+
-+	.org ia64_ivt+0x0800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
-+ENTRY(dtlb_miss)
-+	DBG_FAULT(2)
-+	/*
-+	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
-+	 * page table.  If a nested TLB miss occurs, we switch into physical
-+	 * mode, walk the page table, and then re-execute the L3 PTE read
-+	 * and go on normally after that.
-+	 */
-+#ifdef CONFIG_XEN
-+	movl r16=XSI_IFA
-+	;;
-+	ld8 r16=[r16]
-+#else
-+	mov r16=cr.ifa				// get virtual address
-+#endif
-+	mov r29=b0				// save b0
-+	mov r31=pr				// save predicates
-+dtlb_fault:
-+#ifdef CONFIG_XEN
-+	movl r17=XSI_IHA
-+	;;
-+	ld8 r17=[r17]				// get virtual address of L3 PTE
-+#else
-+	mov r17=cr.iha				// get virtual address of L3 PTE
-+#endif
-+	movl r30=1f				// load nested fault continuation point
-+	;;
-+1:	ld8 r18=[r17]				// read L3 PTE
-+	;;
-+	mov b0=r29
-+	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
-+(p6)	br.cond.spnt page_fault
-+	;;
-+#ifdef CONFIG_XEN
-+	mov r19=r8
-+	mov r8=r18
-+	;;
-+	XEN_HYPER_ITC_D
-+	;;
-+	mov r8=r19
-+	;;
-+#else
-+	itc.d r18
-+#endif
-+	;;
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+	 * cannot possibly affect the following loads:
-+	 */
-+	dv_serialize_data
-+
-+	ld8 r19=[r17]				// read L3 PTE again and see if same
-+	mov r20=PAGE_SHIFT<<2			// setup page size for purge
-+	;;
-+	cmp.ne p7,p0=r18,r19
-+	;;
-+(p7)	ptc.l r16,r20
-+#endif
-+	mov pr=r31,-1
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RFI;
-+#else
-+	rfi
-+#endif
-+END(dtlb_miss)
-+
-+	.org ia64_ivt+0x0c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
-+ENTRY(alt_itlb_miss)
-+	DBG_FAULT(3)
-+#ifdef CONFIG_XEN
-+	movl r31=XSI_IPSR
-+	;;
-+	ld8 r21=[r31],XSI_IFA-XSI_IPSR	// get ipsr, point to ifa
-+	movl r17=PAGE_KERNEL
-+	;;
-+	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-+	;;
-+	ld8 r16=[r31]		// get ifa
-+	mov r31=pr
-+	;;
-+#else
-+	mov r16=cr.ifa		// get address that caused the TLB miss
-+	movl r17=PAGE_KERNEL
-+	mov r21=cr.ipsr
-+	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-+	mov r31=pr
-+	;;
-+#endif
-+#ifdef CONFIG_DISABLE_VHPT
-+	shr.u r22=r16,61			// get the region number into r22
-+	;;
-+	cmp.gt p8,p0=6,r22			// user mode
-+	;;
-+#ifndef CONFIG_XEN
-+(p8)	thash r17=r16
-+	;;
-+(p8)	mov cr.iha=r17
-+#endif
-+(p8)	mov r29=b0				// save b0
-+(p8)	br.cond.dptk .itlb_fault
-+#endif
-+	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
-+	and r19=r19,r16		// clear ed, reserved bits, and PTE control bits
-+	shr.u r18=r16,57	// move address bit 61 to bit 4
-+	;;
-+	andcm r18=0x10,r18	// bit 4=~address-bit(61)
-+	cmp.ne p8,p0=r0,r23	// psr.cpl != 0?
-+	or r19=r17,r19		// insert PTE control bits into r19
-+	;;
-+	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
-+(p8)	br.cond.spnt page_fault
-+	;;
-+#ifdef CONFIG_XEN
-+	mov r18=r8
-+	mov r8=r19
-+	;;
-+	XEN_HYPER_ITC_I
-+	;;
-+	mov r8=r18
-+	;;
-+	mov pr=r31,-1
-+	;;
-+	XEN_HYPER_RFI;
-+#else
-+	itc.i r19		// insert the TLB entry
-+	mov pr=r31,-1
-+	rfi
-+#endif
-+END(alt_itlb_miss)
-+
-+	.org ia64_ivt+0x1000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
-+ENTRY(alt_dtlb_miss)
-+	DBG_FAULT(4)
-+#ifdef CONFIG_XEN
-+	movl r31=XSI_IPSR
-+	;;
-+	ld8 r21=[r31],XSI_ISR-XSI_IPSR	// get ipsr, point to isr
-+	movl r17=PAGE_KERNEL
-+	;;
-+	ld8 r20=[r31],XSI_IFA-XSI_ISR	// get isr, point to ifa
-+	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-+	;;
-+	ld8 r16=[r31]		// get ifa
-+	mov r31=pr
-+	;;
-+#else
-+	mov r16=cr.ifa		// get address that caused the TLB miss
-+	movl r17=PAGE_KERNEL
-+	mov r20=cr.isr
-+	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
-+	mov r21=cr.ipsr
-+	mov r31=pr
-+	;;
-+#endif
-+#ifdef CONFIG_DISABLE_VHPT
-+	shr.u r22=r16,61			// get the region number into r22
-+	;;
-+	cmp.gt p8,p0=6,r22			// access to region 0-5
-+	;;
-+#ifndef CONFIG_XEN
-+(p8)	thash r17=r16
-+	;;
-+(p8)	mov cr.iha=r17
-+#endif
-+(p8)	mov r29=b0				// save b0
-+(p8)	br.cond.dptk dtlb_fault
-+#endif
-+	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
-+	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
-+	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
-+	shr.u r18=r16,57			// move address bit 61 to bit 4
-+	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
-+	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
-+	;;
-+	andcm r18=0x10,r18	// bit 4=~address-bit(61)
-+	cmp.ne p8,p0=r0,r23
-+(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
-+(p8)	br.cond.spnt page_fault
-+
-+	dep r21=-1,r21,IA64_PSR_ED_BIT,1
-+	or r19=r19,r17		// insert PTE control bits into r19
-+	;;
-+	or r19=r19,r18		// set bit 4 (uncached) if the access was to region 6
-+(p6)	mov cr.ipsr=r21
-+	;;
-+#ifdef CONFIG_XEN
-+(p7)	mov r18=r8
-+(p7)	mov r8=r19
-+	;;
-+(p7)	XEN_HYPER_ITC_D
-+	;;
-+(p7)	mov r8=r18
-+	;;
-+	mov pr=r31,-1
-+	;;
-+	XEN_HYPER_RFI;
-+#else
-+(p7)	itc.d r19		// insert the TLB entry
-+	mov pr=r31,-1
-+	rfi
-+#endif
-+END(alt_dtlb_miss)
-+
-+	.org ia64_ivt+0x1400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
-+ENTRY(nested_dtlb_miss)
-+	/*
-+	 * In the absence of kernel bugs, we get here when the virtually mapped linear
-+	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
-+	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
-+	 * table is missing, a nested TLB miss fault is triggered and control is
-+	 * transferred to this point.  When this happens, we lookup the pte for the
-+	 * faulting address by walking the page table in physical mode and return to the
-+	 * continuation point passed in register r30 (or call page_fault if the address is
-+	 * not mapped).
-+	 *
-+	 * Input:	r16:	faulting address
-+	 *		r29:	saved b0
-+	 *		r30:	continuation address
-+	 *		r31:	saved pr
-+	 *
-+	 * Output:	r17:	physical address of L3 PTE of faulting address
-+	 *		r29:	saved b0
-+	 *		r30:	continuation address
-+	 *		r31:	saved pr
-+	 *
-+	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
-+	 */
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RSM_PSR_DT;
-+#else
-+	rsm psr.dt				// switch to using physical data addressing
-+#endif
-+	mov r19=IA64_KR(PT_BASE)		// get the page table base address
-+	shl r21=r16,3				// shift bit 60 into sign bit
-+	;;
-+	shr.u r17=r16,61			// get the region number into r17
-+	;;
-+	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
-+	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of faulting address
-+	;;
-+(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
-+
-+	srlz.d
-+	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir
-+
-+	.pred.rel "mutex", p6, p7
-+(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
-+(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
-+	;;
-+(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-+(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
-+	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-+	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
-+	;;
-+	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
-+	;;
-+(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
-+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
-+	;;
-+(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
-+	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
-+	;;
-+(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
-+	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
-+(p6)	br.cond.spnt page_fault
-+	mov b0=r30
-+	br.sptk.many b0				// return to continuation point
-+END(nested_dtlb_miss)
-+
-+	.org ia64_ivt+0x1800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
-+ENTRY(ikey_miss)
-+	DBG_FAULT(6)
-+	FAULT(6)
-+END(ikey_miss)
-+
-+	//-----------------------------------------------------------------------------------
-+	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
-+ENTRY(page_fault)
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_SSM_PSR_DT;
-+#else
-+	ssm psr.dt
-+	;;
-+	srlz.i
-+#endif
-+	;;
-+	SAVE_MIN_WITH_COVER
-+	alloc r15=ar.pfs,0,0,3,0
-+#ifdef CONFIG_XEN
-+	movl r3=XSI_ISR
-+	;;
-+	ld8 out1=[r3],XSI_IFA-XSI_ISR		// get vcr.isr, point to ifa
-+	;;
-+	ld8 out0=[r3]				// get vcr.ifa
-+	mov r14=1
-+	;;
-+	add r3=XSI_PSR_IC-XSI_IFA, r3		// point to vpsr.ic
-+	;;
-+	st4 [r3]=r14				// vpsr.ic = 1
-+	adds r3=8,r2				// set up second base pointer
-+	;;
-+#else
-+	mov out0=cr.ifa
-+	mov out1=cr.isr
-+	adds r3=8,r2				// set up second base pointer
-+	;;
-+	ssm psr.ic | PSR_DEFAULT_BITS
-+	;;
-+	srlz.i					// guarantee that interruption collection is on
-+	;;
-+#endif
-+#ifdef CONFIG_XEN
-+	br.cond.sptk.many	xen_page_fault
-+	;;
-+done_xen_page_fault:
-+#endif
-+(p15)	ssm psr.i				// restore psr.i
-+	movl r14=ia64_leave_kernel
-+	;;
-+	SAVE_REST
-+	mov rp=r14
-+	;;
-+	adds out2=16,r12			// out2 = pointer to pt_regs
-+	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
-+END(page_fault)
-+
-+	.org ia64_ivt+0x1c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
-+ENTRY(dkey_miss)
-+	DBG_FAULT(7)
-+	FAULT(7)
-+#ifdef CONFIG_XEN
-+	// Leaving this code inline above results in an IVT section overflow
-+	// There is no particular reason for this code to be here...
-+xen_page_fault:
-+(p15)	movl r3=XSI_PSR_I
-+	;;
-+(p15)	st4 [r3]=r14,XSI_PEND-XSI_PSR_I		// if (p15) vpsr.i = 1
-+	mov r14=r0
-+	;;
-+(p15)	ld4 r14=[r3]				// if (pending_interrupts)
-+	adds r3=8,r2				// re-set up second base pointer
-+	;;
-+(p15)	cmp.ne	p15,p0=r14,r0
-+	;;
-+	br.cond.sptk.many done_xen_page_fault
-+	;;
-+#endif
-+END(dkey_miss)
-+
-+	.org ia64_ivt+0x2000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
-+ENTRY(dirty_bit)
-+	DBG_FAULT(8)
-+	/*
-+	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
-+	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
-+	 * we address it through the virtual page table.  Most likely, the TLB entry for
-+	 * the relevant virtual page table page is still present in the TLB so we can
-+	 * normally do this without additional TLB misses.  In case the necessary virtual
-+	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
-+	 * up the physical address of the L3 PTE and then continue at label 1 below.
-+	 */
-+#ifdef CONFIG_XEN
-+	movl r16=XSI_IFA
-+	;;
-+	ld8 r16=[r16]
-+	;;
-+#else
-+	mov r16=cr.ifa				// get the address that caused the fault
-+#endif
-+	movl r30=1f				// load continuation point in case of nested fault
-+	;;
-+#ifdef CONFIG_XEN
-+#if 1
-+	mov r18=r8;
-+	mov r8=r16;
-+	XEN_HYPER_THASH;;
-+	mov r17=r8;
-+	mov r8=r18;;
-+#else
-+	tak r17=r80				// "privified" thash
-+#endif
-+#else
-+	thash r17=r16				// compute virtual address of L3 PTE
-+#endif
-+	mov r29=b0				// save b0 in case of nested fault
-+	mov r31=pr				// save pr
-+#ifdef CONFIG_SMP
-+	mov r28=ar.ccv				// save ar.ccv
-+	;;
-+1:	ld8 r18=[r17]
-+	;;					// avoid RAW on r18
-+	mov ar.ccv=r18				// set compare value for cmpxchg
-+	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
-+	;;
-+	cmpxchg8.acq r26=[r17],r25,ar.ccv
-+	mov r24=PAGE_SHIFT<<2
-+	;;
-+	cmp.eq p6,p7=r26,r18
-+	;;
-+(p6)	itc.d r25				// install updated PTE
-+	;;
-+	/*
-+	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+	 * cannot possibly affect the following loads:
-+	 */
-+	dv_serialize_data
-+
-+	ld8 r18=[r17]				// read PTE again
-+	;;
-+	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
-+	;;
-+(p7)	ptc.l r16,r24
-+	mov b0=r29				// restore b0
-+	mov ar.ccv=r28
-+#else
-+	;;
-+1:	ld8 r18=[r17]
-+	;;					// avoid RAW on r18
-+	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
-+	mov b0=r29				// restore b0
-+	;;
-+	st8 [r17]=r18				// store back updated PTE
-+	itc.d r18				// install updated PTE
-+#endif
-+	mov pr=r31,-1				// restore pr
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RFI;
-+#else
-+	rfi
-+#endif
-+END(dirty_bit)
-+
-+	.org ia64_ivt+0x2400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
-+ENTRY(iaccess_bit)
-+	DBG_FAULT(9)
-+	// Like Entry 8, except for instruction access
-+#ifdef CONFIG_XEN
-+	movl r16=XSI_IFA
-+	;;
-+	ld8 r16=[r16]
-+	;;
-+#else
-+	mov r16=cr.ifa				// get the address that caused the fault
-+#endif
-+	movl r30=1f				// load continuation point in case of nested fault
-+	mov r31=pr				// save predicates
-+#ifdef CONFIG_ITANIUM
-+	/*
-+	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
-+	 */
-+	mov r17=cr.ipsr
-+	;;
-+	mov r18=cr.iip
-+	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
-+	;;
-+(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
-+#endif /* CONFIG_ITANIUM */
-+	;;
-+#ifdef CONFIG_XEN
-+#if 1
-+	mov r18=r8;
-+	mov r8=r16;
-+	XEN_HYPER_THASH;;
-+	mov r17=r8;
-+	mov r8=r18;;
-+#else
-+	tak r17=r80				// "privified" thash
-+#endif
-+#else
-+	thash r17=r16				// compute virtual address of L3 PTE
-+#endif
-+	mov r29=b0				// save b0 in case of nested fault
-+#ifdef CONFIG_SMP
-+	mov r28=ar.ccv				// save ar.ccv
-+	;;
-+1:	ld8 r18=[r17]
-+	;;
-+	mov ar.ccv=r18				// set compare value for cmpxchg
-+	or r25=_PAGE_A,r18			// set the accessed bit
-+	;;
-+	cmpxchg8.acq r26=[r17],r25,ar.ccv
-+	mov r24=PAGE_SHIFT<<2
-+	;;
-+	cmp.eq p6,p7=r26,r18
-+	;;
-+#ifdef CONFIG_XEN
-+	mov r26=r8
-+	mov r8=r25
-+	;;
-+(p6)	XEN_HYPER_ITC_I
-+	;;
-+	mov r8=r26
-+	;;
-+#else
-+(p6)	itc.i r25				// install updated PTE
-+#endif
-+	;;
-+	/*
-+	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+	 * cannot possibly affect the following loads:
-+	 */
-+	dv_serialize_data
-+
-+	ld8 r18=[r17]				// read PTE again
-+	;;
-+	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
-+	;;
-+(p7)	ptc.l r16,r24
-+	mov b0=r29				// restore b0
-+	mov ar.ccv=r28
-+#else /* !CONFIG_SMP */
-+	;;
-+1:	ld8 r18=[r17]
-+	;;
-+	or r18=_PAGE_A,r18			// set the accessed bit
-+	mov b0=r29				// restore b0
-+	;;
-+	st8 [r17]=r18				// store back updated PTE
-+	itc.i r18				// install updated PTE
-+#endif /* !CONFIG_SMP */
-+	mov pr=r31,-1
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RFI;
-+#else
-+	rfi
-+#endif
-+END(iaccess_bit)
-+
-+	.org ia64_ivt+0x2800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
-+ENTRY(daccess_bit)
-+	DBG_FAULT(10)
-+	// Like Entry 8, except for data access
-+#ifdef CONFIG_XEN
-+	movl r16=XSI_IFA
-+	;;
-+	ld8 r16=[r16]
-+	;;
-+#else
-+	mov r16=cr.ifa				// get the address that caused the fault
-+#endif
-+	movl r30=1f				// load continuation point in case of nested fault
-+	;;
-+#ifdef CONFIG_XEN
-+#if 1
-+	mov r18=r8;
-+	mov r8=r16;
-+	XEN_HYPER_THASH;;
-+	mov r17=r8;
-+	mov r8=r18;;
-+#else
-+	tak r17=r80				// "privified" thash
-+#endif
-+#else
-+	thash r17=r16				// compute virtual address of L3 PTE
-+#endif
-+	mov r31=pr
-+	mov r29=b0				// save b0 in case of nested fault
-+#ifdef CONFIG_SMP
-+	mov r28=ar.ccv				// save ar.ccv
-+	;;
-+1:	ld8 r18=[r17]
-+	;;					// avoid RAW on r18
-+	mov ar.ccv=r18				// set compare value for cmpxchg
-+	or r25=_PAGE_A,r18			// set the accessed bit
-+	;;
-+	cmpxchg8.acq r26=[r17],r25,ar.ccv
-+	mov r24=PAGE_SHIFT<<2
-+	;;
-+	cmp.eq p6,p7=r26,r18
-+	;;
-+#ifdef CONFIG_XEN
-+	mov r26=r8
-+	mov r8=r25
-+	;;
-+(p6)	XEN_HYPER_ITC_D
-+	;;
-+	mov r8=r26
-+	;;
-+#else
-+(p6)	itc.d r25				// install updated PTE
-+#endif
-+	/*
-+	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
-+	 * cannot possibly affect the following loads:
-+	 */
-+	dv_serialize_data
-+	;;
-+	ld8 r18=[r17]				// read PTE again
-+	;;
-+	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
-+	;;
-+(p7)	ptc.l r16,r24
-+	mov ar.ccv=r28
-+#else
-+	;;
-+1:	ld8 r18=[r17]
-+	;;					// avoid RAW on r18
-+	or r18=_PAGE_A,r18			// set the accessed bit
-+	;;
-+	st8 [r17]=r18				// store back updated PTE
-+	itc.d r18				// install updated PTE
-+#endif
-+	mov b0=r29				// restore b0
-+	mov pr=r31,-1
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RFI;
-+#else
-+	rfi
-+#endif
-+END(daccess_bit)
-+
-+	.org ia64_ivt+0x2c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
-+ENTRY(break_fault)
-+	/*
-+	 * The streamlined system call entry/exit paths only save/restore the initial part
-+	 * of pt_regs.  This implies that the callers of system-calls must adhere to the
-+	 * normal procedure calling conventions.
-+	 *
-+	 *   Registers to be saved & restored:
-+	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
-+	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
-+	 * 	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
-+	 *   Registers to be restored only:
-+	 * 	r8-r11: output value from the system call.
-+	 *
-+	 * During system call exit, scratch registers (including r15) are modified/cleared
-+	 * to prevent leaking bits from kernel to user level.
-+	 */
-+	DBG_FAULT(11)
-+	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
-+#ifdef CONFIG_XEN
-+	movl r31=XSI_IPSR
-+	;;
-+	ld8 r29=[r31],XSI_IIP-XSI_IPSR		// get ipsr, point to iip
-+	mov r18=__IA64_BREAK_SYSCALL
-+	mov r21=ar.fpsr
-+	;;
-+	ld8 r28=[r31],XSI_IIM-XSI_IIP		// get iip, point to iim
-+	mov r19=b6
-+	mov r25=ar.unat
-+	;;
-+	ld8 r17=[r31]				// get iim
-+	mov r27=ar.rsc
-+	mov r26=ar.pfs
-+	;;
-+#else
-+	mov r17=cr.iim
-+	mov r18=__IA64_BREAK_SYSCALL
-+	mov r21=ar.fpsr
-+	mov r29=cr.ipsr
-+	mov r19=b6
-+	mov r25=ar.unat
-+	mov r27=ar.rsc
-+	mov r26=ar.pfs
-+	mov r28=cr.iip
-+#endif
-+	mov r31=pr				// prepare to save predicates
-+	mov r20=r1
-+	;;
-+	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
-+	cmp.eq p0,p7=r18,r17			// is this a system call? (p7 <- false, if so)
-+(p7)	br.cond.spnt non_syscall
-+	;;
-+	ld1 r17=[r16]				// load current->thread.on_ustack flag
-+	st1 [r16]=r0				// clear current->thread.on_ustack flag
-+	add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16	// set r1 for MINSTATE_START_SAVE_MIN_VIRT
-+	;;
-+	invala
-+
-+	/* adjust return address so we skip over the break instruction: */
-+
-+	extr.u r8=r29,41,2			// extract ei field from cr.ipsr
-+	;;
-+	cmp.eq p6,p7=2,r8			// isr.ei==2?
-+	mov r2=r1				// setup r2 for ia64_syscall_setup
-+	;;
-+(p6)	mov r8=0				// clear ei to 0
-+(p6)	adds r28=16,r28				// switch cr.iip to next bundle cr.ipsr.ei wrapped
-+(p7)	adds r8=1,r8				// increment ei to next slot
-+	;;
-+	cmp.eq pKStk,pUStk=r0,r17		// are we in kernel mode already?
-+	dep r29=r8,r29,41,2			// insert new ei into cr.ipsr
-+	;;
-+
-+	// switch from user to kernel RBS:
-+	MINSTATE_START_SAVE_MIN_VIRT
-+	br.call.sptk.many b7=ia64_syscall_setup
-+	;;
-+#ifdef CONFIG_XEN
-+	mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;;
-+#else
-+	MINSTATE_END_SAVE_MIN_VIRT		// switch to bank 1
-+#endif
-+#ifdef CONFIG_XEN
-+	movl r3=XSI_PSR_IC
-+	mov r16=1
-+	;;
-+#if 1
-+	st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC	// vpsr.ic = 1
-+	;;
-+(p15)	st4 [r3]=r16,XSI_PEND-XSI_PSR_I		// if (p15) vpsr.i = 1
-+	mov r16=r0
-+	;;
-+(p15)	ld4 r16=[r3]				// if (pending_interrupts)
-+	;;
-+	cmp.ne	p6,p0=r16,r0
-+	;;
-+(p6)	ssm	psr.i				//   do a real ssm psr.i
-+	;;
-+#else
-+//	st4 [r3]=r16,XSI_PSR_I-XSI_PSR_IC	// vpsr.ic = 1
-+	adds r3=XSI_PSR_I-XSI_PSR_IC,r3		// SKIP vpsr.ic = 1
-+	;;
-+(p15)	st4 [r3]=r16,XSI_PEND-XSI_PSR_I		// if (p15) vpsr.i = 1
-+	mov r16=r0
-+	;;
-+(p15)	ld4 r16=[r3]				// if (pending_interrupts)
-+	;;
-+	cmp.ne	p6,p0=r16,r0
-+	;;
-+//(p6)	ssm	psr.i				//   do a real ssm psr.i
-+//(p6)	XEN_HYPER_SSM_I;
-+(p6)	break 0x7;
-+	;;
-+#endif
-+	mov r3=NR_syscalls - 1
-+	;;
-+#else
-+	ssm psr.ic | PSR_DEFAULT_BITS
-+	;;
-+	srlz.i					// guarantee that interruption collection is on
-+	mov r3=NR_syscalls - 1
-+	;;
-+(p15)	ssm psr.i				// restore psr.i
-+#endif
-+	// p10==true means out registers are more than 8 or r15's Nat is true
-+(p10)	br.cond.spnt.many ia64_ret_from_syscall
-+	;;
-+	movl r16=sys_call_table
-+
-+	adds r15=-1024,r15			// r15 contains the syscall number---subtract 1024
-+	movl r2=ia64_ret_from_syscall
-+	;;
-+	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
-+	cmp.leu p6,p7=r15,r3			// (syscall > 0 && syscall < 1024 + NR_syscalls) ?
-+	mov rp=r2				// set the real return addr
-+	;;
-+(p6)	ld8 r20=[r20]				// load address of syscall entry point
-+(p7)	movl r20=sys_ni_syscall
-+
-+	add r2=TI_FLAGS+IA64_TASK_SIZE,r13
-+	;;
-+	ld4 r2=[r2]				// r2 = current_thread_info()->flags
-+	;;
-+	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
-+	;;
-+	cmp.eq p8,p0=r2,r0
-+	mov b6=r20
-+	;;
-+(p8)	br.call.sptk.many b6=b6			// ignore this return addr
-+	br.cond.sptk ia64_trace_syscall
-+	// NOT REACHED
-+END(break_fault)
-+
-+	.org ia64_ivt+0x3000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
-+ENTRY(interrupt)
-+	DBG_FAULT(12)
-+	mov r31=pr		// prepare to save predicates
-+	;;
-+	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
-+#ifdef CONFIG_XEN
-+	movl r3=XSI_PSR_IC
-+	mov r14=1
-+	;;
-+	st4 [r3]=r14
-+#else
-+	ssm psr.ic | PSR_DEFAULT_BITS
-+#endif
-+	;;
-+	adds r3=8,r2		// set up second base pointer for SAVE_REST
-+	srlz.i			// ensure everybody knows psr.ic is back on
-+	;;
-+	SAVE_REST
-+	;;
-+	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
-+#ifdef CONFIG_XEN
-+	;;
-+	br.call.sptk.many rp=xen_get_ivr
-+	;;
-+	mov out0=r8		// pass cr.ivr as first arg
-+#else
-+	mov out0=cr.ivr		// pass cr.ivr as first arg
-+#endif
-+	add out1=16,sp		// pass pointer to pt_regs as second arg
-+	;;
-+	srlz.d			// make sure we see the effect of cr.ivr
-+	movl r14=ia64_leave_kernel
-+	;;
-+	mov rp=r14
-+	br.call.sptk.many b6=ia64_handle_irq
-+END(interrupt)
-+
-+	.org ia64_ivt+0x3400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x3400 Entry 13 (size 64 bundles) Reserved
-+	DBG_FAULT(13)
-+	FAULT(13)
-+
-+	.org ia64_ivt+0x3800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x3800 Entry 14 (size 64 bundles) Reserved
-+	DBG_FAULT(14)
-+	FAULT(14)
-+
-+	/*
-+	 * There is no particular reason for this code to be here, other than that
-+	 * there happens to be space here that would go unused otherwise.  If this
-+	 * fault ever gets "unreserved", simply move the following code to a more
-+	 * suitable spot...
-+	 *
-+	 * ia64_syscall_setup() is a separate subroutine so that it can
-+	 *	allocate stacked registers so it can safely demine any
-+	 *	potential NaT values from the input registers.
-+	 *
-+	 * On entry:
-+	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
-+	 *	-  r1: stack pointer
-+	 *	-  r2: current task pointer
-+	 *	-  r3: preserved
-+	 *	- r11: original contents (saved ar.pfs to be saved)
-+	 *	- r12: original contents (sp to be saved)
-+	 *	- r13: original contents (tp to be saved)
-+	 *	- r15: original contents (syscall # to be saved)
-+	 *	- r18: saved bsp (after switching to kernel stack)
-+	 *	- r19: saved b6
-+	 *	- r20: saved r1 (gp)
-+	 *	- r21: saved ar.fpsr
-+	 *	- r22: kernel's register backing store base (krbs_base)
-+	 *	- r23: saved ar.bspstore
-+	 *	- r24: saved ar.rnat
-+	 *	- r25: saved ar.unat
-+	 *	- r26: saved ar.pfs
-+	 *	- r27: saved ar.rsc
-+	 *	- r28: saved cr.iip
-+	 *	- r29: saved cr.ipsr
-+	 *	- r31: saved pr
-+	 *	-  b0: original contents (to be saved)
-+	 * On exit:
-+	 *	- executing on bank 1 registers
-+	 *	- psr.ic enabled, interrupts restored
-+	 *	-  p10: TRUE if syscall is invoked with more than 8 out
-+	 *		registers or r15's Nat is true
-+	 *	-  r1: kernel's gp
-+	 *	-  r3: preserved (same as on entry)
-+	 *	-  r8: -EINVAL if p10 is true
-+	 *	- r12: points to kernel stack
-+	 *	- r13: points to current task
-+	 *	- p15: TRUE if interrupts need to be re-enabled
-+	 *	- ar.fpsr: set to kernel settings
-+	 */
-+#ifndef CONFIG_XEN
-+GLOBAL_ENTRY(ia64_syscall_setup)
-+#if PT(B6) != 0
-+# error This code assumes that b6 is the first field in pt_regs.
-+#endif
-+	st8 [r1]=r19				// save b6
-+	add r16=PT(CR_IPSR),r1			// initialize first base pointer
-+	add r17=PT(R11),r1			// initialize second base pointer
-+	;;
-+	alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
-+	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
-+	tnat.nz p8,p0=in0
-+
-+	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
-+	tnat.nz p9,p0=in1
-+(pKStk)	mov r18=r0				// make sure r18 isn't NaT
-+	;;
-+
-+	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
-+	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
-+	mov r28=b0				// save b0 (2 cyc)
-+	;;
-+
-+	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
-+	dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
-+(p8)	mov in0=-1
-+	;;
-+
-+	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
-+	extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
-+	and r8=0x7f,r19		// A		// get sof of ar.pfs
-+
-+	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
-+	tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
-+(p9)	mov in1=-1
-+	;;
-+
-+(pUStk) sub r18=r18,r22				// r18=RSE.ndirty*8
-+	tnat.nz p10,p0=in2
-+	add r11=8,r11
-+	;;
-+(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
-+(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
-+	tnat.nz p11,p0=in3
-+	;;
-+(p10)	mov in2=-1
-+	tnat.nz p12,p0=in4				// [I0]
-+(p11)	mov in3=-1
-+	;;
-+(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
-+(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
-+	shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
-+	;;
-+	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
-+	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
-+	tnat.nz p13,p0=in5				// [I0]
-+	;;
-+	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
-+	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
-+(p12)	mov in4=-1
-+	;;
-+
-+.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
-+.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
-+(p13)	mov in5=-1
-+	;;
-+	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
-+	tnat.nz p14,p0=in6
-+	cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
-+	;;
-+	stf8 [r16]=f1		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
-+(p9)	tnat.nz p10,p0=r15
-+	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)
-+
-+	st8.spill [r17]=r15			// save r15
-+	tnat.nz p8,p0=in7
-+	nop.i 0
-+
-+	mov r13=r2				// establish `current'
-+	movl r1=__gp				// establish kernel global pointer
-+	;;
-+(p14)	mov in6=-1
-+(p8)	mov in7=-1
-+	nop.i 0
-+
-+	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
-+	movl r17=FPSR_DEFAULT
-+	;;
-+	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
-+(p10)	mov r8=-EINVAL
-+	br.ret.sptk.many b7
-+END(ia64_syscall_setup)
-+#endif
-+
-+	.org ia64_ivt+0x3c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x3c00 Entry 15 (size 64 bundles) Reserved
-+	DBG_FAULT(15)
-+	FAULT(15)
-+
-+	/*
-+	 * Squatting in this space ...
-+	 *
-+	 * This special case dispatcher for illegal operation faults allows preserved
-+	 * registers to be modified through a callback function (asm only) that is handed
-+	 * back from the fault handler in r8. Up to three arguments can be passed to the
-+	 * callback function by returning an aggregate with the callback as its first
-+	 * element, followed by the arguments.
-+	 */
-+ENTRY(dispatch_illegal_op_fault)
-+	SAVE_MIN_WITH_COVER
-+	ssm psr.ic | PSR_DEFAULT_BITS
-+	;;
-+	srlz.i		// guarantee that interruption collection is on
-+	;;
-+(p15)	ssm psr.i	// restore psr.i
-+	adds r3=8,r2	// set up second base pointer for SAVE_REST
-+	;;
-+	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
-+	mov out0=ar.ec
-+	;;
-+	SAVE_REST
-+	;;
-+	br.call.sptk.many rp=ia64_illegal_op_fault
-+.ret0:	;;
-+	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
-+	mov out0=r9
-+	mov out1=r10
-+	mov out2=r11
-+	movl r15=ia64_leave_kernel
-+	;;
-+	mov rp=r15
-+	mov b6=r8
-+	;;
-+	cmp.ne p6,p0=0,r8
-+(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
-+	br.sptk.many ia64_leave_kernel
-+END(dispatch_illegal_op_fault)
-+
-+	.org ia64_ivt+0x4000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x4000 Entry 16 (size 64 bundles) Reserved
-+	DBG_FAULT(16)
-+	FAULT(16)
-+
-+	.org ia64_ivt+0x4400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x4400 Entry 17 (size 64 bundles) Reserved
-+	DBG_FAULT(17)
-+	FAULT(17)
-+
-+ENTRY(non_syscall)
-+	SAVE_MIN_WITH_COVER
-+
-+	// There is no particular reason for this code to be here, other than that
-+	// there happens to be space here that would go unused otherwise.  If this
-+	// fault ever gets "unreserved", simply move the following code to a more
-+	// suitable spot...
-+
-+	alloc r14=ar.pfs,0,0,2,0
-+	mov out0=cr.iim
-+	add out1=16,sp
-+	adds r3=8,r2			// set up second base pointer for SAVE_REST
-+
-+	ssm psr.ic | PSR_DEFAULT_BITS
-+	;;
-+	srlz.i				// guarantee that interruption collection is on
-+	;;
-+(p15)	ssm psr.i			// restore psr.i
-+	movl r15=ia64_leave_kernel
-+	;;
-+	SAVE_REST
-+	mov rp=r15
-+	;;
-+	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
-+END(non_syscall)
-+
-+	.org ia64_ivt+0x4800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x4800 Entry 18 (size 64 bundles) Reserved
-+	DBG_FAULT(18)
-+	FAULT(18)
-+
-+	/*
-+	 * There is no particular reason for this code to be here, other than that
-+	 * there happens to be space here that would go unused otherwise.  If this
-+	 * fault ever gets "unreserved", simply move the following code to a more
-+	 * suitable spot...
-+	 */
-+
-+ENTRY(dispatch_unaligned_handler)
-+	SAVE_MIN_WITH_COVER
-+	;;
-+	alloc r14=ar.pfs,0,0,2,0		// now it's safe (must be first in insn group!)
-+	mov out0=cr.ifa
-+	adds out1=16,sp
-+
-+	ssm psr.ic | PSR_DEFAULT_BITS
-+	;;
-+	srlz.i					// guarantee that interruption collection is on
-+	;;
-+(p15)	ssm psr.i				// restore psr.i
-+	adds r3=8,r2				// set up second base pointer
-+	;;
-+	SAVE_REST
-+	movl r14=ia64_leave_kernel
-+	;;
-+	mov rp=r14
-+	br.sptk.many ia64_prepare_handle_unaligned
-+END(dispatch_unaligned_handler)
-+
-+	.org ia64_ivt+0x4c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x4c00 Entry 19 (size 64 bundles) Reserved
-+	DBG_FAULT(19)
-+	FAULT(19)
-+
-+	/*
-+	 * There is no particular reason for this code to be here, other than that
-+	 * there happens to be space here that would go unused otherwise.  If this
-+	 * fault ever gets "unreserved", simply move the following code to a more
-+	 * suitable spot...
-+	 */
-+
-+ENTRY(dispatch_to_fault_handler)
-+	/*
-+	 * Input:
-+	 *	psr.ic:	off
-+	 *	r19:	fault vector number (e.g., 24 for General Exception)
-+	 *	r31:	contains saved predicates (pr)
-+	 */
-+	SAVE_MIN_WITH_COVER_R19
-+	alloc r14=ar.pfs,0,0,5,0
-+	mov out0=r15
-+#ifdef CONFIG_XEN
-+	movl out1=XSI_ISR
-+	;;
-+	adds out2=XSI_IFA-XSI_ISR,out1
-+	adds out3=XSI_IIM-XSI_ISR,out1
-+	adds out4=XSI_ITIR-XSI_ISR,out1
-+	;;
-+	ld8 out1=[out1]
-+	ld8 out2=[out2]
-+	ld8 out3=[out3]
-+	ld8 out4=[out4]
-+	;;
-+#else
-+	mov out1=cr.isr
-+	mov out2=cr.ifa
-+	mov out3=cr.iim
-+	mov out4=cr.itir
-+	;;
-+#endif
-+	ssm psr.ic | PSR_DEFAULT_BITS
-+	;;
-+	srlz.i					// guarantee that interruption collection is on
-+	;;
-+(p15)	ssm psr.i				// restore psr.i
-+	adds r3=8,r2				// set up second base pointer for SAVE_REST
-+	;;
-+	SAVE_REST
-+	movl r14=ia64_leave_kernel
-+	;;
-+	mov rp=r14
-+	br.call.sptk.many b6=ia64_fault
-+END(dispatch_to_fault_handler)
-+
-+//
-+// --- End of long entries, Beginning of short entries
-+//
-+
-+	.org ia64_ivt+0x5000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
-+ENTRY(page_not_present)
-+	DBG_FAULT(20)
-+	mov r16=cr.ifa
-+	rsm psr.dt
-+	/*
-+	 * The Linux page fault handler doesn't expect non-present pages to be in
-+	 * the TLB.  Flush the existing entry now, so we meet that expectation.
-+	 */
-+	mov r17=PAGE_SHIFT<<2
-+	;;
-+	ptc.l r16,r17
-+	;;
-+	mov r31=pr
-+	srlz.d
-+	br.sptk.many page_fault
-+END(page_not_present)
-+
-+	.org ia64_ivt+0x5100
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
-+ENTRY(key_permission)
-+	DBG_FAULT(21)
-+	mov r16=cr.ifa
-+	rsm psr.dt
-+	mov r31=pr
-+	;;
-+	srlz.d
-+	br.sptk.many page_fault
-+END(key_permission)
-+
-+	.org ia64_ivt+0x5200
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
-+ENTRY(iaccess_rights)
-+	DBG_FAULT(22)
-+	mov r16=cr.ifa
-+	rsm psr.dt
-+	mov r31=pr
-+	;;
-+	srlz.d
-+	br.sptk.many page_fault
-+END(iaccess_rights)
-+
-+	.org ia64_ivt+0x5300
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
-+ENTRY(daccess_rights)
-+	DBG_FAULT(23)
-+#ifdef CONFIG_XEN
-+	movl r16=XSI_IFA
-+	;;
-+	ld8 r16=[r16]
-+	;;
-+	XEN_HYPER_RSM_PSR_DT;
-+#else
-+	mov r16=cr.ifa
-+	rsm psr.dt
-+#endif
-+	mov r31=pr
-+	;;
-+	srlz.d
-+	br.sptk.many page_fault
-+END(daccess_rights)
-+
-+	.org ia64_ivt+0x5400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
-+ENTRY(general_exception)
-+	DBG_FAULT(24)
-+	mov r16=cr.isr
-+	mov r31=pr
-+	;;
-+	cmp4.eq p6,p0=0,r16
-+(p6)	br.sptk.many dispatch_illegal_op_fault
-+	;;
-+	mov r19=24		// fault number
-+	br.sptk.many dispatch_to_fault_handler
-+END(general_exception)
-+
-+	.org ia64_ivt+0x5500
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
-+ENTRY(disabled_fp_reg)
-+	DBG_FAULT(25)
-+	rsm psr.dfh		// ensure we can access fph
-+	;;
-+	srlz.d
-+	mov r31=pr
-+	mov r19=25
-+	br.sptk.many dispatch_to_fault_handler
-+END(disabled_fp_reg)
-+
-+	.org ia64_ivt+0x5600
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
-+ENTRY(nat_consumption)
-+	DBG_FAULT(26)
-+	FAULT(26)
-+END(nat_consumption)
-+
-+	.org ia64_ivt+0x5700
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
-+ENTRY(speculation_vector)
-+	DBG_FAULT(27)
-+	/*
-+	 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
-+	 * this part of the architecture is not implemented in hardware on some CPUs, such
-+	 * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
-+	 * the relative target (not yet sign extended).  So after sign extending it we
-+	 * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
-+	 * i.e., the slot to restart into.
-+	 *
-+	 * cr.iim contains zero_ext(imm21)
-+	 */
-+	mov r18=cr.iim
-+	;;
-+	mov r17=cr.iip
-+	shl r18=r18,43			// put sign bit in position (43=64-21)
-+	;;
-+
-+	mov r16=cr.ipsr
-+	shr r18=r18,39			// sign extend (39=43-4)
-+	;;
-+
-+	add r17=r17,r18			// now add the offset
-+	;;
-+	mov cr.iip=r17
-+	dep r16=0,r16,41,2		// clear EI
-+	;;
-+
-+	mov cr.ipsr=r16
-+	;;
-+
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RFI;
-+#else
-+	rfi
-+#endif
-+END(speculation_vector)
-+
-+	.org ia64_ivt+0x5800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5800 Entry 28 (size 16 bundles) Reserved
-+	DBG_FAULT(28)
-+	FAULT(28)
-+
-+	.org ia64_ivt+0x5900
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
-+ENTRY(debug_vector)
-+	DBG_FAULT(29)
-+	FAULT(29)
-+END(debug_vector)
-+
-+	.org ia64_ivt+0x5a00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
-+ENTRY(unaligned_access)
-+	DBG_FAULT(30)
-+	mov r16=cr.ipsr
-+	mov r31=pr		// prepare to save predicates
-+	;;
-+	br.sptk.many dispatch_unaligned_handler
-+END(unaligned_access)
-+
-+	.org ia64_ivt+0x5b00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
-+ENTRY(unsupported_data_reference)
-+	DBG_FAULT(31)
-+	FAULT(31)
-+END(unsupported_data_reference)
-+
-+	.org ia64_ivt+0x5c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
-+ENTRY(floating_point_fault)
-+	DBG_FAULT(32)
-+	FAULT(32)
-+END(floating_point_fault)
-+
-+	.org ia64_ivt+0x5d00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
-+ENTRY(floating_point_trap)
-+	DBG_FAULT(33)
-+	FAULT(33)
-+END(floating_point_trap)
-+
-+	.org ia64_ivt+0x5e00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
-+ENTRY(lower_privilege_trap)
-+	DBG_FAULT(34)
-+	FAULT(34)
-+END(lower_privilege_trap)
-+
-+	.org ia64_ivt+0x5f00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
-+ENTRY(taken_branch_trap)
-+	DBG_FAULT(35)
-+	FAULT(35)
-+END(taken_branch_trap)
-+
-+	.org ia64_ivt+0x6000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
-+ENTRY(single_step_trap)
-+	DBG_FAULT(36)
-+	FAULT(36)
-+END(single_step_trap)
-+
-+	.org ia64_ivt+0x6100
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6100 Entry 37 (size 16 bundles) Reserved
-+	DBG_FAULT(37)
-+	FAULT(37)
-+
-+	.org ia64_ivt+0x6200
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6200 Entry 38 (size 16 bundles) Reserved
-+	DBG_FAULT(38)
-+	FAULT(38)
-+
-+	.org ia64_ivt+0x6300
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6300 Entry 39 (size 16 bundles) Reserved
-+	DBG_FAULT(39)
-+	FAULT(39)
-+
-+	.org ia64_ivt+0x6400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6400 Entry 40 (size 16 bundles) Reserved
-+	DBG_FAULT(40)
-+	FAULT(40)
-+
-+	.org ia64_ivt+0x6500
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6500 Entry 41 (size 16 bundles) Reserved
-+	DBG_FAULT(41)
-+	FAULT(41)
-+
-+	.org ia64_ivt+0x6600
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6600 Entry 42 (size 16 bundles) Reserved
-+	DBG_FAULT(42)
-+	FAULT(42)
-+
-+	.org ia64_ivt+0x6700
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6700 Entry 43 (size 16 bundles) Reserved
-+	DBG_FAULT(43)
-+	FAULT(43)
-+
-+	.org ia64_ivt+0x6800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6800 Entry 44 (size 16 bundles) Reserved
-+	DBG_FAULT(44)
-+	FAULT(44)
-+
-+	.org ia64_ivt+0x6900
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
-+ENTRY(ia32_exception)
-+	DBG_FAULT(45)
-+	FAULT(45)
-+END(ia32_exception)
-+
-+	.org ia64_ivt+0x6a00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
-+ENTRY(ia32_intercept)
-+	DBG_FAULT(46)
-+#ifdef	CONFIG_IA32_SUPPORT
-+	mov r31=pr
-+	mov r16=cr.isr
-+	;;
-+	extr.u r17=r16,16,8	// get ISR.code
-+	mov r18=ar.eflag
-+	mov r19=cr.iim		// old eflag value
-+	;;
-+	cmp.ne p6,p0=2,r17
-+(p6)	br.cond.spnt 1f		// not a system flag fault
-+	xor r16=r18,r19
-+	;;
-+	extr.u r17=r16,18,1	// get the eflags.ac bit
-+	;;
-+	cmp.eq p6,p0=0,r17
-+(p6)	br.cond.spnt 1f		// eflags.ac bit didn't change
-+	;;
-+	mov pr=r31,-1		// restore predicate registers
-+#ifdef CONFIG_XEN
-+	XEN_HYPER_RFI;
-+#else
-+	rfi
-+#endif
-+
-+1:
-+#endif	// CONFIG_IA32_SUPPORT
-+	FAULT(46)
-+END(ia32_intercept)
-+
-+	.org ia64_ivt+0x6b00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
-+ENTRY(ia32_interrupt)
-+	DBG_FAULT(47)
-+#ifdef CONFIG_IA32_SUPPORT
-+	mov r31=pr
-+	br.sptk.many dispatch_to_ia32_handler
-+#else
-+	FAULT(47)
-+#endif
-+END(ia32_interrupt)
-+
-+	.org ia64_ivt+0x6c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6c00 Entry 48 (size 16 bundles) Reserved
-+	DBG_FAULT(48)
-+	FAULT(48)
-+
-+	.org ia64_ivt+0x6d00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6d00 Entry 49 (size 16 bundles) Reserved
-+	DBG_FAULT(49)
-+	FAULT(49)
-+
-+	.org ia64_ivt+0x6e00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6e00 Entry 50 (size 16 bundles) Reserved
-+	DBG_FAULT(50)
-+	FAULT(50)
-+
-+	.org ia64_ivt+0x6f00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x6f00 Entry 51 (size 16 bundles) Reserved
-+	DBG_FAULT(51)
-+	FAULT(51)
-+
-+	.org ia64_ivt+0x7000
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7000 Entry 52 (size 16 bundles) Reserved
-+	DBG_FAULT(52)
-+	FAULT(52)
-+
-+	.org ia64_ivt+0x7100
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7100 Entry 53 (size 16 bundles) Reserved
-+	DBG_FAULT(53)
-+	FAULT(53)
-+
-+	.org ia64_ivt+0x7200
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7200 Entry 54 (size 16 bundles) Reserved
-+	DBG_FAULT(54)
-+	FAULT(54)
-+
-+	.org ia64_ivt+0x7300
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7300 Entry 55 (size 16 bundles) Reserved
-+	DBG_FAULT(55)
-+	FAULT(55)
-+
-+	.org ia64_ivt+0x7400
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7400 Entry 56 (size 16 bundles) Reserved
-+	DBG_FAULT(56)
-+	FAULT(56)
-+
-+	.org ia64_ivt+0x7500
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7500 Entry 57 (size 16 bundles) Reserved
-+	DBG_FAULT(57)
-+	FAULT(57)
-+
-+	.org ia64_ivt+0x7600
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7600 Entry 58 (size 16 bundles) Reserved
-+	DBG_FAULT(58)
-+	FAULT(58)
-+
-+	.org ia64_ivt+0x7700
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7700 Entry 59 (size 16 bundles) Reserved
-+	DBG_FAULT(59)
-+	FAULT(59)
-+
-+	.org ia64_ivt+0x7800
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7800 Entry 60 (size 16 bundles) Reserved
-+	DBG_FAULT(60)
-+	FAULT(60)
-+
-+	.org ia64_ivt+0x7900
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7900 Entry 61 (size 16 bundles) Reserved
-+	DBG_FAULT(61)
-+	FAULT(61)
-+
-+	.org ia64_ivt+0x7a00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7a00 Entry 62 (size 16 bundles) Reserved
-+	DBG_FAULT(62)
-+	FAULT(62)
-+
-+	.org ia64_ivt+0x7b00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7b00 Entry 63 (size 16 bundles) Reserved
-+	DBG_FAULT(63)
-+	FAULT(63)
-+
-+	.org ia64_ivt+0x7c00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7c00 Entry 64 (size 16 bundles) Reserved
-+	DBG_FAULT(64)
-+	FAULT(64)
-+
-+	.org ia64_ivt+0x7d00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7d00 Entry 65 (size 16 bundles) Reserved
-+	DBG_FAULT(65)
-+	FAULT(65)
-+
-+	.org ia64_ivt+0x7e00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7e00 Entry 66 (size 16 bundles) Reserved
-+	DBG_FAULT(66)
-+	FAULT(66)
-+
-+#ifdef CONFIG_XEN
-+	/*
-+	 * There is no particular reason for this code to be here, other than that
-+	 * there happens to be space here that would go unused otherwise.  If this
-+	 * fault ever gets "unreserved", simply move the following code to a more
-+	 * suitable spot...
-+	 */
-+
-+GLOBAL_ENTRY(xen_bsw1)
-+	/* FIXME: THIS CODE IS NOT NaT SAFE! */
-+	movl r30=XSI_BANKNUM;
-+	mov r31=1;;
-+	st4 [r30]=r31;
-+	movl r30=XSI_BANK1_R16;
-+	movl r31=XSI_BANK1_R16+8;;
-+	ld8 r16=[r30],16; ld8 r17=[r31],16;;
-+	ld8 r18=[r30],16; ld8 r19=[r31],16;;
-+	ld8 r20=[r30],16; ld8 r21=[r31],16;;
-+	ld8 r22=[r30],16; ld8 r23=[r31],16;;
-+	ld8 r24=[r30],16; ld8 r25=[r31],16;;
-+	ld8 r26=[r30],16; ld8 r27=[r31],16;;
-+	ld8 r28=[r30],16; ld8 r29=[r31],16;;
-+	ld8 r30=[r30]; ld8 r31=[r31];;
-+	br.ret.sptk.many b0
-+#endif
-+
-+	.org ia64_ivt+0x7f00
-+/////////////////////////////////////////////////////////////////////////////////////////
-+// 0x7f00 Entry 67 (size 16 bundles) Reserved
-+	DBG_FAULT(67)
-+	FAULT(67)
-+
-+#ifdef CONFIG_IA32_SUPPORT
-+
-+	/*
-+	 * There is no particular reason for this code to be here, other than that
-+	 * there happens to be space here that would go unused otherwise.  If this
-+	 * fault ever gets "unreserved", simply move the following code to a more
-+	 * suitable spot...
-+	 */
-+
-+	// IA32 interrupt entry point
-+
-+ENTRY(dispatch_to_ia32_handler)
-+	SAVE_MIN
-+	;;
-+	mov r14=cr.isr
-+	ssm psr.ic | PSR_DEFAULT_BITS
-+	;;
-+	srlz.i					// guarantee that interruption collection is on
-+	;;
-+(p15)	ssm psr.i
-+	adds r3=8,r2		// Base pointer for SAVE_REST
-+	;;
-+	SAVE_REST
-+	;;
-+	mov r15=0x80
-+	shr r14=r14,16		// Get interrupt number
-+	;;
-+	cmp.ne p6,p0=r14,r15
-+(p6)	br.call.dpnt.many b6=non_ia32_syscall
-+
-+	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
-+	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
-+	;;
-+	cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
-+	ld8 r8=[r14]		// get r8
-+	;;
-+	st8 [r15]=r8		// save original EAX in r1 (IA32 procs don't use the GP)
-+	;;
-+	alloc r15=ar.pfs,0,0,6,0	// must be first in an insn group
-+	;;
-+	ld4 r8=[r14],8		// r8 == eax (syscall number)
-+	mov r15=IA32_NR_syscalls
-+	;;
-+	cmp.ltu.unc p6,p7=r8,r15
-+	ld4 out1=[r14],8	// r9 == ecx
-+	;;
-+	ld4 out2=[r14],8	// r10 == edx
-+	;;
-+	ld4 out0=[r14]		// r11 == ebx
-+	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
-+	;;
-+	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
-+	;;
-+	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
-+	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
-+	;;
-+	ld4 out4=[r14]		// r15 == edi
-+	movl r16=ia32_syscall_table
-+	;;
-+(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
-+	ld4 r2=[r2]		// r2 = current_thread_info()->flags
-+	;;
-+	ld8 r16=[r16]
-+	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
-+	;;
-+	mov b6=r16
-+	movl r15=ia32_ret_from_syscall
-+	cmp.eq p8,p0=r2,r0
-+	;;
-+	mov rp=r15
-+(p8)	br.call.sptk.many b6=b6
-+	br.cond.sptk ia32_trace_syscall
-+
-+non_ia32_syscall:
-+	alloc r15=ar.pfs,0,0,2,0
-+	mov out0=r14				// interrupt #
-+	add out1=16,sp				// pointer to pt_regs
-+	;;			// avoid WAW on CFM
-+	br.call.sptk.many rp=ia32_bad_interrupt
-+.ret1:	movl r15=ia64_leave_kernel
-+	;;
-+	mov rp=r15
-+	br.ret.sptk.many rp
-+END(dispatch_to_ia32_handler)
-+
-+#endif /* CONFIG_IA32_SUPPORT */
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xen_ksyms.c linux-2.6.12-xen/arch/ia64/xen/xen_ksyms.c
---- pristine-linux-2.6.12/arch/ia64/xen/xen_ksyms.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/xen_ksyms.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,12 @@
-+/*
-+ * Architecture-specific kernel symbols
-+ *
-+ * Don't put any exports here unless it's defined in an assembler file.
-+ * All other exports should be put directly after the definition.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+
-+extern int is_running_on_xen(void);
-+EXPORT_SYMBOL(is_running_on_xen);
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenminstate.h linux-2.6.12-xen/arch/ia64/xen/xenminstate.h
---- pristine-linux-2.6.12/arch/ia64/xen/xenminstate.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/xenminstate.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,367 @@
-+#include <linux/config.h>
-+
-+#include <asm/cache.h>
-+
-+#ifdef CONFIG_XEN
-+#include "../kernel/entry.h"
-+#else
-+#include "entry.h"
-+#endif
-+
-+/*
-+ * For ivt.s we want to access the stack virtually so we don't have to disable translation
-+ * on interrupts.
-+ *
-+ *  On entry:
-+ *	r1:	pointer to current task (ar.k6)
-+ */
-+#define MINSTATE_START_SAVE_MIN_VIRT								\
-+(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
-+	;;											\
-+(pUStk)	mov.m r24=ar.rnat;									\
-+(pUStk)	addl r22=IA64_RBS_OFFSET,r1;			/* compute base of RBS */		\
-+(pKStk) mov r1=sp;					/* get sp  */				\
-+	;;											\
-+(pUStk) lfetch.fault.excl.nt1 [r22];								\
-+(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
-+(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
-+	;;											\
-+(pUStk)	mov ar.bspstore=r22;				/* switch to kernel RBS */		\
-+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;			/* if in kernel mode, use sp (r12) */	\
-+	;;											\
-+(pUStk)	mov r18=ar.bsp;										\
-+(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
-+
-+#define MINSTATE_END_SAVE_MIN_VIRT								\
-+	bsw.1;			/* switch back to bank 1 (must be last in insn group) */	\
-+	;;
-+
-+/*
-+ * For mca_asm.S we want to access the stack physically since the state is saved before we
-+ * go virtual and don't want to destroy the iip or ipsr.
-+ */
-+#define MINSTATE_START_SAVE_MIN_PHYS								\
-+(pKStk) mov r3=IA64_KR(PER_CPU_DATA);;								\
-+(pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;;							\
-+(pKStk) ld8 r3 = [r3];;										\
-+(pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;;						\
-+(pKStk) addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3;						\
-+(pUStk)	mov ar.rsc=0;		/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */	\
-+(pUStk)	addl r22=IA64_RBS_OFFSET,r1;		/* compute base of register backing store */	\
-+	;;											\
-+(pUStk)	mov r24=ar.rnat;									\
-+(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;	/* compute base of memory stack */	\
-+(pUStk)	mov r23=ar.bspstore;				/* save ar.bspstore */			\
-+(pUStk)	dep r22=-1,r22,61,3;			/* compute kernel virtual addr of RBS */	\
-+	;;											\
-+(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;		/* if in kernel mode, use sp (r12) */		\
-+(pUStk)	mov ar.bspstore=r22;			/* switch to kernel RBS */			\
-+	;;											\
-+(pUStk)	mov r18=ar.bsp;										\
-+(pUStk)	mov ar.rsc=0x3;		/* set eager mode, pl 0, little-endian, loadrs=0 */		\
-+
-+#define MINSTATE_END_SAVE_MIN_PHYS								\
-+	dep r12=-1,r12,61,3;		/* make sp a kernel virtual address */			\
-+	;;
-+
-+#ifdef MINSTATE_VIRT
-+# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT)
-+# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_VIRT
-+# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_VIRT
-+#endif
-+
-+#ifdef MINSTATE_PHYS
-+# define MINSTATE_GET_CURRENT(reg)	mov reg=IA64_KR(CURRENT);; tpa reg=reg
-+# define MINSTATE_START_SAVE_MIN	MINSTATE_START_SAVE_MIN_PHYS
-+# define MINSTATE_END_SAVE_MIN		MINSTATE_END_SAVE_MIN_PHYS
-+#endif
-+
-+/*
-+ * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
-+ * the minimum state necessary that allows us to turn psr.ic back
-+ * on.
-+ *
-+ * Assumed state upon entry:
-+ *	psr.ic: off
-+ *	r31:	contains saved predicates (pr)
-+ *
-+ * Upon exit, the state is as follows:
-+ *	psr.ic: off
-+ *	 r2 = points to &pt_regs.r16
-+ *	 r8 = contents of ar.ccv
-+ *	 r9 = contents of ar.csd
-+ *	r10 = contents of ar.ssd
-+ *	r11 = FPSR_DEFAULT
-+ *	r12 = kernel sp (kernel virtual address)
-+ *	r13 = points to current task_struct (kernel virtual address)
-+ *	p15 = TRUE if psr.i is set in cr.ipsr
-+ *	predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
-+ *		preserved
-+ * CONFIG_XEN note: p6/p7 are not preserved
-+ *
-+ * Note that psr.ic is NOT turned on by this macro.  This is so that
-+ * we can pass interruption state as arguments to a handler.
-+ */
-+#ifdef CONFIG_XEN
-+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)							\
-+	MINSTATE_GET_CURRENT(r16);	/* M (or M;;I) */					\
-+	mov r27=ar.rsc;			/* M */							\
-+	mov r20=r1;			/* A */							\
-+	mov r25=ar.unat;		/* M */							\
-+	/* mov r29=cr.ipsr;		/* M */							\
-+	movl r29=XSI_IPSR;;									\
-+	ld8 r29=[r29];;										\
-+	mov r26=ar.pfs;			/* I */							\
-+	/* mov r28=cr.iip;		/* M */							\
-+	movl r28=XSI_IIP;;									\
-+	ld8 r28=[r28];;										\
-+	mov r21=ar.fpsr;		/* M */							\
-+	COVER;			/* B;; (or nothing) */					\
-+	;;											\
-+	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16;						\
-+	;;											\
-+	ld1 r17=[r16];				/* load current->thread.on_ustack flag */	\
-+	st1 [r16]=r0;				/* clear current->thread.on_ustack flag */	\
-+	adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16						\
-+	/* switch from user to kernel RBS: */							\
-+	;;											\
-+	invala;				/* M */							\
-+	/* SAVE_IFS; /* see xen special handling below */						\
-+	cmp.eq pKStk,pUStk=r0,r17;		/* are we in kernel mode already? */		\
-+	;;											\
-+	MINSTATE_START_SAVE_MIN									\
-+	adds r17=2*L1_CACHE_BYTES,r1;		/* really: biggest cache-line size */		\
-+	adds r16=PT(CR_IPSR),r1;								\
-+	;;											\
-+	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;						\
-+	st8 [r16]=r29;		/* save cr.ipsr */						\
-+	;;											\
-+	lfetch.fault.excl.nt1 [r17];								\
-+	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;							\
-+	mov r29=b0										\
-+	;;											\
-+	adds r16=PT(R8),r1;	/* initialize first base pointer */				\
-+	adds r17=PT(R9),r1;	/* initialize second base pointer */				\
-+(pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */					\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r8,16;								\
-+.mem.offset 8,0; st8.spill [r17]=r9,16;								\
-+        ;;											\
-+.mem.offset 0,0; st8.spill [r16]=r10,24;							\
-+.mem.offset 8,0; st8.spill [r17]=r11,24;							\
-+        ;;											\
-+	/* xen special handling for possibly lazy cover */					\
-+	movl r8=XSI_INCOMPL_REGFR;								\
-+	;;											\
-+	ld4 r30=[r8];										\
-+	;;											\
-+	cmp.eq	p6,p7=r30,r0;									\
-+	;; /* not sure if this stop bit is necessary */						\
-+(p6)	adds r8=XSI_PRECOVER_IFS-XSI_INCOMPL_REGFR,r8;						\
-+(p7)	adds r8=XSI_IFS-XSI_INCOMPL_REGFR,r8;							\
-+	;;											\
-+	ld8 r30=[r8];										\
-+	;;											\
-+	st8 [r16]=r28,16;	/* save cr.iip */						\
-+	st8 [r17]=r30,16;	/* save cr.ifs */						\
-+(pUStk)	sub r18=r18,r22;	/* r18=RSE.ndirty*8 */						\
-+	mov r8=ar.ccv;										\
-+	mov r9=ar.csd;										\
-+	mov r10=ar.ssd;										\
-+	movl r11=FPSR_DEFAULT;   /* L-unit */							\
-+	;;											\
-+	st8 [r16]=r25,16;	/* save ar.unat */						\
-+	st8 [r17]=r26,16;	/* save ar.pfs */						\
-+	shl r18=r18,16;		/* compute ar.rsc to be used for "loadrs" */			\
-+	;;											\
-+	st8 [r16]=r27,16;	/* save ar.rsc */						\
-+(pUStk)	st8 [r17]=r24,16;	/* save ar.rnat */						\
-+(pKStk)	adds r17=16,r17;	/* skip over ar_rnat field */					\
-+	;;			/* avoid RAW on r16 & r17 */					\
-+(pUStk)	st8 [r16]=r23,16;	/* save ar.bspstore */						\
-+	st8 [r17]=r31,16;	/* save predicates */						\
-+(pKStk)	adds r16=16,r16;	/* skip over ar_bspstore field */				\
-+	;;											\
-+	st8 [r16]=r29,16;	/* save b0 */							\
-+	st8 [r17]=r18,16;	/* save ar.rsc value for "loadrs" */				\
-+	cmp.eq pNonSys,pSys=r0,r0	/* initialize pSys=0, pNonSys=1 */			\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r20,16;	/* save original r1 */				\
-+.mem.offset 8,0; st8.spill [r17]=r12,16;							\
-+	adds r12=-16,r1;	/* switch to kernel memory stack (with 16 bytes of scratch) */	\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r13,16;							\
-+.mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
-+	mov r13=IA64_KR(CURRENT);	/* establish `current' */				\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r15,16;							\
-+.mem.offset 8,0; st8.spill [r17]=r14,16;							\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r2,16;								\
-+.mem.offset 8,0; st8.spill [r17]=r3,16;								\
-+	;;											\
-+	EXTRA;											\
-+	mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;					\
-+	adds r2=IA64_PT_REGS_R16_OFFSET,r1;							\
-+	;;											\
-+	movl r1=__gp;		/* establish kernel global pointer */				\
-+	;;											\
-+	/* MINSTATE_END_SAVE_MIN */
-+#else
-+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)							\
-+	MINSTATE_GET_CURRENT(r16);	/* M (or M;;I) */					\
-+	mov r27=ar.rsc;			/* M */							\
-+	mov r20=r1;			/* A */							\
-+	mov r25=ar.unat;		/* M */							\
-+	mov r29=cr.ipsr;		/* M */							\
-+	mov r26=ar.pfs;			/* I */							\
-+	mov r28=cr.iip;			/* M */							\
-+	mov r21=ar.fpsr;		/* M */							\
-+	COVER;				/* B;; (or nothing) */					\
-+	;;											\
-+	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16;						\
-+	;;											\
-+	ld1 r17=[r16];				/* load current->thread.on_ustack flag */	\
-+	st1 [r16]=r0;				/* clear current->thread.on_ustack flag */	\
-+	adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16						\
-+	/* switch from user to kernel RBS: */							\
-+	;;											\
-+	invala;				/* M */							\
-+	SAVE_IFS;										\
-+	cmp.eq pKStk,pUStk=r0,r17;		/* are we in kernel mode already? */		\
-+	;;											\
-+	MINSTATE_START_SAVE_MIN									\
-+	adds r17=2*L1_CACHE_BYTES,r1;		/* really: biggest cache-line size */		\
-+	adds r16=PT(CR_IPSR),r1;								\
-+	;;											\
-+	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;						\
-+	st8 [r16]=r29;		/* save cr.ipsr */						\
-+	;;											\
-+	lfetch.fault.excl.nt1 [r17];								\
-+	tbit.nz p15,p0=r29,IA64_PSR_I_BIT;							\
-+	mov r29=b0										\
-+	;;											\
-+	adds r16=PT(R8),r1;	/* initialize first base pointer */				\
-+	adds r17=PT(R9),r1;	/* initialize second base pointer */				\
-+(pKStk)	mov r18=r0;		/* make sure r18 isn't NaT */					\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r8,16;								\
-+.mem.offset 8,0; st8.spill [r17]=r9,16;								\
-+        ;;											\
-+.mem.offset 0,0; st8.spill [r16]=r10,24;							\
-+.mem.offset 8,0; st8.spill [r17]=r11,24;							\
-+        ;;											\
-+	st8 [r16]=r28,16;	/* save cr.iip */						\
-+	st8 [r17]=r30,16;	/* save cr.ifs */						\
-+(pUStk)	sub r18=r18,r22;	/* r18=RSE.ndirty*8 */						\
-+	mov r8=ar.ccv;										\
-+	mov r9=ar.csd;										\
-+	mov r10=ar.ssd;										\
-+	movl r11=FPSR_DEFAULT;   /* L-unit */							\
-+	;;											\
-+	st8 [r16]=r25,16;	/* save ar.unat */						\
-+	st8 [r17]=r26,16;	/* save ar.pfs */						\
-+	shl r18=r18,16;		/* compute ar.rsc to be used for "loadrs" */			\
-+	;;											\
-+	st8 [r16]=r27,16;	/* save ar.rsc */						\
-+(pUStk)	st8 [r17]=r24,16;	/* save ar.rnat */						\
-+(pKStk)	adds r17=16,r17;	/* skip over ar_rnat field */					\
-+	;;			/* avoid RAW on r16 & r17 */					\
-+(pUStk)	st8 [r16]=r23,16;	/* save ar.bspstore */						\
-+	st8 [r17]=r31,16;	/* save predicates */						\
-+(pKStk)	adds r16=16,r16;	/* skip over ar_bspstore field */				\
-+	;;											\
-+	st8 [r16]=r29,16;	/* save b0 */							\
-+	st8 [r17]=r18,16;	/* save ar.rsc value for "loadrs" */				\
-+	cmp.eq pNonSys,pSys=r0,r0	/* initialize pSys=0, pNonSys=1 */			\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r20,16;	/* save original r1 */				\
-+.mem.offset 8,0; st8.spill [r17]=r12,16;							\
-+	adds r12=-16,r1;	/* switch to kernel memory stack (with 16 bytes of scratch) */	\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r13,16;							\
-+.mem.offset 8,0; st8.spill [r17]=r21,16;	/* save ar.fpsr */				\
-+	mov r13=IA64_KR(CURRENT);	/* establish `current' */				\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r15,16;							\
-+.mem.offset 8,0; st8.spill [r17]=r14,16;							\
-+	;;											\
-+.mem.offset 0,0; st8.spill [r16]=r2,16;								\
-+.mem.offset 8,0; st8.spill [r17]=r3,16;								\
-+	adds r2=IA64_PT_REGS_R16_OFFSET,r1;							\
-+	;;											\
-+	EXTRA;											\
-+	movl r1=__gp;		/* establish kernel global pointer */				\
-+	;;											\
-+	MINSTATE_END_SAVE_MIN
-+#endif
-+
-+/*
-+ * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
-+ *
-+ * Assumed state upon entry:
-+ *	psr.ic: on
-+ *	r2:	points to &pt_regs.r16
-+ *	r3:	points to &pt_regs.r17
-+ *	r8:	contents of ar.ccv
-+ *	r9:	contents of ar.csd
-+ *	r10:	contents of ar.ssd
-+ *	r11:	FPSR_DEFAULT
-+ *
-+ * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
-+ */
-+#define SAVE_REST				\
-+.mem.offset 0,0; st8.spill [r2]=r16,16;		\
-+.mem.offset 8,0; st8.spill [r3]=r17,16;		\
-+	;;					\
-+.mem.offset 0,0; st8.spill [r2]=r18,16;		\
-+.mem.offset 8,0; st8.spill [r3]=r19,16;		\
-+	;;					\
-+.mem.offset 0,0; st8.spill [r2]=r20,16;		\
-+.mem.offset 8,0; st8.spill [r3]=r21,16;		\
-+	mov r18=b6;				\
-+	;;					\
-+.mem.offset 0,0; st8.spill [r2]=r22,16;		\
-+.mem.offset 8,0; st8.spill [r3]=r23,16;		\
-+	mov r19=b7;				\
-+	;;					\
-+.mem.offset 0,0; st8.spill [r2]=r24,16;		\
-+.mem.offset 8,0; st8.spill [r3]=r25,16;		\
-+	;;					\
-+.mem.offset 0,0; st8.spill [r2]=r26,16;		\
-+.mem.offset 8,0; st8.spill [r3]=r27,16;		\
-+	;;					\
-+.mem.offset 0,0; st8.spill [r2]=r28,16;		\
-+.mem.offset 8,0; st8.spill [r3]=r29,16;		\
-+	;;					\
-+.mem.offset 0,0; st8.spill [r2]=r30,16;		\
-+.mem.offset 8,0; st8.spill [r3]=r31,32;		\
-+	;;					\
-+	mov ar.fpsr=r11;	/* M-unit */	\
-+	st8 [r2]=r8,8;		/* ar.ccv */	\
-+	adds r24=PT(B6)-PT(F7),r3;		\
-+	;;					\
-+	stf.spill [r2]=f6,32;			\
-+	stf.spill [r3]=f7,32;			\
-+	;;					\
-+	stf.spill [r2]=f8,32;			\
-+	stf.spill [r3]=f9,32;			\
-+	;;					\
-+	stf.spill [r2]=f10;			\
-+	stf.spill [r3]=f11;			\
-+	adds r25=PT(B7)-PT(F11),r3;		\
-+	;;					\
-+	st8 [r24]=r18,16;       /* b6 */	\
-+	st8 [r25]=r19,16;       /* b7 */	\
-+	;;					\
-+	st8 [r24]=r9;        	/* ar.csd */	\
-+	st8 [r25]=r10;      	/* ar.ssd */	\
-+	;;
-+
-+#define SAVE_MIN_WITH_COVER	DO_SAVE_MIN(cover, mov r30=cr.ifs,)
-+#define SAVE_MIN_WITH_COVER_R19	DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
-+#ifdef CONFIG_XEN
-+#define SAVE_MIN		break 0;; /* FIXME: non-cover version only for ia32 support? */
-+#else
-+#define SAVE_MIN		DO_SAVE_MIN(     , mov r30=r0, )
-+#endif
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xenpal.S linux-2.6.12-xen/arch/ia64/xen/xenpal.S
---- pristine-linux-2.6.12/arch/ia64/xen/xenpal.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/xenpal.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,73 @@
-+/*
-+ * ia64/xen/xenpal.S
-+ *
-+ * Alternate PAL  routines for Xen.  Heavily leveraged from
-+ *   ia64/kernel/pal.S
-+ *
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ *	Dan Magenheimer <dan.magenheimer at .hp.com>
-+ */
-+
-+#include <asm/asmmacro.h>
-+#include <asm/processor.h>
-+
-+GLOBAL_ENTRY(xen_pal_call_static)
-+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-+	alloc loc1 = ar.pfs,5,5,0,0
-+#ifdef CONFIG_XEN
-+	movl r22=running_on_xen;;
-+	ld4 r22=[r22];;
-+	cmp.eq p7,p0=r22,r0
-+(p7)	br.cond.spnt.many __ia64_pal_call_static;;
-+#endif
-+	movl loc2 = pal_entry_point
-+1:	{
-+	  mov r28 = in0
-+	  mov r29 = in1
-+	  mov r8 = ip
-+	}
-+	;;
-+	ld8 loc2 = [loc2]		// loc2 <- entry point
-+	tbit.nz p6,p7 = in4, 0
-+	adds r8 = 1f-1b,r8
-+	mov loc4=ar.rsc			// save RSE configuration
-+	;;
-+	mov ar.rsc=0			// put RSE in enforced lazy, LE mode
-+	mov loc3 = psr
-+	mov loc0 = rp
-+	.body
-+	mov r30 = in2
-+
-+#ifdef CONFIG_XEN
-+	// this is low priority for paravirtualization, but is called
-+	// from the idle loop so confuses privop counting
-+	movl r31=XSI_PSR_IC
-+	;;
-+(p6)	st8 [r31]=r0
-+	;;
-+(p7)	adds r31=XSI_PSR_I-XSI_PSR_IC,r31
-+	;;
-+(p7)	st4 [r31]=r0
-+	;;
-+	mov r31 = in3
-+	mov b7 = loc2
-+	;;
-+#else
-+(p6)	rsm psr.i | psr.ic
-+	mov r31 = in3
-+	mov b7 = loc2
-+
-+(p7)	rsm psr.i
-+	;;
-+(p6)	srlz.i
-+#endif
-+	mov rp = r8
-+	br.cond.sptk.many b7
-+1:	mov psr.l = loc3
-+	mov ar.rsc = loc4		// restore RSE configuration
-+	mov ar.pfs = loc1
-+	mov rp = loc0
-+	;;
-+	srlz.d				// serialize restoration of psr.l
-+	br.ret.sptk.many b0
-+END(xen_pal_call_static)
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen/xensetup.S linux-2.6.12-xen/arch/ia64/xen/xensetup.S
---- pristine-linux-2.6.12/arch/ia64/xen/xensetup.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen/xensetup.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,33 @@
-+/*
-+ * Support routines for Xen
-+ *
-+ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer at hp.com>
-+ */
-+
-+#include <linux/config.h>
-+#include <asm/processor.h>
-+#include <asm/asmmacro.h>
-+
-+	.data
-+	.align 8
-+	.globl running_on_xen
-+running_on_xen:
-+	data4 0
-+
-+	.text
-+GLOBAL_ENTRY(early_xen_setup)
-+	mov r8=cr.dcr;;
-+	extr.u r8=r8,63,1
-+	movl r9=running_on_xen;;
-+	st4 [r9]=r8;;
-+	cmp.ne p7,p0=r8,r0;;
-+(p7)	movl r10=xen_ivt;;
-+(p7)	mov cr.iva=r10
-+	br.ret.sptk.many rp;;
-+END(early_xen_setup)
-+
-+GLOBAL_ENTRY(is_running_on_xen)
-+	movl r9=running_on_xen;;
-+	ld4 r8=[r9];;
-+	br.ret.sptk.many rp;;
-+END(is_running_on_xen)
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen-mkbuildtree-post linux-2.6.12-xen/arch/ia64/xen-mkbuildtree-post
---- pristine-linux-2.6.12/arch/ia64/xen-mkbuildtree-post	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen-mkbuildtree-post	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+#!/bin/bash
-+echo 'NOTHING YET IN ' ${0}
-diff -Nurp pristine-linux-2.6.12/arch/ia64/xen-mkbuildtree-pre linux-2.6.12-xen/arch/ia64/xen-mkbuildtree-pre
---- pristine-linux-2.6.12/arch/ia64/xen-mkbuildtree-pre	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/ia64/xen-mkbuildtree-pre	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,50 @@
-+#!/bin/bash
-+# restructure directories to match future drivers/xen plan
-+# and move aside xen/x86 specific changes
-+# WARNING!: This directory movement really confuses hg which makes
-+# it difficult to do development in a directory which is being used
-+# for building (as all files in mv'd directories are thought by hg
-+# to have been deleted).  I don't know how to avoid this right now,
-+# but if someone has a better way, I'm all ears
-+if [ ! -e mm.xen-x86 ]
-+then
-+	mv mm mm.xen-x86
-+	mkdir mm
-+	mv net net.xen-x86
-+	mv kernel kernel.xen-x86
-+	mv drivers/acpi/tables.c drivers/acpi/tables.c.xen-x86
-+	mv arch/xen/kernel drivers/xen/core
-+	mv arch/xen arch/xen.xen-x86
-+	mkdir arch/xen
-+	mv arch/xen.xen-x86/configs arch/xen
-+#	mv include/asm-generic include/asm-generic.xen-x86
-+	mv include/linux include/linux.xen-x86
-+	mkdir include/linux
-+fi
-+
-+# need to grab a couple of xen-modified files for generic_page_range and
-+# typedef pte_fn_t which are used by driver/xen blkif
-+cp mm.xen-x86/memory.c mm/memory.c
-+cp include/linux.xen-x86/mm.h include/linux/mm.h
-+
-+#eventually asm-xsi-offsets needs to be part of hypervisor.h/hypercall.h
-+cp ../xen/include/asm-ia64/asm-xsi-offsets.h include/asm-ia64/xen/
-+
-+#ia64 drivers/xen isn't fully functional yet, workaround...
-+#also ignore core/evtchn.c which uses a different irq mechanism than ia64
-+#(warning: there be dragons here if these files diverge)
-+cp arch/ia64/xen/drivers/Makefile drivers/xen/Makefile
-+cp arch/ia64/xen/drivers/coreMakefile drivers/xen/core/Makefile
-+
-+#not sure where these ia64-specific files will end up in the future
-+cp arch/ia64/xen/drivers/xenia64_init.c drivers/xen/core
-+cp arch/ia64/xen/drivers/evtchn_ia64.c drivers/xen/core
-+
-+#still a few x86-ism's in various drivers/xen files, patch them
-+#cd drivers/xen
-+#if [ ! -e ia64.patch.semaphore ]
-+#then
-+#	cat ../../arch/ia64/xen/drivers/patches/* | patch -p1 -b
-+#fi
-+#touch ia64.patch.semaphore
-+#cd ../..
-diff -Nurp pristine-linux-2.6.12/arch/ppc/kernel/time.c linux-2.6.12-xen/arch/ppc/kernel/time.c
---- pristine-linux-2.6.12/arch/ppc/kernel/time.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ppc/kernel/time.c	2006-02-25 00:12:30.023559244 +0100
-@@ -89,6 +89,9 @@ unsigned long tb_to_ns_scale;
- 
- extern unsigned long wall_jiffies;
- 
-+/* used for timezone offset */
-+static long timezone_offset;
-+
- DEFINE_SPINLOCK(rtc_lock);
- 
- EXPORT_SYMBOL(rtc_lock);
-@@ -170,7 +173,7 @@ void timer_interrupt(struct pt_regs * re
- 		     xtime.tv_sec - last_rtc_update >= 659 &&
- 		     abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
- 		     jiffies - wall_jiffies == 1) {
--		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
-+		  	if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
- 				last_rtc_update = xtime.tv_sec+1;
- 			else
- 				/* Try again one minute later */
-@@ -286,7 +289,7 @@ void __init time_init(void)
- 	unsigned old_stamp, stamp, elapsed;
- 
-         if (ppc_md.time_init != NULL)
--                time_offset = ppc_md.time_init();
-+                timezone_offset = ppc_md.time_init();
- 
- 	if (__USE_RTC()) {
- 		/* 601 processor: dec counts down by 128 every 128ns */
-@@ -331,10 +334,10 @@ void __init time_init(void)
- 	set_dec(tb_ticks_per_jiffy);
- 
- 	/* If platform provided a timezone (pmac), we correct the time */
--        if (time_offset) {
--		sys_tz.tz_minuteswest = -time_offset / 60;
-+        if (timezone_offset) {
-+		sys_tz.tz_minuteswest = -timezone_offset / 60;
- 		sys_tz.tz_dsttime = 0;
--		xtime.tv_sec -= time_offset;
-+		xtime.tv_sec -= timezone_offset;
-         }
-         set_normalized_timespec(&wall_to_monotonic,
-                                 -xtime.tv_sec, -xtime.tv_nsec);
-diff -Nurp pristine-linux-2.6.12/arch/ppc64/boot/zlib.c linux-2.6.12-xen/arch/ppc64/boot/zlib.c
---- pristine-linux-2.6.12/arch/ppc64/boot/zlib.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ppc64/boot/zlib.c	2006-02-25 00:12:30.024559093 +0100
-@@ -1307,7 +1307,7 @@ local int huft_build(
-   {
-     *t = (inflate_huft *)Z_NULL;
-     *m = 0;
--    return Z_OK;
-+    return Z_DATA_ERROR;
-   }
- 
- 
-@@ -1351,6 +1351,7 @@ local int huft_build(
-     if ((j = *p++) != 0)
-       v[x[j]++] = i;
-   } while (++i < n);
-+  n = x[g];			/* set n to length of v */
- 
- 
-   /* Generate the Huffman codes and for each, make the table entries */
-diff -Nurp pristine-linux-2.6.12/arch/ppc64/kernel/pSeries_smp.c linux-2.6.12-xen/arch/ppc64/kernel/pSeries_smp.c
---- pristine-linux-2.6.12/arch/ppc64/kernel/pSeries_smp.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/ppc64/kernel/pSeries_smp.c	2006-02-25 00:12:33.813987917 +0100
-@@ -92,10 +92,13 @@ static int query_cpu_stopped(unsigned in
- 
- int pSeries_cpu_disable(void)
- {
-+	int cpu = smp_processor_id();
-+
-+	cpu_clear(cpu, cpu_online_map);
- 	systemcfg->processorCount--;
- 
- 	/*fix boot_cpuid here*/
--	if (smp_processor_id() == boot_cpuid)
-+	if (cpu == boot_cpuid)
- 		boot_cpuid = any_online_cpu(cpu_online_map);
- 
- 	/* FIXME: abstract this to not be platform specific later on */
-diff -Nurp pristine-linux-2.6.12/arch/s390/kernel/smp.c linux-2.6.12-xen/arch/s390/kernel/smp.c
---- pristine-linux-2.6.12/arch/s390/kernel/smp.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/s390/kernel/smp.c	2006-02-25 00:12:33.813987917 +0100
-@@ -679,12 +679,14 @@ __cpu_disable(void)
- {
- 	unsigned long flags;
- 	ec_creg_mask_parms cr_parms;
-+	int cpu = smp_processor_id();
- 
- 	spin_lock_irqsave(&smp_reserve_lock, flags);
--	if (smp_cpu_reserved[smp_processor_id()] != 0) {
-+	if (smp_cpu_reserved[cpu] != 0) {
- 		spin_unlock_irqrestore(&smp_reserve_lock, flags);
- 		return -EBUSY;
- 	}
-+	cpu_clear(cpu, cpu_online_map);
- 
- #ifdef CONFIG_PFAULT
- 	/* Disable pfault pseudo page faults on this cpu. */
-diff -Nurp pristine-linux-2.6.12/arch/um/kernel/process.c linux-2.6.12-xen/arch/um/kernel/process.c
---- pristine-linux-2.6.12/arch/um/kernel/process.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/um/kernel/process.c	2006-02-25 00:12:30.025558942 +0100
-@@ -130,7 +130,7 @@ int start_fork_tramp(void *thread_arg, u
- 	return(arg.pid);
- }
- 
--static int ptrace_child(void)
-+static int ptrace_child(void *arg)
- {
- 	int ret;
- 	int pid = os_getpid(), ppid = getppid();
-@@ -159,16 +159,20 @@ static int ptrace_child(void)
- 	_exit(ret);
- }
- 
--static int start_ptraced_child(void)
-+static int start_ptraced_child(void **stack_out)
- {
-+	void *stack;
-+	unsigned long sp;
- 	int pid, n, status;
- 	
--	pid = fork();
--	if(pid == 0)
--		ptrace_child();
--
-+	stack = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC,
-+		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-+	if(stack == MAP_FAILED)
-+		panic("check_ptrace : mmap failed, errno = %d", errno);
-+	sp = (unsigned long) stack + PAGE_SIZE - sizeof(void *);
-+	pid = clone(ptrace_child, (void *) sp, SIGCHLD, NULL);
- 	if(pid < 0)
--		panic("check_ptrace : fork failed, errno = %d", errno);
-+		panic("check_ptrace : clone failed, errno = %d", errno);
- 	CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
- 	if(n < 0)
- 		panic("check_ptrace : wait failed, errno = %d", errno);
-@@ -176,6 +180,7 @@ static int start_ptraced_child(void)
- 		panic("check_ptrace : expected SIGSTOP, got status = %d",
- 		      status);
- 
-+	*stack_out = stack;
- 	return(pid);
- }
- 
-@@ -183,12 +188,12 @@ static int start_ptraced_child(void)
-  * just avoid using sysemu, not panic, but only if SYSEMU features are broken.
-  * So only for SYSEMU features we test mustpanic, while normal host features
-  * must work anyway!*/
--static int stop_ptraced_child(int pid, int exitcode, int mustexit)
-+static int stop_ptraced_child(int pid, void *stack, int exitcode, int mustpanic)
- {
- 	int status, n, ret = 0;
- 
- 	if(ptrace(PTRACE_CONT, pid, 0, 0) < 0)
--		panic("stop_ptraced_child : ptrace failed, errno = %d", errno);
-+		panic("check_ptrace : ptrace failed, errno = %d", errno);
- 	CATCH_EINTR(n = waitpid(pid, &status, 0));
- 	if(!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
- 		int exit_with = WEXITSTATUS(status);
-@@ -199,13 +204,15 @@ static int stop_ptraced_child(int pid, i
- 		printk("check_ptrace : child exited with exitcode %d, while "
- 		      "expecting %d; status 0x%x", exit_with,
- 		      exitcode, status);
--		if (mustexit)
-+		if (mustpanic)
- 			panic("\n");
- 		else
- 			printk("\n");
- 		ret = -1;
- 	}
- 
-+	if(munmap(stack, PAGE_SIZE) < 0)
-+		panic("check_ptrace : munmap failed, errno = %d", errno);
- 	return ret;
- }
- 
-@@ -227,11 +234,12 @@ __uml_setup("nosysemu", nosysemu_cmd_par
- 
- static void __init check_sysemu(void)
- {
-+	void *stack;
- 	int pid, syscall, n, status, count=0;
- 
- 	printk("Checking syscall emulation patch for ptrace...");
- 	sysemu_supported = 0;
--	pid = start_ptraced_child();
-+	pid = start_ptraced_child(&stack);
- 
- 	if(ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0)
- 		goto fail;
-@@ -249,7 +257,7 @@ static void __init check_sysemu(void)
- 		panic("check_sysemu : failed to modify system "
- 		      "call return, errno = %d", errno);
- 
--	if (stop_ptraced_child(pid, 0, 0) < 0)
-+	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
- 		goto fail_stopped;
- 
- 	sysemu_supported = 1;
-@@ -257,7 +265,7 @@ static void __init check_sysemu(void)
- 	set_using_sysemu(!force_sysemu_disabled);
- 
- 	printk("Checking advanced syscall emulation patch for ptrace...");
--	pid = start_ptraced_child();
-+	pid = start_ptraced_child(&stack);
- 	while(1){
- 		count++;
- 		if(ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0)
-@@ -282,7 +290,7 @@ static void __init check_sysemu(void)
- 			break;
- 		}
- 	}
--	if (stop_ptraced_child(pid, 0, 0) < 0)
-+	if (stop_ptraced_child(pid, stack, 0, 0) < 0)
- 		goto fail_stopped;
- 
- 	sysemu_supported = 2;
-@@ -293,17 +301,18 @@ static void __init check_sysemu(void)
- 	return;
- 
- fail:
--	stop_ptraced_child(pid, 1, 0);
-+	stop_ptraced_child(pid, stack, 1, 0);
- fail_stopped:
- 	printk("missing\n");
- }
- 
- void __init check_ptrace(void)
- {
-+	void *stack;
- 	int pid, syscall, n, status;
- 
- 	printk("Checking that ptrace can change system call numbers...");
--	pid = start_ptraced_child();
-+	pid = start_ptraced_child(&stack);
- 
- 	if (ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *)PTRACE_O_TRACESYSGOOD) < 0)
- 		panic("check_ptrace: PTRACE_SETOPTIONS failed, errno = %d", errno);
-@@ -330,7 +339,7 @@ void __init check_ptrace(void)
- 			break;
- 		}
- 	}
--	stop_ptraced_child(pid, 0, 1);
-+	stop_ptraced_child(pid, stack, 0, 1);
- 	printk("OK\n");
- 	check_sysemu();
- }
-@@ -362,10 +371,11 @@ void forward_pending_sigio(int target)
- static inline int check_skas3_ptrace_support(void)
- {
- 	struct ptrace_faultinfo fi;
-+	void *stack;
- 	int pid, n, ret = 1;
- 
- 	printf("Checking for the skas3 patch in the host...");
--	pid = start_ptraced_child();
-+	pid = start_ptraced_child(&stack);
- 
- 	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
- 	if (n < 0) {
-@@ -380,7 +390,7 @@ static inline int check_skas3_ptrace_sup
- 	}
- 
- 	init_registers(pid);
--	stop_ptraced_child(pid, 1, 1);
-+	stop_ptraced_child(pid, stack, 1, 1);
- 
- 	return(ret);
- }
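For reference, the change to start_ptraced_child() above boils down to a simple user-space pattern: mmap() an anonymous page, point the child's stack pointer just below the top of that page, and clone() the child onto it so the parent can later reclaim the stack with munmap(). The sketch below only illustrates that pattern; the names spawn_child_on_private_stack and child_fn are invented for the example, and it assumes a downward-growing stack as on i386/x86_64.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>

static int child_fn(void *arg)          /* stand-in for ptrace_child() */
{
        return 0;
}

/* Returns the child's pid (or -1 on failure) and hands the mapping back
 * through *stack_out so the caller can munmap() it once the child has
 * been reaped, as stop_ptraced_child() now does in the hunk above. */
static int spawn_child_on_private_stack(void **stack_out)
{
        long pagesize = sysconf(_SC_PAGESIZE);
        void *stack = mmap(NULL, pagesize, PROT_READ | PROT_WRITE | PROT_EXEC,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned long sp;

        if (stack == MAP_FAILED)
                return -1;
        /* The stack grows down, so start the child just below the top of
         * the mapping, leaving room for one pointer. */
        sp = (unsigned long) stack + pagesize - sizeof(void *);
        *stack_out = stack;
        return clone(child_fn, (void *) sp, SIGCHLD, NULL);
}

Handing the mapping back through stack_out mirrors why the patch threads a stack pointer into stop_ptraced_child(): the parent, not the child, must munmap() the page after waitpid() has collected the child.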
-diff -Nurp pristine-linux-2.6.12/arch/x86_64/ia32/syscall32.c linux-2.6.12-xen/arch/x86_64/ia32/syscall32.c
---- pristine-linux-2.6.12/arch/x86_64/ia32/syscall32.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/x86_64/ia32/syscall32.c	2006-02-25 00:12:30.025558942 +0100
-@@ -57,6 +57,7 @@ int syscall32_setup_pages(struct linux_b
- 	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
- 	struct vm_area_struct *vma;
- 	struct mm_struct *mm = current->mm;
-+	int ret;
- 
- 	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- 	if (!vma)
-@@ -78,7 +79,11 @@ int syscall32_setup_pages(struct linux_b
- 	vma->vm_mm = mm;
- 
- 	down_write(&mm->mmap_sem);
--	insert_vm_struct(mm, vma);
-+	if ((ret = insert_vm_struct(mm, vma))) {
-+		up_write(&mm->mmap_sem);
-+		kmem_cache_free(vm_area_cachep, vma);
-+		return ret;
-+	}
- 	mm->total_vm += npages;
- 	up_write(&mm->mmap_sem);
- 	return 0;
-diff -Nurp pristine-linux-2.6.12/arch/x86_64/kernel/i387.c linux-2.6.12-xen/arch/x86_64/kernel/i387.c
---- pristine-linux-2.6.12/arch/x86_64/kernel/i387.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/x86_64/kernel/i387.c	2006-02-25 00:12:33.788991685 +0100
-@@ -42,7 +42,7 @@ void mxcsr_feature_mask_init(void)
-  * Called at bootup to set up the initial FPU state that is later cloned
-  * into all processes.
-  */
--void __init fpu_init(void)
-+void __cpuinit fpu_init(void)
- {
- 	unsigned long oldcr0 = read_cr0();
- 	extern void __bad_fxsave_alignment(void);
-diff -Nurp pristine-linux-2.6.12/arch/x86_64/kernel/setup.c linux-2.6.12-xen/arch/x86_64/kernel/setup.c
---- pristine-linux-2.6.12/arch/x86_64/kernel/setup.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/x86_64/kernel/setup.c	2006-02-25 00:12:30.026558792 +0100
-@@ -729,8 +729,6 @@ static void __init amd_detect_cmp(struct
- 	int cpu = smp_processor_id();
- 	int node = 0;
- 	unsigned bits;
--	if (c->x86_num_cores == 1)
--		return;
- 
- 	bits = 0;
- 	while ((1 << bits) < c->x86_num_cores)
-diff -Nurp pristine-linux-2.6.12/arch/x86_64/kernel/smpboot.c linux-2.6.12-xen/arch/x86_64/kernel/smpboot.c
---- pristine-linux-2.6.12/arch/x86_64/kernel/smpboot.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/x86_64/kernel/smpboot.c	2006-02-25 00:12:30.027558641 +0100
-@@ -202,9 +202,6 @@ static __cpuinit void sync_master(void *
- {
- 	unsigned long flags, i;
- 
--	if (smp_processor_id() != boot_cpu_id)
--		return;
--
- 	go[MASTER] = 0;
- 
- 	local_irq_save(flags);
-@@ -253,7 +250,7 @@ get_delta(long *rt, long *master)
- 	return tcenter - best_tm;
- }
- 
--static __cpuinit void sync_tsc(void)
-+static __cpuinit void sync_tsc(unsigned int master)
- {
- 	int i, done = 0;
- 	long delta, adj, adjust_latency = 0;
-@@ -267,9 +264,17 @@ static __cpuinit void sync_tsc(void)
- 	} t[NUM_ROUNDS] __cpuinitdata;
- #endif
- 
-+	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
-+		smp_processor_id(), master);
-+
- 	go[MASTER] = 1;
- 
--	smp_call_function(sync_master, NULL, 1, 0);
-+	/* It is dangerous to broadcast an IPI while cpus are coming up,
-+	 * as they may not be ready to accept it.  Since we only need to
-+	 * reach the boot cpu here, send the IPI to that one cpu directly
-+	 * and avoid the race.
-+	 */
-+	smp_call_function_single(master, sync_master, NULL, 1, 0);
- 
- 	while (go[MASTER])	/* wait for master to be ready */
- 		no_cpu_relax();
-@@ -313,16 +318,14 @@ static __cpuinit void sync_tsc(void)
- 	printk(KERN_INFO
- 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
- 	       "maxerr %lu cycles)\n",
--	       smp_processor_id(), boot_cpu_id, delta, rt);
-+	       smp_processor_id(), master, delta, rt);
- }
- 
- static void __cpuinit tsc_sync_wait(void)
- {
- 	if (notscsync || !cpu_has_tsc)
- 		return;
--	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
--			boot_cpu_id);
--	sync_tsc();
-+	sync_tsc(0);
- }
- 
- static __init int notscsync_setup(char *s)
-diff -Nurp pristine-linux-2.6.12/arch/x86_64/kernel/smp.c linux-2.6.12-xen/arch/x86_64/kernel/smp.c
---- pristine-linux-2.6.12/arch/x86_64/kernel/smp.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/arch/x86_64/kernel/smp.c	2006-02-25 00:12:30.027558641 +0100
-@@ -284,6 +284,71 @@ struct call_data_struct {
- static struct call_data_struct * call_data;
- 
- /*
-+ * this function sends a 'generic call function' IPI to one other CPU
-+ * in the system.
-+ */
-+static void __smp_call_function_single (int cpu, void (*func) (void *info), void *info,
-+				int nonatomic, int wait)
-+{
-+	struct call_data_struct data;
-+	int cpus = 1;
-+
-+	data.func = func;
-+	data.info = info;
-+	atomic_set(&data.started, 0);
-+	data.wait = wait;
-+	if (wait)
-+		atomic_set(&data.finished, 0);
-+
-+	call_data = &data;
-+	wmb();
-+	/* Send a message to all other CPUs and wait for them to respond */
-+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
-+
-+	/* Wait for response */
-+	while (atomic_read(&data.started) != cpus)
-+		cpu_relax();
-+
-+	if (!wait)
-+		return;
-+
-+	while (atomic_read(&data.finished) != cpus)
-+		cpu_relax();
-+}
-+
-+/*
-+ * Run a function on another CPU
-+ *  <func>	The function to run. This must be fast and non-blocking.
-+ *  <info>	An arbitrary pointer to pass to the function.
-+ *  <nonatomic>	Currently unused.
-+ *  <wait>	If true, wait until function has completed on other CPUs.
-+ *  [RETURNS]   0 on success, else a negative status code.
-+ *
-+ * Does not return until the remote CPU is nearly ready to execute <func>,
-+ * is executing it, or has already executed it.
-+ */
-+
-+int smp_call_function_single (int cpu, void (*func) (void *info), void *info, 
-+	int nonatomic, int wait)
-+{
-+	
-+	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-+
-+	if (cpu == me) {
-+		printk("%s: trying to call self\n", __func__);
-+		put_cpu();
-+		return -EBUSY;
-+	}
-+	spin_lock_bh(&call_lock);
-+
-+	__smp_call_function_single(cpu, func,info,nonatomic,wait);	
-+
-+	spin_unlock_bh(&call_lock);
-+	put_cpu();
-+	return 0;
-+}
-+
-+/*
-  * this function sends a 'generic call function' IPI to all other CPUs
-  * in the system.
-  */
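For reference, the smp_call_function_single() helper added above runs one fast, non-blocking callback on exactly one other CPU, which is what lets the smpboot.c hunk earlier target the TSC-sync IPI at a single CPU instead of broadcasting during bring-up. A hedged usage sketch follows, mirroring the sync_tsc() call site in the patch; poke_cpu and example_caller are invented names, and the snippet assumes the patched tree where a prototype for smp_call_function_single() is visible to the caller.

#include <linux/kernel.h>
#include <linux/smp.h>   /* assumed to declare smp_call_function_single() in the patched tree */

/* Runs on the target CPU out of the CALL_FUNCTION_VECTOR IPI; per the
 * comment block in the hunk above it must be fast and must not block. */
static void poke_cpu(void *info)
{
	/* per-CPU work would go here */
}

static void example_caller(unsigned int target_cpu)
{
	int ret;

	/* nonatomic is currently unused; wait=0 only waits for the callback
	 * to start on the target CPU, not for it to finish. */
	ret = smp_call_function_single(target_cpu, poke_cpu, NULL, 1, 0);
	if (ret)
		printk(KERN_WARNING "poke of CPU %u failed: %d\n", target_cpu, ret);
}

As the hunk shows, passing the calling CPU's own id fails with -EBUSY, and the call returns once the target CPU has picked up the IPI (or, with wait=1, once the callback has completed).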
-diff -Nurp pristine-linux-2.6.12/arch/xen/boot/Makefile linux-2.6.12-xen/arch/xen/boot/Makefile
---- pristine-linux-2.6.12/arch/xen/boot/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/boot/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,11 @@
-+
-+OBJCOPYFLAGS := -g --strip-unneeded
-+
-+vmlinuz: vmlinux-stripped FORCE
-+	$(call if_changed,gzip)
-+
-+vmlinux-stripped: vmlinux FORCE
-+	$(call if_changed,objcopy)
-+
-+bzImage: vmlinuz
-+	$(Q)ln -sf ../../../vmlinuz $(srctree)/arch/xen/boot/bzImage
-diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_ia64 linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_ia64
---- pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_ia64	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_ia64	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1405 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12.6-xen0
-+# Wed Dec 21 11:17:02 2005
-+#
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+CONFIG_CLEAN_COMPILE=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+CONFIG_POSIX_MQUEUE=y
-+CONFIG_BSD_PROCESS_ACCT=y
-+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-+CONFIG_SYSCTL=y
-+# CONFIG_AUDIT is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+CONFIG_IKCONFIG=y
-+CONFIG_IKCONFIG_PROC=y
-+# CONFIG_CPUSETS is not set
-+# CONFIG_EMBEDDED is not set
-+CONFIG_KALLSYMS=y
-+CONFIG_KALLSYMS_ALL=y
-+CONFIG_KALLSYMS_EXTRA_PASS=y
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+# CONFIG_MODULE_FORCE_UNLOAD is not set
-+CONFIG_OBSOLETE_MODPARM=y
-+CONFIG_MODVERSIONS=y
-+CONFIG_MODULE_SRCVERSION_ALL=y
-+CONFIG_KMOD=y
-+CONFIG_STOP_MACHINE=y
-+
-+#
-+# Processor type and features
-+#
-+CONFIG_IA64=y
-+CONFIG_64BIT=y
-+CONFIG_MMU=y
-+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_TIME_INTERPOLATION=y
-+CONFIG_EFI=y
-+CONFIG_GENERIC_IOMAP=y
-+CONFIG_XEN=y
-+CONFIG_ARCH_XEN=y
-+CONFIG_XEN_PRIVILEGED_GUEST=y
-+CONFIG_XEN_PHYSDEV_ACCESS=y
-+CONFIG_XEN_BLKDEV_GRANT=y
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_XEN_VT=y
-+CONFIG_VT=y
-+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-+# CONFIG_IA64_GENERIC is not set
-+CONFIG_IA64_DIG=y
-+# CONFIG_IA64_HP_ZX1 is not set
-+# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
-+# CONFIG_IA64_SGI_SN2 is not set
-+# CONFIG_IA64_HP_SIM is not set
-+# CONFIG_ITANIUM is not set
-+CONFIG_MCKINLEY=y
-+# CONFIG_IA64_PAGE_SIZE_4KB is not set
-+# CONFIG_IA64_PAGE_SIZE_8KB is not set
-+CONFIG_IA64_PAGE_SIZE_16KB=y
-+# CONFIG_IA64_PAGE_SIZE_64KB is not set
-+CONFIG_IA64_L1_CACHE_SHIFT=7
-+# CONFIG_NUMA is not set
-+CONFIG_VIRTUAL_MEM_MAP=n
-+CONFIG_IA64_CYCLONE=y
-+CONFIG_IOSAPIC=y
-+CONFIG_FORCE_MAX_ZONEORDER=18
-+CONFIG_SMP=y
-+CONFIG_NR_CPUS=4
-+CONFIG_HOTPLUG_CPU=y
-+# CONFIG_SCHED_SMT is not set
-+# CONFIG_PREEMPT is not set
-+CONFIG_HAVE_DEC_LOCK=y
-+# CONFIG_IA32_SUPPORT is not set
-+CONFIG_IA64_MCA_RECOVERY=y
-+CONFIG_PERFMON=y
-+CONFIG_IA64_PALINFO=y
-+CONFIG_ACPI_DEALLOCATE_IRQ=y
-+
-+#
-+# Firmware Drivers
-+#
-+CONFIG_EFI_VARS=y
-+CONFIG_EFI_PCDP=y
-+CONFIG_BINFMT_ELF=y
-+CONFIG_BINFMT_MISC=y
-+
-+#
-+# Power management and ACPI
-+#
-+CONFIG_PM=y
-+CONFIG_ACPI=y
-+
-+#
-+# ACPI (Advanced Configuration and Power Interface) Support
-+#
-+CONFIG_ACPI_BOOT=y
-+CONFIG_ACPI_INTERPRETER=y
-+CONFIG_ACPI_BUTTON=y
-+# CONFIG_ACPI_VIDEO is not set
-+CONFIG_ACPI_FAN=y
-+CONFIG_ACPI_PROCESSOR=y
-+# CONFIG_ACPI_HOTPLUG_CPU is not set
-+CONFIG_ACPI_THERMAL=y
-+CONFIG_ACPI_BLACKLIST_YEAR=0
-+# CONFIG_ACPI_DEBUG is not set
-+CONFIG_ACPI_BUS=y
-+CONFIG_ACPI_POWER=y
-+CONFIG_ACPI_PCI=y
-+CONFIG_ACPI_SYSTEM=y
-+# CONFIG_ACPI_CONTAINER is not set
-+
-+#
-+# Bus options (PCI, PCMCIA)
-+#
-+CONFIG_PCI=y
-+CONFIG_PCI_DOMAINS=y
-+# CONFIG_PCI_MSI is not set
-+CONFIG_PCI_LEGACY_PROC=y
-+CONFIG_PCI_NAMES=y
-+# CONFIG_PCI_DEBUG is not set
-+
-+#
-+# PCI Hotplug Support
-+#
-+CONFIG_HOTPLUG_PCI=y
-+# CONFIG_HOTPLUG_PCI_FAKE is not set
-+CONFIG_HOTPLUG_PCI_ACPI=y
-+# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
-+# CONFIG_HOTPLUG_PCI_CPCI is not set
-+# CONFIG_HOTPLUG_PCI_SHPC is not set
-+
-+#
-+# PCCARD (PCMCIA/CardBus) support
-+#
-+# CONFIG_PCCARD is not set
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+# CONFIG_FW_LOADER is not set
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Memory Technology Devices (MTD)
-+#
-+# CONFIG_MTD is not set
-+
-+#
-+# Parallel port support
-+#
-+# CONFIG_PARPORT is not set
-+
-+#
-+# Plug and Play support
-+#
-+# CONFIG_PNP is not set
-+
-+#
-+# Block devices
-+#
-+# CONFIG_BLK_CPQ_DA is not set
-+# CONFIG_BLK_CPQ_CISS_DA is not set
-+# CONFIG_BLK_DEV_DAC960 is not set
-+# CONFIG_BLK_DEV_UMEM is not set
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=y
-+CONFIG_BLK_DEV_CRYPTOLOOP=y
-+CONFIG_BLK_DEV_NBD=y
-+# CONFIG_BLK_DEV_SX8 is not set
-+# CONFIG_BLK_DEV_UB is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=4096
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+# CONFIG_CDROM_PKTCDVD is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+# CONFIG_ATA_OVER_ETH is not set
-+
-+#
-+# ATA/ATAPI/MFM/RLL support
-+#
-+CONFIG_IDE=y
-+CONFIG_BLK_DEV_IDE=y
-+
-+#
-+# Please see Documentation/ide.txt for help/info on IDE drives
-+#
-+# CONFIG_BLK_DEV_IDE_SATA is not set
-+CONFIG_BLK_DEV_IDEDISK=y
-+# CONFIG_IDEDISK_MULTI_MODE is not set
-+CONFIG_BLK_DEV_IDECD=y
-+# CONFIG_BLK_DEV_IDETAPE is not set
-+CONFIG_BLK_DEV_IDEFLOPPY=y
-+CONFIG_BLK_DEV_IDESCSI=y
-+# CONFIG_IDE_TASK_IOCTL is not set
-+
-+#
-+# IDE chipset support/bugfixes
-+#
-+CONFIG_IDE_GENERIC=y
-+CONFIG_BLK_DEV_IDEPCI=y
-+# CONFIG_IDEPCI_SHARE_IRQ is not set
-+# CONFIG_BLK_DEV_OFFBOARD is not set
-+CONFIG_BLK_DEV_GENERIC=y
-+# CONFIG_BLK_DEV_OPTI621 is not set
-+CONFIG_BLK_DEV_IDEDMA_PCI=y
-+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-+CONFIG_IDEDMA_PCI_AUTO=y
-+# CONFIG_IDEDMA_ONLYDISK is not set
-+# CONFIG_BLK_DEV_AEC62XX is not set
-+# CONFIG_BLK_DEV_ALI15X3 is not set
-+# CONFIG_BLK_DEV_AMD74XX is not set
-+CONFIG_BLK_DEV_CMD64X=y
-+# CONFIG_BLK_DEV_TRIFLEX is not set
-+# CONFIG_BLK_DEV_CY82C693 is not set
-+# CONFIG_BLK_DEV_CS5520 is not set
-+# CONFIG_BLK_DEV_CS5530 is not set
-+# CONFIG_BLK_DEV_HPT34X is not set
-+# CONFIG_BLK_DEV_HPT366 is not set
-+# CONFIG_BLK_DEV_SC1200 is not set
-+CONFIG_BLK_DEV_PIIX=y
-+# CONFIG_BLK_DEV_NS87415 is not set
-+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
-+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
-+# CONFIG_BLK_DEV_SVWKS is not set
-+# CONFIG_BLK_DEV_SIIMAGE is not set
-+# CONFIG_BLK_DEV_SLC90E66 is not set
-+# CONFIG_BLK_DEV_TRM290 is not set
-+# CONFIG_BLK_DEV_VIA82CXXX is not set
-+# CONFIG_IDE_ARM is not set
-+CONFIG_BLK_DEV_IDEDMA=y
-+# CONFIG_IDEDMA_IVB is not set
-+CONFIG_IDEDMA_AUTO=y
-+# CONFIG_BLK_DEV_HD is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=y
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=y
-+CONFIG_CHR_DEV_ST=y
-+CONFIG_CHR_DEV_OSST=y
-+CONFIG_BLK_DEV_SR=y
-+CONFIG_BLK_DEV_SR_VENDOR=y
-+CONFIG_CHR_DEV_SG=y
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+CONFIG_SCSI_MULTI_LUN=y
-+CONFIG_SCSI_CONSTANTS=y
-+CONFIG_SCSI_LOGGING=y
-+
-+#
-+# SCSI Transport Attributes
-+#
-+CONFIG_SCSI_SPI_ATTRS=y
-+CONFIG_SCSI_FC_ATTRS=y
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+
-+#
-+# SCSI low-level drivers
-+#
-+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-+# CONFIG_SCSI_3W_9XXX is not set
-+# CONFIG_SCSI_ACARD is not set
-+# CONFIG_SCSI_AACRAID is not set
-+# CONFIG_SCSI_AIC7XXX is not set
-+# CONFIG_SCSI_AIC7XXX_OLD is not set
-+# CONFIG_SCSI_AIC79XX is not set
-+# CONFIG_MEGARAID_NEWGEN is not set
-+# CONFIG_MEGARAID_LEGACY is not set
-+# CONFIG_SCSI_SATA is not set
-+# CONFIG_SCSI_DMX3191D is not set
-+# CONFIG_SCSI_FUTURE_DOMAIN is not set
-+# CONFIG_SCSI_IPS is not set
-+# CONFIG_SCSI_INITIO is not set
-+# CONFIG_SCSI_INIA100 is not set
-+CONFIG_SCSI_SYM53C8XX_2=y
-+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-+# CONFIG_SCSI_IPR is not set
-+CONFIG_SCSI_QLOGIC_FC=y
-+# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
-+CONFIG_SCSI_QLOGIC_1280=y
-+# CONFIG_SCSI_QLOGIC_1280_1040 is not set
-+CONFIG_SCSI_QLA2XXX=y
-+CONFIG_SCSI_QLA21XX=y
-+CONFIG_SCSI_QLA22XX=y
-+CONFIG_SCSI_QLA2300=y
-+CONFIG_SCSI_QLA2322=y
-+# CONFIG_SCSI_QLA6312 is not set
-+# CONFIG_SCSI_LPFC is not set
-+# CONFIG_SCSI_DC395x is not set
-+# CONFIG_SCSI_DC390T is not set
-+# CONFIG_SCSI_DEBUG is not set
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+CONFIG_MD=y
-+# CONFIG_BLK_DEV_MD is not set
-+# CONFIG_BLK_DEV_DM is not set
-+
-+#
-+# Fusion MPT device support
-+#
-+CONFIG_FUSION=y
-+CONFIG_FUSION_MAX_SGE=40
-+# CONFIG_FUSION_CTL is not set
-+
-+#
-+# IEEE 1394 (FireWire) support
-+#
-+# CONFIG_IEEE1394 is not set
-+
-+#
-+# I2O device support
-+#
-+# CONFIG_I2O is not set
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=y
-+# CONFIG_PACKET_MMAP is not set
-+CONFIG_UNIX=y
-+# CONFIG_NET_KEY is not set
-+CONFIG_INET=y
-+CONFIG_IP_MULTICAST=y
-+# CONFIG_IP_ADVANCED_ROUTER is not set
-+# CONFIG_IP_PNP is not set
-+# CONFIG_NET_IPIP is not set
-+# CONFIG_NET_IPGRE is not set
-+# CONFIG_IP_MROUTE is not set
-+CONFIG_ARPD=y
-+CONFIG_SYN_COOKIES=y
-+# CONFIG_INET_AH is not set
-+# CONFIG_INET_ESP is not set
-+# CONFIG_INET_IPCOMP is not set
-+# CONFIG_INET_TUNNEL is not set
-+CONFIG_IP_TCPDIAG=y
-+# CONFIG_IP_TCPDIAG_IPV6 is not set
-+
-+#
-+# IP: Virtual Server Configuration
-+#
-+# CONFIG_IP_VS is not set
-+# CONFIG_IPV6 is not set
-+CONFIG_NETFILTER=y
-+# CONFIG_NETFILTER_DEBUG is not set
-+CONFIG_BRIDGE_NETFILTER=y
-+
-+#
-+# IP: Netfilter Configuration
-+#
-+# CONFIG_IP_NF_CONNTRACK is not set
-+# CONFIG_IP_NF_CONNTRACK_MARK is not set
-+# CONFIG_IP_NF_QUEUE is not set
-+# CONFIG_IP_NF_IPTABLES is not set
-+CONFIG_IP_NF_ARPTABLES=y
-+# CONFIG_IP_NF_ARPFILTER is not set
-+# CONFIG_IP_NF_ARP_MANGLE is not set
-+
-+#
-+# Bridge: Netfilter Configuration
-+#
-+# CONFIG_BRIDGE_NF_EBTABLES is not set
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+# CONFIG_IP_SCTP is not set
-+# CONFIG_ATM is not set
-+CONFIG_BRIDGE=y
-+# CONFIG_VLAN_8021Q is not set
-+# CONFIG_DECNET is not set
-+# CONFIG_LLC2 is not set
-+# CONFIG_IPX is not set
-+# CONFIG_ATALK is not set
-+# CONFIG_X25 is not set
-+# CONFIG_LAPB is not set
-+# CONFIG_NET_DIVERT is not set
-+# CONFIG_ECONET is not set
-+# CONFIG_WAN_ROUTER is not set
-+
-+#
-+# QoS and/or fair queueing
-+#
-+# CONFIG_NET_SCHED is not set
-+# CONFIG_NET_CLS_ROUTE is not set
-+
-+#
-+# Network testing
-+#
-+# CONFIG_NET_PKTGEN is not set
-+CONFIG_NETPOLL=y
-+# CONFIG_NETPOLL_RX is not set
-+# CONFIG_NETPOLL_TRAP is not set
-+CONFIG_NET_POLL_CONTROLLER=y
-+# CONFIG_HAMRADIO is not set
-+# CONFIG_IRDA is not set
-+# CONFIG_BT is not set
-+CONFIG_NETDEVICES=y
-+CONFIG_DUMMY=y
-+# CONFIG_BONDING is not set
-+# CONFIG_EQUALIZER is not set
-+CONFIG_TUN=y
-+
-+#
-+# ARCnet devices
-+#
-+CONFIG_ARCNET=y
-+# CONFIG_ARCNET_1201 is not set
-+# CONFIG_ARCNET_1051 is not set
-+# CONFIG_ARCNET_RAW is not set
-+# CONFIG_ARCNET_CAP is not set
-+# CONFIG_ARCNET_COM90xx is not set
-+# CONFIG_ARCNET_COM90xxIO is not set
-+# CONFIG_ARCNET_RIM_I is not set
-+# CONFIG_ARCNET_COM20020 is not set
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=y
-+# CONFIG_HAPPYMEAL is not set
-+# CONFIG_SUNGEM is not set
-+# CONFIG_NET_VENDOR_3COM is not set
-+
-+#
-+# Tulip family network device support
-+#
-+CONFIG_NET_TULIP=y
-+# CONFIG_DE2104X is not set
-+CONFIG_TULIP=y
-+CONFIG_TULIP_MWI=y
-+CONFIG_TULIP_MMIO=y
-+CONFIG_TULIP_NAPI=y
-+CONFIG_TULIP_NAPI_HW_MITIGATION=y
-+# CONFIG_DE4X5 is not set
-+# CONFIG_WINBOND_840 is not set
-+# CONFIG_DM9102 is not set
-+# CONFIG_HP100 is not set
-+CONFIG_NET_PCI=y
-+# CONFIG_PCNET32 is not set
-+# CONFIG_AMD8111_ETH is not set
-+# CONFIG_ADAPTEC_STARFIRE is not set
-+# CONFIG_B44 is not set
-+# CONFIG_FORCEDETH is not set
-+# CONFIG_DGRS is not set
-+CONFIG_EEPRO100=y
-+CONFIG_E100=y
-+# CONFIG_FEALNX is not set
-+# CONFIG_NATSEMI is not set
-+# CONFIG_NE2K_PCI is not set
-+# CONFIG_8139CP is not set
-+# CONFIG_8139TOO is not set
-+# CONFIG_SIS900 is not set
-+# CONFIG_EPIC100 is not set
-+# CONFIG_SUNDANCE is not set
-+# CONFIG_VIA_RHINE is not set
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+# CONFIG_ACENIC is not set
-+# CONFIG_DL2K is not set
-+CONFIG_E1000=y
-+# CONFIG_E1000_NAPI is not set
-+# CONFIG_NS83820 is not set
-+# CONFIG_HAMACHI is not set
-+# CONFIG_YELLOWFIN is not set
-+# CONFIG_R8169 is not set
-+# CONFIG_SK98LIN is not set
-+# CONFIG_VIA_VELOCITY is not set
-+CONFIG_TIGON3=y
-+# CONFIG_BNX2 is not set
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+# CONFIG_IXGB is not set
-+# CONFIG_S2IO is not set
-+
-+#
-+# Token Ring devices
-+#
-+# CONFIG_TR is not set
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+# CONFIG_NET_RADIO is not set
-+
-+#
-+# Wan interfaces
-+#
-+# CONFIG_WAN is not set
-+# CONFIG_FDDI is not set
-+# CONFIG_HIPPI is not set
-+# CONFIG_PPP is not set
-+# CONFIG_SLIP is not set
-+# CONFIG_NET_FC is not set
-+# CONFIG_SHAPER is not set
-+CONFIG_NETCONSOLE=y
-+
-+#
-+# ISDN subsystem
-+#
-+CONFIG_ISDN=m
-+
-+#
-+# Old ISDN4Linux
-+#
-+# CONFIG_ISDN_I4L is not set
-+
-+#
-+# CAPI subsystem
-+#
-+# CONFIG_ISDN_CAPI is not set
-+
-+#
-+# Telephony Support
-+#
-+# CONFIG_PHONE is not set
-+
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+CONFIG_INPUT_MOUSEDEV_PSAUX=y
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+CONFIG_INPUT_JOYDEV=y
-+# CONFIG_INPUT_TSDEV is not set
-+CONFIG_INPUT_EVDEV=y
-+# CONFIG_INPUT_EVBUG is not set
-+
-+#
-+# Input Device Drivers
-+#
-+CONFIG_INPUT_KEYBOARD=y
-+CONFIG_KEYBOARD_ATKBD=y
-+# CONFIG_KEYBOARD_SUNKBD is not set
-+# CONFIG_KEYBOARD_LKKBD is not set
-+# CONFIG_KEYBOARD_XTKBD is not set
-+# CONFIG_KEYBOARD_NEWTON is not set
-+CONFIG_INPUT_MOUSE=y
-+CONFIG_MOUSE_PS2=y
-+# CONFIG_MOUSE_SERIAL is not set
-+# CONFIG_MOUSE_VSXXXAA is not set
-+# CONFIG_INPUT_JOYSTICK is not set
-+# CONFIG_INPUT_TOUCHSCREEN is not set
-+# CONFIG_INPUT_MISC is not set
-+
-+#
-+# Hardware I/O ports
-+#
-+CONFIG_SERIO=y
-+CONFIG_SERIO_I8042=y
-+# CONFIG_SERIO_SERPORT is not set
-+# CONFIG_SERIO_PCIPS2 is not set
-+CONFIG_SERIO_LIBPS2=y
-+# CONFIG_SERIO_RAW is not set
-+CONFIG_GAMEPORT=y
-+# CONFIG_GAMEPORT_NS558 is not set
-+# CONFIG_GAMEPORT_L4 is not set
-+# CONFIG_GAMEPORT_EMU10K1 is not set
-+# CONFIG_GAMEPORT_VORTEX is not set
-+# CONFIG_GAMEPORT_FM801 is not set
-+# CONFIG_GAMEPORT_CS461X is not set
-+
-+#
-+# Character devices
-+#
-+CONFIG_VT_CONSOLE=y
-+CONFIG_HW_CONSOLE=y
-+CONFIG_SERIAL_NONSTANDARD=y
-+# CONFIG_ROCKETPORT is not set
-+# CONFIG_CYCLADES is not set
-+# CONFIG_MOXA_SMARTIO is not set
-+# CONFIG_ISI is not set
-+# CONFIG_SYNCLINKMP is not set
-+# CONFIG_N_HDLC is not set
-+# CONFIG_SPECIALIX is not set
-+# CONFIG_SX is not set
-+# CONFIG_STALDRV is not set
-+
-+#
-+# Serial drivers
-+#
-+CONFIG_SERIAL_8250=y
-+CONFIG_SERIAL_8250_CONSOLE=y
-+CONFIG_SERIAL_8250_ACPI=y
-+CONFIG_SERIAL_8250_NR_UARTS=6
-+CONFIG_SERIAL_8250_EXTENDED=y
-+CONFIG_SERIAL_8250_SHARE_IRQ=y
-+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-+# CONFIG_SERIAL_8250_MULTIPORT is not set
-+# CONFIG_SERIAL_8250_RSA is not set
-+
-+#
-+# Non-8250 serial port support
-+#
-+CONFIG_SERIAL_CORE=y
-+CONFIG_SERIAL_CORE_CONSOLE=y
-+# CONFIG_SERIAL_JSM is not set
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+
-+#
-+# IPMI
-+#
-+# CONFIG_IPMI_HANDLER is not set
-+
-+#
-+# Watchdog Cards
-+#
-+# CONFIG_WATCHDOG is not set
-+# CONFIG_HW_RANDOM is not set
-+CONFIG_EFI_RTC=y
-+# CONFIG_DTLK is not set
-+# CONFIG_R3964 is not set
-+# CONFIG_APPLICOM is not set
-+
-+#
-+# Ftape, the floppy tape device driver
-+#
-+CONFIG_AGP=y
-+CONFIG_AGP_I460=y
-+CONFIG_DRM=y
-+# CONFIG_DRM_TDFX is not set
-+# CONFIG_DRM_R128 is not set
-+# CONFIG_DRM_RADEON is not set
-+# CONFIG_DRM_MGA is not set
-+# CONFIG_DRM_SIS is not set
-+# CONFIG_RAW_DRIVER is not set
-+# CONFIG_HPET is not set
-+# CONFIG_HANGCHECK_TIMER is not set
-+
-+#
-+# TPM devices
-+#
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# I2C support
-+#
-+CONFIG_I2C=y
-+CONFIG_I2C_CHARDEV=y
-+
-+#
-+# I2C Algorithms
-+#
-+CONFIG_I2C_ALGOBIT=y
-+CONFIG_I2C_ALGOPCF=y
-+# CONFIG_I2C_ALGOPCA is not set
-+
-+#
-+# I2C Hardware Bus support
-+#
-+# CONFIG_I2C_ALI1535 is not set
-+# CONFIG_I2C_ALI1563 is not set
-+# CONFIG_I2C_ALI15X3 is not set
-+# CONFIG_I2C_AMD756 is not set
-+# CONFIG_I2C_AMD8111 is not set
-+# CONFIG_I2C_I801 is not set
-+# CONFIG_I2C_I810 is not set
-+# CONFIG_I2C_PIIX4 is not set
-+# CONFIG_I2C_ISA is not set
-+# CONFIG_I2C_NFORCE2 is not set
-+# CONFIG_I2C_PARPORT_LIGHT is not set
-+# CONFIG_I2C_PROSAVAGE is not set
-+# CONFIG_I2C_SAVAGE4 is not set
-+# CONFIG_SCx200_ACB is not set
-+# CONFIG_I2C_SIS5595 is not set
-+# CONFIG_I2C_SIS630 is not set
-+# CONFIG_I2C_SIS96X is not set
-+# CONFIG_I2C_STUB is not set
-+# CONFIG_I2C_VIA is not set
-+# CONFIG_I2C_VIAPRO is not set
-+# CONFIG_I2C_VOODOO3 is not set
-+# CONFIG_I2C_PCA_ISA is not set
-+
-+#
-+# Hardware Sensors Chip support
-+#
-+# CONFIG_I2C_SENSOR is not set
-+# CONFIG_SENSORS_ADM1021 is not set
-+# CONFIG_SENSORS_ADM1025 is not set
-+# CONFIG_SENSORS_ADM1026 is not set
-+# CONFIG_SENSORS_ADM1031 is not set
-+# CONFIG_SENSORS_ASB100 is not set
-+# CONFIG_SENSORS_DS1621 is not set
-+# CONFIG_SENSORS_FSCHER is not set
-+# CONFIG_SENSORS_FSCPOS is not set
-+# CONFIG_SENSORS_GL518SM is not set
-+# CONFIG_SENSORS_GL520SM is not set
-+# CONFIG_SENSORS_IT87 is not set
-+# CONFIG_SENSORS_LM63 is not set
-+# CONFIG_SENSORS_LM75 is not set
-+# CONFIG_SENSORS_LM77 is not set
-+# CONFIG_SENSORS_LM78 is not set
-+# CONFIG_SENSORS_LM80 is not set
-+# CONFIG_SENSORS_LM83 is not set
-+# CONFIG_SENSORS_LM85 is not set
-+# CONFIG_SENSORS_LM87 is not set
-+# CONFIG_SENSORS_LM90 is not set
-+# CONFIG_SENSORS_LM92 is not set
-+# CONFIG_SENSORS_MAX1619 is not set
-+# CONFIG_SENSORS_PC87360 is not set
-+# CONFIG_SENSORS_SMSC47B397 is not set
-+# CONFIG_SENSORS_SIS5595 is not set
-+# CONFIG_SENSORS_SMSC47M1 is not set
-+# CONFIG_SENSORS_VIA686A is not set
-+# CONFIG_SENSORS_W83781D is not set
-+# CONFIG_SENSORS_W83L785TS is not set
-+# CONFIG_SENSORS_W83627HF is not set
-+
-+#
-+# Other I2C Chip support
-+#
-+# CONFIG_SENSORS_DS1337 is not set
-+# CONFIG_SENSORS_EEPROM is not set
-+# CONFIG_SENSORS_PCF8574 is not set
-+# CONFIG_SENSORS_PCF8591 is not set
-+# CONFIG_SENSORS_RTC8564 is not set
-+# CONFIG_I2C_DEBUG_CORE is not set
-+# CONFIG_I2C_DEBUG_ALGO is not set
-+# CONFIG_I2C_DEBUG_BUS is not set
-+# CONFIG_I2C_DEBUG_CHIP is not set
-+
-+#
-+# Dallas's 1-wire bus
-+#
-+# CONFIG_W1 is not set
-+
-+#
-+# Misc devices
-+#
-+
-+#
-+# Multimedia devices
-+#
-+CONFIG_VIDEO_DEV=y
-+
-+#
-+# Video For Linux
-+#
-+
-+#
-+# Video Adapters
-+#
-+# CONFIG_VIDEO_BT848 is not set
-+# CONFIG_VIDEO_CPIA is not set
-+# CONFIG_VIDEO_SAA5246A is not set
-+# CONFIG_VIDEO_SAA5249 is not set
-+# CONFIG_TUNER_3036 is not set
-+# CONFIG_VIDEO_STRADIS is not set
-+# CONFIG_VIDEO_ZORAN is not set
-+# CONFIG_VIDEO_SAA7134 is not set
-+# CONFIG_VIDEO_MXB is not set
-+# CONFIG_VIDEO_DPC is not set
-+# CONFIG_VIDEO_HEXIUM_ORION is not set
-+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
-+# CONFIG_VIDEO_CX88 is not set
-+# CONFIG_VIDEO_OVCAMCHIP is not set
-+
-+#
-+# Radio Adapters
-+#
-+# CONFIG_RADIO_GEMTEK_PCI is not set
-+# CONFIG_RADIO_MAXIRADIO is not set
-+# CONFIG_RADIO_MAESTRO is not set
-+
-+#
-+# Digital Video Broadcasting Devices
-+#
-+# CONFIG_DVB is not set
-+
-+#
-+# Graphics support
-+#
-+CONFIG_FB=y
-+CONFIG_FB_CFB_FILLRECT=y
-+CONFIG_FB_CFB_COPYAREA=y
-+CONFIG_FB_CFB_IMAGEBLIT=y
-+CONFIG_FB_SOFT_CURSOR=y
-+# CONFIG_FB_MACMODES is not set
-+CONFIG_FB_MODE_HELPERS=y
-+# CONFIG_FB_TILEBLITTING is not set
-+# CONFIG_FB_CIRRUS is not set
-+# CONFIG_FB_PM2 is not set
-+# CONFIG_FB_CYBER2000 is not set
-+# CONFIG_FB_ASILIANT is not set
-+# CONFIG_FB_IMSTT is not set
-+# CONFIG_FB_NVIDIA is not set
-+# CONFIG_FB_RIVA is not set
-+# CONFIG_FB_MATROX is not set
-+# CONFIG_FB_RADEON_OLD is not set
-+CONFIG_FB_RADEON=y
-+CONFIG_FB_RADEON_I2C=y
-+CONFIG_FB_RADEON_DEBUG=y
-+# CONFIG_FB_ATY128 is not set
-+# CONFIG_FB_ATY is not set
-+# CONFIG_FB_SAVAGE is not set
-+# CONFIG_FB_SIS is not set
-+# CONFIG_FB_NEOMAGIC is not set
-+# CONFIG_FB_KYRO is not set
-+# CONFIG_FB_3DFX is not set
-+# CONFIG_FB_VOODOO1 is not set
-+# CONFIG_FB_TRIDENT is not set
-+# CONFIG_FB_S1D13XXX is not set
-+# CONFIG_FB_VIRTUAL is not set
-+
-+#
-+# Console display driver support
-+#
-+CONFIG_VGA_CONSOLE=y
-+CONFIG_DUMMY_CONSOLE=y
-+# CONFIG_FRAMEBUFFER_CONSOLE is not set
-+
-+#
-+# Logo configuration
-+#
-+CONFIG_LOGO=y
-+# CONFIG_LOGO_LINUX_MONO is not set
-+# CONFIG_LOGO_LINUX_VGA16 is not set
-+CONFIG_LOGO_LINUX_CLUT224=y
-+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-+
-+#
-+# Sound
-+#
-+CONFIG_SOUND=y
-+
-+#
-+# Advanced Linux Sound Architecture
-+#
-+CONFIG_SND=y
-+CONFIG_SND_TIMER=y
-+CONFIG_SND_PCM=y
-+CONFIG_SND_HWDEP=y
-+CONFIG_SND_RAWMIDI=y
-+CONFIG_SND_SEQUENCER=y
-+CONFIG_SND_SEQ_DUMMY=y
-+CONFIG_SND_OSSEMUL=y
-+CONFIG_SND_MIXER_OSS=y
-+CONFIG_SND_PCM_OSS=y
-+CONFIG_SND_SEQUENCER_OSS=y
-+# CONFIG_SND_VERBOSE_PRINTK is not set
-+# CONFIG_SND_DEBUG is not set
-+
-+#
-+# Generic devices
-+#
-+CONFIG_SND_MPU401_UART=y
-+CONFIG_SND_OPL3_LIB=y
-+CONFIG_SND_DUMMY=y
-+CONFIG_SND_VIRMIDI=y
-+CONFIG_SND_MTPAV=y
-+CONFIG_SND_SERIAL_U16550=y
-+CONFIG_SND_MPU401=y
-+
-+#
-+# PCI devices
-+#
-+CONFIG_SND_AC97_CODEC=y
-+# CONFIG_SND_ALI5451 is not set
-+CONFIG_SND_ATIIXP=y
-+# CONFIG_SND_ATIIXP_MODEM is not set
-+# CONFIG_SND_AU8810 is not set
-+# CONFIG_SND_AU8820 is not set
-+# CONFIG_SND_AU8830 is not set
-+# CONFIG_SND_AZT3328 is not set
-+# CONFIG_SND_BT87X is not set
-+# CONFIG_SND_CS46XX is not set
-+# CONFIG_SND_CS4281 is not set
-+# CONFIG_SND_EMU10K1 is not set
-+# CONFIG_SND_EMU10K1X is not set
-+# CONFIG_SND_CA0106 is not set
-+# CONFIG_SND_KORG1212 is not set
-+# CONFIG_SND_MIXART is not set
-+# CONFIG_SND_NM256 is not set
-+# CONFIG_SND_RME32 is not set
-+# CONFIG_SND_RME96 is not set
-+# CONFIG_SND_RME9652 is not set
-+# CONFIG_SND_HDSP is not set
-+# CONFIG_SND_TRIDENT is not set
-+# CONFIG_SND_YMFPCI is not set
-+# CONFIG_SND_ALS4000 is not set
-+# CONFIG_SND_CMIPCI is not set
-+# CONFIG_SND_ENS1370 is not set
-+# CONFIG_SND_ENS1371 is not set
-+# CONFIG_SND_ES1938 is not set
-+# CONFIG_SND_ES1968 is not set
-+# CONFIG_SND_MAESTRO3 is not set
-+CONFIG_SND_FM801=y
-+CONFIG_SND_FM801_TEA575X=y
-+# CONFIG_SND_ICE1712 is not set
-+# CONFIG_SND_ICE1724 is not set
-+# CONFIG_SND_INTEL8X0 is not set
-+# CONFIG_SND_INTEL8X0M is not set
-+# CONFIG_SND_SONICVIBES is not set
-+# CONFIG_SND_VIA82XX is not set
-+# CONFIG_SND_VIA82XX_MODEM is not set
-+# CONFIG_SND_VX222 is not set
-+# CONFIG_SND_HDA_INTEL is not set
-+
-+#
-+# USB devices
-+#
-+# CONFIG_SND_USB_AUDIO is not set
-+
-+#
-+# Open Sound System
-+#
-+CONFIG_SOUND_PRIME=y
-+# CONFIG_SOUND_BT878 is not set
-+# CONFIG_SOUND_CMPCI is not set
-+# CONFIG_SOUND_EMU10K1 is not set
-+# CONFIG_SOUND_FUSION is not set
-+# CONFIG_SOUND_CS4281 is not set
-+# CONFIG_SOUND_ES1370 is not set
-+# CONFIG_SOUND_ES1371 is not set
-+# CONFIG_SOUND_ESSSOLO1 is not set
-+# CONFIG_SOUND_MAESTRO is not set
-+# CONFIG_SOUND_MAESTRO3 is not set
-+# CONFIG_SOUND_ICH is not set
-+# CONFIG_SOUND_SONICVIBES is not set
-+# CONFIG_SOUND_TRIDENT is not set
-+# CONFIG_SOUND_MSNDCLAS is not set
-+# CONFIG_SOUND_MSNDPIN is not set
-+# CONFIG_SOUND_VIA82CXXX is not set
-+# CONFIG_SOUND_OSS is not set
-+# CONFIG_SOUND_TVMIXER is not set
-+# CONFIG_SOUND_ALI5455 is not set
-+# CONFIG_SOUND_FORTE is not set
-+# CONFIG_SOUND_RME96XX is not set
-+# CONFIG_SOUND_AD1980 is not set
-+
-+#
-+# USB support
-+#
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB=y
-+# CONFIG_USB_DEBUG is not set
-+
-+#
-+# Miscellaneous USB options
-+#
-+CONFIG_USB_DEVICEFS=y
-+CONFIG_USB_BANDWIDTH=y
-+# CONFIG_USB_DYNAMIC_MINORS is not set
-+# CONFIG_USB_SUSPEND is not set
-+# CONFIG_USB_OTG is not set
-+
-+#
-+# USB Host Controller Drivers
-+#
-+CONFIG_USB_EHCI_HCD=y
-+# CONFIG_USB_EHCI_SPLIT_ISO is not set
-+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
-+CONFIG_USB_OHCI_HCD=y
-+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
-+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-+CONFIG_USB_UHCI_HCD=y
-+# CONFIG_USB_SL811_HCD is not set
-+
-+#
-+# USB Device Class drivers
-+#
-+CONFIG_USB_AUDIO=y
-+# CONFIG_USB_BLUETOOTH_TTY is not set
-+# CONFIG_USB_MIDI is not set
-+# CONFIG_USB_ACM is not set
-+# CONFIG_USB_PRINTER is not set
-+
-+#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-+#
-+CONFIG_USB_STORAGE=y
-+# CONFIG_USB_STORAGE_DEBUG is not set
-+# CONFIG_USB_STORAGE_DATAFAB is not set
-+# CONFIG_USB_STORAGE_FREECOM is not set
-+# CONFIG_USB_STORAGE_ISD200 is not set
-+# CONFIG_USB_STORAGE_DPCM is not set
-+# CONFIG_USB_STORAGE_USBAT is not set
-+# CONFIG_USB_STORAGE_SDDR09 is not set
-+# CONFIG_USB_STORAGE_SDDR55 is not set
-+# CONFIG_USB_STORAGE_JUMPSHOT is not set
-+
-+#
-+# USB Input Devices
-+#
-+CONFIG_USB_HID=y
-+CONFIG_USB_HIDINPUT=y
-+# CONFIG_HID_FF is not set
-+CONFIG_USB_HIDDEV=y
-+# CONFIG_USB_AIPTEK is not set
-+# CONFIG_USB_WACOM is not set
-+# CONFIG_USB_KBTAB is not set
-+# CONFIG_USB_POWERMATE is not set
-+# CONFIG_USB_MTOUCH is not set
-+# CONFIG_USB_EGALAX is not set
-+# CONFIG_USB_XPAD is not set
-+# CONFIG_USB_ATI_REMOTE is not set
-+
-+#
-+# USB Imaging devices
-+#
-+# CONFIG_USB_MDC800 is not set
-+# CONFIG_USB_MICROTEK is not set
-+
-+#
-+# USB Multimedia devices
-+#
-+# CONFIG_USB_DABUSB is not set
-+# CONFIG_USB_VICAM is not set
-+# CONFIG_USB_DSBR is not set
-+# CONFIG_USB_IBMCAM is not set
-+# CONFIG_USB_KONICAWC is not set
-+# CONFIG_USB_OV511 is not set
-+# CONFIG_USB_SE401 is not set
-+# CONFIG_USB_SN9C102 is not set
-+# CONFIG_USB_STV680 is not set
-+# CONFIG_USB_PWC is not set
-+
-+#
-+# USB Network Adapters
-+#
-+# CONFIG_USB_CATC is not set
-+# CONFIG_USB_KAWETH is not set
-+# CONFIG_USB_PEGASUS is not set
-+# CONFIG_USB_RTL8150 is not set
-+# CONFIG_USB_USBNET is not set
-+CONFIG_USB_MON=y
-+
-+#
-+# USB port drivers
-+#
-+
-+#
-+# USB Serial Converter support
-+#
-+# CONFIG_USB_SERIAL is not set
-+
-+#
-+# USB Miscellaneous drivers
-+#
-+# CONFIG_USB_EMI62 is not set
-+# CONFIG_USB_EMI26 is not set
-+# CONFIG_USB_AUERSWALD is not set
-+# CONFIG_USB_RIO500 is not set
-+# CONFIG_USB_LEGOTOWER is not set
-+# CONFIG_USB_LCD is not set
-+# CONFIG_USB_LED is not set
-+# CONFIG_USB_CYTHERM is not set
-+# CONFIG_USB_PHIDGETKIT is not set
-+# CONFIG_USB_PHIDGETSERVO is not set
-+# CONFIG_USB_IDMOUSE is not set
-+# CONFIG_USB_SISUSBVGA is not set
-+# CONFIG_USB_TEST is not set
-+
-+#
-+# USB ATM/DSL drivers
-+#
-+
-+#
-+# USB Gadget Support
-+#
-+# CONFIG_USB_GADGET is not set
-+
-+#
-+# MMC/SD Card support
-+#
-+# CONFIG_MMC is not set
-+
-+#
-+# InfiniBand support
-+#
-+# CONFIG_INFINIBAND is not set
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+CONFIG_EXT2_FS_XATTR=y
-+CONFIG_EXT2_FS_POSIX_ACL=y
-+CONFIG_EXT2_FS_SECURITY=y
-+CONFIG_EXT3_FS=y
-+CONFIG_EXT3_FS_XATTR=y
-+CONFIG_EXT3_FS_POSIX_ACL=y
-+CONFIG_EXT3_FS_SECURITY=y
-+CONFIG_JBD=y
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+CONFIG_REISERFS_FS=y
-+# CONFIG_REISERFS_CHECK is not set
-+# CONFIG_REISERFS_PROC_INFO is not set
-+CONFIG_REISERFS_FS_XATTR=y
-+CONFIG_REISERFS_FS_POSIX_ACL=y
-+CONFIG_REISERFS_FS_SECURITY=y
-+# CONFIG_JFS_FS is not set
-+CONFIG_FS_POSIX_ACL=y
-+
-+#
-+# XFS support
-+#
-+CONFIG_XFS_FS=y
-+CONFIG_XFS_EXPORT=y
-+# CONFIG_XFS_RT is not set
-+# CONFIG_XFS_QUOTA is not set
-+# CONFIG_XFS_SECURITY is not set
-+# CONFIG_XFS_POSIX_ACL is not set
-+# CONFIG_MINIX_FS is not set
-+# CONFIG_ROMFS_FS is not set
-+# CONFIG_QUOTA is not set
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=y
-+CONFIG_AUTOFS4_FS=y
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=y
-+CONFIG_JOLIET=y
-+# CONFIG_ZISOFS is not set
-+CONFIG_UDF_FS=y
-+CONFIG_UDF_NLS=y
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=y
-+CONFIG_MSDOS_FS=y
-+CONFIG_VFAT_FS=y
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-+# CONFIG_NTFS_FS is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+# CONFIG_DEVPTS_FS_XATTR is not set
-+CONFIG_TMPFS=y
-+CONFIG_TMPFS_XATTR=y
-+CONFIG_TMPFS_SECURITY=y
-+CONFIG_HUGETLBFS=y
-+CONFIG_HUGETLB_PAGE=y
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+# CONFIG_ADFS_FS is not set
-+# CONFIG_AFFS_FS is not set
-+# CONFIG_HFS_FS is not set
-+# CONFIG_HFSPLUS_FS is not set
-+# CONFIG_BEFS_FS is not set
-+# CONFIG_BFS_FS is not set
-+# CONFIG_EFS_FS is not set
-+# CONFIG_CRAMFS is not set
-+# CONFIG_VXFS_FS is not set
-+# CONFIG_HPFS_FS is not set
-+# CONFIG_QNX4FS_FS is not set
-+# CONFIG_SYSV_FS is not set
-+# CONFIG_UFS_FS is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=y
-+CONFIG_NFS_V3=y
-+CONFIG_NFS_V4=y
-+CONFIG_NFS_DIRECTIO=y
-+CONFIG_NFSD=y
-+CONFIG_NFSD_V3=y
-+CONFIG_NFSD_V4=y
-+CONFIG_NFSD_TCP=y
-+CONFIG_LOCKD=y
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=y
-+CONFIG_SUNRPC=y
-+CONFIG_SUNRPC_GSS=y
-+CONFIG_RPCSEC_GSS_KRB5=y
-+# CONFIG_RPCSEC_GSS_SPKM3 is not set
-+CONFIG_SMB_FS=y
-+CONFIG_SMB_NLS_DEFAULT=y
-+CONFIG_SMB_NLS_REMOTE="cp437"
-+CONFIG_CIFS=y
-+# CONFIG_CIFS_STATS is not set
-+# CONFIG_CIFS_XATTR is not set
-+# CONFIG_CIFS_EXPERIMENTAL is not set
-+# CONFIG_NCP_FS is not set
-+# CONFIG_CODA_FS is not set
-+# CONFIG_AFS_FS is not set
-+
-+#
-+# Partition Types
-+#
-+CONFIG_PARTITION_ADVANCED=y
-+# CONFIG_ACORN_PARTITION is not set
-+# CONFIG_OSF_PARTITION is not set
-+# CONFIG_AMIGA_PARTITION is not set
-+# CONFIG_ATARI_PARTITION is not set
-+# CONFIG_MAC_PARTITION is not set
-+CONFIG_MSDOS_PARTITION=y
-+# CONFIG_BSD_DISKLABEL is not set
-+# CONFIG_MINIX_SUBPARTITION is not set
-+# CONFIG_SOLARIS_X86_PARTITION is not set
-+# CONFIG_UNIXWARE_DISKLABEL is not set
-+# CONFIG_LDM_PARTITION is not set
-+CONFIG_SGI_PARTITION=y
-+# CONFIG_ULTRIX_PARTITION is not set
-+# CONFIG_SUN_PARTITION is not set
-+CONFIG_EFI_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="iso8859-1"
-+CONFIG_NLS_CODEPAGE_437=y
-+CONFIG_NLS_CODEPAGE_737=y
-+CONFIG_NLS_CODEPAGE_775=y
-+CONFIG_NLS_CODEPAGE_850=y
-+CONFIG_NLS_CODEPAGE_852=y
-+CONFIG_NLS_CODEPAGE_855=y
-+CONFIG_NLS_CODEPAGE_857=y
-+CONFIG_NLS_CODEPAGE_860=y
-+CONFIG_NLS_CODEPAGE_861=y
-+CONFIG_NLS_CODEPAGE_862=y
-+CONFIG_NLS_CODEPAGE_863=y
-+CONFIG_NLS_CODEPAGE_864=y
-+CONFIG_NLS_CODEPAGE_865=y
-+CONFIG_NLS_CODEPAGE_866=y
-+CONFIG_NLS_CODEPAGE_869=y
-+CONFIG_NLS_CODEPAGE_936=y
-+CONFIG_NLS_CODEPAGE_950=y
-+CONFIG_NLS_CODEPAGE_932=y
-+CONFIG_NLS_CODEPAGE_949=y
-+CONFIG_NLS_CODEPAGE_874=y
-+CONFIG_NLS_ISO8859_8=y
-+# CONFIG_NLS_CODEPAGE_1250 is not set
-+CONFIG_NLS_CODEPAGE_1251=y
-+# CONFIG_NLS_ASCII is not set
-+CONFIG_NLS_ISO8859_1=y
-+CONFIG_NLS_ISO8859_2=y
-+CONFIG_NLS_ISO8859_3=y
-+CONFIG_NLS_ISO8859_4=y
-+CONFIG_NLS_ISO8859_5=y
-+CONFIG_NLS_ISO8859_6=y
-+CONFIG_NLS_ISO8859_7=y
-+CONFIG_NLS_ISO8859_9=y
-+CONFIG_NLS_ISO8859_13=y
-+CONFIG_NLS_ISO8859_14=y
-+CONFIG_NLS_ISO8859_15=y
-+CONFIG_NLS_KOI8_R=y
-+CONFIG_NLS_KOI8_U=y
-+CONFIG_NLS_UTF8=y
-+
-+#
-+# Library routines
-+#
-+# CONFIG_CRC_CCITT is not set
-+CONFIG_CRC32=y
-+# CONFIG_LIBCRC32C is not set
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+
-+#
-+# Profiling support
-+#
-+# CONFIG_PROFILING is not set
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=20
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+CONFIG_IA64_GRANULE_16MB=y
-+# CONFIG_IA64_GRANULE_64MB is not set
-+CONFIG_IA64_PRINT_HAZARDS=y
-+# CONFIG_DISABLE_VHPT is not set
-+# CONFIG_IA64_DEBUG_CMPXCHG is not set
-+# CONFIG_IA64_DEBUG_IRQ is not set
-+
-+#
-+# Security options
-+#
-+# CONFIG_KEYS is not set
-+# CONFIG_SECURITY is not set
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+# CONFIG_CRYPTO_HMAC is not set
-+# CONFIG_CRYPTO_NULL is not set
-+# CONFIG_CRYPTO_MD4 is not set
-+CONFIG_CRYPTO_MD5=y
-+# CONFIG_CRYPTO_SHA1 is not set
-+# CONFIG_CRYPTO_SHA256 is not set
-+# CONFIG_CRYPTO_SHA512 is not set
-+# CONFIG_CRYPTO_WP512 is not set
-+# CONFIG_CRYPTO_TGR192 is not set
-+CONFIG_CRYPTO_DES=y
-+# CONFIG_CRYPTO_BLOWFISH is not set
-+# CONFIG_CRYPTO_TWOFISH is not set
-+# CONFIG_CRYPTO_SERPENT is not set
-+# CONFIG_CRYPTO_AES is not set
-+# CONFIG_CRYPTO_CAST5 is not set
-+# CONFIG_CRYPTO_CAST6 is not set
-+# CONFIG_CRYPTO_TEA is not set
-+# CONFIG_CRYPTO_ARC4 is not set
-+# CONFIG_CRYPTO_KHAZAD is not set
-+# CONFIG_CRYPTO_ANUBIS is not set
-+# CONFIG_CRYPTO_DEFLATE is not set
-+# CONFIG_CRYPTO_MICHAEL_MIC is not set
-+# CONFIG_CRYPTO_CRC32C is not set
-+# CONFIG_CRYPTO_TEST is not set
-+
-+#
-+# Hardware crypto devices
-+#
-diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_x86_32 linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_x86_32
---- pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_x86_32	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_x86_32	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1272 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12.6-xen0
-+# Mon Nov  7 17:22:05 2005
-+#
-+CONFIG_XEN=y
-+CONFIG_ARCH_XEN=y
-+CONFIG_NO_IDLE_HZ=y
-+
-+#
-+# XEN
-+#
-+CONFIG_XEN_PRIVILEGED_GUEST=y
-+CONFIG_XEN_PHYSDEV_ACCESS=y
-+CONFIG_XEN_BLKDEV_BACKEND=y
-+# CONFIG_XEN_BLKDEV_TAP_BE is not set
-+CONFIG_XEN_NETDEV_BACKEND=y
-+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
-+# CONFIG_XEN_TPMDEV_FRONTEND is not set
-+# CONFIG_XEN_TPMDEV_BACKEND is not set
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_XEN_NETDEV_FRONTEND=y
-+# CONFIG_XEN_BLKDEV_TAP is not set
-+# CONFIG_XEN_SHADOW_MODE is not set
-+CONFIG_XEN_SCRUB_PAGES=y
-+CONFIG_XEN_X86=y
-+# CONFIG_XEN_X86_64 is not set
-+CONFIG_HAVE_ARCH_ALLOC_SKB=y
-+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+# CONFIG_CLEAN_COMPILE is not set
-+CONFIG_BROKEN=y
-+CONFIG_BROKEN_ON_SMP=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+# CONFIG_POSIX_MQUEUE is not set
-+# CONFIG_BSD_PROCESS_ACCT is not set
-+CONFIG_SYSCTL=y
-+# CONFIG_AUDIT is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+# CONFIG_IKCONFIG is not set
-+# CONFIG_EMBEDDED is not set
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+# CONFIG_KALLSYMS_EXTRA_PASS is not set
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+# CONFIG_MODULE_FORCE_UNLOAD is not set
-+CONFIG_OBSOLETE_MODPARM=y
-+# CONFIG_MODVERSIONS is not set
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+CONFIG_KMOD=y
-+
-+#
-+# X86 Processor Configuration
-+#
-+CONFIG_XENARCH="i386"
-+CONFIG_X86=y
-+CONFIG_MMU=y
-+CONFIG_UID16=y
-+CONFIG_GENERIC_ISA_DMA=y
-+CONFIG_GENERIC_IOMAP=y
-+# CONFIG_M386 is not set
-+# CONFIG_M486 is not set
-+# CONFIG_M586 is not set
-+# CONFIG_M586TSC is not set
-+# CONFIG_M586MMX is not set
-+CONFIG_M686=y
-+# CONFIG_MPENTIUMII is not set
-+# CONFIG_MPENTIUMIII is not set
-+# CONFIG_MPENTIUMM is not set
-+# CONFIG_MPENTIUM4 is not set
-+# CONFIG_MK6 is not set
-+# CONFIG_MK7 is not set
-+# CONFIG_MK8 is not set
-+# CONFIG_MCRUSOE is not set
-+# CONFIG_MEFFICEON is not set
-+# CONFIG_MWINCHIPC6 is not set
-+# CONFIG_MWINCHIP2 is not set
-+# CONFIG_MWINCHIP3D is not set
-+# CONFIG_MGEODEGX1 is not set
-+# CONFIG_MCYRIXIII is not set
-+# CONFIG_MVIAC3_2 is not set
-+# CONFIG_X86_GENERIC is not set
-+CONFIG_X86_CMPXCHG=y
-+CONFIG_X86_XADD=y
-+CONFIG_X86_L1_CACHE_SHIFT=5
-+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_X86_PPRO_FENCE=y
-+CONFIG_X86_WP_WORKS_OK=y
-+CONFIG_X86_INVLPG=y
-+CONFIG_X86_BSWAP=y
-+CONFIG_X86_POPAD_OK=y
-+CONFIG_X86_GOOD_APIC=y
-+CONFIG_X86_USE_PPRO_CHECKSUM=y
-+# CONFIG_HPET_TIMER is not set
-+# CONFIG_HPET_EMULATE_RTC is not set
-+# CONFIG_SMP is not set
-+# CONFIG_X86_REBOOTFIXUPS is not set
-+CONFIG_MICROCODE=y
-+CONFIG_X86_CPUID=y
-+CONFIG_SWIOTLB=y
-+
-+#
-+# Firmware Drivers
-+#
-+# CONFIG_EDD is not set
-+# CONFIG_NOHIGHMEM is not set
-+CONFIG_HIGHMEM4G=y
-+# CONFIG_HIGHMEM64G is not set
-+CONFIG_HIGHMEM=y
-+CONFIG_MTRR=y
-+# CONFIG_REGPARM is not set
-+CONFIG_X86_LOCAL_APIC=y
-+CONFIG_X86_IO_APIC=y
-+
-+#
-+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
-+#
-+CONFIG_X86_UP_APIC=y
-+CONFIG_X86_UP_IOAPIC=y
-+CONFIG_PCI=y
-+# CONFIG_PCI_GOMMCONFIG is not set
-+# CONFIG_PCI_GODIRECT is not set
-+CONFIG_PCI_GOANY=y
-+CONFIG_PCI_DIRECT=y
-+CONFIG_PCI_MMCONFIG=y
-+# CONFIG_PCIEPORTBUS is not set
-+# CONFIG_PCI_MSI is not set
-+CONFIG_PCI_LEGACY_PROC=y
-+# CONFIG_PCI_NAMES is not set
-+# CONFIG_PCI_DEBUG is not set
-+CONFIG_ISA_DMA_API=y
-+CONFIG_ISA=y
-+# CONFIG_EISA is not set
-+# CONFIG_MCA is not set
-+# CONFIG_SCx200 is not set
-+
-+#
-+# PCCARD (PCMCIA/CardBus) support
-+#
-+# CONFIG_PCCARD is not set
-+
-+#
-+# PCI Hotplug Support
-+#
-+# CONFIG_HOTPLUG_PCI is not set
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_X86_BIOS_REBOOT=y
-+CONFIG_PC=y
-+CONFIG_SECCOMP=y
-+CONFIG_EARLY_PRINTK=y
-+
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+# CONFIG_BINFMT_AOUT is not set
-+# CONFIG_BINFMT_MISC is not set
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+# CONFIG_STANDALONE is not set
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+# CONFIG_FW_LOADER is not set
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Memory Technology Devices (MTD)
-+#
-+# CONFIG_MTD is not set
-+
-+#
-+# Parallel port support
-+#
-+# CONFIG_PARPORT is not set
-+
-+#
-+# Plug and Play support
-+#
-+# CONFIG_PNP is not set
-+
-+#
-+# Block devices
-+#
-+CONFIG_BLK_DEV_FD=y
-+# CONFIG_BLK_DEV_XD is not set
-+# CONFIG_BLK_CPQ_DA is not set
-+CONFIG_BLK_CPQ_CISS_DA=y
-+# CONFIG_CISS_SCSI_TAPE is not set
-+# CONFIG_BLK_DEV_DAC960 is not set
-+# CONFIG_BLK_DEV_UMEM is not set
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=y
-+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-+# CONFIG_BLK_DEV_NBD is not set
-+# CONFIG_BLK_DEV_SX8 is not set
-+# CONFIG_BLK_DEV_UB is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=4096
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+# CONFIG_LBD is not set
-+# CONFIG_CDROM_PKTCDVD is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+# CONFIG_ATA_OVER_ETH is not set
-+
-+#
-+# ATA/ATAPI/MFM/RLL support
-+#
-+CONFIG_IDE=y
-+CONFIG_BLK_DEV_IDE=y
-+
-+#
-+# Please see Documentation/ide.txt for help/info on IDE drives
-+#
-+# CONFIG_BLK_DEV_IDE_SATA is not set
-+# CONFIG_BLK_DEV_HD_IDE is not set
-+CONFIG_BLK_DEV_IDEDISK=y
-+# CONFIG_IDEDISK_MULTI_MODE is not set
-+CONFIG_BLK_DEV_IDECD=y
-+# CONFIG_BLK_DEV_IDETAPE is not set
-+# CONFIG_BLK_DEV_IDEFLOPPY is not set
-+# CONFIG_BLK_DEV_IDESCSI is not set
-+# CONFIG_IDE_TASK_IOCTL is not set
-+
-+#
-+# IDE chipset support/bugfixes
-+#
-+CONFIG_IDE_GENERIC=y
-+# CONFIG_BLK_DEV_CMD640 is not set
-+CONFIG_BLK_DEV_IDEPCI=y
-+# CONFIG_IDEPCI_SHARE_IRQ is not set
-+# CONFIG_BLK_DEV_OFFBOARD is not set
-+CONFIG_BLK_DEV_GENERIC=y
-+# CONFIG_BLK_DEV_OPTI621 is not set
-+# CONFIG_BLK_DEV_RZ1000 is not set
-+CONFIG_BLK_DEV_IDEDMA_PCI=y
-+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-+CONFIG_IDEDMA_PCI_AUTO=y
-+# CONFIG_IDEDMA_ONLYDISK is not set
-+# CONFIG_BLK_DEV_AEC62XX is not set
-+# CONFIG_BLK_DEV_ALI15X3 is not set
-+# CONFIG_BLK_DEV_AMD74XX is not set
-+# CONFIG_BLK_DEV_ATIIXP is not set
-+# CONFIG_BLK_DEV_CMD64X is not set
-+# CONFIG_BLK_DEV_TRIFLEX is not set
-+# CONFIG_BLK_DEV_CY82C693 is not set
-+# CONFIG_BLK_DEV_CS5520 is not set
-+# CONFIG_BLK_DEV_CS5530 is not set
-+# CONFIG_BLK_DEV_HPT34X is not set
-+# CONFIG_BLK_DEV_HPT366 is not set
-+# CONFIG_BLK_DEV_SC1200 is not set
-+CONFIG_BLK_DEV_PIIX=y
-+# CONFIG_BLK_DEV_NS87415 is not set
-+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
-+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
-+CONFIG_BLK_DEV_SVWKS=y
-+# CONFIG_BLK_DEV_SIIMAGE is not set
-+# CONFIG_BLK_DEV_SIS5513 is not set
-+# CONFIG_BLK_DEV_SLC90E66 is not set
-+# CONFIG_BLK_DEV_TRM290 is not set
-+# CONFIG_BLK_DEV_VIA82CXXX is not set
-+# CONFIG_IDE_ARM is not set
-+# CONFIG_IDE_CHIPSETS is not set
-+CONFIG_BLK_DEV_IDEDMA=y
-+# CONFIG_IDEDMA_IVB is not set
-+CONFIG_IDEDMA_AUTO=y
-+# CONFIG_BLK_DEV_HD is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=y
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=y
-+# CONFIG_CHR_DEV_ST is not set
-+# CONFIG_CHR_DEV_OSST is not set
-+# CONFIG_BLK_DEV_SR is not set
-+# CONFIG_CHR_DEV_SG is not set
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+# CONFIG_SCSI_MULTI_LUN is not set
-+# CONFIG_SCSI_CONSTANTS is not set
-+# CONFIG_SCSI_LOGGING is not set
-+
-+#
-+# SCSI Transport Attributes
-+#
-+CONFIG_SCSI_SPI_ATTRS=y
-+# CONFIG_SCSI_FC_ATTRS is not set
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+
-+#
-+# SCSI low-level drivers
-+#
-+CONFIG_BLK_DEV_3W_XXXX_RAID=y
-+# CONFIG_SCSI_3W_9XXX is not set
-+# CONFIG_SCSI_7000FASST is not set
-+# CONFIG_SCSI_ACARD is not set
-+# CONFIG_SCSI_AHA152X is not set
-+# CONFIG_SCSI_AHA1542 is not set
-+CONFIG_SCSI_AACRAID=y
-+CONFIG_SCSI_AIC7XXX=y
-+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
-+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-+CONFIG_AIC7XXX_DEBUG_ENABLE=y
-+CONFIG_AIC7XXX_DEBUG_MASK=0
-+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-+# CONFIG_SCSI_AIC7XXX_OLD is not set
-+CONFIG_SCSI_AIC79XX=y
-+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-+CONFIG_AIC79XX_RESET_DELAY_MS=15000
-+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
-+CONFIG_AIC79XX_DEBUG_ENABLE=y
-+CONFIG_AIC79XX_DEBUG_MASK=0
-+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-+# CONFIG_SCSI_DPT_I2O is not set
-+# CONFIG_SCSI_ADVANSYS is not set
-+# CONFIG_SCSI_IN2000 is not set
-+CONFIG_MEGARAID_NEWGEN=y
-+# CONFIG_MEGARAID_MM is not set
-+CONFIG_SCSI_SATA=y
-+# CONFIG_SCSI_SATA_AHCI is not set
-+# CONFIG_SCSI_SATA_SVW is not set
-+CONFIG_SCSI_ATA_PIIX=y
-+# CONFIG_SCSI_SATA_NV is not set
-+CONFIG_SCSI_SATA_PROMISE=y
-+# CONFIG_SCSI_SATA_QSTOR is not set
-+CONFIG_SCSI_SATA_SX4=y
-+CONFIG_SCSI_SATA_SIL=y
-+# CONFIG_SCSI_SATA_SIS is not set
-+# CONFIG_SCSI_SATA_ULI is not set
-+# CONFIG_SCSI_SATA_VIA is not set
-+# CONFIG_SCSI_SATA_VITESSE is not set
-+CONFIG_SCSI_BUSLOGIC=y
-+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-+# CONFIG_SCSI_CPQFCTS is not set
-+# CONFIG_SCSI_DMX3191D is not set
-+# CONFIG_SCSI_DTC3280 is not set
-+# CONFIG_SCSI_EATA is not set
-+# CONFIG_SCSI_EATA_PIO is not set
-+# CONFIG_SCSI_FUTURE_DOMAIN is not set
-+# CONFIG_SCSI_GDTH is not set
-+# CONFIG_SCSI_GENERIC_NCR5380 is not set
-+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
-+# CONFIG_SCSI_IPS is not set
-+# CONFIG_SCSI_INITIO is not set
-+# CONFIG_SCSI_INIA100 is not set
-+# CONFIG_SCSI_NCR53C406A is not set
-+# CONFIG_SCSI_SYM53C8XX_2 is not set
-+# CONFIG_SCSI_IPR is not set
-+# CONFIG_SCSI_PAS16 is not set
-+# CONFIG_SCSI_PCI2000 is not set
-+# CONFIG_SCSI_PCI2220I is not set
-+# CONFIG_SCSI_PSI240I is not set
-+# CONFIG_SCSI_QLOGIC_FAS is not set
-+# CONFIG_SCSI_QLOGIC_ISP is not set
-+# CONFIG_SCSI_QLOGIC_FC is not set
-+# CONFIG_SCSI_QLOGIC_1280 is not set
-+CONFIG_SCSI_QLA2XXX=y
-+# CONFIG_SCSI_QLA21XX is not set
-+# CONFIG_SCSI_QLA22XX is not set
-+# CONFIG_SCSI_QLA2300 is not set
-+# CONFIG_SCSI_QLA2322 is not set
-+# CONFIG_SCSI_QLA6312 is not set
-+# CONFIG_SCSI_LPFC is not set
-+# CONFIG_SCSI_SEAGATE is not set
-+# CONFIG_SCSI_SYM53C416 is not set
-+# CONFIG_SCSI_DC395x is not set
-+# CONFIG_SCSI_DC390T is not set
-+# CONFIG_SCSI_T128 is not set
-+# CONFIG_SCSI_U14_34F is not set
-+# CONFIG_SCSI_ULTRASTOR is not set
-+# CONFIG_SCSI_NSP32 is not set
-+# CONFIG_SCSI_DEBUG is not set
-+
-+#
-+# Old CD-ROM drivers (not SCSI, not IDE)
-+#
-+# CONFIG_CD_NO_IDESCSI is not set
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+CONFIG_MD=y
-+CONFIG_BLK_DEV_MD=y
-+# CONFIG_MD_LINEAR is not set
-+CONFIG_MD_RAID0=y
-+CONFIG_MD_RAID1=y
-+# CONFIG_MD_RAID10 is not set
-+CONFIG_MD_RAID5=y
-+# CONFIG_MD_RAID6 is not set
-+# CONFIG_MD_MULTIPATH is not set
-+# CONFIG_MD_FAULTY is not set
-+CONFIG_BLK_DEV_DM=y
-+# CONFIG_DM_CRYPT is not set
-+CONFIG_DM_SNAPSHOT=y
-+CONFIG_DM_MIRROR=y
-+# CONFIG_DM_ZERO is not set
-+# CONFIG_DM_MULTIPATH is not set
-+
-+#
-+# Fusion MPT device support
-+#
-+CONFIG_FUSION=y
-+CONFIG_FUSION_MAX_SGE=40
-+# CONFIG_FUSION_CTL is not set
-+
-+#
-+# IEEE 1394 (FireWire) support
-+#
-+# CONFIG_IEEE1394 is not set
-+
-+#
-+# I2O device support
-+#
-+# CONFIG_I2O is not set
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=y
-+# CONFIG_PACKET_MMAP is not set
-+CONFIG_UNIX=y
-+# CONFIG_NET_KEY is not set
-+CONFIG_INET=y
-+# CONFIG_IP_MULTICAST is not set
-+# CONFIG_IP_ADVANCED_ROUTER is not set
-+CONFIG_IP_PNP=y
-+CONFIG_IP_PNP_DHCP=y
-+# CONFIG_IP_PNP_BOOTP is not set
-+# CONFIG_IP_PNP_RARP is not set
-+# CONFIG_NET_IPIP is not set
-+# CONFIG_NET_IPGRE is not set
-+# CONFIG_ARPD is not set
-+# CONFIG_SYN_COOKIES is not set
-+# CONFIG_INET_AH is not set
-+# CONFIG_INET_ESP is not set
-+# CONFIG_INET_IPCOMP is not set
-+# CONFIG_INET_TUNNEL is not set
-+CONFIG_IP_TCPDIAG=y
-+# CONFIG_IP_TCPDIAG_IPV6 is not set
-+
-+#
-+# IP: Virtual Server Configuration
-+#
-+# CONFIG_IP_VS is not set
-+# CONFIG_IPV6 is not set
-+CONFIG_NETFILTER=y
-+# CONFIG_NETFILTER_DEBUG is not set
-+CONFIG_BRIDGE_NETFILTER=y
-+
-+#
-+# IP: Netfilter Configuration
-+#
-+CONFIG_IP_NF_CONNTRACK=m
-+CONFIG_IP_NF_CT_ACCT=y
-+# CONFIG_IP_NF_CONNTRACK_MARK is not set
-+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
-+CONFIG_IP_NF_FTP=m
-+# CONFIG_IP_NF_IRC is not set
-+# CONFIG_IP_NF_TFTP is not set
-+# CONFIG_IP_NF_AMANDA is not set
-+# CONFIG_IP_NF_QUEUE is not set
-+CONFIG_IP_NF_IPTABLES=m
-+# CONFIG_IP_NF_MATCH_LIMIT is not set
-+CONFIG_IP_NF_MATCH_IPRANGE=m
-+# CONFIG_IP_NF_MATCH_MAC is not set
-+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
-+# CONFIG_IP_NF_MATCH_MARK is not set
-+# CONFIG_IP_NF_MATCH_MULTIPORT is not set
-+# CONFIG_IP_NF_MATCH_TOS is not set
-+# CONFIG_IP_NF_MATCH_RECENT is not set
-+# CONFIG_IP_NF_MATCH_ECN is not set
-+# CONFIG_IP_NF_MATCH_DSCP is not set
-+# CONFIG_IP_NF_MATCH_AH_ESP is not set
-+# CONFIG_IP_NF_MATCH_LENGTH is not set
-+# CONFIG_IP_NF_MATCH_TTL is not set
-+# CONFIG_IP_NF_MATCH_TCPMSS is not set
-+# CONFIG_IP_NF_MATCH_HELPER is not set
-+# CONFIG_IP_NF_MATCH_STATE is not set
-+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
-+# CONFIG_IP_NF_MATCH_OWNER is not set
-+CONFIG_IP_NF_MATCH_PHYSDEV=m
-+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
-+# CONFIG_IP_NF_MATCH_REALM is not set
-+# CONFIG_IP_NF_MATCH_SCTP is not set
-+# CONFIG_IP_NF_MATCH_COMMENT is not set
-+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
-+CONFIG_IP_NF_FILTER=m
-+CONFIG_IP_NF_TARGET_REJECT=m
-+# CONFIG_IP_NF_TARGET_LOG is not set
-+# CONFIG_IP_NF_TARGET_ULOG is not set
-+# CONFIG_IP_NF_TARGET_TCPMSS is not set
-+CONFIG_IP_NF_NAT=m
-+CONFIG_IP_NF_NAT_NEEDED=y
-+CONFIG_IP_NF_TARGET_MASQUERADE=m
-+# CONFIG_IP_NF_TARGET_REDIRECT is not set
-+# CONFIG_IP_NF_TARGET_NETMAP is not set
-+# CONFIG_IP_NF_TARGET_SAME is not set
-+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
-+CONFIG_IP_NF_NAT_FTP=m
-+# CONFIG_IP_NF_MANGLE is not set
-+# CONFIG_IP_NF_RAW is not set
-+# CONFIG_IP_NF_ARPTABLES is not set
-+
-+#
-+# Bridge: Netfilter Configuration
-+#
-+# CONFIG_BRIDGE_NF_EBTABLES is not set
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+# CONFIG_IP_SCTP is not set
-+# CONFIG_ATM is not set
-+CONFIG_BRIDGE=y
-+# CONFIG_VLAN_8021Q is not set
-+# CONFIG_DECNET is not set
-+# CONFIG_LLC2 is not set
-+# CONFIG_IPX is not set
-+# CONFIG_ATALK is not set
-+# CONFIG_X25 is not set
-+# CONFIG_LAPB is not set
-+# CONFIG_NET_DIVERT is not set
-+# CONFIG_ECONET is not set
-+# CONFIG_WAN_ROUTER is not set
-+
-+#
-+# QoS and/or fair queueing
-+#
-+# CONFIG_NET_SCHED is not set
-+# CONFIG_NET_CLS_ROUTE is not set
-+
-+#
-+# Network testing
-+#
-+# CONFIG_NET_PKTGEN is not set
-+# CONFIG_NETPOLL is not set
-+# CONFIG_NET_POLL_CONTROLLER is not set
-+# CONFIG_HAMRADIO is not set
-+# CONFIG_IRDA is not set
-+# CONFIG_BT is not set
-+CONFIG_NETDEVICES=y
-+# CONFIG_DUMMY is not set
-+# CONFIG_BONDING is not set
-+# CONFIG_EQUALIZER is not set
-+CONFIG_TUN=y
-+
-+#
-+# ARCnet devices
-+#
-+# CONFIG_ARCNET is not set
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=y
-+# CONFIG_HAPPYMEAL is not set
-+# CONFIG_SUNGEM is not set
-+CONFIG_NET_VENDOR_3COM=y
-+# CONFIG_EL1 is not set
-+# CONFIG_EL2 is not set
-+# CONFIG_ELPLUS is not set
-+# CONFIG_EL16 is not set
-+# CONFIG_EL3 is not set
-+# CONFIG_3C515 is not set
-+CONFIG_VORTEX=y
-+# CONFIG_TYPHOON is not set
-+# CONFIG_LANCE is not set
-+# CONFIG_NET_VENDOR_SMC is not set
-+# CONFIG_NET_VENDOR_RACAL is not set
-+
-+#
-+# Tulip family network device support
-+#
-+CONFIG_NET_TULIP=y
-+# CONFIG_DE2104X is not set
-+CONFIG_TULIP=y
-+# CONFIG_TULIP_MWI is not set
-+# CONFIG_TULIP_MMIO is not set
-+# CONFIG_TULIP_NAPI is not set
-+# CONFIG_DE4X5 is not set
-+# CONFIG_WINBOND_840 is not set
-+# CONFIG_DM9102 is not set
-+# CONFIG_AT1700 is not set
-+# CONFIG_DEPCA is not set
-+# CONFIG_HP100 is not set
-+# CONFIG_NET_ISA is not set
-+CONFIG_NET_PCI=y
-+CONFIG_PCNET32=y
-+# CONFIG_AMD8111_ETH is not set
-+# CONFIG_ADAPTEC_STARFIRE is not set
-+# CONFIG_AC3200 is not set
-+# CONFIG_APRICOT is not set
-+# CONFIG_B44 is not set
-+# CONFIG_FORCEDETH is not set
-+# CONFIG_CS89x0 is not set
-+# CONFIG_DGRS is not set
-+# CONFIG_EEPRO100 is not set
-+CONFIG_E100=y
-+# CONFIG_FEALNX is not set
-+# CONFIG_NATSEMI is not set
-+CONFIG_NE2K_PCI=y
-+# CONFIG_8139CP is not set
-+CONFIG_8139TOO=y
-+CONFIG_8139TOO_PIO=y
-+# CONFIG_8139TOO_TUNE_TWISTER is not set
-+# CONFIG_8139TOO_8129 is not set
-+# CONFIG_8139_OLD_RX_RESET is not set
-+# CONFIG_SIS900 is not set
-+# CONFIG_EPIC100 is not set
-+# CONFIG_SUNDANCE is not set
-+# CONFIG_TLAN is not set
-+CONFIG_VIA_RHINE=y
-+# CONFIG_VIA_RHINE_MMIO is not set
-+# CONFIG_NET_POCKET is not set
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+CONFIG_ACENIC=y
-+# CONFIG_ACENIC_OMIT_TIGON_I is not set
-+# CONFIG_DL2K is not set
-+CONFIG_E1000=y
-+# CONFIG_E1000_NAPI is not set
-+# CONFIG_NS83820 is not set
-+# CONFIG_HAMACHI is not set
-+# CONFIG_YELLOWFIN is not set
-+# CONFIG_R8169 is not set
-+CONFIG_SK98LIN=y
-+# CONFIG_VIA_VELOCITY is not set
-+CONFIG_TIGON3=y
-+# CONFIG_BNX2 is not set
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+# CONFIG_IXGB is not set
-+# CONFIG_S2IO is not set
-+
-+#
-+# Token Ring devices
-+#
-+# CONFIG_TR is not set
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+# CONFIG_NET_RADIO is not set
-+
-+#
-+# Wan interfaces
-+#
-+# CONFIG_WAN is not set
-+# CONFIG_FDDI is not set
-+# CONFIG_HIPPI is not set
-+# CONFIG_PPP is not set
-+# CONFIG_SLIP is not set
-+# CONFIG_NET_FC is not set
-+# CONFIG_SHAPER is not set
-+# CONFIG_NETCONSOLE is not set
-+
-+#
-+# ISDN subsystem
-+#
-+# CONFIG_ISDN is not set
-+
-+#
-+# Telephony Support
-+#
-+# CONFIG_PHONE is not set
-+
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+CONFIG_INPUT_MOUSEDEV_PSAUX=y
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+# CONFIG_INPUT_JOYDEV is not set
-+# CONFIG_INPUT_TSDEV is not set
-+# CONFIG_INPUT_EVDEV is not set
-+# CONFIG_INPUT_EVBUG is not set
-+
-+#
-+# Input Device Drivers
-+#
-+CONFIG_INPUT_KEYBOARD=y
-+CONFIG_KEYBOARD_ATKBD=y
-+# CONFIG_KEYBOARD_SUNKBD is not set
-+# CONFIG_KEYBOARD_LKKBD is not set
-+# CONFIG_KEYBOARD_XTKBD is not set
-+# CONFIG_KEYBOARD_NEWTON is not set
-+CONFIG_INPUT_MOUSE=y
-+CONFIG_MOUSE_PS2=y
-+# CONFIG_MOUSE_SERIAL is not set
-+# CONFIG_MOUSE_INPORT is not set
-+# CONFIG_MOUSE_LOGIBM is not set
-+# CONFIG_MOUSE_PC110PAD is not set
-+# CONFIG_MOUSE_VSXXXAA is not set
-+# CONFIG_INPUT_JOYSTICK is not set
-+# CONFIG_INPUT_TOUCHSCREEN is not set
-+# CONFIG_INPUT_MISC is not set
-+
-+#
-+# Hardware I/O ports
-+#
-+CONFIG_SERIO=y
-+CONFIG_SERIO_I8042=y
-+CONFIG_SERIO_SERPORT=y
-+# CONFIG_SERIO_CT82C710 is not set
-+# CONFIG_SERIO_PCIPS2 is not set
-+CONFIG_SERIO_LIBPS2=y
-+# CONFIG_SERIO_RAW is not set
-+# CONFIG_GAMEPORT is not set
-+
-+#
-+# Character devices
-+#
-+CONFIG_VT=y
-+CONFIG_VT_CONSOLE=y
-+CONFIG_HW_CONSOLE=y
-+# CONFIG_SERIAL_NONSTANDARD is not set
-+
-+#
-+# Serial drivers
-+#
-+# CONFIG_SERIAL_8250 is not set
-+
-+#
-+# Non-8250 serial port support
-+#
-+# CONFIG_SERIAL_JSM is not set
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+
-+#
-+# IPMI
-+#
-+# CONFIG_IPMI_HANDLER is not set
-+
-+#
-+# Watchdog Cards
-+#
-+# CONFIG_WATCHDOG is not set
-+# CONFIG_HW_RANDOM is not set
-+# CONFIG_NVRAM is not set
-+# CONFIG_RTC is not set
-+# CONFIG_GEN_RTC is not set
-+# CONFIG_DTLK is not set
-+# CONFIG_R3964 is not set
-+# CONFIG_APPLICOM is not set
-+# CONFIG_SONYPI is not set
-+
-+#
-+# Ftape, the floppy tape device driver
-+#
-+# CONFIG_FTAPE is not set
-+CONFIG_AGP=m
-+CONFIG_AGP_ALI=m
-+CONFIG_AGP_ATI=m
-+CONFIG_AGP_AMD=m
-+CONFIG_AGP_AMD64=m
-+CONFIG_AGP_INTEL=m
-+CONFIG_AGP_NVIDIA=m
-+CONFIG_AGP_SIS=m
-+CONFIG_AGP_SWORKS=m
-+CONFIG_AGP_VIA=m
-+# CONFIG_AGP_EFFICEON is not set
-+CONFIG_DRM=m
-+CONFIG_DRM_TDFX=m
-+# CONFIG_DRM_GAMMA is not set
-+CONFIG_DRM_R128=m
-+CONFIG_DRM_RADEON=m
-+CONFIG_DRM_I810=m
-+CONFIG_DRM_I830=m
-+CONFIG_DRM_I915=m
-+CONFIG_DRM_MGA=m
-+CONFIG_DRM_SIS=m
-+# CONFIG_MWAVE is not set
-+# CONFIG_RAW_DRIVER is not set
-+# CONFIG_HPET is not set
-+# CONFIG_HANGCHECK_TIMER is not set
-+
-+#
-+# TPM devices
-+#
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# I2C support
-+#
-+# CONFIG_I2C is not set
-+
-+#
-+# Dallas's 1-wire bus
-+#
-+# CONFIG_W1 is not set
-+
-+#
-+# Misc devices
-+#
-+# CONFIG_IBM_ASM is not set
-+
-+#
-+# Multimedia devices
-+#
-+# CONFIG_VIDEO_DEV is not set
-+
-+#
-+# Digital Video Broadcasting Devices
-+#
-+# CONFIG_DVB is not set
-+
-+#
-+# Graphics support
-+#
-+# CONFIG_FB is not set
-+# CONFIG_VIDEO_SELECT is not set
-+
-+#
-+# Console display driver support
-+#
-+CONFIG_VGA_CONSOLE=y
-+# CONFIG_MDA_CONSOLE is not set
-+CONFIG_DUMMY_CONSOLE=y
-+
-+#
-+# Sound
-+#
-+# CONFIG_SOUND is not set
-+
-+#
-+# USB support
-+#
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB=y
-+# CONFIG_USB_DEBUG is not set
-+
-+#
-+# Miscellaneous USB options
-+#
-+# CONFIG_USB_DEVICEFS is not set
-+# CONFIG_USB_BANDWIDTH is not set
-+# CONFIG_USB_DYNAMIC_MINORS is not set
-+# CONFIG_USB_OTG is not set
-+
-+#
-+# USB Host Controller Drivers
-+#
-+# CONFIG_USB_EHCI_HCD is not set
-+CONFIG_USB_OHCI_HCD=y
-+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
-+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-+CONFIG_USB_UHCI_HCD=y
-+# CONFIG_USB_SL811_HCD is not set
-+
-+#
-+# USB Device Class drivers
-+#
-+# CONFIG_USB_BLUETOOTH_TTY is not set
-+# CONFIG_USB_ACM is not set
-+# CONFIG_USB_PRINTER is not set
-+
-+#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-+#
-+# CONFIG_USB_STORAGE is not set
-+
-+#
-+# USB Input Devices
-+#
-+CONFIG_USB_HID=y
-+CONFIG_USB_HIDINPUT=y
-+# CONFIG_HID_FF is not set
-+# CONFIG_USB_HIDDEV is not set
-+# CONFIG_USB_AIPTEK is not set
-+# CONFIG_USB_WACOM is not set
-+# CONFIG_USB_KBTAB is not set
-+# CONFIG_USB_POWERMATE is not set
-+# CONFIG_USB_MTOUCH is not set
-+# CONFIG_USB_EGALAX is not set
-+# CONFIG_USB_XPAD is not set
-+# CONFIG_USB_ATI_REMOTE is not set
-+
-+#
-+# USB Imaging devices
-+#
-+# CONFIG_USB_MDC800 is not set
-+# CONFIG_USB_MICROTEK is not set
-+
-+#
-+# USB Multimedia devices
-+#
-+# CONFIG_USB_DABUSB is not set
-+
-+#
-+# Video4Linux support is needed for USB Multimedia device support
-+#
-+
-+#
-+# USB Network Adapters
-+#
-+# CONFIG_USB_CATC is not set
-+# CONFIG_USB_KAWETH is not set
-+# CONFIG_USB_PEGASUS is not set
-+# CONFIG_USB_RTL8150 is not set
-+# CONFIG_USB_USBNET is not set
-+CONFIG_USB_MON=y
-+
-+#
-+# USB port drivers
-+#
-+
-+#
-+# USB Serial Converter support
-+#
-+# CONFIG_USB_SERIAL is not set
-+
-+#
-+# USB Miscellaneous drivers
-+#
-+# CONFIG_USB_EMI62 is not set
-+# CONFIG_USB_EMI26 is not set
-+# CONFIG_USB_AUERSWALD is not set
-+# CONFIG_USB_RIO500 is not set
-+# CONFIG_USB_LEGOTOWER is not set
-+# CONFIG_USB_LCD is not set
-+# CONFIG_USB_LED is not set
-+# CONFIG_USB_CYTHERM is not set
-+# CONFIG_USB_PHIDGETKIT is not set
-+# CONFIG_USB_PHIDGETSERVO is not set
-+# CONFIG_USB_IDMOUSE is not set
-+
-+#
-+# USB ATM/DSL drivers
-+#
-+
-+#
-+# USB Gadget Support
-+#
-+# CONFIG_USB_GADGET is not set
-+
-+#
-+# MMC/SD Card support
-+#
-+# CONFIG_MMC is not set
-+
-+#
-+# InfiniBand support
-+#
-+# CONFIG_INFINIBAND is not set
-+
-+#
-+# Power management options
-+#
-+
-+#
-+# ACPI (Advanced Configuration and Power Interface) Support
-+#
-+CONFIG_ACPI=y
-+CONFIG_ACPI_BOOT=y
-+CONFIG_ACPI_INTERPRETER=y
-+CONFIG_ACPI_AC=m
-+CONFIG_ACPI_BATTERY=m
-+CONFIG_ACPI_BUTTON=m
-+CONFIG_ACPI_VIDEO=m
-+CONFIG_ACPI_FAN=m
-+CONFIG_ACPI_PROCESSOR=m
-+CONFIG_ACPI_THERMAL=m
-+CONFIG_ACPI_ASUS=m
-+CONFIG_ACPI_IBM=m
-+CONFIG_ACPI_TOSHIBA=m
-+# CONFIG_ACPI_CUSTOM_DSDT is not set
-+CONFIG_ACPI_BLACKLIST_YEAR=0
-+# CONFIG_ACPI_DEBUG is not set
-+CONFIG_ACPI_BUS=y
-+CONFIG_ACPI_EC=y
-+CONFIG_ACPI_POWER=y
-+CONFIG_ACPI_PCI=y
-+CONFIG_ACPI_SYSTEM=y
-+# CONFIG_X86_PM_TIMER is not set
-+# CONFIG_ACPI_CONTAINER is not set
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+# CONFIG_EXT2_FS_XATTR is not set
-+CONFIG_EXT3_FS=y
-+CONFIG_EXT3_FS_XATTR=y
-+# CONFIG_EXT3_FS_POSIX_ACL is not set
-+# CONFIG_EXT3_FS_SECURITY is not set
-+CONFIG_JBD=y
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+CONFIG_REISERFS_FS=y
-+# CONFIG_REISERFS_CHECK is not set
-+# CONFIG_REISERFS_PROC_INFO is not set
-+# CONFIG_REISERFS_FS_XATTR is not set
-+# CONFIG_JFS_FS is not set
-+
-+#
-+# XFS support
-+#
-+# CONFIG_XFS_FS is not set
-+# CONFIG_MINIX_FS is not set
-+# CONFIG_ROMFS_FS is not set
-+# CONFIG_QUOTA is not set
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=y
-+CONFIG_AUTOFS4_FS=y
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=y
-+CONFIG_JOLIET=y
-+CONFIG_ZISOFS=y
-+CONFIG_ZISOFS_FS=y
-+# CONFIG_UDF_FS is not set
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=m
-+CONFIG_MSDOS_FS=m
-+CONFIG_VFAT_FS=m
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-+# CONFIG_NTFS_FS is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+# CONFIG_DEVPTS_FS_XATTR is not set
-+CONFIG_TMPFS=y
-+# CONFIG_TMPFS_XATTR is not set
-+# CONFIG_HUGETLBFS is not set
-+# CONFIG_HUGETLB_PAGE is not set
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+# CONFIG_ADFS_FS is not set
-+# CONFIG_AFFS_FS is not set
-+# CONFIG_HFS_FS is not set
-+# CONFIG_HFSPLUS_FS is not set
-+# CONFIG_BEFS_FS is not set
-+# CONFIG_BFS_FS is not set
-+# CONFIG_EFS_FS is not set
-+CONFIG_CRAMFS=y
-+# CONFIG_VXFS_FS is not set
-+# CONFIG_HPFS_FS is not set
-+# CONFIG_QNX4FS_FS is not set
-+# CONFIG_SYSV_FS is not set
-+# CONFIG_UFS_FS is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=y
-+CONFIG_NFS_V3=y
-+# CONFIG_NFS_V4 is not set
-+# CONFIG_NFS_DIRECTIO is not set
-+CONFIG_NFSD=m
-+CONFIG_NFSD_V3=y
-+# CONFIG_NFSD_V4 is not set
-+CONFIG_NFSD_TCP=y
-+CONFIG_ROOT_NFS=y
-+CONFIG_LOCKD=y
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=m
-+CONFIG_SUNRPC=y
-+# CONFIG_RPCSEC_GSS_KRB5 is not set
-+# CONFIG_RPCSEC_GSS_SPKM3 is not set
-+# CONFIG_SMB_FS is not set
-+# CONFIG_CIFS is not set
-+# CONFIG_NCP_FS is not set
-+# CONFIG_CODA_FS is not set
-+# CONFIG_AFS_FS is not set
-+
-+#
-+# Partition Types
-+#
-+# CONFIG_PARTITION_ADVANCED is not set
-+CONFIG_MSDOS_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="iso8859-1"
-+CONFIG_NLS_CODEPAGE_437=y
-+# CONFIG_NLS_CODEPAGE_737 is not set
-+# CONFIG_NLS_CODEPAGE_775 is not set
-+# CONFIG_NLS_CODEPAGE_850 is not set
-+# CONFIG_NLS_CODEPAGE_852 is not set
-+# CONFIG_NLS_CODEPAGE_855 is not set
-+# CONFIG_NLS_CODEPAGE_857 is not set
-+# CONFIG_NLS_CODEPAGE_860 is not set
-+# CONFIG_NLS_CODEPAGE_861 is not set
-+# CONFIG_NLS_CODEPAGE_862 is not set
-+# CONFIG_NLS_CODEPAGE_863 is not set
-+# CONFIG_NLS_CODEPAGE_864 is not set
-+# CONFIG_NLS_CODEPAGE_865 is not set
-+# CONFIG_NLS_CODEPAGE_866 is not set
-+# CONFIG_NLS_CODEPAGE_869 is not set
-+# CONFIG_NLS_CODEPAGE_936 is not set
-+# CONFIG_NLS_CODEPAGE_950 is not set
-+# CONFIG_NLS_CODEPAGE_932 is not set
-+# CONFIG_NLS_CODEPAGE_949 is not set
-+# CONFIG_NLS_CODEPAGE_874 is not set
-+# CONFIG_NLS_ISO8859_8 is not set
-+# CONFIG_NLS_CODEPAGE_1250 is not set
-+# CONFIG_NLS_CODEPAGE_1251 is not set
-+# CONFIG_NLS_ASCII is not set
-+CONFIG_NLS_ISO8859_1=y
-+# CONFIG_NLS_ISO8859_2 is not set
-+# CONFIG_NLS_ISO8859_3 is not set
-+# CONFIG_NLS_ISO8859_4 is not set
-+# CONFIG_NLS_ISO8859_5 is not set
-+# CONFIG_NLS_ISO8859_6 is not set
-+# CONFIG_NLS_ISO8859_7 is not set
-+# CONFIG_NLS_ISO8859_9 is not set
-+# CONFIG_NLS_ISO8859_13 is not set
-+# CONFIG_NLS_ISO8859_14 is not set
-+# CONFIG_NLS_ISO8859_15 is not set
-+# CONFIG_NLS_KOI8_R is not set
-+# CONFIG_NLS_KOI8_U is not set
-+# CONFIG_NLS_UTF8 is not set
-+
-+#
-+# Security options
-+#
-+# CONFIG_KEYS is not set
-+# CONFIG_SECURITY is not set
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+CONFIG_CRYPTO_HMAC=y
-+# CONFIG_CRYPTO_NULL is not set
-+# CONFIG_CRYPTO_MD4 is not set
-+CONFIG_CRYPTO_MD5=m
-+CONFIG_CRYPTO_SHA1=m
-+# CONFIG_CRYPTO_SHA256 is not set
-+# CONFIG_CRYPTO_SHA512 is not set
-+# CONFIG_CRYPTO_WP512 is not set
-+# CONFIG_CRYPTO_TGR192 is not set
-+CONFIG_CRYPTO_DES=m
-+# CONFIG_CRYPTO_BLOWFISH is not set
-+# CONFIG_CRYPTO_TWOFISH is not set
-+# CONFIG_CRYPTO_SERPENT is not set
-+# CONFIG_CRYPTO_AES_586 is not set
-+# CONFIG_CRYPTO_CAST5 is not set
-+# CONFIG_CRYPTO_CAST6 is not set
-+# CONFIG_CRYPTO_TEA is not set
-+# CONFIG_CRYPTO_ARC4 is not set
-+# CONFIG_CRYPTO_KHAZAD is not set
-+# CONFIG_CRYPTO_ANUBIS is not set
-+# CONFIG_CRYPTO_DEFLATE is not set
-+# CONFIG_CRYPTO_MICHAEL_MIC is not set
-+CONFIG_CRYPTO_CRC32C=m
-+# CONFIG_CRYPTO_TEST is not set
-+
-+#
-+# Hardware crypto devices
-+#
-+# CONFIG_CRYPTO_DEV_PADLOCK is not set
-+
-+#
-+# Library routines
-+#
-+# CONFIG_CRC_CCITT is not set
-+CONFIG_CRC32=y
-+CONFIG_LIBCRC32C=y
-+CONFIG_ZLIB_INFLATE=y
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=14
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_HIGHMEM is not set
-+CONFIG_DEBUG_BUGVERBOSE=y
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+# CONFIG_FRAME_POINTER is not set
-+# CONFIG_DEBUG_STACKOVERFLOW is not set
-+# CONFIG_KPROBES is not set
-+# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_DEBUG_PAGEALLOC is not set
-+# CONFIG_4KSTACKS is not set
-+CONFIG_X86_FIND_SMP_CONFIG=y
-+CONFIG_X86_MPPARSE=y
-diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_x86_64 linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_x86_64
---- pristine-linux-2.6.12/arch/xen/configs/xen0_defconfig_x86_64	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/configs/xen0_defconfig_x86_64	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1184 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12.6-xen0
-+# Mon Nov  7 17:24:18 2005
-+#
-+CONFIG_XEN=y
-+CONFIG_ARCH_XEN=y
-+CONFIG_NO_IDLE_HZ=y
-+
-+#
-+# XEN
-+#
-+CONFIG_XEN_PRIVILEGED_GUEST=y
-+CONFIG_XEN_PHYSDEV_ACCESS=y
-+CONFIG_XEN_BLKDEV_BACKEND=y
-+# CONFIG_XEN_BLKDEV_TAP_BE is not set
-+CONFIG_XEN_NETDEV_BACKEND=y
-+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
-+# CONFIG_XEN_TPMDEV_FRONTEND is not set
-+# CONFIG_XEN_TPMDEV_BACKEND is not set
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_XEN_NETDEV_FRONTEND=y
-+# CONFIG_XEN_BLKDEV_TAP is not set
-+# CONFIG_XEN_SHADOW_MODE is not set
-+CONFIG_XEN_SCRUB_PAGES=y
-+# CONFIG_XEN_X86 is not set
-+CONFIG_XEN_X86_64=y
-+CONFIG_HAVE_ARCH_ALLOC_SKB=y
-+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+# CONFIG_CLEAN_COMPILE is not set
-+CONFIG_BROKEN=y
-+CONFIG_BROKEN_ON_SMP=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+# CONFIG_POSIX_MQUEUE is not set
-+# CONFIG_BSD_PROCESS_ACCT is not set
-+CONFIG_SYSCTL=y
-+# CONFIG_AUDIT is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+# CONFIG_IKCONFIG is not set
-+# CONFIG_EMBEDDED is not set
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+# CONFIG_KALLSYMS_EXTRA_PASS is not set
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+# CONFIG_MODULE_FORCE_UNLOAD is not set
-+CONFIG_OBSOLETE_MODPARM=y
-+# CONFIG_MODVERSIONS is not set
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+CONFIG_KMOD=y
-+CONFIG_XENARCH="x86_64"
-+CONFIG_X86=y
-+CONFIG_MMU=y
-+CONFIG_UID16=y
-+CONFIG_GENERIC_ISA_DMA=y
-+CONFIG_GENERIC_IOMAP=y
-+CONFIG_X86_CMPXCHG=y
-+CONFIG_X86_L1_CACHE_SHIFT=7
-+CONFIG_RWSEM_GENERIC_SPINLOCK=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_X86_GOOD_APIC=y
-+# CONFIG_HPET_TIMER is not set
-+# CONFIG_SMP is not set
-+CONFIG_MICROCODE=y
-+# CONFIG_X86_CPUID is not set
-+CONFIG_SWIOTLB=y
-+# CONFIG_NUMA is not set
-+# CONFIG_MTRR is not set
-+CONFIG_X86_LOCAL_APIC=y
-+CONFIG_X86_IO_APIC=y
-+CONFIG_PCI=y
-+CONFIG_PCI_DIRECT=y
-+# CONFIG_PCI_MMCONFIG is not set
-+CONFIG_ISA_DMA_API=y
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_SECCOMP=y
-+
-+#
-+# X86_64 processor configuration
-+#
-+CONFIG_X86_64=y
-+CONFIG_64BIT=y
-+CONFIG_EARLY_PRINTK=y
-+
-+#
-+# Processor type and features
-+#
-+# CONFIG_MPSC is not set
-+CONFIG_GENERIC_CPU=y
-+CONFIG_X86_L1_CACHE_BYTES=128
-+# CONFIG_X86_TSC is not set
-+CONFIG_X86_XEN_GENAPIC=y
-+# CONFIG_X86_MSR is not set
-+# CONFIG_GART_IOMMU is not set
-+CONFIG_DUMMY_IOMMU=y
-+# CONFIG_X86_MCE is not set
-+
-+#
-+# Power management options
-+#
-+# CONFIG_PM is not set
-+
-+#
-+# CPU Frequency scaling
-+#
-+# CONFIG_CPU_FREQ is not set
-+
-+#
-+# Bus options (PCI etc.)
-+#
-+# CONFIG_UNORDERED_IO is not set
-+
-+#
-+# Executable file formats / Emulations
-+#
-+CONFIG_IA32_EMULATION=y
-+# CONFIG_IA32_AOUT is not set
-+CONFIG_COMPAT=y
-+CONFIG_SYSVIPC_COMPAT=y
-+
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+CONFIG_BINFMT_MISC=y
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+CONFIG_STANDALONE=y
-+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-+# CONFIG_FW_LOADER is not set
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Memory Technology Devices (MTD)
-+#
-+# CONFIG_MTD is not set
-+
-+#
-+# Parallel port support
-+#
-+# CONFIG_PARPORT is not set
-+
-+#
-+# Plug and Play support
-+#
-+# CONFIG_PNP is not set
-+
-+#
-+# Block devices
-+#
-+CONFIG_BLK_DEV_FD=y
-+# CONFIG_BLK_CPQ_DA is not set
-+CONFIG_BLK_CPQ_CISS_DA=y
-+# CONFIG_CISS_SCSI_TAPE is not set
-+# CONFIG_BLK_DEV_DAC960 is not set
-+# CONFIG_BLK_DEV_UMEM is not set
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=y
-+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-+# CONFIG_BLK_DEV_NBD is not set
-+# CONFIG_BLK_DEV_SX8 is not set
-+# CONFIG_BLK_DEV_UB is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=16384
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+# CONFIG_LBD is not set
-+# CONFIG_CDROM_PKTCDVD is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+# CONFIG_ATA_OVER_ETH is not set
-+
-+#
-+# ATA/ATAPI/MFM/RLL support
-+#
-+CONFIG_IDE=y
-+CONFIG_BLK_DEV_IDE=y
-+
-+#
-+# Please see Documentation/ide.txt for help/info on IDE drives
-+#
-+# CONFIG_BLK_DEV_IDE_SATA is not set
-+# CONFIG_BLK_DEV_HD_IDE is not set
-+CONFIG_BLK_DEV_IDEDISK=y
-+# CONFIG_IDEDISK_MULTI_MODE is not set
-+CONFIG_BLK_DEV_IDECD=y
-+# CONFIG_BLK_DEV_IDETAPE is not set
-+# CONFIG_BLK_DEV_IDEFLOPPY is not set
-+# CONFIG_BLK_DEV_IDESCSI is not set
-+# CONFIG_IDE_TASK_IOCTL is not set
-+
-+#
-+# IDE chipset support/bugfixes
-+#
-+CONFIG_IDE_GENERIC=y
-+# CONFIG_BLK_DEV_CMD640 is not set
-+CONFIG_BLK_DEV_IDEPCI=y
-+# CONFIG_IDEPCI_SHARE_IRQ is not set
-+# CONFIG_BLK_DEV_OFFBOARD is not set
-+CONFIG_BLK_DEV_GENERIC=y
-+# CONFIG_BLK_DEV_OPTI621 is not set
-+# CONFIG_BLK_DEV_RZ1000 is not set
-+CONFIG_BLK_DEV_IDEDMA_PCI=y
-+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-+CONFIG_IDEDMA_PCI_AUTO=y
-+# CONFIG_IDEDMA_ONLYDISK is not set
-+# CONFIG_BLK_DEV_AEC62XX is not set
-+# CONFIG_BLK_DEV_ALI15X3 is not set
-+# CONFIG_BLK_DEV_AMD74XX is not set
-+# CONFIG_BLK_DEV_ATIIXP is not set
-+# CONFIG_BLK_DEV_CMD64X is not set
-+# CONFIG_BLK_DEV_TRIFLEX is not set
-+# CONFIG_BLK_DEV_CY82C693 is not set
-+# CONFIG_BLK_DEV_CS5520 is not set
-+# CONFIG_BLK_DEV_CS5530 is not set
-+# CONFIG_BLK_DEV_HPT34X is not set
-+# CONFIG_BLK_DEV_HPT366 is not set
-+# CONFIG_BLK_DEV_SC1200 is not set
-+CONFIG_BLK_DEV_PIIX=y
-+# CONFIG_BLK_DEV_NS87415 is not set
-+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
-+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
-+CONFIG_BLK_DEV_SVWKS=y
-+# CONFIG_BLK_DEV_SIIMAGE is not set
-+# CONFIG_BLK_DEV_SIS5513 is not set
-+# CONFIG_BLK_DEV_SLC90E66 is not set
-+# CONFIG_BLK_DEV_TRM290 is not set
-+# CONFIG_BLK_DEV_VIA82CXXX is not set
-+# CONFIG_IDE_ARM is not set
-+CONFIG_BLK_DEV_IDEDMA=y
-+# CONFIG_IDEDMA_IVB is not set
-+CONFIG_IDEDMA_AUTO=y
-+# CONFIG_BLK_DEV_HD is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=y
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=y
-+# CONFIG_CHR_DEV_ST is not set
-+# CONFIG_CHR_DEV_OSST is not set
-+# CONFIG_BLK_DEV_SR is not set
-+# CONFIG_CHR_DEV_SG is not set
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+# CONFIG_SCSI_MULTI_LUN is not set
-+# CONFIG_SCSI_CONSTANTS is not set
-+# CONFIG_SCSI_LOGGING is not set
-+
-+#
-+# SCSI Transport Attributes
-+#
-+CONFIG_SCSI_SPI_ATTRS=y
-+# CONFIG_SCSI_FC_ATTRS is not set
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+
-+#
-+# SCSI low-level drivers
-+#
-+CONFIG_BLK_DEV_3W_XXXX_RAID=y
-+# CONFIG_SCSI_3W_9XXX is not set
-+# CONFIG_SCSI_ACARD is not set
-+CONFIG_SCSI_AACRAID=y
-+CONFIG_SCSI_AIC7XXX=y
-+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
-+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-+# CONFIG_AIC7XXX_BUILD_FIRMWARE is not set
-+CONFIG_AIC7XXX_DEBUG_ENABLE=y
-+CONFIG_AIC7XXX_DEBUG_MASK=0
-+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-+# CONFIG_SCSI_AIC7XXX_OLD is not set
-+CONFIG_SCSI_AIC79XX=y
-+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-+CONFIG_AIC79XX_RESET_DELAY_MS=15000
-+# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
-+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
-+CONFIG_AIC79XX_DEBUG_ENABLE=y
-+CONFIG_AIC79XX_DEBUG_MASK=0
-+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-+# CONFIG_SCSI_ADVANSYS is not set
-+CONFIG_MEGARAID_NEWGEN=y
-+# CONFIG_MEGARAID_MM is not set
-+CONFIG_SCSI_SATA=y
-+# CONFIG_SCSI_SATA_AHCI is not set
-+# CONFIG_SCSI_SATA_SVW is not set
-+CONFIG_SCSI_ATA_PIIX=y
-+# CONFIG_SCSI_SATA_NV is not set
-+CONFIG_SCSI_SATA_PROMISE=y
-+# CONFIG_SCSI_SATA_QSTOR is not set
-+CONFIG_SCSI_SATA_SX4=y
-+CONFIG_SCSI_SATA_SIL=y
-+# CONFIG_SCSI_SATA_SIS is not set
-+# CONFIG_SCSI_SATA_ULI is not set
-+# CONFIG_SCSI_SATA_VIA is not set
-+# CONFIG_SCSI_SATA_VITESSE is not set
-+CONFIG_SCSI_BUSLOGIC=y
-+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-+# CONFIG_SCSI_CPQFCTS is not set
-+# CONFIG_SCSI_DMX3191D is not set
-+# CONFIG_SCSI_EATA is not set
-+# CONFIG_SCSI_EATA_PIO is not set
-+# CONFIG_SCSI_FUTURE_DOMAIN is not set
-+# CONFIG_SCSI_GDTH is not set
-+# CONFIG_SCSI_IPS is not set
-+# CONFIG_SCSI_INITIO is not set
-+# CONFIG_SCSI_INIA100 is not set
-+# CONFIG_SCSI_SYM53C8XX_2 is not set
-+# CONFIG_SCSI_IPR is not set
-+# CONFIG_SCSI_PCI2000 is not set
-+# CONFIG_SCSI_PCI2220I is not set
-+# CONFIG_SCSI_QLOGIC_ISP is not set
-+# CONFIG_SCSI_QLOGIC_FC is not set
-+# CONFIG_SCSI_QLOGIC_1280 is not set
-+CONFIG_SCSI_QLA2XXX=y
-+# CONFIG_SCSI_QLA21XX is not set
-+# CONFIG_SCSI_QLA22XX is not set
-+# CONFIG_SCSI_QLA2300 is not set
-+# CONFIG_SCSI_QLA2322 is not set
-+# CONFIG_SCSI_QLA6312 is not set
-+# CONFIG_SCSI_LPFC is not set
-+# CONFIG_SCSI_DC395x is not set
-+# CONFIG_SCSI_DC390T is not set
-+# CONFIG_SCSI_DEBUG is not set
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+CONFIG_MD=y
-+CONFIG_BLK_DEV_MD=y
-+CONFIG_MD_LINEAR=y
-+CONFIG_MD_RAID0=y
-+CONFIG_MD_RAID1=y
-+# CONFIG_MD_RAID10 is not set
-+# CONFIG_MD_RAID5 is not set
-+# CONFIG_MD_RAID6 is not set
-+CONFIG_MD_MULTIPATH=y
-+# CONFIG_MD_FAULTY is not set
-+CONFIG_BLK_DEV_DM=y
-+CONFIG_DM_CRYPT=y
-+CONFIG_DM_SNAPSHOT=y
-+CONFIG_DM_MIRROR=y
-+# CONFIG_DM_ZERO is not set
-+CONFIG_DM_MULTIPATH=y
-+CONFIG_DM_MULTIPATH_EMC=y
-+
-+#
-+# Fusion MPT device support
-+#
-+CONFIG_FUSION=y
-+CONFIG_FUSION_MAX_SGE=40
-+# CONFIG_FUSION_CTL is not set
-+
-+#
-+# IEEE 1394 (FireWire) support
-+#
-+# CONFIG_IEEE1394 is not set
-+
-+#
-+# I2O device support
-+#
-+# CONFIG_I2O is not set
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=y
-+# CONFIG_PACKET_MMAP is not set
-+CONFIG_UNIX=y
-+# CONFIG_NET_KEY is not set
-+CONFIG_INET=y
-+# CONFIG_IP_MULTICAST is not set
-+# CONFIG_IP_ADVANCED_ROUTER is not set
-+CONFIG_IP_PNP=y
-+CONFIG_IP_PNP_DHCP=y
-+# CONFIG_IP_PNP_BOOTP is not set
-+# CONFIG_IP_PNP_RARP is not set
-+# CONFIG_NET_IPIP is not set
-+# CONFIG_NET_IPGRE is not set
-+# CONFIG_ARPD is not set
-+# CONFIG_SYN_COOKIES is not set
-+# CONFIG_INET_AH is not set
-+# CONFIG_INET_ESP is not set
-+# CONFIG_INET_IPCOMP is not set
-+# CONFIG_INET_TUNNEL is not set
-+CONFIG_IP_TCPDIAG=y
-+# CONFIG_IP_TCPDIAG_IPV6 is not set
-+
-+#
-+# IP: Virtual Server Configuration
-+#
-+# CONFIG_IP_VS is not set
-+# CONFIG_IPV6 is not set
-+CONFIG_NETFILTER=y
-+# CONFIG_NETFILTER_DEBUG is not set
-+CONFIG_BRIDGE_NETFILTER=y
-+
-+#
-+# IP: Netfilter Configuration
-+#
-+CONFIG_IP_NF_CONNTRACK=m
-+CONFIG_IP_NF_CT_ACCT=y
-+# CONFIG_IP_NF_CONNTRACK_MARK is not set
-+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
-+CONFIG_IP_NF_FTP=m
-+# CONFIG_IP_NF_IRC is not set
-+# CONFIG_IP_NF_TFTP is not set
-+# CONFIG_IP_NF_AMANDA is not set
-+# CONFIG_IP_NF_QUEUE is not set
-+CONFIG_IP_NF_IPTABLES=m
-+# CONFIG_IP_NF_MATCH_LIMIT is not set
-+CONFIG_IP_NF_MATCH_IPRANGE=m
-+# CONFIG_IP_NF_MATCH_MAC is not set
-+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
-+# CONFIG_IP_NF_MATCH_MARK is not set
-+# CONFIG_IP_NF_MATCH_MULTIPORT is not set
-+# CONFIG_IP_NF_MATCH_TOS is not set
-+# CONFIG_IP_NF_MATCH_RECENT is not set
-+# CONFIG_IP_NF_MATCH_ECN is not set
-+# CONFIG_IP_NF_MATCH_DSCP is not set
-+# CONFIG_IP_NF_MATCH_AH_ESP is not set
-+# CONFIG_IP_NF_MATCH_LENGTH is not set
-+# CONFIG_IP_NF_MATCH_TTL is not set
-+# CONFIG_IP_NF_MATCH_TCPMSS is not set
-+# CONFIG_IP_NF_MATCH_HELPER is not set
-+# CONFIG_IP_NF_MATCH_STATE is not set
-+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
-+# CONFIG_IP_NF_MATCH_OWNER is not set
-+CONFIG_IP_NF_MATCH_PHYSDEV=m
-+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
-+# CONFIG_IP_NF_MATCH_REALM is not set
-+# CONFIG_IP_NF_MATCH_SCTP is not set
-+# CONFIG_IP_NF_MATCH_COMMENT is not set
-+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
-+CONFIG_IP_NF_FILTER=m
-+CONFIG_IP_NF_TARGET_REJECT=m
-+# CONFIG_IP_NF_TARGET_LOG is not set
-+# CONFIG_IP_NF_TARGET_ULOG is not set
-+# CONFIG_IP_NF_TARGET_TCPMSS is not set
-+CONFIG_IP_NF_NAT=m
-+CONFIG_IP_NF_NAT_NEEDED=y
-+CONFIG_IP_NF_TARGET_MASQUERADE=m
-+# CONFIG_IP_NF_TARGET_REDIRECT is not set
-+# CONFIG_IP_NF_TARGET_NETMAP is not set
-+# CONFIG_IP_NF_TARGET_SAME is not set
-+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
-+CONFIG_IP_NF_NAT_FTP=m
-+# CONFIG_IP_NF_MANGLE is not set
-+# CONFIG_IP_NF_RAW is not set
-+# CONFIG_IP_NF_ARPTABLES is not set
-+
-+#
-+# Bridge: Netfilter Configuration
-+#
-+# CONFIG_BRIDGE_NF_EBTABLES is not set
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+# CONFIG_IP_SCTP is not set
-+# CONFIG_ATM is not set
-+CONFIG_BRIDGE=y
-+# CONFIG_VLAN_8021Q is not set
-+# CONFIG_DECNET is not set
-+# CONFIG_LLC2 is not set
-+# CONFIG_IPX is not set
-+# CONFIG_ATALK is not set
-+# CONFIG_X25 is not set
-+# CONFIG_LAPB is not set
-+# CONFIG_NET_DIVERT is not set
-+# CONFIG_ECONET is not set
-+# CONFIG_WAN_ROUTER is not set
-+
-+#
-+# QoS and/or fair queueing
-+#
-+# CONFIG_NET_SCHED is not set
-+# CONFIG_NET_CLS_ROUTE is not set
-+
-+#
-+# Network testing
-+#
-+# CONFIG_NET_PKTGEN is not set
-+# CONFIG_NETPOLL is not set
-+# CONFIG_NET_POLL_CONTROLLER is not set
-+# CONFIG_HAMRADIO is not set
-+# CONFIG_IRDA is not set
-+# CONFIG_BT is not set
-+CONFIG_NETDEVICES=y
-+# CONFIG_DUMMY is not set
-+# CONFIG_BONDING is not set
-+# CONFIG_EQUALIZER is not set
-+CONFIG_TUN=y
-+
-+#
-+# ARCnet devices
-+#
-+# CONFIG_ARCNET is not set
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=y
-+# CONFIG_HAPPYMEAL is not set
-+# CONFIG_SUNGEM is not set
-+CONFIG_NET_VENDOR_3COM=y
-+CONFIG_VORTEX=y
-+# CONFIG_TYPHOON is not set
-+
-+#
-+# Tulip family network device support
-+#
-+CONFIG_NET_TULIP=y
-+# CONFIG_DE2104X is not set
-+CONFIG_TULIP=y
-+# CONFIG_TULIP_MWI is not set
-+# CONFIG_TULIP_MMIO is not set
-+# CONFIG_TULIP_NAPI is not set
-+# CONFIG_DE4X5 is not set
-+# CONFIG_WINBOND_840 is not set
-+# CONFIG_DM9102 is not set
-+# CONFIG_HP100 is not set
-+CONFIG_NET_PCI=y
-+CONFIG_PCNET32=y
-+# CONFIG_AMD8111_ETH is not set
-+# CONFIG_ADAPTEC_STARFIRE is not set
-+# CONFIG_B44 is not set
-+# CONFIG_FORCEDETH is not set
-+# CONFIG_DGRS is not set
-+# CONFIG_EEPRO100 is not set
-+CONFIG_E100=y
-+# CONFIG_FEALNX is not set
-+# CONFIG_NATSEMI is not set
-+CONFIG_NE2K_PCI=y
-+# CONFIG_8139CP is not set
-+CONFIG_8139TOO=y
-+CONFIG_8139TOO_PIO=y
-+# CONFIG_8139TOO_TUNE_TWISTER is not set
-+# CONFIG_8139TOO_8129 is not set
-+# CONFIG_8139_OLD_RX_RESET is not set
-+# CONFIG_SIS900 is not set
-+# CONFIG_EPIC100 is not set
-+# CONFIG_SUNDANCE is not set
-+CONFIG_VIA_RHINE=y
-+# CONFIG_VIA_RHINE_MMIO is not set
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+CONFIG_ACENIC=y
-+# CONFIG_ACENIC_OMIT_TIGON_I is not set
-+# CONFIG_DL2K is not set
-+CONFIG_E1000=y
-+# CONFIG_E1000_NAPI is not set
-+# CONFIG_NS83820 is not set
-+# CONFIG_HAMACHI is not set
-+# CONFIG_YELLOWFIN is not set
-+# CONFIG_R8169 is not set
-+CONFIG_SK98LIN=y
-+# CONFIG_VIA_VELOCITY is not set
-+CONFIG_TIGON3=y
-+# CONFIG_BNX2 is not set
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+# CONFIG_IXGB is not set
-+# CONFIG_S2IO is not set
-+
-+#
-+# Token Ring devices
-+#
-+# CONFIG_TR is not set
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+# CONFIG_NET_RADIO is not set
-+
-+#
-+# Wan interfaces
-+#
-+# CONFIG_WAN is not set
-+# CONFIG_FDDI is not set
-+# CONFIG_HIPPI is not set
-+# CONFIG_PPP is not set
-+# CONFIG_SLIP is not set
-+# CONFIG_NET_FC is not set
-+# CONFIG_SHAPER is not set
-+# CONFIG_NETCONSOLE is not set
-+
-+#
-+# ISDN subsystem
-+#
-+# CONFIG_ISDN is not set
-+
-+#
-+# Telephony Support
-+#
-+# CONFIG_PHONE is not set
-+
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+CONFIG_INPUT_MOUSEDEV_PSAUX=y
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+# CONFIG_INPUT_JOYDEV is not set
-+# CONFIG_INPUT_TSDEV is not set
-+# CONFIG_INPUT_EVDEV is not set
-+# CONFIG_INPUT_EVBUG is not set
-+
-+#
-+# Input Device Drivers
-+#
-+CONFIG_INPUT_KEYBOARD=y
-+CONFIG_KEYBOARD_ATKBD=y
-+# CONFIG_KEYBOARD_SUNKBD is not set
-+# CONFIG_KEYBOARD_LKKBD is not set
-+# CONFIG_KEYBOARD_XTKBD is not set
-+# CONFIG_KEYBOARD_NEWTON is not set
-+CONFIG_INPUT_MOUSE=y
-+CONFIG_MOUSE_PS2=y
-+# CONFIG_MOUSE_SERIAL is not set
-+# CONFIG_MOUSE_VSXXXAA is not set
-+# CONFIG_INPUT_JOYSTICK is not set
-+# CONFIG_INPUT_TOUCHSCREEN is not set
-+# CONFIG_INPUT_MISC is not set
-+
-+#
-+# Hardware I/O ports
-+#
-+CONFIG_SERIO=y
-+CONFIG_SERIO_I8042=y
-+CONFIG_SERIO_SERPORT=y
-+# CONFIG_SERIO_CT82C710 is not set
-+# CONFIG_SERIO_PCIPS2 is not set
-+CONFIG_SERIO_LIBPS2=y
-+# CONFIG_SERIO_RAW is not set
-+# CONFIG_GAMEPORT is not set
-+
-+#
-+# Character devices
-+#
-+CONFIG_VT=y
-+CONFIG_VT_CONSOLE=y
-+CONFIG_HW_CONSOLE=y
-+# CONFIG_SERIAL_NONSTANDARD is not set
-+
-+#
-+# Serial drivers
-+#
-+# CONFIG_SERIAL_8250 is not set
-+
-+#
-+# Non-8250 serial port support
-+#
-+# CONFIG_SERIAL_JSM is not set
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+
-+#
-+# IPMI
-+#
-+# CONFIG_IPMI_HANDLER is not set
-+
-+#
-+# Watchdog Cards
-+#
-+# CONFIG_WATCHDOG is not set
-+# CONFIG_HW_RANDOM is not set
-+# CONFIG_NVRAM is not set
-+CONFIG_RTC=y
-+# CONFIG_DTLK is not set
-+# CONFIG_R3964 is not set
-+# CONFIG_APPLICOM is not set
-+
-+#
-+# Ftape, the floppy tape device driver
-+#
-+# CONFIG_FTAPE is not set
-+CONFIG_AGP=m
-+CONFIG_AGP_AMD64=m
-+# CONFIG_AGP_INTEL is not set
-+CONFIG_DRM=m
-+CONFIG_DRM_TDFX=m
-+# CONFIG_DRM_GAMMA is not set
-+CONFIG_DRM_R128=m
-+CONFIG_DRM_RADEON=m
-+CONFIG_DRM_MGA=m
-+CONFIG_DRM_SIS=m
-+# CONFIG_MWAVE is not set
-+# CONFIG_RAW_DRIVER is not set
-+# CONFIG_HPET is not set
-+# CONFIG_HANGCHECK_TIMER is not set
-+
-+#
-+# TPM devices
-+#
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# I2C support
-+#
-+# CONFIG_I2C is not set
-+
-+#
-+# Dallas's 1-wire bus
-+#
-+# CONFIG_W1 is not set
-+
-+#
-+# Misc devices
-+#
-+# CONFIG_IBM_ASM is not set
-+
-+#
-+# Multimedia devices
-+#
-+# CONFIG_VIDEO_DEV is not set
-+
-+#
-+# Digital Video Broadcasting Devices
-+#
-+# CONFIG_DVB is not set
-+
-+#
-+# Graphics support
-+#
-+# CONFIG_FB is not set
-+# CONFIG_VIDEO_SELECT is not set
-+
-+#
-+# Console display driver support
-+#
-+CONFIG_VGA_CONSOLE=y
-+CONFIG_DUMMY_CONSOLE=y
-+
-+#
-+# Sound
-+#
-+# CONFIG_SOUND is not set
-+
-+#
-+# USB support
-+#
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB=y
-+# CONFIG_USB_DEBUG is not set
-+
-+#
-+# Miscellaneous USB options
-+#
-+# CONFIG_USB_DEVICEFS is not set
-+# CONFIG_USB_BANDWIDTH is not set
-+# CONFIG_USB_DYNAMIC_MINORS is not set
-+# CONFIG_USB_OTG is not set
-+
-+#
-+# USB Host Controller Drivers
-+#
-+# CONFIG_USB_EHCI_HCD is not set
-+CONFIG_USB_OHCI_HCD=y
-+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
-+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-+CONFIG_USB_UHCI_HCD=y
-+# CONFIG_USB_SL811_HCD is not set
-+
-+#
-+# USB Device Class drivers
-+#
-+# CONFIG_USB_BLUETOOTH_TTY is not set
-+# CONFIG_USB_ACM is not set
-+# CONFIG_USB_PRINTER is not set
-+
-+#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-+#
-+# CONFIG_USB_STORAGE is not set
-+
-+#
-+# USB Input Devices
-+#
-+CONFIG_USB_HID=y
-+CONFIG_USB_HIDINPUT=y
-+# CONFIG_HID_FF is not set
-+# CONFIG_USB_HIDDEV is not set
-+# CONFIG_USB_AIPTEK is not set
-+# CONFIG_USB_WACOM is not set
-+# CONFIG_USB_KBTAB is not set
-+# CONFIG_USB_POWERMATE is not set
-+# CONFIG_USB_MTOUCH is not set
-+# CONFIG_USB_EGALAX is not set
-+# CONFIG_USB_XPAD is not set
-+# CONFIG_USB_ATI_REMOTE is not set
-+
-+#
-+# USB Imaging devices
-+#
-+# CONFIG_USB_MDC800 is not set
-+# CONFIG_USB_MICROTEK is not set
-+
-+#
-+# USB Multimedia devices
-+#
-+# CONFIG_USB_DABUSB is not set
-+
-+#
-+# Video4Linux support is needed for USB Multimedia device support
-+#
-+
-+#
-+# USB Network Adapters
-+#
-+# CONFIG_USB_CATC is not set
-+# CONFIG_USB_KAWETH is not set
-+# CONFIG_USB_PEGASUS is not set
-+# CONFIG_USB_RTL8150 is not set
-+# CONFIG_USB_USBNET is not set
-+CONFIG_USB_MON=y
-+
-+#
-+# USB port drivers
-+#
-+
-+#
-+# USB Serial Converter support
-+#
-+# CONFIG_USB_SERIAL is not set
-+
-+#
-+# USB Miscellaneous drivers
-+#
-+# CONFIG_USB_EMI62 is not set
-+# CONFIG_USB_EMI26 is not set
-+# CONFIG_USB_AUERSWALD is not set
-+# CONFIG_USB_RIO500 is not set
-+# CONFIG_USB_LEGOTOWER is not set
-+# CONFIG_USB_LCD is not set
-+# CONFIG_USB_LED is not set
-+# CONFIG_USB_CYTHERM is not set
-+# CONFIG_USB_PHIDGETKIT is not set
-+# CONFIG_USB_PHIDGETSERVO is not set
-+# CONFIG_USB_IDMOUSE is not set
-+
-+#
-+# USB ATM/DSL drivers
-+#
-+
-+#
-+# USB Gadget Support
-+#
-+# CONFIG_USB_GADGET is not set
-+
-+#
-+# MMC/SD Card support
-+#
-+# CONFIG_MMC is not set
-+
-+#
-+# InfiniBand support
-+#
-+CONFIG_INFINIBAND=y
-+CONFIG_INFINIBAND_MTHCA=y
-+CONFIG_INFINIBAND_MTHCA_DEBUG=y
-+CONFIG_INFINIBAND_IPOIB=y
-+CONFIG_INFINIBAND_IPOIB_DEBUG=y
-+CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y
-+
-+#
-+# Power management options
-+#
-+
-+#
-+# ACPI (Advanced Configuration and Power Interface) Support
-+#
-+CONFIG_ACPI=y
-+CONFIG_ACPI_BOOT=y
-+CONFIG_ACPI_INTERPRETER=y
-+CONFIG_ACPI_AC=m
-+CONFIG_ACPI_BATTERY=m
-+CONFIG_ACPI_BUTTON=m
-+CONFIG_ACPI_VIDEO=m
-+CONFIG_ACPI_FAN=m
-+CONFIG_ACPI_PROCESSOR=m
-+CONFIG_ACPI_THERMAL=m
-+CONFIG_ACPI_ASUS=m
-+CONFIG_ACPI_IBM=m
-+CONFIG_ACPI_TOSHIBA=m
-+CONFIG_ACPI_BLACKLIST_YEAR=0
-+# CONFIG_ACPI_DEBUG is not set
-+CONFIG_ACPI_BUS=y
-+CONFIG_ACPI_EC=y
-+CONFIG_ACPI_POWER=y
-+CONFIG_ACPI_PCI=y
-+CONFIG_ACPI_SYSTEM=y
-+# CONFIG_ACPI_CONTAINER is not set
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+# CONFIG_EXT2_FS_XATTR is not set
-+CONFIG_EXT3_FS=y
-+CONFIG_EXT3_FS_XATTR=y
-+# CONFIG_EXT3_FS_POSIX_ACL is not set
-+# CONFIG_EXT3_FS_SECURITY is not set
-+CONFIG_JBD=y
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+CONFIG_REISERFS_FS=y
-+# CONFIG_REISERFS_CHECK is not set
-+# CONFIG_REISERFS_PROC_INFO is not set
-+# CONFIG_REISERFS_FS_XATTR is not set
-+# CONFIG_JFS_FS is not set
-+
-+#
-+# XFS support
-+#
-+# CONFIG_XFS_FS is not set
-+# CONFIG_MINIX_FS is not set
-+# CONFIG_ROMFS_FS is not set
-+# CONFIG_QUOTA is not set
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=y
-+CONFIG_AUTOFS4_FS=y
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=y
-+CONFIG_JOLIET=y
-+CONFIG_ZISOFS=y
-+CONFIG_ZISOFS_FS=y
-+# CONFIG_UDF_FS is not set
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=y
-+CONFIG_MSDOS_FS=y
-+CONFIG_VFAT_FS=y
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-+# CONFIG_NTFS_FS is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+# CONFIG_DEVPTS_FS_XATTR is not set
-+CONFIG_TMPFS=y
-+# CONFIG_TMPFS_XATTR is not set
-+# CONFIG_HUGETLBFS is not set
-+# CONFIG_HUGETLB_PAGE is not set
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+# CONFIG_ADFS_FS is not set
-+# CONFIG_AFFS_FS is not set
-+# CONFIG_HFS_FS is not set
-+# CONFIG_HFSPLUS_FS is not set
-+# CONFIG_BEFS_FS is not set
-+# CONFIG_BFS_FS is not set
-+# CONFIG_EFS_FS is not set
-+CONFIG_CRAMFS=y
-+# CONFIG_VXFS_FS is not set
-+# CONFIG_HPFS_FS is not set
-+# CONFIG_QNX4FS_FS is not set
-+# CONFIG_SYSV_FS is not set
-+# CONFIG_UFS_FS is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=y
-+CONFIG_NFS_V3=y
-+# CONFIG_NFS_V4 is not set
-+# CONFIG_NFS_DIRECTIO is not set
-+CONFIG_NFSD=m
-+CONFIG_NFSD_V3=y
-+# CONFIG_NFSD_V4 is not set
-+CONFIG_NFSD_TCP=y
-+CONFIG_ROOT_NFS=y
-+CONFIG_LOCKD=y
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=m
-+CONFIG_SUNRPC=y
-+# CONFIG_RPCSEC_GSS_KRB5 is not set
-+# CONFIG_RPCSEC_GSS_SPKM3 is not set
-+# CONFIG_SMB_FS is not set
-+# CONFIG_CIFS is not set
-+# CONFIG_NCP_FS is not set
-+# CONFIG_CODA_FS is not set
-+# CONFIG_AFS_FS is not set
-+
-+#
-+# Partition Types
-+#
-+# CONFIG_PARTITION_ADVANCED is not set
-+CONFIG_MSDOS_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="iso8859-1"
-+CONFIG_NLS_CODEPAGE_437=y
-+# CONFIG_NLS_CODEPAGE_737 is not set
-+# CONFIG_NLS_CODEPAGE_775 is not set
-+# CONFIG_NLS_CODEPAGE_850 is not set
-+# CONFIG_NLS_CODEPAGE_852 is not set
-+# CONFIG_NLS_CODEPAGE_855 is not set
-+# CONFIG_NLS_CODEPAGE_857 is not set
-+# CONFIG_NLS_CODEPAGE_860 is not set
-+# CONFIG_NLS_CODEPAGE_861 is not set
-+# CONFIG_NLS_CODEPAGE_862 is not set
-+# CONFIG_NLS_CODEPAGE_863 is not set
-+# CONFIG_NLS_CODEPAGE_864 is not set
-+# CONFIG_NLS_CODEPAGE_865 is not set
-+# CONFIG_NLS_CODEPAGE_866 is not set
-+# CONFIG_NLS_CODEPAGE_869 is not set
-+# CONFIG_NLS_CODEPAGE_936 is not set
-+# CONFIG_NLS_CODEPAGE_950 is not set
-+# CONFIG_NLS_CODEPAGE_932 is not set
-+# CONFIG_NLS_CODEPAGE_949 is not set
-+# CONFIG_NLS_CODEPAGE_874 is not set
-+# CONFIG_NLS_ISO8859_8 is not set
-+# CONFIG_NLS_CODEPAGE_1250 is not set
-+# CONFIG_NLS_CODEPAGE_1251 is not set
-+# CONFIG_NLS_ASCII is not set
-+CONFIG_NLS_ISO8859_1=y
-+# CONFIG_NLS_ISO8859_2 is not set
-+# CONFIG_NLS_ISO8859_3 is not set
-+# CONFIG_NLS_ISO8859_4 is not set
-+# CONFIG_NLS_ISO8859_5 is not set
-+# CONFIG_NLS_ISO8859_6 is not set
-+# CONFIG_NLS_ISO8859_7 is not set
-+# CONFIG_NLS_ISO8859_9 is not set
-+# CONFIG_NLS_ISO8859_13 is not set
-+# CONFIG_NLS_ISO8859_14 is not set
-+# CONFIG_NLS_ISO8859_15 is not set
-+# CONFIG_NLS_KOI8_R is not set
-+# CONFIG_NLS_KOI8_U is not set
-+# CONFIG_NLS_UTF8 is not set
-+
-+#
-+# Security options
-+#
-+# CONFIG_KEYS is not set
-+# CONFIG_SECURITY is not set
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+CONFIG_CRYPTO_HMAC=y
-+# CONFIG_CRYPTO_NULL is not set
-+# CONFIG_CRYPTO_MD4 is not set
-+CONFIG_CRYPTO_MD5=m
-+CONFIG_CRYPTO_SHA1=m
-+# CONFIG_CRYPTO_SHA256 is not set
-+# CONFIG_CRYPTO_SHA512 is not set
-+# CONFIG_CRYPTO_WP512 is not set
-+# CONFIG_CRYPTO_TGR192 is not set
-+CONFIG_CRYPTO_DES=m
-+# CONFIG_CRYPTO_BLOWFISH is not set
-+# CONFIG_CRYPTO_TWOFISH is not set
-+# CONFIG_CRYPTO_SERPENT is not set
-+# CONFIG_CRYPTO_AES is not set
-+# CONFIG_CRYPTO_CAST5 is not set
-+# CONFIG_CRYPTO_CAST6 is not set
-+# CONFIG_CRYPTO_TEA is not set
-+# CONFIG_CRYPTO_ARC4 is not set
-+# CONFIG_CRYPTO_KHAZAD is not set
-+# CONFIG_CRYPTO_ANUBIS is not set
-+# CONFIG_CRYPTO_DEFLATE is not set
-+# CONFIG_CRYPTO_MICHAEL_MIC is not set
-+CONFIG_CRYPTO_CRC32C=m
-+# CONFIG_CRYPTO_TEST is not set
-+
-+#
-+# Hardware crypto devices
-+#
-+
-+#
-+# Library routines
-+#
-+# CONFIG_CRC_CCITT is not set
-+CONFIG_CRC32=y
-+CONFIG_LIBCRC32C=m
-+CONFIG_ZLIB_INFLATE=y
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=15
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+# CONFIG_DEBUG_STACKOVERFLOW is not set
-+# CONFIG_KPROBES is not set
-+# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_DEBUG_PAGEALLOC is not set
-+# CONFIG_4KSTACKS is not set
-+CONFIG_X86_FIND_SMP_CONFIG=y
-+CONFIG_X86_MPPARSE=y
-+# CONFIG_CHECKING is not set
-+# CONFIG_INIT_DEBUG is not set
-diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen_defconfig_x86_32 linux-2.6.12-xen/arch/xen/configs/xen_defconfig_x86_32
---- pristine-linux-2.6.12/arch/xen/configs/xen_defconfig_x86_32	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/configs/xen_defconfig_x86_32	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2965 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12.6-xen
-+# Mon Dec 12 10:42:00 2005
-+#
-+CONFIG_XEN=y
-+CONFIG_ARCH_XEN=y
-+CONFIG_NO_IDLE_HZ=y
-+
-+#
-+# XEN
-+#
-+CONFIG_XEN_PRIVILEGED_GUEST=y
-+CONFIG_XEN_PHYSDEV_ACCESS=y
-+CONFIG_XEN_BLKDEV_BACKEND=y
-+# CONFIG_XEN_BLKDEV_TAP_BE is not set
-+CONFIG_XEN_NETDEV_BACKEND=y
-+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
-+# CONFIG_XEN_TPMDEV_FRONTEND is not set
-+# CONFIG_XEN_TPMDEV_BACKEND is not set
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_XEN_NETDEV_FRONTEND=y
-+# CONFIG_XEN_BLKDEV_TAP is not set
-+# CONFIG_XEN_SHADOW_MODE is not set
-+CONFIG_XEN_SCRUB_PAGES=y
-+CONFIG_XEN_X86=y
-+# CONFIG_XEN_X86_64 is not set
-+CONFIG_HAVE_ARCH_ALLOC_SKB=y
-+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+# CONFIG_CLEAN_COMPILE is not set
-+CONFIG_BROKEN=y
-+CONFIG_BROKEN_ON_SMP=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+CONFIG_POSIX_MQUEUE=y
-+CONFIG_BSD_PROCESS_ACCT=y
-+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-+CONFIG_SYSCTL=y
-+# CONFIG_AUDIT is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+# CONFIG_IKCONFIG is not set
-+# CONFIG_CPUSETS is not set
-+CONFIG_EMBEDDED=y
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+# CONFIG_KALLSYMS_EXTRA_PASS is not set
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+CONFIG_MODULE_FORCE_UNLOAD=y
-+CONFIG_OBSOLETE_MODPARM=y
-+CONFIG_MODVERSIONS=y
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+CONFIG_KMOD=y
-+CONFIG_STOP_MACHINE=y
-+
-+#
-+# X86 Processor Configuration
-+#
-+CONFIG_XENARCH="i386"
-+CONFIG_X86=y
-+CONFIG_MMU=y
-+CONFIG_UID16=y
-+CONFIG_GENERIC_ISA_DMA=y
-+CONFIG_GENERIC_IOMAP=y
-+# CONFIG_M386 is not set
-+# CONFIG_M486 is not set
-+# CONFIG_M586 is not set
-+# CONFIG_M586TSC is not set
-+# CONFIG_M586MMX is not set
-+CONFIG_M686=y
-+# CONFIG_MPENTIUMII is not set
-+# CONFIG_MPENTIUMIII is not set
-+# CONFIG_MPENTIUMM is not set
-+# CONFIG_MPENTIUM4 is not set
-+# CONFIG_MK6 is not set
-+# CONFIG_MK7 is not set
-+# CONFIG_MK8 is not set
-+# CONFIG_MCRUSOE is not set
-+# CONFIG_MEFFICEON is not set
-+# CONFIG_MWINCHIPC6 is not set
-+# CONFIG_MWINCHIP2 is not set
-+# CONFIG_MWINCHIP3D is not set
-+# CONFIG_MGEODEGX1 is not set
-+# CONFIG_MCYRIXIII is not set
-+# CONFIG_MVIAC3_2 is not set
-+# CONFIG_X86_GENERIC is not set
-+CONFIG_X86_CMPXCHG=y
-+CONFIG_X86_XADD=y
-+CONFIG_X86_L1_CACHE_SHIFT=5
-+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_X86_PPRO_FENCE=y
-+CONFIG_X86_WP_WORKS_OK=y
-+CONFIG_X86_INVLPG=y
-+CONFIG_X86_BSWAP=y
-+CONFIG_X86_POPAD_OK=y
-+CONFIG_X86_GOOD_APIC=y
-+CONFIG_X86_USE_PPRO_CHECKSUM=y
-+# CONFIG_HPET_TIMER is not set
-+# CONFIG_HPET_EMULATE_RTC is not set
-+CONFIG_SMP=y
-+CONFIG_SMP_ALTERNATIVES=y
-+CONFIG_NR_CPUS=8
-+# CONFIG_SCHED_SMT is not set
-+# CONFIG_X86_REBOOTFIXUPS is not set
-+CONFIG_MICROCODE=y
-+CONFIG_X86_CPUID=m
-+CONFIG_SWIOTLB=y
-+
-+#
-+# Firmware Drivers
-+#
-+CONFIG_EDD=m
-+# CONFIG_NOHIGHMEM is not set
-+CONFIG_HIGHMEM4G=y
-+# CONFIG_HIGHMEM64G is not set
-+CONFIG_HIGHMEM=y
-+CONFIG_MTRR=y
-+CONFIG_HAVE_DEC_LOCK=y
-+# CONFIG_REGPARM is not set
-+CONFIG_X86_LOCAL_APIC=y
-+CONFIG_X86_IO_APIC=y
-+CONFIG_HOTPLUG_CPU=y
-+
-+#
-+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
-+#
-+CONFIG_PCI=y
-+# CONFIG_PCI_GOMMCONFIG is not set
-+# CONFIG_PCI_GODIRECT is not set
-+CONFIG_PCI_GOANY=y
-+CONFIG_PCI_DIRECT=y
-+CONFIG_PCI_MMCONFIG=y
-+# CONFIG_PCIEPORTBUS is not set
-+# CONFIG_PCI_MSI is not set
-+# CONFIG_PCI_LEGACY_PROC is not set
-+CONFIG_PCI_NAMES=y
-+# CONFIG_PCI_DEBUG is not set
-+CONFIG_ISA_DMA_API=y
-+CONFIG_ISA=y
-+# CONFIG_EISA is not set
-+# CONFIG_MCA is not set
-+CONFIG_SCx200=m
-+
-+#
-+# PCCARD (PCMCIA/CardBus) support
-+#
-+CONFIG_PCCARD=m
-+# CONFIG_PCMCIA_DEBUG is not set
-+CONFIG_PCMCIA=m
-+CONFIG_CARDBUS=y
-+
-+#
-+# PC-card bridges
-+#
-+CONFIG_YENTA=m
-+CONFIG_PD6729=m
-+CONFIG_I82092=m
-+CONFIG_I82365=m
-+CONFIG_TCIC=m
-+CONFIG_PCMCIA_PROBE=y
-+CONFIG_PCCARD_NONSTATIC=m
-+
-+#
-+# PCI Hotplug Support
-+#
-+CONFIG_HOTPLUG_PCI=m
-+CONFIG_HOTPLUG_PCI_FAKE=m
-+# CONFIG_HOTPLUG_PCI_ACPI is not set
-+CONFIG_HOTPLUG_PCI_CPCI=y
-+CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
-+CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
-+CONFIG_HOTPLUG_PCI_SHPC=m
-+# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_X86_SMP=y
-+CONFIG_X86_BIOS_REBOOT=y
-+CONFIG_X86_TRAMPOLINE=y
-+CONFIG_SECCOMP=y
-+# CONFIG_EARLY_PRINTK is not set
-+
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+CONFIG_BINFMT_AOUT=m
-+CONFIG_BINFMT_MISC=m
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+CONFIG_FW_LOADER=m
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Memory Technology Devices (MTD)
-+#
-+CONFIG_MTD=m
-+# CONFIG_MTD_DEBUG is not set
-+CONFIG_MTD_CONCAT=m
-+CONFIG_MTD_PARTITIONS=y
-+CONFIG_MTD_REDBOOT_PARTS=m
-+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-+# CONFIG_MTD_CMDLINE_PARTS is not set
-+
-+#
-+# User Modules And Translation Layers
-+#
-+CONFIG_MTD_CHAR=m
-+CONFIG_MTD_BLOCK=m
-+CONFIG_MTD_BLOCK_RO=m
-+CONFIG_FTL=m
-+CONFIG_NFTL=m
-+CONFIG_NFTL_RW=y
-+CONFIG_INFTL=m
-+
-+#
-+# RAM/ROM/Flash chip drivers
-+#
-+CONFIG_MTD_CFI=m
-+CONFIG_MTD_JEDECPROBE=m
-+CONFIG_MTD_GEN_PROBE=m
-+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-+CONFIG_MTD_MAP_BANK_WIDTH_1=y
-+CONFIG_MTD_MAP_BANK_WIDTH_2=y
-+CONFIG_MTD_MAP_BANK_WIDTH_4=y
-+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-+CONFIG_MTD_CFI_I1=y
-+CONFIG_MTD_CFI_I2=y
-+# CONFIG_MTD_CFI_I4 is not set
-+# CONFIG_MTD_CFI_I8 is not set
-+CONFIG_MTD_CFI_INTELEXT=m
-+CONFIG_MTD_CFI_AMDSTD=m
-+CONFIG_MTD_CFI_AMDSTD_RETRY=0
-+CONFIG_MTD_CFI_STAA=m
-+CONFIG_MTD_CFI_UTIL=m
-+CONFIG_MTD_RAM=m
-+CONFIG_MTD_ROM=m
-+CONFIG_MTD_ABSENT=m
-+# CONFIG_MTD_OBSOLETE_CHIPS is not set
-+
-+#
-+# Mapping drivers for chip access
-+#
-+CONFIG_MTD_COMPLEX_MAPPINGS=y
-+CONFIG_MTD_PHYSMAP=m
-+CONFIG_MTD_PHYSMAP_START=0x8000000
-+CONFIG_MTD_PHYSMAP_LEN=0x4000000
-+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
-+CONFIG_MTD_PNC2000=m
-+CONFIG_MTD_SC520CDP=m
-+CONFIG_MTD_NETSC520=m
-+CONFIG_MTD_TS5500=m
-+CONFIG_MTD_SBC_GXX=m
-+CONFIG_MTD_ELAN_104NC=m
-+CONFIG_MTD_SCx200_DOCFLASH=m
-+# CONFIG_MTD_AMD76XROM is not set
-+# CONFIG_MTD_ICHXROM is not set
-+# CONFIG_MTD_SCB2_FLASH is not set
-+CONFIG_MTD_NETtel=m
-+CONFIG_MTD_DILNETPC=m
-+CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
-+# CONFIG_MTD_L440GX is not set
-+CONFIG_MTD_PCI=m
-+CONFIG_MTD_PCMCIA=m
-+
-+#
-+# Self-contained MTD device drivers
-+#
-+CONFIG_MTD_PMC551=m
-+# CONFIG_MTD_PMC551_BUGFIX is not set
-+# CONFIG_MTD_PMC551_DEBUG is not set
-+CONFIG_MTD_SLRAM=m
-+CONFIG_MTD_PHRAM=m
-+CONFIG_MTD_MTDRAM=m
-+CONFIG_MTDRAM_TOTAL_SIZE=4096
-+CONFIG_MTDRAM_ERASE_SIZE=128
-+CONFIG_MTD_BLKMTD=m
-+# CONFIG_MTD_BLOCK2MTD is not set
-+
-+#
-+# Disk-On-Chip Device Drivers
-+#
-+CONFIG_MTD_DOC2000=m
-+CONFIG_MTD_DOC2001=m
-+CONFIG_MTD_DOC2001PLUS=m
-+CONFIG_MTD_DOCPROBE=m
-+CONFIG_MTD_DOCECC=m
-+# CONFIG_MTD_DOCPROBE_ADVANCED is not set
-+CONFIG_MTD_DOCPROBE_ADDRESS=0
-+
-+#
-+# NAND Flash Device Drivers
-+#
-+CONFIG_MTD_NAND=m
-+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-+CONFIG_MTD_NAND_IDS=m
-+CONFIG_MTD_NAND_DISKONCHIP=m
-+# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
-+CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
-+# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
-+# CONFIG_MTD_NAND_NANDSIM is not set
-+
-+#
-+# Parallel port support
-+#
-+CONFIG_PARPORT=m
-+CONFIG_PARPORT_PC=m
-+CONFIG_PARPORT_SERIAL=m
-+CONFIG_PARPORT_PC_FIFO=y
-+# CONFIG_PARPORT_PC_SUPERIO is not set
-+CONFIG_PARPORT_PC_PCMCIA=m
-+CONFIG_PARPORT_NOT_PC=y
-+# CONFIG_PARPORT_GSC is not set
-+CONFIG_PARPORT_1284=y
-+
-+#
-+# Plug and Play support
-+#
-+CONFIG_PNP=y
-+# CONFIG_PNP_DEBUG is not set
-+
-+#
-+# Protocols
-+#
-+CONFIG_ISAPNP=y
-+# CONFIG_PNPBIOS is not set
-+# CONFIG_PNPACPI is not set
-+
-+#
-+# Block devices
-+#
-+CONFIG_BLK_DEV_FD=m
-+CONFIG_BLK_DEV_XD=m
-+CONFIG_PARIDE=m
-+CONFIG_PARIDE_PARPORT=m
-+
-+#
-+# Parallel IDE high-level drivers
-+#
-+CONFIG_PARIDE_PD=m
-+CONFIG_PARIDE_PCD=m
-+CONFIG_PARIDE_PF=m
-+CONFIG_PARIDE_PT=m
-+CONFIG_PARIDE_PG=m
-+
-+#
-+# Parallel IDE protocol modules
-+#
-+CONFIG_PARIDE_ATEN=m
-+CONFIG_PARIDE_BPCK=m
-+CONFIG_PARIDE_BPCK6=m
-+CONFIG_PARIDE_COMM=m
-+CONFIG_PARIDE_DSTR=m
-+CONFIG_PARIDE_FIT2=m
-+CONFIG_PARIDE_FIT3=m
-+CONFIG_PARIDE_EPAT=m
-+# CONFIG_PARIDE_EPATC8 is not set
-+CONFIG_PARIDE_EPIA=m
-+CONFIG_PARIDE_FRIQ=m
-+CONFIG_PARIDE_FRPW=m
-+CONFIG_PARIDE_KBIC=m
-+CONFIG_PARIDE_KTTI=m
-+CONFIG_PARIDE_ON20=m
-+CONFIG_PARIDE_ON26=m
-+CONFIG_BLK_CPQ_DA=m
-+CONFIG_BLK_CPQ_CISS_DA=m
-+CONFIG_CISS_SCSI_TAPE=y
-+CONFIG_BLK_DEV_DAC960=m
-+CONFIG_BLK_DEV_UMEM=m
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=m
-+CONFIG_BLK_DEV_CRYPTOLOOP=m
-+CONFIG_BLK_DEV_NBD=m
-+CONFIG_BLK_DEV_SX8=m
-+# CONFIG_BLK_DEV_UB is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=16384
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+CONFIG_LBD=y
-+CONFIG_CDROM_PKTCDVD=m
-+CONFIG_CDROM_PKTCDVD_BUFFERS=8
-+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+CONFIG_ATA_OVER_ETH=m
-+
-+#
-+# ATA/ATAPI/MFM/RLL support
-+#
-+CONFIG_IDE=y
-+CONFIG_BLK_DEV_IDE=y
-+
-+#
-+# Please see Documentation/ide.txt for help/info on IDE drives
-+#
-+# CONFIG_BLK_DEV_IDE_SATA is not set
-+# CONFIG_BLK_DEV_HD_IDE is not set
-+CONFIG_BLK_DEV_IDEDISK=y
-+CONFIG_IDEDISK_MULTI_MODE=y
-+CONFIG_BLK_DEV_IDECS=m
-+CONFIG_BLK_DEV_IDECD=y
-+CONFIG_BLK_DEV_IDETAPE=m
-+CONFIG_BLK_DEV_IDEFLOPPY=y
-+CONFIG_BLK_DEV_IDESCSI=m
-+# CONFIG_IDE_TASK_IOCTL is not set
-+
-+#
-+# IDE chipset support/bugfixes
-+#
-+CONFIG_IDE_GENERIC=y
-+CONFIG_BLK_DEV_CMD640=y
-+CONFIG_BLK_DEV_CMD640_ENHANCED=y
-+CONFIG_BLK_DEV_IDEPNP=y
-+CONFIG_BLK_DEV_IDEPCI=y
-+CONFIG_IDEPCI_SHARE_IRQ=y
-+# CONFIG_BLK_DEV_OFFBOARD is not set
-+CONFIG_BLK_DEV_GENERIC=y
-+CONFIG_BLK_DEV_OPTI621=m
-+CONFIG_BLK_DEV_RZ1000=y
-+CONFIG_BLK_DEV_IDEDMA_PCI=y
-+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-+CONFIG_IDEDMA_PCI_AUTO=y
-+# CONFIG_IDEDMA_ONLYDISK is not set
-+CONFIG_BLK_DEV_AEC62XX=y
-+CONFIG_BLK_DEV_ALI15X3=y
-+# CONFIG_WDC_ALI15X3 is not set
-+CONFIG_BLK_DEV_AMD74XX=y
-+CONFIG_BLK_DEV_ATIIXP=y
-+CONFIG_BLK_DEV_CMD64X=y
-+CONFIG_BLK_DEV_TRIFLEX=y
-+CONFIG_BLK_DEV_CY82C693=y
-+CONFIG_BLK_DEV_CS5520=y
-+CONFIG_BLK_DEV_CS5530=y
-+CONFIG_BLK_DEV_HPT34X=y
-+# CONFIG_HPT34X_AUTODMA is not set
-+CONFIG_BLK_DEV_HPT366=y
-+CONFIG_BLK_DEV_SC1200=m
-+CONFIG_BLK_DEV_PIIX=y
-+CONFIG_BLK_DEV_NS87415=m
-+CONFIG_BLK_DEV_PDC202XX_OLD=y
-+CONFIG_PDC202XX_BURST=y
-+CONFIG_BLK_DEV_PDC202XX_NEW=y
-+CONFIG_PDC202XX_FORCE=y
-+CONFIG_BLK_DEV_SVWKS=y
-+CONFIG_BLK_DEV_SIIMAGE=y
-+CONFIG_BLK_DEV_SIS5513=y
-+CONFIG_BLK_DEV_SLC90E66=y
-+CONFIG_BLK_DEV_TRM290=m
-+CONFIG_BLK_DEV_VIA82CXXX=y
-+# CONFIG_IDE_ARM is not set
-+# CONFIG_IDE_CHIPSETS is not set
-+CONFIG_BLK_DEV_IDEDMA=y
-+# CONFIG_IDEDMA_IVB is not set
-+CONFIG_IDEDMA_AUTO=y
-+# CONFIG_BLK_DEV_HD is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=m
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=m
-+CONFIG_CHR_DEV_ST=m
-+CONFIG_CHR_DEV_OSST=m
-+CONFIG_BLK_DEV_SR=m
-+# CONFIG_BLK_DEV_SR_VENDOR is not set
-+CONFIG_CHR_DEV_SG=m
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+CONFIG_SCSI_MULTI_LUN=y
-+CONFIG_SCSI_CONSTANTS=y
-+CONFIG_SCSI_LOGGING=y
-+
-+#
-+# SCSI Transport Attributes
-+#
-+CONFIG_SCSI_SPI_ATTRS=m
-+CONFIG_SCSI_FC_ATTRS=m
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+
-+#
-+# SCSI low-level drivers
-+#
-+CONFIG_BLK_DEV_3W_XXXX_RAID=m
-+CONFIG_SCSI_3W_9XXX=m
-+# CONFIG_SCSI_7000FASST is not set
-+CONFIG_SCSI_ACARD=m
-+CONFIG_SCSI_AHA152X=m
-+# CONFIG_SCSI_AHA1542 is not set
-+CONFIG_SCSI_AACRAID=m
-+CONFIG_SCSI_AIC7XXX=m
-+CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
-+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-+CONFIG_AIC7XXX_DEBUG_ENABLE=y
-+CONFIG_AIC7XXX_DEBUG_MASK=0
-+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-+CONFIG_SCSI_AIC7XXX_OLD=m
-+CONFIG_SCSI_AIC79XX=m
-+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-+CONFIG_AIC79XX_RESET_DELAY_MS=15000
-+CONFIG_AIC79XX_ENABLE_RD_STRM=y
-+CONFIG_AIC79XX_DEBUG_ENABLE=y
-+CONFIG_AIC79XX_DEBUG_MASK=0
-+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-+CONFIG_SCSI_DPT_I2O=m
-+CONFIG_SCSI_ADVANSYS=m
-+CONFIG_SCSI_IN2000=m
-+CONFIG_MEGARAID_NEWGEN=y
-+CONFIG_MEGARAID_MM=m
-+CONFIG_MEGARAID_MAILBOX=m
-+CONFIG_SCSI_SATA=y
-+CONFIG_SCSI_SATA_AHCI=m
-+CONFIG_SCSI_SATA_SVW=m
-+CONFIG_SCSI_ATA_PIIX=m
-+CONFIG_SCSI_SATA_NV=m
-+CONFIG_SCSI_SATA_PROMISE=m
-+# CONFIG_SCSI_SATA_QSTOR is not set
-+CONFIG_SCSI_SATA_SX4=m
-+CONFIG_SCSI_SATA_SIL=m
-+CONFIG_SCSI_SATA_SIS=m
-+CONFIG_SCSI_SATA_ULI=m
-+CONFIG_SCSI_SATA_VIA=m
-+CONFIG_SCSI_SATA_VITESSE=m
-+CONFIG_SCSI_BUSLOGIC=m
-+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-+# CONFIG_SCSI_CPQFCTS is not set
-+CONFIG_SCSI_DMX3191D=m
-+CONFIG_SCSI_DTC3280=m
-+CONFIG_SCSI_EATA=m
-+CONFIG_SCSI_EATA_TAGGED_QUEUE=y
-+CONFIG_SCSI_EATA_LINKED_COMMANDS=y
-+CONFIG_SCSI_EATA_MAX_TAGS=16
-+CONFIG_SCSI_EATA_PIO=m
-+CONFIG_SCSI_FUTURE_DOMAIN=m
-+CONFIG_SCSI_GDTH=m
-+CONFIG_SCSI_GENERIC_NCR5380=m
-+CONFIG_SCSI_GENERIC_NCR5380_MMIO=m
-+CONFIG_SCSI_GENERIC_NCR53C400=y
-+CONFIG_SCSI_IPS=m
-+# CONFIG_SCSI_INITIO is not set
-+# CONFIG_SCSI_INIA100 is not set
-+CONFIG_SCSI_PPA=m
-+CONFIG_SCSI_IMM=m
-+# CONFIG_SCSI_IZIP_EPP16 is not set
-+# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-+CONFIG_SCSI_NCR53C406A=m
-+CONFIG_SCSI_SYM53C8XX_2=m
-+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-+CONFIG_SCSI_IPR=m
-+# CONFIG_SCSI_IPR_TRACE is not set
-+# CONFIG_SCSI_IPR_DUMP is not set
-+CONFIG_SCSI_PAS16=m
-+# CONFIG_SCSI_PCI2000 is not set
-+# CONFIG_SCSI_PCI2220I is not set
-+CONFIG_SCSI_PSI240I=m
-+CONFIG_SCSI_QLOGIC_FAS=m
-+CONFIG_SCSI_QLOGIC_ISP=m
-+CONFIG_SCSI_QLOGIC_FC=m
-+CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
-+CONFIG_SCSI_QLOGIC_1280=m
-+CONFIG_SCSI_QLOGIC_1280_1040=y
-+CONFIG_SCSI_QLA2XXX=m
-+CONFIG_SCSI_QLA21XX=m
-+CONFIG_SCSI_QLA22XX=m
-+CONFIG_SCSI_QLA2300=m
-+CONFIG_SCSI_QLA2322=m
-+CONFIG_SCSI_QLA6312=m
-+CONFIG_SCSI_LPFC=m
-+# CONFIG_SCSI_SEAGATE is not set
-+CONFIG_SCSI_SYM53C416=m
-+CONFIG_SCSI_DC395x=m
-+CONFIG_SCSI_DC390T=m
-+CONFIG_SCSI_T128=m
-+CONFIG_SCSI_U14_34F=m
-+CONFIG_SCSI_U14_34F_TAGGED_QUEUE=y
-+CONFIG_SCSI_U14_34F_LINKED_COMMANDS=y
-+CONFIG_SCSI_U14_34F_MAX_TAGS=8
-+# CONFIG_SCSI_ULTRASTOR is not set
-+CONFIG_SCSI_NSP32=m
-+CONFIG_SCSI_DEBUG=m
-+
-+#
-+# PCMCIA SCSI adapter support
-+#
-+CONFIG_PCMCIA_AHA152X=m
-+CONFIG_PCMCIA_FDOMAIN=m
-+CONFIG_PCMCIA_NINJA_SCSI=m
-+CONFIG_PCMCIA_QLOGIC=m
-+CONFIG_PCMCIA_SYM53C500=m
-+
-+#
-+# Old CD-ROM drivers (not SCSI, not IDE)
-+#
-+CONFIG_CD_NO_IDESCSI=y
-+CONFIG_AZTCD=m
-+CONFIG_GSCD=m
-+# CONFIG_SBPCD is not set
-+CONFIG_MCDX=m
-+CONFIG_OPTCD=m
-+# CONFIG_CM206 is not set
-+CONFIG_SJCD=m
-+CONFIG_ISP16_CDI=m
-+CONFIG_CDU31A=m
-+CONFIG_CDU535=m
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+CONFIG_MD=y
-+CONFIG_BLK_DEV_MD=m
-+CONFIG_MD_LINEAR=m
-+CONFIG_MD_RAID0=m
-+CONFIG_MD_RAID1=m
-+CONFIG_MD_RAID10=m
-+CONFIG_MD_RAID5=m
-+CONFIG_MD_RAID6=m
-+CONFIG_MD_MULTIPATH=m
-+CONFIG_MD_FAULTY=m
-+CONFIG_BLK_DEV_DM=m
-+CONFIG_DM_CRYPT=m
-+CONFIG_DM_SNAPSHOT=m
-+CONFIG_DM_MIRROR=m
-+CONFIG_DM_ZERO=m
-+CONFIG_DM_MULTIPATH=m
-+CONFIG_DM_MULTIPATH_EMC=m
-+
-+#
-+# Fusion MPT device support
-+#
-+CONFIG_FUSION=m
-+CONFIG_FUSION_MAX_SGE=40
-+CONFIG_FUSION_CTL=m
-+CONFIG_FUSION_LAN=m
-+
-+#
-+# IEEE 1394 (FireWire) support
-+#
-+CONFIG_IEEE1394=m
-+
-+#
-+# Subsystem Options
-+#
-+# CONFIG_IEEE1394_VERBOSEDEBUG is not set
-+# CONFIG_IEEE1394_OUI_DB is not set
-+CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
-+CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
-+
-+#
-+# Device Drivers
-+#
-+CONFIG_IEEE1394_PCILYNX=m
-+CONFIG_IEEE1394_OHCI1394=m
-+
-+#
-+# Protocol Drivers
-+#
-+CONFIG_IEEE1394_VIDEO1394=m
-+CONFIG_IEEE1394_SBP2=m
-+# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-+CONFIG_IEEE1394_ETH1394=m
-+CONFIG_IEEE1394_DV1394=m
-+CONFIG_IEEE1394_RAWIO=m
-+CONFIG_IEEE1394_CMP=m
-+CONFIG_IEEE1394_AMDTP=m
-+
-+#
-+# I2O device support
-+#
-+CONFIG_I2O=m
-+CONFIG_I2O_CONFIG=m
-+CONFIG_I2O_BLOCK=m
-+CONFIG_I2O_SCSI=m
-+CONFIG_I2O_PROC=m
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=m
-+CONFIG_PACKET_MMAP=y
-+CONFIG_UNIX=m
-+CONFIG_NET_KEY=m
-+CONFIG_INET=y
-+CONFIG_IP_MULTICAST=y
-+CONFIG_IP_ADVANCED_ROUTER=y
-+CONFIG_IP_MULTIPLE_TABLES=y
-+CONFIG_IP_ROUTE_FWMARK=y
-+CONFIG_IP_ROUTE_MULTIPATH=y
-+# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
-+CONFIG_IP_ROUTE_VERBOSE=y
-+# CONFIG_IP_PNP is not set
-+CONFIG_NET_IPIP=m
-+CONFIG_NET_IPGRE=m
-+CONFIG_NET_IPGRE_BROADCAST=y
-+CONFIG_IP_MROUTE=y
-+CONFIG_IP_PIMSM_V1=y
-+CONFIG_IP_PIMSM_V2=y
-+# CONFIG_ARPD is not set
-+CONFIG_SYN_COOKIES=y
-+CONFIG_INET_AH=m
-+CONFIG_INET_ESP=m
-+CONFIG_INET_IPCOMP=m
-+CONFIG_INET_TUNNEL=m
-+CONFIG_IP_TCPDIAG=m
-+CONFIG_IP_TCPDIAG_IPV6=y
-+
-+#
-+# IP: Virtual Server Configuration
-+#
-+CONFIG_IP_VS=m
-+# CONFIG_IP_VS_DEBUG is not set
-+CONFIG_IP_VS_TAB_BITS=12
-+
-+#
-+# IPVS transport protocol load balancing support
-+#
-+CONFIG_IP_VS_PROTO_TCP=y
-+CONFIG_IP_VS_PROTO_UDP=y
-+CONFIG_IP_VS_PROTO_ESP=y
-+CONFIG_IP_VS_PROTO_AH=y
-+
-+#
-+# IPVS scheduler
-+#
-+CONFIG_IP_VS_RR=m
-+CONFIG_IP_VS_WRR=m
-+CONFIG_IP_VS_LC=m
-+CONFIG_IP_VS_WLC=m
-+CONFIG_IP_VS_LBLC=m
-+CONFIG_IP_VS_LBLCR=m
-+CONFIG_IP_VS_DH=m
-+CONFIG_IP_VS_SH=m
-+CONFIG_IP_VS_SED=m
-+CONFIG_IP_VS_NQ=m
-+
-+#
-+# IPVS application helper
-+#
-+CONFIG_IP_VS_FTP=m
-+CONFIG_IPV6=m
-+CONFIG_IPV6_PRIVACY=y
-+CONFIG_INET6_AH=m
-+CONFIG_INET6_ESP=m
-+CONFIG_INET6_IPCOMP=m
-+CONFIG_INET6_TUNNEL=m
-+CONFIG_IPV6_TUNNEL=m
-+CONFIG_NETFILTER=y
-+# CONFIG_NETFILTER_DEBUG is not set
-+CONFIG_BRIDGE_NETFILTER=y
-+
-+#
-+# IP: Netfilter Configuration
-+#
-+CONFIG_IP_NF_CONNTRACK=m
-+CONFIG_IP_NF_CT_ACCT=y
-+CONFIG_IP_NF_CONNTRACK_MARK=y
-+CONFIG_IP_NF_CT_PROTO_SCTP=m
-+CONFIG_IP_NF_FTP=m
-+CONFIG_IP_NF_IRC=m
-+CONFIG_IP_NF_TFTP=m
-+CONFIG_IP_NF_AMANDA=m
-+CONFIG_IP_NF_QUEUE=m
-+CONFIG_IP_NF_IPTABLES=m
-+CONFIG_IP_NF_MATCH_LIMIT=m
-+CONFIG_IP_NF_MATCH_IPRANGE=m
-+CONFIG_IP_NF_MATCH_MAC=m
-+CONFIG_IP_NF_MATCH_PKTTYPE=m
-+CONFIG_IP_NF_MATCH_MARK=m
-+CONFIG_IP_NF_MATCH_MULTIPORT=m
-+CONFIG_IP_NF_MATCH_TOS=m
-+CONFIG_IP_NF_MATCH_RECENT=m
-+CONFIG_IP_NF_MATCH_ECN=m
-+CONFIG_IP_NF_MATCH_DSCP=m
-+CONFIG_IP_NF_MATCH_AH_ESP=m
-+CONFIG_IP_NF_MATCH_LENGTH=m
-+CONFIG_IP_NF_MATCH_TTL=m
-+CONFIG_IP_NF_MATCH_TCPMSS=m
-+CONFIG_IP_NF_MATCH_HELPER=m
-+CONFIG_IP_NF_MATCH_STATE=m
-+CONFIG_IP_NF_MATCH_CONNTRACK=m
-+CONFIG_IP_NF_MATCH_OWNER=m
-+CONFIG_IP_NF_MATCH_PHYSDEV=m
-+CONFIG_IP_NF_MATCH_ADDRTYPE=m
-+CONFIG_IP_NF_MATCH_REALM=m
-+CONFIG_IP_NF_MATCH_SCTP=m
-+CONFIG_IP_NF_MATCH_COMMENT=m
-+CONFIG_IP_NF_MATCH_CONNMARK=m
-+CONFIG_IP_NF_MATCH_HASHLIMIT=m
-+CONFIG_IP_NF_FILTER=m
-+CONFIG_IP_NF_TARGET_REJECT=m
-+CONFIG_IP_NF_TARGET_LOG=m
-+CONFIG_IP_NF_TARGET_ULOG=m
-+CONFIG_IP_NF_TARGET_TCPMSS=m
-+CONFIG_IP_NF_NAT=m
-+CONFIG_IP_NF_NAT_NEEDED=y
-+CONFIG_IP_NF_TARGET_MASQUERADE=m
-+CONFIG_IP_NF_TARGET_REDIRECT=m
-+CONFIG_IP_NF_TARGET_NETMAP=m
-+CONFIG_IP_NF_TARGET_SAME=m
-+CONFIG_IP_NF_NAT_SNMP_BASIC=m
-+CONFIG_IP_NF_NAT_IRC=m
-+CONFIG_IP_NF_NAT_FTP=m
-+CONFIG_IP_NF_NAT_TFTP=m
-+CONFIG_IP_NF_NAT_AMANDA=m
-+CONFIG_IP_NF_MANGLE=m
-+CONFIG_IP_NF_TARGET_TOS=m
-+CONFIG_IP_NF_TARGET_ECN=m
-+CONFIG_IP_NF_TARGET_DSCP=m
-+CONFIG_IP_NF_TARGET_MARK=m
-+CONFIG_IP_NF_TARGET_CLASSIFY=m
-+CONFIG_IP_NF_TARGET_CONNMARK=m
-+CONFIG_IP_NF_TARGET_CLUSTERIP=m
-+CONFIG_IP_NF_RAW=m
-+CONFIG_IP_NF_TARGET_NOTRACK=m
-+CONFIG_IP_NF_ARPTABLES=m
-+CONFIG_IP_NF_ARPFILTER=m
-+CONFIG_IP_NF_ARP_MANGLE=m
-+
-+#
-+# IPv6: Netfilter Configuration (EXPERIMENTAL)
-+#
-+CONFIG_IP6_NF_QUEUE=m
-+CONFIG_IP6_NF_IPTABLES=m
-+CONFIG_IP6_NF_MATCH_LIMIT=m
-+CONFIG_IP6_NF_MATCH_MAC=m
-+CONFIG_IP6_NF_MATCH_RT=m
-+CONFIG_IP6_NF_MATCH_OPTS=m
-+CONFIG_IP6_NF_MATCH_FRAG=m
-+CONFIG_IP6_NF_MATCH_HL=m
-+CONFIG_IP6_NF_MATCH_MULTIPORT=m
-+CONFIG_IP6_NF_MATCH_OWNER=m
-+CONFIG_IP6_NF_MATCH_MARK=m
-+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-+CONFIG_IP6_NF_MATCH_AHESP=m
-+CONFIG_IP6_NF_MATCH_LENGTH=m
-+CONFIG_IP6_NF_MATCH_EUI64=m
-+CONFIG_IP6_NF_MATCH_PHYSDEV=m
-+CONFIG_IP6_NF_FILTER=m
-+CONFIG_IP6_NF_TARGET_LOG=m
-+CONFIG_IP6_NF_MANGLE=m
-+CONFIG_IP6_NF_TARGET_MARK=m
-+CONFIG_IP6_NF_RAW=m
-+
-+#
-+# DECnet: Netfilter Configuration
-+#
-+CONFIG_DECNET_NF_GRABULATOR=m
-+
-+#
-+# Bridge: Netfilter Configuration
-+#
-+CONFIG_BRIDGE_NF_EBTABLES=m
-+CONFIG_BRIDGE_EBT_BROUTE=m
-+CONFIG_BRIDGE_EBT_T_FILTER=m
-+CONFIG_BRIDGE_EBT_T_NAT=m
-+CONFIG_BRIDGE_EBT_802_3=m
-+CONFIG_BRIDGE_EBT_AMONG=m
-+CONFIG_BRIDGE_EBT_ARP=m
-+CONFIG_BRIDGE_EBT_IP=m
-+CONFIG_BRIDGE_EBT_LIMIT=m
-+CONFIG_BRIDGE_EBT_MARK=m
-+CONFIG_BRIDGE_EBT_PKTTYPE=m
-+CONFIG_BRIDGE_EBT_STP=m
-+CONFIG_BRIDGE_EBT_VLAN=m
-+CONFIG_BRIDGE_EBT_ARPREPLY=m
-+CONFIG_BRIDGE_EBT_DNAT=m
-+CONFIG_BRIDGE_EBT_MARK_T=m
-+CONFIG_BRIDGE_EBT_REDIRECT=m
-+CONFIG_BRIDGE_EBT_SNAT=m
-+CONFIG_BRIDGE_EBT_LOG=m
-+# CONFIG_BRIDGE_EBT_ULOG is not set
-+CONFIG_XFRM=y
-+CONFIG_XFRM_USER=m
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+CONFIG_IP_SCTP=m
-+# CONFIG_SCTP_DBG_MSG is not set
-+# CONFIG_SCTP_DBG_OBJCNT is not set
-+# CONFIG_SCTP_HMAC_NONE is not set
-+# CONFIG_SCTP_HMAC_SHA1 is not set
-+CONFIG_SCTP_HMAC_MD5=y
-+CONFIG_ATM=y
-+CONFIG_ATM_CLIP=y
-+# CONFIG_ATM_CLIP_NO_ICMP is not set
-+CONFIG_ATM_LANE=m
-+CONFIG_ATM_MPOA=m
-+CONFIG_ATM_BR2684=m
-+# CONFIG_ATM_BR2684_IPFILTER is not set
-+CONFIG_BRIDGE=m
-+CONFIG_VLAN_8021Q=m
-+CONFIG_DECNET=m
-+# CONFIG_DECNET_ROUTER is not set
-+CONFIG_LLC=y
-+CONFIG_LLC2=m
-+CONFIG_IPX=m
-+# CONFIG_IPX_INTERN is not set
-+CONFIG_ATALK=m
-+CONFIG_DEV_APPLETALK=y
-+CONFIG_LTPC=m
-+CONFIG_COPS=m
-+CONFIG_COPS_DAYNA=y
-+CONFIG_COPS_TANGENT=y
-+CONFIG_IPDDP=m
-+CONFIG_IPDDP_ENCAP=y
-+CONFIG_IPDDP_DECAP=y
-+CONFIG_X25=m
-+CONFIG_LAPB=m
-+# CONFIG_NET_DIVERT is not set
-+CONFIG_ECONET=m
-+CONFIG_ECONET_AUNUDP=y
-+CONFIG_ECONET_NATIVE=y
-+CONFIG_WAN_ROUTER=m
-+
-+#
-+# QoS and/or fair queueing
-+#
-+CONFIG_NET_SCHED=y
-+CONFIG_NET_SCH_CLK_JIFFIES=y
-+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-+# CONFIG_NET_SCH_CLK_CPU is not set
-+CONFIG_NET_SCH_CBQ=m
-+CONFIG_NET_SCH_HTB=m
-+CONFIG_NET_SCH_HFSC=m
-+CONFIG_NET_SCH_ATM=m
-+CONFIG_NET_SCH_PRIO=m
-+CONFIG_NET_SCH_RED=m
-+CONFIG_NET_SCH_SFQ=m
-+CONFIG_NET_SCH_TEQL=m
-+CONFIG_NET_SCH_TBF=m
-+CONFIG_NET_SCH_GRED=m
-+CONFIG_NET_SCH_DSMARK=m
-+CONFIG_NET_SCH_NETEM=m
-+CONFIG_NET_SCH_INGRESS=m
-+CONFIG_NET_QOS=y
-+CONFIG_NET_ESTIMATOR=y
-+CONFIG_NET_CLS=y
-+CONFIG_NET_CLS_BASIC=m
-+CONFIG_NET_CLS_TCINDEX=m
-+CONFIG_NET_CLS_ROUTE4=m
-+CONFIG_NET_CLS_ROUTE=y
-+CONFIG_NET_CLS_FW=m
-+CONFIG_NET_CLS_U32=m
-+# CONFIG_CLS_U32_PERF is not set
-+# CONFIG_NET_CLS_IND is not set
-+# CONFIG_CLS_U32_MARK is not set
-+CONFIG_NET_CLS_RSVP=m
-+CONFIG_NET_CLS_RSVP6=m
-+CONFIG_NET_EMATCH=y
-+CONFIG_NET_EMATCH_STACK=32
-+CONFIG_NET_EMATCH_CMP=m
-+CONFIG_NET_EMATCH_NBYTE=m
-+CONFIG_NET_EMATCH_U32=m
-+CONFIG_NET_EMATCH_META=m
-+# CONFIG_NET_CLS_ACT is not set
-+CONFIG_NET_CLS_POLICE=y
-+
-+#
-+# Network testing
-+#
-+CONFIG_NET_PKTGEN=m
-+CONFIG_NETPOLL=y
-+# CONFIG_NETPOLL_RX is not set
-+# CONFIG_NETPOLL_TRAP is not set
-+CONFIG_NET_POLL_CONTROLLER=y
-+CONFIG_HAMRADIO=y
-+
-+#
-+# Packet Radio protocols
-+#
-+CONFIG_AX25=m
-+# CONFIG_AX25_DAMA_SLAVE is not set
-+CONFIG_NETROM=m
-+CONFIG_ROSE=m
-+
-+#
-+# AX.25 network device drivers
-+#
-+CONFIG_MKISS=m
-+CONFIG_6PACK=m
-+CONFIG_BPQETHER=m
-+# CONFIG_DMASCC is not set
-+CONFIG_SCC=m
-+# CONFIG_SCC_DELAY is not set
-+# CONFIG_SCC_TRXECHO is not set
-+CONFIG_BAYCOM_SER_FDX=m
-+CONFIG_BAYCOM_SER_HDX=m
-+CONFIG_BAYCOM_PAR=m
-+CONFIG_BAYCOM_EPP=m
-+CONFIG_YAM=m
-+CONFIG_IRDA=m
-+
-+#
-+# IrDA protocols
-+#
-+CONFIG_IRLAN=m
-+CONFIG_IRNET=m
-+CONFIG_IRCOMM=m
-+# CONFIG_IRDA_ULTRA is not set
-+
-+#
-+# IrDA options
-+#
-+CONFIG_IRDA_CACHE_LAST_LSAP=y
-+CONFIG_IRDA_FAST_RR=y
-+CONFIG_IRDA_DEBUG=y
-+
-+#
-+# Infrared-port device drivers
-+#
-+
-+#
-+# SIR device drivers
-+#
-+CONFIG_IRTTY_SIR=m
-+
-+#
-+# Dongle support
-+#
-+CONFIG_DONGLE=y
-+CONFIG_ESI_DONGLE=m
-+CONFIG_ACTISYS_DONGLE=m
-+CONFIG_TEKRAM_DONGLE=m
-+CONFIG_LITELINK_DONGLE=m
-+CONFIG_MA600_DONGLE=m
-+CONFIG_GIRBIL_DONGLE=m
-+CONFIG_MCP2120_DONGLE=m
-+CONFIG_OLD_BELKIN_DONGLE=m
-+CONFIG_ACT200L_DONGLE=m
-+
-+#
-+# Old SIR device drivers
-+#
-+CONFIG_IRPORT_SIR=m
-+
-+#
-+# Old Serial dongle support
-+#
-+# CONFIG_DONGLE_OLD is not set
-+
-+#
-+# FIR device drivers
-+#
-+CONFIG_USB_IRDA=m
-+CONFIG_SIGMATEL_FIR=m
-+CONFIG_NSC_FIR=m
-+CONFIG_WINBOND_FIR=m
-+# CONFIG_TOSHIBA_FIR is not set
-+CONFIG_SMC_IRCC_FIR=m
-+CONFIG_ALI_FIR=m
-+CONFIG_VLSI_FIR=m
-+CONFIG_VIA_FIR=m
-+CONFIG_BT=m
-+CONFIG_BT_L2CAP=m
-+CONFIG_BT_SCO=m
-+CONFIG_BT_RFCOMM=m
-+CONFIG_BT_RFCOMM_TTY=y
-+CONFIG_BT_BNEP=m
-+CONFIG_BT_BNEP_MC_FILTER=y
-+CONFIG_BT_BNEP_PROTO_FILTER=y
-+CONFIG_BT_CMTP=m
-+CONFIG_BT_HIDP=m
-+
-+#
-+# Bluetooth device drivers
-+#
-+CONFIG_BT_HCIUSB=m
-+CONFIG_BT_HCIUSB_SCO=y
-+CONFIG_BT_HCIUART=m
-+CONFIG_BT_HCIUART_H4=y
-+CONFIG_BT_HCIUART_BCSP=y
-+# CONFIG_BT_HCIUART_BCSP_TXCRC is not set
-+CONFIG_BT_HCIBCM203X=m
-+# CONFIG_BT_HCIBPA10X is not set
-+CONFIG_BT_HCIBFUSB=m
-+CONFIG_BT_HCIDTL1=m
-+CONFIG_BT_HCIBT3C=m
-+CONFIG_BT_HCIBLUECARD=m
-+CONFIG_BT_HCIBTUART=m
-+CONFIG_BT_HCIVHCI=m
-+CONFIG_NETDEVICES=y
-+CONFIG_DUMMY=m
-+CONFIG_BONDING=m
-+CONFIG_EQUALIZER=m
-+CONFIG_TUN=m
-+CONFIG_NET_SB1000=m
-+
-+#
-+# ARCnet devices
-+#
-+CONFIG_ARCNET=m
-+CONFIG_ARCNET_1201=m
-+CONFIG_ARCNET_1051=m
-+CONFIG_ARCNET_RAW=m
-+# CONFIG_ARCNET_CAP is not set
-+CONFIG_ARCNET_COM90xx=m
-+CONFIG_ARCNET_COM90xxIO=m
-+CONFIG_ARCNET_RIM_I=m
-+CONFIG_ARCNET_COM20020=m
-+CONFIG_ARCNET_COM20020_ISA=m
-+CONFIG_ARCNET_COM20020_PCI=m
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=m
-+CONFIG_HAPPYMEAL=m
-+CONFIG_SUNGEM=m
-+CONFIG_NET_VENDOR_3COM=y
-+CONFIG_EL1=m
-+CONFIG_EL2=m
-+# CONFIG_ELPLUS is not set
-+CONFIG_EL16=m
-+CONFIG_EL3=m
-+# CONFIG_3C515 is not set
-+CONFIG_VORTEX=m
-+CONFIG_TYPHOON=m
-+# CONFIG_LANCE is not set
-+CONFIG_NET_VENDOR_SMC=y
-+CONFIG_WD80x3=m
-+CONFIG_ULTRA=m
-+CONFIG_SMC9194=m
-+CONFIG_NET_VENDOR_RACAL=y
-+CONFIG_NI5010=m
-+CONFIG_NI52=m
-+# CONFIG_NI65 is not set
-+
-+#
-+# Tulip family network device support
-+#
-+CONFIG_NET_TULIP=y
-+CONFIG_DE2104X=m
-+CONFIG_TULIP=m
-+# CONFIG_TULIP_MWI is not set
-+# CONFIG_TULIP_MMIO is not set
-+# CONFIG_TULIP_NAPI is not set
-+CONFIG_DE4X5=m
-+CONFIG_WINBOND_840=m
-+CONFIG_DM9102=m
-+CONFIG_PCMCIA_XIRCOM=m
-+# CONFIG_PCMCIA_XIRTULIP is not set
-+CONFIG_AT1700=m
-+CONFIG_DEPCA=m
-+CONFIG_HP100=m
-+CONFIG_NET_ISA=y
-+CONFIG_E2100=m
-+CONFIG_EWRK3=m
-+CONFIG_EEXPRESS=m
-+CONFIG_EEXPRESS_PRO=m
-+CONFIG_HPLAN_PLUS=m
-+CONFIG_HPLAN=m
-+CONFIG_LP486E=m
-+CONFIG_ETH16I=m
-+CONFIG_NE2000=m
-+CONFIG_ZNET=m
-+CONFIG_SEEQ8005=m
-+CONFIG_NET_PCI=y
-+CONFIG_PCNET32=m
-+CONFIG_AMD8111_ETH=m
-+# CONFIG_AMD8111E_NAPI is not set
-+CONFIG_ADAPTEC_STARFIRE=m
-+# CONFIG_ADAPTEC_STARFIRE_NAPI is not set
-+CONFIG_AC3200=m
-+CONFIG_APRICOT=m
-+CONFIG_B44=m
-+CONFIG_FORCEDETH=m
-+CONFIG_CS89x0=m
-+# CONFIG_DGRS is not set
-+CONFIG_EEPRO100=m
-+CONFIG_E100=m
-+CONFIG_FEALNX=m
-+CONFIG_NATSEMI=m
-+CONFIG_NE2K_PCI=m
-+CONFIG_8139CP=m
-+CONFIG_8139TOO=m
-+CONFIG_8139TOO_PIO=y
-+CONFIG_8139TOO_TUNE_TWISTER=y
-+CONFIG_8139TOO_8129=y
-+# CONFIG_8139_OLD_RX_RESET is not set
-+CONFIG_SIS900=m
-+CONFIG_EPIC100=m
-+CONFIG_SUNDANCE=m
-+# CONFIG_SUNDANCE_MMIO is not set
-+CONFIG_TLAN=m
-+CONFIG_VIA_RHINE=m
-+# CONFIG_VIA_RHINE_MMIO is not set
-+CONFIG_NET_POCKET=y
-+CONFIG_ATP=m
-+CONFIG_DE600=m
-+CONFIG_DE620=m
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+# CONFIG_ACENIC is not set
-+CONFIG_DL2K=m
-+CONFIG_E1000=m
-+# CONFIG_E1000_NAPI is not set
-+CONFIG_NS83820=m
-+CONFIG_HAMACHI=m
-+CONFIG_YELLOWFIN=m
-+CONFIG_R8169=m
-+# CONFIG_R8169_NAPI is not set
-+# CONFIG_R8169_VLAN is not set
-+CONFIG_SK98LIN=m
-+CONFIG_VIA_VELOCITY=m
-+CONFIG_TIGON3=m
-+CONFIG_BNX2=m
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+CONFIG_IXGB=m
-+# CONFIG_IXGB_NAPI is not set
-+CONFIG_S2IO=m
-+# CONFIG_S2IO_NAPI is not set
-+# CONFIG_2BUFF_MODE is not set
-+
-+#
-+# Token Ring devices
-+#
-+CONFIG_TR=y
-+CONFIG_IBMTR=m
-+CONFIG_IBMOL=m
-+CONFIG_IBMLS=m
-+CONFIG_3C359=m
-+CONFIG_TMS380TR=m
-+CONFIG_TMSPCI=m
-+CONFIG_SKISA=m
-+CONFIG_PROTEON=m
-+CONFIG_ABYSS=m
-+# CONFIG_SMCTR is not set
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+CONFIG_NET_RADIO=y
-+
-+#
-+# Obsolete Wireless cards support (pre-802.11)
-+#
-+CONFIG_STRIP=m
-+CONFIG_ARLAN=m
-+CONFIG_WAVELAN=m
-+CONFIG_PCMCIA_WAVELAN=m
-+CONFIG_PCMCIA_NETWAVE=m
-+
-+#
-+# Wireless 802.11 Frequency Hopping cards support
-+#
-+CONFIG_PCMCIA_RAYCS=m
-+
-+#
-+# Wireless 802.11b ISA/PCI cards support
-+#
-+CONFIG_AIRO=m
-+CONFIG_HERMES=m
-+CONFIG_PLX_HERMES=m
-+CONFIG_TMD_HERMES=m
-+CONFIG_PCI_HERMES=m
-+CONFIG_ATMEL=m
-+CONFIG_PCI_ATMEL=m
-+
-+#
-+# Wireless 802.11b Pcmcia/Cardbus cards support
-+#
-+CONFIG_PCMCIA_HERMES=m
-+CONFIG_AIRO_CS=m
-+CONFIG_PCMCIA_ATMEL=m
-+CONFIG_PCMCIA_WL3501=m
-+
-+#
-+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
-+#
-+CONFIG_PRISM54=m
-+CONFIG_NET_WIRELESS=y
-+
-+#
-+# PCMCIA network device support
-+#
-+CONFIG_NET_PCMCIA=y
-+CONFIG_PCMCIA_3C589=m
-+CONFIG_PCMCIA_3C574=m
-+CONFIG_PCMCIA_FMVJ18X=m
-+CONFIG_PCMCIA_PCNET=m
-+CONFIG_PCMCIA_NMCLAN=m
-+CONFIG_PCMCIA_SMC91C92=m
-+CONFIG_PCMCIA_XIRC2PS=m
-+CONFIG_PCMCIA_AXNET=m
-+CONFIG_ARCNET_COM20020_CS=m
-+CONFIG_PCMCIA_IBMTR=m
-+
-+#
-+# Wan interfaces
-+#
-+CONFIG_WAN=y
-+CONFIG_HOSTESS_SV11=m
-+CONFIG_COSA=m
-+CONFIG_DSCC4=m
-+CONFIG_DSCC4_PCISYNC=y
-+CONFIG_DSCC4_PCI_RST=y
-+CONFIG_LANMEDIA=m
-+CONFIG_SEALEVEL_4021=m
-+CONFIG_SYNCLINK_SYNCPPP=m
-+CONFIG_HDLC=m
-+CONFIG_HDLC_RAW=y
-+CONFIG_HDLC_RAW_ETH=y
-+CONFIG_HDLC_CISCO=y
-+CONFIG_HDLC_FR=y
-+CONFIG_HDLC_PPP=y
-+CONFIG_HDLC_X25=y
-+CONFIG_PCI200SYN=m
-+CONFIG_WANXL=m
-+CONFIG_PC300=m
-+CONFIG_PC300_MLPPP=y
-+CONFIG_N2=m
-+CONFIG_C101=m
-+CONFIG_FARSYNC=m
-+CONFIG_DLCI=m
-+CONFIG_DLCI_COUNT=24
-+CONFIG_DLCI_MAX=8
-+CONFIG_SDLA=m
-+CONFIG_WAN_ROUTER_DRIVERS=y
-+# CONFIG_VENDOR_SANGOMA is not set
-+CONFIG_CYCLADES_SYNC=m
-+CONFIG_CYCLOMX_X25=y
-+CONFIG_LAPBETHER=m
-+CONFIG_X25_ASY=m
-+CONFIG_SBNI=m
-+# CONFIG_SBNI_MULTILINE is not set
-+
-+#
-+# ATM drivers
-+#
-+CONFIG_ATM_TCP=m
-+CONFIG_ATM_LANAI=m
-+CONFIG_ATM_ENI=m
-+# CONFIG_ATM_ENI_DEBUG is not set
-+# CONFIG_ATM_ENI_TUNE_BURST is not set
-+CONFIG_ATM_FIRESTREAM=m
-+CONFIG_ATM_ZATM=m
-+# CONFIG_ATM_ZATM_DEBUG is not set
-+CONFIG_ATM_NICSTAR=m
-+# CONFIG_ATM_NICSTAR_USE_SUNI is not set
-+# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
-+CONFIG_ATM_IDT77252=m
-+# CONFIG_ATM_IDT77252_DEBUG is not set
-+# CONFIG_ATM_IDT77252_RCV_ALL is not set
-+CONFIG_ATM_IDT77252_USE_SUNI=y
-+CONFIG_ATM_AMBASSADOR=m
-+# CONFIG_ATM_AMBASSADOR_DEBUG is not set
-+CONFIG_ATM_HORIZON=m
-+# CONFIG_ATM_HORIZON_DEBUG is not set
-+CONFIG_ATM_IA=m
-+# CONFIG_ATM_IA_DEBUG is not set
-+CONFIG_ATM_FORE200E_MAYBE=m
-+CONFIG_ATM_FORE200E_PCA=y
-+CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
-+# CONFIG_ATM_FORE200E_USE_TASKLET is not set
-+CONFIG_ATM_FORE200E_TX_RETRY=16
-+CONFIG_ATM_FORE200E_DEBUG=0
-+CONFIG_ATM_FORE200E=m
-+CONFIG_ATM_HE=m
-+CONFIG_ATM_HE_USE_SUNI=y
-+CONFIG_FDDI=y
-+CONFIG_DEFXX=m
-+CONFIG_SKFP=m
-+CONFIG_HIPPI=y
-+CONFIG_ROADRUNNER=m
-+# CONFIG_ROADRUNNER_LARGE_RINGS is not set
-+CONFIG_PLIP=m
-+CONFIG_PPP=m
-+CONFIG_PPP_MULTILINK=y
-+CONFIG_PPP_FILTER=y
-+CONFIG_PPP_ASYNC=m
-+CONFIG_PPP_SYNC_TTY=m
-+CONFIG_PPP_DEFLATE=m
-+CONFIG_PPP_BSDCOMP=m
-+CONFIG_PPPOE=m
-+CONFIG_PPPOATM=m
-+CONFIG_SLIP=m
-+CONFIG_SLIP_COMPRESSED=y
-+CONFIG_SLIP_SMART=y
-+CONFIG_SLIP_MODE_SLIP6=y
-+CONFIG_NET_FC=y
-+CONFIG_SHAPER=m
-+CONFIG_NETCONSOLE=m
-+
-+#
-+# ISDN subsystem
-+#
-+CONFIG_ISDN=m
-+
-+#
-+# Old ISDN4Linux
-+#
-+CONFIG_ISDN_I4L=m
-+CONFIG_ISDN_PPP=y
-+CONFIG_ISDN_PPP_VJ=y
-+CONFIG_ISDN_MPP=y
-+CONFIG_IPPP_FILTER=y
-+CONFIG_ISDN_PPP_BSDCOMP=m
-+CONFIG_ISDN_AUDIO=y
-+CONFIG_ISDN_TTY_FAX=y
-+CONFIG_ISDN_X25=y
-+
-+#
-+# ISDN feature submodules
-+#
-+# CONFIG_ISDN_DRV_LOOP is not set
-+# CONFIG_ISDN_DIVERSION is not set
-+
-+#
-+# ISDN4Linux hardware drivers
-+#
-+
-+#
-+# Passive cards
-+#
-+CONFIG_ISDN_DRV_HISAX=m
-+
-+#
-+# D-channel protocol features
-+#
-+CONFIG_HISAX_EURO=y
-+CONFIG_DE_AOC=y
-+# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-+# CONFIG_HISAX_NO_LLC is not set
-+# CONFIG_HISAX_NO_KEYPAD is not set
-+CONFIG_HISAX_1TR6=y
-+CONFIG_HISAX_NI1=y
-+CONFIG_HISAX_MAX_CARDS=8
-+
-+#
-+# HiSax supported cards
-+#
-+CONFIG_HISAX_16_0=y
-+CONFIG_HISAX_16_3=y
-+CONFIG_HISAX_TELESPCI=y
-+CONFIG_HISAX_S0BOX=y
-+CONFIG_HISAX_AVM_A1=y
-+CONFIG_HISAX_FRITZPCI=y
-+CONFIG_HISAX_AVM_A1_PCMCIA=y
-+CONFIG_HISAX_ELSA=y
-+CONFIG_HISAX_IX1MICROR2=y
-+CONFIG_HISAX_DIEHLDIVA=y
-+CONFIG_HISAX_ASUSCOM=y
-+CONFIG_HISAX_TELEINT=y
-+CONFIG_HISAX_HFCS=y
-+CONFIG_HISAX_SEDLBAUER=y
-+CONFIG_HISAX_SPORTSTER=y
-+CONFIG_HISAX_MIC=y
-+CONFIG_HISAX_NETJET=y
-+CONFIG_HISAX_NETJET_U=y
-+CONFIG_HISAX_NICCY=y
-+CONFIG_HISAX_ISURF=y
-+CONFIG_HISAX_HSTSAPHIR=y
-+CONFIG_HISAX_BKM_A4T=y
-+CONFIG_HISAX_SCT_QUADRO=y
-+CONFIG_HISAX_GAZEL=y
-+CONFIG_HISAX_HFC_PCI=y
-+CONFIG_HISAX_W6692=y
-+CONFIG_HISAX_HFC_SX=y
-+CONFIG_HISAX_ENTERNOW_PCI=y
-+# CONFIG_HISAX_DEBUG is not set
-+
-+#
-+# HiSax PCMCIA card service modules
-+#
-+CONFIG_HISAX_SEDLBAUER_CS=m
-+CONFIG_HISAX_ELSA_CS=m
-+CONFIG_HISAX_AVM_A1_CS=m
-+CONFIG_HISAX_TELES_CS=m
-+
-+#
-+# HiSax sub driver modules
-+#
-+CONFIG_HISAX_ST5481=m
-+CONFIG_HISAX_HFCUSB=m
-+CONFIG_HISAX_HFC4S8S=m
-+CONFIG_HISAX_FRITZ_PCIPNP=m
-+CONFIG_HISAX_HDLC=y
-+
-+#
-+# Active cards
-+#
-+CONFIG_ISDN_DRV_ICN=m
-+CONFIG_ISDN_DRV_PCBIT=m
-+CONFIG_ISDN_DRV_SC=m
-+CONFIG_ISDN_DRV_ACT2000=m
-+# CONFIG_HYSDN is not set
-+
-+#
-+# CAPI subsystem
-+#
-+CONFIG_ISDN_CAPI=m
-+CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
-+CONFIG_ISDN_CAPI_MIDDLEWARE=y
-+CONFIG_ISDN_CAPI_CAPI20=m
-+CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
-+CONFIG_ISDN_CAPI_CAPIFS=m
-+CONFIG_ISDN_CAPI_CAPIDRV=m
-+
-+#
-+# CAPI hardware drivers
-+#
-+
-+#
-+# Active AVM cards
-+#
-+CONFIG_CAPI_AVM=y
-+CONFIG_ISDN_DRV_AVMB1_B1ISA=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-+CONFIG_ISDN_DRV_AVMB1_T1ISA=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-+CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
-+CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-+CONFIG_ISDN_DRV_AVMB1_C4=m
-+
-+#
-+# Active Eicon DIVA Server cards
-+#
-+CONFIG_CAPI_EICON=y
-+CONFIG_ISDN_DIVAS=m
-+CONFIG_ISDN_DIVAS_BRIPCI=y
-+CONFIG_ISDN_DIVAS_PRIPCI=y
-+CONFIG_ISDN_DIVAS_DIVACAPI=m
-+CONFIG_ISDN_DIVAS_USERIDI=m
-+CONFIG_ISDN_DIVAS_MAINT=m
-+
-+#
-+# Telephony Support
-+#
-+CONFIG_PHONE=m
-+CONFIG_PHONE_IXJ=m
-+CONFIG_PHONE_IXJ_PCMCIA=m
-+
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+CONFIG_INPUT_MOUSEDEV_PSAUX=y
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+CONFIG_INPUT_JOYDEV=m
-+CONFIG_INPUT_TSDEV=m
-+CONFIG_INPUT_TSDEV_SCREEN_X=240
-+CONFIG_INPUT_TSDEV_SCREEN_Y=320
-+CONFIG_INPUT_EVDEV=m
-+CONFIG_INPUT_EVBUG=m
-+
-+#
-+# Input Device Drivers
-+#
-+CONFIG_INPUT_KEYBOARD=y
-+CONFIG_KEYBOARD_ATKBD=y
-+CONFIG_KEYBOARD_SUNKBD=m
-+CONFIG_KEYBOARD_LKKBD=m
-+CONFIG_KEYBOARD_XTKBD=m
-+CONFIG_KEYBOARD_NEWTON=m
-+CONFIG_INPUT_MOUSE=y
-+CONFIG_MOUSE_PS2=y
-+CONFIG_MOUSE_SERIAL=m
-+CONFIG_MOUSE_INPORT=m
-+# CONFIG_MOUSE_ATIXL is not set
-+CONFIG_MOUSE_LOGIBM=m
-+CONFIG_MOUSE_PC110PAD=m
-+CONFIG_MOUSE_VSXXXAA=m
-+CONFIG_INPUT_JOYSTICK=y
-+CONFIG_JOYSTICK_ANALOG=m
-+CONFIG_JOYSTICK_A3D=m
-+CONFIG_JOYSTICK_ADI=m
-+CONFIG_JOYSTICK_COBRA=m
-+CONFIG_JOYSTICK_GF2K=m
-+CONFIG_JOYSTICK_GRIP=m
-+CONFIG_JOYSTICK_GRIP_MP=m
-+CONFIG_JOYSTICK_GUILLEMOT=m
-+CONFIG_JOYSTICK_INTERACT=m
-+CONFIG_JOYSTICK_SIDEWINDER=m
-+CONFIG_JOYSTICK_TMDC=m
-+CONFIG_JOYSTICK_IFORCE=m
-+CONFIG_JOYSTICK_IFORCE_USB=y
-+CONFIG_JOYSTICK_IFORCE_232=y
-+CONFIG_JOYSTICK_WARRIOR=m
-+CONFIG_JOYSTICK_MAGELLAN=m
-+CONFIG_JOYSTICK_SPACEORB=m
-+CONFIG_JOYSTICK_SPACEBALL=m
-+CONFIG_JOYSTICK_STINGER=m
-+CONFIG_JOYSTICK_TWIDJOY=m
-+CONFIG_JOYSTICK_DB9=m
-+CONFIG_JOYSTICK_GAMECON=m
-+CONFIG_JOYSTICK_TURBOGRAFX=m
-+CONFIG_JOYSTICK_JOYDUMP=m
-+CONFIG_INPUT_TOUCHSCREEN=y
-+CONFIG_TOUCHSCREEN_GUNZE=m
-+CONFIG_TOUCHSCREEN_ELO=m
-+CONFIG_TOUCHSCREEN_MTOUCH=m
-+CONFIG_TOUCHSCREEN_MK712=m
-+CONFIG_INPUT_MISC=y
-+CONFIG_INPUT_PCSPKR=m
-+CONFIG_INPUT_UINPUT=m
-+
-+#
-+# Hardware I/O ports
-+#
-+CONFIG_SERIO=y
-+CONFIG_SERIO_I8042=y
-+CONFIG_SERIO_SERPORT=m
-+CONFIG_SERIO_CT82C710=m
-+CONFIG_SERIO_PARKBD=m
-+CONFIG_SERIO_PCIPS2=m
-+CONFIG_SERIO_LIBPS2=y
-+CONFIG_SERIO_RAW=m
-+CONFIG_GAMEPORT=m
-+CONFIG_GAMEPORT_NS558=m
-+CONFIG_GAMEPORT_L4=m
-+CONFIG_GAMEPORT_EMU10K1=m
-+CONFIG_GAMEPORT_VORTEX=m
-+CONFIG_GAMEPORT_FM801=m
-+# CONFIG_GAMEPORT_CS461X is not set
-+
-+#
-+# Character devices
-+#
-+CONFIG_VT=y
-+CONFIG_VT_CONSOLE=y
-+CONFIG_HW_CONSOLE=y
-+# CONFIG_SERIAL_NONSTANDARD is not set
-+
-+#
-+# Serial drivers
-+#
-+CONFIG_SERIAL_8250=m
-+# CONFIG_SERIAL_8250_CS is not set
-+# CONFIG_SERIAL_8250_ACPI is not set
-+CONFIG_SERIAL_8250_NR_UARTS=4
-+# CONFIG_SERIAL_8250_EXTENDED is not set
-+
-+#
-+# Non-8250 serial port support
-+#
-+CONFIG_SERIAL_CORE=m
-+CONFIG_SERIAL_JSM=m
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+CONFIG_PRINTER=m
-+# CONFIG_LP_CONSOLE is not set
-+CONFIG_PPDEV=m
-+CONFIG_TIPAR=m
-+
-+#
-+# IPMI
-+#
-+CONFIG_IPMI_HANDLER=m
-+# CONFIG_IPMI_PANIC_EVENT is not set
-+CONFIG_IPMI_DEVICE_INTERFACE=m
-+CONFIG_IPMI_SI=m
-+CONFIG_IPMI_WATCHDOG=m
-+CONFIG_IPMI_POWEROFF=m
-+
-+#
-+# Watchdog Cards
-+#
-+CONFIG_WATCHDOG=y
-+# CONFIG_WATCHDOG_NOWAYOUT is not set
-+
-+#
-+# Watchdog Device Drivers
-+#
-+CONFIG_SOFT_WATCHDOG=m
-+CONFIG_ACQUIRE_WDT=m
-+CONFIG_ADVANTECH_WDT=m
-+CONFIG_ALIM1535_WDT=m
-+CONFIG_ALIM7101_WDT=m
-+CONFIG_SC520_WDT=m
-+CONFIG_EUROTECH_WDT=m
-+CONFIG_IB700_WDT=m
-+CONFIG_WAFER_WDT=m
-+CONFIG_I8XX_TCO=m
-+CONFIG_SC1200_WDT=m
-+CONFIG_SCx200_WDT=m
-+CONFIG_60XX_WDT=m
-+CONFIG_CPU5_WDT=m
-+CONFIG_W83627HF_WDT=m
-+CONFIG_W83877F_WDT=m
-+CONFIG_MACHZ_WDT=m
-+
-+#
-+# ISA-based Watchdog Cards
-+#
-+CONFIG_PCWATCHDOG=m
-+CONFIG_MIXCOMWD=m
-+CONFIG_WDT=m
-+CONFIG_WDT_501=y
-+
-+#
-+# PCI-based Watchdog Cards
-+#
-+CONFIG_PCIPCWATCHDOG=m
-+CONFIG_WDTPCI=m
-+CONFIG_WDT_501_PCI=y
-+
-+#
-+# USB-based Watchdog Cards
-+#
-+CONFIG_USBPCWATCHDOG=m
-+CONFIG_HW_RANDOM=m
-+CONFIG_NVRAM=m
-+CONFIG_RTC=m
-+CONFIG_GEN_RTC=m
-+CONFIG_GEN_RTC_X=y
-+CONFIG_DTLK=m
-+CONFIG_R3964=m
-+CONFIG_APPLICOM=m
-+CONFIG_SONYPI=m
-+
-+#
-+# Ftape, the floppy tape device driver
-+#
-+# CONFIG_FTAPE is not set
-+CONFIG_AGP=m
-+CONFIG_AGP_ALI=m
-+CONFIG_AGP_ATI=m
-+CONFIG_AGP_AMD=m
-+CONFIG_AGP_AMD64=m
-+CONFIG_AGP_INTEL=m
-+CONFIG_AGP_NVIDIA=m
-+CONFIG_AGP_SIS=m
-+CONFIG_AGP_SWORKS=m
-+CONFIG_AGP_VIA=m
-+CONFIG_AGP_EFFICEON=m
-+CONFIG_DRM=m
-+CONFIG_DRM_TDFX=m
-+# CONFIG_DRM_GAMMA is not set
-+CONFIG_DRM_R128=m
-+CONFIG_DRM_RADEON=m
-+CONFIG_DRM_I810=m
-+CONFIG_DRM_I830=m
-+CONFIG_DRM_I915=m
-+CONFIG_DRM_MGA=m
-+CONFIG_DRM_SIS=m
-+
-+#
-+# PCMCIA character devices
-+#
-+CONFIG_SYNCLINK_CS=m
-+CONFIG_MWAVE=m
-+CONFIG_SCx200_GPIO=m
-+CONFIG_RAW_DRIVER=m
-+# CONFIG_HPET is not set
-+CONFIG_MAX_RAW_DEVS=256
-+CONFIG_HANGCHECK_TIMER=m
-+
-+#
-+# TPM devices
-+#
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# I2C support
-+#
-+CONFIG_I2C=m
-+CONFIG_I2C_CHARDEV=m
-+
-+#
-+# I2C Algorithms
-+#
-+CONFIG_I2C_ALGOBIT=m
-+CONFIG_I2C_ALGOPCF=m
-+CONFIG_I2C_ALGOPCA=m
-+
-+#
-+# I2C Hardware Bus support
-+#
-+CONFIG_I2C_ALI1535=m
-+CONFIG_I2C_ALI1563=m
-+CONFIG_I2C_ALI15X3=m
-+CONFIG_I2C_AMD756=m
-+CONFIG_I2C_AMD756_S4882=m
-+CONFIG_I2C_AMD8111=m
-+CONFIG_I2C_ELEKTOR=m
-+CONFIG_I2C_I801=m
-+CONFIG_I2C_I810=m
-+CONFIG_I2C_PIIX4=m
-+CONFIG_I2C_ISA=m
-+CONFIG_I2C_NFORCE2=m
-+CONFIG_I2C_PARPORT=m
-+CONFIG_I2C_PARPORT_LIGHT=m
-+CONFIG_I2C_PROSAVAGE=m
-+CONFIG_I2C_SAVAGE4=m
-+CONFIG_SCx200_I2C=m
-+CONFIG_SCx200_I2C_SCL=12
-+CONFIG_SCx200_I2C_SDA=13
-+CONFIG_SCx200_ACB=m
-+CONFIG_I2C_SIS5595=m
-+CONFIG_I2C_SIS630=m
-+CONFIG_I2C_SIS96X=m
-+CONFIG_I2C_STUB=m
-+CONFIG_I2C_VIA=m
-+CONFIG_I2C_VIAPRO=m
-+CONFIG_I2C_VOODOO3=m
-+CONFIG_I2C_PCA_ISA=m
-+
-+#
-+# Hardware Sensors Chip support
-+#
-+CONFIG_I2C_SENSOR=m
-+CONFIG_SENSORS_ADM1021=m
-+CONFIG_SENSORS_ADM1025=m
-+CONFIG_SENSORS_ADM1026=m
-+CONFIG_SENSORS_ADM1031=m
-+CONFIG_SENSORS_ASB100=m
-+CONFIG_SENSORS_DS1621=m
-+CONFIG_SENSORS_FSCHER=m
-+CONFIG_SENSORS_FSCPOS=m
-+CONFIG_SENSORS_GL518SM=m
-+CONFIG_SENSORS_GL520SM=m
-+CONFIG_SENSORS_IT87=m
-+CONFIG_SENSORS_LM63=m
-+CONFIG_SENSORS_LM75=m
-+CONFIG_SENSORS_LM77=m
-+CONFIG_SENSORS_LM78=m
-+CONFIG_SENSORS_LM80=m
-+CONFIG_SENSORS_LM83=m
-+CONFIG_SENSORS_LM85=m
-+CONFIG_SENSORS_LM87=m
-+CONFIG_SENSORS_LM90=m
-+CONFIG_SENSORS_LM92=m
-+CONFIG_SENSORS_MAX1619=m
-+CONFIG_SENSORS_PC87360=m
-+# CONFIG_SENSORS_SMSC47B397 is not set
-+CONFIG_SENSORS_SIS5595=m
-+CONFIG_SENSORS_SMSC47M1=m
-+CONFIG_SENSORS_VIA686A=m
-+CONFIG_SENSORS_W83781D=m
-+CONFIG_SENSORS_W83L785TS=m
-+CONFIG_SENSORS_W83627HF=m
-+
-+#
-+# Other I2C Chip support
-+#
-+CONFIG_SENSORS_DS1337=m
-+CONFIG_SENSORS_EEPROM=m
-+CONFIG_SENSORS_PCF8574=m
-+CONFIG_SENSORS_PCF8591=m
-+CONFIG_SENSORS_RTC8564=m
-+# CONFIG_I2C_DEBUG_CORE is not set
-+# CONFIG_I2C_DEBUG_ALGO is not set
-+# CONFIG_I2C_DEBUG_BUS is not set
-+# CONFIG_I2C_DEBUG_CHIP is not set
-+
-+#
-+# Dallas's 1-wire bus
-+#
-+CONFIG_W1=m
-+CONFIG_W1_MATROX=m
-+CONFIG_W1_DS9490=m
-+CONFIG_W1_DS9490_BRIDGE=m
-+CONFIG_W1_THERM=m
-+CONFIG_W1_SMEM=m
-+
-+#
-+# Misc devices
-+#
-+CONFIG_IBM_ASM=m
-+
-+#
-+# Multimedia devices
-+#
-+CONFIG_VIDEO_DEV=m
-+
-+#
-+# Video For Linux
-+#
-+
-+#
-+# Video Adapters
-+#
-+CONFIG_VIDEO_BT848=m
-+CONFIG_VIDEO_PMS=m
-+CONFIG_VIDEO_BWQCAM=m
-+CONFIG_VIDEO_CQCAM=m
-+CONFIG_VIDEO_W9966=m
-+CONFIG_VIDEO_CPIA=m
-+CONFIG_VIDEO_CPIA_PP=m
-+CONFIG_VIDEO_CPIA_USB=m
-+CONFIG_VIDEO_SAA5246A=m
-+CONFIG_VIDEO_SAA5249=m
-+CONFIG_TUNER_3036=m
-+CONFIG_VIDEO_STRADIS=m
-+CONFIG_VIDEO_ZORAN=m
-+CONFIG_VIDEO_ZORAN_BUZ=m
-+CONFIG_VIDEO_ZORAN_DC10=m
-+CONFIG_VIDEO_ZORAN_DC30=m
-+CONFIG_VIDEO_ZORAN_LML33=m
-+CONFIG_VIDEO_ZORAN_LML33R10=m
-+# CONFIG_VIDEO_ZR36120 is not set
-+CONFIG_VIDEO_MEYE=m
-+# CONFIG_VIDEO_SAA7134 is not set
-+CONFIG_VIDEO_MXB=m
-+CONFIG_VIDEO_DPC=m
-+CONFIG_VIDEO_HEXIUM_ORION=m
-+CONFIG_VIDEO_HEXIUM_GEMINI=m
-+CONFIG_VIDEO_CX88=m
-+# CONFIG_VIDEO_CX88_DVB is not set
-+CONFIG_VIDEO_OVCAMCHIP=m
-+
-+#
-+# Radio Adapters
-+#
-+CONFIG_RADIO_CADET=m
-+CONFIG_RADIO_RTRACK=m
-+CONFIG_RADIO_RTRACK2=m
-+CONFIG_RADIO_AZTECH=m
-+CONFIG_RADIO_GEMTEK=m
-+CONFIG_RADIO_GEMTEK_PCI=m
-+CONFIG_RADIO_MAXIRADIO=m
-+CONFIG_RADIO_MAESTRO=m
-+CONFIG_RADIO_MIROPCM20=m
-+CONFIG_RADIO_MIROPCM20_RDS=m
-+CONFIG_RADIO_SF16FMI=m
-+CONFIG_RADIO_SF16FMR2=m
-+CONFIG_RADIO_TERRATEC=m
-+CONFIG_RADIO_TRUST=m
-+CONFIG_RADIO_TYPHOON=m
-+CONFIG_RADIO_TYPHOON_PROC_FS=y
-+CONFIG_RADIO_ZOLTRIX=m
-+
-+#
-+# Digital Video Broadcasting Devices
-+#
-+CONFIG_DVB=y
-+CONFIG_DVB_CORE=m
-+
-+#
-+# Supported SAA7146 based PCI Adapters
-+#
-+CONFIG_DVB_AV7110=m
-+# CONFIG_DVB_AV7110_OSD is not set
-+CONFIG_DVB_BUDGET=m
-+CONFIG_DVB_BUDGET_CI=m
-+CONFIG_DVB_BUDGET_AV=m
-+CONFIG_DVB_BUDGET_PATCH=m
-+
-+#
-+# Supported USB Adapters
-+#
-+CONFIG_DVB_TTUSB_BUDGET=m
-+CONFIG_DVB_TTUSB_DEC=m
-+CONFIG_DVB_DIBUSB=m
-+CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES=y
-+# CONFIG_DVB_DIBCOM_DEBUG is not set
-+CONFIG_DVB_CINERGYT2=m
-+# CONFIG_DVB_CINERGYT2_TUNING is not set
-+
-+#
-+# Supported FlexCopII (B2C2) Adapters
-+#
-+CONFIG_DVB_B2C2_FLEXCOP=m
-+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
-+CONFIG_DVB_B2C2_FLEXCOP_USB=m
-+# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
-+CONFIG_DVB_B2C2_SKYSTAR=m
-+
-+#
-+# Supported BT878 Adapters
-+#
-+CONFIG_DVB_BT8XX=m
-+
-+#
-+# Supported DVB Frontends
-+#
-+
-+#
-+# Customise DVB Frontends
-+#
-+
-+#
-+# DVB-S (satellite) frontends
-+#
-+CONFIG_DVB_STV0299=m
-+CONFIG_DVB_CX24110=m
-+CONFIG_DVB_TDA8083=m
-+CONFIG_DVB_TDA80XX=m
-+CONFIG_DVB_MT312=m
-+CONFIG_DVB_VES1X93=m
-+
-+#
-+# DVB-T (terrestrial) frontends
-+#
-+CONFIG_DVB_SP8870=m
-+CONFIG_DVB_SP887X=m
-+CONFIG_DVB_CX22700=m
-+CONFIG_DVB_CX22702=m
-+CONFIG_DVB_L64781=m
-+CONFIG_DVB_TDA1004X=m
-+CONFIG_DVB_NXT6000=m
-+CONFIG_DVB_MT352=m
-+CONFIG_DVB_DIB3000MB=m
-+CONFIG_DVB_DIB3000MC=m
-+
-+#
-+# DVB-C (cable) frontends
-+#
-+CONFIG_DVB_ATMEL_AT76C651=m
-+CONFIG_DVB_VES1820=m
-+CONFIG_DVB_TDA10021=m
-+CONFIG_DVB_STV0297=m
-+
-+#
-+# ATSC (North American/Korean Terresterial DTV) frontends
-+#
-+CONFIG_DVB_NXT2002=m
-+CONFIG_DVB_OR51211=m
-+CONFIG_DVB_OR51132=m
-+CONFIG_VIDEO_SAA7146=m
-+CONFIG_VIDEO_SAA7146_VV=m
-+CONFIG_VIDEO_VIDEOBUF=m
-+CONFIG_VIDEO_TUNER=m
-+CONFIG_VIDEO_BUF=m
-+CONFIG_VIDEO_BTCX=m
-+CONFIG_VIDEO_IR=m
-+CONFIG_VIDEO_TVEEPROM=m
-+
-+#
-+# Graphics support
-+#
-+CONFIG_FB=y
-+CONFIG_FB_CFB_FILLRECT=m
-+CONFIG_FB_CFB_COPYAREA=m
-+CONFIG_FB_CFB_IMAGEBLIT=m
-+CONFIG_FB_SOFT_CURSOR=m
-+# CONFIG_FB_MACMODES is not set
-+CONFIG_FB_MODE_HELPERS=y
-+CONFIG_FB_TILEBLITTING=y
-+CONFIG_FB_CIRRUS=m
-+CONFIG_FB_PM2=m
-+CONFIG_FB_PM2_FIFO_DISCONNECT=y
-+CONFIG_FB_CYBER2000=m
-+# CONFIG_FB_ASILIANT is not set
-+# CONFIG_FB_IMSTT is not set
-+CONFIG_FB_VGA16=m
-+# CONFIG_FB_VESA is not set
-+CONFIG_VIDEO_SELECT=y
-+CONFIG_FB_HGA=m
-+# CONFIG_FB_HGA_ACCEL is not set
-+CONFIG_FB_NVIDIA=m
-+CONFIG_FB_NVIDIA_I2C=y
-+CONFIG_FB_RIVA=m
-+CONFIG_FB_RIVA_I2C=y
-+CONFIG_FB_RIVA_DEBUG=y
-+CONFIG_FB_I810=m
-+# CONFIG_FB_I810_GTF is not set
-+CONFIG_FB_INTEL=m
-+# CONFIG_FB_INTEL_DEBUG is not set
-+CONFIG_FB_MATROX=m
-+CONFIG_FB_MATROX_MILLENIUM=y
-+CONFIG_FB_MATROX_MYSTIQUE=y
-+# CONFIG_FB_MATROX_G is not set
-+CONFIG_FB_MATROX_I2C=m
-+CONFIG_FB_MATROX_MULTIHEAD=y
-+CONFIG_FB_RADEON_OLD=m
-+CONFIG_FB_RADEON=m
-+CONFIG_FB_RADEON_I2C=y
-+# CONFIG_FB_RADEON_DEBUG is not set
-+CONFIG_FB_ATY128=m
-+CONFIG_FB_ATY=m
-+CONFIG_FB_ATY_CT=y
-+CONFIG_FB_ATY_GENERIC_LCD=y
-+CONFIG_FB_ATY_XL_INIT=y
-+CONFIG_FB_ATY_GX=y
-+CONFIG_FB_SAVAGE=m
-+CONFIG_FB_SAVAGE_I2C=y
-+CONFIG_FB_SAVAGE_ACCEL=y
-+CONFIG_FB_SIS=m
-+CONFIG_FB_SIS_300=y
-+CONFIG_FB_SIS_315=y
-+CONFIG_FB_NEOMAGIC=m
-+CONFIG_FB_KYRO=m
-+CONFIG_FB_3DFX=m
-+# CONFIG_FB_3DFX_ACCEL is not set
-+CONFIG_FB_VOODOO1=m
-+CONFIG_FB_TRIDENT=m
-+# CONFIG_FB_TRIDENT_ACCEL is not set
-+# CONFIG_FB_PM3 is not set
-+CONFIG_FB_GEODE=y
-+CONFIG_FB_GEODE_GX1=m
-+CONFIG_FB_S1D13XXX=m
-+CONFIG_FB_VIRTUAL=m
-+
-+#
-+# Console display driver support
-+#
-+CONFIG_VGA_CONSOLE=y
-+CONFIG_MDA_CONSOLE=m
-+CONFIG_DUMMY_CONSOLE=y
-+CONFIG_FRAMEBUFFER_CONSOLE=m
-+# CONFIG_FONTS is not set
-+CONFIG_FONT_8x8=y
-+CONFIG_FONT_8x16=y
-+
-+#
-+# Logo configuration
-+#
-+# CONFIG_LOGO is not set
-+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-+
-+#
-+# Sound
-+#
-+CONFIG_SOUND=m
-+
-+#
-+# Advanced Linux Sound Architecture
-+#
-+CONFIG_SND=m
-+CONFIG_SND_TIMER=m
-+CONFIG_SND_PCM=m
-+CONFIG_SND_HWDEP=m
-+CONFIG_SND_RAWMIDI=m
-+CONFIG_SND_SEQUENCER=m
-+CONFIG_SND_SEQ_DUMMY=m
-+CONFIG_SND_OSSEMUL=y
-+CONFIG_SND_MIXER_OSS=m
-+CONFIG_SND_PCM_OSS=m
-+CONFIG_SND_SEQUENCER_OSS=y
-+CONFIG_SND_RTCTIMER=m
-+# CONFIG_SND_VERBOSE_PRINTK is not set
-+# CONFIG_SND_DEBUG is not set
-+CONFIG_SND_GENERIC_PM=y
-+
-+#
-+# Generic devices
-+#
-+CONFIG_SND_MPU401_UART=m
-+CONFIG_SND_OPL3_LIB=m
-+CONFIG_SND_OPL4_LIB=m
-+CONFIG_SND_VX_LIB=m
-+CONFIG_SND_DUMMY=m
-+CONFIG_SND_VIRMIDI=m
-+CONFIG_SND_MTPAV=m
-+CONFIG_SND_SERIAL_U16550=m
-+CONFIG_SND_MPU401=m
-+
-+#
-+# ISA devices
-+#
-+CONFIG_SND_AD1848_LIB=m
-+CONFIG_SND_CS4231_LIB=m
-+CONFIG_SND_AD1816A=m
-+CONFIG_SND_AD1848=m
-+CONFIG_SND_CS4231=m
-+CONFIG_SND_CS4232=m
-+CONFIG_SND_CS4236=m
-+CONFIG_SND_ES968=m
-+CONFIG_SND_ES1688=m
-+CONFIG_SND_ES18XX=m
-+CONFIG_SND_GUS_SYNTH=m
-+CONFIG_SND_GUSCLASSIC=m
-+CONFIG_SND_GUSEXTREME=m
-+CONFIG_SND_GUSMAX=m
-+CONFIG_SND_INTERWAVE=m
-+CONFIG_SND_INTERWAVE_STB=m
-+CONFIG_SND_OPTI92X_AD1848=m
-+CONFIG_SND_OPTI92X_CS4231=m
-+CONFIG_SND_OPTI93X=m
-+CONFIG_SND_SB8=m
-+CONFIG_SND_SB16=m
-+CONFIG_SND_SBAWE=m
-+CONFIG_SND_SB16_CSP=y
-+CONFIG_SND_WAVEFRONT=m
-+CONFIG_SND_ALS100=m
-+CONFIG_SND_AZT2320=m
-+CONFIG_SND_CMI8330=m
-+CONFIG_SND_DT019X=m
-+CONFIG_SND_OPL3SA2=m
-+CONFIG_SND_SGALAXY=m
-+CONFIG_SND_SSCAPE=m
-+
-+#
-+# PCI devices
-+#
-+CONFIG_SND_AC97_CODEC=m
-+CONFIG_SND_ALI5451=m
-+CONFIG_SND_ATIIXP=m
-+CONFIG_SND_ATIIXP_MODEM=m
-+CONFIG_SND_AU8810=m
-+CONFIG_SND_AU8820=m
-+CONFIG_SND_AU8830=m
-+CONFIG_SND_AZT3328=m
-+CONFIG_SND_BT87X=m
-+# CONFIG_SND_BT87X_OVERCLOCK is not set
-+CONFIG_SND_CS46XX=m
-+CONFIG_SND_CS46XX_NEW_DSP=y
-+CONFIG_SND_CS4281=m
-+CONFIG_SND_EMU10K1=m
-+# CONFIG_SND_EMU10K1X is not set
-+# CONFIG_SND_CA0106 is not set
-+CONFIG_SND_KORG1212=m
-+CONFIG_SND_MIXART=m
-+CONFIG_SND_NM256=m
-+CONFIG_SND_RME32=m
-+CONFIG_SND_RME96=m
-+CONFIG_SND_RME9652=m
-+CONFIG_SND_HDSP=m
-+CONFIG_SND_TRIDENT=m
-+CONFIG_SND_YMFPCI=m
-+CONFIG_SND_ALS4000=m
-+CONFIG_SND_CMIPCI=m
-+CONFIG_SND_ENS1370=m
-+CONFIG_SND_ENS1371=m
-+CONFIG_SND_ES1938=m
-+CONFIG_SND_ES1968=m
-+CONFIG_SND_MAESTRO3=m
-+CONFIG_SND_FM801=m
-+CONFIG_SND_FM801_TEA575X=m
-+CONFIG_SND_ICE1712=m
-+CONFIG_SND_ICE1724=m
-+CONFIG_SND_INTEL8X0=m
-+CONFIG_SND_INTEL8X0M=m
-+CONFIG_SND_SONICVIBES=m
-+CONFIG_SND_VIA82XX=m
-+# CONFIG_SND_VIA82XX_MODEM is not set
-+CONFIG_SND_VX222=m
-+CONFIG_SND_HDA_INTEL=m
-+
-+#
-+# USB devices
-+#
-+CONFIG_SND_USB_AUDIO=m
-+CONFIG_SND_USB_USX2Y=m
-+
-+#
-+# PCMCIA devices
-+#
-+CONFIG_SND_VXPOCKET=m
-+CONFIG_SND_VXP440=m
-+CONFIG_SND_PDAUDIOCF=m
-+
-+#
-+# Open Sound System
-+#
-+CONFIG_SOUND_PRIME=m
-+CONFIG_SOUND_BT878=m
-+CONFIG_SOUND_CMPCI=m
-+# CONFIG_SOUND_CMPCI_FM is not set
-+# CONFIG_SOUND_CMPCI_MIDI is not set
-+CONFIG_SOUND_CMPCI_JOYSTICK=y
-+CONFIG_SOUND_EMU10K1=m
-+CONFIG_MIDI_EMU10K1=y
-+CONFIG_SOUND_FUSION=m
-+CONFIG_SOUND_CS4281=m
-+CONFIG_SOUND_ES1370=m
-+CONFIG_SOUND_ES1371=m
-+CONFIG_SOUND_ESSSOLO1=m
-+CONFIG_SOUND_MAESTRO=m
-+CONFIG_SOUND_MAESTRO3=m
-+CONFIG_SOUND_ICH=m
-+CONFIG_SOUND_SONICVIBES=m
-+CONFIG_SOUND_TRIDENT=m
-+# CONFIG_SOUND_MSNDCLAS is not set
-+# CONFIG_SOUND_MSNDPIN is not set
-+CONFIG_SOUND_VIA82CXXX=m
-+CONFIG_MIDI_VIA82CXXX=y
-+CONFIG_SOUND_OSS=m
-+# CONFIG_SOUND_TRACEINIT is not set
-+# CONFIG_SOUND_DMAP is not set
-+# CONFIG_SOUND_AD1816 is not set
-+CONFIG_SOUND_AD1889=m
-+CONFIG_SOUND_SGALAXY=m
-+CONFIG_SOUND_ADLIB=m
-+CONFIG_SOUND_ACI_MIXER=m
-+CONFIG_SOUND_CS4232=m
-+CONFIG_SOUND_SSCAPE=m
-+CONFIG_SOUND_GUS=m
-+CONFIG_SOUND_GUS16=y
-+CONFIG_SOUND_GUSMAX=y
-+CONFIG_SOUND_VMIDI=m
-+CONFIG_SOUND_TRIX=m
-+CONFIG_SOUND_MSS=m
-+CONFIG_SOUND_MPU401=m
-+CONFIG_SOUND_NM256=m
-+CONFIG_SOUND_MAD16=m
-+CONFIG_MAD16_OLDCARD=y
-+CONFIG_SOUND_PAS=m
-+CONFIG_SOUND_PSS=m
-+CONFIG_PSS_MIXER=y
-+CONFIG_SOUND_SB=m
-+# CONFIG_SOUND_AWE32_SYNTH is not set
-+CONFIG_SOUND_WAVEFRONT=m
-+CONFIG_SOUND_MAUI=m
-+CONFIG_SOUND_YM3812=m
-+CONFIG_SOUND_OPL3SA1=m
-+CONFIG_SOUND_OPL3SA2=m
-+CONFIG_SOUND_YMFPCI=m
-+# CONFIG_SOUND_YMFPCI_LEGACY is not set
-+CONFIG_SOUND_UART6850=m
-+CONFIG_SOUND_AEDSP16=m
-+CONFIG_SC6600=y
-+CONFIG_SC6600_JOY=y
-+CONFIG_SC6600_CDROM=4
-+CONFIG_SC6600_CDROMBASE=0x0
-+# CONFIG_AEDSP16_MSS is not set
-+# CONFIG_AEDSP16_SBPRO is not set
-+# CONFIG_AEDSP16_MPU401 is not set
-+CONFIG_SOUND_TVMIXER=m
-+CONFIG_SOUND_KAHLUA=m
-+CONFIG_SOUND_ALI5455=m
-+CONFIG_SOUND_FORTE=m
-+CONFIG_SOUND_RME96XX=m
-+CONFIG_SOUND_AD1980=m
-+
-+#
-+# USB support
-+#
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB=y
-+# CONFIG_USB_DEBUG is not set
-+
-+#
-+# Miscellaneous USB options
-+#
-+CONFIG_USB_DEVICEFS=y
-+CONFIG_USB_BANDWIDTH=y
-+# CONFIG_USB_DYNAMIC_MINORS is not set
-+# CONFIG_USB_OTG is not set
-+
-+#
-+# USB Host Controller Drivers
-+#
-+CONFIG_USB_EHCI_HCD=y
-+CONFIG_USB_EHCI_SPLIT_ISO=y
-+CONFIG_USB_EHCI_ROOT_HUB_TT=y
-+CONFIG_USB_OHCI_HCD=m
-+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
-+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-+CONFIG_USB_UHCI_HCD=m
-+CONFIG_USB_SL811_HCD=m
-+CONFIG_USB_SL811_CS=m
-+
-+#
-+# USB Device Class drivers
-+#
-+CONFIG_USB_AUDIO=m
-+
-+#
-+# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
-+#
-+CONFIG_USB_MIDI=m
-+CONFIG_USB_ACM=m
-+CONFIG_USB_PRINTER=m
-+
-+#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-+#
-+CONFIG_USB_STORAGE=m
-+# CONFIG_USB_STORAGE_DEBUG is not set
-+CONFIG_USB_STORAGE_DATAFAB=y
-+CONFIG_USB_STORAGE_FREECOM=y
-+CONFIG_USB_STORAGE_ISD200=y
-+CONFIG_USB_STORAGE_DPCM=y
-+CONFIG_USB_STORAGE_USBAT=y
-+CONFIG_USB_STORAGE_SDDR09=y
-+CONFIG_USB_STORAGE_SDDR55=y
-+CONFIG_USB_STORAGE_JUMPSHOT=y
-+
-+#
-+# USB Input Devices
-+#
-+CONFIG_USB_HID=m
-+CONFIG_USB_HIDINPUT=y
-+# CONFIG_HID_FF is not set
-+CONFIG_USB_HIDDEV=y
-+
-+#
-+# USB HID Boot Protocol drivers
-+#
-+CONFIG_USB_KBD=m
-+CONFIG_USB_MOUSE=m
-+CONFIG_USB_AIPTEK=m
-+CONFIG_USB_WACOM=m
-+CONFIG_USB_KBTAB=m
-+CONFIG_USB_POWERMATE=m
-+CONFIG_USB_MTOUCH=m
-+CONFIG_USB_EGALAX=m
-+CONFIG_USB_XPAD=m
-+CONFIG_USB_ATI_REMOTE=m
-+
-+#
-+# USB Imaging devices
-+#
-+CONFIG_USB_MDC800=m
-+CONFIG_USB_MICROTEK=m
-+
-+#
-+# USB Multimedia devices
-+#
-+# CONFIG_USB_DABUSB is not set
-+CONFIG_USB_VICAM=m
-+CONFIG_USB_DSBR=m
-+CONFIG_USB_IBMCAM=m
-+CONFIG_USB_KONICAWC=m
-+CONFIG_USB_OV511=m
-+CONFIG_USB_SE401=m
-+CONFIG_USB_SN9C102=m
-+CONFIG_USB_STV680=m
-+CONFIG_USB_W9968CF=m
-+CONFIG_USB_PWC=m
-+
-+#
-+# USB Network Adapters
-+#
-+CONFIG_USB_CATC=m
-+CONFIG_USB_KAWETH=m
-+CONFIG_USB_PEGASUS=m
-+CONFIG_USB_RTL8150=m
-+CONFIG_USB_USBNET=m
-+
-+#
-+# USB Host-to-Host Cables
-+#
-+CONFIG_USB_ALI_M5632=y
-+CONFIG_USB_AN2720=y
-+CONFIG_USB_BELKIN=y
-+CONFIG_USB_GENESYS=y
-+CONFIG_USB_NET1080=y
-+CONFIG_USB_PL2301=y
-+CONFIG_USB_KC2190=y
-+
-+#
-+# Intelligent USB Devices/Gadgets
-+#
-+CONFIG_USB_ARMLINUX=y
-+CONFIG_USB_EPSON2888=y
-+CONFIG_USB_ZAURUS=y
-+CONFIG_USB_CDCETHER=y
-+
-+#
-+# USB Network Adapters
-+#
-+CONFIG_USB_AX8817X=y
-+CONFIG_USB_ZD1201=m
-+CONFIG_USB_MON=m
-+
-+#
-+# USB port drivers
-+#
-+CONFIG_USB_USS720=m
-+
-+#
-+# USB Serial Converter support
-+#
-+CONFIG_USB_SERIAL=m
-+CONFIG_USB_SERIAL_GENERIC=y
-+CONFIG_USB_SERIAL_AIRPRIME=m
-+CONFIG_USB_SERIAL_BELKIN=m
-+CONFIG_USB_SERIAL_WHITEHEAT=m
-+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-+CONFIG_USB_SERIAL_CP2101=m
-+CONFIG_USB_SERIAL_CYPRESS_M8=m
-+CONFIG_USB_SERIAL_EMPEG=m
-+CONFIG_USB_SERIAL_FTDI_SIO=m
-+CONFIG_USB_SERIAL_VISOR=m
-+CONFIG_USB_SERIAL_IPAQ=m
-+CONFIG_USB_SERIAL_IR=m
-+CONFIG_USB_SERIAL_EDGEPORT=m
-+CONFIG_USB_SERIAL_EDGEPORT_TI=m
-+# CONFIG_USB_SERIAL_GARMIN is not set
-+CONFIG_USB_SERIAL_IPW=m
-+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
-+CONFIG_USB_SERIAL_KEYSPAN=m
-+# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set
-+CONFIG_USB_SERIAL_KLSI=m
-+CONFIG_USB_SERIAL_KOBIL_SCT=m
-+CONFIG_USB_SERIAL_MCT_U232=m
-+CONFIG_USB_SERIAL_PL2303=m
-+CONFIG_USB_SERIAL_HP4X=m
-+CONFIG_USB_SERIAL_SAFE=m
-+# CONFIG_USB_SERIAL_SAFE_PADDED is not set
-+# CONFIG_USB_SERIAL_TI is not set
-+CONFIG_USB_SERIAL_CYBERJACK=m
-+CONFIG_USB_SERIAL_XIRCOM=m
-+CONFIG_USB_SERIAL_OPTION=m
-+CONFIG_USB_SERIAL_OMNINET=m
-+CONFIG_USB_EZUSB=y
-+
-+#
-+# USB Miscellaneous drivers
-+#
-+# CONFIG_USB_EMI62 is not set
-+# CONFIG_USB_EMI26 is not set
-+CONFIG_USB_AUERSWALD=m
-+CONFIG_USB_RIO500=m
-+CONFIG_USB_LEGOTOWER=m
-+CONFIG_USB_LCD=m
-+CONFIG_USB_LED=m
-+CONFIG_USB_CYTHERM=m
-+CONFIG_USB_PHIDGETKIT=m
-+CONFIG_USB_PHIDGETSERVO=m
-+# CONFIG_USB_IDMOUSE is not set
-+CONFIG_USB_SISUSBVGA=m
-+CONFIG_USB_TEST=m
-+
-+#
-+# USB ATM/DSL drivers
-+#
-+CONFIG_USB_ATM=m
-+CONFIG_USB_SPEEDTOUCH=m
-+
-+#
-+# USB Gadget Support
-+#
-+CONFIG_USB_GADGET=m
-+# CONFIG_USB_GADGET_DEBUG_FILES is not set
-+CONFIG_USB_GADGET_NET2280=y
-+CONFIG_USB_NET2280=m
-+# CONFIG_USB_GADGET_PXA2XX is not set
-+# CONFIG_USB_GADGET_GOKU is not set
-+# CONFIG_USB_GADGET_LH7A40X is not set
-+# CONFIG_USB_GADGET_OMAP is not set
-+# CONFIG_USB_GADGET_DUMMY_HCD is not set
-+CONFIG_USB_GADGET_DUALSPEED=y
-+CONFIG_USB_ZERO=m
-+CONFIG_USB_ETH=m
-+CONFIG_USB_ETH_RNDIS=y
-+CONFIG_USB_GADGETFS=m
-+CONFIG_USB_FILE_STORAGE=m
-+# CONFIG_USB_FILE_STORAGE_TEST is not set
-+CONFIG_USB_G_SERIAL=m
-+
-+#
-+# MMC/SD Card support
-+#
-+# CONFIG_MMC is not set
-+
-+#
-+# InfiniBand support
-+#
-+# CONFIG_INFINIBAND is not set
-+
-+#
-+# Power management options
-+#
-+
-+#
-+# ACPI (Advanced Configuration and Power Interface) Support
-+#
-+CONFIG_ACPI=y
-+CONFIG_ACPI_BOOT=y
-+CONFIG_ACPI_INTERPRETER=y
-+CONFIG_ACPI_AC=m
-+CONFIG_ACPI_BATTERY=m
-+CONFIG_ACPI_BUTTON=m
-+CONFIG_ACPI_VIDEO=m
-+CONFIG_ACPI_FAN=m
-+CONFIG_ACPI_PROCESSOR=m
-+# CONFIG_ACPI_HOTPLUG_CPU is not set
-+CONFIG_ACPI_THERMAL=m
-+CONFIG_ACPI_ASUS=m
-+CONFIG_ACPI_IBM=m
-+CONFIG_ACPI_TOSHIBA=m
-+CONFIG_ACPI_BLACKLIST_YEAR=0
-+# CONFIG_ACPI_DEBUG is not set
-+CONFIG_ACPI_BUS=y
-+CONFIG_ACPI_EC=y
-+CONFIG_ACPI_POWER=y
-+CONFIG_ACPI_PCI=y
-+CONFIG_ACPI_SYSTEM=y
-+# CONFIG_X86_PM_TIMER is not set
-+# CONFIG_ACPI_CONTAINER is not set
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+CONFIG_EXT2_FS_XATTR=y
-+CONFIG_EXT2_FS_POSIX_ACL=y
-+CONFIG_EXT2_FS_SECURITY=y
-+CONFIG_EXT3_FS=m
-+CONFIG_EXT3_FS_XATTR=y
-+CONFIG_EXT3_FS_POSIX_ACL=y
-+CONFIG_EXT3_FS_SECURITY=y
-+CONFIG_JBD=m
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+CONFIG_REISERFS_FS=m
-+# CONFIG_REISERFS_CHECK is not set
-+# CONFIG_REISERFS_PROC_INFO is not set
-+# CONFIG_REISERFS_FS_XATTR is not set
-+CONFIG_JFS_FS=m
-+CONFIG_JFS_POSIX_ACL=y
-+# CONFIG_JFS_SECURITY is not set
-+# CONFIG_JFS_DEBUG is not set
-+CONFIG_JFS_STATISTICS=y
-+CONFIG_FS_POSIX_ACL=y
-+
-+#
-+# XFS support
-+#
-+CONFIG_XFS_FS=m
-+CONFIG_XFS_EXPORT=y
-+CONFIG_XFS_RT=y
-+CONFIG_XFS_QUOTA=y
-+CONFIG_XFS_SECURITY=y
-+CONFIG_XFS_POSIX_ACL=y
-+CONFIG_MINIX_FS=m
-+CONFIG_ROMFS_FS=m
-+CONFIG_QUOTA=y
-+CONFIG_QFMT_V1=m
-+CONFIG_QFMT_V2=m
-+CONFIG_QUOTACTL=y
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=m
-+CONFIG_AUTOFS4_FS=m
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=m
-+CONFIG_JOLIET=y
-+CONFIG_ZISOFS=y
-+CONFIG_ZISOFS_FS=m
-+CONFIG_UDF_FS=m
-+CONFIG_UDF_NLS=y
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=m
-+CONFIG_MSDOS_FS=m
-+CONFIG_VFAT_FS=m
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-+CONFIG_NTFS_FS=m
-+# CONFIG_NTFS_DEBUG is not set
-+# CONFIG_NTFS_RW is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+CONFIG_DEVPTS_FS_XATTR=y
-+CONFIG_DEVPTS_FS_SECURITY=y
-+CONFIG_TMPFS=y
-+CONFIG_TMPFS_XATTR=y
-+CONFIG_TMPFS_SECURITY=y
-+# CONFIG_HUGETLBFS is not set
-+# CONFIG_HUGETLB_PAGE is not set
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+CONFIG_ADFS_FS=m
-+# CONFIG_ADFS_FS_RW is not set
-+CONFIG_AFFS_FS=m
-+CONFIG_HFS_FS=m
-+CONFIG_HFSPLUS_FS=m
-+CONFIG_BEFS_FS=m
-+# CONFIG_BEFS_DEBUG is not set
-+CONFIG_BFS_FS=m
-+CONFIG_EFS_FS=m
-+CONFIG_JFFS_FS=m
-+CONFIG_JFFS_FS_VERBOSE=0
-+CONFIG_JFFS_PROC_FS=y
-+CONFIG_JFFS2_FS=m
-+CONFIG_JFFS2_FS_DEBUG=0
-+# CONFIG_JFFS2_FS_NAND is not set
-+# CONFIG_JFFS2_FS_NOR_ECC is not set
-+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-+CONFIG_JFFS2_ZLIB=y
-+CONFIG_JFFS2_RTIME=y
-+# CONFIG_JFFS2_RUBIN is not set
-+CONFIG_CRAMFS=y
-+CONFIG_VXFS_FS=m
-+CONFIG_HPFS_FS=m
-+CONFIG_QNX4FS_FS=m
-+# CONFIG_QNX4FS_RW is not set
-+CONFIG_SYSV_FS=m
-+CONFIG_UFS_FS=m
-+# CONFIG_UFS_FS_WRITE is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=m
-+CONFIG_NFS_V3=y
-+CONFIG_NFS_V4=y
-+CONFIG_NFS_DIRECTIO=y
-+CONFIG_NFSD=m
-+CONFIG_NFSD_V3=y
-+CONFIG_NFSD_V4=y
-+CONFIG_NFSD_TCP=y
-+CONFIG_LOCKD=m
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=m
-+CONFIG_SUNRPC=m
-+CONFIG_SUNRPC_GSS=m
-+CONFIG_RPCSEC_GSS_KRB5=m
-+CONFIG_RPCSEC_GSS_SPKM3=m
-+CONFIG_SMB_FS=m
-+# CONFIG_SMB_NLS_DEFAULT is not set
-+CONFIG_CIFS=m
-+# CONFIG_CIFS_STATS is not set
-+# CONFIG_CIFS_XATTR is not set
-+# CONFIG_CIFS_EXPERIMENTAL is not set
-+CONFIG_NCP_FS=m
-+CONFIG_NCPFS_PACKET_SIGNING=y
-+CONFIG_NCPFS_IOCTL_LOCKING=y
-+CONFIG_NCPFS_STRONG=y
-+CONFIG_NCPFS_NFS_NS=y
-+CONFIG_NCPFS_OS2_NS=y
-+# CONFIG_NCPFS_SMALLDOS is not set
-+CONFIG_NCPFS_NLS=y
-+CONFIG_NCPFS_EXTRAS=y
-+CONFIG_CODA_FS=m
-+# CONFIG_CODA_FS_OLD_API is not set
-+CONFIG_AFS_FS=m
-+CONFIG_RXRPC=m
-+
-+#
-+# Partition Types
-+#
-+CONFIG_PARTITION_ADVANCED=y
-+CONFIG_ACORN_PARTITION=y
-+CONFIG_ACORN_PARTITION_CUMANA=y
-+# CONFIG_ACORN_PARTITION_EESOX is not set
-+CONFIG_ACORN_PARTITION_ICS=y
-+# CONFIG_ACORN_PARTITION_ADFS is not set
-+# CONFIG_ACORN_PARTITION_POWERTEC is not set
-+CONFIG_ACORN_PARTITION_RISCIX=y
-+CONFIG_OSF_PARTITION=y
-+CONFIG_AMIGA_PARTITION=y
-+CONFIG_ATARI_PARTITION=y
-+CONFIG_MAC_PARTITION=y
-+CONFIG_MSDOS_PARTITION=y
-+CONFIG_BSD_DISKLABEL=y
-+CONFIG_MINIX_SUBPARTITION=y
-+CONFIG_SOLARIS_X86_PARTITION=y
-+CONFIG_UNIXWARE_DISKLABEL=y
-+CONFIG_LDM_PARTITION=y
-+# CONFIG_LDM_DEBUG is not set
-+CONFIG_SGI_PARTITION=y
-+CONFIG_ULTRIX_PARTITION=y
-+CONFIG_SUN_PARTITION=y
-+CONFIG_EFI_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="cp437"
-+CONFIG_NLS_CODEPAGE_437=m
-+CONFIG_NLS_CODEPAGE_737=m
-+CONFIG_NLS_CODEPAGE_775=m
-+CONFIG_NLS_CODEPAGE_850=m
-+CONFIG_NLS_CODEPAGE_852=m
-+CONFIG_NLS_CODEPAGE_855=m
-+CONFIG_NLS_CODEPAGE_857=m
-+CONFIG_NLS_CODEPAGE_860=m
-+CONFIG_NLS_CODEPAGE_861=m
-+CONFIG_NLS_CODEPAGE_862=m
-+CONFIG_NLS_CODEPAGE_863=m
-+CONFIG_NLS_CODEPAGE_864=m
-+CONFIG_NLS_CODEPAGE_865=m
-+CONFIG_NLS_CODEPAGE_866=m
-+CONFIG_NLS_CODEPAGE_869=m
-+CONFIG_NLS_CODEPAGE_936=m
-+CONFIG_NLS_CODEPAGE_950=m
-+CONFIG_NLS_CODEPAGE_932=m
-+CONFIG_NLS_CODEPAGE_949=m
-+CONFIG_NLS_CODEPAGE_874=m
-+CONFIG_NLS_ISO8859_8=m
-+CONFIG_NLS_CODEPAGE_1250=m
-+CONFIG_NLS_CODEPAGE_1251=m
-+CONFIG_NLS_ASCII=m
-+CONFIG_NLS_ISO8859_1=m
-+CONFIG_NLS_ISO8859_2=m
-+CONFIG_NLS_ISO8859_3=m
-+CONFIG_NLS_ISO8859_4=m
-+CONFIG_NLS_ISO8859_5=m
-+CONFIG_NLS_ISO8859_6=m
-+CONFIG_NLS_ISO8859_7=m
-+CONFIG_NLS_ISO8859_9=m
-+CONFIG_NLS_ISO8859_13=m
-+CONFIG_NLS_ISO8859_14=m
-+CONFIG_NLS_ISO8859_15=m
-+CONFIG_NLS_KOI8_R=m
-+CONFIG_NLS_KOI8_U=m
-+CONFIG_NLS_UTF8=m
-+
-+#
-+# Security options
-+#
-+CONFIG_KEYS=y
-+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
-+CONFIG_SECURITY=y
-+# CONFIG_SECURITY_NETWORK is not set
-+CONFIG_SECURITY_CAPABILITIES=y
-+CONFIG_SECURITY_ROOTPLUG=m
-+CONFIG_SECURITY_SECLVL=m
-+CONFIG_SECURITY_SELINUX=y
-+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-+CONFIG_SECURITY_SELINUX_DISABLE=y
-+CONFIG_SECURITY_SELINUX_DEVELOP=y
-+CONFIG_SECURITY_SELINUX_AVC_STATS=y
-+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+CONFIG_CRYPTO_HMAC=y
-+CONFIG_CRYPTO_NULL=m
-+CONFIG_CRYPTO_MD4=m
-+CONFIG_CRYPTO_MD5=y
-+CONFIG_CRYPTO_SHA1=m
-+CONFIG_CRYPTO_SHA256=m
-+CONFIG_CRYPTO_SHA512=m
-+CONFIG_CRYPTO_WP512=m
-+CONFIG_CRYPTO_TGR192=m
-+CONFIG_CRYPTO_DES=m
-+CONFIG_CRYPTO_BLOWFISH=m
-+CONFIG_CRYPTO_TWOFISH=m
-+CONFIG_CRYPTO_SERPENT=m
-+CONFIG_CRYPTO_AES_586=m
-+CONFIG_CRYPTO_CAST5=m
-+CONFIG_CRYPTO_CAST6=m
-+CONFIG_CRYPTO_TEA=m
-+CONFIG_CRYPTO_ARC4=m
-+CONFIG_CRYPTO_KHAZAD=m
-+CONFIG_CRYPTO_ANUBIS=m
-+CONFIG_CRYPTO_DEFLATE=m
-+CONFIG_CRYPTO_MICHAEL_MIC=m
-+CONFIG_CRYPTO_CRC32C=m
-+CONFIG_CRYPTO_TEST=m
-+
-+#
-+# Hardware crypto devices
-+#
-+# CONFIG_CRYPTO_DEV_PADLOCK is not set
-+
-+#
-+# Library routines
-+#
-+CONFIG_CRC_CCITT=m
-+CONFIG_CRC32=y
-+CONFIG_LIBCRC32C=m
-+CONFIG_ZLIB_INFLATE=y
-+CONFIG_ZLIB_DEFLATE=m
-+CONFIG_REED_SOLOMON=m
-+CONFIG_REED_SOLOMON_DEC16=y
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=14
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_HIGHMEM is not set
-+# CONFIG_DEBUG_BUGVERBOSE is not set
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+# CONFIG_FRAME_POINTER is not set
-+# CONFIG_DEBUG_STACKOVERFLOW is not set
-+# CONFIG_KPROBES is not set
-+# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_DEBUG_PAGEALLOC is not set
-+# CONFIG_4KSTACKS is not set
-+CONFIG_X86_FIND_SMP_CONFIG=y
-+CONFIG_X86_MPPARSE=y
-diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xen_defconfig_x86_64 linux-2.6.12-xen/arch/xen/configs/xen_defconfig_x86_64
---- pristine-linux-2.6.12/arch/xen/configs/xen_defconfig_x86_64	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/configs/xen_defconfig_x86_64	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2425 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12.4-xen
-+# Mon Aug 15 19:54:11 2005
-+#
-+CONFIG_XEN=y
-+CONFIG_ARCH_XEN=y
-+CONFIG_NO_IDLE_HZ=y
-+
-+#
-+# XEN
-+#
-+CONFIG_XEN_PRIVILEGED_GUEST=y
-+CONFIG_XEN_PHYSDEV_ACCESS=y
-+CONFIG_XEN_BLKDEV_BACKEND=y
-+# CONFIG_XEN_BLKDEV_TAP_BE is not set
-+CONFIG_XEN_NETDEV_BACKEND=y
-+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
-+# CONFIG_XEN_TPMDEV_FRONTEND is not set
-+# CONFIG_XEN_TPMDEV_BACKEND is not set
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_XEN_NETDEV_FRONTEND=y
-+# CONFIG_XEN_BLKDEV_TAP is not set
-+# CONFIG_XEN_SHADOW_MODE is not set
-+CONFIG_XEN_SCRUB_PAGES=y
-+# CONFIG_XEN_X86 is not set
-+CONFIG_XEN_X86_64=y
-+CONFIG_HAVE_ARCH_ALLOC_SKB=y
-+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+# CONFIG_CLEAN_COMPILE is not set
-+CONFIG_BROKEN=y
-+CONFIG_BROKEN_ON_SMP=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+# CONFIG_POSIX_MQUEUE is not set
-+# CONFIG_BSD_PROCESS_ACCT is not set
-+CONFIG_SYSCTL=y
-+# CONFIG_AUDIT is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+# CONFIG_IKCONFIG is not set
-+# CONFIG_CPUSETS is not set
-+# CONFIG_EMBEDDED is not set
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+CONFIG_KALLSYMS_EXTRA_PASS=y
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+# CONFIG_MODULE_FORCE_UNLOAD is not set
-+CONFIG_OBSOLETE_MODPARM=y
-+# CONFIG_MODVERSIONS is not set
-+CONFIG_MODULE_SRCVERSION_ALL=y
-+CONFIG_KMOD=y
-+CONFIG_STOP_MACHINE=y
-+CONFIG_XENARCH="x86_64"
-+CONFIG_X86=y
-+CONFIG_MMU=y
-+CONFIG_UID16=y
-+CONFIG_GENERIC_ISA_DMA=y
-+CONFIG_GENERIC_IOMAP=y
-+CONFIG_X86_CMPXCHG=y
-+CONFIG_X86_L1_CACHE_SHIFT=7
-+CONFIG_RWSEM_GENERIC_SPINLOCK=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_X86_GOOD_APIC=y
-+# CONFIG_HPET_TIMER is not set
-+CONFIG_SMP=y
-+CONFIG_NR_CPUS=8
-+# CONFIG_SCHED_SMT is not set
-+CONFIG_MICROCODE=y
-+# CONFIG_X86_CPUID is not set
-+# CONFIG_NUMA is not set
-+# CONFIG_MTRR is not set
-+CONFIG_HAVE_DEC_LOCK=y
-+CONFIG_X86_LOCAL_APIC=y
-+CONFIG_X86_IO_APIC=y
-+CONFIG_PCI=y
-+CONFIG_PCI_DIRECT=y
-+# CONFIG_PCI_MMCONFIG is not set
-+CONFIG_ISA_DMA_API=y
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_SECCOMP=y
-+
-+#
-+# X86_64 processor configuration
-+#
-+CONFIG_X86_64=y
-+CONFIG_64BIT=y
-+CONFIG_EARLY_PRINTK=y
-+
-+#
-+# Processor type and features
-+#
-+# CONFIG_MPSC is not set
-+CONFIG_GENERIC_CPU=y
-+CONFIG_X86_L1_CACHE_BYTES=128
-+# CONFIG_X86_TSC is not set
-+CONFIG_X86_XEN_GENAPIC=y
-+# CONFIG_X86_MSR is not set
-+CONFIG_X86_HT=y
-+# CONFIG_K8_NUMA is not set
-+# CONFIG_NUMA_EMU is not set
-+# CONFIG_GART_IOMMU is not set
-+CONFIG_DUMMY_IOMMU=y
-+CONFIG_SWIOTLB=y
-+# CONFIG_X86_MCE is not set
-+
-+#
-+# Power management options
-+#
-+# CONFIG_PM is not set
-+
-+#
-+# CPU Frequency scaling
-+#
-+# CONFIG_CPU_FREQ is not set
-+
-+#
-+# Bus options (PCI etc.)
-+#
-+# CONFIG_UNORDERED_IO is not set
-+
-+#
-+# Executable file formats / Emulations
-+#
-+CONFIG_IA32_EMULATION=y
-+# CONFIG_IA32_AOUT is not set
-+CONFIG_COMPAT=y
-+CONFIG_SYSVIPC_COMPAT=y
-+
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+CONFIG_BINFMT_MISC=y
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+CONFIG_FW_LOADER=y
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Memory Technology Devices (MTD)
-+#
-+CONFIG_MTD=m
-+# CONFIG_MTD_DEBUG is not set
-+CONFIG_MTD_CONCAT=m
-+CONFIG_MTD_PARTITIONS=y
-+CONFIG_MTD_REDBOOT_PARTS=m
-+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-+CONFIG_MTD_CMDLINE_PARTS=y
-+
-+#
-+# User Modules And Translation Layers
-+#
-+CONFIG_MTD_CHAR=m
-+CONFIG_MTD_BLOCK=m
-+CONFIG_MTD_BLOCK_RO=m
-+CONFIG_FTL=m
-+CONFIG_NFTL=m
-+CONFIG_NFTL_RW=y
-+CONFIG_INFTL=m
-+
-+#
-+# RAM/ROM/Flash chip drivers
-+#
-+CONFIG_MTD_CFI=m
-+CONFIG_MTD_JEDECPROBE=m
-+CONFIG_MTD_GEN_PROBE=m
-+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-+CONFIG_MTD_MAP_BANK_WIDTH_1=y
-+CONFIG_MTD_MAP_BANK_WIDTH_2=y
-+CONFIG_MTD_MAP_BANK_WIDTH_4=y
-+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-+CONFIG_MTD_CFI_I1=y
-+CONFIG_MTD_CFI_I2=y
-+# CONFIG_MTD_CFI_I4 is not set
-+# CONFIG_MTD_CFI_I8 is not set
-+CONFIG_MTD_CFI_INTELEXT=m
-+CONFIG_MTD_CFI_AMDSTD=m
-+CONFIG_MTD_CFI_AMDSTD_RETRY=3
-+CONFIG_MTD_CFI_STAA=m
-+CONFIG_MTD_CFI_UTIL=m
-+CONFIG_MTD_RAM=m
-+CONFIG_MTD_ROM=m
-+CONFIG_MTD_ABSENT=m
-+# CONFIG_MTD_OBSOLETE_CHIPS is not set
-+
-+#
-+# Mapping drivers for chip access
-+#
-+CONFIG_MTD_COMPLEX_MAPPINGS=y
-+# CONFIG_MTD_PHYSMAP is not set
-+# CONFIG_MTD_PNC2000 is not set
-+CONFIG_MTD_SC520CDP=m
-+CONFIG_MTD_NETSC520=m
-+CONFIG_MTD_TS5500=m
-+CONFIG_MTD_SBC_GXX=m
-+CONFIG_MTD_ELAN_104NC=m
-+# CONFIG_MTD_AMD76XROM is not set
-+# CONFIG_MTD_ICHXROM is not set
-+CONFIG_MTD_SCB2_FLASH=m
-+# CONFIG_MTD_NETtel is not set
-+# CONFIG_MTD_DILNETPC is not set
-+# CONFIG_MTD_L440GX is not set
-+CONFIG_MTD_PCI=m
-+
-+#
-+# Self-contained MTD device drivers
-+#
-+CONFIG_MTD_PMC551=m
-+# CONFIG_MTD_PMC551_BUGFIX is not set
-+# CONFIG_MTD_PMC551_DEBUG is not set
-+# CONFIG_MTD_SLRAM is not set
-+# CONFIG_MTD_PHRAM is not set
-+CONFIG_MTD_MTDRAM=m
-+CONFIG_MTDRAM_TOTAL_SIZE=4096
-+CONFIG_MTDRAM_ERASE_SIZE=128
-+# CONFIG_MTD_BLKMTD is not set
-+CONFIG_MTD_BLOCK2MTD=m
-+
-+#
-+# Disk-On-Chip Device Drivers
-+#
-+CONFIG_MTD_DOC2000=m
-+# CONFIG_MTD_DOC2001 is not set
-+CONFIG_MTD_DOC2001PLUS=m
-+CONFIG_MTD_DOCPROBE=m
-+CONFIG_MTD_DOCECC=m
-+# CONFIG_MTD_DOCPROBE_ADVANCED is not set
-+CONFIG_MTD_DOCPROBE_ADDRESS=0
-+
-+#
-+# NAND Flash Device Drivers
-+#
-+CONFIG_MTD_NAND=m
-+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-+CONFIG_MTD_NAND_IDS=m
-+# CONFIG_MTD_NAND_DISKONCHIP is not set
-+# CONFIG_MTD_NAND_NANDSIM is not set
-+
-+#
-+# Parallel port support
-+#
-+CONFIG_PARPORT=m
-+CONFIG_PARPORT_PC=m
-+# CONFIG_PARPORT_PC_FIFO is not set
-+# CONFIG_PARPORT_PC_SUPERIO is not set
-+CONFIG_PARPORT_NOT_PC=y
-+# CONFIG_PARPORT_GSC is not set
-+CONFIG_PARPORT_1284=y
-+
-+#
-+# Plug and Play support
-+#
-+# CONFIG_PNP is not set
-+
-+#
-+# Block devices
-+#
-+CONFIG_BLK_DEV_FD=m
-+CONFIG_PARIDE=m
-+CONFIG_PARIDE_PARPORT=m
-+
-+#
-+# Parallel IDE high-level drivers
-+#
-+CONFIG_PARIDE_PD=m
-+CONFIG_PARIDE_PCD=m
-+CONFIG_PARIDE_PF=m
-+CONFIG_PARIDE_PT=m
-+CONFIG_PARIDE_PG=m
-+
-+#
-+# Parallel IDE protocol modules
-+#
-+CONFIG_PARIDE_ATEN=m
-+CONFIG_PARIDE_BPCK=m
-+CONFIG_PARIDE_COMM=m
-+CONFIG_PARIDE_DSTR=m
-+CONFIG_PARIDE_FIT2=m
-+CONFIG_PARIDE_FIT3=m
-+CONFIG_PARIDE_EPAT=m
-+CONFIG_PARIDE_EPATC8=y
-+CONFIG_PARIDE_EPIA=m
-+CONFIG_PARIDE_FRIQ=m
-+CONFIG_PARIDE_FRPW=m
-+CONFIG_PARIDE_KBIC=m
-+CONFIG_PARIDE_KTTI=m
-+CONFIG_PARIDE_ON20=m
-+CONFIG_PARIDE_ON26=m
-+CONFIG_BLK_CPQ_DA=m
-+CONFIG_BLK_CPQ_CISS_DA=m
-+CONFIG_CISS_SCSI_TAPE=y
-+CONFIG_BLK_DEV_DAC960=m
-+CONFIG_BLK_DEV_UMEM=m
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=m
-+CONFIG_BLK_DEV_CRYPTOLOOP=m
-+CONFIG_BLK_DEV_NBD=m
-+CONFIG_BLK_DEV_SX8=m
-+# CONFIG_BLK_DEV_UB is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=16384
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+CONFIG_LBD=y
-+CONFIG_CDROM_PKTCDVD=m
-+CONFIG_CDROM_PKTCDVD_BUFFERS=8
-+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+CONFIG_ATA_OVER_ETH=m
-+
-+#
-+# ATA/ATAPI/MFM/RLL support
-+#
-+CONFIG_IDE=y
-+CONFIG_BLK_DEV_IDE=y
-+
-+#
-+# Please see Documentation/ide.txt for help/info on IDE drives
-+#
-+# CONFIG_BLK_DEV_IDE_SATA is not set
-+# CONFIG_BLK_DEV_HD_IDE is not set
-+CONFIG_BLK_DEV_IDEDISK=y
-+CONFIG_IDEDISK_MULTI_MODE=y
-+CONFIG_BLK_DEV_IDECD=y
-+# CONFIG_BLK_DEV_IDETAPE is not set
-+CONFIG_BLK_DEV_IDEFLOPPY=y
-+CONFIG_BLK_DEV_IDESCSI=m
-+# CONFIG_IDE_TASK_IOCTL is not set
-+
-+#
-+# IDE chipset support/bugfixes
-+#
-+CONFIG_IDE_GENERIC=y
-+CONFIG_BLK_DEV_CMD640=y
-+CONFIG_BLK_DEV_CMD640_ENHANCED=y
-+CONFIG_BLK_DEV_IDEPCI=y
-+CONFIG_IDEPCI_SHARE_IRQ=y
-+# CONFIG_BLK_DEV_OFFBOARD is not set
-+CONFIG_BLK_DEV_GENERIC=y
-+# CONFIG_BLK_DEV_OPTI621 is not set
-+CONFIG_BLK_DEV_RZ1000=y
-+CONFIG_BLK_DEV_IDEDMA_PCI=y
-+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-+CONFIG_IDEDMA_PCI_AUTO=y
-+# CONFIG_IDEDMA_ONLYDISK is not set
-+CONFIG_BLK_DEV_AEC62XX=y
-+CONFIG_BLK_DEV_ALI15X3=y
-+# CONFIG_WDC_ALI15X3 is not set
-+CONFIG_BLK_DEV_AMD74XX=y
-+CONFIG_BLK_DEV_ATIIXP=y
-+CONFIG_BLK_DEV_CMD64X=y
-+CONFIG_BLK_DEV_TRIFLEX=y
-+CONFIG_BLK_DEV_CY82C693=y
-+CONFIG_BLK_DEV_CS5520=y
-+CONFIG_BLK_DEV_CS5530=y
-+CONFIG_BLK_DEV_HPT34X=y
-+# CONFIG_HPT34X_AUTODMA is not set
-+CONFIG_BLK_DEV_HPT366=y
-+# CONFIG_BLK_DEV_SC1200 is not set
-+CONFIG_BLK_DEV_PIIX=y
-+# CONFIG_BLK_DEV_NS87415 is not set
-+CONFIG_BLK_DEV_PDC202XX_OLD=y
-+# CONFIG_PDC202XX_BURST is not set
-+CONFIG_BLK_DEV_PDC202XX_NEW=y
-+CONFIG_PDC202XX_FORCE=y
-+CONFIG_BLK_DEV_SVWKS=y
-+CONFIG_BLK_DEV_SIIMAGE=y
-+CONFIG_BLK_DEV_SIS5513=y
-+CONFIG_BLK_DEV_SLC90E66=y
-+# CONFIG_BLK_DEV_TRM290 is not set
-+CONFIG_BLK_DEV_VIA82CXXX=y
-+# CONFIG_IDE_ARM is not set
-+CONFIG_BLK_DEV_IDEDMA=y
-+# CONFIG_IDEDMA_IVB is not set
-+CONFIG_IDEDMA_AUTO=y
-+# CONFIG_BLK_DEV_HD is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=y
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=y
-+CONFIG_CHR_DEV_ST=m
-+CONFIG_CHR_DEV_OSST=m
-+CONFIG_BLK_DEV_SR=m
-+CONFIG_BLK_DEV_SR_VENDOR=y
-+CONFIG_CHR_DEV_SG=m
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+CONFIG_SCSI_MULTI_LUN=y
-+CONFIG_SCSI_CONSTANTS=y
-+CONFIG_SCSI_LOGGING=y
-+
-+#
-+# SCSI Transport Attributes
-+#
-+CONFIG_SCSI_SPI_ATTRS=m
-+CONFIG_SCSI_FC_ATTRS=m
-+CONFIG_SCSI_ISCSI_ATTRS=m
-+
-+#
-+# SCSI low-level drivers
-+#
-+CONFIG_BLK_DEV_3W_XXXX_RAID=m
-+CONFIG_SCSI_3W_9XXX=m
-+CONFIG_SCSI_ACARD=m
-+CONFIG_SCSI_AACRAID=m
-+CONFIG_SCSI_AIC7XXX=m
-+CONFIG_AIC7XXX_CMDS_PER_DEVICE=4
-+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
-+CONFIG_AIC7XXX_DEBUG_MASK=0
-+# CONFIG_AIC7XXX_REG_PRETTY_PRINT is not set
-+CONFIG_SCSI_AIC7XXX_OLD=m
-+CONFIG_SCSI_AIC79XX=m
-+CONFIG_AIC79XX_CMDS_PER_DEVICE=4
-+CONFIG_AIC79XX_RESET_DELAY_MS=15000
-+# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
-+# CONFIG_AIC79XX_DEBUG_ENABLE is not set
-+CONFIG_AIC79XX_DEBUG_MASK=0
-+# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-+# CONFIG_SCSI_ADVANSYS is not set
-+CONFIG_MEGARAID_NEWGEN=y
-+CONFIG_MEGARAID_MM=m
-+CONFIG_MEGARAID_MAILBOX=m
-+CONFIG_SCSI_SATA=y
-+CONFIG_SCSI_SATA_AHCI=m
-+CONFIG_SCSI_SATA_SVW=m
-+CONFIG_SCSI_ATA_PIIX=y
-+CONFIG_SCSI_SATA_NV=m
-+CONFIG_SCSI_SATA_PROMISE=m
-+CONFIG_SCSI_SATA_QSTOR=m
-+CONFIG_SCSI_SATA_SX4=m
-+CONFIG_SCSI_SATA_SIL=m
-+CONFIG_SCSI_SATA_SIS=m
-+CONFIG_SCSI_SATA_ULI=m
-+CONFIG_SCSI_SATA_VIA=m
-+CONFIG_SCSI_SATA_VITESSE=m
-+CONFIG_SCSI_BUSLOGIC=m
-+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-+# CONFIG_SCSI_CPQFCTS is not set
-+# CONFIG_SCSI_DMX3191D is not set
-+# CONFIG_SCSI_EATA is not set
-+# CONFIG_SCSI_EATA_PIO is not set
-+# CONFIG_SCSI_FUTURE_DOMAIN is not set
-+CONFIG_SCSI_GDTH=m
-+CONFIG_SCSI_IPS=m
-+CONFIG_SCSI_INITIO=m
-+CONFIG_SCSI_INIA100=m
-+CONFIG_SCSI_PPA=m
-+CONFIG_SCSI_IMM=m
-+# CONFIG_SCSI_IZIP_EPP16 is not set
-+# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-+CONFIG_SCSI_SYM53C8XX_2=m
-+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-+# CONFIG_SCSI_IPR is not set
-+# CONFIG_SCSI_PCI2000 is not set
-+# CONFIG_SCSI_PCI2220I is not set
-+# CONFIG_SCSI_QLOGIC_ISP is not set
-+# CONFIG_SCSI_QLOGIC_FC is not set
-+CONFIG_SCSI_QLOGIC_1280=m
-+CONFIG_SCSI_QLOGIC_1280_1040=y
-+CONFIG_SCSI_QLA2XXX=y
-+CONFIG_SCSI_QLA21XX=m
-+CONFIG_SCSI_QLA22XX=m
-+CONFIG_SCSI_QLA2300=m
-+CONFIG_SCSI_QLA2322=m
-+CONFIG_SCSI_QLA6312=m
-+CONFIG_SCSI_LPFC=m
-+CONFIG_SCSI_DC395x=m
-+CONFIG_SCSI_DC390T=m
-+# CONFIG_SCSI_DEBUG is not set
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+CONFIG_MD=y
-+CONFIG_BLK_DEV_MD=y
-+CONFIG_MD_LINEAR=m
-+CONFIG_MD_RAID0=m
-+CONFIG_MD_RAID1=m
-+CONFIG_MD_RAID10=m
-+CONFIG_MD_RAID5=m
-+CONFIG_MD_RAID6=m
-+CONFIG_MD_MULTIPATH=m
-+CONFIG_MD_FAULTY=m
-+CONFIG_BLK_DEV_DM=m
-+CONFIG_DM_CRYPT=m
-+CONFIG_DM_SNAPSHOT=m
-+CONFIG_DM_MIRROR=m
-+CONFIG_DM_ZERO=m
-+CONFIG_DM_MULTIPATH=m
-+CONFIG_DM_MULTIPATH_EMC=m
-+
-+#
-+# Fusion MPT device support
-+#
-+CONFIG_FUSION=m
-+CONFIG_FUSION_MAX_SGE=40
-+CONFIG_FUSION_CTL=m
-+CONFIG_FUSION_LAN=m
-+
-+#
-+# IEEE 1394 (FireWire) support
-+#
-+CONFIG_IEEE1394=m
-+
-+#
-+# Subsystem Options
-+#
-+# CONFIG_IEEE1394_VERBOSEDEBUG is not set
-+CONFIG_IEEE1394_OUI_DB=y
-+CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
-+CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
-+
-+#
-+# Device Drivers
-+#
-+CONFIG_IEEE1394_PCILYNX=m
-+CONFIG_IEEE1394_OHCI1394=m
-+
-+#
-+# Protocol Drivers
-+#
-+CONFIG_IEEE1394_VIDEO1394=m
-+CONFIG_IEEE1394_SBP2=m
-+# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-+CONFIG_IEEE1394_ETH1394=m
-+CONFIG_IEEE1394_DV1394=m
-+CONFIG_IEEE1394_RAWIO=m
-+CONFIG_IEEE1394_CMP=m
-+CONFIG_IEEE1394_AMDTP=m
-+
-+#
-+# I2O device support
-+#
-+CONFIG_I2O=m
-+CONFIG_I2O_CONFIG=m
-+CONFIG_I2O_BLOCK=m
-+CONFIG_I2O_SCSI=m
-+CONFIG_I2O_PROC=m
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=y
-+CONFIG_PACKET_MMAP=y
-+CONFIG_UNIX=y
-+CONFIG_NET_KEY=m
-+CONFIG_INET=y
-+CONFIG_IP_MULTICAST=y
-+CONFIG_IP_ADVANCED_ROUTER=y
-+CONFIG_IP_MULTIPLE_TABLES=y
-+CONFIG_IP_ROUTE_FWMARK=y
-+CONFIG_IP_ROUTE_MULTIPATH=y
-+# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
-+CONFIG_IP_ROUTE_VERBOSE=y
-+# CONFIG_IP_PNP is not set
-+CONFIG_NET_IPIP=m
-+CONFIG_NET_IPGRE=m
-+CONFIG_NET_IPGRE_BROADCAST=y
-+CONFIG_IP_MROUTE=y
-+CONFIG_IP_PIMSM_V1=y
-+CONFIG_IP_PIMSM_V2=y
-+# CONFIG_ARPD is not set
-+CONFIG_SYN_COOKIES=y
-+CONFIG_INET_AH=m
-+CONFIG_INET_ESP=m
-+CONFIG_INET_IPCOMP=m
-+CONFIG_INET_TUNNEL=m
-+CONFIG_IP_TCPDIAG=m
-+CONFIG_IP_TCPDIAG_IPV6=y
-+
-+#
-+# IP: Virtual Server Configuration
-+#
-+CONFIG_IP_VS=m
-+# CONFIG_IP_VS_DEBUG is not set
-+CONFIG_IP_VS_TAB_BITS=12
-+
-+#
-+# IPVS transport protocol load balancing support
-+#
-+CONFIG_IP_VS_PROTO_TCP=y
-+CONFIG_IP_VS_PROTO_UDP=y
-+CONFIG_IP_VS_PROTO_ESP=y
-+CONFIG_IP_VS_PROTO_AH=y
-+
-+#
-+# IPVS scheduler
-+#
-+CONFIG_IP_VS_RR=m
-+CONFIG_IP_VS_WRR=m
-+CONFIG_IP_VS_LC=m
-+CONFIG_IP_VS_WLC=m
-+CONFIG_IP_VS_LBLC=m
-+CONFIG_IP_VS_LBLCR=m
-+CONFIG_IP_VS_DH=m
-+CONFIG_IP_VS_SH=m
-+CONFIG_IP_VS_SED=m
-+CONFIG_IP_VS_NQ=m
-+
-+#
-+# IPVS application helper
-+#
-+CONFIG_IP_VS_FTP=m
-+CONFIG_IPV6=m
-+CONFIG_IPV6_PRIVACY=y
-+CONFIG_INET6_AH=m
-+CONFIG_INET6_ESP=m
-+CONFIG_INET6_IPCOMP=m
-+CONFIG_INET6_TUNNEL=m
-+CONFIG_IPV6_TUNNEL=m
-+CONFIG_NETFILTER=y
-+# CONFIG_NETFILTER_DEBUG is not set
-+CONFIG_BRIDGE_NETFILTER=y
-+
-+#
-+# IP: Netfilter Configuration
-+#
-+CONFIG_IP_NF_CONNTRACK=m
-+CONFIG_IP_NF_CT_ACCT=y
-+CONFIG_IP_NF_CONNTRACK_MARK=y
-+CONFIG_IP_NF_CT_PROTO_SCTP=m
-+CONFIG_IP_NF_FTP=m
-+CONFIG_IP_NF_IRC=m
-+CONFIG_IP_NF_TFTP=m
-+CONFIG_IP_NF_AMANDA=m
-+CONFIG_IP_NF_QUEUE=m
-+CONFIG_IP_NF_IPTABLES=m
-+CONFIG_IP_NF_MATCH_LIMIT=m
-+CONFIG_IP_NF_MATCH_IPRANGE=m
-+CONFIG_IP_NF_MATCH_MAC=m
-+CONFIG_IP_NF_MATCH_PKTTYPE=m
-+CONFIG_IP_NF_MATCH_MARK=m
-+CONFIG_IP_NF_MATCH_MULTIPORT=m
-+CONFIG_IP_NF_MATCH_TOS=m
-+CONFIG_IP_NF_MATCH_RECENT=m
-+CONFIG_IP_NF_MATCH_ECN=m
-+CONFIG_IP_NF_MATCH_DSCP=m
-+CONFIG_IP_NF_MATCH_AH_ESP=m
-+CONFIG_IP_NF_MATCH_LENGTH=m
-+CONFIG_IP_NF_MATCH_TTL=m
-+CONFIG_IP_NF_MATCH_TCPMSS=m
-+CONFIG_IP_NF_MATCH_HELPER=m
-+CONFIG_IP_NF_MATCH_STATE=m
-+CONFIG_IP_NF_MATCH_CONNTRACK=m
-+CONFIG_IP_NF_MATCH_OWNER=m
-+CONFIG_IP_NF_MATCH_PHYSDEV=m
-+CONFIG_IP_NF_MATCH_ADDRTYPE=m
-+CONFIG_IP_NF_MATCH_REALM=m
-+CONFIG_IP_NF_MATCH_SCTP=m
-+CONFIG_IP_NF_MATCH_COMMENT=m
-+CONFIG_IP_NF_MATCH_CONNMARK=m
-+CONFIG_IP_NF_MATCH_HASHLIMIT=m
-+CONFIG_IP_NF_FILTER=m
-+CONFIG_IP_NF_TARGET_REJECT=m
-+CONFIG_IP_NF_TARGET_LOG=m
-+CONFIG_IP_NF_TARGET_ULOG=m
-+CONFIG_IP_NF_TARGET_TCPMSS=m
-+CONFIG_IP_NF_NAT=m
-+CONFIG_IP_NF_NAT_NEEDED=y
-+CONFIG_IP_NF_TARGET_MASQUERADE=m
-+CONFIG_IP_NF_TARGET_REDIRECT=m
-+CONFIG_IP_NF_TARGET_NETMAP=m
-+CONFIG_IP_NF_TARGET_SAME=m
-+CONFIG_IP_NF_NAT_SNMP_BASIC=m
-+CONFIG_IP_NF_NAT_IRC=m
-+CONFIG_IP_NF_NAT_FTP=m
-+CONFIG_IP_NF_NAT_TFTP=m
-+CONFIG_IP_NF_NAT_AMANDA=m
-+CONFIG_IP_NF_MANGLE=m
-+CONFIG_IP_NF_TARGET_TOS=m
-+CONFIG_IP_NF_TARGET_ECN=m
-+CONFIG_IP_NF_TARGET_DSCP=m
-+CONFIG_IP_NF_TARGET_MARK=m
-+CONFIG_IP_NF_TARGET_CLASSIFY=m
-+CONFIG_IP_NF_TARGET_CONNMARK=m
-+CONFIG_IP_NF_TARGET_CLUSTERIP=m
-+CONFIG_IP_NF_RAW=m
-+CONFIG_IP_NF_TARGET_NOTRACK=m
-+CONFIG_IP_NF_ARPTABLES=m
-+CONFIG_IP_NF_ARPFILTER=m
-+CONFIG_IP_NF_ARP_MANGLE=m
-+
-+#
-+# IPv6: Netfilter Configuration (EXPERIMENTAL)
-+#
-+CONFIG_IP6_NF_QUEUE=m
-+CONFIG_IP6_NF_IPTABLES=m
-+CONFIG_IP6_NF_MATCH_LIMIT=m
-+CONFIG_IP6_NF_MATCH_MAC=m
-+CONFIG_IP6_NF_MATCH_RT=m
-+CONFIG_IP6_NF_MATCH_OPTS=m
-+CONFIG_IP6_NF_MATCH_FRAG=m
-+CONFIG_IP6_NF_MATCH_HL=m
-+CONFIG_IP6_NF_MATCH_MULTIPORT=m
-+CONFIG_IP6_NF_MATCH_OWNER=m
-+CONFIG_IP6_NF_MATCH_MARK=m
-+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-+CONFIG_IP6_NF_MATCH_AHESP=m
-+CONFIG_IP6_NF_MATCH_LENGTH=m
-+CONFIG_IP6_NF_MATCH_EUI64=m
-+CONFIG_IP6_NF_MATCH_PHYSDEV=m
-+CONFIG_IP6_NF_FILTER=m
-+CONFIG_IP6_NF_TARGET_LOG=m
-+CONFIG_IP6_NF_MANGLE=m
-+CONFIG_IP6_NF_TARGET_MARK=m
-+CONFIG_IP6_NF_RAW=m
-+
-+#
-+# Bridge: Netfilter Configuration
-+#
-+CONFIG_BRIDGE_NF_EBTABLES=m
-+CONFIG_BRIDGE_EBT_BROUTE=m
-+CONFIG_BRIDGE_EBT_T_FILTER=m
-+CONFIG_BRIDGE_EBT_T_NAT=m
-+CONFIG_BRIDGE_EBT_802_3=m
-+CONFIG_BRIDGE_EBT_AMONG=m
-+CONFIG_BRIDGE_EBT_ARP=m
-+CONFIG_BRIDGE_EBT_IP=m
-+CONFIG_BRIDGE_EBT_LIMIT=m
-+CONFIG_BRIDGE_EBT_MARK=m
-+CONFIG_BRIDGE_EBT_PKTTYPE=m
-+CONFIG_BRIDGE_EBT_STP=m
-+CONFIG_BRIDGE_EBT_VLAN=m
-+CONFIG_BRIDGE_EBT_ARPREPLY=m
-+CONFIG_BRIDGE_EBT_DNAT=m
-+CONFIG_BRIDGE_EBT_MARK_T=m
-+CONFIG_BRIDGE_EBT_REDIRECT=m
-+CONFIG_BRIDGE_EBT_SNAT=m
-+CONFIG_BRIDGE_EBT_LOG=m
-+CONFIG_BRIDGE_EBT_ULOG=m
-+CONFIG_XFRM=y
-+CONFIG_XFRM_USER=y
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+CONFIG_IP_SCTP=m
-+# CONFIG_SCTP_DBG_MSG is not set
-+# CONFIG_SCTP_DBG_OBJCNT is not set
-+# CONFIG_SCTP_HMAC_NONE is not set
-+# CONFIG_SCTP_HMAC_SHA1 is not set
-+CONFIG_SCTP_HMAC_MD5=y
-+CONFIG_ATM=m
-+CONFIG_ATM_CLIP=m
-+# CONFIG_ATM_CLIP_NO_ICMP is not set
-+CONFIG_ATM_LANE=m
-+# CONFIG_ATM_MPOA is not set
-+CONFIG_ATM_BR2684=m
-+# CONFIG_ATM_BR2684_IPFILTER is not set
-+CONFIG_BRIDGE=m
-+CONFIG_VLAN_8021Q=m
-+# CONFIG_DECNET is not set
-+CONFIG_LLC=y
-+# CONFIG_LLC2 is not set
-+CONFIG_IPX=m
-+# CONFIG_IPX_INTERN is not set
-+CONFIG_ATALK=m
-+CONFIG_DEV_APPLETALK=y
-+CONFIG_IPDDP=m
-+CONFIG_IPDDP_ENCAP=y
-+CONFIG_IPDDP_DECAP=y
-+# CONFIG_X25 is not set
-+# CONFIG_LAPB is not set
-+CONFIG_NET_DIVERT=y
-+# CONFIG_ECONET is not set
-+CONFIG_WAN_ROUTER=m
-+
-+#
-+# QoS and/or fair queueing
-+#
-+CONFIG_NET_SCHED=y
-+CONFIG_NET_SCH_CLK_JIFFIES=y
-+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-+# CONFIG_NET_SCH_CLK_CPU is not set
-+CONFIG_NET_SCH_CBQ=m
-+CONFIG_NET_SCH_HTB=m
-+CONFIG_NET_SCH_HFSC=m
-+CONFIG_NET_SCH_ATM=m
-+CONFIG_NET_SCH_PRIO=m
-+CONFIG_NET_SCH_RED=m
-+CONFIG_NET_SCH_SFQ=m
-+CONFIG_NET_SCH_TEQL=m
-+CONFIG_NET_SCH_TBF=m
-+CONFIG_NET_SCH_GRED=m
-+CONFIG_NET_SCH_DSMARK=m
-+CONFIG_NET_SCH_NETEM=m
-+CONFIG_NET_SCH_INGRESS=m
-+CONFIG_NET_QOS=y
-+CONFIG_NET_ESTIMATOR=y
-+CONFIG_NET_CLS=y
-+CONFIG_NET_CLS_BASIC=m
-+CONFIG_NET_CLS_TCINDEX=m
-+CONFIG_NET_CLS_ROUTE4=m
-+CONFIG_NET_CLS_ROUTE=y
-+CONFIG_NET_CLS_FW=m
-+CONFIG_NET_CLS_U32=m
-+CONFIG_CLS_U32_PERF=y
-+CONFIG_NET_CLS_IND=y
-+CONFIG_CLS_U32_MARK=y
-+CONFIG_NET_CLS_RSVP=m
-+CONFIG_NET_CLS_RSVP6=m
-+CONFIG_NET_EMATCH=y
-+CONFIG_NET_EMATCH_STACK=32
-+CONFIG_NET_EMATCH_CMP=m
-+CONFIG_NET_EMATCH_NBYTE=m
-+CONFIG_NET_EMATCH_U32=m
-+CONFIG_NET_EMATCH_META=m
-+# CONFIG_NET_CLS_ACT is not set
-+CONFIG_NET_CLS_POLICE=y
-+
-+#
-+# Network testing
-+#
-+# CONFIG_NET_PKTGEN is not set
-+CONFIG_NETPOLL=y
-+# CONFIG_NETPOLL_RX is not set
-+CONFIG_NETPOLL_TRAP=y
-+CONFIG_NET_POLL_CONTROLLER=y
-+# CONFIG_HAMRADIO is not set
-+CONFIG_IRDA=m
-+
-+#
-+# IrDA protocols
-+#
-+CONFIG_IRLAN=m
-+CONFIG_IRNET=m
-+CONFIG_IRCOMM=m
-+# CONFIG_IRDA_ULTRA is not set
-+
-+#
-+# IrDA options
-+#
-+CONFIG_IRDA_CACHE_LAST_LSAP=y
-+CONFIG_IRDA_FAST_RR=y
-+# CONFIG_IRDA_DEBUG is not set
-+
-+#
-+# Infrared-port device drivers
-+#
-+
-+#
-+# SIR device drivers
-+#
-+CONFIG_IRTTY_SIR=m
-+
-+#
-+# Dongle support
-+#
-+CONFIG_DONGLE=y
-+CONFIG_ESI_DONGLE=m
-+CONFIG_ACTISYS_DONGLE=m
-+CONFIG_TEKRAM_DONGLE=m
-+CONFIG_LITELINK_DONGLE=m
-+CONFIG_MA600_DONGLE=m
-+CONFIG_GIRBIL_DONGLE=m
-+CONFIG_MCP2120_DONGLE=m
-+CONFIG_OLD_BELKIN_DONGLE=m
-+CONFIG_ACT200L_DONGLE=m
-+
-+#
-+# Old SIR device drivers
-+#
-+CONFIG_IRPORT_SIR=m
-+
-+#
-+# Old Serial dongle support
-+#
-+# CONFIG_DONGLE_OLD is not set
-+
-+#
-+# FIR device drivers
-+#
-+CONFIG_USB_IRDA=m
-+CONFIG_SIGMATEL_FIR=m
-+CONFIG_NSC_FIR=m
-+CONFIG_WINBOND_FIR=m
-+CONFIG_SMC_IRCC_FIR=m
-+CONFIG_ALI_FIR=m
-+CONFIG_VLSI_FIR=m
-+CONFIG_VIA_FIR=m
-+CONFIG_BT=m
-+CONFIG_BT_L2CAP=m
-+CONFIG_BT_SCO=m
-+CONFIG_BT_RFCOMM=m
-+CONFIG_BT_RFCOMM_TTY=y
-+CONFIG_BT_BNEP=m
-+CONFIG_BT_BNEP_MC_FILTER=y
-+CONFIG_BT_BNEP_PROTO_FILTER=y
-+CONFIG_BT_CMTP=m
-+CONFIG_BT_HIDP=m
-+
-+#
-+# Bluetooth device drivers
-+#
-+CONFIG_BT_HCIUSB=m
-+CONFIG_BT_HCIUSB_SCO=y
-+CONFIG_BT_HCIUART=m
-+CONFIG_BT_HCIUART_H4=y
-+CONFIG_BT_HCIUART_BCSP=y
-+CONFIG_BT_HCIUART_BCSP_TXCRC=y
-+CONFIG_BT_HCIBCM203X=m
-+CONFIG_BT_HCIBPA10X=m
-+CONFIG_BT_HCIBFUSB=m
-+CONFIG_BT_HCIVHCI=m
-+CONFIG_NETDEVICES=y
-+CONFIG_DUMMY=m
-+CONFIG_BONDING=m
-+CONFIG_EQUALIZER=m
-+CONFIG_TUN=m
-+
-+#
-+# ARCnet devices
-+#
-+# CONFIG_ARCNET is not set
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=m
-+CONFIG_HAPPYMEAL=m
-+CONFIG_SUNGEM=m
-+CONFIG_NET_VENDOR_3COM=y
-+CONFIG_VORTEX=m
-+CONFIG_TYPHOON=m
-+
-+#
-+# Tulip family network device support
-+#
-+CONFIG_NET_TULIP=y
-+CONFIG_DE2104X=m
-+CONFIG_TULIP=m
-+# CONFIG_TULIP_MWI is not set
-+CONFIG_TULIP_MMIO=y
-+# CONFIG_TULIP_NAPI is not set
-+CONFIG_DE4X5=m
-+CONFIG_WINBOND_840=m
-+CONFIG_DM9102=m
-+# CONFIG_HP100 is not set
-+CONFIG_NET_PCI=y
-+CONFIG_PCNET32=m
-+CONFIG_AMD8111_ETH=m
-+CONFIG_AMD8111E_NAPI=y
-+CONFIG_ADAPTEC_STARFIRE=m
-+CONFIG_ADAPTEC_STARFIRE_NAPI=y
-+CONFIG_B44=m
-+CONFIG_FORCEDETH=m
-+CONFIG_DGRS=m
-+CONFIG_EEPRO100=m
-+CONFIG_E100=m
-+CONFIG_FEALNX=m
-+CONFIG_NATSEMI=m
-+CONFIG_NE2K_PCI=m
-+CONFIG_8139CP=m
-+CONFIG_8139TOO=m
-+CONFIG_8139TOO_PIO=y
-+# CONFIG_8139TOO_TUNE_TWISTER is not set
-+CONFIG_8139TOO_8129=y
-+# CONFIG_8139_OLD_RX_RESET is not set
-+CONFIG_SIS900=m
-+CONFIG_EPIC100=m
-+CONFIG_SUNDANCE=m
-+# CONFIG_SUNDANCE_MMIO is not set
-+CONFIG_VIA_RHINE=m
-+CONFIG_VIA_RHINE_MMIO=y
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+CONFIG_ACENIC=m
-+# CONFIG_ACENIC_OMIT_TIGON_I is not set
-+CONFIG_DL2K=m
-+CONFIG_E1000=m
-+CONFIG_E1000_NAPI=y
-+CONFIG_NS83820=m
-+CONFIG_HAMACHI=m
-+CONFIG_YELLOWFIN=m
-+CONFIG_R8169=m
-+CONFIG_R8169_NAPI=y
-+CONFIG_R8169_VLAN=y
-+CONFIG_SK98LIN=m
-+CONFIG_VIA_VELOCITY=m
-+CONFIG_TIGON3=m
-+CONFIG_BNX2=m
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+CONFIG_IXGB=m
-+CONFIG_IXGB_NAPI=y
-+CONFIG_S2IO=m
-+CONFIG_S2IO_NAPI=y
-+# CONFIG_2BUFF_MODE is not set
-+
-+#
-+# Token Ring devices
-+#
-+CONFIG_TR=y
-+CONFIG_IBMOL=m
-+CONFIG_3C359=m
-+CONFIG_TMS380TR=m
-+CONFIG_TMSPCI=m
-+CONFIG_ABYSS=m
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+CONFIG_NET_RADIO=y
-+
-+#
-+# Obsolete Wireless cards support (pre-802.11)
-+#
-+# CONFIG_STRIP is not set
-+
-+#
-+# Wireless 802.11b ISA/PCI cards support
-+#
-+CONFIG_HERMES=m
-+CONFIG_PLX_HERMES=m
-+CONFIG_TMD_HERMES=m
-+CONFIG_PCI_HERMES=m
-+CONFIG_ATMEL=m
-+CONFIG_PCI_ATMEL=m
-+
-+#
-+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
-+#
-+CONFIG_PRISM54=m
-+CONFIG_NET_WIRELESS=y
-+
-+#
-+# Wan interfaces
-+#
-+# CONFIG_WAN is not set
-+
-+#
-+# ATM drivers
-+#
-+CONFIG_ATM_TCP=m
-+CONFIG_ATM_LANAI=m
-+CONFIG_ATM_ENI=m
-+# CONFIG_ATM_ENI_DEBUG is not set
-+# CONFIG_ATM_ENI_TUNE_BURST is not set
-+CONFIG_ATM_FIRESTREAM=m
-+# CONFIG_ATM_ZATM is not set
-+CONFIG_ATM_IDT77252=m
-+# CONFIG_ATM_IDT77252_DEBUG is not set
-+# CONFIG_ATM_IDT77252_RCV_ALL is not set
-+CONFIG_ATM_IDT77252_USE_SUNI=y
-+CONFIG_ATM_AMBASSADOR=m
-+# CONFIG_ATM_AMBASSADOR_DEBUG is not set
-+CONFIG_ATM_HORIZON=m
-+# CONFIG_ATM_HORIZON_DEBUG is not set
-+CONFIG_ATM_FORE200E_MAYBE=m
-+# CONFIG_ATM_FORE200E_PCA is not set
-+CONFIG_ATM_HE=m
-+# CONFIG_ATM_HE_USE_SUNI is not set
-+CONFIG_FDDI=y
-+# CONFIG_DEFXX is not set
-+CONFIG_SKFP=m
-+# CONFIG_HIPPI is not set
-+CONFIG_PLIP=m
-+CONFIG_PPP=m
-+CONFIG_PPP_MULTILINK=y
-+CONFIG_PPP_FILTER=y
-+CONFIG_PPP_ASYNC=m
-+CONFIG_PPP_SYNC_TTY=m
-+CONFIG_PPP_DEFLATE=m
-+# CONFIG_PPP_BSDCOMP is not set
-+CONFIG_PPPOE=m
-+CONFIG_PPPOATM=m
-+CONFIG_SLIP=m
-+CONFIG_SLIP_COMPRESSED=y
-+CONFIG_SLIP_SMART=y
-+# CONFIG_SLIP_MODE_SLIP6 is not set
-+CONFIG_NET_FC=y
-+# CONFIG_SHAPER is not set
-+CONFIG_NETCONSOLE=m
-+
-+#
-+# ISDN subsystem
-+#
-+CONFIG_ISDN=m
-+
-+#
-+# Old ISDN4Linux
-+#
-+CONFIG_ISDN_I4L=m
-+CONFIG_ISDN_PPP=y
-+CONFIG_ISDN_PPP_VJ=y
-+CONFIG_ISDN_MPP=y
-+CONFIG_IPPP_FILTER=y
-+# CONFIG_ISDN_PPP_BSDCOMP is not set
-+CONFIG_ISDN_AUDIO=y
-+CONFIG_ISDN_TTY_FAX=y
-+
-+#
-+# ISDN feature submodules
-+#
-+CONFIG_ISDN_DRV_LOOP=m
-+CONFIG_ISDN_DIVERSION=m
-+
-+#
-+# ISDN4Linux hardware drivers
-+#
-+
-+#
-+# Passive cards
-+#
-+CONFIG_ISDN_DRV_HISAX=m
-+
-+#
-+# D-channel protocol features
-+#
-+CONFIG_HISAX_EURO=y
-+CONFIG_DE_AOC=y
-+CONFIG_HISAX_NO_SENDCOMPLETE=y
-+CONFIG_HISAX_NO_LLC=y
-+CONFIG_HISAX_NO_KEYPAD=y
-+CONFIG_HISAX_1TR6=y
-+CONFIG_HISAX_NI1=y
-+CONFIG_HISAX_MAX_CARDS=8
-+
-+#
-+# HiSax supported cards
-+#
-+CONFIG_HISAX_16_3=y
-+CONFIG_HISAX_TELESPCI=y
-+CONFIG_HISAX_S0BOX=y
-+CONFIG_HISAX_FRITZPCI=y
-+CONFIG_HISAX_AVM_A1_PCMCIA=y
-+CONFIG_HISAX_ELSA=y
-+CONFIG_HISAX_DIEHLDIVA=y
-+CONFIG_HISAX_SEDLBAUER=y
-+CONFIG_HISAX_NETJET=y
-+CONFIG_HISAX_NETJET_U=y
-+CONFIG_HISAX_NICCY=y
-+CONFIG_HISAX_BKM_A4T=y
-+CONFIG_HISAX_SCT_QUADRO=y
-+CONFIG_HISAX_GAZEL=y
-+CONFIG_HISAX_HFC_PCI=y
-+CONFIG_HISAX_W6692=y
-+CONFIG_HISAX_HFC_SX=y
-+CONFIG_HISAX_ENTERNOW_PCI=y
-+# CONFIG_HISAX_DEBUG is not set
-+
-+#
-+# HiSax PCMCIA card service modules
-+#
-+
-+#
-+# HiSax sub driver modules
-+#
-+CONFIG_HISAX_ST5481=m
-+CONFIG_HISAX_HFCUSB=m
-+CONFIG_HISAX_HFC4S8S=m
-+CONFIG_HISAX_FRITZ_PCIPNP=m
-+CONFIG_HISAX_HDLC=y
-+
-+#
-+# Active cards
-+#
-+CONFIG_HYSDN=m
-+CONFIG_HYSDN_CAPI=y
-+
-+#
-+# CAPI subsystem
-+#
-+CONFIG_ISDN_CAPI=m
-+CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
-+CONFIG_ISDN_CAPI_MIDDLEWARE=y
-+CONFIG_ISDN_CAPI_CAPI20=m
-+CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
-+CONFIG_ISDN_CAPI_CAPIFS=m
-+CONFIG_ISDN_CAPI_CAPIDRV=m
-+
-+#
-+# CAPI hardware drivers
-+#
-+
-+#
-+# Active AVM cards
-+#
-+CONFIG_CAPI_AVM=y
-+CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-+CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-+CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-+CONFIG_ISDN_DRV_AVMB1_C4=m
-+
-+#
-+# Active Eicon DIVA Server cards
-+#
-+# CONFIG_CAPI_EICON is not set
-+
-+#
-+# Telephony Support
-+#
-+# CONFIG_PHONE is not set
-+
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+CONFIG_INPUT_JOYDEV=m
-+# CONFIG_INPUT_TSDEV is not set
-+CONFIG_INPUT_EVDEV=y
-+# CONFIG_INPUT_EVBUG is not set
-+
-+#
-+# Input Device Drivers
-+#
-+CONFIG_INPUT_KEYBOARD=y
-+CONFIG_KEYBOARD_ATKBD=y
-+# CONFIG_KEYBOARD_SUNKBD is not set
-+# CONFIG_KEYBOARD_LKKBD is not set
-+# CONFIG_KEYBOARD_XTKBD is not set
-+# CONFIG_KEYBOARD_NEWTON is not set
-+CONFIG_INPUT_MOUSE=y
-+CONFIG_MOUSE_PS2=y
-+CONFIG_MOUSE_SERIAL=m
-+CONFIG_MOUSE_VSXXXAA=m
-+CONFIG_INPUT_JOYSTICK=y
-+CONFIG_JOYSTICK_ANALOG=m
-+CONFIG_JOYSTICK_A3D=m
-+CONFIG_JOYSTICK_ADI=m
-+CONFIG_JOYSTICK_COBRA=m
-+CONFIG_JOYSTICK_GF2K=m
-+CONFIG_JOYSTICK_GRIP=m
-+CONFIG_JOYSTICK_GRIP_MP=m
-+CONFIG_JOYSTICK_GUILLEMOT=m
-+CONFIG_JOYSTICK_INTERACT=m
-+CONFIG_JOYSTICK_SIDEWINDER=m
-+CONFIG_JOYSTICK_TMDC=m
-+CONFIG_JOYSTICK_IFORCE=m
-+CONFIG_JOYSTICK_IFORCE_USB=y
-+CONFIG_JOYSTICK_IFORCE_232=y
-+CONFIG_JOYSTICK_WARRIOR=m
-+CONFIG_JOYSTICK_MAGELLAN=m
-+CONFIG_JOYSTICK_SPACEORB=m
-+CONFIG_JOYSTICK_SPACEBALL=m
-+CONFIG_JOYSTICK_STINGER=m
-+CONFIG_JOYSTICK_TWIDJOY=m
-+CONFIG_JOYSTICK_DB9=m
-+CONFIG_JOYSTICK_GAMECON=m
-+CONFIG_JOYSTICK_TURBOGRAFX=m
-+CONFIG_JOYSTICK_JOYDUMP=m
-+CONFIG_INPUT_TOUCHSCREEN=y
-+CONFIG_TOUCHSCREEN_GUNZE=m
-+CONFIG_TOUCHSCREEN_ELO=m
-+CONFIG_TOUCHSCREEN_MTOUCH=m
-+CONFIG_TOUCHSCREEN_MK712=m
-+CONFIG_INPUT_MISC=y
-+CONFIG_INPUT_PCSPKR=m
-+CONFIG_INPUT_UINPUT=m
-+
-+#
-+# Hardware I/O ports
-+#
-+CONFIG_SERIO=y
-+CONFIG_SERIO_I8042=y
-+CONFIG_SERIO_SERPORT=y
-+# CONFIG_SERIO_CT82C710 is not set
-+# CONFIG_SERIO_PARKBD is not set
-+# CONFIG_SERIO_PCIPS2 is not set
-+CONFIG_SERIO_LIBPS2=y
-+# CONFIG_SERIO_RAW is not set
-+CONFIG_GAMEPORT=m
-+CONFIG_GAMEPORT_NS558=m
-+CONFIG_GAMEPORT_L4=m
-+CONFIG_GAMEPORT_EMU10K1=m
-+CONFIG_GAMEPORT_VORTEX=m
-+CONFIG_GAMEPORT_FM801=m
-+CONFIG_GAMEPORT_CS461X=m
-+
-+#
-+# Character devices
-+#
-+CONFIG_VT=y
-+CONFIG_VT_CONSOLE=y
-+CONFIG_HW_CONSOLE=y
-+# CONFIG_SERIAL_NONSTANDARD is not set
-+
-+#
-+# Serial drivers
-+#
-+# CONFIG_SERIAL_8250 is not set
-+
-+#
-+# Non-8250 serial port support
-+#
-+# CONFIG_SERIAL_JSM is not set
-+CONFIG_UNIX98_PTYS=y
-+# CONFIG_LEGACY_PTYS is not set
-+CONFIG_PRINTER=m
-+CONFIG_LP_CONSOLE=y
-+CONFIG_PPDEV=m
-+CONFIG_TIPAR=m
-+
-+#
-+# IPMI
-+#
-+CONFIG_IPMI_HANDLER=m
-+# CONFIG_IPMI_PANIC_EVENT is not set
-+CONFIG_IPMI_DEVICE_INTERFACE=m
-+CONFIG_IPMI_SI=m
-+CONFIG_IPMI_WATCHDOG=m
-+CONFIG_IPMI_POWEROFF=m
-+
-+#
-+# Watchdog Cards
-+#
-+CONFIG_WATCHDOG=y
-+# CONFIG_WATCHDOG_NOWAYOUT is not set
-+
-+#
-+# Watchdog Device Drivers
-+#
-+CONFIG_SOFT_WATCHDOG=m
-+CONFIG_ACQUIRE_WDT=m
-+CONFIG_ADVANTECH_WDT=m
-+CONFIG_ALIM1535_WDT=m
-+CONFIG_ALIM7101_WDT=m
-+CONFIG_SC520_WDT=m
-+CONFIG_EUROTECH_WDT=m
-+CONFIG_IB700_WDT=m
-+CONFIG_WAFER_WDT=m
-+CONFIG_I8XX_TCO=m
-+CONFIG_SC1200_WDT=m
-+# CONFIG_60XX_WDT is not set
-+CONFIG_CPU5_WDT=m
-+CONFIG_W83627HF_WDT=m
-+CONFIG_W83877F_WDT=m
-+CONFIG_MACHZ_WDT=m
-+
-+#
-+# PCI-based Watchdog Cards
-+#
-+CONFIG_PCIPCWATCHDOG=m
-+CONFIG_WDTPCI=m
-+CONFIG_WDT_501_PCI=y
-+
-+#
-+# USB-based Watchdog Cards
-+#
-+CONFIG_USBPCWATCHDOG=m
-+CONFIG_HW_RANDOM=m
-+# CONFIG_NVRAM is not set
-+CONFIG_RTC=y
-+CONFIG_DTLK=m
-+CONFIG_R3964=m
-+# CONFIG_APPLICOM is not set
-+
-+#
-+# Ftape, the floppy tape device driver
-+#
-+# CONFIG_FTAPE is not set
-+# CONFIG_AGP is not set
-+CONFIG_DRM=m
-+CONFIG_DRM_TDFX=m
-+# CONFIG_DRM_GAMMA is not set
-+CONFIG_DRM_R128=m
-+CONFIG_DRM_RADEON=m
-+# CONFIG_MWAVE is not set
-+# CONFIG_RAW_DRIVER is not set
-+# CONFIG_HPET is not set
-+CONFIG_HANGCHECK_TIMER=m
-+
-+#
-+# TPM devices
-+#
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# I2C support
-+#
-+CONFIG_I2C=m
-+CONFIG_I2C_CHARDEV=m
-+
-+#
-+# I2C Algorithms
-+#
-+CONFIG_I2C_ALGOBIT=m
-+CONFIG_I2C_ALGOPCF=m
-+CONFIG_I2C_ALGOPCA=m
-+
-+#
-+# I2C Hardware Bus support
-+#
-+# CONFIG_I2C_ALI1535 is not set
-+# CONFIG_I2C_ALI1563 is not set
-+# CONFIG_I2C_ALI15X3 is not set
-+CONFIG_I2C_AMD756=m
-+CONFIG_I2C_AMD756_S4882=m
-+CONFIG_I2C_AMD8111=m
-+# CONFIG_I2C_I801 is not set
-+# CONFIG_I2C_I810 is not set
-+# CONFIG_I2C_PIIX4 is not set
-+CONFIG_I2C_ISA=m
-+CONFIG_I2C_NFORCE2=m
-+# CONFIG_I2C_PARPORT is not set
-+# CONFIG_I2C_PARPORT_LIGHT is not set
-+CONFIG_I2C_PROSAVAGE=m
-+CONFIG_I2C_SAVAGE4=m
-+# CONFIG_SCx200_ACB is not set
-+# CONFIG_I2C_SIS5595 is not set
-+# CONFIG_I2C_SIS630 is not set
-+CONFIG_I2C_SIS96X=m
-+CONFIG_I2C_STUB=m
-+CONFIG_I2C_VIA=m
-+CONFIG_I2C_VIAPRO=m
-+CONFIG_I2C_VOODOO3=m
-+CONFIG_I2C_PCA_ISA=m
-+
-+#
-+# Hardware Sensors Chip support
-+#
-+CONFIG_I2C_SENSOR=m
-+CONFIG_SENSORS_ADM1021=m
-+CONFIG_SENSORS_ADM1025=m
-+CONFIG_SENSORS_ADM1026=m
-+CONFIG_SENSORS_ADM1031=m
-+CONFIG_SENSORS_ASB100=m
-+CONFIG_SENSORS_DS1621=m
-+CONFIG_SENSORS_FSCHER=m
-+CONFIG_SENSORS_FSCPOS=m
-+CONFIG_SENSORS_GL518SM=m
-+CONFIG_SENSORS_GL520SM=m
-+CONFIG_SENSORS_IT87=m
-+CONFIG_SENSORS_LM63=m
-+CONFIG_SENSORS_LM75=m
-+CONFIG_SENSORS_LM77=m
-+CONFIG_SENSORS_LM78=m
-+CONFIG_SENSORS_LM80=m
-+CONFIG_SENSORS_LM83=m
-+CONFIG_SENSORS_LM85=m
-+CONFIG_SENSORS_LM87=m
-+CONFIG_SENSORS_LM90=m
-+CONFIG_SENSORS_LM92=m
-+CONFIG_SENSORS_MAX1619=m
-+CONFIG_SENSORS_PC87360=m
-+CONFIG_SENSORS_SMSC47B397=m
-+CONFIG_SENSORS_SIS5595=m
-+CONFIG_SENSORS_SMSC47M1=m
-+CONFIG_SENSORS_VIA686A=m
-+CONFIG_SENSORS_W83781D=m
-+CONFIG_SENSORS_W83L785TS=m
-+CONFIG_SENSORS_W83627HF=m
-+
-+#
-+# Other I2C Chip support
-+#
-+CONFIG_SENSORS_DS1337=m
-+CONFIG_SENSORS_EEPROM=m
-+CONFIG_SENSORS_PCF8574=m
-+CONFIG_SENSORS_PCF8591=m
-+CONFIG_SENSORS_RTC8564=m
-+# CONFIG_I2C_DEBUG_CORE is not set
-+# CONFIG_I2C_DEBUG_ALGO is not set
-+# CONFIG_I2C_DEBUG_BUS is not set
-+# CONFIG_I2C_DEBUG_CHIP is not set
-+
-+#
-+# Dallas's 1-wire bus
-+#
-+CONFIG_W1=m
-+CONFIG_W1_MATROX=m
-+CONFIG_W1_DS9490=m
-+CONFIG_W1_DS9490_BRIDGE=m
-+CONFIG_W1_THERM=m
-+CONFIG_W1_SMEM=m
-+
-+#
-+# Misc devices
-+#
-+# CONFIG_IBM_ASM is not set
-+
-+#
-+# Multimedia devices
-+#
-+CONFIG_VIDEO_DEV=m
-+
-+#
-+# Video For Linux
-+#
-+
-+#
-+# Video Adapters
-+#
-+CONFIG_VIDEO_BT848=m
-+CONFIG_VIDEO_BWQCAM=m
-+CONFIG_VIDEO_CQCAM=m
-+CONFIG_VIDEO_W9966=m
-+CONFIG_VIDEO_CPIA=m
-+CONFIG_VIDEO_CPIA_PP=m
-+CONFIG_VIDEO_CPIA_USB=m
-+CONFIG_VIDEO_SAA5246A=m
-+CONFIG_VIDEO_SAA5249=m
-+CONFIG_TUNER_3036=m
-+CONFIG_VIDEO_STRADIS=m
-+CONFIG_VIDEO_ZORAN=m
-+CONFIG_VIDEO_ZORAN_BUZ=m
-+CONFIG_VIDEO_ZORAN_DC10=m
-+CONFIG_VIDEO_ZORAN_DC30=m
-+CONFIG_VIDEO_ZORAN_LML33=m
-+CONFIG_VIDEO_ZORAN_LML33R10=m
-+# CONFIG_VIDEO_ZR36120 is not set
-+CONFIG_VIDEO_SAA7134=m
-+CONFIG_VIDEO_SAA7134_DVB=m
-+CONFIG_VIDEO_MXB=m
-+CONFIG_VIDEO_DPC=m
-+CONFIG_VIDEO_HEXIUM_ORION=m
-+CONFIG_VIDEO_HEXIUM_GEMINI=m
-+CONFIG_VIDEO_CX88=m
-+CONFIG_VIDEO_CX88_DVB=m
-+CONFIG_VIDEO_OVCAMCHIP=m
-+
-+#
-+# Radio Adapters
-+#
-+CONFIG_RADIO_GEMTEK_PCI=m
-+CONFIG_RADIO_MAXIRADIO=m
-+CONFIG_RADIO_MAESTRO=m
-+
-+#
-+# Digital Video Broadcasting Devices
-+#
-+CONFIG_DVB=y
-+CONFIG_DVB_CORE=m
-+
-+#
-+# Supported SAA7146 based PCI Adapters
-+#
-+CONFIG_DVB_AV7110=m
-+CONFIG_DVB_AV7110_OSD=y
-+CONFIG_DVB_BUDGET=m
-+CONFIG_DVB_BUDGET_CI=m
-+CONFIG_DVB_BUDGET_AV=m
-+CONFIG_DVB_BUDGET_PATCH=m
-+
-+#
-+# Supported USB Adapters
-+#
-+CONFIG_DVB_TTUSB_BUDGET=m
-+CONFIG_DVB_TTUSB_DEC=m
-+CONFIG_DVB_DIBUSB=m
-+CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES=y
-+# CONFIG_DVB_DIBCOM_DEBUG is not set
-+CONFIG_DVB_CINERGYT2=m
-+CONFIG_DVB_CINERGYT2_TUNING=y
-+CONFIG_DVB_CINERGYT2_STREAM_URB_COUNT=32
-+CONFIG_DVB_CINERGYT2_STREAM_BUF_SIZE=512
-+CONFIG_DVB_CINERGYT2_QUERY_INTERVAL=250
-+CONFIG_DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE=y
-+CONFIG_DVB_CINERGYT2_RC_QUERY_INTERVAL=100
-+
-+#
-+# Supported FlexCopII (B2C2) Adapters
-+#
-+CONFIG_DVB_B2C2_FLEXCOP=m
-+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
-+CONFIG_DVB_B2C2_FLEXCOP_USB=m
-+# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
-+CONFIG_DVB_B2C2_SKYSTAR=m
-+
-+#
-+# Supported BT878 Adapters
-+#
-+CONFIG_DVB_BT8XX=m
-+
-+#
-+# Supported DVB Frontends
-+#
-+
-+#
-+# Customise DVB Frontends
-+#
-+
-+#
-+# DVB-S (satellite) frontends
-+#
-+CONFIG_DVB_STV0299=m
-+CONFIG_DVB_CX24110=m
-+CONFIG_DVB_TDA8083=m
-+CONFIG_DVB_TDA80XX=m
-+CONFIG_DVB_MT312=m
-+CONFIG_DVB_VES1X93=m
-+
-+#
-+# DVB-T (terrestrial) frontends
-+#
-+CONFIG_DVB_SP8870=m
-+CONFIG_DVB_SP887X=m
-+CONFIG_DVB_CX22700=m
-+CONFIG_DVB_CX22702=m
-+CONFIG_DVB_L64781=m
-+CONFIG_DVB_TDA1004X=m
-+CONFIG_DVB_NXT6000=m
-+CONFIG_DVB_MT352=m
-+CONFIG_DVB_DIB3000MB=m
-+CONFIG_DVB_DIB3000MC=m
-+
-+#
-+# DVB-C (cable) frontends
-+#
-+CONFIG_DVB_ATMEL_AT76C651=m
-+CONFIG_DVB_VES1820=m
-+CONFIG_DVB_TDA10021=m
-+CONFIG_DVB_STV0297=m
-+
-+#
-+# ATSC (North American/Korean Terresterial DTV) frontends
-+#
-+CONFIG_DVB_NXT2002=m
-+CONFIG_DVB_OR51211=m
-+CONFIG_DVB_OR51132=m
-+CONFIG_VIDEO_SAA7146=m
-+CONFIG_VIDEO_SAA7146_VV=m
-+CONFIG_VIDEO_VIDEOBUF=m
-+CONFIG_VIDEO_TUNER=m
-+CONFIG_VIDEO_BUF=m
-+CONFIG_VIDEO_BUF_DVB=m
-+CONFIG_VIDEO_BTCX=m
-+CONFIG_VIDEO_IR=m
-+CONFIG_VIDEO_TVEEPROM=m
-+
-+#
-+# Graphics support
-+#
-+CONFIG_FB=y
-+CONFIG_FB_CFB_FILLRECT=y
-+CONFIG_FB_CFB_COPYAREA=y
-+CONFIG_FB_CFB_IMAGEBLIT=y
-+CONFIG_FB_SOFT_CURSOR=y
-+# CONFIG_FB_MACMODES is not set
-+CONFIG_FB_MODE_HELPERS=y
-+CONFIG_FB_TILEBLITTING=y
-+CONFIG_FB_CIRRUS=m
-+# CONFIG_FB_PM2 is not set
-+# CONFIG_FB_CYBER2000 is not set
-+# CONFIG_FB_ASILIANT is not set
-+# CONFIG_FB_IMSTT is not set
-+CONFIG_FB_VGA16=m
-+CONFIG_FB_VESA=y
-+CONFIG_VIDEO_SELECT=y
-+# CONFIG_FB_HGA is not set
-+# CONFIG_FB_NVIDIA is not set
-+CONFIG_FB_RIVA=m
-+# CONFIG_FB_RIVA_I2C is not set
-+# CONFIG_FB_RIVA_DEBUG is not set
-+CONFIG_FB_MATROX=m
-+CONFIG_FB_MATROX_MILLENIUM=y
-+CONFIG_FB_MATROX_MYSTIQUE=y
-+CONFIG_FB_MATROX_G=y
-+CONFIG_FB_MATROX_I2C=m
-+CONFIG_FB_MATROX_MAVEN=m
-+CONFIG_FB_MATROX_MULTIHEAD=y
-+# CONFIG_FB_RADEON_OLD is not set
-+CONFIG_FB_RADEON=m
-+CONFIG_FB_RADEON_I2C=y
-+# CONFIG_FB_RADEON_DEBUG is not set
-+CONFIG_FB_ATY128=m
-+CONFIG_FB_ATY=m
-+CONFIG_FB_ATY_CT=y
-+CONFIG_FB_ATY_GENERIC_LCD=y
-+# CONFIG_FB_ATY_XL_INIT is not set
-+CONFIG_FB_ATY_GX=y
-+CONFIG_FB_SAVAGE=m
-+CONFIG_FB_SAVAGE_I2C=y
-+CONFIG_FB_SAVAGE_ACCEL=y
-+# CONFIG_FB_SIS is not set
-+CONFIG_FB_NEOMAGIC=m
-+CONFIG_FB_KYRO=m
-+CONFIG_FB_3DFX=m
-+CONFIG_FB_3DFX_ACCEL=y
-+CONFIG_FB_VOODOO1=m
-+CONFIG_FB_TRIDENT=m
-+CONFIG_FB_TRIDENT_ACCEL=y
-+# CONFIG_FB_PM3 is not set
-+# CONFIG_FB_GEODE is not set
-+# CONFIG_FB_S1D13XXX is not set
-+# CONFIG_FB_VIRTUAL is not set
-+
-+#
-+# Console display driver support
-+#
-+CONFIG_VGA_CONSOLE=y
-+CONFIG_DUMMY_CONSOLE=y
-+CONFIG_FRAMEBUFFER_CONSOLE=y
-+# CONFIG_FONTS is not set
-+CONFIG_FONT_8x8=y
-+CONFIG_FONT_8x16=y
-+
-+#
-+# Logo configuration
-+#
-+CONFIG_LOGO=y
-+# CONFIG_LOGO_LINUX_MONO is not set
-+# CONFIG_LOGO_LINUX_VGA16 is not set
-+CONFIG_LOGO_LINUX_CLUT224=y
-+CONFIG_BACKLIGHT_LCD_SUPPORT=y
-+CONFIG_BACKLIGHT_CLASS_DEVICE=m
-+CONFIG_BACKLIGHT_DEVICE=y
-+CONFIG_LCD_CLASS_DEVICE=m
-+CONFIG_LCD_DEVICE=y
-+
-+#
-+# Sound
-+#
-+CONFIG_SOUND=m
-+
-+#
-+# Advanced Linux Sound Architecture
-+#
-+CONFIG_SND=m
-+CONFIG_SND_TIMER=m
-+CONFIG_SND_PCM=m
-+CONFIG_SND_HWDEP=m
-+CONFIG_SND_RAWMIDI=m
-+CONFIG_SND_SEQUENCER=m
-+CONFIG_SND_SEQ_DUMMY=m
-+CONFIG_SND_OSSEMUL=y
-+CONFIG_SND_MIXER_OSS=m
-+CONFIG_SND_PCM_OSS=m
-+CONFIG_SND_SEQUENCER_OSS=y
-+CONFIG_SND_RTCTIMER=m
-+# CONFIG_SND_VERBOSE_PRINTK is not set
-+# CONFIG_SND_DEBUG is not set
-+
-+#
-+# Generic devices
-+#
-+CONFIG_SND_MPU401_UART=m
-+CONFIG_SND_OPL3_LIB=m
-+CONFIG_SND_VX_LIB=m
-+CONFIG_SND_DUMMY=m
-+CONFIG_SND_VIRMIDI=m
-+CONFIG_SND_MTPAV=m
-+# CONFIG_SND_SERIAL_U16550 is not set
-+CONFIG_SND_MPU401=m
-+
-+#
-+# PCI devices
-+#
-+CONFIG_SND_AC97_CODEC=m
-+CONFIG_SND_ALI5451=m
-+CONFIG_SND_ATIIXP=m
-+CONFIG_SND_ATIIXP_MODEM=m
-+CONFIG_SND_AU8810=m
-+CONFIG_SND_AU8820=m
-+CONFIG_SND_AU8830=m
-+CONFIG_SND_AZT3328=m
-+CONFIG_SND_BT87X=m
-+# CONFIG_SND_BT87X_OVERCLOCK is not set
-+CONFIG_SND_CS46XX=m
-+CONFIG_SND_CS46XX_NEW_DSP=y
-+CONFIG_SND_CS4281=m
-+CONFIG_SND_EMU10K1=m
-+CONFIG_SND_EMU10K1X=m
-+CONFIG_SND_CA0106=m
-+CONFIG_SND_KORG1212=m
-+CONFIG_SND_MIXART=m
-+CONFIG_SND_NM256=m
-+CONFIG_SND_RME32=m
-+CONFIG_SND_RME96=m
-+CONFIG_SND_RME9652=m
-+CONFIG_SND_HDSP=m
-+CONFIG_SND_TRIDENT=m
-+CONFIG_SND_YMFPCI=m
-+CONFIG_SND_ALS4000=m
-+CONFIG_SND_CMIPCI=m
-+CONFIG_SND_ENS1370=m
-+CONFIG_SND_ENS1371=m
-+CONFIG_SND_ES1938=m
-+CONFIG_SND_ES1968=m
-+CONFIG_SND_MAESTRO3=m
-+CONFIG_SND_FM801=m
-+CONFIG_SND_FM801_TEA575X=m
-+CONFIG_SND_ICE1712=m
-+CONFIG_SND_ICE1724=m
-+CONFIG_SND_INTEL8X0=m
-+CONFIG_SND_INTEL8X0M=m
-+CONFIG_SND_SONICVIBES=m
-+CONFIG_SND_VIA82XX=m
-+CONFIG_SND_VIA82XX_MODEM=m
-+CONFIG_SND_VX222=m
-+CONFIG_SND_HDA_INTEL=m
-+
-+#
-+# USB devices
-+#
-+CONFIG_SND_USB_AUDIO=m
-+CONFIG_SND_USB_USX2Y=m
-+
-+#
-+# Open Sound System
-+#
-+# CONFIG_SOUND_PRIME is not set
-+
-+#
-+# USB support
-+#
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB=y
-+# CONFIG_USB_DEBUG is not set
-+
-+#
-+# Miscellaneous USB options
-+#
-+CONFIG_USB_DEVICEFS=y
-+# CONFIG_USB_BANDWIDTH is not set
-+# CONFIG_USB_DYNAMIC_MINORS is not set
-+# CONFIG_USB_OTG is not set
-+
-+#
-+# USB Host Controller Drivers
-+#
-+CONFIG_USB_EHCI_HCD=m
-+CONFIG_USB_EHCI_SPLIT_ISO=y
-+CONFIG_USB_EHCI_ROOT_HUB_TT=y
-+CONFIG_USB_OHCI_HCD=m
-+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
-+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-+CONFIG_USB_UHCI_HCD=m
-+CONFIG_USB_SL811_HCD=m
-+
-+#
-+# USB Device Class drivers
-+#
-+# CONFIG_USB_AUDIO is not set
-+
-+#
-+# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
-+#
-+CONFIG_USB_MIDI=m
-+CONFIG_USB_ACM=m
-+CONFIG_USB_PRINTER=m
-+
-+#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-+#
-+CONFIG_USB_STORAGE=m
-+# CONFIG_USB_STORAGE_DEBUG is not set
-+CONFIG_USB_STORAGE_DATAFAB=y
-+CONFIG_USB_STORAGE_FREECOM=y
-+CONFIG_USB_STORAGE_ISD200=y
-+CONFIG_USB_STORAGE_DPCM=y
-+CONFIG_USB_STORAGE_USBAT=y
-+CONFIG_USB_STORAGE_SDDR09=y
-+CONFIG_USB_STORAGE_SDDR55=y
-+CONFIG_USB_STORAGE_JUMPSHOT=y
-+
-+#
-+# USB Input Devices
-+#
-+CONFIG_USB_HID=y
-+CONFIG_USB_HIDINPUT=y
-+CONFIG_HID_FF=y
-+CONFIG_HID_PID=y
-+CONFIG_LOGITECH_FF=y
-+CONFIG_THRUSTMASTER_FF=y
-+CONFIG_USB_HIDDEV=y
-+CONFIG_USB_AIPTEK=m
-+CONFIG_USB_WACOM=m
-+CONFIG_USB_KBTAB=m
-+CONFIG_USB_POWERMATE=m
-+CONFIG_USB_MTOUCH=m
-+CONFIG_USB_EGALAX=m
-+CONFIG_USB_XPAD=m
-+CONFIG_USB_ATI_REMOTE=m
-+
-+#
-+# USB Imaging devices
-+#
-+CONFIG_USB_MDC800=m
-+CONFIG_USB_MICROTEK=m
-+
-+#
-+# USB Multimedia devices
-+#
-+CONFIG_USB_DABUSB=m
-+CONFIG_USB_VICAM=m
-+CONFIG_USB_DSBR=m
-+CONFIG_USB_IBMCAM=m
-+CONFIG_USB_KONICAWC=m
-+CONFIG_USB_OV511=m
-+CONFIG_USB_SE401=m
-+CONFIG_USB_SN9C102=m
-+CONFIG_USB_STV680=m
-+CONFIG_USB_W9968CF=m
-+CONFIG_USB_PWC=m
-+
-+#
-+# USB Network Adapters
-+#
-+CONFIG_USB_CATC=m
-+CONFIG_USB_KAWETH=m
-+CONFIG_USB_PEGASUS=m
-+CONFIG_USB_RTL8150=m
-+CONFIG_USB_USBNET=m
-+
-+#
-+# USB Host-to-Host Cables
-+#
-+CONFIG_USB_ALI_M5632=y
-+CONFIG_USB_AN2720=y
-+CONFIG_USB_BELKIN=y
-+CONFIG_USB_GENESYS=y
-+CONFIG_USB_NET1080=y
-+CONFIG_USB_PL2301=y
-+CONFIG_USB_KC2190=y
-+
-+#
-+# Intelligent USB Devices/Gadgets
-+#
-+CONFIG_USB_ARMLINUX=y
-+CONFIG_USB_EPSON2888=y
-+CONFIG_USB_ZAURUS=y
-+CONFIG_USB_CDCETHER=y
-+
-+#
-+# USB Network Adapters
-+#
-+CONFIG_USB_AX8817X=y
-+CONFIG_USB_ZD1201=m
-+CONFIG_USB_MON=m
-+
-+#
-+# USB port drivers
-+#
-+CONFIG_USB_USS720=m
-+
-+#
-+# USB Serial Converter support
-+#
-+CONFIG_USB_SERIAL=m
-+CONFIG_USB_SERIAL_GENERIC=y
-+CONFIG_USB_SERIAL_AIRPRIME=m
-+CONFIG_USB_SERIAL_BELKIN=m
-+CONFIG_USB_SERIAL_WHITEHEAT=m
-+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-+CONFIG_USB_SERIAL_CP2101=m
-+CONFIG_USB_SERIAL_CYPRESS_M8=m
-+CONFIG_USB_SERIAL_EMPEG=m
-+CONFIG_USB_SERIAL_FTDI_SIO=m
-+CONFIG_USB_SERIAL_VISOR=m
-+CONFIG_USB_SERIAL_IPAQ=m
-+CONFIG_USB_SERIAL_IR=m
-+CONFIG_USB_SERIAL_EDGEPORT=m
-+CONFIG_USB_SERIAL_EDGEPORT_TI=m
-+CONFIG_USB_SERIAL_GARMIN=m
-+CONFIG_USB_SERIAL_IPW=m
-+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
-+CONFIG_USB_SERIAL_KEYSPAN=m
-+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
-+CONFIG_USB_SERIAL_KLSI=m
-+CONFIG_USB_SERIAL_KOBIL_SCT=m
-+CONFIG_USB_SERIAL_MCT_U232=m
-+CONFIG_USB_SERIAL_PL2303=m
-+CONFIG_USB_SERIAL_HP4X=m
-+CONFIG_USB_SERIAL_SAFE=m
-+CONFIG_USB_SERIAL_SAFE_PADDED=y
-+CONFIG_USB_SERIAL_TI=m
-+CONFIG_USB_SERIAL_CYBERJACK=m
-+CONFIG_USB_SERIAL_XIRCOM=m
-+CONFIG_USB_SERIAL_OMNINET=m
-+CONFIG_USB_EZUSB=y
-+
-+#
-+# USB Miscellaneous drivers
-+#
-+CONFIG_USB_EMI62=m
-+# CONFIG_USB_EMI26 is not set
-+CONFIG_USB_AUERSWALD=m
-+CONFIG_USB_RIO500=m
-+CONFIG_USB_LEGOTOWER=m
-+CONFIG_USB_LCD=m
-+CONFIG_USB_LED=m
-+# CONFIG_USB_CYTHERM is not set
-+CONFIG_USB_PHIDGETKIT=m
-+CONFIG_USB_PHIDGETSERVO=m
-+CONFIG_USB_IDMOUSE=m
-+CONFIG_USB_SISUSBVGA=m
-+CONFIG_USB_TEST=m
-+
-+#
-+# USB ATM/DSL drivers
-+#
-+CONFIG_USB_ATM=m
-+CONFIG_USB_SPEEDTOUCH=m
-+
-+#
-+# USB Gadget Support
-+#
-+# CONFIG_USB_GADGET is not set
-+
-+#
-+# MMC/SD Card support
-+#
-+CONFIG_MMC=m
-+# CONFIG_MMC_DEBUG is not set
-+CONFIG_MMC_BLOCK=m
-+CONFIG_MMC_WBSD=m
-+
-+#
-+# InfiniBand support
-+#
-+CONFIG_INFINIBAND=m
-+CONFIG_INFINIBAND_MTHCA=m
-+# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
-+CONFIG_INFINIBAND_IPOIB=m
-+# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
-+
-+#
-+# Power management options
-+#
-+
-+#
-+# ACPI (Advanced Configuration and Power Interface) Support
-+#
-+CONFIG_ACPI=y
-+CONFIG_ACPI_BOOT=y
-+CONFIG_ACPI_INTERPRETER=y
-+CONFIG_ACPI_AC=m
-+CONFIG_ACPI_BATTERY=m
-+CONFIG_ACPI_BUTTON=m
-+CONFIG_ACPI_VIDEO=m
-+CONFIG_ACPI_FAN=m
-+CONFIG_ACPI_PROCESSOR=m
-+CONFIG_ACPI_THERMAL=m
-+CONFIG_ACPI_ASUS=m
-+CONFIG_ACPI_IBM=m
-+CONFIG_ACPI_TOSHIBA=m
-+CONFIG_ACPI_BLACKLIST_YEAR=0
-+# CONFIG_ACPI_DEBUG is not set
-+CONFIG_ACPI_BUS=y
-+CONFIG_ACPI_EC=y
-+CONFIG_ACPI_POWER=y
-+CONFIG_ACPI_PCI=y
-+CONFIG_ACPI_SYSTEM=y
-+# CONFIG_ACPI_CONTAINER is not set
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+CONFIG_EXT2_FS_XATTR=y
-+CONFIG_EXT2_FS_POSIX_ACL=y
-+CONFIG_EXT2_FS_SECURITY=y
-+CONFIG_EXT3_FS=m
-+CONFIG_EXT3_FS_XATTR=y
-+CONFIG_EXT3_FS_POSIX_ACL=y
-+CONFIG_EXT3_FS_SECURITY=y
-+CONFIG_JBD=m
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+CONFIG_REISERFS_FS=m
-+# CONFIG_REISERFS_CHECK is not set
-+CONFIG_REISERFS_PROC_INFO=y
-+CONFIG_REISERFS_FS_XATTR=y
-+CONFIG_REISERFS_FS_POSIX_ACL=y
-+CONFIG_REISERFS_FS_SECURITY=y
-+CONFIG_JFS_FS=m
-+CONFIG_JFS_POSIX_ACL=y
-+CONFIG_JFS_SECURITY=y
-+# CONFIG_JFS_DEBUG is not set
-+# CONFIG_JFS_STATISTICS is not set
-+CONFIG_FS_POSIX_ACL=y
-+
-+#
-+# XFS support
-+#
-+CONFIG_XFS_FS=m
-+CONFIG_XFS_EXPORT=y
-+# CONFIG_XFS_RT is not set
-+CONFIG_XFS_QUOTA=y
-+CONFIG_XFS_SECURITY=y
-+CONFIG_XFS_POSIX_ACL=y
-+CONFIG_MINIX_FS=m
-+CONFIG_ROMFS_FS=m
-+CONFIG_QUOTA=y
-+# CONFIG_QFMT_V1 is not set
-+CONFIG_QFMT_V2=y
-+CONFIG_QUOTACTL=y
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=m
-+CONFIG_AUTOFS4_FS=m
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=y
-+CONFIG_JOLIET=y
-+CONFIG_ZISOFS=y
-+CONFIG_ZISOFS_FS=y
-+CONFIG_UDF_FS=m
-+CONFIG_UDF_NLS=y
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=m
-+CONFIG_MSDOS_FS=m
-+CONFIG_VFAT_FS=m
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
-+# CONFIG_NTFS_FS is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+CONFIG_DEVPTS_FS_XATTR=y
-+CONFIG_DEVPTS_FS_SECURITY=y
-+CONFIG_TMPFS=y
-+CONFIG_TMPFS_XATTR=y
-+CONFIG_TMPFS_SECURITY=y
-+# CONFIG_HUGETLBFS is not set
-+# CONFIG_HUGETLB_PAGE is not set
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+# CONFIG_ADFS_FS is not set
-+CONFIG_AFFS_FS=m
-+CONFIG_HFS_FS=m
-+CONFIG_HFSPLUS_FS=m
-+CONFIG_BEFS_FS=m
-+# CONFIG_BEFS_DEBUG is not set
-+CONFIG_BFS_FS=m
-+CONFIG_EFS_FS=m
-+# CONFIG_JFFS_FS is not set
-+CONFIG_JFFS2_FS=m
-+CONFIG_JFFS2_FS_DEBUG=0
-+CONFIG_JFFS2_FS_NAND=y
-+# CONFIG_JFFS2_FS_NOR_ECC is not set
-+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-+CONFIG_JFFS2_ZLIB=y
-+CONFIG_JFFS2_RTIME=y
-+# CONFIG_JFFS2_RUBIN is not set
-+CONFIG_CRAMFS=y
-+CONFIG_VXFS_FS=m
-+# CONFIG_HPFS_FS is not set
-+CONFIG_QNX4FS_FS=m
-+# CONFIG_QNX4FS_RW is not set
-+CONFIG_SYSV_FS=m
-+CONFIG_UFS_FS=m
-+# CONFIG_UFS_FS_WRITE is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=m
-+CONFIG_NFS_V3=y
-+CONFIG_NFS_V4=y
-+CONFIG_NFS_DIRECTIO=y
-+CONFIG_NFSD=m
-+CONFIG_NFSD_V3=y
-+CONFIG_NFSD_V4=y
-+CONFIG_NFSD_TCP=y
-+CONFIG_LOCKD=m
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=m
-+CONFIG_SUNRPC=m
-+CONFIG_SUNRPC_GSS=m
-+CONFIG_RPCSEC_GSS_KRB5=m
-+CONFIG_RPCSEC_GSS_SPKM3=m
-+CONFIG_SMB_FS=m
-+# CONFIG_SMB_NLS_DEFAULT is not set
-+CONFIG_CIFS=m
-+# CONFIG_CIFS_STATS is not set
-+CONFIG_CIFS_XATTR=y
-+CONFIG_CIFS_POSIX=y
-+# CONFIG_CIFS_EXPERIMENTAL is not set
-+CONFIG_NCP_FS=m
-+CONFIG_NCPFS_PACKET_SIGNING=y
-+CONFIG_NCPFS_IOCTL_LOCKING=y
-+CONFIG_NCPFS_STRONG=y
-+CONFIG_NCPFS_NFS_NS=y
-+CONFIG_NCPFS_OS2_NS=y
-+CONFIG_NCPFS_SMALLDOS=y
-+CONFIG_NCPFS_NLS=y
-+CONFIG_NCPFS_EXTRAS=y
-+# CONFIG_CODA_FS is not set
-+# CONFIG_AFS_FS is not set
-+
-+#
-+# Partition Types
-+#
-+CONFIG_PARTITION_ADVANCED=y
-+# CONFIG_ACORN_PARTITION is not set
-+CONFIG_OSF_PARTITION=y
-+CONFIG_AMIGA_PARTITION=y
-+# CONFIG_ATARI_PARTITION is not set
-+CONFIG_MAC_PARTITION=y
-+CONFIG_MSDOS_PARTITION=y
-+CONFIG_BSD_DISKLABEL=y
-+CONFIG_MINIX_SUBPARTITION=y
-+CONFIG_SOLARIS_X86_PARTITION=y
-+CONFIG_UNIXWARE_DISKLABEL=y
-+# CONFIG_LDM_PARTITION is not set
-+CONFIG_SGI_PARTITION=y
-+# CONFIG_ULTRIX_PARTITION is not set
-+CONFIG_SUN_PARTITION=y
-+CONFIG_EFI_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="utf8"
-+CONFIG_NLS_CODEPAGE_437=y
-+CONFIG_NLS_CODEPAGE_737=m
-+CONFIG_NLS_CODEPAGE_775=m
-+CONFIG_NLS_CODEPAGE_850=m
-+CONFIG_NLS_CODEPAGE_852=m
-+CONFIG_NLS_CODEPAGE_855=m
-+CONFIG_NLS_CODEPAGE_857=m
-+CONFIG_NLS_CODEPAGE_860=m
-+CONFIG_NLS_CODEPAGE_861=m
-+CONFIG_NLS_CODEPAGE_862=m
-+CONFIG_NLS_CODEPAGE_863=m
-+CONFIG_NLS_CODEPAGE_864=m
-+CONFIG_NLS_CODEPAGE_865=m
-+CONFIG_NLS_CODEPAGE_866=m
-+CONFIG_NLS_CODEPAGE_869=m
-+CONFIG_NLS_CODEPAGE_936=m
-+CONFIG_NLS_CODEPAGE_950=m
-+CONFIG_NLS_CODEPAGE_932=m
-+CONFIG_NLS_CODEPAGE_949=m
-+CONFIG_NLS_CODEPAGE_874=m
-+CONFIG_NLS_ISO8859_8=m
-+CONFIG_NLS_CODEPAGE_1250=m
-+CONFIG_NLS_CODEPAGE_1251=m
-+CONFIG_NLS_ASCII=y
-+CONFIG_NLS_ISO8859_1=m
-+CONFIG_NLS_ISO8859_2=m
-+CONFIG_NLS_ISO8859_3=m
-+CONFIG_NLS_ISO8859_4=m
-+CONFIG_NLS_ISO8859_5=m
-+CONFIG_NLS_ISO8859_6=m
-+CONFIG_NLS_ISO8859_7=m
-+CONFIG_NLS_ISO8859_9=m
-+CONFIG_NLS_ISO8859_13=m
-+CONFIG_NLS_ISO8859_14=m
-+CONFIG_NLS_ISO8859_15=m
-+CONFIG_NLS_KOI8_R=m
-+CONFIG_NLS_KOI8_U=m
-+CONFIG_NLS_UTF8=m
-+
-+#
-+# Security options
-+#
-+CONFIG_KEYS=y
-+CONFIG_KEYS_DEBUG_PROC_KEYS=y
-+CONFIG_SECURITY=y
-+CONFIG_SECURITY_NETWORK=y
-+CONFIG_SECURITY_CAPABILITIES=y
-+# CONFIG_SECURITY_ROOTPLUG is not set
-+# CONFIG_SECURITY_SECLVL is not set
-+CONFIG_SECURITY_SELINUX=y
-+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=1
-+CONFIG_SECURITY_SELINUX_DISABLE=y
-+CONFIG_SECURITY_SELINUX_DEVELOP=y
-+CONFIG_SECURITY_SELINUX_AVC_STATS=y
-+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+CONFIG_CRYPTO_HMAC=y
-+CONFIG_CRYPTO_NULL=m
-+CONFIG_CRYPTO_MD4=m
-+CONFIG_CRYPTO_MD5=m
-+CONFIG_CRYPTO_SHA1=y
-+CONFIG_CRYPTO_SHA256=m
-+CONFIG_CRYPTO_SHA512=m
-+CONFIG_CRYPTO_WP512=m
-+CONFIG_CRYPTO_TGR192=m
-+CONFIG_CRYPTO_DES=m
-+CONFIG_CRYPTO_BLOWFISH=m
-+CONFIG_CRYPTO_TWOFISH=m
-+CONFIG_CRYPTO_SERPENT=m
-+CONFIG_CRYPTO_AES=m
-+CONFIG_CRYPTO_CAST5=m
-+CONFIG_CRYPTO_CAST6=m
-+CONFIG_CRYPTO_TEA=m
-+CONFIG_CRYPTO_ARC4=m
-+CONFIG_CRYPTO_KHAZAD=m
-+CONFIG_CRYPTO_ANUBIS=m
-+CONFIG_CRYPTO_DEFLATE=m
-+CONFIG_CRYPTO_MICHAEL_MIC=m
-+CONFIG_CRYPTO_CRC32C=m
-+# CONFIG_CRYPTO_TEST is not set
-+
-+#
-+# Hardware crypto devices
-+#
-+
-+#
-+# Library routines
-+#
-+CONFIG_CRC_CCITT=m
-+CONFIG_CRC32=y
-+CONFIG_LIBCRC32C=m
-+CONFIG_ZLIB_INFLATE=y
-+CONFIG_ZLIB_DEFLATE=m
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=15
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+# CONFIG_DEBUG_STACKOVERFLOW is not set
-+# CONFIG_KPROBES is not set
-+# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_DEBUG_PAGEALLOC is not set
-+# CONFIG_4KSTACKS is not set
-+CONFIG_X86_FIND_SMP_CONFIG=y
-+CONFIG_X86_MPPARSE=y
-+# CONFIG_INIT_DEBUG is not set
-diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_ia64 linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_ia64
---- pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_ia64	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_ia64	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1261 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12
-+# Thu Sep 15 11:04:33 2005
-+#
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+# CONFIG_CLEAN_COMPILE is not set
-+CONFIG_BROKEN=y
-+CONFIG_BROKEN_ON_SMP=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+# CONFIG_POSIX_MQUEUE is not set
-+CONFIG_BSD_PROCESS_ACCT=y
-+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-+CONFIG_SYSCTL=y
-+# CONFIG_AUDIT is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+# CONFIG_IKCONFIG is not set
-+# CONFIG_CPUSETS is not set
-+# CONFIG_EMBEDDED is not set
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+# CONFIG_KALLSYMS_EXTRA_PASS is not set
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+# CONFIG_MODULE_UNLOAD is not set
-+CONFIG_OBSOLETE_MODPARM=y
-+# CONFIG_MODVERSIONS is not set
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+# CONFIG_KMOD is not set
-+
-+#
-+# Processor type and features
-+#
-+CONFIG_IA64=y
-+CONFIG_64BIT=y
-+CONFIG_MMU=y
-+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_TIME_INTERPOLATION=y
-+CONFIG_EFI=y
-+CONFIG_GENERIC_IOMAP=y
-+CONFIG_XEN=y
-+CONFIG_XEN_VT=n
-+CONFIG_ARCH_XEN=y
-+CONFIG_XEN_PRIVILEGED_GUEST=y
-+CONFIG_XEN_PHYSDEV_ACCESS=y
-+CONFIG_XEN_BLKDEV_GRANT=y
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_VGA_CONSOLE=y
-+CONFIG_FRAMEBUFFER_CONSOLE=n
-+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-+# CONFIG_IA64_GENERIC is not set
-+# CONFIG_IA64_DIG is not set
-+CONFIG_IA64_HP_ZX1=y
-+# CONFIG_IA64_HP_ZX1_SWIOTLB is not set
-+# CONFIG_IA64_SGI_SN2 is not set
-+# CONFIG_IA64_HP_SIM is not set
-+# CONFIG_ITANIUM is not set
-+CONFIG_MCKINLEY=y
-+# CONFIG_IA64_PAGE_SIZE_4KB is not set
-+# CONFIG_IA64_PAGE_SIZE_8KB is not set
-+CONFIG_IA64_PAGE_SIZE_16KB=y
-+# CONFIG_IA64_PAGE_SIZE_64KB is not set
-+CONFIG_IA64_L1_CACHE_SHIFT=7
-+# CONFIG_NUMA is not set
-+CONFIG_VIRTUAL_MEM_MAP=y
-+CONFIG_HOLES_IN_ZONE=y
-+# CONFIG_IA64_CYCLONE is not set
-+CONFIG_IOSAPIC=y
-+CONFIG_FORCE_MAX_ZONEORDER=18
-+CONFIG_SMP=y
-+CONFIG_NR_CPUS=16
-+# CONFIG_HOTPLUG_CPU is not set
-+# CONFIG_SCHED_SMT is not set
-+# CONFIG_PREEMPT is not set
-+CONFIG_HAVE_DEC_LOCK=y
-+# CONFIG_IA32_SUPPORT is not set
-+CONFIG_IA64_MCA_RECOVERY=y
-+CONFIG_PERFMON=y
-+CONFIG_IA64_PALINFO=y
-+CONFIG_ACPI_DEALLOCATE_IRQ=y
-+
-+#
-+# Firmware Drivers
-+#
-+CONFIG_EFI_VARS=y
-+CONFIG_EFI_PCDP=y
-+CONFIG_BINFMT_ELF=y
-+CONFIG_BINFMT_MISC=y
-+
-+#
-+# Power management and ACPI
-+#
-+CONFIG_PM=y
-+CONFIG_ACPI=y
-+
-+#
-+# ACPI (Advanced Configuration and Power Interface) Support
-+#
-+CONFIG_ACPI_BOOT=y
-+CONFIG_ACPI_INTERPRETER=y
-+CONFIG_ACPI_BUTTON=y
-+# CONFIG_ACPI_VIDEO is not set
-+CONFIG_ACPI_FAN=y
-+CONFIG_ACPI_PROCESSOR=y
-+CONFIG_ACPI_THERMAL=y
-+CONFIG_ACPI_BLACKLIST_YEAR=0
-+# CONFIG_ACPI_DEBUG is not set
-+CONFIG_ACPI_BUS=y
-+CONFIG_ACPI_POWER=y
-+CONFIG_ACPI_PCI=y
-+CONFIG_ACPI_SYSTEM=y
-+# CONFIG_ACPI_CONTAINER is not set
-+
-+#
-+# Bus options (PCI, PCMCIA)
-+#
-+CONFIG_PCI=y
-+CONFIG_PCI_DOMAINS=y
-+# CONFIG_PCI_MSI is not set
-+CONFIG_PCI_LEGACY_PROC=y
-+CONFIG_PCI_NAMES=y
-+# CONFIG_PCI_DEBUG is not set
-+
-+#
-+# PCI Hotplug Support
-+#
-+CONFIG_HOTPLUG_PCI=y
-+# CONFIG_HOTPLUG_PCI_FAKE is not set
-+CONFIG_HOTPLUG_PCI_ACPI=y
-+# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set
-+# CONFIG_HOTPLUG_PCI_CPCI is not set
-+# CONFIG_HOTPLUG_PCI_SHPC is not set
-+
-+#
-+# PCCARD (PCMCIA/CardBus) support
-+#
-+# CONFIG_PCCARD is not set
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+# CONFIG_FW_LOADER is not set
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Memory Technology Devices (MTD)
-+#
-+# CONFIG_MTD is not set
-+
-+#
-+# Parallel port support
-+#
-+# CONFIG_PARPORT is not set
-+
-+#
-+# Plug and Play support
-+#
-+# CONFIG_PNP is not set
-+
-+#
-+# Block devices
-+#
-+# CONFIG_BLK_CPQ_DA is not set
-+# CONFIG_BLK_CPQ_CISS_DA is not set
-+# CONFIG_BLK_DEV_DAC960 is not set
-+# CONFIG_BLK_DEV_UMEM is not set
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=y
-+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-+# CONFIG_BLK_DEV_NBD is not set
-+# CONFIG_BLK_DEV_SX8 is not set
-+# CONFIG_BLK_DEV_UB is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=4096
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+# CONFIG_CDROM_PKTCDVD is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+# CONFIG_ATA_OVER_ETH is not set
-+
-+#
-+# ATA/ATAPI/MFM/RLL support
-+#
-+# CONFIG_IDE is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=y
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=y
-+CONFIG_CHR_DEV_ST=y
-+CONFIG_CHR_DEV_OSST=y
-+CONFIG_BLK_DEV_SR=y
-+CONFIG_BLK_DEV_SR_VENDOR=y
-+CONFIG_CHR_DEV_SG=y
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+CONFIG_SCSI_MULTI_LUN=y
-+CONFIG_SCSI_CONSTANTS=y
-+CONFIG_SCSI_LOGGING=y
-+
-+#
-+# SCSI Transport Attributes
-+#
-+CONFIG_SCSI_SPI_ATTRS=y
-+# CONFIG_SCSI_FC_ATTRS is not set
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+
-+#
-+# SCSI low-level drivers
-+#
-+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-+# CONFIG_SCSI_3W_9XXX is not set
-+# CONFIG_SCSI_ACARD is not set
-+# CONFIG_SCSI_AACRAID is not set
-+# CONFIG_SCSI_AIC7XXX is not set
-+# CONFIG_SCSI_AIC7XXX_OLD is not set
-+# CONFIG_SCSI_AIC79XX is not set
-+# CONFIG_SCSI_ADVANSYS is not set
-+# CONFIG_MEGARAID_NEWGEN is not set
-+# CONFIG_MEGARAID_LEGACY is not set
-+# CONFIG_SCSI_SATA is not set
-+# CONFIG_SCSI_CPQFCTS is not set
-+# CONFIG_SCSI_DMX3191D is not set
-+# CONFIG_SCSI_EATA_PIO is not set
-+# CONFIG_SCSI_FUTURE_DOMAIN is not set
-+# CONFIG_SCSI_IPS is not set
-+# CONFIG_SCSI_INITIO is not set
-+# CONFIG_SCSI_INIA100 is not set
-+CONFIG_SCSI_SYM53C8XX_2=y
-+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-+# CONFIG_SCSI_IPR is not set
-+# CONFIG_SCSI_PCI2000 is not set
-+# CONFIG_SCSI_PCI2220I is not set
-+# CONFIG_SCSI_QLOGIC_ISP is not set
-+# CONFIG_SCSI_QLOGIC_FC is not set
-+CONFIG_SCSI_QLOGIC_1280=y
-+# CONFIG_SCSI_QLOGIC_1280_1040 is not set
-+CONFIG_SCSI_QLA2XXX=y
-+# CONFIG_SCSI_QLA21XX is not set
-+# CONFIG_SCSI_QLA22XX is not set
-+# CONFIG_SCSI_QLA2300 is not set
-+# CONFIG_SCSI_QLA2322 is not set
-+# CONFIG_SCSI_QLA6312 is not set
-+# CONFIG_SCSI_LPFC is not set
-+# CONFIG_SCSI_DC395x is not set
-+# CONFIG_SCSI_DC390T is not set
-+# CONFIG_SCSI_DEBUG is not set
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+# CONFIG_MD is not set
-+
-+#
-+# Fusion MPT device support
-+#
-+CONFIG_FUSION=y
-+CONFIG_FUSION_MAX_SGE=40
-+# CONFIG_FUSION_CTL is not set
-+
-+#
-+# IEEE 1394 (FireWire) support
-+#
-+# CONFIG_IEEE1394 is not set
-+
-+#
-+# I2O device support
-+#
-+# CONFIG_I2O is not set
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=y
-+# CONFIG_PACKET_MMAP is not set
-+CONFIG_UNIX=y
-+# CONFIG_NET_KEY is not set
-+CONFIG_INET=y
-+CONFIG_IP_MULTICAST=y
-+# CONFIG_IP_ADVANCED_ROUTER is not set
-+# CONFIG_IP_PNP is not set
-+# CONFIG_NET_IPIP is not set
-+# CONFIG_NET_IPGRE is not set
-+# CONFIG_IP_MROUTE is not set
-+# CONFIG_ARPD is not set
-+# CONFIG_SYN_COOKIES is not set
-+# CONFIG_INET_AH is not set
-+# CONFIG_INET_ESP is not set
-+# CONFIG_INET_IPCOMP is not set
-+# CONFIG_INET_TUNNEL is not set
-+# CONFIG_IP_TCPDIAG is not set
-+# CONFIG_IP_TCPDIAG_IPV6 is not set
-+
-+#
-+# IP: Virtual Server Configuration
-+#
-+# CONFIG_IP_VS is not set
-+# CONFIG_IPV6 is not set
-+CONFIG_NETFILTER=y
-+# CONFIG_NETFILTER_DEBUG is not set
-+
-+#
-+# IP: Netfilter Configuration
-+#
-+# CONFIG_IP_NF_CONNTRACK is not set
-+# CONFIG_IP_NF_CONNTRACK_MARK is not set
-+# CONFIG_IP_NF_QUEUE is not set
-+# CONFIG_IP_NF_IPTABLES is not set
-+CONFIG_IP_NF_ARPTABLES=y
-+# CONFIG_IP_NF_ARPFILTER is not set
-+# CONFIG_IP_NF_ARP_MANGLE is not set
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+# CONFIG_IP_SCTP is not set
-+# CONFIG_ATM is not set
-+# CONFIG_BRIDGE is not set
-+# CONFIG_VLAN_8021Q is not set
-+# CONFIG_DECNET is not set
-+# CONFIG_LLC2 is not set
-+# CONFIG_IPX is not set
-+# CONFIG_ATALK is not set
-+# CONFIG_X25 is not set
-+# CONFIG_LAPB is not set
-+# CONFIG_NET_DIVERT is not set
-+# CONFIG_ECONET is not set
-+# CONFIG_WAN_ROUTER is not set
-+
-+#
-+# QoS and/or fair queueing
-+#
-+# CONFIG_NET_SCHED is not set
-+# CONFIG_NET_CLS_ROUTE is not set
-+
-+#
-+# Network testing
-+#
-+# CONFIG_NET_PKTGEN is not set
-+# CONFIG_NETPOLL is not set
-+# CONFIG_NET_POLL_CONTROLLER is not set
-+# CONFIG_HAMRADIO is not set
-+# CONFIG_IRDA is not set
-+# CONFIG_BT is not set
-+CONFIG_NETDEVICES=y
-+CONFIG_DUMMY=y
-+# CONFIG_BONDING is not set
-+# CONFIG_EQUALIZER is not set
-+# CONFIG_TUN is not set
-+
-+#
-+# ARCnet devices
-+#
-+# CONFIG_ARCNET is not set
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=y
-+# CONFIG_HAPPYMEAL is not set
-+# CONFIG_SUNGEM is not set
-+# CONFIG_NET_VENDOR_3COM is not set
-+
-+#
-+# Tulip family network device support
-+#
-+CONFIG_NET_TULIP=y
-+# CONFIG_DE2104X is not set
-+CONFIG_TULIP=y
-+CONFIG_TULIP_MWI=y
-+CONFIG_TULIP_MMIO=y
-+CONFIG_TULIP_NAPI=y
-+CONFIG_TULIP_NAPI_HW_MITIGATION=y
-+# CONFIG_DE4X5 is not set
-+# CONFIG_WINBOND_840 is not set
-+# CONFIG_DM9102 is not set
-+# CONFIG_HP100 is not set
-+CONFIG_NET_PCI=y
-+# CONFIG_PCNET32 is not set
-+# CONFIG_AMD8111_ETH is not set
-+# CONFIG_ADAPTEC_STARFIRE is not set
-+# CONFIG_B44 is not set
-+# CONFIG_FORCEDETH is not set
-+# CONFIG_DGRS is not set
-+# CONFIG_EEPRO100 is not set
-+CONFIG_E100=y
-+# CONFIG_FEALNX is not set
-+# CONFIG_NATSEMI is not set
-+# CONFIG_NE2K_PCI is not set
-+# CONFIG_8139CP is not set
-+# CONFIG_8139TOO is not set
-+# CONFIG_SIS900 is not set
-+# CONFIG_EPIC100 is not set
-+# CONFIG_SUNDANCE is not set
-+# CONFIG_VIA_RHINE is not set
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+# CONFIG_ACENIC is not set
-+# CONFIG_DL2K is not set
-+CONFIG_E1000=y
-+# CONFIG_E1000_NAPI is not set
-+# CONFIG_NS83820 is not set
-+# CONFIG_HAMACHI is not set
-+# CONFIG_YELLOWFIN is not set
-+# CONFIG_R8169 is not set
-+# CONFIG_SK98LIN is not set
-+# CONFIG_VIA_VELOCITY is not set
-+CONFIG_TIGON3=y
-+# CONFIG_BNX2 is not set
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+# CONFIG_IXGB is not set
-+# CONFIG_S2IO is not set
-+
-+#
-+# Token Ring devices
-+#
-+# CONFIG_TR is not set
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+# CONFIG_NET_RADIO is not set
-+
-+#
-+# Wan interfaces
-+#
-+# CONFIG_WAN is not set
-+# CONFIG_FDDI is not set
-+# CONFIG_HIPPI is not set
-+# CONFIG_PPP is not set
-+# CONFIG_SLIP is not set
-+# CONFIG_NET_FC is not set
-+# CONFIG_SHAPER is not set
-+# CONFIG_NETCONSOLE is not set
-+
-+#
-+# ISDN subsystem
-+#
-+# CONFIG_ISDN is not set
-+
-+#
-+# Telephony Support
-+#
-+# CONFIG_PHONE is not set
-+
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+CONFIG_INPUT_MOUSEDEV_PSAUX=y
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+CONFIG_INPUT_JOYDEV=y
-+# CONFIG_INPUT_TSDEV is not set
-+CONFIG_INPUT_EVDEV=y
-+# CONFIG_INPUT_EVBUG is not set
-+
-+#
-+# Input Device Drivers
-+#
-+# CONFIG_INPUT_KEYBOARD is not set
-+# CONFIG_INPUT_MOUSE is not set
-+# CONFIG_INPUT_JOYSTICK is not set
-+# CONFIG_INPUT_TOUCHSCREEN is not set
-+# CONFIG_INPUT_MISC is not set
-+
-+#
-+# Hardware I/O ports
-+#
-+CONFIG_SERIO=y
-+# CONFIG_SERIO_I8042 is not set
-+# CONFIG_SERIO_SERPORT is not set
-+# CONFIG_SERIO_PCIPS2 is not set
-+# CONFIG_SERIO_RAW is not set
-+# CONFIG_GAMEPORT is not set
-+
-+#
-+# Character devices
-+#
-+# CONFIG_VT is not set
-+# CONFIG_SERIAL_NONSTANDARD is not set
-+
-+#
-+# Serial drivers
-+#
-+CONFIG_SERIAL_8250=y
-+CONFIG_SERIAL_8250_CONSOLE=y
-+CONFIG_SERIAL_8250_ACPI=y
-+CONFIG_SERIAL_8250_NR_UARTS=8
-+CONFIG_SERIAL_8250_EXTENDED=y
-+CONFIG_SERIAL_8250_SHARE_IRQ=y
-+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-+# CONFIG_SERIAL_8250_MULTIPORT is not set
-+# CONFIG_SERIAL_8250_RSA is not set
-+
-+#
-+# Non-8250 serial port support
-+#
-+CONFIG_SERIAL_CORE=y
-+CONFIG_SERIAL_CORE_CONSOLE=y
-+# CONFIG_SERIAL_JSM is not set
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+
-+#
-+# IPMI
-+#
-+# CONFIG_IPMI_HANDLER is not set
-+
-+#
-+# Watchdog Cards
-+#
-+# CONFIG_WATCHDOG is not set
-+# CONFIG_HW_RANDOM is not set
-+CONFIG_EFI_RTC=y
-+# CONFIG_DTLK is not set
-+# CONFIG_R3964 is not set
-+# CONFIG_APPLICOM is not set
-+
-+#
-+# Ftape, the floppy tape device driver
-+#
-+CONFIG_AGP=y
-+CONFIG_AGP_HP_ZX1=y
-+CONFIG_DRM=y
-+# CONFIG_DRM_TDFX is not set
-+# CONFIG_DRM_GAMMA is not set
-+# CONFIG_DRM_R128 is not set
-+CONFIG_DRM_RADEON=y
-+# CONFIG_DRM_MGA is not set
-+# CONFIG_DRM_SIS is not set
-+# CONFIG_RAW_DRIVER is not set
-+# CONFIG_HPET is not set
-+# CONFIG_HANGCHECK_TIMER is not set
-+
-+#
-+# TPM devices
-+#
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# I2C support
-+#
-+CONFIG_I2C=y
-+CONFIG_I2C_CHARDEV=y
-+
-+#
-+# I2C Algorithms
-+#
-+CONFIG_I2C_ALGOBIT=y
-+CONFIG_I2C_ALGOPCF=y
-+# CONFIG_I2C_ALGOPCA is not set
-+
-+#
-+# I2C Hardware Bus support
-+#
-+# CONFIG_I2C_ALI1535 is not set
-+# CONFIG_I2C_ALI1563 is not set
-+# CONFIG_I2C_ALI15X3 is not set
-+# CONFIG_I2C_AMD756 is not set
-+# CONFIG_I2C_AMD8111 is not set
-+# CONFIG_I2C_I801 is not set
-+# CONFIG_I2C_I810 is not set
-+# CONFIG_I2C_PIIX4 is not set
-+# CONFIG_I2C_ISA is not set
-+# CONFIG_I2C_NFORCE2 is not set
-+# CONFIG_I2C_PARPORT_LIGHT is not set
-+# CONFIG_I2C_PROSAVAGE is not set
-+# CONFIG_I2C_SAVAGE4 is not set
-+# CONFIG_SCx200_ACB is not set
-+# CONFIG_I2C_SIS5595 is not set
-+# CONFIG_I2C_SIS630 is not set
-+# CONFIG_I2C_SIS96X is not set
-+# CONFIG_I2C_STUB is not set
-+# CONFIG_I2C_VIA is not set
-+# CONFIG_I2C_VIAPRO is not set
-+# CONFIG_I2C_VOODOO3 is not set
-+# CONFIG_I2C_PCA_ISA is not set
-+
-+#
-+# Hardware Sensors Chip support
-+#
-+# CONFIG_I2C_SENSOR is not set
-+# CONFIG_SENSORS_ADM1021 is not set
-+# CONFIG_SENSORS_ADM1025 is not set
-+# CONFIG_SENSORS_ADM1026 is not set
-+# CONFIG_SENSORS_ADM1031 is not set
-+# CONFIG_SENSORS_ASB100 is not set
-+# CONFIG_SENSORS_DS1621 is not set
-+# CONFIG_SENSORS_FSCHER is not set
-+# CONFIG_SENSORS_FSCPOS is not set
-+# CONFIG_SENSORS_GL518SM is not set
-+# CONFIG_SENSORS_GL520SM is not set
-+# CONFIG_SENSORS_IT87 is not set
-+# CONFIG_SENSORS_LM63 is not set
-+# CONFIG_SENSORS_LM75 is not set
-+# CONFIG_SENSORS_LM77 is not set
-+# CONFIG_SENSORS_LM78 is not set
-+# CONFIG_SENSORS_LM80 is not set
-+# CONFIG_SENSORS_LM83 is not set
-+# CONFIG_SENSORS_LM85 is not set
-+# CONFIG_SENSORS_LM87 is not set
-+# CONFIG_SENSORS_LM90 is not set
-+# CONFIG_SENSORS_LM92 is not set
-+# CONFIG_SENSORS_MAX1619 is not set
-+# CONFIG_SENSORS_PC87360 is not set
-+# CONFIG_SENSORS_SMSC47B397 is not set
-+# CONFIG_SENSORS_SIS5595 is not set
-+# CONFIG_SENSORS_SMSC47M1 is not set
-+# CONFIG_SENSORS_VIA686A is not set
-+# CONFIG_SENSORS_W83781D is not set
-+# CONFIG_SENSORS_W83L785TS is not set
-+# CONFIG_SENSORS_W83627HF is not set
-+
-+#
-+# Other I2C Chip support
-+#
-+# CONFIG_SENSORS_DS1337 is not set
-+# CONFIG_SENSORS_EEPROM is not set
-+# CONFIG_SENSORS_PCF8574 is not set
-+# CONFIG_SENSORS_PCF8591 is not set
-+# CONFIG_SENSORS_RTC8564 is not set
-+# CONFIG_I2C_DEBUG_CORE is not set
-+# CONFIG_I2C_DEBUG_ALGO is not set
-+# CONFIG_I2C_DEBUG_BUS is not set
-+# CONFIG_I2C_DEBUG_CHIP is not set
-+
-+#
-+# Dallas's 1-wire bus
-+#
-+# CONFIG_W1 is not set
-+
-+#
-+# Misc devices
-+#
-+
-+#
-+# Multimedia devices
-+#
-+CONFIG_VIDEO_DEV=y
-+
-+#
-+# Video For Linux
-+#
-+
-+#
-+# Video Adapters
-+#
-+# CONFIG_VIDEO_BT848 is not set
-+# CONFIG_VIDEO_CPIA is not set
-+# CONFIG_VIDEO_SAA5246A is not set
-+# CONFIG_VIDEO_SAA5249 is not set
-+# CONFIG_TUNER_3036 is not set
-+# CONFIG_VIDEO_STRADIS is not set
-+# CONFIG_VIDEO_ZORAN is not set
-+# CONFIG_VIDEO_ZR36120 is not set
-+# CONFIG_VIDEO_SAA7134 is not set
-+# CONFIG_VIDEO_MXB is not set
-+# CONFIG_VIDEO_DPC is not set
-+# CONFIG_VIDEO_HEXIUM_ORION is not set
-+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
-+# CONFIG_VIDEO_CX88 is not set
-+# CONFIG_VIDEO_OVCAMCHIP is not set
-+
-+#
-+# Radio Adapters
-+#
-+# CONFIG_RADIO_GEMTEK_PCI is not set
-+# CONFIG_RADIO_MAXIRADIO is not set
-+# CONFIG_RADIO_MAESTRO is not set
-+
-+#
-+# Digital Video Broadcasting Devices
-+#
-+# CONFIG_DVB is not set
-+
-+#
-+# Graphics support
-+#
-+CONFIG_FB=y
-+CONFIG_FB_CFB_FILLRECT=y
-+CONFIG_FB_CFB_COPYAREA=y
-+CONFIG_FB_CFB_IMAGEBLIT=y
-+CONFIG_FB_SOFT_CURSOR=y
-+# CONFIG_FB_MACMODES is not set
-+CONFIG_FB_MODE_HELPERS=y
-+# CONFIG_FB_TILEBLITTING is not set
-+# CONFIG_FB_CIRRUS is not set
-+# CONFIG_FB_PM2 is not set
-+# CONFIG_FB_CYBER2000 is not set
-+# CONFIG_FB_ASILIANT is not set
-+# CONFIG_FB_IMSTT is not set
-+# CONFIG_FB_NVIDIA is not set
-+# CONFIG_FB_RIVA is not set
-+# CONFIG_FB_MATROX is not set
-+# CONFIG_FB_RADEON_OLD is not set
-+CONFIG_FB_RADEON=y
-+CONFIG_FB_RADEON_I2C=y
-+CONFIG_FB_RADEON_DEBUG=y
-+# CONFIG_FB_ATY128 is not set
-+# CONFIG_FB_ATY is not set
-+# CONFIG_FB_SAVAGE is not set
-+# CONFIG_FB_SIS is not set
-+# CONFIG_FB_NEOMAGIC is not set
-+# CONFIG_FB_KYRO is not set
-+# CONFIG_FB_3DFX is not set
-+# CONFIG_FB_VOODOO1 is not set
-+# CONFIG_FB_TRIDENT is not set
-+# CONFIG_FB_PM3 is not set
-+# CONFIG_FB_S1D13XXX is not set
-+# CONFIG_FB_VIRTUAL is not set
-+
-+#
-+# Logo configuration
-+#
-+CONFIG_LOGO=y
-+# CONFIG_LOGO_LINUX_MONO is not set
-+# CONFIG_LOGO_LINUX_VGA16 is not set
-+CONFIG_LOGO_LINUX_CLUT224=y
-+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-+
-+#
-+# Sound
-+#
-+CONFIG_SOUND=y
-+
-+#
-+# Advanced Linux Sound Architecture
-+#
-+CONFIG_SND=y
-+CONFIG_SND_TIMER=y
-+CONFIG_SND_PCM=y
-+CONFIG_SND_HWDEP=y
-+CONFIG_SND_RAWMIDI=y
-+CONFIG_SND_SEQUENCER=y
-+# CONFIG_SND_SEQ_DUMMY is not set
-+CONFIG_SND_OSSEMUL=y
-+CONFIG_SND_MIXER_OSS=y
-+CONFIG_SND_PCM_OSS=y
-+CONFIG_SND_SEQUENCER_OSS=y
-+# CONFIG_SND_VERBOSE_PRINTK is not set
-+# CONFIG_SND_DEBUG is not set
-+
-+#
-+# Generic devices
-+#
-+CONFIG_SND_MPU401_UART=y
-+CONFIG_SND_OPL3_LIB=y
-+# CONFIG_SND_DUMMY is not set
-+# CONFIG_SND_VIRMIDI is not set
-+# CONFIG_SND_MTPAV is not set
-+# CONFIG_SND_SERIAL_U16550 is not set
-+# CONFIG_SND_MPU401 is not set
-+
-+#
-+# PCI devices
-+#
-+CONFIG_SND_AC97_CODEC=y
-+# CONFIG_SND_ALI5451 is not set
-+# CONFIG_SND_ATIIXP is not set
-+# CONFIG_SND_ATIIXP_MODEM is not set
-+# CONFIG_SND_AU8810 is not set
-+# CONFIG_SND_AU8820 is not set
-+# CONFIG_SND_AU8830 is not set
-+# CONFIG_SND_AZT3328 is not set
-+# CONFIG_SND_BT87X is not set
-+# CONFIG_SND_CS46XX is not set
-+# CONFIG_SND_CS4281 is not set
-+# CONFIG_SND_EMU10K1 is not set
-+# CONFIG_SND_EMU10K1X is not set
-+# CONFIG_SND_CA0106 is not set
-+# CONFIG_SND_KORG1212 is not set
-+# CONFIG_SND_MIXART is not set
-+# CONFIG_SND_NM256 is not set
-+# CONFIG_SND_RME32 is not set
-+# CONFIG_SND_RME96 is not set
-+# CONFIG_SND_RME9652 is not set
-+# CONFIG_SND_HDSP is not set
-+# CONFIG_SND_TRIDENT is not set
-+# CONFIG_SND_YMFPCI is not set
-+# CONFIG_SND_ALS4000 is not set
-+# CONFIG_SND_CMIPCI is not set
-+# CONFIG_SND_ENS1370 is not set
-+# CONFIG_SND_ENS1371 is not set
-+# CONFIG_SND_ES1938 is not set
-+# CONFIG_SND_ES1968 is not set
-+# CONFIG_SND_MAESTRO3 is not set
-+CONFIG_SND_FM801=y
-+CONFIG_SND_FM801_TEA575X=y
-+# CONFIG_SND_ICE1712 is not set
-+# CONFIG_SND_ICE1724 is not set
-+# CONFIG_SND_INTEL8X0 is not set
-+# CONFIG_SND_INTEL8X0M is not set
-+# CONFIG_SND_SONICVIBES is not set
-+# CONFIG_SND_VIA82XX is not set
-+# CONFIG_SND_VIA82XX_MODEM is not set
-+# CONFIG_SND_VX222 is not set
-+# CONFIG_SND_HDA_INTEL is not set
-+
-+#
-+# USB devices
-+#
-+# CONFIG_SND_USB_AUDIO is not set
-+
-+#
-+# Open Sound System
-+#
-+# CONFIG_SOUND_PRIME is not set
-+
-+#
-+# USB support
-+#
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB=y
-+# CONFIG_USB_DEBUG is not set
-+
-+#
-+# Miscellaneous USB options
-+#
-+# CONFIG_USB_DEVICEFS is not set
-+CONFIG_USB_BANDWIDTH=y
-+# CONFIG_USB_DYNAMIC_MINORS is not set
-+# CONFIG_USB_SUSPEND is not set
-+# CONFIG_USB_OTG is not set
-+
-+#
-+# USB Host Controller Drivers
-+#
-+CONFIG_USB_EHCI_HCD=y
-+# CONFIG_USB_EHCI_SPLIT_ISO is not set
-+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
-+CONFIG_USB_OHCI_HCD=y
-+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
-+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-+CONFIG_USB_UHCI_HCD=y
-+# CONFIG_USB_SL811_HCD is not set
-+
-+#
-+# USB Device Class drivers
-+#
-+# CONFIG_USB_AUDIO is not set
-+# CONFIG_USB_BLUETOOTH_TTY is not set
-+# CONFIG_USB_MIDI is not set
-+# CONFIG_USB_ACM is not set
-+# CONFIG_USB_PRINTER is not set
-+
-+#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-+#
-+CONFIG_USB_STORAGE=y
-+# CONFIG_USB_STORAGE_DEBUG is not set
-+# CONFIG_USB_STORAGE_DATAFAB is not set
-+# CONFIG_USB_STORAGE_FREECOM is not set
-+# CONFIG_USB_STORAGE_DPCM is not set
-+# CONFIG_USB_STORAGE_USBAT is not set
-+# CONFIG_USB_STORAGE_SDDR09 is not set
-+# CONFIG_USB_STORAGE_SDDR55 is not set
-+# CONFIG_USB_STORAGE_JUMPSHOT is not set
-+
-+#
-+# USB Input Devices
-+#
-+CONFIG_USB_HID=y
-+CONFIG_USB_HIDINPUT=y
-+# CONFIG_HID_FF is not set
-+CONFIG_USB_HIDDEV=y
-+# CONFIG_USB_AIPTEK is not set
-+# CONFIG_USB_WACOM is not set
-+# CONFIG_USB_KBTAB is not set
-+# CONFIG_USB_POWERMATE is not set
-+# CONFIG_USB_MTOUCH is not set
-+# CONFIG_USB_EGALAX is not set
-+# CONFIG_USB_XPAD is not set
-+# CONFIG_USB_ATI_REMOTE is not set
-+
-+#
-+# USB Imaging devices
-+#
-+# CONFIG_USB_MDC800 is not set
-+# CONFIG_USB_MICROTEK is not set
-+
-+#
-+# USB Multimedia devices
-+#
-+# CONFIG_USB_DABUSB is not set
-+# CONFIG_USB_VICAM is not set
-+# CONFIG_USB_DSBR is not set
-+# CONFIG_USB_IBMCAM is not set
-+# CONFIG_USB_KONICAWC is not set
-+# CONFIG_USB_OV511 is not set
-+# CONFIG_USB_SE401 is not set
-+# CONFIG_USB_SN9C102 is not set
-+# CONFIG_USB_STV680 is not set
-+# CONFIG_USB_PWC is not set
-+
-+#
-+# USB Network Adapters
-+#
-+# CONFIG_USB_CATC is not set
-+# CONFIG_USB_KAWETH is not set
-+# CONFIG_USB_PEGASUS is not set
-+# CONFIG_USB_RTL8150 is not set
-+# CONFIG_USB_USBNET is not set
-+CONFIG_USB_MON=y
-+
-+#
-+# USB port drivers
-+#
-+
-+#
-+# USB Serial Converter support
-+#
-+# CONFIG_USB_SERIAL is not set
-+
-+#
-+# USB Miscellaneous drivers
-+#
-+# CONFIG_USB_EMI62 is not set
-+# CONFIG_USB_EMI26 is not set
-+# CONFIG_USB_AUERSWALD is not set
-+# CONFIG_USB_RIO500 is not set
-+# CONFIG_USB_LEGOTOWER is not set
-+# CONFIG_USB_LCD is not set
-+# CONFIG_USB_LED is not set
-+# CONFIG_USB_CYTHERM is not set
-+# CONFIG_USB_PHIDGETKIT is not set
-+# CONFIG_USB_PHIDGETSERVO is not set
-+# CONFIG_USB_IDMOUSE is not set
-+# CONFIG_USB_SISUSBVGA is not set
-+
-+#
-+# USB ATM/DSL drivers
-+#
-+
-+#
-+# USB Gadget Support
-+#
-+# CONFIG_USB_GADGET is not set
-+
-+#
-+# MMC/SD Card support
-+#
-+# CONFIG_MMC is not set
-+
-+#
-+# InfiniBand support
-+#
-+# CONFIG_INFINIBAND is not set
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+CONFIG_EXT2_FS_XATTR=y
-+# CONFIG_EXT2_FS_POSIX_ACL is not set
-+# CONFIG_EXT2_FS_SECURITY is not set
-+CONFIG_EXT3_FS=y
-+CONFIG_EXT3_FS_XATTR=y
-+# CONFIG_EXT3_FS_POSIX_ACL is not set
-+# CONFIG_EXT3_FS_SECURITY is not set
-+CONFIG_JBD=y
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+# CONFIG_REISERFS_FS is not set
-+# CONFIG_JFS_FS is not set
-+
-+#
-+# XFS support
-+#
-+# CONFIG_XFS_FS is not set
-+# CONFIG_MINIX_FS is not set
-+# CONFIG_ROMFS_FS is not set
-+# CONFIG_QUOTA is not set
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=y
-+# CONFIG_AUTOFS4_FS is not set
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=y
-+CONFIG_JOLIET=y
-+# CONFIG_ZISOFS is not set
-+CONFIG_UDF_FS=y
-+CONFIG_UDF_NLS=y
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=y
-+CONFIG_MSDOS_FS=y
-+CONFIG_VFAT_FS=y
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-+# CONFIG_NTFS_FS is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+# CONFIG_DEVPTS_FS_XATTR is not set
-+CONFIG_TMPFS=y
-+CONFIG_TMPFS_XATTR=y
-+CONFIG_TMPFS_SECURITY=y
-+CONFIG_HUGETLBFS=y
-+CONFIG_HUGETLB_PAGE=y
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+# CONFIG_ADFS_FS is not set
-+# CONFIG_AFFS_FS is not set
-+# CONFIG_HFS_FS is not set
-+# CONFIG_HFSPLUS_FS is not set
-+# CONFIG_BEFS_FS is not set
-+# CONFIG_BFS_FS is not set
-+# CONFIG_EFS_FS is not set
-+# CONFIG_CRAMFS is not set
-+# CONFIG_VXFS_FS is not set
-+# CONFIG_HPFS_FS is not set
-+# CONFIG_QNX4FS_FS is not set
-+# CONFIG_SYSV_FS is not set
-+# CONFIG_UFS_FS is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=y
-+CONFIG_NFS_V3=y
-+CONFIG_NFS_V4=y
-+# CONFIG_NFS_DIRECTIO is not set
-+CONFIG_NFSD=y
-+CONFIG_NFSD_V3=y
-+# CONFIG_NFSD_V4 is not set
-+# CONFIG_NFSD_TCP is not set
-+CONFIG_LOCKD=y
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=y
-+CONFIG_SUNRPC=y
-+CONFIG_SUNRPC_GSS=y
-+CONFIG_RPCSEC_GSS_KRB5=y
-+# CONFIG_RPCSEC_GSS_SPKM3 is not set
-+# CONFIG_SMB_FS is not set
-+# CONFIG_CIFS is not set
-+# CONFIG_NCP_FS is not set
-+# CONFIG_CODA_FS is not set
-+# CONFIG_AFS_FS is not set
-+
-+#
-+# Partition Types
-+#
-+CONFIG_PARTITION_ADVANCED=y
-+# CONFIG_ACORN_PARTITION is not set
-+# CONFIG_OSF_PARTITION is not set
-+# CONFIG_AMIGA_PARTITION is not set
-+# CONFIG_ATARI_PARTITION is not set
-+# CONFIG_MAC_PARTITION is not set
-+CONFIG_MSDOS_PARTITION=y
-+# CONFIG_BSD_DISKLABEL is not set
-+# CONFIG_MINIX_SUBPARTITION is not set
-+# CONFIG_SOLARIS_X86_PARTITION is not set
-+# CONFIG_UNIXWARE_DISKLABEL is not set
-+# CONFIG_LDM_PARTITION is not set
-+# CONFIG_SGI_PARTITION is not set
-+# CONFIG_ULTRIX_PARTITION is not set
-+# CONFIG_SUN_PARTITION is not set
-+CONFIG_EFI_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="iso8859-1"
-+CONFIG_NLS_CODEPAGE_437=y
-+CONFIG_NLS_CODEPAGE_737=y
-+CONFIG_NLS_CODEPAGE_775=y
-+CONFIG_NLS_CODEPAGE_850=y
-+CONFIG_NLS_CODEPAGE_852=y
-+CONFIG_NLS_CODEPAGE_855=y
-+CONFIG_NLS_CODEPAGE_857=y
-+CONFIG_NLS_CODEPAGE_860=y
-+CONFIG_NLS_CODEPAGE_861=y
-+CONFIG_NLS_CODEPAGE_862=y
-+CONFIG_NLS_CODEPAGE_863=y
-+CONFIG_NLS_CODEPAGE_864=y
-+CONFIG_NLS_CODEPAGE_865=y
-+CONFIG_NLS_CODEPAGE_866=y
-+CONFIG_NLS_CODEPAGE_869=y
-+CONFIG_NLS_CODEPAGE_936=y
-+CONFIG_NLS_CODEPAGE_950=y
-+CONFIG_NLS_CODEPAGE_932=y
-+CONFIG_NLS_CODEPAGE_949=y
-+CONFIG_NLS_CODEPAGE_874=y
-+CONFIG_NLS_ISO8859_8=y
-+# CONFIG_NLS_CODEPAGE_1250 is not set
-+CONFIG_NLS_CODEPAGE_1251=y
-+# CONFIG_NLS_ASCII is not set
-+CONFIG_NLS_ISO8859_1=y
-+CONFIG_NLS_ISO8859_2=y
-+CONFIG_NLS_ISO8859_3=y
-+CONFIG_NLS_ISO8859_4=y
-+CONFIG_NLS_ISO8859_5=y
-+CONFIG_NLS_ISO8859_6=y
-+CONFIG_NLS_ISO8859_7=y
-+CONFIG_NLS_ISO8859_9=y
-+CONFIG_NLS_ISO8859_13=y
-+CONFIG_NLS_ISO8859_14=y
-+CONFIG_NLS_ISO8859_15=y
-+CONFIG_NLS_KOI8_R=y
-+CONFIG_NLS_KOI8_U=y
-+CONFIG_NLS_UTF8=y
-+
-+#
-+# Library routines
-+#
-+# CONFIG_CRC_CCITT is not set
-+CONFIG_CRC32=y
-+# CONFIG_LIBCRC32C is not set
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+
-+#
-+# Profiling support
-+#
-+# CONFIG_PROFILING is not set
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=17
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+CONFIG_IA64_GRANULE_16MB=y
-+# CONFIG_IA64_GRANULE_64MB is not set
-+CONFIG_IA64_PRINT_HAZARDS=y
-+# CONFIG_DISABLE_VHPT is not set
-+# CONFIG_IA64_DEBUG_CMPXCHG is not set
-+# CONFIG_IA64_DEBUG_IRQ is not set
-+
-+#
-+# Security options
-+#
-+# CONFIG_KEYS is not set
-+# CONFIG_SECURITY is not set
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+# CONFIG_CRYPTO_HMAC is not set
-+# CONFIG_CRYPTO_NULL is not set
-+# CONFIG_CRYPTO_MD4 is not set
-+CONFIG_CRYPTO_MD5=y
-+# CONFIG_CRYPTO_SHA1 is not set
-+# CONFIG_CRYPTO_SHA256 is not set
-+# CONFIG_CRYPTO_SHA512 is not set
-+# CONFIG_CRYPTO_WP512 is not set
-+# CONFIG_CRYPTO_TGR192 is not set
-+CONFIG_CRYPTO_DES=y
-+# CONFIG_CRYPTO_BLOWFISH is not set
-+# CONFIG_CRYPTO_TWOFISH is not set
-+# CONFIG_CRYPTO_SERPENT is not set
-+# CONFIG_CRYPTO_AES is not set
-+# CONFIG_CRYPTO_CAST5 is not set
-+# CONFIG_CRYPTO_CAST6 is not set
-+# CONFIG_CRYPTO_TEA is not set
-+# CONFIG_CRYPTO_ARC4 is not set
-+# CONFIG_CRYPTO_KHAZAD is not set
-+# CONFIG_CRYPTO_ANUBIS is not set
-+# CONFIG_CRYPTO_DEFLATE is not set
-+# CONFIG_CRYPTO_MICHAEL_MIC is not set
-+# CONFIG_CRYPTO_CRC32C is not set
-+# CONFIG_CRYPTO_TEST is not set
-+
-+#
-+# Hardware crypto devices
-+#
-diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_x86_32 linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_x86_32
---- pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_x86_32	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_x86_32	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,562 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12-xenU
-+# Wed Aug  3 09:57:44 2005
-+#
-+CONFIG_XEN=y
-+CONFIG_ARCH_XEN=y
-+CONFIG_NO_IDLE_HZ=y
-+
-+#
-+# XEN
-+#
-+# CONFIG_XEN_PRIVILEGED_GUEST is not set
-+# CONFIG_XEN_PHYSDEV_ACCESS is not set
-+# CONFIG_XEN_TPMDEV_FRONTEND is not set
-+# CONFIG_XEN_TPMDEV_BACKEND is not set
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_XEN_NETDEV_FRONTEND=y
-+# CONFIG_XEN_BLKDEV_TAP is not set
-+# CONFIG_XEN_SHADOW_MODE is not set
-+CONFIG_XEN_SCRUB_PAGES=y
-+CONFIG_XEN_X86=y
-+# CONFIG_XEN_X86_64 is not set
-+CONFIG_HAVE_ARCH_ALLOC_SKB=y
-+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+CONFIG_CLEAN_COMPILE=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+# CONFIG_POSIX_MQUEUE is not set
-+# CONFIG_BSD_PROCESS_ACCT is not set
-+CONFIG_SYSCTL=y
-+# CONFIG_AUDIT is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+# CONFIG_IKCONFIG is not set
-+# CONFIG_CPUSETS is not set
-+# CONFIG_EMBEDDED is not set
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+# CONFIG_KALLSYMS_EXTRA_PASS is not set
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+# CONFIG_MODULE_FORCE_UNLOAD is not set
-+CONFIG_OBSOLETE_MODPARM=y
-+# CONFIG_MODVERSIONS is not set
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+CONFIG_KMOD=y
-+CONFIG_STOP_MACHINE=y
-+
-+#
-+# X86 Processor Configuration
-+#
-+CONFIG_XENARCH="i386"
-+CONFIG_X86=y
-+CONFIG_MMU=y
-+CONFIG_UID16=y
-+CONFIG_GENERIC_ISA_DMA=y
-+CONFIG_GENERIC_IOMAP=y
-+# CONFIG_M386 is not set
-+# CONFIG_M486 is not set
-+# CONFIG_M586 is not set
-+# CONFIG_M586TSC is not set
-+# CONFIG_M586MMX is not set
-+CONFIG_M686=y
-+# CONFIG_MPENTIUMII is not set
-+# CONFIG_MPENTIUMIII is not set
-+# CONFIG_MPENTIUMM is not set
-+# CONFIG_MPENTIUM4 is not set
-+# CONFIG_MK6 is not set
-+# CONFIG_MK7 is not set
-+# CONFIG_MK8 is not set
-+# CONFIG_MCRUSOE is not set
-+# CONFIG_MEFFICEON is not set
-+# CONFIG_MWINCHIPC6 is not set
-+# CONFIG_MWINCHIP2 is not set
-+# CONFIG_MWINCHIP3D is not set
-+# CONFIG_MGEODEGX1 is not set
-+# CONFIG_MCYRIXIII is not set
-+# CONFIG_MVIAC3_2 is not set
-+# CONFIG_X86_GENERIC is not set
-+CONFIG_X86_CMPXCHG=y
-+CONFIG_X86_XADD=y
-+CONFIG_X86_L1_CACHE_SHIFT=5
-+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_X86_PPRO_FENCE=y
-+CONFIG_X86_WP_WORKS_OK=y
-+CONFIG_X86_INVLPG=y
-+CONFIG_X86_BSWAP=y
-+CONFIG_X86_POPAD_OK=y
-+CONFIG_X86_GOOD_APIC=y
-+CONFIG_X86_USE_PPRO_CHECKSUM=y
-+# CONFIG_HPET_TIMER is not set
-+# CONFIG_HPET_EMULATE_RTC is not set
-+CONFIG_SMP=y
-+CONFIG_SMP_ALTERNATIVES=y
-+CONFIG_NR_CPUS=8
-+# CONFIG_SCHED_SMT is not set
-+# CONFIG_X86_REBOOTFIXUPS is not set
-+CONFIG_X86_CPUID=y
-+
-+#
-+# Firmware Drivers
-+#
-+# CONFIG_EDD is not set
-+# CONFIG_NOHIGHMEM is not set
-+CONFIG_HIGHMEM4G=y
-+# CONFIG_HIGHMEM64G is not set
-+CONFIG_HIGHMEM=y
-+CONFIG_HAVE_DEC_LOCK=y
-+# CONFIG_REGPARM is not set
-+CONFIG_HOTPLUG_CPU=y
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_X86_SMP=y
-+CONFIG_X86_BIOS_REBOOT=y
-+CONFIG_X86_TRAMPOLINE=y
-+CONFIG_PC=y
-+CONFIG_SECCOMP=y
-+CONFIG_EARLY_PRINTK=y
-+
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+# CONFIG_BINFMT_AOUT is not set
-+# CONFIG_BINFMT_MISC is not set
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+# CONFIG_FW_LOADER is not set
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Block devices
-+#
-+# CONFIG_BLK_DEV_FD is not set
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=m
-+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-+CONFIG_BLK_DEV_NBD=m
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=4096
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+# CONFIG_LBD is not set
-+# CONFIG_CDROM_PKTCDVD is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+# CONFIG_ATA_OVER_ETH is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=m
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=m
-+# CONFIG_CHR_DEV_ST is not set
-+# CONFIG_CHR_DEV_OSST is not set
-+# CONFIG_BLK_DEV_SR is not set
-+# CONFIG_CHR_DEV_SG is not set
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+# CONFIG_SCSI_MULTI_LUN is not set
-+# CONFIG_SCSI_CONSTANTS is not set
-+# CONFIG_SCSI_LOGGING is not set
-+
-+#
-+# SCSI Transport Attributes
-+#
-+# CONFIG_SCSI_SPI_ATTRS is not set
-+# CONFIG_SCSI_FC_ATTRS is not set
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+
-+#
-+# SCSI low-level drivers
-+#
-+# CONFIG_SCSI_SATA is not set
-+# CONFIG_SCSI_DEBUG is not set
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+# CONFIG_MD is not set
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=y
-+# CONFIG_PACKET_MMAP is not set
-+CONFIG_UNIX=y
-+# CONFIG_NET_KEY is not set
-+CONFIG_INET=y
-+# CONFIG_IP_MULTICAST is not set
-+# CONFIG_IP_ADVANCED_ROUTER is not set
-+CONFIG_IP_PNP=y
-+# CONFIG_IP_PNP_DHCP is not set
-+# CONFIG_IP_PNP_BOOTP is not set
-+# CONFIG_IP_PNP_RARP is not set
-+# CONFIG_NET_IPIP is not set
-+# CONFIG_NET_IPGRE is not set
-+# CONFIG_ARPD is not set
-+# CONFIG_SYN_COOKIES is not set
-+# CONFIG_INET_AH is not set
-+# CONFIG_INET_ESP is not set
-+# CONFIG_INET_IPCOMP is not set
-+# CONFIG_INET_TUNNEL is not set
-+CONFIG_IP_TCPDIAG=y
-+# CONFIG_IP_TCPDIAG_IPV6 is not set
-+# CONFIG_IPV6 is not set
-+# CONFIG_NETFILTER is not set
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+# CONFIG_IP_SCTP is not set
-+# CONFIG_ATM is not set
-+# CONFIG_BRIDGE is not set
-+# CONFIG_VLAN_8021Q is not set
-+# CONFIG_DECNET is not set
-+# CONFIG_LLC2 is not set
-+# CONFIG_IPX is not set
-+# CONFIG_ATALK is not set
-+# CONFIG_X25 is not set
-+# CONFIG_LAPB is not set
-+# CONFIG_NET_DIVERT is not set
-+# CONFIG_ECONET is not set
-+# CONFIG_WAN_ROUTER is not set
-+
-+#
-+# QoS and/or fair queueing
-+#
-+# CONFIG_NET_SCHED is not set
-+# CONFIG_NET_CLS_ROUTE is not set
-+
-+#
-+# Network testing
-+#
-+# CONFIG_NET_PKTGEN is not set
-+# CONFIG_NETPOLL is not set
-+# CONFIG_NET_POLL_CONTROLLER is not set
-+# CONFIG_HAMRADIO is not set
-+# CONFIG_IRDA is not set
-+# CONFIG_BT is not set
-+CONFIG_NETDEVICES=y
-+# CONFIG_DUMMY is not set
-+# CONFIG_BONDING is not set
-+# CONFIG_EQUALIZER is not set
-+# CONFIG_TUN is not set
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+# CONFIG_NET_ETHERNET is not set
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+
-+#
-+# Token Ring devices
-+#
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+# CONFIG_NET_RADIO is not set
-+
-+#
-+# Wan interfaces
-+#
-+# CONFIG_WAN is not set
-+# CONFIG_PPP is not set
-+# CONFIG_SLIP is not set
-+# CONFIG_SHAPER is not set
-+# CONFIG_NETCONSOLE is not set
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# Character devices
-+#
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+# CONFIG_EXT2_FS_XATTR is not set
-+CONFIG_EXT3_FS=y
-+CONFIG_EXT3_FS_XATTR=y
-+# CONFIG_EXT3_FS_POSIX_ACL is not set
-+# CONFIG_EXT3_FS_SECURITY is not set
-+CONFIG_JBD=y
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+CONFIG_REISERFS_FS=y
-+# CONFIG_REISERFS_CHECK is not set
-+# CONFIG_REISERFS_PROC_INFO is not set
-+# CONFIG_REISERFS_FS_XATTR is not set
-+# CONFIG_JFS_FS is not set
-+
-+#
-+# XFS support
-+#
-+# CONFIG_XFS_FS is not set
-+# CONFIG_MINIX_FS is not set
-+# CONFIG_ROMFS_FS is not set
-+# CONFIG_QUOTA is not set
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=y
-+CONFIG_AUTOFS4_FS=y
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=y
-+CONFIG_JOLIET=y
-+CONFIG_ZISOFS=y
-+CONFIG_ZISOFS_FS=y
-+# CONFIG_UDF_FS is not set
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=m
-+CONFIG_MSDOS_FS=m
-+CONFIG_VFAT_FS=m
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-+# CONFIG_NTFS_FS is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+CONFIG_DEVPTS_FS_XATTR=y
-+# CONFIG_DEVPTS_FS_SECURITY is not set
-+CONFIG_TMPFS=y
-+# CONFIG_TMPFS_XATTR is not set
-+# CONFIG_HUGETLBFS is not set
-+# CONFIG_HUGETLB_PAGE is not set
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+# CONFIG_ADFS_FS is not set
-+# CONFIG_AFFS_FS is not set
-+# CONFIG_HFS_FS is not set
-+# CONFIG_HFSPLUS_FS is not set
-+# CONFIG_BEFS_FS is not set
-+# CONFIG_BFS_FS is not set
-+# CONFIG_EFS_FS is not set
-+CONFIG_CRAMFS=y
-+# CONFIG_VXFS_FS is not set
-+# CONFIG_HPFS_FS is not set
-+# CONFIG_QNX4FS_FS is not set
-+# CONFIG_SYSV_FS is not set
-+# CONFIG_UFS_FS is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=y
-+CONFIG_NFS_V3=y
-+# CONFIG_NFS_V4 is not set
-+# CONFIG_NFS_DIRECTIO is not set
-+# CONFIG_NFSD is not set
-+CONFIG_ROOT_NFS=y
-+CONFIG_LOCKD=y
-+CONFIG_LOCKD_V4=y
-+CONFIG_SUNRPC=y
-+# CONFIG_RPCSEC_GSS_KRB5 is not set
-+# CONFIG_RPCSEC_GSS_SPKM3 is not set
-+# CONFIG_SMB_FS is not set
-+# CONFIG_CIFS is not set
-+# CONFIG_NCP_FS is not set
-+# CONFIG_CODA_FS is not set
-+# CONFIG_AFS_FS is not set
-+
-+#
-+# Partition Types
-+#
-+# CONFIG_PARTITION_ADVANCED is not set
-+CONFIG_MSDOS_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="iso8859-1"
-+CONFIG_NLS_CODEPAGE_437=y
-+# CONFIG_NLS_CODEPAGE_737 is not set
-+# CONFIG_NLS_CODEPAGE_775 is not set
-+# CONFIG_NLS_CODEPAGE_850 is not set
-+# CONFIG_NLS_CODEPAGE_852 is not set
-+# CONFIG_NLS_CODEPAGE_855 is not set
-+# CONFIG_NLS_CODEPAGE_857 is not set
-+# CONFIG_NLS_CODEPAGE_860 is not set
-+# CONFIG_NLS_CODEPAGE_861 is not set
-+# CONFIG_NLS_CODEPAGE_862 is not set
-+# CONFIG_NLS_CODEPAGE_863 is not set
-+# CONFIG_NLS_CODEPAGE_864 is not set
-+# CONFIG_NLS_CODEPAGE_865 is not set
-+# CONFIG_NLS_CODEPAGE_866 is not set
-+# CONFIG_NLS_CODEPAGE_869 is not set
-+# CONFIG_NLS_CODEPAGE_936 is not set
-+# CONFIG_NLS_CODEPAGE_950 is not set
-+# CONFIG_NLS_CODEPAGE_932 is not set
-+# CONFIG_NLS_CODEPAGE_949 is not set
-+# CONFIG_NLS_CODEPAGE_874 is not set
-+# CONFIG_NLS_ISO8859_8 is not set
-+# CONFIG_NLS_CODEPAGE_1250 is not set
-+# CONFIG_NLS_CODEPAGE_1251 is not set
-+# CONFIG_NLS_ASCII is not set
-+CONFIG_NLS_ISO8859_1=y
-+# CONFIG_NLS_ISO8859_2 is not set
-+# CONFIG_NLS_ISO8859_3 is not set
-+# CONFIG_NLS_ISO8859_4 is not set
-+# CONFIG_NLS_ISO8859_5 is not set
-+# CONFIG_NLS_ISO8859_6 is not set
-+# CONFIG_NLS_ISO8859_7 is not set
-+# CONFIG_NLS_ISO8859_9 is not set
-+# CONFIG_NLS_ISO8859_13 is not set
-+# CONFIG_NLS_ISO8859_14 is not set
-+# CONFIG_NLS_ISO8859_15 is not set
-+# CONFIG_NLS_KOI8_R is not set
-+# CONFIG_NLS_KOI8_U is not set
-+# CONFIG_NLS_UTF8 is not set
-+
-+#
-+# Security options
-+#
-+# CONFIG_KEYS is not set
-+# CONFIG_SECURITY is not set
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+# CONFIG_CRYPTO_HMAC is not set
-+# CONFIG_CRYPTO_NULL is not set
-+# CONFIG_CRYPTO_MD4 is not set
-+CONFIG_CRYPTO_MD5=m
-+# CONFIG_CRYPTO_SHA1 is not set
-+# CONFIG_CRYPTO_SHA256 is not set
-+# CONFIG_CRYPTO_SHA512 is not set
-+# CONFIG_CRYPTO_WP512 is not set
-+# CONFIG_CRYPTO_TGR192 is not set
-+# CONFIG_CRYPTO_DES is not set
-+# CONFIG_CRYPTO_BLOWFISH is not set
-+# CONFIG_CRYPTO_TWOFISH is not set
-+# CONFIG_CRYPTO_SERPENT is not set
-+# CONFIG_CRYPTO_AES_586 is not set
-+# CONFIG_CRYPTO_CAST5 is not set
-+# CONFIG_CRYPTO_CAST6 is not set
-+# CONFIG_CRYPTO_TEA is not set
-+# CONFIG_CRYPTO_ARC4 is not set
-+# CONFIG_CRYPTO_KHAZAD is not set
-+# CONFIG_CRYPTO_ANUBIS is not set
-+# CONFIG_CRYPTO_DEFLATE is not set
-+# CONFIG_CRYPTO_MICHAEL_MIC is not set
-+CONFIG_CRYPTO_CRC32C=m
-+# CONFIG_CRYPTO_TEST is not set
-+
-+#
-+# Hardware crypto devices
-+#
-+# CONFIG_CRYPTO_DEV_PADLOCK is not set
-+
-+#
-+# Library routines
-+#
-+# CONFIG_CRC_CCITT is not set
-+# CONFIG_CRC32 is not set
-+CONFIG_LIBCRC32C=m
-+CONFIG_ZLIB_INFLATE=y
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=14
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_HIGHMEM is not set
-+CONFIG_DEBUG_BUGVERBOSE=y
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+# CONFIG_FRAME_POINTER is not set
-+# CONFIG_DEBUG_STACKOVERFLOW is not set
-+# CONFIG_KPROBES is not set
-+# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_DEBUG_PAGEALLOC is not set
-+# CONFIG_4KSTACKS is not set
-diff -Nurp pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_x86_64 linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_x86_64
---- pristine-linux-2.6.12/arch/xen/configs/xenU_defconfig_x86_64	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/configs/xenU_defconfig_x86_64	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,939 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12-xenU
-+# Thu Aug 18 11:15:14 2005
-+#
-+CONFIG_XEN=y
-+CONFIG_ARCH_XEN=y
-+CONFIG_NO_IDLE_HZ=y
-+
-+#
-+# XEN
-+#
-+# CONFIG_XEN_PRIVILEGED_GUEST is not set
-+# CONFIG_XEN_PHYSDEV_ACCESS is not set
-+# CONFIG_XEN_TPMDEV_FRONTEND is not set
-+# CONFIG_XEN_TPMDEV_BACKEND is not set
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_XEN_NETDEV_FRONTEND=y
-+# CONFIG_XEN_BLKDEV_TAP is not set
-+# CONFIG_XEN_SHADOW_MODE is not set
-+CONFIG_XEN_SCRUB_PAGES=y
-+# CONFIG_XEN_X86 is not set
-+CONFIG_XEN_X86_64=y
-+CONFIG_HAVE_ARCH_ALLOC_SKB=y
-+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+CONFIG_CLEAN_COMPILE=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+CONFIG_POSIX_MQUEUE=y
-+CONFIG_BSD_PROCESS_ACCT=y
-+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-+CONFIG_SYSCTL=y
-+CONFIG_AUDIT=y
-+CONFIG_AUDITSYSCALL=y
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+# CONFIG_IKCONFIG is not set
-+# CONFIG_CPUSETS is not set
-+# CONFIG_EMBEDDED is not set
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+CONFIG_KALLSYMS_EXTRA_PASS=y
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+# CONFIG_MODULE_FORCE_UNLOAD is not set
-+CONFIG_OBSOLETE_MODPARM=y
-+CONFIG_MODVERSIONS=y
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+CONFIG_KMOD=y
-+CONFIG_STOP_MACHINE=y
-+CONFIG_XENARCH="x86_64"
-+CONFIG_X86=y
-+CONFIG_MMU=y
-+CONFIG_UID16=y
-+CONFIG_GENERIC_ISA_DMA=y
-+CONFIG_GENERIC_IOMAP=y
-+CONFIG_X86_CMPXCHG=y
-+CONFIG_X86_L1_CACHE_SHIFT=7
-+CONFIG_RWSEM_GENERIC_SPINLOCK=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_X86_GOOD_APIC=y
-+# CONFIG_HPET_TIMER is not set
-+CONFIG_SMP=y
-+CONFIG_NR_CPUS=8
-+# CONFIG_SCHED_SMT is not set
-+# CONFIG_MICROCODE is not set
-+CONFIG_X86_CPUID=y
-+# CONFIG_NUMA is not set
-+# CONFIG_MTRR is not set
-+CONFIG_HAVE_DEC_LOCK=y
-+# CONFIG_X86_LOCAL_APIC is not set
-+# CONFIG_X86_IO_APIC is not set
-+# CONFIG_PCI is not set
-+CONFIG_ISA_DMA_API=y
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_SECCOMP=y
-+
-+#
-+# X86_64 processor configuration
-+#
-+CONFIG_X86_64=y
-+CONFIG_64BIT=y
-+CONFIG_EARLY_PRINTK=y
-+
-+#
-+# Processor type and features
-+#
-+CONFIG_MPSC=y
-+# CONFIG_GENERIC_CPU is not set
-+CONFIG_X86_L1_CACHE_BYTES=128
-+# CONFIG_X86_TSC is not set
-+CONFIG_X86_XEN_GENAPIC=y
-+# CONFIG_X86_MSR is not set
-+CONFIG_X86_HT=y
-+# CONFIG_K8_NUMA is not set
-+# CONFIG_NUMA_EMU is not set
-+CONFIG_DUMMY_IOMMU=y
-+# CONFIG_X86_MCE is not set
-+
-+#
-+# Power management options
-+#
-+# CONFIG_PM is not set
-+
-+#
-+# CPU Frequency scaling
-+#
-+# CONFIG_CPU_FREQ is not set
-+
-+#
-+# Bus options (PCI etc.)
-+#
-+# CONFIG_UNORDERED_IO is not set
-+
-+#
-+# Executable file formats / Emulations
-+#
-+CONFIG_IA32_EMULATION=y
-+# CONFIG_IA32_AOUT is not set
-+CONFIG_COMPAT=y
-+CONFIG_SYSVIPC_COMPAT=y
-+
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+CONFIG_BINFMT_MISC=y
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+CONFIG_FW_LOADER=y
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Block devices
-+#
-+CONFIG_BLK_DEV_FD=m
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=m
-+CONFIG_BLK_DEV_CRYPTOLOOP=m
-+CONFIG_BLK_DEV_NBD=m
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=16384
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+CONFIG_LBD=y
-+# CONFIG_CDROM_PKTCDVD is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+# CONFIG_ATA_OVER_ETH is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=m
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=m
-+CONFIG_CHR_DEV_ST=m
-+CONFIG_CHR_DEV_OSST=m
-+CONFIG_BLK_DEV_SR=m
-+CONFIG_BLK_DEV_SR_VENDOR=y
-+CONFIG_CHR_DEV_SG=m
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+# CONFIG_SCSI_MULTI_LUN is not set
-+CONFIG_SCSI_CONSTANTS=y
-+CONFIG_SCSI_LOGGING=y
-+
-+#
-+# SCSI Transport Attributes
-+#
-+CONFIG_SCSI_SPI_ATTRS=m
-+CONFIG_SCSI_FC_ATTRS=m
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+
-+#
-+# SCSI low-level drivers
-+#
-+CONFIG_SCSI_SATA=y
-+# CONFIG_SCSI_DEBUG is not set
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+CONFIG_MD=y
-+CONFIG_BLK_DEV_MD=y
-+CONFIG_MD_LINEAR=m
-+CONFIG_MD_RAID0=m
-+CONFIG_MD_RAID1=m
-+CONFIG_MD_RAID10=m
-+CONFIG_MD_RAID5=m
-+CONFIG_MD_RAID6=m
-+CONFIG_MD_MULTIPATH=m
-+# CONFIG_MD_FAULTY is not set
-+CONFIG_BLK_DEV_DM=m
-+CONFIG_DM_CRYPT=m
-+CONFIG_DM_SNAPSHOT=m
-+CONFIG_DM_MIRROR=m
-+CONFIG_DM_ZERO=m
-+# CONFIG_DM_MULTIPATH is not set
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=y
-+CONFIG_PACKET_MMAP=y
-+CONFIG_UNIX=y
-+CONFIG_NET_KEY=m
-+CONFIG_INET=y
-+CONFIG_IP_MULTICAST=y
-+CONFIG_IP_ADVANCED_ROUTER=y
-+CONFIG_IP_MULTIPLE_TABLES=y
-+CONFIG_IP_ROUTE_FWMARK=y
-+CONFIG_IP_ROUTE_MULTIPATH=y
-+# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
-+CONFIG_IP_ROUTE_VERBOSE=y
-+CONFIG_IP_PNP=y
-+CONFIG_IP_PNP_DHCP=y
-+CONFIG_IP_PNP_BOOTP=y
-+CONFIG_IP_PNP_RARP=y
-+CONFIG_NET_IPIP=m
-+CONFIG_NET_IPGRE=m
-+CONFIG_NET_IPGRE_BROADCAST=y
-+CONFIG_IP_MROUTE=y
-+CONFIG_IP_PIMSM_V1=y
-+CONFIG_IP_PIMSM_V2=y
-+# CONFIG_ARPD is not set
-+CONFIG_SYN_COOKIES=y
-+CONFIG_INET_AH=m
-+CONFIG_INET_ESP=m
-+CONFIG_INET_IPCOMP=m
-+CONFIG_INET_TUNNEL=m
-+CONFIG_IP_TCPDIAG=y
-+# CONFIG_IP_TCPDIAG_IPV6 is not set
-+
-+#
-+# IP: Virtual Server Configuration
-+#
-+CONFIG_IP_VS=m
-+# CONFIG_IP_VS_DEBUG is not set
-+CONFIG_IP_VS_TAB_BITS=12
-+
-+#
-+# IPVS transport protocol load balancing support
-+#
-+CONFIG_IP_VS_PROTO_TCP=y
-+CONFIG_IP_VS_PROTO_UDP=y
-+CONFIG_IP_VS_PROTO_ESP=y
-+CONFIG_IP_VS_PROTO_AH=y
-+
-+#
-+# IPVS scheduler
-+#
-+CONFIG_IP_VS_RR=m
-+CONFIG_IP_VS_WRR=m
-+CONFIG_IP_VS_LC=m
-+CONFIG_IP_VS_WLC=m
-+CONFIG_IP_VS_LBLC=m
-+CONFIG_IP_VS_LBLCR=m
-+CONFIG_IP_VS_DH=m
-+CONFIG_IP_VS_SH=m
-+CONFIG_IP_VS_SED=m
-+CONFIG_IP_VS_NQ=m
-+
-+#
-+# IPVS application helper
-+#
-+CONFIG_IP_VS_FTP=m
-+CONFIG_IPV6=m
-+CONFIG_IPV6_PRIVACY=y
-+CONFIG_INET6_AH=m
-+CONFIG_INET6_ESP=m
-+CONFIG_INET6_IPCOMP=m
-+CONFIG_INET6_TUNNEL=m
-+CONFIG_IPV6_TUNNEL=m
-+CONFIG_NETFILTER=y
-+# CONFIG_NETFILTER_DEBUG is not set
-+CONFIG_BRIDGE_NETFILTER=y
-+
-+#
-+# IP: Netfilter Configuration
-+#
-+CONFIG_IP_NF_CONNTRACK=m
-+CONFIG_IP_NF_CT_ACCT=y
-+# CONFIG_IP_NF_CONNTRACK_MARK is not set
-+CONFIG_IP_NF_CT_PROTO_SCTP=m
-+CONFIG_IP_NF_FTP=m
-+CONFIG_IP_NF_IRC=m
-+CONFIG_IP_NF_TFTP=m
-+CONFIG_IP_NF_AMANDA=m
-+CONFIG_IP_NF_QUEUE=m
-+CONFIG_IP_NF_IPTABLES=m
-+CONFIG_IP_NF_MATCH_LIMIT=m
-+CONFIG_IP_NF_MATCH_IPRANGE=m
-+CONFIG_IP_NF_MATCH_MAC=m
-+CONFIG_IP_NF_MATCH_PKTTYPE=m
-+CONFIG_IP_NF_MATCH_MARK=m
-+CONFIG_IP_NF_MATCH_MULTIPORT=m
-+CONFIG_IP_NF_MATCH_TOS=m
-+CONFIG_IP_NF_MATCH_RECENT=m
-+CONFIG_IP_NF_MATCH_ECN=m
-+CONFIG_IP_NF_MATCH_DSCP=m
-+CONFIG_IP_NF_MATCH_AH_ESP=m
-+CONFIG_IP_NF_MATCH_LENGTH=m
-+CONFIG_IP_NF_MATCH_TTL=m
-+CONFIG_IP_NF_MATCH_TCPMSS=m
-+CONFIG_IP_NF_MATCH_HELPER=m
-+CONFIG_IP_NF_MATCH_STATE=m
-+CONFIG_IP_NF_MATCH_CONNTRACK=m
-+CONFIG_IP_NF_MATCH_OWNER=m
-+CONFIG_IP_NF_MATCH_PHYSDEV=m
-+CONFIG_IP_NF_MATCH_ADDRTYPE=m
-+CONFIG_IP_NF_MATCH_REALM=m
-+CONFIG_IP_NF_MATCH_SCTP=m
-+CONFIG_IP_NF_MATCH_COMMENT=m
-+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
-+CONFIG_IP_NF_FILTER=m
-+CONFIG_IP_NF_TARGET_REJECT=m
-+CONFIG_IP_NF_TARGET_LOG=m
-+CONFIG_IP_NF_TARGET_ULOG=m
-+CONFIG_IP_NF_TARGET_TCPMSS=m
-+CONFIG_IP_NF_NAT=m
-+CONFIG_IP_NF_NAT_NEEDED=y
-+CONFIG_IP_NF_TARGET_MASQUERADE=m
-+CONFIG_IP_NF_TARGET_REDIRECT=m
-+CONFIG_IP_NF_TARGET_NETMAP=m
-+CONFIG_IP_NF_TARGET_SAME=m
-+CONFIG_IP_NF_NAT_SNMP_BASIC=m
-+CONFIG_IP_NF_NAT_IRC=m
-+CONFIG_IP_NF_NAT_FTP=m
-+CONFIG_IP_NF_NAT_TFTP=m
-+CONFIG_IP_NF_NAT_AMANDA=m
-+CONFIG_IP_NF_MANGLE=m
-+CONFIG_IP_NF_TARGET_TOS=m
-+CONFIG_IP_NF_TARGET_ECN=m
-+CONFIG_IP_NF_TARGET_DSCP=m
-+CONFIG_IP_NF_TARGET_MARK=m
-+CONFIG_IP_NF_TARGET_CLASSIFY=m
-+CONFIG_IP_NF_RAW=m
-+CONFIG_IP_NF_TARGET_NOTRACK=m
-+CONFIG_IP_NF_ARPTABLES=m
-+CONFIG_IP_NF_ARPFILTER=m
-+CONFIG_IP_NF_ARP_MANGLE=m
-+
-+#
-+# IPv6: Netfilter Configuration (EXPERIMENTAL)
-+#
-+# CONFIG_IP6_NF_QUEUE is not set
-+CONFIG_IP6_NF_IPTABLES=m
-+CONFIG_IP6_NF_MATCH_LIMIT=m
-+CONFIG_IP6_NF_MATCH_MAC=m
-+CONFIG_IP6_NF_MATCH_RT=m
-+CONFIG_IP6_NF_MATCH_OPTS=m
-+CONFIG_IP6_NF_MATCH_FRAG=m
-+CONFIG_IP6_NF_MATCH_HL=m
-+CONFIG_IP6_NF_MATCH_MULTIPORT=m
-+CONFIG_IP6_NF_MATCH_OWNER=m
-+CONFIG_IP6_NF_MATCH_MARK=m
-+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-+CONFIG_IP6_NF_MATCH_AHESP=m
-+CONFIG_IP6_NF_MATCH_LENGTH=m
-+CONFIG_IP6_NF_MATCH_EUI64=m
-+CONFIG_IP6_NF_MATCH_PHYSDEV=m
-+CONFIG_IP6_NF_FILTER=m
-+CONFIG_IP6_NF_TARGET_LOG=m
-+CONFIG_IP6_NF_MANGLE=m
-+CONFIG_IP6_NF_TARGET_MARK=m
-+CONFIG_IP6_NF_RAW=m
-+
-+#
-+# Bridge: Netfilter Configuration
-+#
-+CONFIG_BRIDGE_NF_EBTABLES=m
-+CONFIG_BRIDGE_EBT_BROUTE=m
-+CONFIG_BRIDGE_EBT_T_FILTER=m
-+CONFIG_BRIDGE_EBT_T_NAT=m
-+CONFIG_BRIDGE_EBT_802_3=m
-+CONFIG_BRIDGE_EBT_AMONG=m
-+CONFIG_BRIDGE_EBT_ARP=m
-+CONFIG_BRIDGE_EBT_IP=m
-+CONFIG_BRIDGE_EBT_LIMIT=m
-+CONFIG_BRIDGE_EBT_MARK=m
-+CONFIG_BRIDGE_EBT_PKTTYPE=m
-+CONFIG_BRIDGE_EBT_STP=m
-+CONFIG_BRIDGE_EBT_VLAN=m
-+CONFIG_BRIDGE_EBT_ARPREPLY=m
-+CONFIG_BRIDGE_EBT_DNAT=m
-+CONFIG_BRIDGE_EBT_MARK_T=m
-+CONFIG_BRIDGE_EBT_REDIRECT=m
-+CONFIG_BRIDGE_EBT_SNAT=m
-+CONFIG_BRIDGE_EBT_LOG=m
-+# CONFIG_BRIDGE_EBT_ULOG is not set
-+CONFIG_XFRM=y
-+CONFIG_XFRM_USER=y
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+CONFIG_IP_SCTP=m
-+# CONFIG_SCTP_DBG_MSG is not set
-+# CONFIG_SCTP_DBG_OBJCNT is not set
-+# CONFIG_SCTP_HMAC_NONE is not set
-+# CONFIG_SCTP_HMAC_SHA1 is not set
-+CONFIG_SCTP_HMAC_MD5=y
-+CONFIG_ATM=m
-+CONFIG_ATM_CLIP=m
-+# CONFIG_ATM_CLIP_NO_ICMP is not set
-+CONFIG_ATM_LANE=m
-+# CONFIG_ATM_MPOA is not set
-+CONFIG_ATM_BR2684=m
-+# CONFIG_ATM_BR2684_IPFILTER is not set
-+CONFIG_BRIDGE=m
-+CONFIG_VLAN_8021Q=m
-+# CONFIG_DECNET is not set
-+CONFIG_LLC=m
-+# CONFIG_LLC2 is not set
-+CONFIG_IPX=m
-+# CONFIG_IPX_INTERN is not set
-+CONFIG_ATALK=m
-+CONFIG_DEV_APPLETALK=y
-+CONFIG_IPDDP=m
-+CONFIG_IPDDP_ENCAP=y
-+CONFIG_IPDDP_DECAP=y
-+# CONFIG_X25 is not set
-+# CONFIG_LAPB is not set
-+CONFIG_NET_DIVERT=y
-+# CONFIG_ECONET is not set
-+CONFIG_WAN_ROUTER=m
-+
-+#
-+# QoS and/or fair queueing
-+#
-+CONFIG_NET_SCHED=y
-+CONFIG_NET_SCH_CLK_JIFFIES=y
-+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-+# CONFIG_NET_SCH_CLK_CPU is not set
-+CONFIG_NET_SCH_CBQ=m
-+CONFIG_NET_SCH_HTB=m
-+CONFIG_NET_SCH_HFSC=m
-+CONFIG_NET_SCH_ATM=m
-+CONFIG_NET_SCH_PRIO=m
-+CONFIG_NET_SCH_RED=m
-+CONFIG_NET_SCH_SFQ=m
-+CONFIG_NET_SCH_TEQL=m
-+CONFIG_NET_SCH_TBF=m
-+CONFIG_NET_SCH_GRED=m
-+CONFIG_NET_SCH_DSMARK=m
-+CONFIG_NET_SCH_NETEM=m
-+CONFIG_NET_SCH_INGRESS=m
-+CONFIG_NET_QOS=y
-+CONFIG_NET_ESTIMATOR=y
-+CONFIG_NET_CLS=y
-+# CONFIG_NET_CLS_BASIC is not set
-+CONFIG_NET_CLS_TCINDEX=m
-+CONFIG_NET_CLS_ROUTE4=m
-+CONFIG_NET_CLS_ROUTE=y
-+CONFIG_NET_CLS_FW=m
-+CONFIG_NET_CLS_U32=m
-+CONFIG_CLS_U32_PERF=y
-+CONFIG_NET_CLS_IND=y
-+# CONFIG_CLS_U32_MARK is not set
-+CONFIG_NET_CLS_RSVP=m
-+CONFIG_NET_CLS_RSVP6=m
-+# CONFIG_NET_EMATCH is not set
-+# CONFIG_NET_CLS_ACT is not set
-+CONFIG_NET_CLS_POLICE=y
-+
-+#
-+# Network testing
-+#
-+# CONFIG_NET_PKTGEN is not set
-+CONFIG_NETPOLL=y
-+# CONFIG_NETPOLL_RX is not set
-+CONFIG_NETPOLL_TRAP=y
-+CONFIG_NET_POLL_CONTROLLER=y
-+# CONFIG_HAMRADIO is not set
-+CONFIG_IRDA=m
-+
-+#
-+# IrDA protocols
-+#
-+CONFIG_IRLAN=m
-+CONFIG_IRNET=m
-+CONFIG_IRCOMM=m
-+# CONFIG_IRDA_ULTRA is not set
-+
-+#
-+# IrDA options
-+#
-+CONFIG_IRDA_CACHE_LAST_LSAP=y
-+CONFIG_IRDA_FAST_RR=y
-+# CONFIG_IRDA_DEBUG is not set
-+
-+#
-+# Infrared-port device drivers
-+#
-+
-+#
-+# SIR device drivers
-+#
-+CONFIG_IRTTY_SIR=m
-+
-+#
-+# Dongle support
-+#
-+CONFIG_DONGLE=y
-+CONFIG_ESI_DONGLE=m
-+CONFIG_ACTISYS_DONGLE=m
-+CONFIG_TEKRAM_DONGLE=m
-+CONFIG_LITELINK_DONGLE=m
-+CONFIG_MA600_DONGLE=m
-+CONFIG_GIRBIL_DONGLE=m
-+CONFIG_MCP2120_DONGLE=m
-+CONFIG_OLD_BELKIN_DONGLE=m
-+CONFIG_ACT200L_DONGLE=m
-+
-+#
-+# Old SIR device drivers
-+#
-+
-+#
-+# Old Serial dongle support
-+#
-+
-+#
-+# FIR device drivers
-+#
-+# CONFIG_NSC_FIR is not set
-+# CONFIG_WINBOND_FIR is not set
-+# CONFIG_SMC_IRCC_FIR is not set
-+# CONFIG_ALI_FIR is not set
-+# CONFIG_VIA_FIR is not set
-+CONFIG_BT=m
-+CONFIG_BT_L2CAP=m
-+CONFIG_BT_SCO=m
-+CONFIG_BT_RFCOMM=m
-+CONFIG_BT_RFCOMM_TTY=y
-+CONFIG_BT_BNEP=m
-+CONFIG_BT_BNEP_MC_FILTER=y
-+CONFIG_BT_BNEP_PROTO_FILTER=y
-+CONFIG_BT_HIDP=m
-+
-+#
-+# Bluetooth device drivers
-+#
-+CONFIG_BT_HCIUART=m
-+CONFIG_BT_HCIUART_H4=y
-+CONFIG_BT_HCIUART_BCSP=y
-+CONFIG_BT_HCIUART_BCSP_TXCRC=y
-+CONFIG_BT_HCIVHCI=m
-+CONFIG_NETDEVICES=y
-+CONFIG_DUMMY=m
-+CONFIG_BONDING=m
-+CONFIG_EQUALIZER=m
-+CONFIG_TUN=m
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=m
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+
-+#
-+# Token Ring devices
-+#
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+CONFIG_NET_RADIO=y
-+
-+#
-+# Obsolete Wireless cards support (pre-802.11)
-+#
-+# CONFIG_STRIP is not set
-+CONFIG_ATMEL=m
-+
-+#
-+# Wan interfaces
-+#
-+# CONFIG_WAN is not set
-+
-+#
-+# ATM drivers
-+#
-+CONFIG_ATM_TCP=m
-+CONFIG_PPP=m
-+CONFIG_PPP_MULTILINK=y
-+CONFIG_PPP_FILTER=y
-+CONFIG_PPP_ASYNC=m
-+CONFIG_PPP_SYNC_TTY=m
-+CONFIG_PPP_DEFLATE=m
-+# CONFIG_PPP_BSDCOMP is not set
-+CONFIG_PPPOE=m
-+CONFIG_PPPOATM=m
-+# CONFIG_SLIP is not set
-+# CONFIG_SHAPER is not set
-+CONFIG_NETCONSOLE=m
-+CONFIG_INPUT=m
-+CONFIG_UNIX98_PTYS=y
-+# CONFIG_LEGACY_PTYS is not set
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# Character devices
-+#
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+CONFIG_EXT2_FS_XATTR=y
-+CONFIG_EXT2_FS_POSIX_ACL=y
-+CONFIG_EXT2_FS_SECURITY=y
-+CONFIG_EXT3_FS=y
-+CONFIG_EXT3_FS_XATTR=y
-+# CONFIG_EXT3_FS_POSIX_ACL is not set
-+# CONFIG_EXT3_FS_SECURITY is not set
-+CONFIG_JBD=m
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+CONFIG_REISERFS_FS=y
-+# CONFIG_REISERFS_CHECK is not set
-+CONFIG_REISERFS_PROC_INFO=y
-+CONFIG_REISERFS_FS_XATTR=y
-+CONFIG_REISERFS_FS_POSIX_ACL=y
-+CONFIG_REISERFS_FS_SECURITY=y
-+CONFIG_JFS_FS=m
-+CONFIG_JFS_POSIX_ACL=y
-+# CONFIG_JFS_SECURITY is not set
-+# CONFIG_JFS_DEBUG is not set
-+# CONFIG_JFS_STATISTICS is not set
-+CONFIG_FS_POSIX_ACL=y
-+
-+#
-+# XFS support
-+#
-+CONFIG_XFS_FS=m
-+CONFIG_XFS_EXPORT=y
-+# CONFIG_XFS_RT is not set
-+CONFIG_XFS_QUOTA=y
-+CONFIG_XFS_SECURITY=y
-+CONFIG_XFS_POSIX_ACL=y
-+CONFIG_MINIX_FS=m
-+CONFIG_ROMFS_FS=m
-+CONFIG_QUOTA=y
-+# CONFIG_QFMT_V1 is not set
-+CONFIG_QFMT_V2=y
-+CONFIG_QUOTACTL=y
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=m
-+CONFIG_AUTOFS4_FS=m
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=y
-+CONFIG_JOLIET=y
-+CONFIG_ZISOFS=y
-+CONFIG_ZISOFS_FS=y
-+CONFIG_UDF_FS=m
-+CONFIG_UDF_NLS=y
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=m
-+CONFIG_MSDOS_FS=m
-+CONFIG_VFAT_FS=m
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
-+# CONFIG_NTFS_FS is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+CONFIG_DEVPTS_FS_XATTR=y
-+CONFIG_DEVPTS_FS_SECURITY=y
-+CONFIG_TMPFS=y
-+CONFIG_TMPFS_XATTR=y
-+CONFIG_TMPFS_SECURITY=y
-+# CONFIG_HUGETLBFS is not set
-+# CONFIG_HUGETLB_PAGE is not set
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+# CONFIG_ADFS_FS is not set
-+CONFIG_AFFS_FS=m
-+CONFIG_HFS_FS=m
-+CONFIG_HFSPLUS_FS=m
-+CONFIG_BEFS_FS=m
-+# CONFIG_BEFS_DEBUG is not set
-+CONFIG_BFS_FS=m
-+CONFIG_EFS_FS=m
-+CONFIG_CRAMFS=y
-+CONFIG_VXFS_FS=m
-+# CONFIG_HPFS_FS is not set
-+CONFIG_QNX4FS_FS=m
-+CONFIG_SYSV_FS=m
-+CONFIG_UFS_FS=m
-+# CONFIG_UFS_FS_WRITE is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=m
-+CONFIG_NFS_V3=y
-+CONFIG_NFS_V4=y
-+CONFIG_NFS_DIRECTIO=y
-+CONFIG_NFSD=m
-+CONFIG_NFSD_V3=y
-+CONFIG_NFSD_V4=y
-+CONFIG_NFSD_TCP=y
-+CONFIG_LOCKD=m
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=m
-+CONFIG_SUNRPC=m
-+CONFIG_SUNRPC_GSS=m
-+CONFIG_RPCSEC_GSS_KRB5=m
-+CONFIG_RPCSEC_GSS_SPKM3=m
-+CONFIG_SMB_FS=m
-+# CONFIG_SMB_NLS_DEFAULT is not set
-+CONFIG_CIFS=m
-+# CONFIG_CIFS_STATS is not set
-+CONFIG_CIFS_XATTR=y
-+CONFIG_CIFS_POSIX=y
-+# CONFIG_CIFS_EXPERIMENTAL is not set
-+CONFIG_NCP_FS=m
-+CONFIG_NCPFS_PACKET_SIGNING=y
-+CONFIG_NCPFS_IOCTL_LOCKING=y
-+CONFIG_NCPFS_STRONG=y
-+CONFIG_NCPFS_NFS_NS=y
-+CONFIG_NCPFS_OS2_NS=y
-+CONFIG_NCPFS_SMALLDOS=y
-+CONFIG_NCPFS_NLS=y
-+CONFIG_NCPFS_EXTRAS=y
-+# CONFIG_CODA_FS is not set
-+# CONFIG_AFS_FS is not set
-+
-+#
-+# Partition Types
-+#
-+CONFIG_PARTITION_ADVANCED=y
-+# CONFIG_ACORN_PARTITION is not set
-+CONFIG_OSF_PARTITION=y
-+# CONFIG_AMIGA_PARTITION is not set
-+# CONFIG_ATARI_PARTITION is not set
-+CONFIG_MAC_PARTITION=y
-+CONFIG_MSDOS_PARTITION=y
-+CONFIG_BSD_DISKLABEL=y
-+CONFIG_MINIX_SUBPARTITION=y
-+CONFIG_SOLARIS_X86_PARTITION=y
-+CONFIG_UNIXWARE_DISKLABEL=y
-+# CONFIG_LDM_PARTITION is not set
-+CONFIG_SGI_PARTITION=y
-+# CONFIG_ULTRIX_PARTITION is not set
-+CONFIG_SUN_PARTITION=y
-+CONFIG_EFI_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="utf8"
-+CONFIG_NLS_CODEPAGE_437=y
-+CONFIG_NLS_CODEPAGE_737=m
-+CONFIG_NLS_CODEPAGE_775=m
-+CONFIG_NLS_CODEPAGE_850=m
-+CONFIG_NLS_CODEPAGE_852=m
-+CONFIG_NLS_CODEPAGE_855=m
-+CONFIG_NLS_CODEPAGE_857=m
-+CONFIG_NLS_CODEPAGE_860=m
-+CONFIG_NLS_CODEPAGE_861=m
-+CONFIG_NLS_CODEPAGE_862=m
-+CONFIG_NLS_CODEPAGE_863=m
-+CONFIG_NLS_CODEPAGE_864=m
-+CONFIG_NLS_CODEPAGE_865=m
-+CONFIG_NLS_CODEPAGE_866=m
-+CONFIG_NLS_CODEPAGE_869=m
-+CONFIG_NLS_CODEPAGE_936=m
-+CONFIG_NLS_CODEPAGE_950=m
-+CONFIG_NLS_CODEPAGE_932=m
-+CONFIG_NLS_CODEPAGE_949=m
-+CONFIG_NLS_CODEPAGE_874=m
-+CONFIG_NLS_ISO8859_8=m
-+CONFIG_NLS_CODEPAGE_1250=m
-+CONFIG_NLS_CODEPAGE_1251=m
-+CONFIG_NLS_ASCII=y
-+CONFIG_NLS_ISO8859_1=m
-+CONFIG_NLS_ISO8859_2=m
-+CONFIG_NLS_ISO8859_3=m
-+CONFIG_NLS_ISO8859_4=m
-+CONFIG_NLS_ISO8859_5=m
-+CONFIG_NLS_ISO8859_6=m
-+CONFIG_NLS_ISO8859_7=m
-+CONFIG_NLS_ISO8859_9=m
-+CONFIG_NLS_ISO8859_13=m
-+CONFIG_NLS_ISO8859_14=m
-+CONFIG_NLS_ISO8859_15=m
-+CONFIG_NLS_KOI8_R=m
-+CONFIG_NLS_KOI8_U=m
-+CONFIG_NLS_UTF8=m
-+
-+#
-+# Security options
-+#
-+# CONFIG_KEYS is not set
-+# CONFIG_SECURITY is not set
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+CONFIG_CRYPTO_HMAC=y
-+CONFIG_CRYPTO_NULL=m
-+CONFIG_CRYPTO_MD4=m
-+CONFIG_CRYPTO_MD5=m
-+CONFIG_CRYPTO_SHA1=y
-+CONFIG_CRYPTO_SHA256=m
-+CONFIG_CRYPTO_SHA512=m
-+CONFIG_CRYPTO_WP512=m
-+# CONFIG_CRYPTO_TGR192 is not set
-+CONFIG_CRYPTO_DES=m
-+CONFIG_CRYPTO_BLOWFISH=m
-+CONFIG_CRYPTO_TWOFISH=m
-+CONFIG_CRYPTO_SERPENT=m
-+# CONFIG_CRYPTO_AES is not set
-+CONFIG_CRYPTO_CAST5=m
-+CONFIG_CRYPTO_CAST6=m
-+CONFIG_CRYPTO_TEA=m
-+CONFIG_CRYPTO_ARC4=m
-+CONFIG_CRYPTO_KHAZAD=m
-+# CONFIG_CRYPTO_ANUBIS is not set
-+CONFIG_CRYPTO_DEFLATE=m
-+CONFIG_CRYPTO_MICHAEL_MIC=m
-+CONFIG_CRYPTO_CRC32C=m
-+# CONFIG_CRYPTO_TEST is not set
-+
-+#
-+# Hardware crypto devices
-+#
-+
-+#
-+# Library routines
-+#
-+CONFIG_CRC_CCITT=m
-+CONFIG_CRC32=y
-+CONFIG_LIBCRC32C=m
-+CONFIG_ZLIB_INFLATE=y
-+CONFIG_ZLIB_DEFLATE=m
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=15
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+# CONFIG_DEBUG_STACKOVERFLOW is not set
-+# CONFIG_KPROBES is not set
-+# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_DEBUG_PAGEALLOC is not set
-+# CONFIG_4KSTACKS is not set
-+# CONFIG_INIT_DEBUG is not set
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/Kconfig linux-2.6.12-xen/arch/xen/i386/Kconfig
---- pristine-linux-2.6.12/arch/xen/i386/Kconfig	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/Kconfig	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,960 @@
-+#
-+# For a description of the syntax of this configuration file,
-+# see Documentation/kbuild/kconfig-language.txt.
-+#
-+
-+menu "X86 Processor Configuration"
-+
-+config XENARCH
-+	string
-+	default i386
-+
-+config X86
-+	bool
-+	default y
-+	help
-+	  This is Linux's home port.  Linux was originally native to the Intel
-+	  386, and runs on all the later x86 processors including the Intel
-+	  486, 586, Pentiums, and various instruction-set-compatible chips by
-+	  AMD, Cyrix, and others.
-+
-+config MMU
-+	bool
-+	default y
-+
-+config SBUS
-+	bool
-+
-+config UID16
-+	bool
-+	default y
-+
-+config GENERIC_ISA_DMA
-+	bool
-+	default y
-+
-+config GENERIC_IOMAP
-+	bool
-+	default y
-+
-+choice
-+	prompt "Processor family"
-+	default M686
-+
-+config M386
-+	bool "386"
-+	---help---
-+	  This is the processor type of your CPU. This information is used for
-+	  optimizing purposes. In order to compile a kernel that can run on
-+	  all x86 CPU types (albeit not optimally fast), you can specify
-+	  "386" here.
-+
-+	  The kernel will not necessarily run on earlier architectures than
-+	  the one you have chosen, e.g. a Pentium optimized kernel will run on
-+	  a PPro, but not necessarily on an i486.
-+
-+	  Here are the settings recommended for greatest speed:
-+	  - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
-+	  486DLC/DLC2, UMC 486SX-S and NexGen Nx586.  Only "386" kernels
-+	  will run on a 386 class machine.
-+	  - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
-+	  SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
-+	  - "586" for generic Pentium CPUs lacking the TSC
-+	  (time stamp counter) register.
-+	  - "Pentium-Classic" for the Intel Pentium.
-+	  - "Pentium-MMX" for the Intel Pentium MMX.
-+	  - "Pentium-Pro" for the Intel Pentium Pro.
-+	  - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
-+	  - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
-+	  - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
-+	  - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
-+	  - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
-+	  - "Crusoe" for the Transmeta Crusoe series.
-+	  - "Efficeon" for the Transmeta Efficeon series.
-+	  - "Winchip-C6" for original IDT Winchip.
-+	  - "Winchip-2" for IDT Winchip 2.
-+	  - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
-+	  - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
-+	  - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
-+	  - "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
-+
-+	  If you don't know what to do, choose "386".
-+
-+config M486
-+	bool "486"
-+	help
-+	  Select this for a 486 series processor, either Intel or one of the
-+	  compatible processors from AMD, Cyrix, IBM, or Intel.  Includes DX,
-+	  DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
-+	  U5S.
-+
-+config M586
-+	bool "586/K5/5x86/6x86/6x86MX"
-+	help
-+	  Select this for a 586 or 686 series processor such as the AMD K5,
-+	  the Cyrix 5x86, 6x86 and 6x86MX.  This choice does not
-+	  assume the RDTSC (Read Time Stamp Counter) instruction.
-+
-+config M586TSC
-+	bool "Pentium-Classic"
-+	help
-+	  Select this for a Pentium Classic processor with the RDTSC (Read
-+	  Time Stamp Counter) instruction for benchmarking.
-+
-+config M586MMX
-+	bool "Pentium-MMX"
-+	help
-+	  Select this for a Pentium with the MMX graphics/multimedia
-+	  extended instructions.
-+
-+config M686
-+	bool "Pentium-Pro"
-+	help
-+	  Select this for Intel Pentium Pro chips.  This enables the use of
-+	  Pentium Pro extended instructions, and disables the init-time guard
-+	  against the f00f bug found in earlier Pentiums.
-+
-+config MPENTIUMII
-+	bool "Pentium-II/Celeron(pre-Coppermine)"
-+	help
-+	  Select this for Intel chips based on the Pentium-II and
-+	  pre-Coppermine Celeron core.  This option enables an unaligned
-+	  copy optimization, compiles the kernel with optimization flags
-+	  tailored for the chip, and applies any applicable Pentium Pro
-+	  optimizations.
-+
-+config MPENTIUMIII
-+	bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
-+	help
-+	  Select this for Intel chips based on the Pentium-III and
-+	  Celeron-Coppermine core.  This option enables use of some
-+	  extended prefetch instructions in addition to the Pentium II
-+	  extensions.
-+
-+config MPENTIUMM
-+	bool "Pentium M"
-+	help
-+	  Select this for Intel Pentium M (not Pentium-4 M)
-+	  notebook chips.
-+
-+config MPENTIUM4
-+	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
-+	help
-+	  Select this for Intel Pentium 4 chips.  This includes the
-+	  Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
-+	  (not Pentium M) chips.  This option enables compile flags
-+	  optimized for the chip, uses the correct cache shift, and
-+	  applies any applicable Pentium III optimizations.
-+
-+config MK6
-+	bool "K6/K6-II/K6-III"
-+	help
-+	  Select this for an AMD K6-family processor.  Enables use of
-+	  some extended instructions, and passes appropriate optimization
-+	  flags to GCC.
-+
-+config MK7
-+	bool "Athlon/Duron/K7"
-+	help
-+	  Select this for an AMD Athlon K7-family processor.  Enables use of
-+	  some extended instructions, and passes appropriate optimization
-+	  flags to GCC.
-+
-+config MK8
-+	bool "Opteron/Athlon64/Hammer/K8"
-+	help
-+	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.  Enables
-+	  use of some extended instructions, and passes appropriate optimization
-+	  flags to GCC.
-+
-+config MCRUSOE
-+	bool "Crusoe"
-+	help
-+	  Select this for a Transmeta Crusoe processor.  Treats the processor
-+	  like a 586 with TSC, and sets some GCC optimization flags (like a
-+	  Pentium Pro with no alignment requirements).
-+
-+config MEFFICEON
-+	bool "Efficeon"
-+	help
-+	  Select this for a Transmeta Efficeon processor.
-+
-+config MWINCHIPC6
-+	bool "Winchip-C6"
-+	help
-+	  Select this for an IDT Winchip C6 chip.  Linux and GCC
-+	  treat this chip as a 586TSC with some extended instructions
-+	  and alignment requirements.
-+
-+config MWINCHIP2
-+	bool "Winchip-2"
-+	help
-+	  Select this for an IDT Winchip-2.  Linux and GCC
-+	  treat this chip as a 586TSC with some extended instructions
-+	  and alignment requirements.
-+
-+config MWINCHIP3D
-+	bool "Winchip-2A/Winchip-3"
-+	help
-+	  Select this for an IDT Winchip-2A or 3.  Linux and GCC
-+	  treat this chip as a 586TSC with some extended instructions
-+	  and alignment requirements.  Also enable out-of-order memory
-+	  stores for this CPU, which can increase performance of some
-+	  operations.
-+
-+config MGEODEGX1
-+	bool "GeodeGX1"
-+	help
-+	  Select this for a Geode GX1 (Cyrix MediaGX) chip.
-+
-+config MCYRIXIII
-+	bool "CyrixIII/VIA-C3"
-+	help
-+	  Select this for a Cyrix III or C3 chip.  Presently Linux and GCC
-+	  treat this chip as a generic 586. Whilst the CPU is 686 class,
-+	  it lacks the cmov extension which gcc assumes is present when
-+	  generating 686 code.
-+	  Note that Nehemiah (Model 9) and above will not boot with this
-+	  kernel due to them lacking the 3DNow! instructions used in earlier
-+	  incarnations of the CPU.
-+
-+config MVIAC3_2
-+	bool "VIA C3-2 (Nehemiah)"
-+	help
-+	  Select this for a VIA C3 "Nehemiah". Selecting this enables usage
-+	  of SSE and tells gcc to treat the CPU as a 686.
-+	  Note, this kernel will not boot on older (pre model 9) C3s.
-+
-+endchoice
-+
-+config X86_GENERIC
-+       bool "Generic x86 support"
-+       help
-+	  Instead of just including optimizations for the selected
-+	  x86 variant (e.g. PII, Crusoe or Athlon), include some more
-+	  generic optimizations as well. This will make the kernel
-+	  perform better on x86 CPUs other than that selected.
-+
-+	  This is really intended for distributors who need more
-+	  generic optimizations.
-+
-+#
-+# Define implied options from the CPU selection here
-+#
-+config X86_CMPXCHG
-+	bool
-+	depends on !M386
-+	default y
-+
-+config X86_XADD
-+	bool
-+	depends on !M386
-+	default y
-+
-+config X86_L1_CACHE_SHIFT
-+	int
-+	default "7" if MPENTIUM4 || X86_GENERIC
-+	default "4" if X86_ELAN || M486 || M386
-+	default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
-+	default "6" if MK7 || MK8 || MPENTIUMM
-+
-+config RWSEM_GENERIC_SPINLOCK
-+	bool
-+	depends on M386
-+	default y
-+
-+config RWSEM_XCHGADD_ALGORITHM
-+	bool
-+	depends on !M386
-+	default y
-+
-+config GENERIC_CALIBRATE_DELAY
-+	bool
-+	default y
-+
-+config X86_PPRO_FENCE
-+	bool
-+	depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
-+	default y
-+
-+config X86_F00F_BUG
-+	bool
-+	depends on M586MMX || M586TSC || M586 || M486 || M386
-+	default y
-+
-+config X86_WP_WORKS_OK
-+	bool
-+	depends on !M386
-+	default y
-+
-+config X86_INVLPG
-+	bool
-+	depends on !M386
-+	default y
-+
-+config X86_BSWAP
-+	bool
-+	depends on !M386
-+	default y
-+
-+config X86_POPAD_OK
-+	bool
-+	depends on !M386
-+	default y
-+
-+config X86_ALIGNMENT_16
-+	bool
-+	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
-+	default y
-+
-+config X86_GOOD_APIC
-+	bool
-+	depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
-+	default y
-+
-+config X86_INTEL_USERCOPY
-+	bool
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
-+	default y
-+
-+config X86_USE_PPRO_CHECKSUM
-+	bool
-+	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
-+	default y
-+
-+config X86_USE_3DNOW
-+	bool
-+	depends on MCYRIXIII || MK7
-+	default y
-+
-+config X86_OOSTORE
-+	bool
-+	depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
-+	default y
-+
-+config HPET_TIMER
-+	bool
-+	default n
-+#config HPET_TIMER
-+#	bool "HPET Timer Support"
-+#	help
-+#	  This enables the use of the HPET for the kernel's internal timer.
-+#	  HPET is the next generation timer replacing legacy 8254s.
-+#	  You can safely choose Y here.  However, HPET will only be
-+#	  activated if the platform and the BIOS support this feature.
-+#	  Otherwise the 8254 will be used for timing services.
-+#
-+#	  Choose N to continue using the legacy 8254 timer.
-+
-+config HPET_EMULATE_RTC
-+	def_bool HPET_TIMER && RTC=y
-+
-+config SMP
-+	bool "Symmetric multi-processing support"
-+	---help---
-+	  This enables support for systems with more than one CPU. If you have
-+	  a system with only one CPU, like most personal computers, say N. If
-+	  you have a system with more than one CPU, say Y.
-+
-+	  If you say N here, the kernel will run on single and multiprocessor
-+	  machines, but will use only one CPU of a multiprocessor machine. If
-+	  you say Y here, the kernel will run on many, but not all,
-+	  single-processor machines. On a single-processor machine, the kernel
-+	  will run faster if you say N here.
-+
-+	  Note that if you say Y here and choose architecture "586" or
-+	  "Pentium" under "Processor family", the kernel will not work on 486
-+	  architectures. Similarly, multiprocessor kernels for the "PPro"
-+	  architecture may not work on all Pentium based boards.
-+
-+	  People using multiprocessor machines who say Y here should also say
-+	  Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
-+	  Management" code will be disabled if you say Y here.
-+
-+	  See also the <file:Documentation/smp.txt>,
-+	  <file:Documentation/i386/IO-APIC.txt>,
-+	  <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
-+	  <http://www.tldp.org/docs.html#howto>.
-+
-+	  If you don't know what to do here, say N.
-+
-+config SMP_ALTERNATIVES
-+	bool "SMP alternatives support (EXPERIMENTAL)"
-+	depends on SMP && EXPERIMENTAL
-+	help
-+	  Try to reduce the overhead of running an SMP kernel on a uniprocessor
-+	  host slightly by replacing certain key instruction sequences
-+	  according to whether we currently have more than one CPU available.
-+	  This should provide a noticeable boost to performance when
-+	  running SMP kernels on UP machines, and have negligible impact
-+	  when running on a true SMP host.
-+
-+          If unsure, say N.
-+	  
-+config NR_CPUS
-+	int "Maximum number of CPUs (2-255)"
-+	range 2 255
-+	depends on SMP
-+	default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
-+	default "8"
-+	help
-+	  This allows you to specify the maximum number of CPUs which this
-+	  kernel will support.  The maximum supported value is 255 and the
-+	  minimum value which makes sense is 2.
-+
-+	  This is purely to save memory - each supported CPU adds
-+	  approximately eight kilobytes to the kernel image.
-+
-+config SCHED_SMT
-+	bool "SMT (Hyperthreading) scheduler support"
-+	depends on SMP
-+	default n
-+	help
-+	  SMT scheduler support improves the CPU scheduler's decision making
-+	  when dealing with Intel Pentium 4 chips with HyperThreading at a
-+	  cost of slightly increased overhead in some places. If unsure say
-+	  N here.
-+
-+#config PREEMPT
-+#	bool "Preemptible Kernel"
-+#	help
-+#	  This option reduces the latency of the kernel when reacting to
-+#	  real-time or interactive events by allowing a low priority process to
-+#	  be preempted even if it is in kernel mode executing a system call.
-+#	  This allows applications to run more reliably even when the system is
-+#	  under load.
-+#
-+#	  Say Y here if you are building a kernel for a desktop, embedded
-+#	  or real-time system.  Say N if you are unsure.
-+
-+config PREEMPT_BKL
-+	bool "Preempt The Big Kernel Lock"
-+	depends on PREEMPT
-+	default y
-+	help
-+	  This option reduces the latency of the kernel by making the
-+	  big kernel lock preemptible.
-+
-+	  Say Y here if you are building a kernel for a desktop system.
-+	  Say N if you are unsure.
-+
-+#config X86_TSC
-+#	 bool
-+# 	depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ
-+#	 default y
-+
-+#config X86_MCE
-+#	 bool "Machine Check Exception"
-+#	depends on !X86_VOYAGER
-+#	 ---help---
-+#	   Machine Check Exception support allows the processor to notify the
-+#	   kernel if it detects a problem (e.g. overheating, component failure).
-+#	   The action the kernel takes depends on the severity of the problem,
-+#	   ranging from a warning message on the console, to halting the machine.
-+#	   Your processor must be a Pentium or newer to support this - check the
-+#	   flags in /proc/cpuinfo for mce.  Note that some older Pentium systems
-+#	   have a design flaw which leads to false MCE events - hence MCE is
-+#	   disabled on all P5 processors, unless explicitly enabled with "mce"
-+#	   as a boot argument.  Similarly, if MCE is built in and creates a
-+#	   problem on some new non-standard machine, you can boot with "nomce"
-+#	   to disable it.  MCE support simply ignores non-MCE processors like
-+#	   the 386 and 486, so nearly everyone can say Y here.
-+
-+#config X86_MCE_NONFATAL
-+#	tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
-+#	 depends on X86_MCE
-+#	 help
-+#	   Enabling this feature starts a timer that triggers every 5 seconds which
-+#	   will look at the machine check registers to see if anything happened.
-+#	   Non-fatal problems automatically get corrected (but still logged).
-+#	   Disable this if you don't want to see these messages.
-+#	   Seeing the messages this option prints out may be indicative of dying hardware,
-+#	   or out-of-spec (ie, overclocked) hardware.
-+#	   This option only does something on certain CPUs.
-+#	   (AMD Athlon/Duron and Intel Pentium 4)
-+
-+#config X86_MCE_P4THERMAL
-+#	 bool "check for P4 thermal throttling interrupt."
-+#	 depends on X86_MCE && (X86_UP_APIC || SMP)
-+#	 help
-+#	   Enabling this feature will cause a message to be printed when the P4
-+#	   enters thermal throttling.
-+
-+config X86_REBOOTFIXUPS
-+	bool "Enable X86 board specific fixups for reboot"
-+	depends on X86
-+	default n
-+	---help---
-+	  This enables chipset and/or board specific fixups to be done
-+	  in order to get reboot to work correctly. This is only needed on
-+	  some combinations of hardware and BIOS. The symptom, for which
-+	  this config is intended, is when reboot ends with a stalled/hung
-+	  system.
-+
-+	  Currently, the only fixup is for the Geode GX1/CS5530A/TROM2.1
-+	  combination.
-+
-+	  Say Y if you want to enable the fixup. Currently, it's safe to
-+	  enable this option even if you don't need it.
-+	  Say N otherwise.
-+
-+config MICROCODE
-+	tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
-+	 depends on XEN_PRIVILEGED_GUEST
-+	---help---
-+	  If you say Y here and also to "/dev file system support" in the
-+	  'File systems' section, you will be able to update the microcode on
-+	  Intel processors in the IA32 family, e.g. Pentium Pro, Pentium II,
-+	  Pentium III, Pentium 4, Xeon etc.  You will obviously need the
-+	  actual microcode binary data itself which is not shipped with the
-+	  Linux kernel.
-+
-+	  For latest news and information on obtaining all the required
-+	  ingredients for this driver, check:
-+	  <http://www.urbanmyth.org/microcode/>.
-+
-+	  To compile this driver as a module, choose M here: the
-+	  module will be called microcode.
-+
-+#config X86_MSR
-+#	 tristate "/dev/cpu/*/msr - Model-specific register support"
-+#	 help
-+#	   This device gives privileged processes access to the x86
-+#	   Model-Specific Registers (MSRs).  It is a character device with
-+#	   major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
-+#	   MSR accesses are directed to a specific CPU on multi-processor
-+#	   systems.
-+
-+config X86_CPUID
-+	tristate "/dev/cpu/*/cpuid - CPU information support"
-+	help
-+	  This device gives processes access to the x86 CPUID instruction to
-+	  be executed on a specific processor.  It is a character device
-+	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
-+	  /dev/cpu/31/cpuid.
-+
-+config SWIOTLB
-+       bool
-+       depends on PCI
-+       default y
-+
-+source "drivers/firmware/Kconfig"
-+
-+choice
-+	prompt "High Memory Support"
-+	default NOHIGHMEM
-+
-+config NOHIGHMEM
-+	bool "off"
-+	---help---
-+	  Linux can use up to 64 Gigabytes of physical memory on x86 systems.
-+	  However, the address space of 32-bit x86 processors is only 4
-+	  Gigabytes large. That means that, if you have a large amount of
-+	  physical memory, not all of it can be "permanently mapped" by the
-+	  kernel. The physical memory that's not permanently mapped is called
-+	  "high memory".
-+
-+	  If you are compiling a kernel which will never run on a machine with
-+	  more than 1 Gigabyte total physical RAM, answer "off" here (default
-+	  choice and suitable for most users). This will result in a "3GB/1GB"
-+	  split: 3GB are mapped so that each process sees a 3GB virtual memory
-+	  space and the remaining part of the 4GB virtual memory space is used
-+	  by the kernel to permanently map as much physical memory as
-+	  possible.
-+
-+	  If the machine has between 1 and 4 Gigabytes physical RAM, then
-+	  answer "4GB" here.
-+
-+	  If more than 4 Gigabytes is used then answer "64GB" here. This
-+	  selection turns Intel PAE (Physical Address Extension) mode on.
-+	  PAE implements 3-level paging on IA32 processors. PAE is fully
-+	  supported by Linux; PAE mode is implemented on all recent Intel
-+	  processors (Pentium Pro and better). NOTE: If you say "64GB" here,
-+	  then the kernel will not boot on CPUs that don't support PAE!
-+
-+	  The actual amount of total physical memory will either be
-+	  auto detected or can be forced by using a kernel command line option
-+	  such as "mem=256M". (Try "man bootparam" or see the documentation of
-+	  your boot loader (lilo or loadlin) about how to pass options to the
-+	  kernel at boot time.)
-+
-+	  If unsure, say "off".
-+
-+config HIGHMEM4G
-+	bool "4GB"
-+	help
-+	  Select this if you have a 32-bit processor and between 1 and 4
-+	  gigabytes of physical RAM.
-+
-+config HIGHMEM64G
-+	bool "64GB"
-+	help
-+	  Select this if you have a 32-bit processor and more than 4
-+	  gigabytes of physical RAM.
-+
-+endchoice
-+
-+config HIGHMEM
-+	bool
-+	depends on HIGHMEM64G || HIGHMEM4G
-+	default y
-+
-+config X86_PAE
-+	bool
-+	depends on HIGHMEM64G
-+	default y
-+
-+# Common NUMA Features
-+config NUMA
-+	bool "Numa Memory Allocation and Scheduler Support"
-+	depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
-+	default n if X86_PC
-+	default y if (X86_NUMAQ || X86_SUMMIT)
-+
-+# Need comments to help the hapless user trying to turn on NUMA support
-+comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
-+	depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
-+
-+comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
-+	depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
-+
-+config DISCONTIGMEM
-+	bool
-+	depends on NUMA
-+	default y
-+
-+config HAVE_ARCH_BOOTMEM_NODE
-+	bool
-+	depends on NUMA
-+	default y
-+
-+config HAVE_MEMORY_PRESENT
-+	bool
-+	depends on DISCONTIGMEM
-+	default y
-+
-+config NEED_NODE_MEMMAP_SIZE
-+	bool
-+	depends on DISCONTIGMEM
-+	default y
-+
-+#config HIGHPTE
-+#	bool "Allocate 3rd-level pagetables from highmem"
-+#	depends on HIGHMEM4G || HIGHMEM64G
-+#	help
-+#	  The VM uses one page table entry for each page of physical memory.
-+#	  For systems with a lot of RAM, this can be wasteful of precious
-+#	  low memory.  Setting this option will put user-space page table
-+#	  entries in high memory.
-+
-+config MTRR
-+	bool
-+	depends on XEN_PRIVILEGED_GUEST
-+	default y
-+
-+#config MTRR
-+#	 bool "MTRR (Memory Type Range Register) support"
-+#	 ---help---
-+#	   On Intel P6 family processors (Pentium Pro, Pentium II and later)
-+#	   the Memory Type Range Registers (MTRRs) may be used to control
-+#	   processor access to memory ranges. This is most useful if you have
-+#	   a video (VGA) card on a PCI or AGP bus. Enabling write-combining
-+#	   allows bus write transfers to be combined into a larger transfer
-+#	   before bursting over the PCI/AGP bus. This can increase performance
-+#	   of image write operations 2.5 times or more. Saying Y here creates a
-+#	   /proc/mtrr file which may be used to manipulate your processor's
-+#	   MTRRs. Typically the X server should use this.
-+#
-+#	   This code has a reasonably generic interface so that similar
-+#	   control registers on other processors can be easily supported
-+#	   as well:
-+#
-+#	   The Cyrix 6x86, 6x86MX and M II processors have Address Range
-+#	   Registers (ARRs) which provide a similar functionality to MTRRs. For
-+#	   these, the ARRs are used to emulate the MTRRs.
-+#	   The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
-+#	   MTRRs. The Centaur C6 (WinChip) has 8 MCRs, allowing
-+#	   write-combining. All of these processors are supported by this code
-+#	   and it makes sense to say Y here if you have one of them.
-+#
-+#	   Saying Y here also fixes a problem with buggy SMP BIOSes which only
-+#	   set the MTRRs for the boot CPU and not for the secondary CPUs. This
-+#	   can lead to all sorts of problems, so it's good to say Y here.
-+#
-+#	   You can safely say Y even if your machine doesn't have MTRRs, you'll
-+#	   just add about 9 KB to your kernel.
-+#
-+#	   See <file:Documentation/mtrr.txt> for more information.
-+
-+config IRQBALANCE
-+ 	bool "Enable kernel irq balancing"
-+	depends on SMP && X86_IO_APIC && !XEN
-+	default y
-+	help
-+ 	  The default yes will allow the kernel to do irq load balancing.
-+	  Saying no will keep the kernel from doing irq load balancing.
-+
-+config HAVE_DEC_LOCK
-+	bool
-+	depends on (SMP || PREEMPT) && X86_CMPXCHG
-+	default y
-+
-+# turning this on wastes a bunch of space.
-+# Summit needs it only when NUMA is on
-+config BOOT_IOREMAP
-+	bool
-+	depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
-+	default y
-+
-+config REGPARM
-+	bool "Use register arguments (EXPERIMENTAL)"
-+	depends on EXPERIMENTAL
-+	default n
-+	help
-+	Compile the kernel with -mregparm=3. This uses a different ABI
-+	and passes the first three arguments of a function call in registers.
-+	This will probably break binary only modules.
-+
-+	This feature is only enabled for gcc-3.0 and later - earlier compilers
-+	generate incorrect output with certain kernel constructs when
-+	-mregparm=3 is used.
-+
-+config X86_LOCAL_APIC
-+	bool
-+	depends on XEN_PRIVILEGED_GUEST && (X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER))
-+	default y
-+
-+config X86_IO_APIC
-+	bool
-+	depends on XEN_PRIVILEGED_GUEST && (X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)))
-+	default y
-+
-+config X86_VISWS_APIC
-+	bool
-+	depends on X86_VISWS
-+  	default y
-+
-+config HOTPLUG_CPU
-+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
-+	depends on SMP && HOTPLUG && EXPERIMENTAL
-+	---help---
-+	  Say Y here to experiment with turning CPUs off and on.  CPUs
-+	  can be controlled through /sys/devices/system/cpu.
-+
-+	  Say N.
-+
-+
-+if XEN_PHYSDEV_ACCESS
-+
-+menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
-+
-+config X86_UP_APIC
-+	bool "Local APIC support on uniprocessors"
-+	depends on !SMP && !(X86_VISWS || X86_VOYAGER)
-+	help
-+	  A local APIC (Advanced Programmable Interrupt Controller) is an
-+	  integrated interrupt controller in the CPU. If you have a single-CPU
-+	  system which has a processor with a local APIC, you can say Y here to
-+	  enable and use it. If you say Y here even though your machine doesn't
-+	  have a local APIC, then the kernel will still run with no slowdown at
-+	  all. The local APIC supports CPU-generated self-interrupts (timer,
-+	  performance counters), and the NMI watchdog which detects hard
-+	  lockups.
-+
-+config X86_UP_IOAPIC
-+	bool "IO-APIC support on uniprocessors"
-+	depends on X86_UP_APIC
-+	help
-+	  An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
-+	  SMP-capable replacement for PC-style interrupt controllers. Most
-+	  SMP systems and many recent uniprocessor systems have one.
-+
-+	  If you have a single-CPU system with an IO-APIC, you can say Y here
-+	  to use it. If you say Y here even though your machine doesn't have
-+	  an IO-APIC, then the kernel will still run with no slowdown at all.
-+
-+config PCI
-+	bool "PCI support" if !X86_VISWS
-+	depends on !X86_VOYAGER
-+	default y if X86_VISWS
-+	help
-+	  Find out whether you have a PCI motherboard. PCI is the name of a
-+	  bus system, i.e. the way the CPU talks to the other stuff inside
-+	  your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
-+	  VESA. If you have PCI, say Y, otherwise N.
-+
-+	  The PCI-HOWTO, available from
-+	  <http://www.tldp.org/docs.html#howto>, contains valuable
-+	  information about which PCI hardware does work under Linux and which
-+	  doesn't.
-+
-+choice
-+	prompt "PCI access mode"
-+	depends on PCI && !X86_VISWS
-+	default PCI_GOANY
-+	---help---
-+	  On PCI systems, the BIOS can be used to detect the PCI devices and
-+	  determine their configuration. However, some old PCI motherboards
-+	  have BIOS bugs and may crash if this is done. Also, some embedded
-+	  PCI-based systems don't have any BIOS at all. Linux can also try to
-+	  detect the PCI hardware directly without using the BIOS.
-+
-+	  With this option, you can specify how Linux should detect the
-+	  PCI devices. If you choose "BIOS", the BIOS will be used,
-+	  if you choose "Direct", the BIOS won't be used, and if you
-+	  choose "MMConfig", then PCI Express MMCONFIG will be used.
-+	  If you choose "Any", the kernel will try MMCONFIG, then the
-+	  direct access method, falling back to the BIOS if that doesn't
-+	  work. If unsure, go with the default, which is "Any".
-+
-+#config PCI_GOBIOS
-+#	bool "BIOS"
-+
-+config PCI_GOMMCONFIG
-+	bool "MMConfig"
-+
-+config PCI_GODIRECT
-+	bool "Direct"
-+
-+config PCI_GOANY
-+	bool "Any"
-+
-+endchoice
-+
-+#config PCI_BIOS
-+#	bool
-+#	depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
-+#	default y
-+
-+config PCI_DIRECT
-+	bool
-+ 	depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
-+	default y
-+
-+config PCI_MMCONFIG
-+	bool
-+	depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
-+	select ACPI_BOOT
-+	default y
-+
-+source "drivers/pci/pcie/Kconfig"
-+
-+source "drivers/pci/Kconfig"
-+
-+config ISA_DMA_API
-+	bool
-+	default y
-+
-+config ISA
-+	bool "ISA support"
-+	depends on !(X86_VOYAGER || X86_VISWS)
-+	help
-+	  Find out whether you have ISA slots on your motherboard.  ISA is the
-+	  name of a bus system, i.e. the way the CPU talks to the other stuff
-+	  inside your box.  Other bus systems are PCI, EISA, MicroChannel
-+	  (MCA) or VESA.  ISA is an older system, now being displaced by PCI;
-+	  newer boards don't support it.  If you have ISA, say Y, otherwise N.
-+
-+config EISA
-+	bool "EISA support"
-+	depends on ISA
-+	---help---
-+	  The Extended Industry Standard Architecture (EISA) bus was
-+	  developed as an open alternative to the IBM MicroChannel bus.
-+
-+	  The EISA bus provided some of the features of the IBM MicroChannel
-+	  bus while maintaining backward compatibility with cards made for
-+	  the older ISA bus.  The EISA bus saw limited use between 1988 and
-+	  1995 when it was made obsolete by the PCI bus.
-+
-+	  Say Y here if you are building a kernel for an EISA-based machine.
-+
-+	  Otherwise, say N.
-+
-+source "drivers/eisa/Kconfig"
-+
-+config MCA
-+	bool "MCA support" if !(X86_VISWS || X86_VOYAGER)
-+	default y if X86_VOYAGER
-+	help
-+	  MicroChannel Architecture is found in some IBM PS/2 machines and
-+	  laptops.  It is a bus system similar to PCI or ISA. See
-+	  <file:Documentation/mca.txt> (and especially the web page given
-+	  there) before attempting to build an MCA bus kernel.
-+
-+source "drivers/mca/Kconfig"
-+
-+config SCx200
-+	tristate "NatSemi SCx200 support"
-+	depends on !X86_VOYAGER
-+	help
-+	  This provides basic support for the National Semiconductor SCx200
-+	  processor.  Right now this is just a driver for the GPIO pins.
-+
-+	  If you don't know what to do here, say N.
-+
-+	  This support is also available as a module.  If compiled as a
-+	  module, it will be called scx200.
-+
-+source "drivers/pcmcia/Kconfig"
-+
-+source "drivers/pci/hotplug/Kconfig"
-+
-+endmenu
-+
-+endif
-+
-+#
-+# Use the generic interrupt handling code in kernel/irq/:
-+#
-+config GENERIC_HARDIRQS
-+	bool
-+	default y
-+
-+config GENERIC_IRQ_PROBE
-+	bool
-+	default y
-+
-+config X86_SMP
-+	bool
-+	depends on SMP && !X86_VOYAGER
-+	default y
-+
-+#config X86_HT
-+#	bool
-+#	depends on SMP && !(X86_VISWS || X86_VOYAGER)
-+#	default y
-+
-+config X86_BIOS_REBOOT
-+	bool
-+	depends on !(X86_VISWS || X86_VOYAGER)
-+	default y
-+
-+config X86_TRAMPOLINE
-+	bool
-+	depends on X86_SMP || (X86_VOYAGER && SMP)
-+	default y
-+
-+config PC
-+	bool
-+	depends on X86 && !EMBEDDED
-+	default y
-+
-+config SECCOMP
-+	bool "Enable seccomp to safely compute untrusted bytecode"
-+	depends on PROC_FS
-+	default y
-+	help
-+	  This kernel feature is useful for number crunching applications
-+	  that may need to compute untrusted bytecode during their
-+	  execution. By using pipes or other transports made available to
-+	  the process as file descriptors supporting the read/write
-+	  syscalls, it's possible to isolate those applications in
-+	  their own address space using seccomp. Once seccomp is
-+	  enabled via /proc/<pid>/seccomp, it cannot be disabled
-+	  and the task is only allowed to execute a few safe syscalls
-+	  defined by each seccomp mode.
-+
-+	  If unsure, say Y. Only embedded systems should say N here.
-+
-+endmenu
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/acpi/boot.c linux-2.6.12-xen/arch/xen/i386/kernel/acpi/boot.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/acpi/boot.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/acpi/boot.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,920 @@
-+/*
-+ *  boot.c - Architecture-Specific Low-Level ACPI Boot Support
-+ *
-+ *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh at intel.com>
-+ *  Copyright (C) 2001 Jun Nakajima <jun.nakajima at intel.com>
-+ *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ *
-+ *  This program is free software; you can redistribute it and/or modify
-+ *  it under the terms of the GNU General Public License as published by
-+ *  the Free Software Foundation; either version 2 of the License, or
-+ *  (at your option) any later version.
-+ *
-+ *  This program is distributed in the hope that it will be useful,
-+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ *  GNU General Public License for more details.
-+ *
-+ *  You should have received a copy of the GNU General Public License
-+ *  along with this program; if not, write to the Free Software
-+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+ *
-+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/config.h>
-+#include <linux/acpi.h>
-+#include <linux/efi.h>
-+#include <linux/irq.h>
-+#include <linux/module.h>
-+
-+#include <asm/pgtable.h>
-+#include <asm/io_apic.h>
-+#include <asm/apic.h>
-+#include <asm/io.h>
-+#include <asm/irq.h>
-+#include <asm/mpspec.h>
-+#ifdef CONFIG_XEN
-+#include <asm/fixmap.h>
-+#endif
-+
-+#ifdef	CONFIG_X86_64
-+
-+static inline void  acpi_madt_oem_check(char *oem_id, char *oem_table_id) { }
-+extern void __init clustered_apic_check(void);
-+static inline int ioapic_setup_disabled(void) { return 0; }
-+#include <asm/proto.h>
-+
-+#else	/* X86 */
-+
-+#ifdef	CONFIG_X86_LOCAL_APIC
-+#include <mach_apic.h>
-+#include <mach_mpparse.h>
-+#endif	/* CONFIG_X86_LOCAL_APIC */
-+
-+#endif	/* X86 */
-+
-+#define BAD_MADT_ENTRY(entry, end) (					    \
-+		(!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
-+		((acpi_table_entry_header *)entry)->length != sizeof(*entry))
-+
-+#define PREFIX			"ACPI: "
-+
-+#ifdef CONFIG_ACPI_PCI
-+int acpi_noirq __initdata;	/* skip ACPI IRQ initialization */
-+int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
-+#else
-+int acpi_noirq __initdata = 1;
-+int acpi_pci_disabled __initdata = 1;
-+#endif
-+int acpi_ht __initdata = 1;	/* enable HT */
-+
-+int acpi_lapic;
-+int acpi_ioapic;
-+int acpi_strict;
-+EXPORT_SYMBOL(acpi_strict);
-+
-+acpi_interrupt_flags acpi_sci_flags __initdata;
-+int acpi_sci_override_gsi __initdata;
-+int acpi_skip_timer_override __initdata;
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
-+#endif
-+
-+#ifndef __HAVE_ARCH_CMPXCHG
-+#warning ACPI uses CMPXCHG, i486 and later hardware
-+#endif
-+
-+#define MAX_MADT_ENTRIES	256
-+u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
-+			{ [0 ... MAX_MADT_ENTRIES-1] = 0xff };
-+EXPORT_SYMBOL(x86_acpiid_to_apicid);
-+
-+/* --------------------------------------------------------------------------
-+                              Boot-time Configuration
-+   -------------------------------------------------------------------------- */
-+
-+/*
-+ * The default interrupt routing model is PIC (8259).  This gets
-+ * overridden if IOAPICs are enumerated (below).
-+ */
-+enum acpi_irq_model_id		acpi_irq_model = ACPI_IRQ_MODEL_PIC;
-+
-+#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
-+
-+/* rely on all ACPI tables being in the direct mapping */
-+char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
-+{
-+	if (!phys_addr || !size)
-+		return NULL;
-+
-+	if (phys_addr < (end_pfn_map << PAGE_SHIFT))
-+		return __va(phys_addr);
-+
-+	return NULL;
-+}
-+
-+#else
-+
-+/*
-+ * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
-+ * to map the target physical address. The problem is that set_fixmap()
-+ * provides a single page, and it is possible that the page is not
-+ * sufficient.
-+ * By using this area, we can map up to MAX_IO_APICS pages temporarily,
-+ * i.e. until the next __va_range() call.
-+ *
-+ * Important Safety Note:  The fixed I/O APIC page numbers are *subtracted*
-+ * from the fixed base.  That's why we start at FIX_IO_APIC_BASE_END and
-+ * count idx down while incrementing the phys address.
-+ */
-+char *__acpi_map_table(unsigned long phys, unsigned long size)
-+{
-+	unsigned long base, offset, mapped_size;
-+	int idx;
-+
-+#ifndef CONFIG_XEN
-+	if (phys + size < 8*1024*1024) 
-+		return __va(phys); 
-+#endif
-+
-+	offset = phys & (PAGE_SIZE - 1);
-+	mapped_size = PAGE_SIZE - offset;
-+	set_fixmap(FIX_ACPI_END, phys);
-+	base = fix_to_virt(FIX_ACPI_END);
-+
-+	/*
-+	 * Most cases can be covered by the below.
-+	 */
-+	idx = FIX_ACPI_END;
-+	while (mapped_size < size) {
-+		if (--idx < FIX_ACPI_BEGIN)
-+			return NULL;	/* cannot handle this */
-+		phys += PAGE_SIZE;
-+		set_fixmap(idx, phys);
-+		mapped_size += PAGE_SIZE;
-+	}
-+
-+	return ((unsigned char *) base + offset);
-+}
-+#endif
-+
-+#ifdef CONFIG_PCI_MMCONFIG
-+static int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
-+{
-+	struct acpi_table_mcfg *mcfg;
-+
-+	if (!phys_addr || !size)
-+		return -EINVAL;
-+
-+	mcfg = (struct acpi_table_mcfg *) __acpi_map_table(phys_addr, size);
-+	if (!mcfg) {
-+		printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
-+		return -ENODEV;
-+	}
-+
-+	if (mcfg->base_reserved) {
-+		printk(KERN_ERR PREFIX "MMCONFIG not in low 4GB of memory\n");
-+		return -ENODEV;
-+	}
-+
-+	pci_mmcfg_base_addr = mcfg->base_address;
-+
-+	return 0;
-+}
-+#else
-+#define	acpi_parse_mcfg NULL
-+#endif /* !CONFIG_PCI_MMCONFIG */
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static int __init
-+acpi_parse_madt (
-+	unsigned long		phys_addr,
-+	unsigned long		size)
-+{
-+	struct acpi_table_madt	*madt = NULL;
-+
-+	if (!phys_addr || !size)
-+		return -EINVAL;
-+
-+	madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
-+	if (!madt) {
-+		printk(KERN_WARNING PREFIX "Unable to map MADT\n");
-+		return -ENODEV;
-+	}
-+
-+	if (madt->lapic_address) {
-+		acpi_lapic_addr = (u64) madt->lapic_address;
-+
-+		printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
-+			madt->lapic_address);
-+	}
-+
-+	acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
-+	
-+	return 0;
-+}
-+
-+
-+static int __init
-+acpi_parse_lapic (
-+	acpi_table_entry_header *header, const unsigned long end)
-+{
-+	struct acpi_table_lapic	*processor = NULL;
-+
-+	processor = (struct acpi_table_lapic*) header;
-+
-+	if (BAD_MADT_ENTRY(processor, end))
-+		return -EINVAL;
-+
-+	acpi_table_print_madt_entry(header);
-+
-+	/* no utility in registering a disabled processor */
-+	if (processor->flags.enabled == 0)
-+		return 0;
-+
-+	x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
-+
-+	mp_register_lapic (
-+		processor->id,					   /* APIC ID */
-+		processor->flags.enabled);			  /* Enabled? */
-+
-+	return 0;
-+}
-+
-+static int __init
-+acpi_parse_lapic_addr_ovr (
-+	acpi_table_entry_header *header, const unsigned long end)
-+{
-+	struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
-+
-+	lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
-+
-+	if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
-+		return -EINVAL;
-+
-+	acpi_lapic_addr = lapic_addr_ovr->address;
-+
-+	return 0;
-+}
-+
-+static int __init
-+acpi_parse_lapic_nmi (
-+	acpi_table_entry_header *header, const unsigned long end)
-+{
-+	struct acpi_table_lapic_nmi *lapic_nmi = NULL;
-+
-+	lapic_nmi = (struct acpi_table_lapic_nmi*) header;
-+
-+	if (BAD_MADT_ENTRY(lapic_nmi, end))
-+		return -EINVAL;
-+
-+	acpi_table_print_madt_entry(header);
-+
-+	if (lapic_nmi->lint != 1)
-+		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
-+
-+	return 0;
-+}
-+
-+
-+#endif /*CONFIG_X86_LOCAL_APIC*/
-+
-+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
-+
-+static int __init
-+acpi_parse_ioapic (
-+	acpi_table_entry_header *header, const unsigned long end)
-+{
-+	struct acpi_table_ioapic *ioapic = NULL;
-+
-+	ioapic = (struct acpi_table_ioapic*) header;
-+
-+	if (BAD_MADT_ENTRY(ioapic, end))
-+		return -EINVAL;
-+ 
-+	acpi_table_print_madt_entry(header);
-+
-+	mp_register_ioapic (
-+		ioapic->id,
-+		ioapic->address,
-+		ioapic->global_irq_base);
-+ 
-+	return 0;
-+}
-+
-+/*
-+ * Parse Interrupt Source Override for the ACPI SCI
-+ */
-+static void
-+acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
-+{
-+	if (trigger == 0)	/* compatible SCI trigger is level */
-+		trigger = 3;
-+
-+	if (polarity == 0)	/* compatible SCI polarity is low */
-+		polarity = 3;
-+
-+	/* Command-line over-ride via acpi_sci= */
-+	if (acpi_sci_flags.trigger)
-+		trigger = acpi_sci_flags.trigger;
-+
-+	if (acpi_sci_flags.polarity)
-+		polarity = acpi_sci_flags.polarity;
-+
-+	/*
-+ 	 * mp_config_acpi_legacy_irqs() already setup IRQs < 16
-+	 * If GSI is < 16, this will update its flags,
-+	 * else it will create a new mp_irqs[] entry.
-+	 */
-+	mp_override_legacy_irq(gsi, polarity, trigger, gsi);
-+
-+	/*
-+	 * stash over-ride to indicate we've been here
-+	 * and for later update of acpi_fadt
-+	 */
-+	acpi_sci_override_gsi = gsi;
-+	return;
-+}
-+
-+static int __init
-+acpi_parse_int_src_ovr (
-+	acpi_table_entry_header *header, const unsigned long end)
-+{
-+	struct acpi_table_int_src_ovr *intsrc = NULL;
-+
-+	intsrc = (struct acpi_table_int_src_ovr*) header;
-+
-+	if (BAD_MADT_ENTRY(intsrc, end))
-+		return -EINVAL;
-+
-+	acpi_table_print_madt_entry(header);
-+
-+	if (intsrc->bus_irq == acpi_fadt.sci_int) {
-+		acpi_sci_ioapic_setup(intsrc->global_irq,
-+			intsrc->flags.polarity, intsrc->flags.trigger);
-+		return 0;
-+	}
-+
-+	if (acpi_skip_timer_override &&
-+		intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
-+			printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
-+			return 0;
-+	}
-+
-+	mp_override_legacy_irq (
-+		intsrc->bus_irq,
-+		intsrc->flags.polarity,
-+		intsrc->flags.trigger,
-+		intsrc->global_irq);
-+
-+	return 0;
-+}
-+
-+
-+static int __init
-+acpi_parse_nmi_src (
-+	acpi_table_entry_header *header, const unsigned long end)
-+{
-+	struct acpi_table_nmi_src *nmi_src = NULL;
-+
-+	nmi_src = (struct acpi_table_nmi_src*) header;
-+
-+	if (BAD_MADT_ENTRY(nmi_src, end))
-+		return -EINVAL;
-+
-+	acpi_table_print_madt_entry(header);
-+
-+	/* TBD: Support nmisrc entries? */
-+
-+	return 0;
-+}
-+
-+#endif /* CONFIG_X86_IO_APIC */
-+
-+#ifdef	CONFIG_ACPI_BUS
-+
-+/*
-+ * acpi_pic_sci_set_trigger()
-+ * 
-+ * use ELCR to set PIC-mode trigger type for SCI
-+ *
-+ * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
-+ * it may require Edge Trigger -- use "acpi_sci=edge"
-+ *
-+ * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
-+ * for the 8259 PIC.  bit[n] = 1 means irq[n] is Level, otherwise Edge.
-+ * ELCR1 is IRQs 0-7 (IRQ 0, 1, 2 must be 0)
-+ * ELCR2 is IRQs 8-15 (IRQ 8, 13 must be 0)
-+ */
-+
-+void __init
-+acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
-+{
-+	unsigned int mask = 1 << irq;
-+	unsigned int old, new;
-+
-+	/* Real old ELCR mask */
-+	old = inb(0x4d0) | (inb(0x4d1) << 8);
-+
-+	/*
-+	 * If we use ACPI to set PCI irq's, then we should clear ELCR
-+	 * since we will set it correctly as we enable the PCI irq
-+	 * routing.
-+	 */
-+	new = acpi_noirq ? old : 0;
-+
-+	/*
-+	 * Update SCI information in the ELCR, it isn't in the PCI
-+	 * routing tables..
-+	 */
-+	switch (trigger) {
-+	case 1:	/* Edge - clear */
-+		new &= ~mask;
-+		break;
-+	case 3: /* Level - set */
-+		new |= mask;
-+		break;
-+	}
-+
-+	if (old == new)
-+		return;
-+
-+	printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
-+	outb(new, 0x4d0);
-+	outb(new >> 8, 0x4d1);
-+}
-+
-+
-+#endif /* CONFIG_ACPI_BUS */
-+
-+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+	if (use_pci_vector() && !platform_legacy_irq(gsi))
-+ 		*irq = IO_APIC_VECTOR(gsi);
-+	else
-+#endif
-+		*irq = gsi;
-+	return 0;
-+}
-+
-+unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
-+{
-+	unsigned int irq;
-+	unsigned int plat_gsi = gsi;
-+
-+#ifdef CONFIG_PCI
-+	/*
-+	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
-+	 */
-+	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-+		extern void eisa_set_level_irq(unsigned int irq);
-+
-+		if (edge_level == ACPI_LEVEL_SENSITIVE)
-+				eisa_set_level_irq(gsi);
-+	}
-+#endif
-+
-+#ifdef CONFIG_X86_IO_APIC
-+	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
-+		plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
-+	}
-+#endif
-+	acpi_gsi_to_irq(plat_gsi, &irq);
-+	return irq;
-+}
-+EXPORT_SYMBOL(acpi_register_gsi);
-+
-+/*
-+ *  ACPI based hotplug support for CPU
-+ */
-+#ifdef CONFIG_ACPI_HOTPLUG_CPU
-+int
-+acpi_map_lsapic(acpi_handle handle, int *pcpu)
-+{
-+	/* TBD */
-+	return -EINVAL;
-+}
-+EXPORT_SYMBOL(acpi_map_lsapic);
-+
-+
-+int
-+acpi_unmap_lsapic(int cpu)
-+{
-+	/* TBD */
-+	return -EINVAL;
-+}
-+EXPORT_SYMBOL(acpi_unmap_lsapic);
-+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-+
-+static unsigned long __init
-+acpi_scan_rsdp (
-+	unsigned long		start,
-+	unsigned long		length)
-+{
-+	unsigned long		offset = 0;
-+	unsigned long		sig_len = sizeof("RSD PTR ") - 1;
-+	unsigned long		vstart = (unsigned long)isa_bus_to_virt(start);
-+
-+	/*
-+	 * Scan all 16-byte boundaries of the physical memory region for the
-+	 * RSDP signature.
-+	 */
-+	for (offset = 0; offset < length; offset += 16) {
-+		if (strncmp((char *) (vstart + offset), "RSD PTR ", sig_len))
-+			continue;
-+		return (start + offset);
-+	}
-+
-+	return 0;
-+}
-+
-+static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
-+{
-+	struct acpi_table_sbf *sb;
-+
-+	if (!phys_addr || !size)
-+		return -EINVAL;
-+
-+	sb = (struct acpi_table_sbf *) __acpi_map_table(phys_addr, size);
-+	if (!sb) {
-+		printk(KERN_WARNING PREFIX "Unable to map SBF\n");
-+		return -ENODEV;
-+	}
-+
-+	sbf_port = sb->sbf_cmos; /* Save CMOS port */
-+
-+	return 0;
-+}
-+
-+
-+#ifdef CONFIG_HPET_TIMER
-+
-+static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
-+{
-+	struct acpi_table_hpet *hpet_tbl;
-+
-+	if (!phys || !size)
-+		return -EINVAL;
-+
-+	hpet_tbl = (struct acpi_table_hpet *) __acpi_map_table(phys, size);
-+	if (!hpet_tbl) {
-+		printk(KERN_WARNING PREFIX "Unable to map HPET\n");
-+		return -ENODEV;
-+	}
-+
-+	if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
-+		printk(KERN_WARNING PREFIX "HPET timers must be located in "
-+		       "memory.\n");
-+		return -1;
-+	}
-+
-+#ifdef	CONFIG_X86_64
-+        vxtime.hpet_address = hpet_tbl->addr.addrl |
-+                ((long) hpet_tbl->addr.addrh << 32);
-+
-+        printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
-+               hpet_tbl->id, vxtime.hpet_address);
-+#else	/* X86 */
-+	{
-+		extern unsigned long hpet_address;
-+
-+		hpet_address = hpet_tbl->addr.addrl;
-+		printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
-+			hpet_tbl->id, hpet_address);
-+	}
-+#endif	/* X86 */
-+
-+	return 0;
-+}
-+#else
-+#define	acpi_parse_hpet	NULL
-+#endif
-+
-+#ifdef CONFIG_X86_PM_TIMER
-+extern u32 pmtmr_ioport;
-+#endif
-+
-+static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
-+{
-+	struct fadt_descriptor_rev2 *fadt = NULL;
-+
-+	fadt = (struct fadt_descriptor_rev2*) __acpi_map_table(phys,size);
-+	if(!fadt) {
-+		printk(KERN_WARNING PREFIX "Unable to map FADT\n");
-+		return 0;
-+	}
-+
-+#ifdef	CONFIG_ACPI_INTERPRETER
-+	/* initialize sci_int early for INT_SRC_OVR MADT parsing */
-+	acpi_fadt.sci_int = fadt->sci_int;
-+#endif
-+
-+#ifdef CONFIG_ACPI_BUS
-+	/* initialize rev and apic_phys_dest_mode for x86_64 genapic */
-+	acpi_fadt.revision = fadt->revision;
-+	acpi_fadt.force_apic_physical_destination_mode = fadt->force_apic_physical_destination_mode;
-+#endif
-+
-+#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
-+	/* detect the location of the ACPI PM Timer */
-+	if (fadt->revision >= FADT2_REVISION_ID) {
-+		/* FADT rev. 2 */
-+		if (fadt->xpm_tmr_blk.address_space_id != ACPI_ADR_SPACE_SYSTEM_IO)
-+			return 0;
-+
-+		pmtmr_ioport = fadt->xpm_tmr_blk.address;
-+	} else {
-+		/* FADT rev. 1 */
-+		pmtmr_ioport = fadt->V1_pm_tmr_blk;
-+	}
-+	if (pmtmr_ioport)
-+		printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport);
-+#endif
-+	return 0;
-+}
-+
-+
-+unsigned long __init
-+acpi_find_rsdp (void)
-+{
-+	unsigned long		rsdp_phys = 0;
-+
-+	if (efi_enabled) {
-+		if (efi.acpi20)
-+			return __pa(efi.acpi20);
-+		else if (efi.acpi)
-+			return __pa(efi.acpi);
-+	}
-+	/*
-+	 * Scan memory looking for the RSDP signature. First search EBDA (low
-+	 * memory) paragraphs and then search upper memory (E0000-FFFFF).
-+	 */
-+	rsdp_phys = acpi_scan_rsdp (0, 0x400);
-+	if (!rsdp_phys)
-+		rsdp_phys = acpi_scan_rsdp (0xE0000, 0x20000);
-+
-+	return rsdp_phys;
-+}
-+
-+#ifdef	CONFIG_X86_LOCAL_APIC
-+/*
-+ * Parse LAPIC entries in MADT
-+ * returns 0 on success, < 0 on error
-+ */
-+static int __init
-+acpi_parse_madt_lapic_entries(void)
-+{
-+	int count;
-+
-+	/* 
-+	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
-+	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
-+	 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
-+
-+	count = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0);
-+	if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
-+		return count;
-+	}
-+
-+	mp_register_lapic_address(acpi_lapic_addr);
-+
-+	count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
-+				       MAX_APICS);
-+	if (!count) { 
-+		printk(KERN_ERR PREFIX "No LAPIC entries present\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return -ENODEV;
-+	}
-+	else if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return count;
-+	}
-+
-+	count = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
-+	if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return count;
-+	}
-+	return 0;
-+}
-+#endif /* CONFIG_X86_LOCAL_APIC */
-+
-+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
-+/*
-+ * Parse IOAPIC related entries in MADT
-+ * returns 0 on success, < 0 on error
-+ */
-+static int __init
-+acpi_parse_madt_ioapic_entries(void)
-+{
-+	int count;
-+
-+	/*
-+	 * ACPI interpreter is required to complete interrupt setup,
-+	 * so if it is off, don't enumerate the io-apics with ACPI.
-+	 * If MPS is present, it will handle them,
-+	 * otherwise the system will stay in PIC mode
-+	 */
-+	if (acpi_disabled || acpi_noirq) {
-+		return -ENODEV;
-+        }
-+
-+	/*
-+ 	 * if "noapic" boot option, don't look for IO-APICs
-+	 */
-+	if (skip_ioapic_setup) {
-+		printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
-+			"due to 'noapic' option.\n");
-+		return -ENODEV;
-+	}
-+
-+	count = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, MAX_IO_APICS);
-+	if (!count) {
-+		printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
-+		return -ENODEV;
-+	}
-+	else if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
-+		return count;
-+	}
-+
-+	count = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, NR_IRQ_VECTORS);
-+	if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return count;
-+	}
-+
-+	/*
-+	 * If BIOS did not supply an INT_SRC_OVR for the SCI,
-+	 * pretend we got one so we can set the SCI flags.
-+	 */
-+	if (!acpi_sci_override_gsi)
-+		acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
-+
-+	/* Fill in identity legacy mappings where no override */
-+	mp_config_acpi_legacy_irqs();
-+
-+	count = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, NR_IRQ_VECTORS);
-+	if (count < 0) {
-+		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
-+		/* TBD: Cleanup to allow fallback to MPS */
-+		return count;
-+	}
-+
-+	return 0;
-+}
-+#else
-+static inline int acpi_parse_madt_ioapic_entries(void)
-+{
-+	return -1;
-+}
-+#endif /* !(CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER) */
-+
-+
-+static void __init
-+acpi_process_madt(void)
-+{
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	int count, error;
-+
-+	count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
-+	if (count >= 1) {
-+
-+		/*
-+		 * Parse MADT LAPIC entries
-+		 */
-+		error = acpi_parse_madt_lapic_entries();
-+		if (!error) {
-+			acpi_lapic = 1;
-+
-+			/*
-+			 * Parse MADT IO-APIC entries
-+			 */
-+			error = acpi_parse_madt_ioapic_entries();
-+			if (!error) {
-+				acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
-+				acpi_irq_balance_set(NULL);
-+				acpi_ioapic = 1;
-+
-+				smp_found_config = 1;
-+				clustered_apic_check();
-+			}
-+		}
-+		if (error == -EINVAL) {
-+			/*
-+			 * Dell Precision Workstation 410, 610 come here.
-+			 */
-+			printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n");
-+			disable_acpi();
-+		}
-+	}
-+#endif
-+	return;
-+}
-+
-+/*
-+ * acpi_boot_table_init() and acpi_boot_init()
-+ *  called from setup_arch(), always.
-+ *	1. checksums all tables
-+ *	2. enumerates lapics
-+ *	3. enumerates io-apics
-+ *
-+ * acpi_table_init() is separate to allow reading SRAT without
-+ * other side effects.
-+ *
-+ * side effects of acpi_boot_init:
-+ *	acpi_lapic = 1 if LAPIC found
-+ *	acpi_ioapic = 1 if IOAPIC found
-+ *	if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
-+ *	if acpi_blacklisted() acpi_disabled = 1;
-+ *	acpi_irq_model=...
-+ *	...
-+ *
-+ * return value: (currently ignored)
-+ *	0: success
-+ *	!0: failure
-+ */
-+
-+int __init
-+acpi_boot_table_init(void)
-+{
-+	int error;
-+
-+	/*
-+	 * If acpi_disabled, bail out
-+	 * One exception: acpi=ht continues far enough to enumerate LAPICs
-+	 */
-+	if (acpi_disabled && !acpi_ht)
-+		 return 1;
-+
-+	/* 
-+	 * Initialize the ACPI boot-time table parser.
-+	 */
-+	error = acpi_table_init();
-+	if (error) {
-+		disable_acpi();
-+		return error;
-+	}
-+
-+#ifdef __i386__
-+	check_acpi_pci();
-+#endif
-+
-+	acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
-+
-+	/*
-+	 * blacklist may disable ACPI entirely
-+	 */
-+	error = acpi_blacklisted();
-+	if (error) {
-+		extern int acpi_force;
-+
-+		if (acpi_force) {
-+			printk(KERN_WARNING PREFIX "acpi=force override\n");
-+		} else {
-+			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
-+			disable_acpi();
-+			return error;
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+
-+int __init acpi_boot_init(void)
-+{
-+	/*
-+	 * If acpi_disabled, bail out
-+	 * One exception: acpi=ht continues far enough to enumerate LAPICs
-+	 */
-+	if (acpi_disabled && !acpi_ht)
-+		 return 1;
-+
-+	acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
-+
-+	/*
-+	 * set sci_int and PM timer address
-+	 */
-+	acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
-+
-+	/*
-+	 * Process the Multiple APIC Description Table (MADT), if present
-+	 */
-+	acpi_process_madt();
-+
-+	acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
-+	acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
-+
-+	return 0;
-+}
-+
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/acpi/Makefile linux-2.6.12-xen/arch/xen/i386/kernel/acpi/Makefile
---- pristine-linux-2.6.12/arch/xen/i386/kernel/acpi/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/acpi/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,13 @@
-+obj-$(CONFIG_ACPI_BOOT)		        := boot.o
-+c-obj-$(CONFIG_X86_IO_APIC)	        += earlyquirk.o
-+c-obj-$(CONFIG_ACPI_SLEEP)	        += sleep.o wakeup.o
-+
-+c-link                                  :=
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-+	@ln -fsn $(srctree)/arch/i386/kernel/acpi/$(notdir $@) $@
-+
-+obj-y	+= $(c-obj-y) $(s-obj-y)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/apic.c linux-2.6.12-xen/arch/xen/i386/kernel/apic.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/apic.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/apic.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,83 @@
-+/*
-+ *	Local APIC handling, local APIC timers
-+ *
-+ *	(c) 1999, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
-+ *					thanks to Eric Gilmore
-+ *					and Rolf G. Tews
-+ *					for testing these extensively.
-+ *	Maciej W. Rozycki	:	Various updates and fixes.
-+ *	Mikael Pettersson	:	Power Management for UP-APIC.
-+ *	Pavel Machek and
-+ *	Mikael Pettersson	:	PM converted to driver model.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/irq.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/sysdev.h>
-+
-+#include <asm/atomic.h>
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/hpet.h>
-+
-+#include <mach_apic.h>
-+
-+#include "io_ports.h"
-+
-+/*
-+ * Debug level
-+ */
-+int apic_verbosity;
-+
-+int get_physical_broadcast(void)
-+{
-+        return 0xff;
-+}
-+
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+	printk("unexpected IRQ trap at vector %02x\n", irq);
-+	/*
-+	 * Currently unexpected vectors happen only on SMP and APIC.
-+	 * We _must_ ack these because every local APIC has only N
-+	 * irq slots per priority level, and a 'hanging, unacked' IRQ
-+	 * holds up an irq slot - in excessive cases (when multiple
-+	 * unexpected vectors occur) that might lock up the APIC
-+	 * completely.
-+	 */
-+	ack_APIC_irq();
-+}
-+
-+/*
-+ * This initializes the IO-APIC and APIC hardware if this is
-+ * a UP kernel.
-+ */
-+int __init APIC_init_uniprocessor (void)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+	if (smp_found_config)
-+		if (!skip_ioapic_setup && nr_ioapics)
-+			setup_IO_APIC();
-+#endif
-+
-+	return 0;
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/common.c linux-2.6.12-xen/arch/xen/i386/kernel/cpu/common.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/common.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/cpu/common.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,653 @@
-+#include <linux/init.h>
-+#include <linux/string.h>
-+#include <linux/delay.h>
-+#include <linux/smp.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+#include <asm/semaphore.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/msr.h>
-+#include <asm/io.h>
-+#include <asm/mmu_context.h>
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#include <asm/mpspec.h>
-+#include <asm/apic.h>
-+#include <mach_apic.h>
-+#endif
-+#include <asm/hypervisor.h>
-+
-+#include "cpu.h"
-+
-+#ifndef CONFIG_XEN
-+DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-+EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
-+
-+DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-+EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
-+#endif
-+
-+static int cachesize_override __initdata = -1;
-+static int disable_x86_fxsr __initdata = 0;
-+static int disable_x86_serial_nr __initdata = 1;
-+
-+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
-+
-+extern void mcheck_init(struct cpuinfo_x86 *c);
-+
-+extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
-+
-+extern int disable_pse;
-+
-+static void default_init(struct cpuinfo_x86 * c)
-+{
-+	/* Not much we can do here... */
-+	/* Check if at least it has cpuid */
-+	if (c->cpuid_level == -1) {
-+		/* No cpuid. It must be an ancient CPU */
-+		if (c->x86 == 4)
-+			strcpy(c->x86_model_id, "486");
-+		else if (c->x86 == 3)
-+			strcpy(c->x86_model_id, "386");
-+	}
-+}
-+
-+static struct cpu_dev default_cpu = {
-+	.c_init	= default_init,
-+};
-+static struct cpu_dev * this_cpu = &default_cpu;
-+
-+static int __init cachesize_setup(char *str)
-+{
-+	get_option (&str, &cachesize_override);
-+	return 1;
-+}
-+__setup("cachesize=", cachesize_setup);
-+
-+int __init get_model_name(struct cpuinfo_x86 *c)
-+{
-+	unsigned int *v;
-+	char *p, *q;
-+
-+	if (cpuid_eax(0x80000000) < 0x80000004)
-+		return 0;
-+
-+	v = (unsigned int *) c->x86_model_id;
-+	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-+	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-+	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-+	c->x86_model_id[48] = 0;
-+
-+	/* Intel chips right-justify this string for some dumb reason;
-+	   undo that brain damage */
-+	p = q = &c->x86_model_id[0];
-+	while ( *p == ' ' )
-+	     p++;
-+	if ( p != q ) {
-+	     while ( *p )
-+		  *q++ = *p++;
-+	     while ( q <= &c->x86_model_id[48] )
-+		  *q++ = '\0';	/* Zero-pad the rest */
-+	}
-+
-+	return 1;
-+}
-+
-+
-+void __init display_cacheinfo(struct cpuinfo_x86 *c)
-+{
-+	unsigned int n, dummy, ecx, edx, l2size;
-+
-+	n = cpuid_eax(0x80000000);
-+
-+	if (n >= 0x80000005) {
-+		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
-+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-+			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+		c->x86_cache_size=(ecx>>24)+(edx>>24);	
-+	}
-+
-+	if (n < 0x80000006)	/* Some chips just have a large L1. */
-+		return;
-+
-+	ecx = cpuid_ecx(0x80000006);
-+	l2size = ecx >> 16;
-+	
-+	/* do processor-specific cache resizing */
-+	if (this_cpu->c_size_cache)
-+		l2size = this_cpu->c_size_cache(c,l2size);
-+
-+	/* Allow user to override all this if necessary. */
-+	if (cachesize_override != -1)
-+		l2size = cachesize_override;
-+
-+	if ( l2size == 0 )
-+		return;		/* Again, no L2 cache is possible */
-+
-+	c->x86_cache_size = l2size;
-+
-+	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-+	       l2size, ecx & 0xFF);
-+}
-+
-+/* Naming convention should be: <Name> [(<Codename>)] */
-+/* This table only is used unless init_<vendor>() below doesn't set it; */
-+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
-+
-+/* Look up CPU names by table lookup. */
-+static char __init *table_lookup_model(struct cpuinfo_x86 *c)
-+{
-+	struct cpu_model_info *info;
-+
-+	if ( c->x86_model >= 16 )
-+		return NULL;	/* Range check */
-+
-+	if (!this_cpu)
-+		return NULL;
-+
-+	info = this_cpu->c_models;
-+
-+	while (info && info->family) {
-+		if (info->family == c->x86)
-+			return info->model_names[c->x86_model];
-+		info++;
-+	}
-+	return NULL;		/* Not found */
-+}
-+
-+
-+void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
-+{
-+	char *v = c->x86_vendor_id;
-+	int i;
-+
-+	for (i = 0; i < X86_VENDOR_NUM; i++) {
-+		if (cpu_devs[i]) {
-+			if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
-+			    (cpu_devs[i]->c_ident[1] && 
-+			     !strcmp(v,cpu_devs[i]->c_ident[1]))) {
-+				c->x86_vendor = i;
-+				if (!early)
-+					this_cpu = cpu_devs[i];
-+				break;
-+			}
-+		}
-+	}
-+}
-+
-+
-+static int __init x86_fxsr_setup(char * s)
-+{
-+	disable_x86_fxsr = 1;
-+	return 1;
-+}
-+__setup("nofxsr", x86_fxsr_setup);
-+
-+
-+/* Standard macro to see if a specific flag is changeable */
-+static inline int flag_is_changeable_p(u32 flag)
-+{
-+	u32 f1, f2;
-+
-+	asm("pushfl\n\t"
-+	    "pushfl\n\t"
-+	    "popl %0\n\t"
-+	    "movl %0,%1\n\t"
-+	    "xorl %2,%0\n\t"
-+	    "pushl %0\n\t"
-+	    "popfl\n\t"
-+	    "pushfl\n\t"
-+	    "popl %0\n\t"
-+	    "popfl\n\t"
-+	    : "=&r" (f1), "=&r" (f2)
-+	    : "ir" (flag));
-+
-+	return ((f1^f2) & flag) != 0;
-+}
-+
-+
-+/* Probe for the CPUID instruction */
-+static int __init have_cpuid_p(void)
-+{
-+	return flag_is_changeable_p(X86_EFLAGS_ID);
-+}
-+
-+/* Do minimum CPU detection early.
-+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-+   The others are not touched to avoid unwanted side effects. */
-+static void __init early_cpu_detect(void)
-+{
-+	struct cpuinfo_x86 *c = &boot_cpu_data;
-+
-+	c->x86_cache_alignment = 32;
-+
-+	if (!have_cpuid_p())
-+		return;
-+
-+	/* Get vendor name */
-+	cpuid(0x00000000, &c->cpuid_level,
-+	      (int *)&c->x86_vendor_id[0],
-+	      (int *)&c->x86_vendor_id[8],
-+	      (int *)&c->x86_vendor_id[4]);
-+
-+	get_cpu_vendor(c, 1);
-+
-+	c->x86 = 4;
-+	if (c->cpuid_level >= 0x00000001) {
-+		u32 junk, tfms, cap0, misc;
-+		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-+		c->x86 = (tfms >> 8) & 15;
-+		c->x86_model = (tfms >> 4) & 15;
-+		if (c->x86 == 0xf) {
-+			c->x86 += (tfms >> 20) & 0xff;
-+			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+		}
-+		c->x86_mask = tfms & 15;
-+		if (cap0 & (1<<19))
-+			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
-+	}
-+
-+	early_intel_workaround(c);
-+
-+#ifdef CONFIG_X86_HT
-+	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
-+#endif
-+}
-+
-+void __init generic_identify(struct cpuinfo_x86 * c)
-+{
-+	u32 tfms, xlvl;
-+	int junk;
-+
-+	if (have_cpuid_p()) {
-+		/* Get vendor name */
-+		cpuid(0x00000000, &c->cpuid_level,
-+		      (int *)&c->x86_vendor_id[0],
-+		      (int *)&c->x86_vendor_id[8],
-+		      (int *)&c->x86_vendor_id[4]);
-+		
-+		get_cpu_vendor(c, 0);
-+		/* Initialize the standard set of capabilities */
-+		/* Note that the vendor-specific code below might override */
-+	
-+		/* Intel-defined flags: level 0x00000001 */
-+		if ( c->cpuid_level >= 0x00000001 ) {
-+			u32 capability, excap;
-+			cpuid(0x00000001, &tfms, &junk, &excap, &capability);
-+			c->x86_capability[0] = capability;
-+			c->x86_capability[4] = excap;
-+			c->x86 = (tfms >> 8) & 15;
-+			c->x86_model = (tfms >> 4) & 15;
-+			if (c->x86 == 0xf) {
-+				c->x86 += (tfms >> 20) & 0xff;
-+				c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+			} 
-+			c->x86_mask = tfms & 15;
-+		} else {
-+			/* Have CPUID level 0 only - unheard of */
-+			c->x86 = 4;
-+		}
-+
-+		/* AMD-defined flags: level 0x80000001 */
-+		xlvl = cpuid_eax(0x80000000);
-+		if ( (xlvl & 0xffff0000) == 0x80000000 ) {
-+			if ( xlvl >= 0x80000001 ) {
-+				c->x86_capability[1] = cpuid_edx(0x80000001);
-+				c->x86_capability[6] = cpuid_ecx(0x80000001);
-+			}
-+			if ( xlvl >= 0x80000004 )
-+				get_model_name(c); /* Default name */
-+		}
-+	}
-+}
-+
-+static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-+{
-+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
-+		/* Disable processor serial number */
-+		unsigned long lo,hi;
-+		rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-+		lo |= 0x200000;
-+		wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
-+		printk(KERN_NOTICE "CPU serial number disabled.\n");
-+		clear_bit(X86_FEATURE_PN, c->x86_capability);
-+
-+		/* Disabling the serial number may affect the cpuid level */
-+		c->cpuid_level = cpuid_eax(0);
-+	}
-+}
-+
-+static int __init x86_serial_nr_setup(char *s)
-+{
-+	disable_x86_serial_nr = 0;
-+	return 1;
-+}
-+__setup("serialnumber", x86_serial_nr_setup);
-+
-+
-+
-+/*
-+ * This does the hard work of actually picking apart the CPU stuff...
-+ */
-+void __init identify_cpu(struct cpuinfo_x86 *c)
-+{
-+	int i;
-+
-+	c->loops_per_jiffy = loops_per_jiffy;
-+	c->x86_cache_size = -1;
-+	c->x86_vendor = X86_VENDOR_UNKNOWN;
-+	c->cpuid_level = -1;	/* CPUID not detected */
-+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-+	c->x86_vendor_id[0] = '\0'; /* Unset */
-+	c->x86_model_id[0] = '\0';  /* Unset */
-+	c->x86_num_cores = 1;
-+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-+
-+	if (!have_cpuid_p()) {
-+		/* First of all, decide if this is a 486 or higher */
-+		/* It's a 486 if we can modify the AC flag */
-+		if ( flag_is_changeable_p(X86_EFLAGS_AC) )
-+			c->x86 = 4;
-+		else
-+			c->x86 = 3;
-+	}
-+
-+	generic_identify(c);
-+
-+	printk(KERN_DEBUG "CPU: After generic identify, caps:");
-+	for (i = 0; i < NCAPINTS; i++)
-+		printk(" %08lx", c->x86_capability[i]);
-+	printk("\n");
-+
-+	if (this_cpu->c_identify) {
-+		this_cpu->c_identify(c);
-+
-+		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
-+		for (i = 0; i < NCAPINTS; i++)
-+			printk(" %08lx", c->x86_capability[i]);
-+		printk("\n");
-+	}
-+
-+	/*
-+	 * Vendor-specific initialization.  In this section we
-+	 * canonicalize the feature flags, meaning if there are
-+	 * features a certain CPU supports which CPUID doesn't
-+	 * tell us, CPUID claiming incorrect flags, or other bugs,
-+	 * we handle them here.
-+	 *
-+	 * At the end of this section, c->x86_capability better
-+	 * indicate the features this CPU genuinely supports!
-+	 */
-+	if (this_cpu->c_init)
-+		this_cpu->c_init(c);
-+
-+	/* Disable the PN if appropriate */
-+	squash_the_stupid_serial_number(c);
-+
-+	/*
-+	 * The vendor-specific functions might have changed features.  Now
-+	 * we do "generic changes."
-+	 */
-+
-+	/* TSC disabled? */
-+	if ( tsc_disable )
-+		clear_bit(X86_FEATURE_TSC, c->x86_capability);
-+
-+	/* FXSR disabled? */
-+	if (disable_x86_fxsr) {
-+		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
-+		clear_bit(X86_FEATURE_XMM, c->x86_capability);
-+	}
-+
-+	if (disable_pse)
-+		clear_bit(X86_FEATURE_PSE, c->x86_capability);
-+
-+	/* If the model name is still unset, do table lookup. */
-+	if ( !c->x86_model_id[0] ) {
-+		char *p;
-+		p = table_lookup_model(c);
-+		if ( p )
-+			strcpy(c->x86_model_id, p);
-+		else
-+			/* Last resort... */
-+			sprintf(c->x86_model_id, "%02x/%02x",
-+				c->x86_vendor, c->x86_model);
-+	}
-+
-+	machine_specific_modify_cpu_capabilities(c);
-+
-+	/* Now the feature flags better reflect actual CPU features! */
-+
-+	printk(KERN_DEBUG "CPU: After all inits, caps:");
-+	for (i = 0; i < NCAPINTS; i++)
-+		printk(" %08lx", c->x86_capability[i]);
-+	printk("\n");
-+
-+	/*
-+	 * On SMP, boot_cpu_data holds the common feature set between
-+	 * all CPUs; so make sure that we indicate which features are
-+	 * common between the CPUs.  The first time this routine gets
-+	 * executed, c == &boot_cpu_data.
-+	 */
-+	if ( c != &boot_cpu_data ) {
-+		/* AND the already accumulated flags with these */
-+		for ( i = 0 ; i < NCAPINTS ; i++ )
-+			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-+	}
-+
-+	/* Init Machine Check Exception if available. */
-+#ifdef CONFIG_X86_MCE
-+	mcheck_init(c);
-+#endif
-+}
-+
-+#ifdef CONFIG_X86_HT
-+void __init detect_ht(struct cpuinfo_x86 *c)
-+{
-+	u32 	eax, ebx, ecx, edx;
-+	int 	index_msb, tmp;
-+	int 	cpu = smp_processor_id();
-+
-+	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-+		return;
-+
-+	cpuid(1, &eax, &ebx, &ecx, &edx);
-+	smp_num_siblings = (ebx & 0xff0000) >> 16;
-+
-+	if (smp_num_siblings == 1) {
-+		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-+	} else if (smp_num_siblings > 1 ) {
-+		index_msb = 31;
-+
-+		if (smp_num_siblings > NR_CPUS) {
-+			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
-+			smp_num_siblings = 1;
-+			return;
-+		}
-+		tmp = smp_num_siblings;
-+		while ((tmp & 0x80000000 ) == 0) {
-+			tmp <<=1 ;
-+			index_msb--;
-+		}
-+		if (smp_num_siblings & (smp_num_siblings - 1))
-+			index_msb++;
-+		phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
-+
-+		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-+		       phys_proc_id[cpu]);
-+
-+		smp_num_siblings = smp_num_siblings / c->x86_num_cores;
-+
-+		tmp = smp_num_siblings;
-+		index_msb = 31;
-+		while ((tmp & 0x80000000) == 0) {
-+			tmp <<=1 ;
-+			index_msb--;
-+		}
-+
-+		if (smp_num_siblings & (smp_num_siblings - 1))
-+			index_msb++;
-+
-+		cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
-+
-+		if (c->x86_num_cores > 1)
-+			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-+			       cpu_core_id[cpu]);
-+	}
-+}
-+#endif
-+
-+void __init print_cpu_info(struct cpuinfo_x86 *c)
-+{
-+	char *vendor = NULL;
-+
-+	if (c->x86_vendor < X86_VENDOR_NUM)
-+		vendor = this_cpu->c_vendor;
-+	else if (c->cpuid_level >= 0)
-+		vendor = c->x86_vendor_id;
-+
-+	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-+		printk("%s ", vendor);
-+
-+	if (!c->x86_model_id[0])
-+		printk("%d86", c->x86);
-+	else
-+		printk("%s", c->x86_model_id);
-+
-+	if (c->x86_mask || c->cpuid_level >= 0) 
-+		printk(" stepping %02x\n", c->x86_mask);
-+	else
-+		printk("\n");
-+}
-+
-+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-+
-+/* This is hacky. :)
-+ * We're emulating future behavior.
-+ * In the future, the cpu-specific init functions will be called implicitly
-+ * via the magic of initcalls.
-+ * They will insert themselves into the cpu_devs structure.
-+ * Then, when cpu_init() is called, we can just iterate over that array.
-+ */
-+
-+extern int intel_cpu_init(void);
-+extern int cyrix_init_cpu(void);
-+extern int nsc_init_cpu(void);
-+extern int amd_init_cpu(void);
-+extern int centaur_init_cpu(void);
-+extern int transmeta_init_cpu(void);
-+extern int rise_init_cpu(void);
-+extern int nexgen_init_cpu(void);
-+extern int umc_init_cpu(void);
-+
-+void __init early_cpu_init(void)
-+{
-+	intel_cpu_init();
-+	cyrix_init_cpu();
-+	nsc_init_cpu();
-+	amd_init_cpu();
-+	centaur_init_cpu();
-+	transmeta_init_cpu();
-+	rise_init_cpu();
-+	nexgen_init_cpu();
-+	umc_init_cpu();
-+	early_cpu_detect();
-+
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+	/* pse is not compatible with on-the-fly unmapping,
-+	 * disable it even if the cpus claim to support it.
-+	 */
-+	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-+	disable_pse = 1;
-+#endif
-+}
-+
-+void __cpuinit cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
-+{
-+	unsigned long frames[16];
-+	unsigned long va;
-+	int f;
-+
-+	for (va = gdt_descr->address, f = 0;
-+	     va < gdt_descr->address + gdt_descr->size;
-+	     va += PAGE_SIZE, f++) {
-+		frames[f] = virt_to_mfn(va);
-+		make_lowmem_page_readonly(
-+			(void *)va, XENFEAT_writable_descriptor_tables);
-+	}
-+	if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
-+		BUG();
-+	lgdt_finish();
-+}
-+
-+/*
-+ * cpu_init() initializes state that is per-CPU. Some data is already
-+ * initialized (naturally) in the bootstrap process, such as the GDT
-+ * and IDT. We reload them nevertheless, this function acts as a
-+ * 'CPU state barrier', nothing should get across.
-+ */
-+void __cpuinit cpu_init (void)
-+{
-+	int cpu = smp_processor_id();
-+	struct tss_struct * t = &per_cpu(init_tss, cpu);
-+	struct thread_struct *thread = &current->thread;
-+
-+	if (cpu_test_and_set(cpu, cpu_initialized)) {
-+		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-+		for (;;) local_irq_enable();
-+	}
-+	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
-+
-+	if (cpu_has_vme || cpu_has_de)
-+		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-+	if (tsc_disable && cpu_has_tsc) {
-+		printk(KERN_NOTICE "Disabling TSC...\n");
-+		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-+		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-+		set_in_cr4(X86_CR4_TSD);
-+	}
-+
-+	/*
-+	 * Set up the per-thread TLS descriptor cache:
-+	 */
-+	memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
-+	       GDT_ENTRY_TLS_ENTRIES * 8);
-+
-+	cpu_gdt_init(&cpu_gdt_descr[cpu]);
-+
-+	/*
-+	 * Delete NT
-+	 */
-+	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
-+
-+	/*
-+	 * Set up and load the per-CPU TSS and LDT
-+	 */
-+	atomic_inc(&init_mm.mm_count);
-+	current->active_mm = &init_mm;
-+	if (current->mm)
-+		BUG();
-+	enter_lazy_tlb(&init_mm, current);
-+
-+	load_esp0(t, thread);
-+
-+	load_LDT(&init_mm.context);
-+
-+	/* Clear %fs and %gs. */
-+	asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
-+
-+	/* Clear all 6 debug registers: */
-+
-+#define CD(register) HYPERVISOR_set_debugreg(register, 0)
-+
-+	CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
-+
-+#undef CD
-+
-+	/*
-+	 * Force FPU initialization:
-+	 */
-+	current_thread_info()->status = 0;
-+	clear_used_math();
-+	mxcsr_feature_mask_init();
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/Makefile linux-2.6.12-xen/arch/xen/i386/kernel/cpu/Makefile
---- pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/cpu/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,31 @@
-+#
-+# Makefile for x86-compatible CPU details and quirks
-+#
-+
-+CFLAGS	+= -Iarch/i386/kernel/cpu
-+
-+obj-y	:=	common.o
-+c-obj-y	+=	proc.o
-+
-+c-obj-y	+=	amd.o
-+c-obj-y	+=	cyrix.o
-+c-obj-y	+=	centaur.o
-+c-obj-y	+=	transmeta.o
-+c-obj-y	+=	intel.o intel_cacheinfo.o
-+c-obj-y	+=	rise.o
-+c-obj-y	+=	nexgen.o
-+c-obj-y	+=	umc.o
-+
-+#obj-$(CONFIG_X86_MCE)	+=	../../../../i386/kernel/cpu/mcheck/
-+
-+obj-$(CONFIG_MTRR)	+= 	mtrr/
-+#obj-$(CONFIG_CPU_FREQ)	+=	../../../../i386/kernel/cpu/cpufreq/
-+
-+c-link	:=
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-+	@ln -fsn $(srctree)/arch/i386/kernel/cpu/$(notdir $@) $@
-+
-+obj-y	+= $(c-obj-y)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/mtrr/main.c linux-2.6.12-xen/arch/xen/i386/kernel/cpu/mtrr/main.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/mtrr/main.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/cpu/mtrr/main.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,172 @@
-+#include <linux/init.h>
-+#include <linux/proc_fs.h>
-+#include <linux/ctype.h>
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include <asm/uaccess.h>
-+
-+#include <asm/mtrr.h>
-+#include "mtrr.h"
-+
-+void generic_get_mtrr(unsigned int reg, unsigned long *base,
-+		      unsigned int *size, mtrr_type * type)
-+{
-+	dom0_op_t op;
-+
-+	op.cmd = DOM0_READ_MEMTYPE;
-+	op.u.read_memtype.reg = reg;
-+	(void)HYPERVISOR_dom0_op(&op);
-+
-+	*size = op.u.read_memtype.nr_pfns;
-+	*base = op.u.read_memtype.pfn;
-+	*type = op.u.read_memtype.type;
-+}
-+
-+struct mtrr_ops generic_mtrr_ops = {
-+	.use_intel_if      = 1,
-+	.get               = generic_get_mtrr,
-+};
-+
-+struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
-+unsigned int num_var_ranges;
-+unsigned int *usage_table;
-+
-+static void __init set_num_var_ranges(void)
-+{
-+	dom0_op_t op;
-+
-+	for (num_var_ranges = 0; ; num_var_ranges++) {
-+		op.cmd = DOM0_READ_MEMTYPE;
-+		op.u.read_memtype.reg = num_var_ranges;
-+		if (HYPERVISOR_dom0_op(&op) != 0)
-+			break;
-+	}
-+}
-+
-+static void __init init_table(void)
-+{
-+	int i, max;
-+
-+	max = num_var_ranges;
-+	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
-+	    == NULL) {
-+		printk(KERN_ERR "mtrr: could not allocate\n");
-+		return;
-+	}
-+	for (i = 0; i < max; i++)
-+		usage_table[i] = 0;
-+}
-+
-+int mtrr_add_page(unsigned long base, unsigned long size, 
-+		  unsigned int type, char increment)
-+{
-+	int error;
-+	dom0_op_t op;
-+
-+	op.cmd = DOM0_ADD_MEMTYPE;
-+	op.u.add_memtype.pfn     = base;
-+	op.u.add_memtype.nr_pfns = size;
-+	op.u.add_memtype.type    = type;
-+	error = HYPERVISOR_dom0_op(&op);
-+	if (error) {
-+		BUG_ON(error > 0);
-+		return error;
-+	}
-+
-+	if (increment)
-+		++usage_table[op.u.add_memtype.reg];
-+
-+	return op.u.add_memtype.reg;
-+}
-+
-+int
-+mtrr_add(unsigned long base, unsigned long size, unsigned int type,
-+	 char increment)
-+{
-+	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-+		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
-+		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
-+		return -EINVAL;
-+	}
-+	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
-+			     increment);
-+}
-+
-+int mtrr_del_page(int reg, unsigned long base, unsigned long size)
-+{
-+	int i, max;
-+	mtrr_type ltype;
-+	unsigned long lbase;
-+	unsigned int lsize;
-+	int error = -EINVAL;
-+	dom0_op_t op;
-+
-+	max = num_var_ranges;
-+	if (reg < 0) {
-+		/*  Search for existing MTRR  */
-+		for (i = 0; i < max; ++i) {
-+			mtrr_if->get(i, &lbase, &lsize, &ltype);
-+			if (lbase == base && lsize == size) {
-+				reg = i;
-+				break;
-+			}
-+		}
-+		if (reg < 0) {
-+			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
-+			       size);
-+			goto out;
-+		}
-+	}
-+	if (usage_table[reg] < 1) {
-+		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
-+		goto out;
-+	}
-+	if (--usage_table[reg] < 1) {
-+		op.cmd = DOM0_DEL_MEMTYPE;
-+		op.u.del_memtype.handle = 0;
-+		op.u.del_memtype.reg    = reg;
-+		error = HYPERVISOR_dom0_op(&op);
-+		if (error) {
-+			BUG_ON(error > 0);
-+			goto out;
-+		}
-+	}
-+	error = reg;
-+ out:
-+	return error;
-+}
-+
-+int
-+mtrr_del(int reg, unsigned long base, unsigned long size)
-+{
-+	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-+		printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
-+		printk(KERN_DEBUG "mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
-+		return -EINVAL;
-+	}
-+	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
-+}
-+
-+EXPORT_SYMBOL(mtrr_add);
-+EXPORT_SYMBOL(mtrr_del);
-+
-+static int __init mtrr_init(void)
-+{
-+	struct cpuinfo_x86 *c = &boot_cpu_data;
-+
-+	if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+		return -ENODEV;
-+
-+	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
-+	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
-+	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
-+	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
-+		return -ENODEV;
-+
-+	set_num_var_ranges();
-+	init_table();
-+
-+	return 0;
-+}
-+
-+subsys_initcall(mtrr_init);
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/mtrr/Makefile linux-2.6.12-xen/arch/xen/i386/kernel/cpu/mtrr/Makefile
---- pristine-linux-2.6.12/arch/xen/i386/kernel/cpu/mtrr/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/cpu/mtrr/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,16 @@
-+obj-y	:= main.o
-+c-obj-y	:= if.o
-+
-+c-link	:=
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)): $(obj)/mtrr.h
-+	@ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/$(notdir $@) $@
-+
-+$(patsubst %.o,$(obj)/%.c,$(obj-y)): $(obj)/mtrr.h
-+
-+$(obj)/mtrr.h:
-+	@ln -fsn $(srctree)/arch/i386/kernel/cpu/mtrr/mtrr.h $@
-+
-+obj-y	+= $(c-obj-y)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/entry.S linux-2.6.12-xen/arch/xen/i386/kernel/entry.S
---- pristine-linux-2.6.12/arch/xen/i386/kernel/entry.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/entry.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,842 @@
-+/*
-+ *  linux/arch/i386/entry.S
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ */
-+
-+/*
-+ * entry.S contains the system-call and fault low-level handling routines.
-+ * This also contains the timer-interrupt handler, as well as all interrupts
-+ * and faults that can result in a task-switch.
-+ *
-+ * NOTE: This code handles signal-recognition, which happens every time
-+ * after a timer-interrupt and after each system call.
-+ *
-+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
-+ * on a 486.
-+ *
-+ * Stack layout in 'ret_from_system_call':
-+ * 	ptrace needs to have all regs on the stack.
-+ *	if the order here is changed, it needs to be
-+ *	updated in fork.c:copy_process, signal.c:do_signal,
-+ *	ptrace.c and ptrace.h
-+ *
-+ *	 0(%esp) - %ebx
-+ *	 4(%esp) - %ecx
-+ *	 8(%esp) - %edx
-+ *       C(%esp) - %esi
-+ *	10(%esp) - %edi
-+ *	14(%esp) - %ebp
-+ *	18(%esp) - %eax
-+ *	1C(%esp) - %ds
-+ *	20(%esp) - %es
-+ *	24(%esp) - orig_eax
-+ *	28(%esp) - %eip
-+ *	2C(%esp) - %cs
-+ *	30(%esp) - %eflags
-+ *	34(%esp) - %oldesp
-+ *	38(%esp) - %oldss
-+ *
-+ * "current" is in register %ebx during any slow entries.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/linkage.h>
-+#include <asm/thread_info.h>
-+#include <asm/errno.h>
-+#include <asm/segment.h>
-+#include <asm/smp.h>
-+#include <asm/page.h>
-+#include <asm/desc.h>
-+#include "irq_vectors.h"
-+#include <asm-xen/xen-public/xen.h>
-+
-+#define nr_syscalls ((syscall_table_size)/4)
-+
-+EBX		= 0x00
-+ECX		= 0x04
-+EDX		= 0x08
-+ESI		= 0x0C
-+EDI		= 0x10
-+EBP		= 0x14
-+EAX		= 0x18
-+DS		= 0x1C
-+ES		= 0x20
-+ORIG_EAX	= 0x24
-+EIP		= 0x28
-+CS		= 0x2C
-+EVENT_MASK	= 0x2E
-+EFLAGS		= 0x30
-+OLDESP		= 0x34
-+OLDSS		= 0x38
-+
-+CF_MASK		= 0x00000001
-+TF_MASK		= 0x00000100
-+IF_MASK		= 0x00000200
-+DF_MASK		= 0x00000400 
-+NT_MASK		= 0x00004000
-+VM_MASK		= 0x00020000
-+/* Pseudo-eflags. */
-+NMI_MASK	= 0x80000000
-+	
-+/* Offsets into shared_info_t. */
-+#define evtchn_upcall_pending		/* 0 */
-+#define evtchn_upcall_mask		1
-+
-+#define sizeof_vcpu_shift		6
-+
-+#ifdef CONFIG_SMP
-+#define preempt_disable(reg)	incl TI_preempt_count(reg)
-+#define preempt_enable(reg)	decl TI_preempt_count(reg)
-+#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%ebp)			; \
-+				movl TI_cpu(%ebp),reg			; \
-+				shl  $sizeof_vcpu_shift,reg		; \
-+				addl HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%ebp)
-+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
-+#else
-+#define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg)
-+#define XEN_PUT_VCPU_INFO_fixup
-+#endif
-+
-+#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
-+#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
-+#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
-+				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
-+    				XEN_PUT_VCPU_INFO(reg)
-+#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
-+				XEN_LOCKED_UNBLOCK_EVENTS(reg)		; \
-+    				XEN_PUT_VCPU_INFO(reg)
-+#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
-+
-+#ifdef CONFIG_PREEMPT
-+#define preempt_stop		GET_THREAD_INFO(%ebp)			; \
-+				XEN_BLOCK_EVENTS(%esi)
-+#else
-+#define preempt_stop
-+#define resume_kernel		restore_nocheck
-+#endif
-+
-+#define SAVE_ALL \
-+	cld; \
-+	pushl %es; \
-+	pushl %ds; \
-+	pushl %eax; \
-+	pushl %ebp; \
-+	pushl %edi; \
-+	pushl %esi; \
-+	pushl %edx; \
-+	pushl %ecx; \
-+	pushl %ebx; \
-+	movl $(__USER_DS), %edx; \
-+	movl %edx, %ds; \
-+	movl %edx, %es;
-+
-+#define RESTORE_INT_REGS \
-+	popl %ebx;	\
-+	popl %ecx;	\
-+	popl %edx;	\
-+	popl %esi;	\
-+	popl %edi;	\
-+	popl %ebp;	\
-+	popl %eax
-+
-+#define RESTORE_REGS	\
-+	RESTORE_INT_REGS; \
-+1:	popl %ds;	\
-+2:	popl %es;	\
-+.section .fixup,"ax";	\
-+3:	movl $0,(%esp);	\
-+	jmp 1b;		\
-+4:	movl $0,(%esp);	\
-+	jmp 2b;		\
-+.previous;		\
-+.section __ex_table,"a";\
-+	.align 4;	\
-+	.long 1b,3b;	\
-+	.long 2b,4b;	\
-+.previous
-+
-+
-+#define RESTORE_ALL	\
-+	RESTORE_REGS	\
-+	addl $4, %esp;	\
-+1:	iret;		\
-+.section .fixup,"ax";   \
-+2:	pushl $0;	\
-+	pushl $do_iret_error;	\
-+	jmp error_code;	\
-+.previous;		\
-+.section __ex_table,"a";\
-+	.align 4;	\
-+	.long 1b,2b;	\
-+.previous
-+
-+
-+ENTRY(ret_from_fork)
-+	pushl %eax
-+	call schedule_tail
-+	GET_THREAD_INFO(%ebp)
-+	popl %eax
-+	jmp syscall_exit
-+
-+/*
-+ * Return to user mode is not as complex as all this looks,
-+ * but we want the default path for a system call return to
-+ * go as quickly as possible which is why some of this is
-+ * less clear than it otherwise should be.
-+ */
-+
-+	# userspace resumption stub bypassing syscall exit tracing
-+	ALIGN
-+ret_from_exception:
-+	preempt_stop
-+ret_from_intr:
-+	GET_THREAD_INFO(%ebp)
-+	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
-+	movb CS(%esp), %al
-+	testl $(VM_MASK | 2), %eax
-+	jz resume_kernel
-+ENTRY(resume_userspace)
-+	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
-+					# setting need_resched or sigpending
-+					# between sampling and the iret
-+	movl TI_flags(%ebp), %ecx
-+	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
-+					# int/exception return?
-+	jne work_pending
-+	jmp restore_all
-+
-+#ifdef CONFIG_PREEMPT
-+ENTRY(resume_kernel)
-+	XEN_BLOCK_EVENTS(%esi)
-+	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
-+	jnz restore_nocheck
-+need_resched:
-+	movl TI_flags(%ebp), %ecx	# need_resched set ?
-+	testb $_TIF_NEED_RESCHED, %cl
-+	jz restore_all
-+	testb $0xFF,EVENT_MASK(%esp)	# interrupts off (exception path) ?
-+	jnz restore_all
-+	call preempt_schedule_irq
-+	jmp need_resched
-+#endif
-+
-+/* SYSENTER_RETURN points to after the "sysenter" instruction in
-+   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */
-+
-+	# sysenter call handler stub
-+ENTRY(sysenter_entry)
-+	movl TSS_sysenter_esp0(%esp),%esp
-+sysenter_past_esp:
-+	sti
-+	pushl $(__USER_DS)
-+	pushl %ebp
-+	pushfl
-+	pushl $(__USER_CS)
-+	pushl $SYSENTER_RETURN
-+
-+/*
-+ * Load the potential sixth argument from user stack.
-+ * Careful about security.
-+ */
-+	cmpl $__PAGE_OFFSET-3,%ebp
-+	jae syscall_fault
-+1:	movl (%ebp),%ebp
-+.section __ex_table,"a"
-+	.align 4
-+	.long 1b,syscall_fault
-+.previous
-+
-+	pushl %eax
-+	SAVE_ALL
-+	GET_THREAD_INFO(%ebp)
-+
-+	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-+	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
-+	jnz syscall_trace_entry
-+	cmpl $(nr_syscalls), %eax
-+	jae syscall_badsys
-+	call *sys_call_table(,%eax,4)
-+	movl %eax,EAX(%esp)
-+	cli
-+	movl TI_flags(%ebp), %ecx
-+	testw $_TIF_ALLWORK_MASK, %cx
-+	jne syscall_exit_work
-+/* if something modifies registers it must also disable sysexit */
-+	movl EIP(%esp), %edx
-+	movl OLDESP(%esp), %ecx
-+	xorl %ebp,%ebp
-+	sti
-+	sysexit
-+
-+
-+	# system call handler stub
-+ENTRY(system_call)
-+	pushl %eax			# save orig_eax
-+	SAVE_ALL
-+	GET_THREAD_INFO(%ebp)
-+					# system call tracing in operation
-+	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-+	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
-+	jnz syscall_trace_entry
-+	cmpl $(nr_syscalls), %eax
-+	jae syscall_badsys
-+syscall_call:
-+	call *sys_call_table(,%eax,4)
-+	movl %eax,EAX(%esp)		# store the return value
-+syscall_exit:
-+	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
-+					# setting need_resched or sigpending
-+					# between sampling and the iret
-+	movl TI_flags(%ebp), %ecx
-+	testw $_TIF_ALLWORK_MASK, %cx	# current->work
-+	jne syscall_exit_work
-+
-+restore_all:
-+#if 0 /* XEN */
-+	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
-+	# Warning: OLDSS(%esp) contains the wrong/random values if we
-+	# are returning to the kernel.
-+	# See comments in process.c:copy_thread() for details.
-+	movb OLDSS(%esp), %ah
-+	movb CS(%esp), %al
-+	andl $(VM_MASK | (4 << 8) | 3), %eax
-+	cmpl $((4 << 8) | 3), %eax
-+	je ldt_ss			# returning to user-space with LDT SS
-+#endif /* XEN */
-+restore_nocheck:
-+	testl $(VM_MASK|NMI_MASK), EFLAGS(%esp)
-+	jnz hypervisor_iret
-+	movb EVENT_MASK(%esp), %al
-+	notb %al			# %al == ~saved_mask
-+	XEN_GET_VCPU_INFO(%esi)
-+	andb evtchn_upcall_mask(%esi),%al
-+	andb $1,%al			# %al == mask & ~saved_mask
-+	jnz restore_all_enable_events	#     != 0 => reenable event delivery
-+	XEN_PUT_VCPU_INFO(%esi)
-+	RESTORE_REGS
-+	addl $4, %esp
-+1:	iret
-+.section .fixup,"ax"
-+iret_exc:
-+	pushl $0			# no error code
-+	pushl $do_iret_error
-+	jmp error_code
-+.previous
-+.section __ex_table,"a"
-+	.align 4
-+	.long 1b,iret_exc
-+.previous
-+
-+hypervisor_iret:
-+	andl $~NMI_MASK, EFLAGS(%esp)
-+	RESTORE_REGS
-+	addl $4, %esp
-+	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
-+
-+#if 0 /* XEN */
-+ldt_ss:
-+	larl OLDSS(%esp), %eax
-+	jnz restore_nocheck
-+	testl $0x00400000, %eax		# returning to 32bit stack?
-+	jnz restore_nocheck		# all right, normal return
-+	/* If returning to userspace with 16bit stack,
-+	 * try to fix the higher word of ESP, as the CPU
-+	 * won't restore it.
-+	 * This is an "official" bug of all the x86-compatible
-+	 * CPUs, which we can try to work around to make
-+	 * dosemu and wine happy. */
-+	subl $8, %esp		# reserve space for switch16 pointer
-+	cli
-+	movl %esp, %eax
-+	/* Set up the 16bit stack frame with switch32 pointer on top,
-+	 * and a switch16 pointer on top of the current frame. */
-+	call setup_x86_bogus_stack
-+	RESTORE_REGS
-+	lss 20+4(%esp), %esp	# switch to 16bit stack
-+1:	iret
-+.section __ex_table,"a"
-+	.align 4
-+	.long 1b,iret_exc
-+.previous
-+#endif /* XEN */
-+
-+	# perform work that needs to be done immediately before resumption
-+	ALIGN
-+work_pending:
-+	testb $_TIF_NEED_RESCHED, %cl
-+	jz work_notifysig
-+work_resched:
-+	call schedule
-+	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
-+					# setting need_resched or sigpending
-+					# between sampling and the iret
-+	movl TI_flags(%ebp), %ecx
-+	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
-+					# than syscall tracing?
-+	jz restore_all
-+	testb $_TIF_NEED_RESCHED, %cl
-+	jnz work_resched
-+
-+work_notifysig:				# deal with pending signals and
-+					# notify-resume requests
-+	testl $VM_MASK, EFLAGS(%esp)
-+	movl %esp, %eax
-+	jne work_notifysig_v86		# returning to kernel-space or
-+					# vm86-space
-+	xorl %edx, %edx
-+	call do_notify_resume
-+	jmp restore_all
-+
-+	ALIGN
-+work_notifysig_v86:
-+	pushl %ecx			# save ti_flags for do_notify_resume
-+	call save_v86_state		# %eax contains pt_regs pointer
-+	popl %ecx
-+	movl %eax, %esp
-+	xorl %edx, %edx
-+	call do_notify_resume
-+	jmp restore_all
-+
-+	# perform syscall exit tracing
-+	ALIGN
-+syscall_trace_entry:
-+	movl $-ENOSYS,EAX(%esp)
-+	movl %esp, %eax
-+	xorl %edx,%edx
-+	call do_syscall_trace
-+	movl ORIG_EAX(%esp), %eax
-+	cmpl $(nr_syscalls), %eax
-+	jnae syscall_call
-+	jmp syscall_exit
-+
-+	# perform syscall exit tracing
-+	ALIGN
-+syscall_exit_work:
-+	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
-+	jz work_pending
-+	XEN_UNBLOCK_EVENTS(%esi)	# could let do_syscall_trace() call
-+					# schedule() instead
-+	movl %esp, %eax
-+	movl $1, %edx
-+	call do_syscall_trace
-+	jmp resume_userspace
-+
-+	ALIGN
-+syscall_fault:
-+	pushl %eax			# save orig_eax
-+	SAVE_ALL
-+	GET_THREAD_INFO(%ebp)
-+	movl $-EFAULT,EAX(%esp)
-+	jmp resume_userspace
-+
-+	ALIGN
-+syscall_badsys:
-+	movl $-ENOSYS,EAX(%esp)
-+	jmp resume_userspace
-+
-+#if 0 /* XEN */
-+#define FIXUP_ESPFIX_STACK \
-+	movl %esp, %eax; \
-+	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
-+	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
-+	/* copy data from 16bit stack to 32bit stack */ \
-+	call fixup_x86_bogus_stack; \
-+	/* put ESP to the proper location */ \
-+	movl %eax, %esp;
-+#define UNWIND_ESPFIX_STACK \
-+	pushl %eax; \
-+	movl %ss, %eax; \
-+	/* see if on 16bit stack */ \
-+	cmpw $__ESPFIX_SS, %ax; \
-+	jne 28f; \
-+	movl $__KERNEL_DS, %edx; \
-+	movl %edx, %ds; \
-+	movl %edx, %es; \
-+	/* switch to 32bit stack */ \
-+	FIXUP_ESPFIX_STACK \
-+28:	popl %eax;
-+
-+/*
-+ * Build the entry stubs and pointer table with
-+ * some assembler magic.
-+ */
-+.data
-+ENTRY(interrupt)
-+.text
-+
-+vector=0
-+ENTRY(irq_entries_start)
-+.rept NR_IRQS
-+	ALIGN
-+1:	pushl $vector-256
-+	jmp common_interrupt
-+.data
-+	.long 1b
-+.text
-+vector=vector+1
-+.endr
-+
-+	ALIGN
-+common_interrupt:
-+	SAVE_ALL
-+	movl %esp,%eax
-+	call do_IRQ
-+	jmp ret_from_intr
-+
-+#define BUILD_INTERRUPT(name, nr)	\
-+ENTRY(name)				\
-+	pushl $nr-256;			\
-+	SAVE_ALL			\
-+	movl %esp,%eax;			\
-+	call smp_/**/name;		\
-+	jmp ret_from_intr;
-+
-+/* The include is where all of the SMP etc. interrupts come from */
-+#include "entry_arch.h"
-+#endif /* XEN */
-+
-+ENTRY(divide_error)
-+	pushl $0			# no error code
-+	pushl $do_divide_error
-+	ALIGN
-+error_code:
-+	pushl %ds
-+	pushl %eax
-+	xorl %eax, %eax
-+	pushl %ebp
-+	pushl %edi
-+	pushl %esi
-+	pushl %edx
-+	decl %eax			# eax = -1
-+	pushl %ecx
-+	pushl %ebx
-+	cld
-+	pushl %es
-+#	UNWIND_ESPFIX_STACK
-+	popl %ecx
-+	movl ES(%esp), %edi		# get the function address
-+	movl ORIG_EAX(%esp), %edx	# get the error code
-+	movl %eax, ORIG_EAX(%esp)
-+	movl %ecx, ES(%esp)
-+	movl $(__USER_DS), %ecx
-+	movl %ecx, %ds
-+	movl %ecx, %es
-+	movl %esp,%eax			# pt_regs pointer
-+	call *%edi
-+	jmp ret_from_exception
-+
-+# A note on the "critical region" in our callback handler.
-+# We want to avoid stacking callback handlers due to events occurring
-+# during handling of the last event. To do this, we keep events disabled
-+# until we've done all processing. HOWEVER, we must enable events before
-+# popping the stack frame (can't be done atomically) and so it would still
-+# be possible to get enough handler activations to overflow the stack.
-+# Although unlikely, bugs of that kind are hard to track down, so we'd
-+# like to avoid the possibility.
-+# So, on entry to the handler we detect whether we interrupted an
-+# existing activation in its critical region -- if so, we pop the current
-+# activation and restart the handler using the previous one.
-+ENTRY(hypervisor_callback)
-+	pushl %eax
-+	SAVE_ALL
-+	movl EIP(%esp),%eax
-+	cmpl $scrit,%eax
-+	jb   11f
-+	cmpl $ecrit,%eax
-+	jb   critical_region_fixup
-+11:	push %esp
-+	call evtchn_do_upcall
-+	add  $4,%esp
-+	jmp  ret_from_intr
-+
-+        ALIGN
-+restore_all_enable_events:  
-+	XEN_LOCKED_UNBLOCK_EVENTS(%esi)
-+scrit:	/**** START OF CRITICAL REGION ****/
-+	XEN_TEST_PENDING(%esi)
-+	jnz  14f			# process more events if necessary...
-+	XEN_PUT_VCPU_INFO(%esi)
-+	RESTORE_ALL
-+14:	XEN_LOCKED_BLOCK_EVENTS(%esi)
-+	XEN_PUT_VCPU_INFO(%esi)
-+	jmp  11b
-+ecrit:  /**** END OF CRITICAL REGION ****/
-+# [How we do the fixup]. We want to merge the current stack frame with the
-+# just-interrupted frame. How we do this depends on where in the critical
-+# region the interrupted handler was executing, and so how many saved
-+# registers are in each frame. We do this quickly using the lookup table
-+# 'critical_fixup_table'. For each byte offset in the critical region, it
-+# provides the number of bytes which have already been popped from the
-+# interrupted stack frame. 
-+critical_region_fixup:
-+	addl $critical_fixup_table-scrit,%eax
-+	movzbl (%eax),%eax		# %eax contains num bytes popped
-+	cmpb $0xff,%al                  # 0xff => vcpu_info critical region
-+	jne  15f
-+	GET_THREAD_INFO(%ebp)
-+	XEN_PUT_VCPU_INFO(%esi)         # abort vcpu_info critical region
-+        xorl %eax,%eax
-+15:	mov  %esp,%esi
-+	add  %eax,%esi			# %esi points at end of src region
-+	mov  %esp,%edi
-+	add  $0x34,%edi			# %edi points at end of dst region
-+	mov  %eax,%ecx
-+	shr  $2,%ecx			# convert byte count to word count
-+	je   17f			# skip loop if nothing to copy
-+16:	subl $4,%esi			# pre-decrementing copy loop
-+	subl $4,%edi
-+	movl (%esi),%eax
-+	movl %eax,(%edi)
-+	loop 16b
-+17:	movl %edi,%esp			# final %edi is top of merged stack
-+	jmp  11b
-+
-+critical_fixup_table:
-+	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = XEN_TEST_PENDING
-+	.byte 0xff,0xff			# jnz  14f
-+	XEN_PUT_VCPU_INFO_fixup
-+	.byte 0x00			# pop  %ebx
-+	.byte 0x04			# pop  %ecx
-+	.byte 0x08			# pop  %edx
-+	.byte 0x0c			# pop  %esi
-+	.byte 0x10			# pop  %edi
-+	.byte 0x14			# pop  %ebp
-+	.byte 0x18			# pop  %eax
-+	.byte 0x1c			# pop  %ds
-+	.byte 0x20			# pop  %es
-+	.byte 0x24,0x24,0x24		# add  $4,%esp
-+	.byte 0x28			# iret
-+	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
-+	XEN_PUT_VCPU_INFO_fixup
-+	.byte 0x00,0x00			# jmp  11b
-+
-+# Hypervisor uses this for application faults while it executes.
-+ENTRY(failsafe_callback)
-+1:	popl %ds
-+2:	popl %es
-+3:	popl %fs
-+4:	popl %gs
-+	subl $4,%esp
-+	SAVE_ALL
-+	jmp  ret_from_exception
-+.section .fixup,"ax";	\
-+6:	movl $0,(%esp);	\
-+	jmp 1b;		\
-+7:	movl $0,(%esp);	\
-+	jmp 2b;		\
-+8:	movl $0,(%esp);	\
-+	jmp 3b;		\
-+9:	movl $0,(%esp);	\
-+	jmp 4b;		\
-+.previous;		\
-+.section __ex_table,"a";\
-+	.align 4;	\
-+	.long 1b,6b;	\
-+	.long 2b,7b;	\
-+	.long 3b,8b;	\
-+	.long 4b,9b;	\
-+.previous
-+
-+ENTRY(coprocessor_error)
-+	pushl $0
-+	pushl $do_coprocessor_error
-+	jmp error_code
-+
-+ENTRY(simd_coprocessor_error)
-+	pushl $0
-+	pushl $do_simd_coprocessor_error
-+	jmp error_code
-+
-+ENTRY(device_not_available)
-+	pushl $-1			# mark this as an int
-+	SAVE_ALL
-+	#preempt_stop /* This is already an interrupt gate on Xen. */
-+	call math_state_restore
-+	jmp ret_from_exception
-+
-+/*
-+ * Debug traps and NMI can happen at the one SYSENTER instruction
-+ * that sets up the real kernel stack. Check here, since we can't
-+ * allow the wrong stack to be used.
-+ *
-+ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
-+ * already pushed 3 words if it hits on the sysenter instruction:
-+ * eflags, cs and eip.
-+ *
-+ * We just load the right stack, and push the three (known) values
-+ * by hand onto the new stack - while updating the return eip past
-+ * the instruction that would have done it for sysenter.
-+ */
-+#define FIX_STACK(offset, ok, label)		\
-+	cmpw $__KERNEL_CS,4(%esp);		\
-+	jne ok;					\
-+label:						\
-+	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
-+	pushfl;					\
-+	pushl $__KERNEL_CS;			\
-+	pushl $sysenter_past_esp
-+
-+ENTRY(debug)
-+	cmpl $sysenter_entry,(%esp)
-+	jne debug_stack_correct
-+	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
-+debug_stack_correct:
-+	pushl $-1			# mark this as an int
-+	SAVE_ALL
-+	xorl %edx,%edx			# error code 0
-+	movl %esp,%eax			# pt_regs pointer
-+	call do_debug
-+	jmp ret_from_exception
-+
-+ENTRY(nmi)
-+	pushl %eax
-+	SAVE_ALL
-+	xorl %edx,%edx		# zero error code
-+	movl %esp,%eax		# pt_regs pointer
-+	call do_nmi
-+	orl  $NMI_MASK, EFLAGS(%esp)
-+	jmp restore_all
-+
-+#if 0 /* XEN */
-+/*
-+ * NMI is doubly nasty. It can happen _while_ we're handling
-+ * a debug fault, and the debug fault hasn't yet been able to
-+ * clear up the stack. So we first check whether we got  an
-+ * NMI on the sysenter entry path, but after that we need to
-+ * check whether we got an NMI on the debug path where the debug
-+ * fault happened on the sysenter path.
-+ */
-+ENTRY(nmi)
-+	pushl %eax
-+	movl %ss, %eax
-+	cmpw $__ESPFIX_SS, %ax
-+	popl %eax
-+	je nmi_16bit_stack
-+	cmpl $sysenter_entry,(%esp)
-+	je nmi_stack_fixup
-+	pushl %eax
-+	movl %esp,%eax
-+	/* Do not access memory above the end of our stack page,
-+	 * it might not exist.
-+	 */
-+	andl $(THREAD_SIZE-1),%eax
-+	cmpl $(THREAD_SIZE-20),%eax
-+	popl %eax
-+	jae nmi_stack_correct
-+	cmpl $sysenter_entry,12(%esp)
-+	je nmi_debug_stack_check
-+nmi_stack_correct:
-+	pushl %eax
-+	SAVE_ALL
-+	xorl %edx,%edx		# zero error code
-+	movl %esp,%eax		# pt_regs pointer
-+	call do_nmi
-+	jmp restore_all
-+
-+nmi_stack_fixup:
-+	FIX_STACK(12,nmi_stack_correct, 1)
-+	jmp nmi_stack_correct
-+nmi_debug_stack_check:
-+	cmpw $__KERNEL_CS,16(%esp)
-+	jne nmi_stack_correct
-+	cmpl $debug - 1,(%esp)
-+	jle nmi_stack_correct
-+	cmpl $debug_esp_fix_insn,(%esp)
-+	jle nmi_debug_stack_fixup
-+nmi_debug_stack_fixup:
-+	FIX_STACK(24,nmi_stack_correct, 1)
-+	jmp nmi_stack_correct
-+
-+nmi_16bit_stack:
-+	/* create the pointer to lss back */
-+	pushl %ss
-+	pushl %esp
-+	movzwl %sp, %esp
-+	addw $4, (%esp)
-+	/* copy the iret frame of 12 bytes */
-+	.rept 3
-+	pushl 16(%esp)
-+	.endr
-+	pushl %eax
-+	SAVE_ALL
-+	FIXUP_ESPFIX_STACK		# %eax == %esp
-+	xorl %edx,%edx			# zero error code
-+	call do_nmi
-+	RESTORE_REGS
-+	lss 12+4(%esp), %esp		# back to 16bit stack
-+1:	iret
-+.section __ex_table,"a"
-+	.align 4
-+	.long 1b,iret_exc
-+.previous
-+#endif /* XEN */
-+
-+ENTRY(int3)
-+	pushl $-1			# mark this as an int
-+	SAVE_ALL
-+	xorl %edx,%edx		# zero error code
-+	movl %esp,%eax		# pt_regs pointer
-+	call do_int3
-+	jmp ret_from_exception
-+
-+ENTRY(overflow)
-+	pushl $0
-+	pushl $do_overflow
-+	jmp error_code
-+
-+ENTRY(bounds)
-+	pushl $0
-+	pushl $do_bounds
-+	jmp error_code
-+
-+ENTRY(invalid_op)
-+	pushl $0
-+	pushl $do_invalid_op
-+	jmp error_code
-+
-+ENTRY(coprocessor_segment_overrun)
-+	pushl $0
-+	pushl $do_coprocessor_segment_overrun
-+	jmp error_code
-+
-+ENTRY(invalid_TSS)
-+	pushl $do_invalid_TSS
-+	jmp error_code
-+
-+ENTRY(segment_not_present)
-+	pushl $do_segment_not_present
-+	jmp error_code
-+
-+ENTRY(stack_segment)
-+	pushl $do_stack_segment
-+	jmp error_code
-+
-+ENTRY(general_protection)
-+	pushl $do_general_protection
-+	jmp error_code
-+
-+ENTRY(alignment_check)
-+	pushl $do_alignment_check
-+	jmp error_code
-+
-+ENTRY(page_fault)
-+	pushl $do_page_fault
-+	jmp error_code
-+
-+#ifdef CONFIG_X86_MCE
-+ENTRY(machine_check)
-+	pushl $0
-+	pushl machine_check_vector
-+	jmp error_code
-+#endif
-+
-+ENTRY(fixup_4gb_segment)
-+	pushl $do_fixup_4gb_segment
-+	jmp error_code
-+
-+#include "syscall_table.S"
-+
-+syscall_table_size=(.-sys_call_table)
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/fixup.c linux-2.6.12-xen/arch/xen/i386/kernel/fixup.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/fixup.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/fixup.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,95 @@
-+/******************************************************************************
-+ * fixup.c
-+ * 
-+ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
-+ * Used to avoid repeated slow emulation of common instructions used by the
-+ * user-space TLS (Thread-Local Storage) libraries.
-+ * 
-+ * **** NOTE ****
-+ *  Issues with the binary rewriting have caused it to be removed. Instead
-+ *  we rely on Xen's emulator to boot the kernel, and then print a banner
-+ *  message recommending that the user disables /lib/tls.
-+ * 
-+ * Copyright (c) 2004, K A Fraser
-+ * 
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License as published by
-+ * the Free Software Foundation; either version 2 of the License, or
-+ * (at your option) any later version.
-+ * 
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ * 
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/kernel.h>
-+#include <linux/delay.h>
-+#include <linux/version.h>
-+
-+#define DP(_f, _args...) printk(KERN_ALERT "  " _f "\n" , ## _args )
-+
-+fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
-+{
-+	static unsigned long printed = 0;
-+	char info[100];
-+	int i;
-+
-+	if (test_and_set_bit(0, &printed))
-+		return;
-+
-+	HYPERVISOR_vm_assist(
-+		VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
-+
-+	sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
-+
-+
-+	DP("");
-+	DP("***************************************************************");
-+	DP("***************************************************************");
-+	DP("** WARNING: Currently emulating unsupported memory accesses  **");
-+	DP("**          in /lib/tls glibc libraries. The emulation is    **");
-+	DP("**          slow. To ensure full performance you should      **");
-+	DP("**          install a 'xen-friendly' (nosegneg) version of   **");
-+	DP("**          the library, or disable tls support by executing **");
-+	DP("**          the following as root:                           **");
-+	DP("**          mv /lib/tls /lib/tls.disabled                    **");
-+	DP("** Offending process: %-38.38s **", info);
-+	DP("***************************************************************");
-+	DP("***************************************************************");
-+	DP("");
-+
-+	for (i = 5; i > 0; i--) {
-+		printk("Pausing... %d", i);
-+		mdelay(1000);
-+		printk("\b\b\b\b\b\b\b\b\b\b\b\b");
-+	}
-+
-+	printk("Continuing...\n\n");
-+}
-+
-+static int __init fixup_init(void)
-+{
-+	HYPERVISOR_vm_assist(
-+		VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
-+	return 0;
-+}
-+__initcall(fixup_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/head.S linux-2.6.12-xen/arch/xen/i386/kernel/head.S
---- pristine-linux-2.6.12/arch/xen/i386/kernel/head.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/head.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,198 @@
-+
-+#include <linux/config.h>
-+
-+.section __xen_guest
-+	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
-+	.ascii	",XEN_VER=xen-3.0"
-+	.ascii	",VIRT_BASE=0xC0000000"
-+	.ascii	",HYPERCALL_PAGE=0x104" /* __pa(hypercall_page) >> 12 */
-+#ifdef CONFIG_X86_PAE
-+	.ascii	",PAE=yes"
-+#else
-+	.ascii	",PAE=no"
-+#endif
-+#ifdef CONFIG_XEN_SHADOW_MODE
-+	.ascii	",SHADOW=translate"
-+#endif
-+	.ascii	",LOADER=generic"
-+	.byte	0
-+
-+.text
-+#include <linux/threads.h>
-+#include <linux/linkage.h>
-+#include <asm/segment.h>
-+#include <asm/thread_info.h>
-+#include <asm/asm_offsets.h>
-+#include <asm-xen/xen-public/arch-x86_32.h>
-+
-+/*
-+ * References to members of the new_cpu_data structure.
-+ */
-+
-+#define X86		new_cpu_data+CPUINFO_x86
-+#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
-+#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
-+#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
-+#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
-+#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
-+#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
-+#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id
-+
-+ENTRY(startup_32)
-+	movl %esi,xen_start_info
-+
-+#if 0
-+ENTRY(startup_32_smp)
-+#endif /* CONFIG_SMP */
-+
-+	cld
-+
-+	/* Set up the stack pointer */
-+	lss stack_start,%esp
-+
-+checkCPUtype:
-+
-+	/* get vendor info */
-+	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
-+	cpuid
-+	movl %eax,X86_CPUID		# save CPUID level
-+	movl %ebx,X86_VENDOR_ID		# lo 4 chars
-+	movl %edx,X86_VENDOR_ID+4	# next 4 chars
-+	movl %ecx,X86_VENDOR_ID+8	# last 4 chars
-+
-+	movl $1,%eax		# Use the CPUID instruction to get CPU type
-+	cpuid
-+	movb %al,%cl		# save reg for future use
-+	andb $0x0f,%ah		# mask processor family
-+	movb %ah,X86
-+	andb $0xf0,%al		# mask model
-+	shrb $4,%al
-+	movb %al,X86_MODEL
-+	andb $0x0f,%cl		# mask mask revision
-+	movb %cl,X86_MASK
-+	movl %edx,X86_CAPABILITY
-+
-+	incb ready
-+
-+	xorl %eax,%eax			# Clear FS/GS and LDT
-+	movl %eax,%fs
-+	movl %eax,%gs
-+	cld			# gcc2 wants the direction flag cleared at all times
-+
-+#if 0
-+	movb ready, %cl	
-+	cmpb $1,%cl
-+	je 1f			# the first CPU calls start_kernel
-+				# all other CPUs call initialize_secondary
-+	call initialize_secondary
-+	jmp L6
-+1:
-+#endif /* CONFIG_SMP */
-+	call start_kernel
-+L6:
-+	jmp L6			# main should never return here, but
-+				# just in case, we know what happens.
-+
-+ENTRY(lgdt_finish)
-+	movl $(__KERNEL_DS),%eax	# reload all the segment registers
-+	movw %ax,%ss			# after changing gdt.
-+
-+	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
-+	movw %ax,%ds
-+	movw %ax,%es
-+
-+	popl %eax			# reload CS by intersegment return
-+	pushl $(__KERNEL_CS)
-+	pushl %eax
-+	lret
-+
-+ENTRY(stack_start)
-+	.long init_thread_union+THREAD_SIZE
-+	.long __BOOT_DS
-+
-+ready:	.byte 0
-+
-+.globl idt_descr
-+.globl cpu_gdt_descr
-+
-+	ALIGN
-+	.word 0				# 32-bit align idt_desc.address
-+idt_descr:
-+	.word IDT_ENTRIES*8-1		# idt contains 256 entries
-+	.long idt_table
-+
-+# boot GDT descriptor (later on used by CPU#0):
-+	.word 0				# 32 bit align gdt_desc.address
-+cpu_gdt_descr:
-+	.word GDT_SIZE
-+	.long cpu_gdt_table
-+
-+	.fill NR_CPUS-1,8,0		# space for the other GDT descriptors
-+
-+.org 0x1000
-+ENTRY(empty_zero_page)
-+
-+.org 0x2000
-+ENTRY(cpu_gdt_table)
-+	.quad 0x0000000000000000	/* NULL descriptor */
-+	.quad 0x0000000000000000	/* 0x0b reserved */
-+	.quad 0x0000000000000000	/* 0x13 reserved */
-+	.quad 0x0000000000000000	/* 0x1b reserved */
-+	.quad 0x0000000000000000	/* 0x20 unused */
-+	.quad 0x0000000000000000	/* 0x28 unused */
-+	.quad 0x0000000000000000	/* 0x33 TLS entry 1 */
-+	.quad 0x0000000000000000	/* 0x3b TLS entry 2 */
-+	.quad 0x0000000000000000	/* 0x43 TLS entry 3 */
-+	.quad 0x0000000000000000	/* 0x4b reserved */
-+	.quad 0x0000000000000000	/* 0x53 reserved */
-+	.quad 0x0000000000000000	/* 0x5b reserved */
-+
-+#ifdef CONFIG_X86_PAE
-+	.quad 0x00cfbb00000067ff	/* 0x60 kernel 4GB code at 0x00000000 */
-+	.quad 0x00cfb300000067ff	/* 0x68 kernel 4GB data at 0x00000000 */
-+	.quad 0x00cffb00000067ff	/* 0x73 user 4GB code at 0x00000000 */
-+	.quad 0x00cff300000067ff	/* 0x7b user 4GB data at 0x00000000 */
-+#else
-+	.quad 0x00cfbb000000c3ff	/* 0x60 kernel 4GB code at 0x00000000 */
-+	.quad 0x00cfb3000000c3ff	/* 0x68 kernel 4GB data at 0x00000000 */
-+	.quad 0x00cffb000000c3ff	/* 0x73 user 4GB code at 0x00000000 */
-+	.quad 0x00cff3000000c3ff	/* 0x7b user 4GB data at 0x00000000 */
-+#endif
-+
-+	.quad 0x0000000000000000	/* 0x80 TSS descriptor */
-+	.quad 0x0000000000000000	/* 0x88 LDT descriptor */
-+
-+	/* Segments used for calling PnP BIOS */
-+	.quad 0x0000000000000000	/* 0x90 32-bit code */
-+	.quad 0x0000000000000000	/* 0x98 16-bit code */
-+	.quad 0x0000000000000000	/* 0xa0 16-bit data */
-+	.quad 0x0000000000000000	/* 0xa8 16-bit data */
-+	.quad 0x0000000000000000	/* 0xb0 16-bit data */
-+	/*
-+	 * The APM segments have byte granularity and their bases
-+	 * and limits are set at run time.
-+	 */
-+	.quad 0x0000000000000000	/* 0xb8 APM CS    code */
-+	.quad 0x0000000000000000	/* 0xc0 APM CS 16 code (16 bit) */
-+	.quad 0x0000000000000000	/* 0xc8 APM DS    data */
-+
-+	.quad 0x0000000000000000	/* 0xd0 - ESPFIX 16-bit SS */
-+	.quad 0x0000000000000000	/* 0xd8 - unused */
-+	.quad 0x0000000000000000	/* 0xe0 - unused */
-+	.quad 0x0000000000000000	/* 0xe8 - unused */
-+	.quad 0x0000000000000000	/* 0xf0 - unused */
-+	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */
-+	.fill GDT_ENTRIES-32,8,0
-+
-+.org 0x3000
-+ENTRY(default_ldt)
-+
-+.org 0x4000
-+ENTRY(hypercall_page)
-+
-+.org 0x5000
-+/*
-+ * Real beginning of normal "text" segment
-+ */
-+ENTRY(stext)
-+ENTRY(_stext)
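
The __xen_guest section at the top of head.S above advertises the hypercall page to the hypervisor as a page frame number, and the comment there spells the relationship out as __pa(hypercall_page) >> 12. A tiny illustrative helper (assumed name, not part of the patch) makes the arithmetic explicit:

/* Page frame number of the hypercall page: its physical address divided by
 * the 4 KiB page size (PAGE_SHIFT == 12 on i386).  With the kernel at the
 * usual 1 MiB physical base and hypercall_page placed at .org 0x4000, this
 * gives 0x104, matching the HYPERCALL_PAGE=0x104 string in the guest header. */
static unsigned long hypercall_page_pfn(unsigned long hypercall_page_pa)
{
	return hypercall_page_pa >> 12;
}
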
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/i386_ksyms.c linux-2.6.12-xen/arch/xen/i386/kernel/i386_ksyms.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/i386_ksyms.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/i386_ksyms.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,185 @@
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/smp.h>
-+#include <linux/user.h>
-+#include <linux/elfcore.h>
-+#include <linux/mca.h>
-+#include <linux/sched.h>
-+#include <linux/in6.h>
-+#include <linux/interrupt.h>
-+#include <linux/smp_lock.h>
-+#include <linux/pm.h>
-+#include <linux/pci.h>
-+#include <linux/apm_bios.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/tty.h>
-+#include <linux/highmem.h>
-+#include <linux/time.h>
-+
-+#include <asm/semaphore.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/uaccess.h>
-+#include <asm/checksum.h>
-+#include <asm/io.h>
-+#include <asm/delay.h>
-+#include <asm/irq.h>
-+#include <asm/mmx.h>
-+#include <asm/desc.h>
-+#include <asm/pgtable.h>
-+#include <asm/tlbflush.h>
-+#include <asm/nmi.h>
-+#include <asm/ist.h>
-+#include <asm/kdebug.h>
-+
-+extern void dump_thread(struct pt_regs *, struct user *);
-+extern spinlock_t rtc_lock;
-+
-+/* This is definitely a GPL-only symbol */
-+EXPORT_SYMBOL_GPL(cpu_gdt_table);
-+
-+#if defined(CONFIG_APM_MODULE)
-+extern void machine_real_restart(unsigned char *, int);
-+EXPORT_SYMBOL(machine_real_restart);
-+extern void default_idle(void);
-+EXPORT_SYMBOL(default_idle);
-+#endif
-+
-+#ifdef CONFIG_SMP
-+extern void FASTCALL( __write_lock_failed(rwlock_t *rw));
-+extern void FASTCALL( __read_lock_failed(rwlock_t *rw));
-+#endif
-+
-+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-+extern struct drive_info_struct drive_info;
-+EXPORT_SYMBOL(drive_info);
-+#endif
-+
-+extern unsigned long cpu_khz;
-+extern unsigned long get_cmos_time(void);
-+
-+/* platform dependent support */
-+EXPORT_SYMBOL(boot_cpu_data);
-+#ifdef CONFIG_DISCONTIGMEM
-+EXPORT_SYMBOL(node_data);
-+EXPORT_SYMBOL(physnode_map);
-+#endif
-+#ifdef CONFIG_X86_NUMAQ
-+EXPORT_SYMBOL(xquad_portio);
-+#endif
-+EXPORT_SYMBOL(dump_thread);
-+EXPORT_SYMBOL(dump_fpu);
-+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
-+EXPORT_SYMBOL(__ioremap);
-+EXPORT_SYMBOL(ioremap_nocache);
-+EXPORT_SYMBOL(iounmap);
-+EXPORT_SYMBOL(kernel_thread);
-+EXPORT_SYMBOL(pm_idle);
-+EXPORT_SYMBOL(pm_power_off);
-+EXPORT_SYMBOL(get_cmos_time);
-+EXPORT_SYMBOL(cpu_khz);
-+EXPORT_SYMBOL(apm_info);
-+
-+EXPORT_SYMBOL(__down_failed);
-+EXPORT_SYMBOL(__down_failed_interruptible);
-+EXPORT_SYMBOL(__down_failed_trylock);
-+EXPORT_SYMBOL(__up_wakeup);
-+/* Networking helper routines. */
-+EXPORT_SYMBOL(csum_partial_copy_generic);
-+/* Delay loops */
-+EXPORT_SYMBOL(__ndelay);
-+EXPORT_SYMBOL(__udelay);
-+EXPORT_SYMBOL(__delay);
-+EXPORT_SYMBOL(__const_udelay);
-+
-+EXPORT_SYMBOL(__get_user_1);
-+EXPORT_SYMBOL(__get_user_2);
-+EXPORT_SYMBOL(__get_user_4);
-+
-+EXPORT_SYMBOL(__put_user_1);
-+EXPORT_SYMBOL(__put_user_2);
-+EXPORT_SYMBOL(__put_user_4);
-+EXPORT_SYMBOL(__put_user_8);
-+
-+EXPORT_SYMBOL(strpbrk);
-+EXPORT_SYMBOL(strstr);
-+
-+EXPORT_SYMBOL(strncpy_from_user);
-+EXPORT_SYMBOL(__strncpy_from_user);
-+EXPORT_SYMBOL(clear_user);
-+EXPORT_SYMBOL(__clear_user);
-+EXPORT_SYMBOL(__copy_from_user_ll);
-+EXPORT_SYMBOL(__copy_to_user_ll);
-+EXPORT_SYMBOL(strnlen_user);
-+
-+#ifdef CONFIG_PCI
-+EXPORT_SYMBOL(pci_mem_start);
-+#endif
-+
-+#ifdef CONFIG_PCI_BIOS
-+EXPORT_SYMBOL(pcibios_set_irq_routing);
-+EXPORT_SYMBOL(pcibios_get_irq_routing_table);
-+#endif
-+
-+#ifdef CONFIG_X86_USE_3DNOW
-+EXPORT_SYMBOL(_mmx_memcpy);
-+EXPORT_SYMBOL(mmx_clear_page);
-+EXPORT_SYMBOL(mmx_copy_page);
-+#endif
-+
-+#ifdef CONFIG_X86_HT
-+EXPORT_SYMBOL(smp_num_siblings);
-+EXPORT_SYMBOL(cpu_sibling_map);
-+#endif
-+
-+#ifdef CONFIG_SMP
-+EXPORT_SYMBOL(__write_lock_failed);
-+EXPORT_SYMBOL(__read_lock_failed);
-+
-+/* Global SMP stuff */
-+EXPORT_SYMBOL(smp_call_function);
-+
-+/* TLB flushing */
-+EXPORT_SYMBOL(flush_tlb_page);
-+#endif
-+
-+#ifdef CONFIG_X86_IO_APIC
-+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
-+#endif
-+
-+#ifdef CONFIG_MCA
-+EXPORT_SYMBOL(machine_id);
-+#endif
-+
-+#ifdef CONFIG_VT
-+EXPORT_SYMBOL(screen_info);
-+#endif
-+
-+EXPORT_SYMBOL(get_wchan);
-+
-+EXPORT_SYMBOL(rtc_lock);
-+
-+EXPORT_SYMBOL_GPL(set_nmi_callback);
-+EXPORT_SYMBOL_GPL(unset_nmi_callback);
-+
-+EXPORT_SYMBOL(register_die_notifier);
-+#ifdef CONFIG_HAVE_DEC_LOCK
-+EXPORT_SYMBOL(_atomic_dec_and_lock);
-+#endif
-+
-+EXPORT_SYMBOL(__PAGE_KERNEL);
-+
-+#ifdef CONFIG_HIGHMEM
-+EXPORT_SYMBOL(kmap);
-+EXPORT_SYMBOL(kunmap);
-+EXPORT_SYMBOL(kmap_atomic);
-+EXPORT_SYMBOL(kunmap_atomic);
-+EXPORT_SYMBOL(kmap_atomic_to_page);
-+#endif
-+
-+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
-+EXPORT_SYMBOL(ist_info);
-+#endif
-+
-+EXPORT_SYMBOL(csum_partial);
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/init_task.c linux-2.6.12-xen/arch/xen/i386/kernel/init_task.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/init_task.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/init_task.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,49 @@
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/init.h>
-+#include <linux/init_task.h>
-+#include <linux/fs.h>
-+#include <linux/mqueue.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/desc.h>
-+
-+static struct fs_struct init_fs = INIT_FS;
-+static struct files_struct init_files = INIT_FILES;
-+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-+
-+#define swapper_pg_dir ((pgd_t *)NULL)
-+struct mm_struct init_mm = INIT_MM(init_mm);
-+#undef swapper_pg_dir
-+
-+EXPORT_SYMBOL(init_mm);
-+
-+/*
-+ * Initial thread structure.
-+ *
-+ * We need to make sure that this is THREAD_SIZE aligned due to the
-+ * way process stacks are handled. This is done by having a special
-+ * "init_task" linker map entry..
-+ */
-+union thread_union init_thread_union 
-+	__attribute__((__section__(".data.init_task"))) =
-+		{ INIT_THREAD_INFO(init_task) };
-+
-+/*
-+ * Initial task structure.
-+ *
-+ * All other task structs will be allocated on slabs in fork.c
-+ */
-+struct task_struct init_task = INIT_TASK(init_task);
-+
-+EXPORT_SYMBOL(init_task);
-+
-+/*
-+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
-+ * no more per-task TSS's.
-+ */ 
-+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS;
-+
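
The init_task.c hunk above uses a small preprocessor trick: swapper_pg_dir is temporarily #defined to a null pgd_t pointer, presumably because INIT_MM() refers to swapper_pg_dir in its initializer, and the macro is then #undef'd so later code sees the real symbol again. A generic sketch of the idiom (identifiers made up for illustration, not part of the patch):

extern int real_symbol;			/* the normal declaration */

#define real_symbol 0			/* shadow it for one expansion ... */
static int captured = real_symbol;	/* ... this initializer sees 0 */
#undef real_symbol			/* everything after this refers to
					 * the extern declaration again */
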
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/io_apic.c linux-2.6.12-xen/arch/xen/i386/kernel/io_apic.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/io_apic.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/io_apic.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2611 @@
-+/*
-+ *	Intel IO-APIC support for multi-Pentium hosts.
-+ *
-+ *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
-+ *
-+ *	Many thanks to Stig Venaas for trying out countless experimental
-+ *	patches and reporting/debugging problems patiently!
-+ *
-+ *	(c) 1999, Multiple IO-APIC support, developed by
-+ *	Ken-ichi Yaku <yaku at css1.kbnes.nec.co.jp> and
-+ *      Hidemi Kishimoto <kisimoto at css1.kbnes.nec.co.jp>,
-+ *	further tested and cleaned up by Zach Brown <zab at redhat.com>
-+ *	and Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
-+ *					thanks to Eric Gilmore
-+ *					and Rolf G. Tews
-+ *					for testing these extensively
-+ *	Paul Diefenbaugh	:	Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+#include <linux/config.h>
-+#include <linux/smp_lock.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/compiler.h>
-+#include <linux/acpi.h>
-+
-+#include <linux/sysdev.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/desc.h>
-+#include <asm/timer.h>
-+
-+#include <mach_apic.h>
-+
-+#include "io_ports.h"
-+
-+#ifdef CONFIG_XEN
-+
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/physdev.h>
-+
-+/* Fake i8259 */
-+#define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
-+#define disable_8259A_irq(_irq)  ((void)0)
-+#define i8259A_irq_pending(_irq) (0)
-+
-+unsigned long io_apic_irqs;
-+
-+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
-+{
-+	physdev_op_t op;
-+	int ret;
-+
-+	op.cmd = PHYSDEVOP_APIC_READ;
-+	op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
-+	op.u.apic_op.offset = reg;
-+	ret = HYPERVISOR_physdev_op(&op);
-+	if (ret)
-+		return ret;
-+	return op.u.apic_op.value;
-+}
-+
-+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-+{
-+	physdev_op_t op;
-+
-+	op.cmd = PHYSDEVOP_APIC_WRITE;
-+	op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
-+	op.u.apic_op.offset = reg;
-+	op.u.apic_op.value = value;
-+	HYPERVISOR_physdev_op(&op);
-+}
-+
-+#define io_apic_read(a,r)    xen_io_apic_read(a,r)
-+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
-+
-+#endif /* CONFIG_XEN */
-+
-+int (*ioapic_renumber_irq)(int ioapic, int irq);
-+atomic_t irq_mis_count;
-+
-+static DEFINE_SPINLOCK(ioapic_lock);
-+
-+/*
-+ *	Is the SiS APIC rmw bug present ?
-+ *	-1 = don't know, 0 = no, 1 = yes
-+ */
-+int sis_apic_bug = -1;
-+
-+/*
-+ * # of IRQ routing registers
-+ */
-+int nr_ioapic_registers[MAX_IO_APICS];
-+
-+/*
-+ * Rough estimation of how many shared IRQs there are, can
-+ * be changed anytime.
-+ */
-+#define MAX_PLUS_SHARED_IRQS NR_IRQS
-+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
-+
-+/*
-+ * This is performance-critical, we want to do it O(1)
-+ *
-+ * the indexing order of this array favors 1:1 mappings
-+ * between pins and IRQs.
-+ */
-+
-+static struct irq_pin_list {
-+	int apic, pin, next;
-+} irq_2_pin[PIN_MAP_SIZE];
-+
-+int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-+#ifdef CONFIG_PCI_MSI
-+#define vector_to_irq(vector) 	\
-+	(platform_legacy_irq(vector) ? vector : vector_irq[vector])
-+#else
-+#define vector_to_irq(vector)	(vector)
-+#endif
-+
-+/*
-+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
-+ * shared ISA-space IRQs, so we have to support them. We are super
-+ * fast in the common case, and fast for shared ISA-space IRQs.
-+ */
-+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
-+{
-+	static int first_free_entry = NR_IRQS;
-+	struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+	while (entry->next)
-+		entry = irq_2_pin + entry->next;
-+
-+	if (entry->pin != -1) {
-+		entry->next = first_free_entry;
-+		entry = irq_2_pin + entry->next;
-+		if (++first_free_entry >= PIN_MAP_SIZE)
-+			panic("io_apic.c: whoops");
-+	}
-+	entry->apic = apic;
-+	entry->pin = pin;
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Reroute an IRQ to a different pin.
-+ */
-+static void __init replace_pin_at_irq(unsigned int irq,
-+				      int oldapic, int oldpin,
-+				      int newapic, int newpin)
-+{
-+	struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+	while (1) {
-+		if (entry->apic == oldapic && entry->pin == oldpin) {
-+			entry->apic = newapic;
-+			entry->pin = newpin;
-+		}
-+		if (!entry->next)
-+			break;
-+		entry = irq_2_pin + entry->next;
-+	}
-+}
-+
-+static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
-+{
-+	struct irq_pin_list *entry = irq_2_pin + irq;
-+	unsigned int pin, reg;
-+
-+	for (;;) {
-+		pin = entry->pin;
-+		if (pin == -1)
-+			break;
-+		reg = io_apic_read(entry->apic, 0x10 + pin*2);
-+		reg &= ~disable;
-+		reg |= enable;
-+		io_apic_modify(entry->apic, 0x10 + pin*2, reg);
-+		if (!entry->next)
-+			break;
-+		entry = irq_2_pin + entry->next;
-+	}
-+}
-+
-+/* mask = 1 */
-+static void __mask_IO_APIC_irq (unsigned int irq)
-+{
-+	__modify_IO_APIC_irq(irq, 0x00010000, 0);
-+}
-+
-+/* mask = 0 */
-+static void __unmask_IO_APIC_irq (unsigned int irq)
-+{
-+	__modify_IO_APIC_irq(irq, 0, 0x00010000);
-+}
-+
-+/* mask = 1, trigger = 0 */
-+static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
-+{
-+	__modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
-+}
-+
-+/* mask = 0, trigger = 1 */
-+static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
-+{
-+	__modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
-+}
-+
-+static void mask_IO_APIC_irq (unsigned int irq)
-+{
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__mask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void unmask_IO_APIC_irq (unsigned int irq)
-+{
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__unmask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
-+	
-+	/* Check delivery_mode to be sure we're not clearing an SMI pin */
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+	if (entry.delivery_mode == dest_SMI)
-+		return;
-+
-+	/*
-+	 * Disable it in the IO-APIC irq-routing table:
-+	 */
-+	memset(&entry, 0, sizeof(entry));
-+	entry.mask = 1;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
-+	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC (void)
-+{
-+	int apic, pin;
-+
-+	for (apic = 0; apic < nr_ioapics; apic++)
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-+			clear_IO_APIC_pin(apic, pin);
-+}
-+
-+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
-+{
-+	unsigned long flags;
-+	int pin;
-+	struct irq_pin_list *entry = irq_2_pin + irq;
-+	unsigned int apicid_value;
-+	
-+	apicid_value = cpu_mask_to_apicid(cpumask);
-+	/* Prepare to do the io_apic_write */
-+	apicid_value = apicid_value << 24;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	for (;;) {
-+		pin = entry->pin;
-+		if (pin == -1)
-+			break;
-+		io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
-+		if (!entry->next)
-+			break;
-+		entry = irq_2_pin + entry->next;
-+	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+#else
-+#define clear_IO_APIC() ((void)0)
-+#endif
-+
-+#if defined(CONFIG_IRQBALANCE)
-+# include <asm/processor.h>	/* kernel_thread() */
-+# include <linux/kernel_stat.h>	/* kstat */
-+# include <linux/slab.h>		/* kmalloc() */
-+# include <linux/timer.h>	/* time_after() */
-+ 
-+# ifdef CONFIG_BALANCED_IRQ_DEBUG
-+#  define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
-+#  define Dprintk(x...) do { TDprintk(x); } while (0)
-+# else
-+#  define TDprintk(x...) 
-+#  define Dprintk(x...) 
-+# endif
-+
-+cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
-+
-+#define IRQBALANCE_CHECK_ARCH -999
-+static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
-+static int physical_balance = 0;
-+
-+static struct irq_cpu_info {
-+	unsigned long * last_irq;
-+	unsigned long * irq_delta;
-+	unsigned long irq;
-+} irq_cpu_data[NR_CPUS];
-+
-+#define CPU_IRQ(cpu)		(irq_cpu_data[cpu].irq)
-+#define LAST_CPU_IRQ(cpu,irq)   (irq_cpu_data[cpu].last_irq[irq])
-+#define IRQ_DELTA(cpu,irq) 	(irq_cpu_data[cpu].irq_delta[irq])
-+
-+#define IDLE_ENOUGH(cpu,now) \
-+	(idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
-+
-+#define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
-+
-+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
-+
-+#define MAX_BALANCED_IRQ_INTERVAL	(5*HZ)
-+#define MIN_BALANCED_IRQ_INTERVAL	(HZ/2)
-+#define BALANCED_IRQ_MORE_DELTA		(HZ/10)
-+#define BALANCED_IRQ_LESS_DELTA		(HZ)
-+
-+static long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;
-+
-+static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
-+			unsigned long now, int direction)
-+{
-+	int search_idle = 1;
-+	int cpu = curr_cpu;
-+
-+	goto inside;
-+
-+	do {
-+		if (unlikely(cpu == curr_cpu))
-+			search_idle = 0;
-+inside:
-+		if (direction == 1) {
-+			cpu++;
-+			if (cpu >= NR_CPUS)
-+				cpu = 0;
-+		} else {
-+			cpu--;
-+			if (cpu == -1)
-+				cpu = NR_CPUS-1;
-+		}
-+	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
-+			(search_idle && !IDLE_ENOUGH(cpu,now)));
-+
-+	return cpu;
-+}
-+
-+static inline void balance_irq(int cpu, int irq)
-+{
-+	unsigned long now = jiffies;
-+	cpumask_t allowed_mask;
-+	unsigned int new_cpu;
-+		
-+	if (irqbalance_disabled)
-+		return; 
-+
-+	cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
-+	new_cpu = move(cpu, allowed_mask, now, 1);
-+	if (cpu != new_cpu) {
-+		irq_desc_t *desc = irq_desc + irq;
-+		unsigned long flags;
-+
-+		spin_lock_irqsave(&desc->lock, flags);
-+		pending_irq_balance_cpumask[irq] = cpumask_of_cpu(new_cpu);
-+		spin_unlock_irqrestore(&desc->lock, flags);
-+	}
-+}
-+
-+static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
-+{
-+	int i, j;
-+	Dprintk("Rotating IRQs among CPUs.\n");
-+	for (i = 0; i < NR_CPUS; i++) {
-+		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
-+			if (!irq_desc[j].action)
-+				continue;
-+			/* Is it a significant load ?  */
-+			if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
-+						useful_load_threshold)
-+				continue;
-+			balance_irq(i, j);
-+		}
-+	}
-+	balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-+		balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
-+	return;
-+}
-+
-+static void do_irq_balance(void)
-+{
-+	int i, j;
-+	unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
-+	unsigned long move_this_load = 0;
-+	int max_loaded = 0, min_loaded = 0;
-+	int load;
-+	unsigned long useful_load_threshold = balanced_irq_interval + 10;
-+	int selected_irq;
-+	int tmp_loaded, first_attempt = 1;
-+	unsigned long tmp_cpu_irq;
-+	unsigned long imbalance = 0;
-+	cpumask_t allowed_mask, target_cpu_mask, tmp;
-+
-+	for (i = 0; i < NR_CPUS; i++) {
-+		int package_index;
-+		CPU_IRQ(i) = 0;
-+		if (!cpu_online(i))
-+			continue;
-+		package_index = CPU_TO_PACKAGEINDEX(i);
-+		for (j = 0; j < NR_IRQS; j++) {
-+			unsigned long value_now, delta;
-+			/* Is this an active IRQ? */
-+			if (!irq_desc[j].action)
-+				continue;
-+			if ( package_index == i )
-+				IRQ_DELTA(package_index,j) = 0;
-+			/* Determine the total count per processor per IRQ */
-+			value_now = (unsigned long) kstat_cpu(i).irqs[j];
-+
-+			/* Determine the activity per processor per IRQ */
-+			delta = value_now - LAST_CPU_IRQ(i,j);
-+
-+			/* Update last_cpu_irq[][] for the next time */
-+			LAST_CPU_IRQ(i,j) = value_now;
-+
-+			/* Ignore IRQs whose rate is less than the clock */
-+			if (delta < useful_load_threshold)
-+				continue;
-+			/* update the load for the processor or package total */
-+			IRQ_DELTA(package_index,j) += delta;
-+
-+			/* Keep track of the higher numbered sibling as well */
-+			if (i != package_index)
-+				CPU_IRQ(i) += delta;
-+			/*
-+			 * We have sibling A and sibling B in the package
-+			 *
-+			 * cpu_irq[A] = load for cpu A + load for cpu B
-+			 * cpu_irq[B] = load for cpu B
-+			 */
-+			CPU_IRQ(package_index) += delta;
-+		}
-+	}
-+	/* Find the least loaded processor package */
-+	for (i = 0; i < NR_CPUS; i++) {
-+		if (!cpu_online(i))
-+			continue;
-+		if (i != CPU_TO_PACKAGEINDEX(i))
-+			continue;
-+		if (min_cpu_irq > CPU_IRQ(i)) {
-+			min_cpu_irq = CPU_IRQ(i);
-+			min_loaded = i;
-+		}
-+	}
-+	max_cpu_irq = ULONG_MAX;
-+
-+tryanothercpu:
-+	/* Look for heaviest loaded processor.
-+	 * We may come back to get the next heaviest loaded processor.
-+	 * Skip processors with trivial loads.
-+	 */
-+	tmp_cpu_irq = 0;
-+	tmp_loaded = -1;
-+	for (i = 0; i < NR_CPUS; i++) {
-+		if (!cpu_online(i))
-+			continue;
-+		if (i != CPU_TO_PACKAGEINDEX(i))
-+			continue;
-+		if (max_cpu_irq <= CPU_IRQ(i)) 
-+			continue;
-+		if (tmp_cpu_irq < CPU_IRQ(i)) {
-+			tmp_cpu_irq = CPU_IRQ(i);
-+			tmp_loaded = i;
-+		}
-+	}
-+
-+	if (tmp_loaded == -1) {
-+ 	 /* In the case of small number of heavy interrupt sources, 
-+	  * loading some of the cpus too much. We use Ingo's original 
-+	  * approach to rotate them around.
-+	  */
-+		if (!first_attempt && imbalance >= useful_load_threshold) {
-+			rotate_irqs_among_cpus(useful_load_threshold);
-+			return;
-+		}
-+		goto not_worth_the_effort;
-+	}
-+	
-+	first_attempt = 0;		/* heaviest search */
-+	max_cpu_irq = tmp_cpu_irq;	/* load */
-+	max_loaded = tmp_loaded;	/* processor */
-+	imbalance = (max_cpu_irq - min_cpu_irq) / 2;
-+	
-+	Dprintk("max_loaded cpu = %d\n", max_loaded);
-+	Dprintk("min_loaded cpu = %d\n", min_loaded);
-+	Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
-+	Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
-+	Dprintk("load imbalance = %lu\n", imbalance);
-+
-+	/* if imbalance is less than approx 10% of max load, then
-+	 * observe diminishing returns action. - quit
-+	 */
-+	if (imbalance < (max_cpu_irq >> 3)) {
-+		Dprintk("Imbalance too trivial\n");
-+		goto not_worth_the_effort;
-+	}
-+
-+tryanotherirq:
-+	/* if we select an IRQ to move that can't go where we want, then
-+	 * see if there is another one to try.
-+	 */
-+	move_this_load = 0;
-+	selected_irq = -1;
-+	for (j = 0; j < NR_IRQS; j++) {
-+		/* Is this an active IRQ? */
-+		if (!irq_desc[j].action)
-+			continue;
-+		if (imbalance <= IRQ_DELTA(max_loaded,j))
-+			continue;
-+		/* Try to find the IRQ that is closest to the imbalance
-+		 * without going over.
-+		 */
-+		if (move_this_load < IRQ_DELTA(max_loaded,j)) {
-+			move_this_load = IRQ_DELTA(max_loaded,j);
-+			selected_irq = j;
-+		}
-+	}
-+	if (selected_irq == -1) {
-+		goto tryanothercpu;
-+	}
-+
-+	imbalance = move_this_load;
-+	
-+	/* For physical_balance case, we accumulated both load
-+	 * values in the one of the siblings cpu_irq[],
-+	 * to use the same code for physical and logical processors
-+	 * as much as possible. 
-+	 *
-+	 * NOTE: the cpu_irq[] array holds the sum of the load for
-+	 * sibling A and sibling B in the slot for the lowest numbered
-+	 * sibling (A), _AND_ the load for sibling B in the slot for
-+	 * the higher numbered sibling.
-+	 *
-+	 * We seek the least loaded sibling by making the comparison
-+	 * (A+B)/2 vs B
-+	 */
-+	load = CPU_IRQ(min_loaded) >> 1;
-+	for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
-+		if (load > CPU_IRQ(j)) {
-+			/* This won't change cpu_sibling_map[min_loaded] */
-+			load = CPU_IRQ(j);
-+			min_loaded = j;
-+		}
-+	}
-+
-+	cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]);
-+	target_cpu_mask = cpumask_of_cpu(min_loaded);
-+	cpus_and(tmp, target_cpu_mask, allowed_mask);
-+
-+	if (!cpus_empty(tmp)) {
-+		irq_desc_t *desc = irq_desc + selected_irq;
-+		unsigned long flags;
-+
-+		Dprintk("irq = %d moved to cpu = %d\n",
-+				selected_irq, min_loaded);
-+		/* mark for change destination */
-+		spin_lock_irqsave(&desc->lock, flags);
-+		pending_irq_balance_cpumask[selected_irq] =
-+					cpumask_of_cpu(min_loaded);
-+		spin_unlock_irqrestore(&desc->lock, flags);
-+		/* Since we made a change, come back sooner to 
-+		 * check for more variation.
-+		 */
-+		balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-+			balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
-+		return;
-+	}
-+	goto tryanotherirq;
-+
-+not_worth_the_effort:
-+	/*
-+	 * if we did not find an IRQ to move, then adjust the time interval
-+	 * upward
-+	 */
-+	balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
-+		balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);	
-+	Dprintk("IRQ worth rotating not found\n");
-+	return;
-+}
-+
-+static int balanced_irq(void *unused)
-+{
-+	int i;
-+	unsigned long prev_balance_time = jiffies;
-+	long time_remaining = balanced_irq_interval;
-+
-+	daemonize("kirqd");
-+	
-+	/* push everything to CPU 0 to give us a starting point.  */
-+	for (i = 0 ; i < NR_IRQS ; i++) {
-+		pending_irq_balance_cpumask[i] = cpumask_of_cpu(0);
-+	}
-+
-+	for ( ; ; ) {
-+		set_current_state(TASK_INTERRUPTIBLE);
-+		time_remaining = schedule_timeout(time_remaining);
-+		try_to_freeze(PF_FREEZE);
-+		if (time_after(jiffies,
-+				prev_balance_time+balanced_irq_interval)) {
-+			preempt_disable();
-+			do_irq_balance();
-+			prev_balance_time = jiffies;
-+			time_remaining = balanced_irq_interval;
-+			preempt_enable();
-+		}
-+	}
-+	return 0;
-+}
-+
-+static int __init balanced_irq_init(void)
-+{
-+	int i;
-+	struct cpuinfo_x86 *c;
-+	cpumask_t tmp;
-+
-+	cpus_shift_right(tmp, cpu_online_map, 2);
-+        c = &boot_cpu_data;
-+	/* When not overwritten by the command line ask subarchitecture. */
-+	if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
-+		irqbalance_disabled = NO_BALANCE_IRQ;
-+	if (irqbalance_disabled)
-+		return 0;
-+	
-+	 /* disable irqbalance completely if there is only one processor online */
-+	if (num_online_cpus() < 2) {
-+		irqbalance_disabled = 1;
-+		return 0;
-+	}
-+	/*
-+	 * Enable physical balance only if more than 1 physical processor
-+	 * is present
-+	 */
-+	if (smp_num_siblings > 1 && !cpus_empty(tmp))
-+		physical_balance = 1;
-+
-+	for (i = 0; i < NR_CPUS; i++) {
-+		if (!cpu_online(i))
-+			continue;
-+		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-+		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
-+		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
-+			printk(KERN_ERR "balanced_irq_init: out of memory");
-+			goto failed;
-+		}
-+		memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
-+		memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
-+	}
-+	
-+	printk(KERN_INFO "Starting balanced_irq\n");
-+	if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0) 
-+		return 0;
-+	else 
-+		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
-+failed:
-+	for (i = 0; i < NR_CPUS; i++) {
-+		if(irq_cpu_data[i].irq_delta)
-+			kfree(irq_cpu_data[i].irq_delta);
-+		if(irq_cpu_data[i].last_irq)
-+			kfree(irq_cpu_data[i].last_irq);
-+	}
-+	return 0;
-+}
-+
-+int __init irqbalance_disable(char *str)
-+{
-+	irqbalance_disabled = 1;
-+	return 0;
-+}
-+
-+__setup("noirqbalance", irqbalance_disable);
-+
-+static inline void move_irq(int irq)
-+{
-+	/* note - we hold the desc->lock */
-+	if (unlikely(!cpus_empty(pending_irq_balance_cpumask[irq]))) {
-+		set_ioapic_affinity_irq(irq, pending_irq_balance_cpumask[irq]);
-+		cpus_clear(pending_irq_balance_cpumask[irq]);
-+	}
-+}
-+
-+late_initcall(balanced_irq_init);
-+
-+#else /* !CONFIG_IRQBALANCE */
-+static inline void move_irq(int irq) { }
-+#endif /* CONFIG_IRQBALANCE */
-+
-+#ifndef CONFIG_SMP
-+void fastcall send_IPI_self(int vector)
-+{
-+#ifndef CONFIG_XEN
-+	unsigned int cfg;
-+
-+	/*
-+	 * Wait for idle.
-+	 */
-+	apic_wait_icr_idle();
-+	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
-+	/*
-+	 * Send the IPI. The write to APIC_ICR fires this off.
-+	 */
-+	apic_write_around(APIC_ICR, cfg);
-+#endif
-+}
-+#endif /* !CONFIG_SMP */
-+
-+
-+/*
-+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
-+ * specific CPU-side IRQs.
-+ */
-+
-+#define MAX_PIRQS 8
-+static int pirq_entries [MAX_PIRQS];
-+static int pirqs_enabled;
-+int skip_ioapic_setup;
-+
-+static int __init ioapic_setup(char *str)
-+{
-+	skip_ioapic_setup = 1;
-+	return 1;
-+}
-+
-+__setup("noapic", ioapic_setup);
-+
-+static int __init ioapic_pirq_setup(char *str)
-+{
-+	int i, max;
-+	int ints[MAX_PIRQS+1];
-+
-+	get_options(str, ARRAY_SIZE(ints), ints);
-+
-+	for (i = 0; i < MAX_PIRQS; i++)
-+		pirq_entries[i] = -1;
-+
-+	pirqs_enabled = 1;
-+	apic_printk(APIC_VERBOSE, KERN_INFO
-+			"PIRQ redirection, working around broken MP-BIOS.\n");
-+	max = MAX_PIRQS;
-+	if (ints[0] < MAX_PIRQS)
-+		max = ints[0];
-+
-+	for (i = 0; i < max; i++) {
-+		apic_printk(APIC_VERBOSE, KERN_DEBUG
-+				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-+		/*
-+		 * PIRQs are mapped upside down, usually.
-+		 */
-+		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-+	}
-+	return 1;
-+}
-+
-+__setup("pirq=", ioapic_pirq_setup);
-+
-+/*
-+ * Find the IRQ entry number of a certain pin.
-+ */
-+static int find_irq_entry(int apic, int pin, int type)
-+{
-+	int i;
-+
-+	for (i = 0; i < mp_irq_entries; i++)
-+		if (mp_irqs[i].mpc_irqtype == type &&
-+		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
-+		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
-+		    mp_irqs[i].mpc_dstirq == pin)
-+			return i;
-+
-+	return -1;
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Find the pin to which IRQ[irq] (ISA) is connected
-+ */
-+static int find_isa_irq_pin(int irq, int type)
-+{
-+	int i;
-+
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
-+
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_NEC98
-+		    ) &&
-+		    (mp_irqs[i].mpc_irqtype == type) &&
-+		    (mp_irqs[i].mpc_srcbusirq == irq))
-+
-+			return mp_irqs[i].mpc_dstirq;
-+	}
-+	return -1;
-+}
-+#endif
-+
-+/*
-+ * Find a specific PCI IRQ entry.
-+ * Not an __init, possibly needed by modules
-+ */
-+static int pin_2_irq(int idx, int apic, int pin);
-+
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-+{
-+	int apic, i, best_guess = -1;
-+
-+	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
-+		"slot:%d, pin:%d.\n", bus, slot, pin);
-+	if (mp_bus_id_to_pci_bus[bus] == -1) {
-+		printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-+		return -1;
-+	}
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
-+
-+		for (apic = 0; apic < nr_ioapics; apic++)
-+			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
-+			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
-+				break;
-+
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
-+		    !mp_irqs[i].mpc_irqtype &&
-+		    (bus == lbus) &&
-+		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
-+			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
-+
-+			if (!(apic || IO_APIC_IRQ(irq)))
-+				continue;
-+
-+			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
-+				return irq;
-+			/*
-+			 * Use the first all-but-pin matching entry as a
-+			 * best-guess fuzzy result for broken mptables.
-+			 */
-+			if (best_guess < 0)
-+				best_guess = irq;
-+		}
-+	}
-+	return best_guess;
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * This function currently is only a helper for the i386 smp boot process where 
-+ * we need to reprogram the ioredtbls to cater for the cpus which have come online
-+ * so mask in all cases should simply be TARGET_CPUS
-+ */
-+void __init setup_ioapic_dest(void)
-+{
-+	int pin, ioapic, irq, irq_entry;
-+
-+	if (skip_ioapic_setup == 1)
-+		return;
-+
-+	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-+		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-+			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-+			if (irq_entry == -1)
-+				continue;
-+			irq = pin_2_irq(irq_entry, ioapic, pin);
-+			set_ioapic_affinity_irq(irq, TARGET_CPUS);
-+		}
-+
-+	}
-+}
-+#endif /* !CONFIG_XEN */
-+
-+/*
-+ * EISA Edge/Level control register, ELCR
-+ */
-+static int EISA_ELCR(unsigned int irq)
-+{
-+	if (irq < 16) {
-+		unsigned int port = 0x4d0 + (irq >> 3);
-+		return (inb(port) >> (irq & 7)) & 1;
-+	}
-+	apic_printk(APIC_VERBOSE, KERN_INFO
-+			"Broken MPtable reports ISA irq %d\n", irq);
-+	return 0;
-+}
-+
-+/* EISA interrupts are always polarity zero and can be edge or level
-+ * trigger depending on the ELCR value.  If an interrupt is listed as
-+ * EISA conforming in the MP table, that means its trigger type must
-+ * be read in from the ELCR */
-+
-+#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
-+#define default_EISA_polarity(idx)	(0)
-+
-+/* ISA interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_ISA_trigger(idx)	(0)
-+#define default_ISA_polarity(idx)	(0)
-+
-+/* PCI interrupts are always polarity one level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_PCI_trigger(idx)	(1)
-+#define default_PCI_polarity(idx)	(1)
-+
-+/* MCA interrupts are always polarity zero level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_MCA_trigger(idx)	(1)
-+#define default_MCA_polarity(idx)	(0)
-+
-+/* NEC98 interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_NEC98_trigger(idx)     (0)
-+#define default_NEC98_polarity(idx)    (0)
-+
-+static int __init MPBIOS_polarity(int idx)
-+{
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+	int polarity;
-+
-+	/*
-+	 * Determine IRQ line polarity (high active or low active):
-+	 */
-+	switch (mp_irqs[idx].mpc_irqflag & 3)
-+	{
-+		case 0: /* conforms, ie. bus-type dependent polarity */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					polarity = default_ISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					polarity = default_EISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					polarity = default_PCI_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					polarity = default_MCA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_NEC98: /* NEC 98 pin */
-+				{
-+					polarity = default_NEC98_polarity(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					polarity = 1;
-+					break;
-+				}
-+			}
-+			break;
-+		}
-+		case 1: /* high active */
-+		{
-+			polarity = 0;
-+			break;
-+		}
-+		case 2: /* reserved */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			polarity = 1;
-+			break;
-+		}
-+		case 3: /* low active */
-+		{
-+			polarity = 1;
-+			break;
-+		}
-+		default: /* invalid */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			polarity = 1;
-+			break;
-+		}
-+	}
-+	return polarity;
-+}
-+
-+static int MPBIOS_trigger(int idx)
-+{
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+	int trigger;
-+
-+	/*
-+	 * Determine IRQ trigger mode (edge or level sensitive):
-+	 */
-+	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
-+	{
-+		case 0: /* conforms, ie. bus-type dependent */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					trigger = default_ISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					trigger = default_EISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					trigger = default_PCI_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					trigger = default_MCA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_NEC98: /* NEC 98 pin */
-+				{
-+					trigger = default_NEC98_trigger(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					trigger = 1;
-+					break;
-+				}
-+			}
-+			break;
-+		}
-+		case 1: /* edge */
-+		{
-+			trigger = 0;
-+			break;
-+		}
-+		case 2: /* reserved */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			trigger = 1;
-+			break;
-+		}
-+		case 3: /* level */
-+		{
-+			trigger = 1;
-+			break;
-+		}
-+		default: /* invalid */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			trigger = 0;
-+			break;
-+		}
-+	}
-+	return trigger;
-+}
-+
-+static inline int irq_polarity(int idx)
-+{
-+	return MPBIOS_polarity(idx);
-+}
-+
-+static inline int irq_trigger(int idx)
-+{
-+	return MPBIOS_trigger(idx);
-+}
-+
-+static int pin_2_irq(int idx, int apic, int pin)
-+{
-+	int irq, i;
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+
-+	/*
-+	 * Debugging check, we are in big trouble if this message pops up!
-+	 */
-+	if (mp_irqs[idx].mpc_dstirq != pin)
-+		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
-+
-+	switch (mp_bus_id_to_type[bus])
-+	{
-+		case MP_BUS_ISA: /* ISA pin */
-+		case MP_BUS_EISA:
-+		case MP_BUS_MCA:
-+		case MP_BUS_NEC98:
-+		{
-+			irq = mp_irqs[idx].mpc_srcbusirq;
-+			break;
-+		}
-+		case MP_BUS_PCI: /* PCI pin */
-+		{
-+			/*
-+			 * PCI IRQs are mapped in order
-+			 */
-+			i = irq = 0;
-+			while (i < apic)
-+				irq += nr_ioapic_registers[i++];
-+			irq += pin;
-+
-+			/*
-+			 * For MPS mode, so far only needed by ES7000 platform
-+			 */
-+			if (ioapic_renumber_irq)
-+				irq = ioapic_renumber_irq(apic, irq);
-+
-+			break;
-+		}
-+		default:
-+		{
-+			printk(KERN_ERR "unknown bus type %d.\n",bus); 
-+			irq = 0;
-+			break;
-+		}
-+	}
-+
-+	/*
-+	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
-+	 */
-+	if ((pin >= 16) && (pin <= 23)) {
-+		if (pirq_entries[pin-16] != -1) {
-+			if (!pirq_entries[pin-16]) {
-+				apic_printk(APIC_VERBOSE, KERN_DEBUG
-+						"disabling PIRQ%d\n", pin-16);
-+			} else {
-+				irq = pirq_entries[pin-16];
-+				apic_printk(APIC_VERBOSE, KERN_DEBUG
-+						"using PIRQ%d -> IRQ %d\n",
-+						pin-16, irq);
-+			}
-+		}
-+	}
-+	return irq;
-+}
-+
-+static inline int IO_APIC_irq_trigger(int irq)
-+{
-+	int apic, idx, pin;
-+
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+			idx = find_irq_entry(apic,pin,mp_INT);
-+			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
-+				return irq_trigger(idx);
-+		}
-+	}
-+	/*
-+	 * nonexistent IRQs are edge default
-+	 */
-+	return 0;
-+}
-+
-+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-+u8 irq_vector[NR_IRQ_VECTORS]; /* = { FIRST_DEVICE_VECTOR , 0 }; */
-+
-+int assign_irq_vector(int irq)
-+{
-+	static int current_vector = FIRST_DEVICE_VECTOR;
-+	physdev_op_t op;
-+
-+	BUG_ON(irq >= NR_IRQ_VECTORS);
-+	if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
-+		return IO_APIC_VECTOR(irq);
-+
-+	op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
-+	op.u.irq_op.irq = irq;
-+	if (HYPERVISOR_physdev_op(&op))
-+		return -ENOSPC;
-+	current_vector = op.u.irq_op.vector;
-+
-+	vector_irq[current_vector] = irq;
-+	if (irq != AUTO_ASSIGN)
-+		IO_APIC_VECTOR(irq) = current_vector;
-+
-+	return current_vector;
-+}
-+
-+#ifndef CONFIG_XEN
-+static struct hw_interrupt_type ioapic_level_type;
-+static struct hw_interrupt_type ioapic_edge_type;
-+
-+#define IOAPIC_AUTO	-1
-+#define IOAPIC_EDGE	0
-+#define IOAPIC_LEVEL	1
-+
-+static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
-+{
-+	if (use_pci_vector() && !platform_legacy_irq(irq)) {
-+		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+				trigger == IOAPIC_LEVEL)
-+			irq_desc[vector].handler = &ioapic_level_type;
-+		else
-+			irq_desc[vector].handler = &ioapic_edge_type;
-+		set_intr_gate(vector, interrupt[vector]);
-+	} else	{
-+		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+				trigger == IOAPIC_LEVEL)
-+			irq_desc[irq].handler = &ioapic_level_type;
-+		else
-+			irq_desc[irq].handler = &ioapic_edge_type;
-+		set_intr_gate(vector, interrupt[irq]);
-+	}
-+}
-+#else
-+#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
-+#endif
-+
-+static void __init setup_IO_APIC_irqs(void)
-+{
-+	struct IO_APIC_route_entry entry;
-+	int apic, pin, idx, irq, first_notcon = 1, vector;
-+	unsigned long flags;
-+
-+	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-+
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+
-+		/*
-+		 * add it to the IO-APIC irq-routing table:
-+		 */
-+		memset(&entry,0,sizeof(entry));
-+
-+		entry.delivery_mode = INT_DELIVERY_MODE;
-+		entry.dest_mode = INT_DEST_MODE;
-+		entry.mask = 0;				/* enable IRQ */
-+		entry.dest.logical.logical_dest = 
-+					cpu_mask_to_apicid(TARGET_CPUS);
-+
-+		idx = find_irq_entry(apic,pin,mp_INT);
-+		if (idx == -1) {
-+			if (first_notcon) {
-+				apic_printk(APIC_VERBOSE, KERN_DEBUG
-+						" IO-APIC (apicid-pin) %d-%d",
-+						mp_ioapics[apic].mpc_apicid,
-+						pin);
-+				first_notcon = 0;
-+			} else
-+				apic_printk(APIC_VERBOSE, ", %d-%d",
-+					mp_ioapics[apic].mpc_apicid, pin);
-+			continue;
-+		}
-+
-+		entry.trigger = irq_trigger(idx);
-+		entry.polarity = irq_polarity(idx);
-+
-+		if (irq_trigger(idx)) {
-+			entry.trigger = 1;
-+			entry.mask = 1;
-+		}
-+
-+		irq = pin_2_irq(idx, apic, pin);
-+		/*
-+		 * skip adding the timer int on secondary nodes, which causes
-+		 * a small but painful rift in the time-space continuum
-+		 */
-+		if (multi_timer_check(apic, irq))
-+			continue;
-+		else
-+			add_pin_to_irq(irq, apic, pin);
-+
-+		if (/*!apic &&*/ !IO_APIC_IRQ(irq))
-+			continue;
-+
-+		if (IO_APIC_IRQ(irq)) {
-+			vector = assign_irq_vector(irq);
-+			entry.vector = vector;
-+			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-+		
-+			if (!apic && (irq < 16))
-+				disable_8259A_irq(irq);
-+		}
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+	}
-+	}
-+
-+	if (!first_notcon)
-+		apic_printk(APIC_VERBOSE, " not connected.\n");
-+}
-+
-+/*
-+ * Set up the 8259A-master output pin:
-+ */
-+#ifndef CONFIG_XEN
-+static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
-+
-+	memset(&entry,0,sizeof(entry));
-+
-+	disable_8259A_irq(0);
-+
-+	/* mask LVT0 */
-+	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+
-+	/*
-+	 * We use logical delivery to get the timer IRQ
-+	 * to the first CPU.
-+	 */
-+	entry.dest_mode = INT_DEST_MODE;
-+	entry.mask = 0;					/* unmask IRQ now */
-+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+	entry.delivery_mode = INT_DELIVERY_MODE;
-+	entry.polarity = 0;
-+	entry.trigger = 0;
-+	entry.vector = vector;
-+
-+	/*
-+	 * The timer IRQ doesn't have to know that behind the
-+	 * scene we have a 8259A-master in AEOI mode ...
-+	 */
-+	irq_desc[0].handler = &ioapic_edge_type;
-+
-+	/*
-+	 * Add it to the IO-APIC irq-routing table:
-+	 */
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
-+	io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	enable_8259A_irq(0);
-+}
-+
-+static inline void UNEXPECTED_IO_APIC(void)
-+{
-+}
-+
-+void __init print_IO_APIC(void)
-+{
-+	int apic, i;
-+	union IO_APIC_reg_00 reg_00;
-+	union IO_APIC_reg_01 reg_01;
-+	union IO_APIC_reg_02 reg_02;
-+	union IO_APIC_reg_03 reg_03;
-+	unsigned long flags;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+ 	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-+	for (i = 0; i < nr_ioapics; i++)
-+		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-+		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
-+
-+	/*
-+	 * We are a bit conservative about what we expect.  We have to
-+	 * know about every hardware change ASAP.
-+	 */
-+	printk(KERN_INFO "testing the IO APIC.......................\n");
-+
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(apic, 0);
-+	reg_01.raw = io_apic_read(apic, 1);
-+	if (reg_01.bits.version >= 0x10)
-+		reg_02.raw = io_apic_read(apic, 2);
-+	if (reg_01.bits.version >= 0x20)
-+		reg_03.raw = io_apic_read(apic, 3);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
-+	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-+	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
-+	printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
-+	printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);
-+	if (reg_00.bits.ID >= get_physical_broadcast())
-+		UNEXPECTED_IO_APIC();
-+	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
-+		UNEXPECTED_IO_APIC();
-+
-+	printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
-+	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
-+	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
-+		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
-+		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
-+		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
-+		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
-+		(reg_01.bits.entries != 0x2E) &&
-+		(reg_01.bits.entries != 0x3F)
-+	)
-+		UNEXPECTED_IO_APIC();
-+
-+	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
-+	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
-+	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
-+		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
-+		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
-+		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
-+		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
-+	)
-+		UNEXPECTED_IO_APIC();
-+	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
-+		UNEXPECTED_IO_APIC();
-+
-+	/*
-+	 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
-+	 * but the value of reg_02 is read as the previous read register
-+	 * value, so ignore it if reg_02 == reg_01.
-+	 */
-+	if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
-+		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-+		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
-+		if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
-+			UNEXPECTED_IO_APIC();
-+	}
-+
-+	/*
-+	 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
-+	 * or reg_03, but the value of reg_0[23] is read as the previous read
-+	 * register value, so ignore it if reg_03 == reg_0[12].
-+	 */
-+	if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
-+	    reg_03.raw != reg_01.raw) {
-+		printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
-+		printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
-+		if (reg_03.bits.__reserved_1)
-+			UNEXPECTED_IO_APIC();
-+	}
-+
-+	printk(KERN_DEBUG ".... IRQ redirection table:\n");
-+
-+	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
-+			  " Stat Dest Deli Vect:   \n");
-+
-+	for (i = 0; i <= reg_01.bits.entries; i++) {
-+		struct IO_APIC_route_entry entry;
-+
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
-+		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+		printk(KERN_DEBUG " %02x %03X %02X  ",
-+			i,
-+			entry.dest.logical.logical_dest,
-+			entry.dest.physical.physical_dest
-+		);
-+
-+		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
-+			entry.mask,
-+			entry.trigger,
-+			entry.irr,
-+			entry.polarity,
-+			entry.delivery_status,
-+			entry.dest_mode,
-+			entry.delivery_mode,
-+			entry.vector
-+		);
-+	}
-+	}
-+	if (use_pci_vector())
-+		printk(KERN_INFO "Using vector-based indexing\n");
-+	printk(KERN_DEBUG "IRQ to pin mappings:\n");
-+	for (i = 0; i < NR_IRQS; i++) {
-+		struct irq_pin_list *entry = irq_2_pin + i;
-+		if (entry->pin < 0)
-+			continue;
-+ 		if (use_pci_vector() && !platform_legacy_irq(i))
-+			printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
-+		else
-+			printk(KERN_DEBUG "IRQ%d ", i);
-+		for (;;) {
-+			printk("-> %d:%d", entry->apic, entry->pin);
-+			if (!entry->next)
-+				break;
-+			entry = irq_2_pin + entry->next;
-+		}
-+		printk("\n");
-+	}
-+
-+	printk(KERN_INFO ".................................... done.\n");
-+
-+	return;
-+}
-+
-+static void print_APIC_bitfield (int base)
-+{
-+	unsigned int v;
-+	int i, j;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-+	for (i = 0; i < 8; i++) {
-+		v = apic_read(base + i*0x10);
-+		for (j = 0; j < 32; j++) {
-+			if (v & (1<<j))
-+				printk("1");
-+			else
-+				printk("0");
-+		}
-+		printk("\n");
-+	}
-+}
-+
-+void /*__init*/ print_local_APIC(void * dummy)
-+{
-+	unsigned int v, ver, maxlvt;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-+		smp_processor_id(), hard_smp_processor_id());
-+	v = apic_read(APIC_ID);
-+	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
-+	v = apic_read(APIC_LVR);
-+	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-+	ver = GET_APIC_VERSION(v);
-+	maxlvt = get_maxlvt();
-+
-+	v = apic_read(APIC_TASKPRI);
-+	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-+
-+	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
-+		v = apic_read(APIC_ARBPRI);
-+		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-+			v & APIC_ARBPRI_MASK);
-+		v = apic_read(APIC_PROCPRI);
-+		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-+	}
-+
-+	v = apic_read(APIC_EOI);
-+	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-+	v = apic_read(APIC_RRR);
-+	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-+	v = apic_read(APIC_LDR);
-+	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-+	v = apic_read(APIC_DFR);
-+	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-+	v = apic_read(APIC_SPIV);
-+	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-+
-+	printk(KERN_DEBUG "... APIC ISR field:\n");
-+	print_APIC_bitfield(APIC_ISR);
-+	printk(KERN_DEBUG "... APIC TMR field:\n");
-+	print_APIC_bitfield(APIC_TMR);
-+	printk(KERN_DEBUG "... APIC IRR field:\n");
-+	print_APIC_bitfield(APIC_IRR);
-+
-+	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
-+		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-+			apic_write(APIC_ESR, 0);
-+		v = apic_read(APIC_ESR);
-+		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-+	}
-+
-+	v = apic_read(APIC_ICR);
-+	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
-+	v = apic_read(APIC_ICR2);
-+	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
-+
-+	v = apic_read(APIC_LVTT);
-+	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-+
-+	if (maxlvt > 3) {                       /* PC is LVT#4. */
-+		v = apic_read(APIC_LVTPC);
-+		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-+	}
-+	v = apic_read(APIC_LVT0);
-+	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-+	v = apic_read(APIC_LVT1);
-+	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-+
-+	if (maxlvt > 2) {			/* ERR is LVT#3. */
-+		v = apic_read(APIC_LVTERR);
-+		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-+	}
-+
-+	v = apic_read(APIC_TMICT);
-+	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-+	v = apic_read(APIC_TMCCT);
-+	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-+	v = apic_read(APIC_TDCR);
-+	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-+	printk("\n");
-+}
-+
-+void print_all_local_APICs (void)
-+{
-+	on_each_cpu(print_local_APIC, NULL, 1, 1);
-+}
-+
-+void /*__init*/ print_PIC(void)
-+{
-+	extern spinlock_t i8259A_lock;
-+	unsigned int v;
-+	unsigned long flags;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk(KERN_DEBUG "\nprinting PIC contents\n");
-+
-+	spin_lock_irqsave(&i8259A_lock, flags);
-+
-+	v = inb(0xa1) << 8 | inb(0x21);
-+	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
-+
-+	v = inb(0xa0) << 8 | inb(0x20);
-+	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
-+
-+	outb(0x0b,0xa0);
-+	outb(0x0b,0x20);
-+	v = inb(0xa0) << 8 | inb(0x20);
-+	outb(0x0a,0xa0);
-+	outb(0x0a,0x20);
-+
-+	spin_unlock_irqrestore(&i8259A_lock, flags);
-+
-+	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
-+
-+	v = inb(0x4d1) << 8 | inb(0x4d0);
-+	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-+}
-+#else
-+void __init print_IO_APIC(void) { }
-+#endif /* !CONFIG_XEN */
-+
-+static void __init enable_IO_APIC(void)
-+{
-+	union IO_APIC_reg_01 reg_01;
-+	int i;
-+	unsigned long flags;
-+
-+	for (i = 0; i < PIN_MAP_SIZE; i++) {
-+		irq_2_pin[i].pin = -1;
-+		irq_2_pin[i].next = 0;
-+	}
-+	if (!pirqs_enabled)
-+		for (i = 0; i < MAX_PIRQS; i++)
-+			pirq_entries[i] = -1;
-+
-+	/*
-+	 * The number of IO-APIC IRQ registers (== #pins):
-+	 */
-+	for (i = 0; i < nr_ioapics; i++) {
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_01.raw = io_apic_read(i, 1);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		nr_ioapic_registers[i] = reg_01.bits.entries+1;
-+	}
-+
-+	/*
-+	 * Do not trust the IO-APIC being empty at bootup
-+	 */
-+	clear_IO_APIC();
-+}
-+
-+/*
-+ * Not an __init, needed by the reboot code
-+ */
-+void disable_IO_APIC(void)
-+{
-+	/*
-+	 * Clear the IO-APIC before rebooting:
-+	 */
-+	clear_IO_APIC();
-+
-+#ifndef CONFIG_XEN
-+	disconnect_bsp_APIC();
-+#endif
-+}
-+
-+/*
-+ * function to set the IO-APIC physical IDs based on the
-+ * values stored in the MPC table.
-+ *
-+ * by Matt Domsch <Matt_Domsch at dell.com>  Tue Dec 21 12:25:05 CST 1999
-+ */
-+
-+#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
-+static void __init setup_ioapic_ids_from_mpc(void)
-+{
-+	union IO_APIC_reg_00 reg_00;
-+	physid_mask_t phys_id_present_map;
-+	int apic;
-+	int i;
-+	unsigned char old_id;
-+	unsigned long flags;
-+
-+	/*
-+	 * This is broken; anything with a real cpu count has to
-+	 * circumvent this idiocy regardless.
-+	 */
-+	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
-+
-+	/*
-+	 * Set the IOAPIC ID to the value stored in the MPC table.
-+	 */
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+		/* Read the register 0 value */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		
-+		old_id = mp_ioapics[apic].mpc_apicid;
-+
-+		if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
-+			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
-+				apic, mp_ioapics[apic].mpc_apicid);
-+			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-+				reg_00.bits.ID);
-+			mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
-+		}
-+
-+		/* Don't check I/O APIC IDs for some xAPIC systems.  They have
-+		 * no meaning without the serial APIC bus. */
-+		if (NO_IOAPIC_CHECK)
-+			continue;
-+		/*
-+		 * Sanity check, is the ID really free? Every APIC in a
-+		 * system must have a unique ID or we get lots of nice
-+		 * 'stuck on smp_invalidate_needed IPI wait' messages.
-+		 */
-+		if (check_apicid_used(phys_id_present_map,
-+					mp_ioapics[apic].mpc_apicid)) {
-+			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
-+				apic, mp_ioapics[apic].mpc_apicid);
-+			for (i = 0; i < get_physical_broadcast(); i++)
-+				if (!physid_isset(i, phys_id_present_map))
-+					break;
-+			if (i >= get_physical_broadcast())
-+				panic("Max APIC ID exceeded!\n");
-+			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-+				i);
-+			physid_set(i, phys_id_present_map);
-+			mp_ioapics[apic].mpc_apicid = i;
-+		} else {
-+			physid_mask_t tmp;
-+			tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
-+			apic_printk(APIC_VERBOSE, "Setting %d in the "
-+					"phys_id_present_map\n",
-+					mp_ioapics[apic].mpc_apicid);
-+			physids_or(phys_id_present_map, phys_id_present_map, tmp);
-+		}
-+
-+
-+		/*
-+		 * We need to adjust the IRQ routing table
-+		 * if the ID changed.
-+		 */
-+		if (old_id != mp_ioapics[apic].mpc_apicid)
-+			for (i = 0; i < mp_irq_entries; i++)
-+				if (mp_irqs[i].mpc_dstapic == old_id)
-+					mp_irqs[i].mpc_dstapic
-+						= mp_ioapics[apic].mpc_apicid;
-+
-+		/*
-+		 * Read the right value from the MPC table and
-+		 * write it into the ID register.
-+	 	 */
-+		apic_printk(APIC_VERBOSE, KERN_INFO
-+			"...changing IO-APIC physical APIC ID to %d ...",
-+			mp_ioapics[apic].mpc_apicid);
-+
-+		reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0, reg_00.raw);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+		/*
-+		 * Sanity check
-+		 */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
-+			printk("could not set ID!\n");
-+		else
-+			apic_printk(APIC_VERBOSE, " ok.\n");
-+	}
-+}
-+#else
-+static void __init setup_ioapic_ids_from_mpc(void) { }
-+#endif
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * There is a nasty bug in some older SMP boards, their mptable lies
-+ * about the timer IRQ. We do the following to work around the situation:
-+ *
-+ *	- timer IRQ defaults to IO-APIC IRQ
-+ *	- if this function detects that timer IRQs are defunct, then we fall
-+ *	  back to ISA timer IRQs
-+ */
-+static int __init timer_irq_works(void)
-+{
-+	unsigned long t1 = jiffies;
-+
-+	local_irq_enable();
-+	/* Let ten ticks pass... */
-+	mdelay((10 * 1000) / HZ);
-+
-+	/*
-+	 * Expect a few ticks at least, to be sure some possible
-+	 * glue logic does not lock up after the first one or two
-+	 * ticks in a non-ExtINT mode.  Also the local APIC
-+	 * might have cached one ExtINT interrupt.  Finally, at
-+	 * least one tick may be lost due to delays.
-+	 */
-+	if (jiffies - t1 > 4)
-+		return 1;
-+
-+	return 0;
-+}
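-+
-+/*
-+ * A worked example of the check in timer_irq_works() above (illustrative
-+ * only; the numbers depend on the configured HZ): with HZ=100 the
-+ * mdelay((10 * 1000) / HZ) call busy-waits 100 ms, i.e. roughly ten
-+ * timer ticks, and the timer is accepted as working only if at least
-+ * five of them were actually delivered (jiffies - t1 > 4).
-+ */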
-+
-+/*
-+ * In the SMP+IOAPIC case it might happen that there are an unspecified
-+ * number of pending IRQ events unhandled. These cases are very rare,
-+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
-+ * better to do it this way as thus we do not have to be aware of
-+ * 'pending' interrupts in the IRQ path, except at this point.
-+ */
-+/*
-+ * Edge triggered needs to resend any interrupt
-+ * that was delayed but this is now handled in the device
-+ * independent code.
-+ */
-+
-+/*
-+ * Starting up an edge-triggered IO-APIC interrupt is
-+ * nasty - we need to make sure that we get the edge.
-+ * If it is already asserted for some reason, we need to
-+ * return 1 to indicate that it was pending.
-+ *
-+ * This is not complete - we should be able to fake
-+ * an edge even if it isn't on the 8259A...
-+ */
-+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
-+{
-+	int was_pending = 0;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	if (irq < 16) {
-+		disable_8259A_irq(irq);
-+		if (i8259A_irq_pending(irq))
-+			was_pending = 1;
-+	}
-+	__unmask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return was_pending;
-+}
-+
-+/*
-+ * Once we have recorded IRQ_PENDING already, we can mask the
-+ * interrupt for real. This prevents IRQ storms from unhandled
-+ * devices.
-+ */
-+static void ack_edge_ioapic_irq(unsigned int irq)
-+{
-+	move_irq(irq);
-+	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
-+					== (IRQ_PENDING | IRQ_DISABLED))
-+		mask_IO_APIC_irq(irq);
-+	ack_APIC_irq();
-+}
-+
-+/*
-+ * Level triggered interrupts can just be masked,
-+ * and shutting down and starting up the interrupt
-+ * is the same as enabling and disabling them -- except
-+ * that startup needs to return a "was pending" value.
-+ *
-+ * Level triggered interrupts are special because we
-+ * do not touch any IO-APIC register while handling
-+ * them. We ack the APIC in the end-IRQ handler, not
-+ * in the start-IRQ-handler. Protection against reentrance
-+ * from the same interrupt is still provided, both by the
-+ * generic IRQ layer and by the fact that an unacked local
-+ * APIC does not accept IRQs.
-+ */
-+static unsigned int startup_level_ioapic_irq (unsigned int irq)
-+{
-+	unmask_IO_APIC_irq(irq);
-+
-+	return 0; /* don't check for pending */
-+}
-+
-+static void end_level_ioapic_irq (unsigned int irq)
-+{
-+	unsigned long v;
-+	int i;
-+
-+	move_irq(irq);
-+/*
-+ * It appears there is an erratum which affects at least version 0x11
-+ * of I/O APIC (that's the 82093AA and cores integrated into various
-+ * chipsets).  Under certain conditions a level-triggered interrupt is
-+ * erroneously delivered as edge-triggered one but the respective IRR
-+ * bit gets set nevertheless.  As a result the I/O unit expects an EOI
-+ * message but it will never arrive and further interrupts are blocked
-+ * from the source.  The exact reason is so far unknown, but the
-+ * phenomenon was observed when two consecutive interrupt requests
-+ * from a given source get delivered to the same CPU and the source is
-+ * temporarily disabled in between.
-+ *
-+ * A workaround is to simulate an EOI message manually.  We achieve it
-+ * by setting the trigger mode to edge and then to level when the edge
-+ * trigger mode gets detected in the TMR of a local APIC for a
-+ * level-triggered interrupt.  We mask the source for the time of the
-+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
-+ * The idea is from Manfred Spraul.  --macro
-+ */
-+	i = IO_APIC_VECTOR(irq);
-+
-+	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
-+
-+	ack_APIC_irq();
-+
-+	if (!(v & (1 << (i & 0x1f)))) {
-+		atomic_inc(&irq_mis_count);
-+		spin_lock(&ioapic_lock);
-+		__mask_and_edge_IO_APIC_irq(irq);
-+		__unmask_and_level_IO_APIC_irq(irq);
-+		spin_unlock(&ioapic_lock);
-+	}
-+}
-+
-+#ifdef CONFIG_PCI_MSI
-+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	return startup_edge_ioapic_irq(irq);
-+}
-+
-+static void ack_edge_ioapic_vector(unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	ack_edge_ioapic_irq(irq);
-+}
-+
-+static unsigned int startup_level_ioapic_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	return startup_level_ioapic_irq (irq);
-+}
-+
-+static void end_level_ioapic_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	end_level_ioapic_irq(irq);
-+}
-+
-+static void mask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	mask_IO_APIC_irq(irq);
-+}
-+
-+static void unmask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	unmask_IO_APIC_irq(irq);
-+}
-+
-+static void set_ioapic_affinity_vector (unsigned int vector,
-+					cpumask_t cpu_mask)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	set_ioapic_affinity_irq(irq, cpu_mask);
-+}
-+#endif
-+
-+/*
-+ * Level and edge triggered IO-APIC interrupts need different handling,
-+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
-+ * handled with the level-triggered descriptor, but that one has slightly
-+ * more overhead. Level-triggered interrupts cannot be handled with the
-+ * edge-triggered handler, without risking IRQ storms and other ugly
-+ * races.
-+ */
-+static struct hw_interrupt_type ioapic_edge_type = {
-+	.typename 	= "IO-APIC-edge",
-+	.startup 	= startup_edge_ioapic,
-+	.shutdown 	= shutdown_edge_ioapic,
-+	.enable 	= enable_edge_ioapic,
-+	.disable 	= disable_edge_ioapic,
-+	.ack 		= ack_edge_ioapic,
-+	.end 		= end_edge_ioapic,
-+	.set_affinity 	= set_ioapic_affinity,
-+};
-+
-+static struct hw_interrupt_type ioapic_level_type = {
-+	.typename 	= "IO-APIC-level",
-+	.startup 	= startup_level_ioapic,
-+	.shutdown 	= shutdown_level_ioapic,
-+	.enable 	= enable_level_ioapic,
-+	.disable 	= disable_level_ioapic,
-+	.ack 		= mask_and_ack_level_ioapic,
-+	.end 		= end_level_ioapic,
-+	.set_affinity 	= set_ioapic_affinity,
-+};
-+#endif /* !CONFIG_XEN */
-+
-+static inline void init_IO_APIC_traps(void)
-+{
-+	int irq;
-+
-+	/*
-+	 * NOTE! The local APIC isn't very good at handling
-+	 * multiple interrupts at the same interrupt level.
-+	 * As the interrupt level is determined by taking the
-+	 * vector number and shifting that right by 4, we
-+	 * want to spread these out a bit so that they don't
-+	 * all fall in the same interrupt level.
-+	 *
-+	 * Also, we've got to be careful not to trash gate
-+	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
-+	 */
-+	for (irq = 0; irq < NR_IRQS ; irq++) {
-+		int tmp = irq;
-+		if (use_pci_vector()) {
-+			if (!platform_legacy_irq(tmp))
-+				if ((tmp = vector_to_irq(tmp)) == -1)
-+					continue;
-+		}
-+		if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
-+			/*
-+			 * Hmm.. We don't have an entry for this,
-+			 * so default to an old-fashioned 8259
-+			 * interrupt if we can..
-+			 */
-+			if (irq < 16)
-+				make_8259A_irq(irq);
-+#ifndef CONFIG_XEN
-+			else
-+				/* Strange. Oh, well.. */
-+				irq_desc[irq].handler = &no_irq_type;
-+#endif
-+		}
-+	}
-+}
-+
-+#ifndef CONFIG_XEN
-+static void enable_lapic_irq (unsigned int irq)
-+{
-+	unsigned long v;
-+
-+	v = apic_read(APIC_LVT0);
-+	apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
-+}
-+
-+static void disable_lapic_irq (unsigned int irq)
-+{
-+	unsigned long v;
-+
-+	v = apic_read(APIC_LVT0);
-+	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
-+}
-+
-+static void ack_lapic_irq (unsigned int irq)
-+{
-+	ack_APIC_irq();
-+}
-+
-+static void end_lapic_irq (unsigned int i) { /* nothing */ }
-+
-+static struct hw_interrupt_type lapic_irq_type = {
-+	.typename 	= "local-APIC-edge",
-+	.startup 	= NULL, /* startup_irq() not used for IRQ0 */
-+	.shutdown 	= NULL, /* shutdown_irq() not used for IRQ0 */
-+	.enable 	= enable_lapic_irq,
-+	.disable 	= disable_lapic_irq,
-+	.ack 		= ack_lapic_irq,
-+	.end 		= end_lapic_irq
-+};
-+
-+static void setup_nmi (void)
-+{
-+	/*
-+ 	 * Dirty trick to enable the NMI watchdog ...
-+	 * We put the 8259A master into AEOI mode and
-+	 * unmask LVT0 as NMI on all local APICs.
-+	 *
-+	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-+	 * is from Maciej W. Rozycki - so we do not have to EOI from
-+	 * the NMI handler or the timer interrupt.
-+	 */ 
-+	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
-+
-+	on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
-+
-+	apic_printk(APIC_VERBOSE, " done.\n");
-+}
-+
-+/*
-+ * This looks a bit hackish but it's about the only way of sending
-+ * a few INTA cycles to 8259As and any associated glue logic.  ICR does
-+ * not support the ExtINT mode, unfortunately.  We need to send these
-+ * cycles as some i82489DX-based boards have glue logic that keeps the
-+ * 8259A interrupt line asserted until INTA.  --macro
-+ */
-+static inline void unlock_ExtINT_logic(void)
-+{
-+	int pin, i;
-+	struct IO_APIC_route_entry entry0, entry1;
-+	unsigned char save_control, save_freq_select;
-+	unsigned long flags;
-+
-+	pin = find_isa_irq_pin(8, mp_INT);
-+	if (pin == -1)
-+		return;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	*(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
-+	*(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+	clear_IO_APIC_pin(0, pin);
-+
-+	memset(&entry1, 0, sizeof(entry1));
-+
-+	entry1.dest_mode = 0;			/* physical delivery */
-+	entry1.mask = 0;			/* unmask IRQ now */
-+	entry1.dest.physical.physical_dest = hard_smp_processor_id();
-+	entry1.delivery_mode = dest_ExtINT;
-+	entry1.polarity = entry0.polarity;
-+	entry1.trigger = 0;
-+	entry1.vector = 0;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
-+	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	save_control = CMOS_READ(RTC_CONTROL);
-+	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-+	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-+		   RTC_FREQ_SELECT);
-+	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
-+
-+	i = 100;
-+	while (i-- > 0) {
-+		mdelay(10);
-+		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-+			i -= 10;
-+	}
-+
-+	CMOS_WRITE(save_control, RTC_CONTROL);
-+	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+	clear_IO_APIC_pin(0, pin);
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
-+	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+/*
-+ * This code may look a bit paranoid, but it's supposed to cooperate with
-+ * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
-+ * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
-+ * fanatically on his truly buggy board.
-+ */
-+static inline void check_timer(void)
-+{
-+	int pin1, pin2;
-+	int vector;
-+
-+	/*
-+	 * get/set the timer IRQ vector:
-+	 */
-+	disable_8259A_irq(0);
-+	vector = assign_irq_vector(0);
-+	set_intr_gate(vector, interrupt[0]);
-+
-+	/*
-+	 * Subtle, code in do_timer_interrupt() expects an AEOI
-+	 * mode for the 8259A whenever interrupts are routed
-+	 * through I/O APICs.  Also IRQ0 has to be enabled in
-+	 * the 8259A which implies the virtual wire has to be
-+	 * disabled in the local APIC.
-+	 */
-+	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+	init_8259A(1);
-+	timer_ack = 1;
-+	enable_8259A_irq(0);
-+
-+	pin1 = find_isa_irq_pin(0, mp_INT);
-+	pin2 = find_isa_irq_pin(0, mp_ExtINT);
-+
-+	printk(KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
-+
-+	if (pin1 != -1) {
-+		/*
-+		 * Ok, does IRQ0 through the IOAPIC work?
-+		 */
-+		unmask_IO_APIC_irq(0);
-+		if (timer_irq_works()) {
-+			if (nmi_watchdog == NMI_IO_APIC) {
-+				disable_8259A_irq(0);
-+				setup_nmi();
-+				enable_8259A_irq(0);
-+			}
-+			return;
-+		}
-+		clear_IO_APIC_pin(0, pin1);
-+		printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
-+	}
-+
-+	printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
-+	if (pin2 != -1) {
-+		printk("\n..... (found pin %d) ...", pin2);
-+		/*
-+		 * legacy devices should be connected to IO APIC #0
-+		 */
-+		setup_ExtINT_IRQ0_pin(pin2, vector);
-+		if (timer_irq_works()) {
-+			printk("works.\n");
-+			if (pin1 != -1)
-+				replace_pin_at_irq(0, 0, pin1, 0, pin2);
-+			else
-+				add_pin_to_irq(0, 0, pin2);
-+			if (nmi_watchdog == NMI_IO_APIC) {
-+				setup_nmi();
-+			}
-+			return;
-+		}
-+		/*
-+		 * Cleanup, just in case ...
-+		 */
-+		clear_IO_APIC_pin(0, pin2);
-+	}
-+	printk(" failed.\n");
-+
-+	if (nmi_watchdog == NMI_IO_APIC) {
-+		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
-+		nmi_watchdog = 0;
-+	}
-+
-+	printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
-+
-+	disable_8259A_irq(0);
-+	irq_desc[0].handler = &lapic_irq_type;
-+	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
-+	enable_8259A_irq(0);
-+
-+	if (timer_irq_works()) {
-+		printk(" works.\n");
-+		return;
-+	}
-+	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
-+	printk(" failed.\n");
-+
-+	printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
-+
-+	timer_ack = 0;
-+	init_8259A(0);
-+	make_8259A_irq(0);
-+	apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
-+
-+	unlock_ExtINT_logic();
-+
-+	if (timer_irq_works()) {
-+		printk(" works.\n");
-+		return;
-+	}
-+	printk(" failed :(.\n");
-+	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
-+		"report.  Then try booting with the 'noapic' option");
-+}
-+#else
-+#define check_timer() ((void)0)
-+#endif
-+
-+/*
-+ *
-+ * IRQ's that are handled by the PIC in the MPS IOAPIC case.
-+ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
-+ *   Linux doesn't really care, as it's not actually used
-+ *   for any interrupt handling anyway.
-+ */
-+#define PIC_IRQS	(1 << PIC_CASCADE_IR)
-+
-+void __init setup_IO_APIC(void)
-+{
-+	enable_IO_APIC();
-+
-+	if (acpi_ioapic)
-+		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
-+	else
-+		io_apic_irqs = ~PIC_IRQS;
-+
-+	printk("ENABLING IO-APIC IRQs\n");
-+
-+	/*
-+	 * Set up IO-APIC IRQ routing.
-+	 */
-+	if (!acpi_ioapic)
-+		setup_ioapic_ids_from_mpc();
-+#ifndef CONFIG_XEN
-+	sync_Arb_IDs();
-+#endif
-+	setup_IO_APIC_irqs();
-+	init_IO_APIC_traps();
-+	check_timer();
-+	if (!acpi_ioapic)
-+		print_IO_APIC();
-+}
-+
-+/*
-+ *	Called after all the initialization is done. If we didn't find any
-+ *	APIC bugs, then we can allow the modify fast path.
-+ */
-+ 
-+static int __init io_apic_bug_finalize(void)
-+{
-+	if(sis_apic_bug == -1)
-+		sis_apic_bug = 0;
-+	return 0;
-+}
-+
-+late_initcall(io_apic_bug_finalize);
-+
-+struct sysfs_ioapic_data {
-+	struct sys_device dev;
-+	struct IO_APIC_route_entry entry[0];
-+};
-+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
-+
-+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+	struct IO_APIC_route_entry *entry;
-+	struct sysfs_ioapic_data *data;
-+	unsigned long flags;
-+	int i;
-+	
-+	data = container_of(dev, struct sysfs_ioapic_data, dev);
-+	entry = data->entry;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		*(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
-+		*(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
-+	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return 0;
-+}
-+
-+static int ioapic_resume(struct sys_device *dev)
-+{
-+	struct IO_APIC_route_entry *entry;
-+	struct sysfs_ioapic_data *data;
-+	unsigned long flags;
-+	union IO_APIC_reg_00 reg_00;
-+	int i;
-+	
-+	data = container_of(dev, struct sysfs_ioapic_data, dev);
-+	entry = data->entry;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(dev->id, 0);
-+	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
-+		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
-+		io_apic_write(dev->id, 0, reg_00.raw);
-+	}
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
-+		io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
-+	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return 0;
-+}
-+
-+static struct sysdev_class ioapic_sysdev_class = {
-+	set_kset_name("ioapic"),
-+	.suspend = ioapic_suspend,
-+	.resume = ioapic_resume,
-+};
-+
-+static int __init ioapic_init_sysfs(void)
-+{
-+	struct sys_device * dev;
-+	int i, size, error = 0;
-+
-+	error = sysdev_class_register(&ioapic_sysdev_class);
-+	if (error)
-+		return error;
-+
-+	for (i = 0; i < nr_ioapics; i++ ) {
-+		size = sizeof(struct sys_device) + nr_ioapic_registers[i] 
-+			* sizeof(struct IO_APIC_route_entry);
-+		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
-+		if (!mp_ioapic_data[i]) {
-+			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+			continue;
-+		}
-+		memset(mp_ioapic_data[i], 0, size);
-+		dev = &mp_ioapic_data[i]->dev;
-+		dev->id = i; 
-+		dev->cls = &ioapic_sysdev_class;
-+		error = sysdev_register(dev);
-+		if (error) {
-+			kfree(mp_ioapic_data[i]);
-+			mp_ioapic_data[i] = NULL;
-+			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+			continue;
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+device_initcall(ioapic_init_sysfs);
-+
-+/* --------------------------------------------------------------------------
-+                          ACPI-based IOAPIC Configuration
-+   -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI_BOOT
-+
-+int __init io_apic_get_unique_id (int ioapic, int apic_id)
-+{
-+#ifndef CONFIG_XEN
-+	union IO_APIC_reg_00 reg_00;
-+	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
-+	physid_mask_t tmp;
-+	unsigned long flags;
-+	int i = 0;
-+
-+	/*
-+	 * The P4 platform supports up to 256 APIC IDs on two separate APIC 
-+	 * buses (one for LAPICs, one for IOAPICs), where predecessors only 
-+	 * supported up to 16 on one shared APIC bus.
-+	 * 
-+	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
-+	 *      advantage of new APIC bus architecture.
-+	 */
-+
-+	if (physids_empty(apic_id_map))
-+		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(ioapic, 0);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	if (apic_id >= get_physical_broadcast()) {
-+		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
-+			"%d\n", ioapic, apic_id, reg_00.bits.ID);
-+		apic_id = reg_00.bits.ID;
-+	}
-+
-+	/*
-+	 * Every APIC in a system must have a unique ID or we get lots of nice 
-+	 * 'stuck on smp_invalidate_needed IPI wait' messages.
-+	 */
-+	if (check_apicid_used(apic_id_map, apic_id)) {
-+
-+		for (i = 0; i < get_physical_broadcast(); i++) {
-+			if (!check_apicid_used(apic_id_map, i))
-+				break;
-+		}
-+
-+		if (i == get_physical_broadcast())
-+			panic("Max apic_id exceeded!\n");
-+
-+		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
-+			"trying %d\n", ioapic, apic_id, i);
-+
-+		apic_id = i;
-+	} 
-+
-+	tmp = apicid_to_cpu_present(apic_id);
-+	physids_or(apic_id_map, apic_id_map, tmp);
-+
-+	if (reg_00.bits.ID != apic_id) {
-+		reg_00.bits.ID = apic_id;
-+
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(ioapic, 0, reg_00.raw);
-+		reg_00.raw = io_apic_read(ioapic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+		/* Sanity check */
-+		if (reg_00.bits.ID != apic_id)
-+			panic("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
-+	}
-+
-+	apic_printk(APIC_VERBOSE, KERN_INFO
-+			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
-+#endif /* !CONFIG_XEN */
-+
-+	return apic_id;
-+}
-+
-+
-+int __init io_apic_get_version (int ioapic)
-+{
-+	union IO_APIC_reg_01	reg_01;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_01.raw = io_apic_read(ioapic, 1);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return reg_01.bits.version;
-+}
-+
-+
-+int __init io_apic_get_redir_entries (int ioapic)
-+{
-+	union IO_APIC_reg_01	reg_01;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_01.raw = io_apic_read(ioapic, 1);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return reg_01.bits.entries;
-+}
-+
-+
-+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
-+
-+	if (!IO_APIC_IRQ(irq)) {
-+		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-+			ioapic);
-+		return -EINVAL;
-+	}
-+
-+	/*
-+	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
-+	 * Note that we mask (disable) IRQs now -- these get enabled when the
-+	 * corresponding device driver registers for this IRQ.
-+	 */
-+
-+	memset(&entry,0,sizeof(entry));
-+
-+	entry.delivery_mode = INT_DELIVERY_MODE;
-+	entry.dest_mode = INT_DEST_MODE;
-+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+	entry.trigger = edge_level;
-+	entry.polarity = active_high_low;
-+	entry.mask  = 1;
-+
-+	/*
-+	 * IRQs < 16 are already in the irq_2_pin[] map
-+	 */
-+	if (irq >= 16)
-+		add_pin_to_irq(irq, ioapic, pin);
-+
-+	entry.vector = assign_irq_vector(irq);
-+
-+	apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
-+		"(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
-+		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-+		edge_level, active_high_low);
-+
-+	ioapic_register_intr(irq, entry.vector, edge_level);
-+
-+	if (!ioapic && (irq < 16))
-+		disable_8259A_irq(irq);
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
-+	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return 0;
-+}
-+
-+#endif /*CONFIG_ACPI_BOOT*/
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/ioport.c linux-2.6.12-xen/arch/xen/i386/kernel/ioport.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/ioport.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/ioport.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,125 @@
-+/*
-+ *	linux/arch/i386/kernel/ioport.c
-+ *
-+ * This contains the io-permission bitmap code - written by obz, with changes
-+ * by Linus.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/ioport.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/thread_info.h>
-+#include <asm-xen/xen-public/physdev.h>
-+
-+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
-+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
-+{
-+	unsigned long mask;
-+	unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
-+	unsigned int low_index = base & (BITS_PER_LONG-1);
-+	int length = low_index + extent;
-+
-+	if (low_index != 0) {
-+		mask = (~0UL << low_index);
-+		if (length < BITS_PER_LONG)
-+			mask &= ~(~0UL << length);
-+		if (new_value)
-+			*bitmap_base++ |= mask;
-+		else
-+			*bitmap_base++ &= ~mask;
-+		length -= BITS_PER_LONG;
-+	}
-+
-+	mask = (new_value ? ~0UL : 0UL);
-+	while (length >= BITS_PER_LONG) {
-+		*bitmap_base++ = mask;
-+		length -= BITS_PER_LONG;
-+	}
-+
-+	if (length > 0) {
-+		mask = ~(~0UL << length);
-+		if (new_value)
-+			*bitmap_base++ |= mask;
-+		else
-+			*bitmap_base++ &= ~mask;
-+	}
-+}
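-+
-+/*
-+ * Worked example (an illustrative sketch, not referenced elsewhere in
-+ * this file): with BITS_PER_LONG == 32, set_bitmap(bitmap, 30, 4, 1)
-+ * spans two words.  The first pass ORs bits 30-31 of bitmap[0] with
-+ * (~0UL << 30), and the final pass ORs bits 0-1 of bitmap[1] with
-+ * ~(~0UL << 2).
-+ */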
-+
-+
-+/*
-+ * this changes the io permissions bitmap in the current task.
-+ */
-+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-+{
-+	struct thread_struct * t = &current->thread;
-+	unsigned long *bitmap;
-+	physdev_op_t op;
-+
-+	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
-+		return -EINVAL;
-+	if (turn_on && !capable(CAP_SYS_RAWIO))
-+		return -EPERM;
-+
-+	/*
-+	 * If it's the first ioperm() call in this thread's lifetime, set the
-+	 * IO bitmap up. ioperm() is much less timing critical than clone(),
-+	 * this is why we delay this operation until now:
-+	 * which is why we delay this operation until now:
-+	if (!t->io_bitmap_ptr) {
-+		bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+		if (!bitmap)
-+			return -ENOMEM;
-+
-+		memset(bitmap, 0xff, IO_BITMAP_BYTES);
-+		t->io_bitmap_ptr = bitmap;
-+
-+		op.cmd = PHYSDEVOP_SET_IOBITMAP;
-+		op.u.set_iobitmap.bitmap   = (char *)bitmap;
-+		op.u.set_iobitmap.nr_ports = IO_BITMAP_BITS;
-+		HYPERVISOR_physdev_op(&op);
-+	}
-+
-+	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
-+
-+	return 0;
-+}
-+
-+/*
-+ * sys_iopl has to be used when you want to access the IO ports
-+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
-+ *
-+ * Here we just change the eflags value on the stack: we allow
-+ * only the super-user to do it. This depends on the stack-layout
-+ * on system-call entry - see also fork() and the signal handling
-+ * code.
-+ */
-+
-+asmlinkage long sys_iopl(unsigned int new_io_pl)
-+{
-+	unsigned int old_io_pl = current->thread.io_pl;
-+	physdev_op_t op;
-+
-+	if (new_io_pl > 3)
-+		return -EINVAL;
-+
-+	/* Need "raw I/O" privileges for direct port access. */
-+	if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
-+		return -EPERM;
-+
-+	/* Change our version of the privilege levels. */
-+	current->thread.io_pl = new_io_pl;
-+
-+	/* Force the change at ring 0. */
-+	op.cmd             = PHYSDEVOP_SET_IOPL;
-+	op.u.set_iopl.iopl = (new_io_pl == 0) ? 1 : new_io_pl;
-+	HYPERVISOR_physdev_op(&op);
-+
-+	return 0;
-+}
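-+
-+/*
-+ * Usage sketch (illustrative only): a privileged process that wants
-+ * unrestricted port access would call
-+ *
-+ *	iopl(3);
-+ *	outb(val, port);
-+ *
-+ * The (new_io_pl == 0) ? 1 : new_io_pl mapping above presumably keeps
-+ * the guest kernel, which runs in ring 1 under Xen, able to perform
-+ * I/O even when a task returns to level 0.
-+ */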
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/irq.c linux-2.6.12-xen/arch/xen/i386/kernel/irq.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/irq.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/irq.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,300 @@
-+/*
-+ *	linux/arch/i386/kernel/irq.c
-+ *
-+ *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
-+ *
-+ * This file contains the lowest level x86-specific interrupt
-+ * entry, irq-stacks and irq statistics code. All the remaining
-+ * irq logic is done by the generic kernel/irq/ code and
-+ * by the x86-specific irq controller code. (e.g. i8259.c and
-+ * io_apic.c.)
-+ */
-+
-+#include <asm/uaccess.h>
-+#include <linux/module.h>
-+#include <linux/seq_file.h>
-+#include <linux/interrupt.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/delay.h>
-+
-+DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp;
-+EXPORT_PER_CPU_SYMBOL(irq_stat);
-+
-+#ifndef CONFIG_X86_LOCAL_APIC
-+/*
-+ * 'what should we do if we get a hw irq event on an illegal vector'.
-+ * each architecture has to answer this themselves.
-+ */
-+void ack_bad_irq(unsigned int irq)
-+{
-+	printk("unexpected IRQ trap at vector %02x\n", irq);
-+}
-+#endif
-+
-+#ifdef CONFIG_4KSTACKS
-+/*
-+ * per-CPU IRQ handling contexts (thread information and stack)
-+ */
-+union irq_ctx {
-+	struct thread_info      tinfo;
-+	u32                     stack[THREAD_SIZE/sizeof(u32)];
-+};
-+
-+static union irq_ctx *hardirq_ctx[NR_CPUS];
-+static union irq_ctx *softirq_ctx[NR_CPUS];
-+#endif
-+
-+/*
-+ * do_IRQ handles all normal device IRQ's (the special
-+ * SMP cross-CPU interrupts have their own specific
-+ * handlers).
-+ */
-+fastcall unsigned int do_IRQ(struct pt_regs *regs)
-+{	
-+	/* high bits used in ret_from_ code */
-+	int irq = regs->orig_eax & __IRQ_MASK(HARDIRQ_BITS);
-+#ifdef CONFIG_4KSTACKS
-+	union irq_ctx *curctx, *irqctx;
-+	u32 *isp;
-+#endif
-+
-+	irq_enter();
-+#ifdef CONFIG_DEBUG_STACKOVERFLOW
-+	/* Debugging check for stack overflow: is there less than 1KB free? */
-+	{
-+		long esp;
-+
-+		__asm__ __volatile__("andl %%esp,%0" :
-+					"=r" (esp) : "0" (THREAD_SIZE - 1));
-+		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
-+			printk("do_IRQ: stack overflow: %ld\n",
-+				esp - sizeof(struct thread_info));
-+			dump_stack();
-+		}
-+	}
-+#endif
-+
-+#ifdef CONFIG_4KSTACKS
-+
-+	curctx = (union irq_ctx *) current_thread_info();
-+	irqctx = hardirq_ctx[smp_processor_id()];
-+
-+	/*
-+	 * this is where we switch to the IRQ stack. However, if we are
-+	 * already using the IRQ stack (because we interrupted a hardirq
-+	 * handler) we can't do that and just have to keep using the
-+	 * current stack (which is the irq stack already after all)
-+	 */
-+	if (curctx != irqctx) {
-+		int arg1, arg2, ebx;
-+
-+		/* build the stack frame on the IRQ stack */
-+		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+		irqctx->tinfo.task = curctx->tinfo.task;
-+		irqctx->tinfo.previous_esp = current_stack_pointer;
-+
-+		asm volatile(
-+			"       xchgl   %%ebx,%%esp      \n"
-+			"       call    __do_IRQ         \n"
-+			"       movl   %%ebx,%%esp      \n"
-+			: "=a" (arg1), "=d" (arg2), "=b" (ebx)
-+			:  "0" (irq),   "1" (regs),  "2" (isp)
-+			: "memory", "cc", "ecx"
-+		);
-+	} else
-+#endif
-+		__do_IRQ(irq, regs);
-+
-+	irq_exit();
-+
-+	return 1;
-+}
-+
-+#ifdef CONFIG_4KSTACKS
-+
-+/*
-+ * These should really be __section__(".bss.page_aligned") as well, but
-+ * gcc 3.0 and earlier don't handle that correctly.
-+ */
-+static char softirq_stack[NR_CPUS * THREAD_SIZE]
-+		__attribute__((__aligned__(THREAD_SIZE)));
-+
-+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-+		__attribute__((__aligned__(THREAD_SIZE)));
-+
-+/*
-+ * allocate per-cpu stacks for hardirq and for softirq processing
-+ */
-+void irq_ctx_init(int cpu)
-+{
-+	union irq_ctx *irqctx;
-+
-+	if (hardirq_ctx[cpu])
-+		return;
-+
-+	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
-+	irqctx->tinfo.task              = NULL;
-+	irqctx->tinfo.exec_domain       = NULL;
-+	irqctx->tinfo.cpu               = cpu;
-+	irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
-+	irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
-+
-+	hardirq_ctx[cpu] = irqctx;
-+
-+	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
-+	irqctx->tinfo.task              = NULL;
-+	irqctx->tinfo.exec_domain       = NULL;
-+	irqctx->tinfo.cpu               = cpu;
-+	irqctx->tinfo.preempt_count     = SOFTIRQ_OFFSET;
-+	irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
-+
-+	softirq_ctx[cpu] = irqctx;
-+
-+	printk("CPU %u irqstacks, hard=%p soft=%p\n",
-+		cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
-+}
-+
-+extern asmlinkage void __do_softirq(void);
-+
-+asmlinkage void do_softirq(void)
-+{
-+	unsigned long flags;
-+	struct thread_info *curctx;
-+	union irq_ctx *irqctx;
-+	u32 *isp;
-+
-+	if (in_interrupt())
-+		return;
-+
-+	local_irq_save(flags);
-+
-+	if (local_softirq_pending()) {
-+		curctx = current_thread_info();
-+		irqctx = softirq_ctx[smp_processor_id()];
-+		irqctx->tinfo.task = curctx->task;
-+		irqctx->tinfo.previous_esp = current_stack_pointer;
-+
-+		/* build the stack frame on the softirq stack */
-+		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+
-+		asm volatile(
-+			"       xchgl   %%ebx,%%esp     \n"
-+			"       call    __do_softirq    \n"
-+			"       movl    %%ebx,%%esp     \n"
-+			: "=b"(isp)
-+			: "0"(isp)
-+			: "memory", "cc", "edx", "ecx", "eax"
-+		);
-+	}
-+
-+	local_irq_restore(flags);
-+}
-+
-+EXPORT_SYMBOL(do_softirq);
-+#endif
-+
-+/*
-+ * Interrupt statistics:
-+ */
-+
-+atomic_t irq_err_count;
-+
-+/*
-+ * /proc/interrupts printing:
-+ */
-+
-+int show_interrupts(struct seq_file *p, void *v)
-+{
-+	int i = *(loff_t *) v, j;
-+	struct irqaction * action;
-+	unsigned long flags;
-+
-+	if (i == 0) {
-+		seq_printf(p, "           ");
-+		for_each_cpu(j)
-+			seq_printf(p, "CPU%d       ",j);
-+		seq_putc(p, '\n');
-+	}
-+
-+	if (i < NR_IRQS) {
-+		spin_lock_irqsave(&irq_desc[i].lock, flags);
-+		action = irq_desc[i].action;
-+		if (!action)
-+			goto skip;
-+		seq_printf(p, "%3d: ",i);
-+#ifndef CONFIG_SMP
-+		seq_printf(p, "%10u ", kstat_irqs(i));
-+#else
-+		for_each_cpu(j)
-+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-+#endif
-+		seq_printf(p, " %14s", irq_desc[i].handler->typename);
-+		seq_printf(p, "  %s", action->name);
-+
-+		for (action=action->next; action; action = action->next)
-+			seq_printf(p, ", %s", action->name);
-+
-+		seq_putc(p, '\n');
-+skip:
-+		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-+	} else if (i == NR_IRQS) {
-+		seq_printf(p, "NMI: ");
-+		for_each_cpu(j)
-+ 			seq_printf(p, "%10u ", nmi_count(j));
-+		seq_putc(p, '\n');
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		seq_printf(p, "LOC: ");
-+		for_each_cpu(j)
-+			seq_printf(p, "%10u ", per_cpu(irq_stat,j).apic_timer_irqs);
-+		seq_putc(p, '\n');
-+#endif
-+		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-+#if defined(CONFIG_X86_IO_APIC)
-+		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-+#endif
-+	}
-+	return 0;
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+void fixup_irqs(cpumask_t map)
-+{
-+	unsigned int irq;
-+	static int warned;
-+
-+	for (irq = 0; irq < NR_IRQS; irq++) {
-+		cpumask_t mask;
-+		if (irq == 2)
-+			continue;
-+
-+		cpus_and(mask, irq_affinity[irq], map);
-+		if (any_online_cpu(mask) == NR_CPUS) {
-+			/*printk("Breaking affinity for irq %i\n", irq);*/
-+			mask = map;
-+		}
-+		if (irq_desc[irq].handler->set_affinity)
-+			irq_desc[irq].handler->set_affinity(irq, mask);
-+		else if (irq_desc[irq].action && !(warned++))
-+			printk("Cannot set affinity for irq %i\n", irq);
-+	}
-+
-+#if 0
-+	barrier();
-+	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
-+	   [note the nop - the interrupt-enable boundary on x86 is two
-+	   instructions from sti] - to flush out pending hardirqs and
-+	   IPIs. After this point nothing is supposed to reach this CPU." */
-+	__asm__ __volatile__("sti; nop; cli");
-+	barrier();
-+#else
-+	/* That doesn't seem sufficient.  Give it 1ms. */
-+	local_irq_enable();
-+	mdelay(1);
-+	local_irq_disable();
-+#endif
-+}
-+#endif
-+
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/ldt.c linux-2.6.12-xen/arch/xen/i386/kernel/ldt.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/ldt.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/ldt.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,268 @@
-+/*
-+ * linux/kernel/ldt.c
-+ *
-+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-+ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/slab.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+
-+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-+static void flush_ldt(void *null)
-+{
-+	if (current->active_mm)
-+		load_LDT(&current->active_mm->context);
-+}
-+#endif
-+
-+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
-+{
-+	void *oldldt;
-+	void *newldt;
-+	int oldsize;
-+
-+	if (mincount <= pc->size)
-+		return 0;
-+	oldsize = pc->size;
-+	mincount = (mincount+511)&(~511);
-+	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
-+		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
-+	else
-+		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-+
-+	if (!newldt)
-+		return -ENOMEM;
-+
-+	if (oldsize)
-+		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-+	oldldt = pc->ldt;
-+	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
-+	pc->ldt = newldt;
-+	wmb();
-+	pc->size = mincount;
-+	wmb();
-+
-+	if (reload) {
-+#ifdef CONFIG_SMP
-+		cpumask_t mask;
-+		preempt_disable();
-+#endif
-+		make_pages_readonly(
-+			pc->ldt,
-+			(pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		load_LDT(pc);
-+#ifdef CONFIG_SMP
-+		mask = cpumask_of_cpu(smp_processor_id());
-+		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-+			smp_call_function(flush_ldt, NULL, 1, 1);
-+		preempt_enable();
-+#endif
-+	}
-+	if (oldsize) {
-+		make_pages_writable(
-+			oldldt,
-+			(oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(oldldt);
-+		else
-+			kfree(oldldt);
-+	}
-+	return 0;
-+}
-+
-+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-+{
-+	int err = alloc_ldt(new, old->size, 0);
-+	if (err < 0)
-+		return err;
-+	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-+	make_pages_readonly(
-+		new->ldt,
-+		(new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+		XENFEAT_writable_descriptor_tables);
-+	return 0;
-+}
-+
-+/*
-+ * we do not have to muck with descriptors here, that is
-+ * done in switch_mm() as needed.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-+{
-+	struct mm_struct * old_mm;
-+	int retval = 0;
-+
-+	init_MUTEX(&mm->context.sem);
-+	mm->context.size = 0;
-+	old_mm = current->mm;
-+	if (old_mm && old_mm->context.size > 0) {
-+		down(&old_mm->context.sem);
-+		retval = copy_ldt(&mm->context, &old_mm->context);
-+		up(&old_mm->context.sem);
-+	}
-+	return retval;
-+}
-+
-+/*
-+ * No need to lock the MM as we are the last user
-+ */
-+void destroy_context(struct mm_struct *mm)
-+{
-+	if (mm->context.size) {
-+		if (mm == current->active_mm)
-+			clear_LDT();
-+		make_pages_writable(
-+			mm->context.ldt,
-+			(mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(mm->context.ldt);
-+		else
-+			kfree(mm->context.ldt);
-+		mm->context.size = 0;
-+	}
-+}
-+
-+static int read_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+	int err;
-+	unsigned long size;
-+	struct mm_struct * mm = current->mm;
-+
-+	if (!mm->context.size)
-+		return 0;
-+	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-+		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-+
-+	down(&mm->context.sem);
-+	size = mm->context.size*LDT_ENTRY_SIZE;
-+	if (size > bytecount)
-+		size = bytecount;
-+
-+	err = 0;
-+	if (copy_to_user(ptr, mm->context.ldt, size))
-+		err = -EFAULT;
-+	up(&mm->context.sem);
-+	if (err < 0)
-+		goto error_return;
-+	if (size != bytecount) {
-+		/* zero-fill the rest */
-+		if (clear_user(ptr+size, bytecount-size) != 0) {
-+			err = -EFAULT;
-+			goto error_return;
-+		}
-+	}
-+	return bytecount;
-+error_return:
-+	return err;
-+}
-+
-+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+	int err;
-+	unsigned long size;
-+	void *address;
-+
-+	err = 0;
-+	address = &default_ldt[0];
-+	size = 5*sizeof(struct desc_struct);
-+	if (size > bytecount)
-+		size = bytecount;
-+
-+	err = size;
-+	if (copy_to_user(ptr, address, size))
-+		err = -EFAULT;
-+
-+	return err;
-+}
-+
-+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
-+{
-+	struct mm_struct * mm = current->mm;
-+	__u32 entry_1, entry_2;
-+	int error;
-+	struct user_desc ldt_info;
-+
-+	error = -EINVAL;
-+	if (bytecount != sizeof(ldt_info))
-+		goto out;
-+	error = -EFAULT; 	
-+	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
-+		goto out;
-+
-+	error = -EINVAL;
-+	if (ldt_info.entry_number >= LDT_ENTRIES)
-+		goto out;
-+	if (ldt_info.contents == 3) {
-+		if (oldmode)
-+			goto out;
-+		if (ldt_info.seg_not_present == 0)
-+			goto out;
-+	}
-+
-+	down(&mm->context.sem);
-+	if (ldt_info.entry_number >= mm->context.size) {
-+		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
-+		if (error < 0)
-+			goto out_unlock;
-+	}
-+
-+   	/* Allow LDTs to be cleared by the user. */
-+   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-+		if (oldmode || LDT_empty(&ldt_info)) {
-+			entry_1 = 0;
-+			entry_2 = 0;
-+			goto install;
-+		}
-+	}
-+
-+	entry_1 = LDT_entry_a(&ldt_info);
-+	entry_2 = LDT_entry_b(&ldt_info);
-+	if (oldmode)
-+		entry_2 &= ~(1 << 20);
-+
-+	/* Install the new entry ...  */
-+install:
-+	error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
-+				entry_1, entry_2);
-+
-+out_unlock:
-+	up(&mm->context.sem);
-+out:
-+	return error;
-+}
-+
-+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
-+{
-+	int ret = -ENOSYS;
-+
-+	switch (func) {
-+	case 0:
-+		ret = read_ldt(ptr, bytecount);
-+		break;
-+	case 1:
-+		ret = write_ldt(ptr, bytecount, 1);
-+		break;
-+	case 2:
-+		ret = read_default_ldt(ptr, bytecount);
-+		break;
-+	case 0x11:
-+		ret = write_ldt(ptr, bytecount, 0);
-+		break;
-+	}
-+	return ret;
-+}
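-+
-+/*
-+ * Usage sketch (illustrative only): a threading library typically fills
-+ * in a struct user_desc and installs it with
-+ *
-+ *	modify_ldt(0x11, &desc, sizeof(desc));
-+ *
-+ * func 0 reads the LDT back, func 2 reads the default LDT, and func 1
-+ * is the legacy write interface (oldmode), which clears the descriptor's
-+ * AVL bit.
-+ */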
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/Makefile linux-2.6.12-xen/arch/xen/i386/kernel/Makefile
---- pristine-linux-2.6.12/arch/xen/i386/kernel/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,100 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+CFLAGS	+= -Iarch/$(XENARCH)/kernel
-+AFLAGS	+= -Iarch/$(XENARCH)/kernel
-+
-+extra-y := head.o init_task.o
-+
-+obj-y	:= process.o signal.o entry.o traps.o \
-+		time.o ioport.o ldt.o setup.o \
-+		pci-dma.o i386_ksyms.o irq.o quirks.o fixup.o
-+
-+c-obj-y	:= semaphore.o vm86.o \
-+		ptrace.o sys_i386.o \
-+		i387.o dmi_scan.o bootflag.o
-+s-obj-y	:=
-+
-+obj-y				+= cpu/
-+#obj-y				+= timers/
-+obj-$(CONFIG_ACPI_BOOT)		+= acpi/
-+#c-obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot.o
-+c-obj-$(CONFIG_MCA)		+= mca.o
-+c-obj-$(CONFIG_X86_MSR)		+= msr.o
-+c-obj-$(CONFIG_X86_CPUID)	+= cpuid.o
-+obj-$(CONFIG_MICROCODE)		+= microcode.o
-+c-obj-$(CONFIG_APM)		+= apm.o
-+obj-$(CONFIG_X86_SMP)		+= smp.o
-+#obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
-+obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
-+obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o
-+c-obj-$(CONFIG_X86_LOCAL_APIC)	+= nmi.o
-+obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
-+c-obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups.o
-+c-obj-$(CONFIG_X86_NUMAQ)	+= numaq.o
-+c-obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit.o
-+c-obj-$(CONFIG_MODULES)		+= module.o
-+c-obj-y				+= sysenter.o
-+obj-y				+= vsyscall.o
-+c-obj-$(CONFIG_ACPI_SRAT) 	+= srat.o
-+c-obj-$(CONFIG_HPET_TIMER) 	+= time_hpet.o
-+c-obj-$(CONFIG_EFI) 		+= efi.o efi_stub.o
-+c-obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-+c-obj-$(CONFIG_SMP_ALTERNATIVES)+= smpalts.o
-+obj-$(CONFIG_SWIOTLB)		+= swiotlb.o
-+
-+EXTRA_AFLAGS   := -traditional
-+
-+c-obj-$(CONFIG_SCx200)		+= scx200.o
-+
-+# vsyscall.o contains the vsyscall DSO images as __initdata.
-+# We must build both images before we can assemble it.
-+# Note: kbuild does not track this dependency due to usage of .incbin
-+$(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
-+targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
-+targets += vsyscall-note.o vsyscall.lds
-+
-+# The DSO images are built using a special linker script.
-+quiet_cmd_syscall = SYSCALL $@
-+      cmd_syscall = $(CC) -m elf_i386 -nostdlib $(SYSCFLAGS_$(@F)) \
-+		          -Wl,-T,$(filter-out FORCE,$^) -o $@
-+
-+export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH)
-+
-+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
-+SYSCFLAGS_vsyscall-sysenter.so	= $(vsyscall-flags)
-+SYSCFLAGS_vsyscall-int80.so	= $(vsyscall-flags)
-+
-+$(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
-+$(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
-+		      $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
-+	$(call if_changed,syscall)
-+
-+# We also create a special relocatable object that should mirror the symbol
-+# table and layout of the linked DSO.  With ld -R we can then refer to
-+# these symbols in the kernel code rather than hand-coded addresses.
-+extra-y += vsyscall-syms.o
-+$(obj)/built-in.o: $(obj)/vsyscall-syms.o
-+$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
-+
-+SYSCFLAGS_vsyscall-syms.o = -r
-+$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
-+			$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
-+	$(call if_changed,syscall)
-+
-+c-link	:=
-+s-link	:= vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o vsyscall.lds.o
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-obj-m) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
-+	@ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
-+
-+$(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
-+
-+obj-y	+= $(c-obj-y) $(s-obj-y)
-+obj-m	+= $(c-obj-m)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-m) $(c-obj-) $(c-link))
-+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/microcode.c linux-2.6.12-xen/arch/xen/i386/kernel/microcode.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/microcode.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/microcode.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,163 @@
-+/*
-+ *	Intel CPU Microcode Update Driver for Linux
-+ *
-+ *	Copyright (C) 2000-2004 Tigran Aivazian
-+ *
-+ *	This driver allows upgrading the microcode on Intel processors
-+ *	belonging to the IA-32 family - PentiumPro, Pentium II,
-+ *	Pentium III, Xeon, Pentium 4, etc.
-+ *
-+ *	Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual, 
-+ *	Order Number 245472 or free download from:
-+ *		
-+ *	http://developer.intel.com/design/pentium4/manuals/245472.htm
-+ *
-+ *	For more information, go to http://www.urbanmyth.org/microcode
-+ *
-+ *	This program is free software; you can redistribute it and/or
-+ *	modify it under the terms of the GNU General Public License
-+ *	as published by the Free Software Foundation; either version
-+ *	2 of the License, or (at your option) any later version.
-+ */
-+
-+//#define DEBUG /* pr_debug */
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/sched.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/miscdevice.h>
-+#include <linux/spinlock.h>
-+#include <linux/mm.h>
-+#include <linux/syscalls.h>
-+
-+#include <asm/msr.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+
-+MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
-+MODULE_AUTHOR("Tigran Aivazian <tigran at veritas.com>");
-+MODULE_LICENSE("GPL");
-+
-+#define MICROCODE_VERSION 	"1.14-xen"
-+
-+#define DEFAULT_UCODE_DATASIZE 	(2000) 	  /* 2000 bytes */
-+#define MC_HEADER_SIZE		(sizeof (microcode_header_t))  	  /* 48 bytes */
-+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
-+
-+/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
-+static DECLARE_MUTEX(microcode_sem);
-+
-+static void __user *user_buffer;	/* user area microcode data buffer */
-+static unsigned int user_buffer_size;	/* its size */
-+				
-+static int microcode_open (struct inode *unused1, struct file *unused2)
-+{
-+	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-+}
-+
-+
-+static int do_microcode_update (void)
-+{
-+	int err;
-+	dom0_op_t op;
-+
-+	err = sys_mlock((unsigned long)user_buffer, user_buffer_size);
-+	if (err != 0)
-+		return err;
-+
-+	op.cmd = DOM0_MICROCODE;
-+	op.u.microcode.data = user_buffer;
-+	op.u.microcode.length = user_buffer_size;
-+	err = HYPERVISOR_dom0_op(&op);
-+
-+	(void)sys_munlock((unsigned long)user_buffer, user_buffer_size);
-+
-+	return err;
-+}
-+
-+static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
-+{
-+	ssize_t ret;
-+
-+	if (len < DEFAULT_UCODE_TOTALSIZE) {
-+		printk(KERN_ERR "microcode: not enough data\n"); 
-+		return -EINVAL;
-+	}
-+
-+	if ((len >> PAGE_SHIFT) > num_physpages) {
-+		printk(KERN_ERR "microcode: too much data (max %ld pages)\n", num_physpages);
-+		return -EINVAL;
-+	}
-+
-+	down(&microcode_sem);
-+
-+	user_buffer = (void __user *) buf;
-+	user_buffer_size = (int) len;
-+
-+	ret = do_microcode_update();
-+	if (!ret)
-+		ret = (ssize_t)len;
-+
-+	up(&microcode_sem);
-+
-+	return ret;
-+}
-+
-+static int microcode_ioctl (struct inode *inode, struct file *file, 
-+		unsigned int cmd, unsigned long arg)
-+{
-+	switch (cmd) {
-+		/* 
-+		 *  XXX: will be removed after microcode_ctl 
-+		 *  is updated to ignore failure of this ioctl()
-+		 */
-+		case MICROCODE_IOCFREE:
-+			return 0;
-+		default:
-+			return -EINVAL;
-+	}
-+	return -EINVAL;
-+}
-+
-+static struct file_operations microcode_fops = {
-+	.owner		= THIS_MODULE,
-+	.write		= microcode_write,
-+	.ioctl		= microcode_ioctl,
-+	.open		= microcode_open,
-+};
-+
-+static struct miscdevice microcode_dev = {
-+	.minor		= MICROCODE_MINOR,
-+	.name		= "microcode",
-+	.devfs_name	= "cpu/microcode",
-+	.fops		= &microcode_fops,
-+};
-+
-+static int __init microcode_init (void)
-+{
-+	int error;
-+
-+	error = misc_register(&microcode_dev);
-+	if (error) {
-+		printk(KERN_ERR
-+			"microcode: can't misc_register on minor=%d\n",
-+			MICROCODE_MINOR);
-+		return error;
-+	}
-+
-+	printk(KERN_INFO 
-+		"IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran at veritas.com>\n");
-+	return 0;
-+}
-+
-+static void __exit microcode_exit (void)
-+{
-+	misc_deregister(&microcode_dev);
-+	printk(KERN_INFO "IA-32 Microcode Update Driver v" MICROCODE_VERSION " unregistered\n");
-+}
-+
-+module_init(microcode_init)
-+module_exit(microcode_exit)
-+MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/mpparse.c linux-2.6.12-xen/arch/xen/i386/kernel/mpparse.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/mpparse.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/mpparse.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1126 @@
-+/*
-+ *	Intel Multiprocessor Specification 1.1 and 1.4
-+ *	compliant MP-table parsing routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *		Erich Boleyn	:	MP v1.4 and additional changes.
-+ *		Alan Cox	:	Added EBDA scanning
-+ *		Ingo Molnar	:	various cleanups and rewrites
-+ *		Maciej W. Rozycki:	Bits for default MP configurations
-+ *		Paul Diefenbaugh:	Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/irq.h>
-+#include <linux/init.h>
-+#include <linux/acpi.h>
-+#include <linux/delay.h>
-+#include <linux/config.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/bitops.h>
-+
-+#include <asm/smp.h>
-+#include <asm/acpi.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/io_apic.h>
-+
-+#include <mach_apic.h>
-+#include <mach_mpparse.h>
-+#include <bios_ebda.h>
-+
-+/* Have we found an MP table */
-+int smp_found_config;
-+unsigned int __initdata maxcpus = NR_CPUS;
-+
-+/*
-+ * Various Linux-internal data structures created from the
-+ * MP-table.
-+ */
-+int apic_version [MAX_APICS];
-+int mp_bus_id_to_type [MAX_MP_BUSSES];
-+int mp_bus_id_to_node [MAX_MP_BUSSES];
-+int mp_bus_id_to_local [MAX_MP_BUSSES];
-+int quad_local_to_mp_bus_id [NR_CPUS/4][4];
-+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+static int mp_current_pci_id;
-+
-+/* I/O APIC entries */
-+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-+
-+/* # of MP IRQ source entries */
-+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-+
-+/* MP IRQ source entries */
-+int mp_irq_entries;
-+
-+int nr_ioapics;
-+
-+int pic_mode;
-+unsigned long mp_lapic_addr;
-+
-+/* Processor that is doing the boot up */
-+unsigned int boot_cpu_physical_apicid = -1U;
-+unsigned int boot_cpu_logical_apicid = -1U;
-+/* Internal processor count */
-+static unsigned int __initdata num_processors;
-+
-+/* Bitmask of physically existing CPUs */
-+physid_mask_t phys_cpu_present_map;
-+
-+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+/*
-+ * Intel MP BIOS table parsing routines:
-+ */
-+
-+
-+/*
-+ * Checksum an MP configuration block.
-+ */
-+
-+static int __init mpf_checksum(unsigned char *mp, int len)
-+{
-+	int sum = 0;
-+
-+	while (len--)
-+		sum += *mp++;
-+
-+	return sum & 0xFF;
-+}
-+
-+/*
-+ * Have to match translation table entries to main table entries by counter
-+ * hence the mpc_record variable .... can't see a less disgusting way of
-+ * doing this ....
-+ */
-+
-+static int mpc_record; 
-+static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
-+
-+#ifdef CONFIG_X86_NUMAQ
-+static int MP_valid_apicid(int apicid, int version)
-+{
-+	return hweight_long(apicid & 0xf) == 1 && (apicid >> 4) != 0xf;
-+}
-+#elif !defined(CONFIG_XEN)
-+static int MP_valid_apicid(int apicid, int version)
-+{
-+	if (version >= 0x14)
-+		return apicid < 0xff;
-+	else
-+		return apicid < 0xf;
-+}
-+#endif
-+
-+#ifndef CONFIG_XEN
-+static void __init MP_processor_info (struct mpc_config_processor *m)
-+{
-+ 	int ver, apicid;
-+	physid_mask_t tmp;
-+ 	
-+	if (!(m->mpc_cpuflag & CPU_ENABLED))
-+		return;
-+
-+	apicid = mpc_apic_id(m, translation_table[mpc_record]);
-+
-+	if (m->mpc_featureflag&(1<<0))
-+		Dprintk("    Floating point unit present.\n");
-+	if (m->mpc_featureflag&(1<<7))
-+		Dprintk("    Machine Exception supported.\n");
-+	if (m->mpc_featureflag&(1<<8))
-+		Dprintk("    64 bit compare & exchange supported.\n");
-+	if (m->mpc_featureflag&(1<<9))
-+		Dprintk("    Internal APIC present.\n");
-+	if (m->mpc_featureflag&(1<<11))
-+		Dprintk("    SEP present.\n");
-+	if (m->mpc_featureflag&(1<<12))
-+		Dprintk("    MTRR  present.\n");
-+	if (m->mpc_featureflag&(1<<13))
-+		Dprintk("    PGE  present.\n");
-+	if (m->mpc_featureflag&(1<<14))
-+		Dprintk("    MCA  present.\n");
-+	if (m->mpc_featureflag&(1<<15))
-+		Dprintk("    CMOV  present.\n");
-+	if (m->mpc_featureflag&(1<<16))
-+		Dprintk("    PAT  present.\n");
-+	if (m->mpc_featureflag&(1<<17))
-+		Dprintk("    PSE  present.\n");
-+	if (m->mpc_featureflag&(1<<18))
-+		Dprintk("    PSN  present.\n");
-+	if (m->mpc_featureflag&(1<<19))
-+		Dprintk("    Cache Line Flush Instruction present.\n");
-+	/* 20 Reserved */
-+	if (m->mpc_featureflag&(1<<21))
-+		Dprintk("    Debug Trace and EMON Store present.\n");
-+	if (m->mpc_featureflag&(1<<22))
-+		Dprintk("    ACPI Thermal Throttle Registers  present.\n");
-+	if (m->mpc_featureflag&(1<<23))
-+		Dprintk("    MMX  present.\n");
-+	if (m->mpc_featureflag&(1<<24))
-+		Dprintk("    FXSR  present.\n");
-+	if (m->mpc_featureflag&(1<<25))
-+		Dprintk("    XMM  present.\n");
-+	if (m->mpc_featureflag&(1<<26))
-+		Dprintk("    Willamette New Instructions  present.\n");
-+	if (m->mpc_featureflag&(1<<27))
-+		Dprintk("    Self Snoop  present.\n");
-+	if (m->mpc_featureflag&(1<<28))
-+		Dprintk("    HT  present.\n");
-+	if (m->mpc_featureflag&(1<<29))
-+		Dprintk("    Thermal Monitor present.\n");
-+	/* 30, 31 Reserved */
-+
-+
-+	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+		Dprintk("    Bootup CPU\n");
-+		boot_cpu_physical_apicid = m->mpc_apicid;
-+		boot_cpu_logical_apicid = apicid;
-+	}
-+
-+	if (num_processors >= NR_CPUS) {
-+		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-+			"  Processor ignored.\n", NR_CPUS); 
-+		return;
-+	}
-+
-+	if (num_processors >= maxcpus) {
-+		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
-+			" Processor ignored.\n", maxcpus); 
-+		return;
-+	}
-+	num_processors++;
-+	ver = m->mpc_apicver;
-+
-+	if (!MP_valid_apicid(apicid, ver)) {
-+		printk(KERN_WARNING "Processor #%d INVALID. (Max ID: %d).\n",
-+			m->mpc_apicid, MAX_APICS);
-+		--num_processors;
-+		return;
-+	}
-+
-+	tmp = apicid_to_cpu_present(apicid);
-+	physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
-+	
-+	/*
-+	 * Validate version
-+	 */
-+	if (ver == 0x0) {
-+		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
-+		ver = 0x10;
-+	}
-+	apic_version[m->mpc_apicid] = ver;
-+	bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
-+}
-+#else
-+void __init MP_processor_info (struct mpc_config_processor *m)
-+{
-+	num_processors++;
-+}
-+#endif /* CONFIG_XEN */
-+
-+static void __init MP_bus_info (struct mpc_config_bus *m)
-+{
-+	char str[7];
-+
-+	memcpy(str, m->mpc_bustype, 6);
-+	str[6] = 0;
-+
-+	mpc_oem_bus_info(m, str, translation_table[mpc_record]);
-+
-+	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
-+	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
-+	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
-+		mpc_oem_pci_bus(m, translation_table[mpc_record]);
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
-+		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-+		mp_current_pci_id++;
-+	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-+	} else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
-+	} else {
-+		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
-+	}
-+}
-+
-+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
-+{
-+	if (!(m->mpc_flags & MPC_APIC_USABLE))
-+		return;
-+
-+	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
-+		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
-+			MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
-+	}
-+	if (!m->mpc_apicaddr) {
-+		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
-+			" found in MP table, skipping!\n");
-+		return;
-+	}
-+	mp_ioapics[nr_ioapics] = *m;
-+	nr_ioapics++;
-+}
-+
-+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
-+{
-+	mp_irqs [mp_irq_entries] = *m;
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-+			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!!\n");
-+}
-+
-+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
-+{
-+	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-+			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-+	/*
-+	 * Well it seems all SMP boards in existence
-+	 * use ExtINT/LVT1 == LINT0 and
-+	 * NMI/LVT2 == LINT1 - the following check
-+	 * will show us if this assumption is false.
-+	 * Until then we do not have to add baggage.
-+	 */
-+	if ((m->mpc_irqtype == mp_ExtINT) &&
-+		(m->mpc_destapiclint != 0))
-+			BUG();
-+	if ((m->mpc_irqtype == mp_NMI) &&
-+		(m->mpc_destapiclint != 1))
-+			BUG();
-+}
-+
-+#ifdef CONFIG_X86_NUMAQ
-+static void __init MP_translation_info (struct mpc_config_translation *m)
-+{
-+	printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
-+
-+	if (mpc_record >= MAX_MPC_ENTRY) 
-+		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
-+	else
-+		translation_table[mpc_record] = m; /* stash this for later */
-+	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
-+		node_set_online(m->trans_quad);
-+}
-+
-+/*
-+ * Read/parse the MPC oem tables
-+ */
-+
-+static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
-+	unsigned short oemsize)
-+{
-+	int count = sizeof (*oemtable); /* the header size */
-+	unsigned char *oemptr = ((unsigned char *)oemtable)+count;
-+	
-+	mpc_record = 0;
-+	printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
-+	if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
-+	{
-+		printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
-+			oemtable->oem_signature[0],
-+			oemtable->oem_signature[1],
-+			oemtable->oem_signature[2],
-+			oemtable->oem_signature[3]);
-+		return;
-+	}
-+	if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
-+	{
-+		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
-+		return;
-+	}
-+	while (count < oemtable->oem_length) {
-+		switch (*oemptr) {
-+			case MP_TRANSLATION:
-+			{
-+				struct mpc_config_translation *m=
-+					(struct mpc_config_translation *)oemptr;
-+				MP_translation_info(m);
-+				oemptr += sizeof(*m);
-+				count += sizeof(*m);
-+				++mpc_record;
-+				break;
-+			}
-+			default:
-+			{
-+				printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
-+				return;
-+			}
-+		}
-+       }
-+}
-+
-+static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
-+		char *productid)
-+{
-+	if (strncmp(oem, "IBM NUMA", 8))
-+		printk("Warning!  May not be a NUMA-Q system!\n");
-+	if (mpc->mpc_oemptr)
-+		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
-+				mpc->mpc_oemsize);
-+}
-+#endif	/* CONFIG_X86_NUMAQ */
-+
-+/*
-+ * Read/parse the MPC
-+ */
-+
-+static int __init smp_read_mpc(struct mp_config_table *mpc)
-+{
-+	char str[16];
-+	char oem[10];
-+	int count=sizeof(*mpc);
-+	unsigned char *mpt=((unsigned char *)mpc)+count;
-+
-+	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-+		printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
-+			*(u32 *)mpc->mpc_signature);
-+		return 0;
-+	}
-+	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-+		printk(KERN_ERR "SMP mptable: checksum error!\n");
-+		return 0;
-+	}
-+	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-+		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
-+			mpc->mpc_spec);
-+		return 0;
-+	}
-+	if (!mpc->mpc_lapic) {
-+		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
-+		return 0;
-+	}
-+	memcpy(oem,mpc->mpc_oem,8);
-+	oem[8]=0;
-+	printk(KERN_INFO "OEM ID: %s ",oem);
-+
-+	memcpy(str,mpc->mpc_productid,12);
-+	str[12]=0;
-+	printk("Product ID: %s ",str);
-+
-+	mps_oem_check(mpc, oem, str);
-+
-+	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
-+
-+	/* 
-+	 * Save the local APIC address (it might be non-default) -- but only
-+	 * if we're not using ACPI.
-+	 */
-+	if (!acpi_lapic)
-+		mp_lapic_addr = mpc->mpc_lapic;
-+
-+	/*
-+	 *	Now process the configuration blocks.
-+	 */
-+	mpc_record = 0;
-+	while (count < mpc->mpc_length) {
-+		switch(*mpt) {
-+			case MP_PROCESSOR:
-+			{
-+				struct mpc_config_processor *m=
-+					(struct mpc_config_processor *)mpt;
-+				/* ACPI may have already provided this data */
-+				if (!acpi_lapic)
-+					MP_processor_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_BUS:
-+			{
-+				struct mpc_config_bus *m=
-+					(struct mpc_config_bus *)mpt;
-+				MP_bus_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_IOAPIC:
-+			{
-+				struct mpc_config_ioapic *m=
-+					(struct mpc_config_ioapic *)mpt;
-+				MP_ioapic_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_INTSRC:
-+			{
-+				struct mpc_config_intsrc *m=
-+					(struct mpc_config_intsrc *)mpt;
-+
-+				MP_intsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_LINTSRC:
-+			{
-+				struct mpc_config_lintsrc *m=
-+					(struct mpc_config_lintsrc *)mpt;
-+				MP_lintsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			default:
-+			{
-+				count = mpc->mpc_length;
-+				break;
-+			}
-+		}
-+		++mpc_record;
-+	}
-+	clustered_apic_check();
-+	if (!num_processors)
-+		printk(KERN_ERR "SMP mptable: no processors registered!\n");
-+	return num_processors;
-+}
-+
-+static int __init ELCR_trigger(unsigned int irq)
-+{
-+	unsigned int port;
-+
-+	port = 0x4d0 + (irq >> 3);
-+	return (inb(port) >> (irq & 7)) & 1;
-+}
-+
-+static void __init construct_default_ioirq_mptable(int mpc_default_type)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int i;
-+	int ELCR_fallback = 0;
-+
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;			/* conforming */
-+	intsrc.mpc_srcbus = 0;
-+	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
-+
-+	intsrc.mpc_irqtype = mp_INT;
-+
-+	/*
-+	 *  If true, we have an ISA/PCI system with no IRQ entries
-+	 *  in the MP table. To prevent the PCI interrupts from being set up
-+	 *  incorrectly, we try to use the ELCR. The sanity check to see if
-+	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-+	 *  never be level sensitive, so we simply see if the ELCR agrees.
-+	 *  If it does, we assume it's valid.
-+	 */
-+	if (mpc_default_type == 5) {
-+		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
-+
-+		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-+			printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
-+		else {
-+			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-+			ELCR_fallback = 1;
-+		}
-+	}
-+
-+	for (i = 0; i < 16; i++) {
-+		switch (mpc_default_type) {
-+		case 2:
-+			if (i == 0 || i == 13)
-+				continue;	/* IRQ0 & IRQ13 not connected */
-+			/* fall through */
-+		default:
-+			if (i == 2)
-+				continue;	/* IRQ2 is never connected */
-+		}
-+
-+		if (ELCR_fallback) {
-+			/*
-+			 *  If the ELCR indicates a level-sensitive interrupt, we
-+			 *  copy that information over to the MP table in the
-+			 *  irqflag field (level sensitive, active high polarity).
-+			 */
-+			if (ELCR_trigger(i))
-+				intsrc.mpc_irqflag = 13;
-+			else
-+				intsrc.mpc_irqflag = 0;
-+		}
-+
-+		intsrc.mpc_srcbusirq = i;
-+		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
-+		MP_intsrc_info(&intsrc);
-+	}
-+
-+	intsrc.mpc_irqtype = mp_ExtINT;
-+	intsrc.mpc_srcbusirq = 0;
-+	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
-+	MP_intsrc_info(&intsrc);
-+}
-+
-+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
-+{
-+	struct mpc_config_processor processor;
-+	struct mpc_config_bus bus;
-+	struct mpc_config_ioapic ioapic;
-+	struct mpc_config_lintsrc lintsrc;
-+	int linttypes[2] = { mp_ExtINT, mp_NMI };
-+	int i;
-+
-+	/*
-+	 * local APIC has default address
-+	 */
-+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-+
-+	/*
-+	 * 2 CPUs, numbered 0 & 1.
-+	 */
-+	processor.mpc_type = MP_PROCESSOR;
-+	/* Either an integrated APIC or a discrete 82489DX. */
-+	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	processor.mpc_cpuflag = CPU_ENABLED;
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+				   (boot_cpu_data.x86_model << 4) |
-+				   boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
-+	for (i = 0; i < 2; i++) {
-+		processor.mpc_apicid = i;
-+		MP_processor_info(&processor);
-+	}
-+
-+	bus.mpc_type = MP_BUS;
-+	bus.mpc_busid = 0;
-+	switch (mpc_default_type) {
-+		default:
-+			printk("???\n");
-+			printk(KERN_ERR "Unknown standard configuration %d\n",
-+				mpc_default_type);
-+			/* fall through */
-+		case 1:
-+		case 5:
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
-+			break;
-+		case 2:
-+		case 6:
-+		case 3:
-+			memcpy(bus.mpc_bustype, "EISA  ", 6);
-+			break;
-+		case 4:
-+		case 7:
-+			memcpy(bus.mpc_bustype, "MCA   ", 6);
-+	}
-+	MP_bus_info(&bus);
-+	if (mpc_default_type > 4) {
-+		bus.mpc_busid = 1;
-+		memcpy(bus.mpc_bustype, "PCI   ", 6);
-+		MP_bus_info(&bus);
-+	}
-+
-+	ioapic.mpc_type = MP_IOAPIC;
-+	ioapic.mpc_apicid = 2;
-+	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	ioapic.mpc_flags = MPC_APIC_USABLE;
-+	ioapic.mpc_apicaddr = 0xFEC00000;
-+	MP_ioapic_info(&ioapic);
-+
-+	/*
-+	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
-+	 */
-+	construct_default_ioirq_mptable(mpc_default_type);
-+
-+	lintsrc.mpc_type = MP_LINTSRC;
-+	lintsrc.mpc_irqflag = 0;		/* conforming */
-+	lintsrc.mpc_srcbusid = 0;
-+	lintsrc.mpc_srcbusirq = 0;
-+	lintsrc.mpc_destapic = MP_APIC_ALL;
-+	for (i = 0; i < 2; i++) {
-+		lintsrc.mpc_irqtype = linttypes[i];
-+		lintsrc.mpc_destapiclint = i;
-+		MP_lintsrc_info(&lintsrc);
-+	}
-+}
-+
-+static struct intel_mp_floating *mpf_found;
-+
-+/*
-+ * Scan the memory blocks for an SMP configuration block.
-+ */
-+void __init get_smp_config (void)
-+{
-+	struct intel_mp_floating *mpf = mpf_found;
-+
-+	/*
-+	 * ACPI may be used to obtain the entire SMP configuration or just to 
-+	 * enumerate/configure processors (CONFIG_ACPI_BOOT).  Note that 
-+	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
-+	 * processors, where MPS only supports physical.
-+	 */
-+	if (acpi_lapic && acpi_ioapic) {
-+		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-+		return;
-+	}
-+	else if (acpi_lapic)
-+		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
-+
-+	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-+	if (mpf->mpf_feature2 & (1<<7)) {
-+		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
-+		pic_mode = 1;
-+	} else {
-+		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
-+		pic_mode = 0;
-+	}
-+
-+	/*
-+	 * Now see if we need to read further.
-+	 */
-+	if (mpf->mpf_feature1 != 0) {
-+
-+		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-+		construct_default_ISA_mptable(mpf->mpf_feature1);
-+
-+	} else if (mpf->mpf_physptr) {
-+
-+		/*
-+		 * Read the physical hardware table.  Anything here will
-+		 * override the defaults.
-+		 */
-+		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
-+			smp_found_config = 0;
-+			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-+			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-+			return;
-+		}
-+		/*
-+		 * If there are no explicit MP IRQ entries, then we are
-+		 * broken.  We set up most of the low 16 IO-APIC pins to
-+		 * ISA defaults and hope it will work.
-+		 */
-+		if (!mp_irq_entries) {
-+			struct mpc_config_bus bus;
-+
-+			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
-+
-+			bus.mpc_type = MP_BUS;
-+			bus.mpc_busid = 0;
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
-+			MP_bus_info(&bus);
-+
-+			construct_default_ioirq_mptable(0);
-+		}
-+
-+	} else
-+		BUG();
-+
-+	printk(KERN_INFO "Processors: %d\n", num_processors);
-+	/*
-+	 * Only use the first configuration found.
-+	 */
-+}
-+
-+static int __init smp_scan_config (unsigned long base, unsigned long length)
-+{
-+	unsigned long *bp = isa_bus_to_virt(base);
-+	struct intel_mp_floating *mpf;
-+
-+	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-+	if (sizeof(*mpf) != 16)
-+		printk("Error: MPF size\n");
-+
-+	while (length > 0) {
-+		mpf = (struct intel_mp_floating *)bp;
-+		if ((*bp == SMP_MAGIC_IDENT) &&
-+			(mpf->mpf_length == 1) &&
-+			!mpf_checksum((unsigned char *)bp, 16) &&
-+			((mpf->mpf_specification == 1)
-+				|| (mpf->mpf_specification == 4)) ) {
-+
-+			smp_found_config = 1;
-+#ifndef CONFIG_XEN
-+			printk(KERN_INFO "found SMP MP-table at %08lx\n",
-+						virt_to_phys(mpf));
-+			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
-+			if (mpf->mpf_physptr) {
-+				/*
-+				 * We cannot access the MPC table to compute
-+				 * its size yet, as only a few megabytes from
-+				 * the bottom are mapped now.
-+				 * The PC-9800's MPC table is placed at the very
-+				 * end of physical memory, so simply reserving
-+				 * PAGE_SIZE from mpf->mpf_physptr yields BUG()
-+				 * in reserve_bootmem.
-+				 */
-+				unsigned long size = PAGE_SIZE;
-+				unsigned long end = max_low_pfn * PAGE_SIZE;
-+				if (mpf->mpf_physptr + size > end)
-+					size = end - mpf->mpf_physptr;
-+				reserve_bootmem(mpf->mpf_physptr, size);
-+			}
-+#else
-+			printk(KERN_INFO "found SMP MP-table at %08lx\n",
-+				((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
-+#endif
-+
-+			mpf_found = mpf;
-+			return 1;
-+		}
-+		bp += 4;
-+		length -= 16;
-+	}
-+	return 0;
-+}
-+
-+void __init find_smp_config (void)
-+{
-+#ifndef CONFIG_XEN
-+	unsigned int address;
-+#endif
-+
-+	/*
-+	 * FIXME: Linux assumes you have 640K of base ram..
-+	 * this continues the error...
-+	 *
-+	 * 1) Scan the bottom 1K for a signature
-+	 * 2) Scan the top 1K of base RAM
-+	 * 3) Scan the 64K of bios
-+	 */
-+	if (smp_scan_config(0x0,0x400) ||
-+		smp_scan_config(639*0x400,0x400) ||
-+			smp_scan_config(0xF0000,0x10000))
-+		return;
-+	/*
-+	 * If it is an SMP machine we should know now, unless the
-+	 * configuration is in an EISA/MCA bus machine with an
-+	 * extended bios data area.
-+	 *
-+	 * there is a real-mode segmented pointer pointing to the
-+	 * 4K EBDA area at 0x40E, calculate and scan it here.
-+	 *
-+	 * NOTE! There are Linux loaders that will corrupt the EBDA
-+	 * area, and as such this kind of SMP config may be less
-+	 * trustworthy, simply because the SMP table may have been
-+	 * stomped on during early boot. These loaders are buggy and
-+	 * should be fixed.
-+	 *
-+	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
-+	 */
-+
-+#ifndef CONFIG_XEN
-+	address = get_bios_ebda();
-+	if (address)
-+		smp_scan_config(address, 0x400);
-+#endif
-+}
-+
-+/* --------------------------------------------------------------------------
-+                            ACPI-based MP Configuration
-+   -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI_BOOT
-+
-+void __init mp_register_lapic_address (
-+	u64			address)
-+{
-+#ifndef CONFIG_XEN
-+	mp_lapic_addr = (unsigned long) address;
-+
-+	if (boot_cpu_physical_apicid == -1U)
-+		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
-+
-+	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
-+#endif
-+}
-+
-+
-+void __init mp_register_lapic (
-+	u8			id, 
-+	u8			enabled)
-+{
-+	struct mpc_config_processor processor;
-+	int			boot_cpu = 0;
-+	
-+	if (MAX_APICS - id <= 0) {
-+		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
-+			id, MAX_APICS);
-+		return;
-+	}
-+
-+	if (id == boot_cpu_physical_apicid)
-+		boot_cpu = 1;
-+
-+#ifndef CONFIG_XEN
-+	processor.mpc_type = MP_PROCESSOR;
-+	processor.mpc_apicid = id;
-+	processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
-+	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-+	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
-+		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
-+#endif
-+
-+	MP_processor_info(&processor);
-+}
-+
-+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT))
-+
-+#define MP_ISA_BUS		0
-+#define MP_MAX_IOAPIC_PIN	127
-+
-+static struct mp_ioapic_routing {
-+	int			apic_id;
-+	int			gsi_base;
-+	int			gsi_end;
-+	u32			pin_programmed[4];
-+} mp_ioapic_routing[MAX_IO_APICS];
-+
-+
-+static int mp_find_ioapic (
-+	int			gsi)
-+{
-+	int			i = 0;
-+
-+	/* Find the IOAPIC that manages this GSI. */
-+	for (i = 0; i < nr_ioapics; i++) {
-+		if ((gsi >= mp_ioapic_routing[i].gsi_base)
-+			&& (gsi <= mp_ioapic_routing[i].gsi_end))
-+			return i;
-+	}
-+
-+	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
-+
-+	return -1;
-+}
-+	
-+
-+void __init mp_register_ioapic (
-+	u8			id, 
-+	u32			address,
-+	u32			gsi_base)
-+{
-+	int			idx = 0;
-+
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-+			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-+	}
-+	if (!address) {
-+		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-+			" found in MADT table, skipping!\n");
-+		return;
-+	}
-+
-+	idx = nr_ioapics++;
-+
-+	mp_ioapics[idx].mpc_type = MP_IOAPIC;
-+	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-+	mp_ioapics[idx].mpc_apicaddr = address;
-+
-+	mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
-+	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
-+	
-+	/* 
-+	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
-+	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
-+	 */
-+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-+	mp_ioapic_routing[idx].gsi_base = gsi_base;
-+	mp_ioapic_routing[idx].gsi_end = gsi_base + 
-+		io_apic_get_redir_entries(idx);
-+
-+	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-+		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
-+		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-+		mp_ioapic_routing[idx].gsi_base,
-+		mp_ioapic_routing[idx].gsi_end);
-+
-+	return;
-+}
-+
-+
-+void __init mp_override_legacy_irq (
-+	u8			bus_irq,
-+	u8			polarity, 
-+	u8			trigger, 
-+	u32			gsi)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int			ioapic = -1;
-+	int			pin = -1;
-+
-+	/* 
-+	 * Convert 'gsi' to 'ioapic.pin'.
-+	 */
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0)
-+		return;
-+	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
-+
-+	/*
-+	 * TBD: This check is for faulty timer entries, where the override
-+	 *      erroneously sets the trigger to level, resulting in a HUGE 
-+	 *      increase of timer interrupts!
-+	 */
-+	if ((bus_irq == 0) && (trigger == 3))
-+		trigger = 1;
-+
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqtype = mp_INT;
-+	intsrc.mpc_irqflag = (trigger << 2) | polarity;
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
-+	intsrc.mpc_dstirq = pin;				    /* INTIN# */
-+
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
-+		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-+
-+	mp_irqs[mp_irq_entries] = intsrc;
-+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!\n");
-+
-+	return;
-+}
-+
-+int es7000_plat;
-+
-+void __init mp_config_acpi_legacy_irqs (void)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int			i = 0;
-+	int			ioapic = -1;
-+
-+	/* 
-+	 * Fabricate the legacy ISA bus (bus #31).
-+	 */
-+	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
-+	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
-+
-+	/*
-+	 * Older generations of ES7000 have no legacy identity mappings
-+	 */
-+	if (es7000_plat == 1)
-+		return;
-+
-+	/* 
-+	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
-+	 */
-+	ioapic = mp_find_ioapic(0);
-+	if (ioapic < 0)
-+		return;
-+
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;					/* Conforming */
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
-+
-+	/* 
-+	 * Use the default configuration for IRQs 0-15, unless
-+	 * overridden by (MADT) interrupt source override entries.
-+	 */
-+	for (i = 0; i < 16; i++) {
-+		int idx;
-+
-+		for (idx = 0; idx < mp_irq_entries; idx++) {
-+			struct mpc_config_intsrc *irq = mp_irqs + idx;
-+
-+			/* Do we already have a mapping for this ISA IRQ? */
-+			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-+				break;
-+
-+			/* Do we already have a mapping for this IOAPIC pin */
-+			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-+				(irq->mpc_dstirq == i))
-+				break;
-+		}
-+
-+		if (idx != mp_irq_entries) {
-+			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-+			continue;			/* IRQ already used */
-+		}
-+
-+		intsrc.mpc_irqtype = mp_INT;
-+		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
-+		intsrc.mpc_dstirq = i;
-+
-+		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-+			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
-+			intsrc.mpc_dstirq);
-+
-+		mp_irqs[mp_irq_entries] = intsrc;
-+		if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+			panic("Max # of irq sources exceeded!\n");
-+	}
-+}
-+
-+int mp_register_gsi (u32 gsi, int edge_level, int active_high_low)
-+{
-+	int			ioapic = -1;
-+	int			ioapic_pin = 0;
-+	int			idx, bit = 0;
-+
-+#ifdef CONFIG_ACPI_BUS
-+	/* Don't set up the ACPI SCI because it's already set up */
-+	if (acpi_fadt.sci_int == gsi)
-+		return gsi;
-+#endif
-+
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0) {
-+		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-+		return gsi;
-+	}
-+
-+	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
-+
-+	if (ioapic_renumber_irq)
-+		gsi = ioapic_renumber_irq(ioapic, gsi);
-+
-+	/* 
-+	 * Avoid pin reprogramming.  PRTs typically include entries  
-+	 * with redundant pin->gsi mappings (but unique PCI devices);
-+	 * we only program the IOAPIC on the first.
-+	 */
-+	bit = ioapic_pin % 32;
-+	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-+	if (idx > 3) {
-+		printk(KERN_ERR "Invalid reference to IOAPIC pin "
-+			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
-+			ioapic_pin);
-+		return gsi;
-+	}
-+	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-+		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-+			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-+		return gsi;
-+	}
-+
-+	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-+
-+	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-+		    edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
-+		    active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
-+	return gsi;
-+}
-+
-+#endif /*CONFIG_X86_IO_APIC && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)*/
-+#endif /*CONFIG_ACPI_BOOT*/
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/pci-dma.c linux-2.6.12-xen/arch/xen/i386/kernel/pci-dma.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/pci-dma.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/pci-dma.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,319 @@
-+/*
-+ * Dynamic DMA mapping support.
-+ *
-+ * On i386 there is no hardware dynamic DMA address translation,
-+ * so consistent alloc/free are merely page allocation/freeing.
-+ * The rest of the dynamic DMA mapping interface is implemented
-+ * in asm/pci.h.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/mm.h>
-+#include <linux/string.h>
-+#include <linux/pci.h>
-+#include <linux/version.h>
-+#include <asm/io.h>
-+#include <asm-xen/balloon.h>
-+#include <asm/tlbflush.h>
-+#include <asm/swiotlb.h>
-+
-+struct dma_coherent_mem {
-+	void		*virt_base;
-+	u32		device_base;
-+	int		size;
-+	int		flags;
-+	unsigned long	*bitmap;
-+};
-+
-+#define IOMMU_BUG_ON(test)				\
-+do {							\
-+	if (unlikely(test)) {				\
-+		printk(KERN_ALERT "Fatal DMA error! "	\
-+		       "Please use 'swiotlb=force'\n");	\
-+		BUG();					\
-+	}						\
-+} while (0)
-+
-+int
-+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+	   enum dma_data_direction direction)
-+{
-+	int i, rc;
-+
-+	BUG_ON(direction == DMA_NONE);
-+
-+	if (swiotlb) {
-+		rc = swiotlb_map_sg(hwdev, sg, nents, direction);
-+	} else {
-+		for (i = 0; i < nents; i++ ) {
-+			sg[i].dma_address =
-+				page_to_phys(sg[i].page) + sg[i].offset;
-+			sg[i].dma_length  = sg[i].length;
-+			BUG_ON(!sg[i].page);
-+			IOMMU_BUG_ON(address_needs_mapping(
-+				hwdev, sg[i].dma_address));
-+		}
-+		rc = nents;
-+	}
-+
-+	flush_write_buffers();
-+	return rc;
-+}
-+EXPORT_SYMBOL(dma_map_sg);
-+
-+void
-+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-+	     enum dma_data_direction direction)
-+{
-+	BUG_ON(direction == DMA_NONE);
-+	if (swiotlb)
-+		swiotlb_unmap_sg(hwdev, sg, nents, direction);
-+}
-+EXPORT_SYMBOL(dma_unmap_sg);
-+
-+dma_addr_t
-+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-+	     size_t size, enum dma_data_direction direction)
-+{
-+	dma_addr_t dma_addr;
-+
-+	BUG_ON(direction == DMA_NONE);
-+
-+	if (swiotlb) {
-+		dma_addr = swiotlb_map_page(
-+			dev, page, offset, size, direction);
-+	} else {
-+		dma_addr = page_to_phys(page) + offset;
-+		IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
-+	}
-+
-+	return dma_addr;
-+}
-+EXPORT_SYMBOL(dma_map_page);
-+
-+void
-+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-+	       enum dma_data_direction direction)
-+{
-+	BUG_ON(direction == DMA_NONE);
-+	if (swiotlb)
-+		swiotlb_unmap_page(dev, dma_address, size, direction);
-+}
-+EXPORT_SYMBOL(dma_unmap_page);
-+
-+int
-+dma_mapping_error(dma_addr_t dma_addr)
-+{
-+	if (swiotlb)
-+		return swiotlb_dma_mapping_error(dma_addr);
-+	return 0;
-+}
-+EXPORT_SYMBOL(dma_mapping_error);
-+
-+int
-+dma_supported(struct device *dev, u64 mask)
-+{
-+	if (swiotlb)
-+		return swiotlb_dma_supported(dev, mask);
-+	/*
-+	 * By default we'll BUG when an infeasible DMA is requested, and
-+	 * request swiotlb=force (see IOMMU_BUG_ON).
-+	 */
-+	return 1;
-+}
-+EXPORT_SYMBOL(dma_supported);
-+
-+void *dma_alloc_coherent(struct device *dev, size_t size,
-+			   dma_addr_t *dma_handle, unsigned int __nocast gfp)
-+{
-+	void *ret;
-+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+	unsigned int order = get_order(size);
-+	unsigned long vstart;
-+	/* ignore region specifiers */
-+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-+
-+	if (mem) {
-+		int page = bitmap_find_free_region(mem->bitmap, mem->size,
-+						     order);
-+		if (page >= 0) {
-+			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
-+			ret = mem->virt_base + (page << PAGE_SHIFT);
-+			memset(ret, 0, size);
-+			return ret;
-+		}
-+		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
-+			return NULL;
-+	}
-+
-+	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-+		gfp |= GFP_DMA;
-+
-+	vstart = __get_free_pages(gfp, order);
-+	ret = (void *)vstart;
-+
-+	if (ret != NULL) {
-+		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
-+		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
-+			free_pages(vstart, order);
-+			return NULL;
-+		}
-+		memset(ret, 0, size);
-+		*dma_handle = virt_to_bus(ret);
-+	}
-+	return ret;
-+}
-+EXPORT_SYMBOL(dma_alloc_coherent);
-+
-+void dma_free_coherent(struct device *dev, size_t size,
-+			 void *vaddr, dma_addr_t dma_handle)
-+{
-+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
-+	int order = get_order(size);
-+	
-+	if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-+
-+		bitmap_release_region(mem->bitmap, page, order);
-+	} else {
-+		xen_destroy_contiguous_region((unsigned long)vaddr, order);
-+		free_pages((unsigned long)vaddr, order);
-+	}
-+}
-+EXPORT_SYMBOL(dma_free_coherent);
-+
-+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-+				dma_addr_t device_addr, size_t size, int flags)
-+{
-+	void __iomem *mem_base;
-+	int pages = size >> PAGE_SHIFT;
-+	int bitmap_size = (pages + 31)/32;
-+
-+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
-+		goto out;
-+	if (!size)
-+		goto out;
-+	if (dev->dma_mem)
-+		goto out;
-+
-+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-+
-+	mem_base = ioremap(bus_addr, size);
-+	if (!mem_base)
-+		goto out;
-+
-+	dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-+	if (!dev->dma_mem)
-+		goto out;
-+	memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
-+	dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
-+	if (!dev->dma_mem->bitmap)
-+		goto free1_out;
-+	memset(dev->dma_mem->bitmap, 0, bitmap_size);
-+
-+	dev->dma_mem->virt_base = mem_base;
-+	dev->dma_mem->device_base = device_addr;
-+	dev->dma_mem->size = pages;
-+	dev->dma_mem->flags = flags;
-+
-+	if (flags & DMA_MEMORY_MAP)
-+		return DMA_MEMORY_MAP;
-+
-+	return DMA_MEMORY_IO;
-+
-+ free1_out:
-+	kfree(dev->dma_mem->bitmap);
-+ out:
-+	return 0;
-+}
-+EXPORT_SYMBOL(dma_declare_coherent_memory);
-+
-+void dma_release_declared_memory(struct device *dev)
-+{
-+	struct dma_coherent_mem *mem = dev->dma_mem;
-+	
-+	if(!mem)
-+		return;
-+	dev->dma_mem = NULL;
-+	iounmap(mem->virt_base);
-+	kfree(mem->bitmap);
-+	kfree(mem);
-+}
-+EXPORT_SYMBOL(dma_release_declared_memory);
-+
-+void *dma_mark_declared_memory_occupied(struct device *dev,
-+					dma_addr_t device_addr, size_t size)
-+{
-+	struct dma_coherent_mem *mem = dev->dma_mem;
-+	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-+	int pos, err;
-+
-+	if (!mem)
-+		return ERR_PTR(-EINVAL);
-+
-+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
-+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
-+	if (err != 0)
-+		return ERR_PTR(err);
-+	return mem->virt_base + (pos << PAGE_SHIFT);
-+}
-+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-+
-+dma_addr_t
-+dma_map_single(struct device *dev, void *ptr, size_t size,
-+	       enum dma_data_direction direction)
-+{
-+	dma_addr_t dma;
-+
-+	BUG_ON(direction == DMA_NONE);
-+
-+	if (swiotlb) {
-+		dma = swiotlb_map_single(dev, ptr, size, direction);
-+	} else {
-+		dma = virt_to_bus(ptr);
-+		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
-+		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
-+	}
-+
-+	flush_write_buffers();
-+	return dma;
-+}
-+EXPORT_SYMBOL(dma_map_single);
-+
-+void
-+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+		 enum dma_data_direction direction)
-+{
-+	BUG_ON(direction == DMA_NONE);
-+	if (swiotlb)
-+		swiotlb_unmap_single(dev, dma_addr, size, direction);
-+}
-+EXPORT_SYMBOL(dma_unmap_single);
-+
-+void
-+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-+			enum dma_data_direction direction)
-+{
-+	if (swiotlb)
-+		swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
-+}
-+EXPORT_SYMBOL(dma_sync_single_for_cpu);
-+
-+void
-+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-+                           enum dma_data_direction direction)
-+{
-+	if (swiotlb)
-+		swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
-+}
-+EXPORT_SYMBOL(dma_sync_single_for_device);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/process.c linux-2.6.12-xen/arch/xen/i386/kernel/process.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/process.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/process.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,770 @@
-+/*
-+ *  linux/arch/i386/kernel/process.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of process handling..
-+ */
-+
-+#include <stdarg.h>
-+
-+#include <linux/cpu.h>
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/user.h>
-+#include <linux/a.out.h>
-+#include <linux/interrupt.h>
-+#include <linux/config.h>
-+#include <linux/utsname.h>
-+#include <linux/delay.h>
-+#include <linux/reboot.h>
-+#include <linux/init.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/module.h>
-+#include <linux/kallsyms.h>
-+#include <linux/ptrace.h>
-+#include <linux/random.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/ldt.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/irq.h>
-+#include <asm/desc.h>
-+#include <asm-xen/xen-public/physdev.h>
-+#include <asm-xen/xen-public/vcpu.h>
-+#ifdef CONFIG_MATH_EMULATION
-+#include <asm/math_emu.h>
-+#endif
-+
-+#include <linux/irq.h>
-+#include <linux/err.h>
-+
-+#include <asm/tlbflush.h>
-+#include <asm/cpu.h>
-+
-+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-+
-+static int hlt_counter;
-+
-+unsigned long boot_option_idle_override = 0;
-+EXPORT_SYMBOL(boot_option_idle_override);
-+
-+/*
-+ * Return saved PC of a blocked thread.
-+ */
-+unsigned long thread_saved_pc(struct task_struct *tsk)
-+{
-+	return ((unsigned long *)tsk->thread.esp)[3];
-+}
-+
-+/*
-+ * Power management idle function, if any.
-+ */
-+void (*pm_idle)(void);
-+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-+
-+void disable_hlt(void)
-+{
-+	hlt_counter++;
-+}
-+
-+EXPORT_SYMBOL(disable_hlt);
-+
-+void enable_hlt(void)
-+{
-+	hlt_counter--;
-+}
-+
-+EXPORT_SYMBOL(enable_hlt);
-+
-+/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-+extern void stop_hz_timer(void);
-+extern void start_hz_timer(void);
-+void xen_idle(void)
-+{
-+	local_irq_disable();
-+
-+	if (need_resched()) {
-+		local_irq_enable();
-+	} else {
-+		stop_hz_timer();
-+		/* Blocking includes an implicit local_irq_enable(). */
-+		HYPERVISOR_sched_op(SCHEDOP_block, 0);
-+		start_hz_timer();
-+	}
-+}
-+
-+/*
-+ * The idle thread. There's no useful work to be
-+ * done, so just try to conserve power and have a
-+ * low exit latency (ie sit in a loop waiting for
-+ * somebody to say that they'd like to reschedule)
-+ */
-+void cpu_idle (void)
-+{
-+#if defined(CONFIG_HOTPLUG_CPU)
-+	int cpu = _smp_processor_id();
-+#endif
-+
-+	/* endless idle loop with no priority at all */
-+	while (1) {
-+		while (!need_resched()) {
-+
-+			if (__get_cpu_var(cpu_idle_state))
-+				__get_cpu_var(cpu_idle_state) = 0;
-+			rmb();
-+
-+#if defined(CONFIG_HOTPLUG_CPU)
-+			if (cpu_is_offline(cpu)) {
-+				HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
-+				local_irq_enable();
-+			}
-+#endif
-+
-+			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
-+			xen_idle();
-+		}
-+		schedule();
-+	}
-+}
-+
-+void cpu_idle_wait(void)
-+{
-+	unsigned int cpu, this_cpu = get_cpu();
-+	cpumask_t map;
-+
-+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+	put_cpu();
-+
-+	cpus_clear(map);
-+	for_each_online_cpu(cpu) {
-+		per_cpu(cpu_idle_state, cpu) = 1;
-+		cpu_set(cpu, map);
-+	}
-+
-+	__get_cpu_var(cpu_idle_state) = 0;
-+
-+	wmb();
-+	do {
-+		ssleep(1);
-+		for_each_online_cpu(cpu) {
-+			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-+				cpu_clear(cpu, map);
-+		}
-+		cpus_and(map, map, cpu_online_map);
-+	} while (!cpus_empty(map));
-+}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
-+
-+/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
-+/* Always use xen_idle() instead. */
-+void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
-+
-+void show_regs(struct pt_regs * regs)
-+{
-+	printk("\n");
-+	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
-+	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
-+	print_symbol("EIP is at %s\n", regs->eip);
-+
-+	if (regs->xcs & 2)
-+		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
-+	printk(" EFLAGS: %08lx    %s  (%s)\n",
-+	       regs->eflags, print_tainted(), system_utsname.release);
-+	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-+		regs->eax,regs->ebx,regs->ecx,regs->edx);
-+	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
-+		regs->esi, regs->edi, regs->ebp);
-+	printk(" DS: %04x ES: %04x\n",
-+		0xffff & regs->xds,0xffff & regs->xes);
-+
-+	show_trace(NULL, &regs->esp);
-+}
-+
-+/*
-+ * This gets run with %ebx containing the
-+ * function to call, and %edx containing
-+ * the "args".
-+ */
-+extern void kernel_thread_helper(void);
-+__asm__(".section .text\n"
-+	".align 4\n"
-+	"kernel_thread_helper:\n\t"
-+	"movl %edx,%eax\n\t"
-+	"pushl %edx\n\t"
-+	"call *%ebx\n\t"
-+	"pushl %eax\n\t"
-+	"call do_exit\n"
-+	".previous");
-+
-+/*
-+ * Create a kernel thread
-+ */
-+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-+{
-+	struct pt_regs regs;
-+
-+	memset(&regs, 0, sizeof(regs));
-+
-+	regs.ebx = (unsigned long) fn;
-+	regs.edx = (unsigned long) arg;
-+
-+	regs.xds = __USER_DS;
-+	regs.xes = __USER_DS;
-+	regs.orig_eax = -1;
-+	regs.eip = (unsigned long) kernel_thread_helper;
-+	regs.xcs = __KERNEL_CS;
-+	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
-+
-+	/* Ok, create the new process.. */
-+	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-+}
-+
-+/*
-+ * Free current thread data structures etc..
-+ */
-+void exit_thread(void)
-+{
-+	struct task_struct *tsk = current;
-+	struct thread_struct *t = &tsk->thread;
-+
-+	/* The process may have allocated an io port bitmap... nuke it. */
-+	if (unlikely(NULL != t->io_bitmap_ptr)) {
-+		physdev_op_t op = { 0 };
-+		op.cmd = PHYSDEVOP_SET_IOBITMAP;
-+		HYPERVISOR_physdev_op(&op);
-+		kfree(t->io_bitmap_ptr);
-+		t->io_bitmap_ptr = NULL;
-+	}
-+}
-+
-+void flush_thread(void)
-+{
-+	struct task_struct *tsk = current;
-+
-+	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
-+	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
-+	/*
-+	 * Forget coprocessor state..
-+	 */
-+	clear_fpu(tsk);
-+	clear_used_math();
-+}
-+
-+void release_thread(struct task_struct *dead_task)
-+{
-+	if (dead_task->mm) {
-+		// temporary debugging check
-+		if (dead_task->mm->context.size) {
-+			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-+					dead_task->comm,
-+					dead_task->mm->context.ldt,
-+					dead_task->mm->context.size);
-+			BUG();
-+		}
-+	}
-+
-+	release_vm86_irqs(dead_task);
-+}
-+
-+/*
-+ * This gets called before we allocate a new thread and copy
-+ * the current task into it.
-+ */
-+void prepare_to_copy(struct task_struct *tsk)
-+{
-+	unlazy_fpu(tsk);
-+}
-+
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
-+	unsigned long unused,
-+	struct task_struct * p, struct pt_regs * regs)
-+{
-+	struct pt_regs * childregs;
-+	struct task_struct *tsk;
-+	int err;
-+
-+	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
-+	/*
-+	 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
-+	 * This is necessary to guarantee that the entire "struct pt_regs"
-+	 * is accessible even if the CPU hasn't stored the SS/ESP registers
-+	 * on the stack (interrupt gate does not save these registers
-+	 * when switching to the same priv ring).
-+	 * Therefore beware: accessing the xss/esp fields of the
-+	 * "struct pt_regs" is possible, but they may contain the
-+	 * completely wrong values.
-+	 */
-+	childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
-+	*childregs = *regs;
-+	childregs->eax = 0;
-+	childregs->esp = esp;
-+
-+	p->thread.esp = (unsigned long) childregs;
-+	p->thread.esp0 = (unsigned long) (childregs+1);
-+
-+	p->thread.eip = (unsigned long) ret_from_fork;
-+
-+	savesegment(fs,p->thread.fs);
-+	savesegment(gs,p->thread.gs);
-+
-+	tsk = current;
-+	if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
-+		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+		if (!p->thread.io_bitmap_ptr) {
-+			p->thread.io_bitmap_max = 0;
-+			return -ENOMEM;
-+		}
-+		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
-+			IO_BITMAP_BYTES);
-+	}
-+
-+	/*
-+	 * Set a new TLS for the child thread?
-+	 */
-+	if (clone_flags & CLONE_SETTLS) {
-+		struct desc_struct *desc;
-+		struct user_desc info;
-+		int idx;
-+
-+		err = -EFAULT;
-+		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
-+			goto out;
-+		err = -EINVAL;
-+		if (LDT_empty(&info))
-+			goto out;
-+
-+		idx = info.entry_number;
-+		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+			goto out;
-+
-+		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-+		desc->a = LDT_entry_a(&info);
-+		desc->b = LDT_entry_b(&info);
-+	}
-+
-+	p->thread.io_pl = current->thread.io_pl;
-+
-+	err = 0;
-+ out:
-+	if (err && p->thread.io_bitmap_ptr) {
-+		kfree(p->thread.io_bitmap_ptr);
-+		p->thread.io_bitmap_max = 0;
-+	}
-+	return err;
-+}
-+
-+/*
-+ * fill in the user structure for a core dump..
-+ */
-+void dump_thread(struct pt_regs * regs, struct user * dump)
-+{
-+	int i;
-+
-+/* changed the size calculations - should hopefully work better. lbt */
-+	dump->magic = CMAGIC;
-+	dump->start_code = 0;
-+	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
-+	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
-+	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
-+	dump->u_dsize -= dump->u_tsize;
-+	dump->u_ssize = 0;
-+	for (i = 0; i < 8; i++)
-+		dump->u_debugreg[i] = current->thread.debugreg[i];  
-+
-+	if (dump->start_stack < TASK_SIZE)
-+		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-+
-+	dump->regs.ebx = regs->ebx;
-+	dump->regs.ecx = regs->ecx;
-+	dump->regs.edx = regs->edx;
-+	dump->regs.esi = regs->esi;
-+	dump->regs.edi = regs->edi;
-+	dump->regs.ebp = regs->ebp;
-+	dump->regs.eax = regs->eax;
-+	dump->regs.ds = regs->xds;
-+	dump->regs.es = regs->xes;
-+	savesegment(fs,dump->regs.fs);
-+	savesegment(gs,dump->regs.gs);
-+	dump->regs.orig_eax = regs->orig_eax;
-+	dump->regs.eip = regs->eip;
-+	dump->regs.cs = regs->xcs;
-+	dump->regs.eflags = regs->eflags;
-+	dump->regs.esp = regs->esp;
-+	dump->regs.ss = regs->xss;
-+
-+	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
-+}
-+
-+/* 
-+ * Capture the user space registers if the task is not running (in user space)
-+ */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-+{
-+	struct pt_regs ptregs;
-+	
-+	ptregs = *(struct pt_regs *)
-+		((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
-+	ptregs.xcs &= 0xffff;
-+	ptregs.xds &= 0xffff;
-+	ptregs.xes &= 0xffff;
-+	ptregs.xss &= 0xffff;
-+
-+	elf_core_copy_regs(regs, &ptregs);
-+
-+	boot_option_idle_override = 1;
-+	return 1;
-+}
-+
-+/*
-+ *	switch_to(x,y) should switch tasks from x to y.
-+ *
-+ * We fsave/fwait so that an exception goes off at the right time
-+ * (as a call from the fsave or fwait in effect) rather than to
-+ * the wrong process. Lazy FP saving no longer makes any sense
-+ * with modern CPUs, and this simplifies a lot of things (SMP
-+ * and UP become the same).
-+ *
-+ * NOTE! We used to use the x86 hardware context switching. The
-+ * reason for not using it any more becomes apparent when you
-+ * try to recover gracefully from saved state that is no longer
-+ * valid (stale segment register values in particular). With the
-+ * hardware task-switch, there is no way to fix up bad state in
-+ * a reasonable manner.
-+ *
-+ * The fact that Intel documents the hardware task-switching to
-+ * be slow is a fairly red herring - this code is not noticeably
-+ * faster. However, there _is_ some room for improvement here,
-+ * so the performance issues may eventually be a valid point.
-+ * More important, however, is the fact that this allows us much
-+ * more flexibility.
-+ *
-+ * The return value (in %eax) will be the "prev" task after
-+ * the task-switch, and shows up in ret_from_fork in entry.S,
-+ * for example.
-+ */
-+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+	struct thread_struct *prev = &prev_p->thread,
-+				 *next = &next_p->thread;
-+	int cpu = smp_processor_id();
-+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+	physdev_op_t iopl_op, iobmp_op;
-+	multicall_entry_t _mcl[8], *mcl = _mcl;
-+
-+	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
-+
-+	/*
-+	 * This is basically '__unlazy_fpu', except that we queue a
-+	 * multicall to indicate FPU task switch, rather than
-+	 * synchronously trapping to Xen.
-+	 */
-+	if (prev_p->thread_info->status & TS_USEDFPU) {
-+		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-+		mcl->op      = __HYPERVISOR_fpu_taskswitch;
-+		mcl->args[0] = 1;
-+		mcl++;
-+	}
-+#if 0 /* lazy fpu sanity check */
-+	else BUG_ON(!(read_cr0() & 8));
-+#endif
-+
-+	/*
-+	 * Reload esp0, LDT and the page table pointer:
-+	 * This is load_esp0(tss, next) with a multicall.
-+	 */
-+	tss->esp0 = next->esp0;
-+	mcl->op      = __HYPERVISOR_stack_switch;
-+	mcl->args[0] = tss->ss0;
-+	mcl->args[1] = tss->esp0;
-+	mcl++;
-+
-+	/*
-+	 * Load the per-thread Thread-Local Storage descriptor.
-+	 * This is load_TLS(next, cpu) with multicalls.
-+	 */
-+#define C(i) do {							\
-+	if (unlikely(next->tls_array[i].a != prev->tls_array[i].a ||	\
-+		     next->tls_array[i].b != prev->tls_array[i].b)) {	\
-+		mcl->op = __HYPERVISOR_update_descriptor;		\
-+		*(u64 *)&mcl->args[0] =	virt_to_machine(		\
-+			&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
-+		*(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i];	\
-+		mcl++;							\
-+	}								\
-+} while (0)
-+	C(0); C(1); C(2);
-+#undef C
-+
-+	if (unlikely(prev->io_pl != next->io_pl)) {
-+		iopl_op.cmd             = PHYSDEVOP_SET_IOPL;
-+		iopl_op.u.set_iopl.iopl = (next->io_pl == 0) ? 1 : next->io_pl;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = (unsigned long)&iopl_op;
-+		mcl++;
-+	}
-+
-+	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-+		iobmp_op.cmd                     =
-+			PHYSDEVOP_SET_IOBITMAP;
-+		iobmp_op.u.set_iobitmap.bitmap   =
-+			(char *)next->io_bitmap_ptr;
-+		iobmp_op.u.set_iobitmap.nr_ports =
-+			next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = (unsigned long)&iobmp_op;
-+		mcl++;
-+	}
-+
-+	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
-+
-+	/*
-+	 * Restore %fs and %gs if needed.
-+	 */
-+	if (unlikely(next->fs | next->gs)) {
-+		loadsegment(fs, next->fs);
-+		loadsegment(gs, next->gs);
-+	}
-+
-+	/*
-+	 * Now maybe reload the debug registers
-+	 */
-+	if (unlikely(next->debugreg[7])) {
-+		loaddebug(next, 0);
-+		loaddebug(next, 1);
-+		loaddebug(next, 2);
-+		loaddebug(next, 3);
-+		/* no 4 and 5 */
-+		loaddebug(next, 6);
-+		loaddebug(next, 7);
-+	}
-+
-+	return prev_p;
-+}
-+
-+asmlinkage int sys_fork(struct pt_regs regs)
-+{
-+	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-+}
-+
-+asmlinkage int sys_clone(struct pt_regs regs)
-+{
-+	unsigned long clone_flags;
-+	unsigned long newsp;
-+	int __user *parent_tidptr, *child_tidptr;
-+
-+	clone_flags = regs.ebx;
-+	newsp = regs.ecx;
-+	parent_tidptr = (int __user *)regs.edx;
-+	child_tidptr = (int __user *)regs.edi;
-+	if (!newsp)
-+		newsp = regs.esp;
-+	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
-+}
-+
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
-+ */
-+asmlinkage int sys_vfork(struct pt_regs regs)
-+{
-+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
-+}
-+
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage int sys_execve(struct pt_regs regs)
-+{
-+	int error;
-+	char * filename;
-+
-+	filename = getname((char __user *) regs.ebx);
-+	error = PTR_ERR(filename);
-+	if (IS_ERR(filename))
-+		goto out;
-+	error = do_execve(filename,
-+			(char __user * __user *) regs.ecx,
-+			(char __user * __user *) regs.edx,
-+			&regs);
-+	if (error == 0) {
-+		task_lock(current);
-+		current->ptrace &= ~PT_DTRACE;
-+		task_unlock(current);
-+		/* Make sure we don't return using sysenter.. */
-+		set_thread_flag(TIF_IRET);
-+	}
-+	putname(filename);
-+out:
-+	return error;
-+}
-+
-+#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
-+#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+	unsigned long ebp, esp, eip;
-+	unsigned long stack_page;
-+	int count = 0;
-+	if (!p || p == current || p->state == TASK_RUNNING)
-+		return 0;
-+	stack_page = (unsigned long)p->thread_info;
-+	esp = p->thread.esp;
-+	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
-+		return 0;
-+	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
-+	ebp = *(unsigned long *) esp;
-+	do {
-+		if (ebp < stack_page || ebp > top_ebp+stack_page)
-+			return 0;
-+		eip = *(unsigned long *) (ebp+4);
-+		if (!in_sched_functions(eip))
-+			return eip;
-+		ebp = *(unsigned long *) ebp;
-+	} while (count++ < 16);
-+	return 0;
-+}
-+
-+/*
-+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
-+ */
-+static int get_free_idx(void)
-+{
-+	struct thread_struct *t = &current->thread;
-+	int idx;
-+
-+	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
-+		if (desc_empty(t->tls_array + idx))
-+			return idx + GDT_ENTRY_TLS_MIN;
-+	return -ESRCH;
-+}
-+
-+/*
-+ * Set a given TLS descriptor:
-+ */
-+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
-+{
-+	struct thread_struct *t = &current->thread;
-+	struct user_desc info;
-+	struct desc_struct *desc;
-+	int cpu, idx;
-+
-+	if (copy_from_user(&info, u_info, sizeof(info)))
-+		return -EFAULT;
-+	idx = info.entry_number;
-+
-+	/*
-+	 * index -1 means the kernel should try to find and
-+	 * allocate an empty descriptor:
-+	 */
-+	if (idx == -1) {
-+		idx = get_free_idx();
-+		if (idx < 0)
-+			return idx;
-+		if (put_user(idx, &u_info->entry_number))
-+			return -EFAULT;
-+	}
-+
-+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+		return -EINVAL;
-+
-+	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
-+
-+	/*
-+	 * We must not get preempted while modifying the TLS.
-+	 */
-+	cpu = get_cpu();
-+
-+	if (LDT_empty(&info)) {
-+		desc->a = 0;
-+		desc->b = 0;
-+	} else {
-+		desc->a = LDT_entry_a(&info);
-+		desc->b = LDT_entry_b(&info);
-+	}
-+	load_TLS(t, cpu);
-+
-+	put_cpu();
-+
-+	return 0;
-+}
-+
-+/*
-+ * Get the current Thread-Local Storage area:
-+ */
-+
-+#define GET_BASE(desc) ( \
-+	(((desc)->a >> 16) & 0x0000ffff) | \
-+	(((desc)->b << 16) & 0x00ff0000) | \
-+	( (desc)->b        & 0xff000000)   )
-+
-+#define GET_LIMIT(desc) ( \
-+	((desc)->a & 0x0ffff) | \
-+	 ((desc)->b & 0xf0000) )
-+	
-+#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
-+#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
-+#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
-+#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
-+#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
-+#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
-+
-+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
-+{
-+	struct user_desc info;
-+	struct desc_struct *desc;
-+	int idx;
-+
-+	if (get_user(idx, &u_info->entry_number))
-+		return -EFAULT;
-+	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
-+		return -EINVAL;
-+
-+	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
-+
-+	info.entry_number = idx;
-+	info.base_addr = GET_BASE(desc);
-+	info.limit = GET_LIMIT(desc);
-+	info.seg_32bit = GET_32BIT(desc);
-+	info.contents = GET_CONTENTS(desc);
-+	info.read_exec_only = !GET_WRITABLE(desc);
-+	info.limit_in_pages = GET_LIMIT_PAGES(desc);
-+	info.seg_not_present = !GET_PRESENT(desc);
-+	info.useable = GET_USEABLE(desc);
-+
-+	if (copy_to_user(u_info, &info, sizeof(info)))
-+		return -EFAULT;
-+	return 0;
-+}
-+
-+unsigned long arch_align_stack(unsigned long sp)
-+{
-+	if (randomize_va_space)
-+		sp -= get_random_int() % 8192;
-+	return sp & ~0xf;
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/quirks.c linux-2.6.12-xen/arch/xen/i386/kernel/quirks.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/quirks.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/quirks.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,49 @@
-+/*
-+ * This file contains work-arounds for x86 and x86_64 platform bugs.
-+ */
-+#include <linux/config.h>
-+#include <linux/pci.h>
-+#include <linux/irq.h>
-+
-+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
-+
-+static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
-+{
-+	u8 config, rev;
-+	u32 word;
-+
-+	/* BIOS may enable hardware IRQ balancing for
-+	 * E7520/E7320/E7525(revision ID 0x9 and below)
-+	 * based platforms.
-+	 * Disable SW irqbalance/affinity on those platforms.
-+	 */
-+	pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
-+	if (rev > 0x9)
-+		return;
-+
-+	printk(KERN_INFO "Intel E7520/7320/7525 detected.");
-+
-+	/* enable access to config space*/
-+	pci_read_config_byte(dev, 0xf4, &config);
-+	config |= 0x2;
-+	pci_write_config_byte(dev, 0xf4, config);
-+
-+	/* read xTPR register */
-+	raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
-+
-+	if (!(word & (1 << 13))) {
-+		dom0_op_t op;
-+		printk(KERN_INFO "Disabling irq balancing and affinity\n");
-+		op.cmd = DOM0_PLATFORM_QUIRK;
-+		op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
-+		(void)HYPERVISOR_dom0_op(&op);
-+	}
-+
-+	config &= ~0x2;
-+	/* disable access to config space*/
-+	pci_write_config_byte(dev, 0xf4, config);
-+}
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7320_MCH,	quirk_intel_irqbalance);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7525_MCH,	quirk_intel_irqbalance);
-+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7520_MCH,	quirk_intel_irqbalance);
-+#endif
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/setup.c linux-2.6.12-xen/arch/xen/i386/kernel/setup.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/setup.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/setup.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1790 @@
-+/*
-+ *  linux/arch/i386/kernel/setup.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ *
-+ *  Memory region support
-+ *	David Parsons <orc at pell.chi.il.us>, July-August 1999
-+ *
-+ *  Added E820 sanitization routine (removes overlapping memory regions);
-+ *  Brian Moyle <bmoyle at mvista.com>, February 2001
-+ *
-+ * Moved CPU detection code to cpu/${cpu}.c
-+ *    Patrick Mochel <mochel at osdl.org>, March 2002
-+ *
-+ *  Provisions for empty E820 memory regions (reported by certain BIOSes).
-+ *  Alex Achenbach <xela at slit.de>, December 2002.
-+ *
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of initialization
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/tty.h>
-+#include <linux/ioport.h>
-+#include <linux/acpi.h>
-+#include <linux/apm_bios.h>
-+#include <linux/initrd.h>
-+#include <linux/bootmem.h>
-+#include <linux/seq_file.h>
-+#include <linux/console.h>
-+#include <linux/mca.h>
-+#include <linux/root_dev.h>
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/efi.h>
-+#include <linux/init.h>
-+#include <linux/edd.h>
-+#include <linux/nodemask.h>
-+#include <linux/kernel.h>
-+#include <linux/percpu.h>
-+#include <linux/notifier.h>
-+#include <video/edid.h>
-+#include <asm/e820.h>
-+#include <asm/mpspec.h>
-+#include <asm/setup.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/sections.h>
-+#include <asm/io_apic.h>
-+#include <asm/ist.h>
-+#include <asm/io.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xen-public/physdev.h>
-+#include <asm-xen/xen-public/memory.h>
-+#include <asm-xen/features.h>
-+#include "setup_arch_pre.h"
-+#include <bios_ebda.h>
-+
-+/* Allows setting of maximum possible memory size  */
-+static unsigned long xen_override_max_pfn;
-+
-+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
-+static struct notifier_block xen_panic_block = {
-+	xen_panic_event, NULL, 0 /* try to go last */
-+};
-+
-+extern char hypercall_page[PAGE_SIZE];
-+EXPORT_SYMBOL(hypercall_page);
-+
-+int disable_pse __initdata = 0;
-+
-+/*
-+ * Machine setup..
-+ */
-+
-+#ifdef CONFIG_EFI
-+int efi_enabled = 0;
-+EXPORT_SYMBOL(efi_enabled);
-+#endif
-+
-+/* cpu data as detected by the assembly code in head.S */
-+struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
-+/* common cpu data for all cpus */
-+struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
-+
-+unsigned long mmu_cr4_features;
-+
-+#ifdef	CONFIG_ACPI_INTERPRETER
-+	int acpi_disabled = 0;
-+#else
-+	int acpi_disabled = 1;
-+#endif
-+EXPORT_SYMBOL(acpi_disabled);
-+
-+#ifdef	CONFIG_ACPI_BOOT
-+int __initdata acpi_force = 0;
-+extern acpi_interrupt_flags	acpi_sci_flags;
-+#endif
-+
-+/* for MCA, but anyone else can use it if they want */
-+unsigned int machine_id;
-+unsigned int machine_submodel_id;
-+unsigned int BIOS_revision;
-+unsigned int mca_pentium_flag;
-+
-+/* For PCI or other memory-mapped resources */
-+unsigned long pci_mem_start = 0x10000000;
-+
-+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-+int bootloader_type;
-+
-+/* user-defined highmem size */
-+static unsigned int highmem_pages = -1;
-+
-+/*
-+ * Setup options
-+ */
-+struct drive_info_struct { char dummy[32]; } drive_info;
-+struct screen_info screen_info;
-+struct apm_info apm_info;
-+struct sys_desc_table_struct {
-+	unsigned short length;
-+	unsigned char table[0];
-+};
-+struct edid_info edid_info;
-+struct ist_info ist_info;
-+struct e820map e820;
-+
-+extern void early_cpu_init(void);
-+extern void dmi_scan_machine(void);
-+extern void generic_apic_probe(char *);
-+extern int root_mountflags;
-+
-+unsigned long saved_videomode;
-+
-+#define RAMDISK_IMAGE_START_MASK  	0x07FF
-+#define RAMDISK_PROMPT_FLAG		0x8000
-+#define RAMDISK_LOAD_FLAG		0x4000	
-+
-+static char command_line[COMMAND_LINE_SIZE];
-+
-+unsigned char __initdata boot_params[PARAM_SIZE];
-+
-+static struct resource data_resource = {
-+	.name	= "Kernel data",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource code_resource = {
-+	.name	= "Kernel code",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+static struct resource system_rom_resource = {
-+	.name	= "System ROM",
-+	.start	= 0xf0000,
-+	.end	= 0xfffff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+
-+static struct resource extension_rom_resource = {
-+	.name	= "Extension ROM",
-+	.start	= 0xe0000,
-+	.end	= 0xeffff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+
-+static struct resource adapter_rom_resources[] = { {
-+	.name 	= "Adapter ROM",
-+	.start	= 0xc8000,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+}, {
-+	.name 	= "Adapter ROM",
-+	.start	= 0,
-+	.end	= 0,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+} };
-+
-+#define ADAPTER_ROM_RESOURCES \
-+	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
-+
-+static struct resource video_rom_resource = {
-+	.name 	= "Video ROM",
-+	.start	= 0xc0000,
-+	.end	= 0xc7fff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-+};
-+#endif
-+
-+static struct resource video_ram_resource = {
-+	.name	= "Video RAM area",
-+	.start	= 0xa0000,
-+	.end	= 0xbffff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
-+};
-+
-+static struct resource standard_io_resources[] = { {
-+	.name	= "dma1",
-+	.start	= 0x0000,
-+	.end	= 0x001f,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "pic1",
-+	.start	= 0x0020,
-+	.end	= 0x0021,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name   = "timer0",
-+	.start	= 0x0040,
-+	.end    = 0x0043,
-+	.flags  = IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name   = "timer1",
-+	.start  = 0x0050,
-+	.end    = 0x0053,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "keyboard",
-+	.start	= 0x0060,
-+	.end	= 0x006f,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "dma page reg",
-+	.start	= 0x0080,
-+	.end	= 0x008f,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "pic2",
-+	.start	= 0x00a0,
-+	.end	= 0x00a1,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "dma2",
-+	.start	= 0x00c0,
-+	.end	= 0x00df,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+}, {
-+	.name	= "fpu",
-+	.start	= 0x00f0,
-+	.end	= 0x00ff,
-+	.flags	= IORESOURCE_BUSY | IORESOURCE_IO
-+} };
-+
-+#define STANDARD_IO_RESOURCES \
-+	(sizeof standard_io_resources / sizeof standard_io_resources[0])
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-+
-+static int __init romchecksum(unsigned char *rom, unsigned long length)
-+{
-+	unsigned char *p, sum = 0;
-+
-+	for (p = rom; p < rom + length; p++)
-+		sum += *p;
-+	return sum == 0;
-+}
-+
-+static void __init probe_roms(void)
-+{
-+	unsigned long start, length, upper;
-+	unsigned char *rom;
-+	int	      i;
-+
-+	/* Nothing to do if not running in dom0. */
-+	if (!(xen_start_info->flags & SIF_INITDOMAIN))
-+		return;
-+
-+	/* video rom */
-+	upper = adapter_rom_resources[0].start;
-+	for (start = video_rom_resource.start; start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
-+
-+		video_rom_resource.start = start;
-+
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
-+
-+		/* if checksum okay, trust length byte */
-+		if (length && romchecksum(rom, length))
-+			video_rom_resource.end = start + length - 1;
-+
-+		request_resource(&iomem_resource, &video_rom_resource);
-+		break;
-+	}
-+
-+	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-+	if (start < upper)
-+		start = upper;
-+
-+	/* system rom */
-+	request_resource(&iomem_resource, &system_rom_resource);
-+	upper = system_rom_resource.start;
-+
-+	/* check for extension rom (ignore length byte!) */
-+	rom = isa_bus_to_virt(extension_rom_resource.start);
-+	if (romsignature(rom)) {
-+		length = extension_rom_resource.end - extension_rom_resource.start + 1;
-+		if (romchecksum(rom, length)) {
-+			request_resource(&iomem_resource, &extension_rom_resource);
-+			upper = extension_rom_resource.start;
-+		}
-+	}
-+
-+	/* check for adapter roms on 2k boundaries */
-+	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
-+
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
-+
-+		/* but accept any length that fits if checksum okay */
-+		if (!length || start + length > upper || !romchecksum(rom, length))
-+			continue;
-+
-+		adapter_rom_resources[i].start = start;
-+		adapter_rom_resources[i].end = start + length - 1;
-+		request_resource(&iomem_resource, &adapter_rom_resources[i]);
-+
-+		start = adapter_rom_resources[i++].end & ~2047UL;
-+	}
-+}
-+#endif
-+
-+/*
-+ * Point at the empty zero page to start with. We map the real shared_info
-+ * page as soon as fixmap is up and running.
-+ */
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
-+
-+unsigned long *phys_to_machine_mapping;
-+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
-+EXPORT_SYMBOL(phys_to_machine_mapping);
-+
-+/* Raw start-of-day parameters from the hypervisor. */
-+start_info_t *xen_start_info;
-+EXPORT_SYMBOL(xen_start_info);
-+
-+static void __init limit_regions(unsigned long long size)
-+{
-+	unsigned long long current_addr = 0;
-+	int i;
-+
-+	if (efi_enabled) {
-+		for (i = 0; i < memmap.nr_map; i++) {
-+			current_addr = memmap.map[i].phys_addr +
-+				       (memmap.map[i].num_pages << 12);
-+			if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
-+				if (current_addr >= size) {
-+					memmap.map[i].num_pages -=
-+						(((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
-+					memmap.nr_map = i + 1;
-+					return;
-+				}
-+			}
-+		}
-+	}
-+	for (i = 0; i < e820.nr_map; i++) {
-+		if (e820.map[i].type == E820_RAM) {
-+			current_addr = e820.map[i].addr + e820.map[i].size;
-+			if (current_addr >= size) {
-+				e820.map[i].size -= current_addr-size;
-+				e820.nr_map = i + 1;
-+				return;
-+			}
-+		}
-+	}
-+}
-+
-+static void __init add_memory_region(unsigned long long start,
-+                                  unsigned long long size, int type)
-+{
-+	int x;
-+
-+	if (!efi_enabled) {
-+       		x = e820.nr_map;
-+
-+		if (x == E820MAX) {
-+		    printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-+		    return;
-+		}
-+
-+		e820.map[x].addr = start;
-+		e820.map[x].size = size;
-+		e820.map[x].type = type;
-+		e820.nr_map++;
-+	}
-+} /* add_memory_region */
-+
-+#define E820_DEBUG	1
-+
-+static void __init print_memory_map(char *who)
-+{
-+	int i;
-+
-+	for (i = 0; i < e820.nr_map; i++) {
-+		printk(" %s: %016Lx - %016Lx ", who,
-+			e820.map[i].addr,
-+			e820.map[i].addr + e820.map[i].size);
-+		switch (e820.map[i].type) {
-+		case E820_RAM:	printk("(usable)\n");
-+				break;
-+		case E820_RESERVED:
-+				printk("(reserved)\n");
-+				break;
-+		case E820_ACPI:
-+				printk("(ACPI data)\n");
-+				break;
-+		case E820_NVS:
-+				printk("(ACPI NVS)\n");
-+				break;
-+		default:	printk("type %lu\n", e820.map[i].type);
-+				break;
-+		}
-+	}
-+}
-+
-+#if 0
-+/*
-+ * Sanitize the BIOS e820 map.
-+ *
-+ * Some e820 responses include overlapping entries.  The following 
-+ * replaces the original e820 map with a new one, removing overlaps.
-+ *
-+ */
-+struct change_member {
-+	struct e820entry *pbios; /* pointer to original bios entry */
-+	unsigned long long addr; /* address for this change point */
-+};
-+static struct change_member change_point_list[2*E820MAX] __initdata;
-+static struct change_member *change_point[2*E820MAX] __initdata;
-+static struct e820entry *overlap_list[E820MAX] __initdata;
-+static struct e820entry new_bios[E820MAX] __initdata;
-+
-+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-+{
-+	struct change_member *change_tmp;
-+	unsigned long current_type, last_type;
-+	unsigned long long last_addr;
-+	int chgidx, still_changing;
-+	int overlap_entries;
-+	int new_bios_entry;
-+	int old_nr, new_nr, chg_nr;
-+	int i;
-+
-+	/*
-+		Visually we're performing the following (1,2,3,4 = memory types)...
-+
-+		Sample memory map (w/overlaps):
-+		   ____22__________________
-+		   ______________________4_
-+		   ____1111________________
-+		   _44_____________________
-+		   11111111________________
-+		   ____________________33__
-+		   ___________44___________
-+		   __________33333_________
-+		   ______________22________
-+		   ___________________2222_
-+		   _________111111111______
-+		   _____________________11_
-+		   _________________4______
-+
-+		Sanitized equivalent (no overlap):
-+		   1_______________________
-+		   _44_____________________
-+		   ___1____________________
-+		   ____22__________________
-+		   ______11________________
-+		   _________1______________
-+		   __________3_____________
-+		   ___________44___________
-+		   _____________33_________
-+		   _______________2________
-+		   ________________1_______
-+		   _________________4______
-+		   ___________________2____
-+		   ____________________33__
-+		   ______________________4_
-+	*/
-+
-+	/* if there's only one memory region, don't bother */
-+	if (*pnr_map < 2)
-+		return -1;
-+
-+	old_nr = *pnr_map;
-+
-+	/* bail out if we find any unreasonable addresses in bios map */
-+	for (i=0; i<old_nr; i++)
-+		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-+			return -1;
-+
-+	/* create pointers for initial change-point information (for sorting) */
-+	for (i=0; i < 2*old_nr; i++)
-+		change_point[i] = &change_point_list[i];
-+
-+	/* record all known change-points (starting and ending addresses),
-+	   omitting those that are for empty memory regions */
-+	chgidx = 0;
-+	for (i=0; i < old_nr; i++)	{
-+		if (biosmap[i].size != 0) {
-+			change_point[chgidx]->addr = biosmap[i].addr;
-+			change_point[chgidx++]->pbios = &biosmap[i];
-+			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-+			change_point[chgidx++]->pbios = &biosmap[i];
-+		}
-+	}
-+	chg_nr = chgidx;    	/* true number of change-points */
-+
-+	/* sort change-point list by memory addresses (low -> high) */
-+	still_changing = 1;
-+	while (still_changing)	{
-+		still_changing = 0;
-+		for (i=1; i < chg_nr; i++)  {
-+			/* if <current_addr> > <last_addr>, swap */
-+			/* or, if current=<start_addr> & last=<end_addr>, swap */
-+			if ((change_point[i]->addr < change_point[i-1]->addr) ||
-+				((change_point[i]->addr == change_point[i-1]->addr) &&
-+				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
-+				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-+			   )
-+			{
-+				change_tmp = change_point[i];
-+				change_point[i] = change_point[i-1];
-+				change_point[i-1] = change_tmp;
-+				still_changing=1;
-+			}
-+		}
-+	}
-+
-+	/* create a new bios memory map, removing overlaps */
-+	overlap_entries=0;	 /* number of entries in the overlap table */
-+	new_bios_entry=0;	 /* index for creating new bios map entries */
-+	last_type = 0;		 /* start with undefined memory type */
-+	last_addr = 0;		 /* start with 0 as last starting address */
-+	/* loop through change-points, determining effect on the new bios map */
-+	for (chgidx=0; chgidx < chg_nr; chgidx++)
-+	{
-+		/* keep track of all overlapping bios entries */
-+		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-+		{
-+			/* add map entry to overlap list (> 1 entry implies an overlap) */
-+			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-+		}
-+		else
-+		{
-+			/* remove entry from list (order independent, so swap with last) */
-+			for (i=0; i<overlap_entries; i++)
-+			{
-+				if (overlap_list[i] == change_point[chgidx]->pbios)
-+					overlap_list[i] = overlap_list[overlap_entries-1];
-+			}
-+			overlap_entries--;
-+		}
-+		/* if there are overlapping entries, decide which "type" to use */
-+		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-+		current_type = 0;
-+		for (i=0; i<overlap_entries; i++)
-+			if (overlap_list[i]->type > current_type)
-+				current_type = overlap_list[i]->type;
-+		/* continue building up new bios map based on this information */
-+		if (current_type != last_type)	{
-+			if (last_type != 0)	 {
-+				new_bios[new_bios_entry].size =
-+					change_point[chgidx]->addr - last_addr;
-+				/* move forward only if the new size was non-zero */
-+				if (new_bios[new_bios_entry].size != 0)
-+					if (++new_bios_entry >= E820MAX)
-+						break; 	/* no more space left for new bios entries */
-+			}
-+			if (current_type != 0)	{
-+				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-+				new_bios[new_bios_entry].type = current_type;
-+				last_addr=change_point[chgidx]->addr;
-+			}
-+			last_type = current_type;
-+		}
-+	}
-+	new_nr = new_bios_entry;   /* retain count for new bios entries */
-+
-+	/* copy new bios mapping into original location */
-+	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-+	*pnr_map = new_nr;
-+
-+	return 0;
-+}
-+
-+/*
-+ * Copy the BIOS e820 map into a safe place.
-+ *
-+ * Sanity-check it while we're at it..
-+ *
-+ * If we're lucky and live on a modern system, the setup code
-+ * will have given us a memory map that we can use to properly
-+ * set up memory.  If we aren't, we'll fake a memory map.
-+ *
-+ * We check to see that the memory map contains at least 2 elements
-+ * before we'll use it, because the detection code in setup.S may
-+ * not be perfect and most every PC known to man has two memory
-+ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
-+ * thinkpad 560x, for example, does not cooperate with the memory
-+ * detection code.)
-+ */
-+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-+{
-+	/* Only one memory region (or negative)? Ignore it */
-+	if (nr_map < 2)
-+		return -1;
-+
-+	do {
-+		unsigned long long start = biosmap->addr;
-+		unsigned long long size = biosmap->size;
-+		unsigned long long end = start + size;
-+		unsigned long type = biosmap->type;
-+
-+		/* Overflow in 64 bits? Ignore the memory map. */
-+		if (start > end)
-+			return -1;
-+
-+		/*
-+		 * Some BIOSes claim RAM in the 640k - 1M region.
-+		 * Not right. Fix it up.
-+		 */
-+		if (type == E820_RAM) {
-+			if (start < 0x100000ULL && end > 0xA0000ULL) {
-+				if (start < 0xA0000ULL)
-+					add_memory_region(start, 0xA0000ULL-start, type);
-+				if (end <= 0x100000ULL)
-+					continue;
-+				start = 0x100000ULL;
-+				size = end - start;
-+			}
-+		}
-+		add_memory_region(start, size, type);
-+	} while (biosmap++,--nr_map);
-+	return 0;
-+}
-+#endif
-+
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+struct edd edd;
-+#ifdef CONFIG_EDD_MODULE
-+EXPORT_SYMBOL(edd);
-+#endif
-+/**
-+ * copy_edd() - Copy the BIOS EDD information
-+ *              from boot_params into a safe place.
-+ *
-+ */
-+static inline void copy_edd(void)
-+{
-+     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
-+     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
-+     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
-+     edd.edd_info_nr = EDD_NR;
-+}
-+#else
-+static inline void copy_edd(void)
-+{
-+}
-+#endif
-+
-+/*
-+ * Do NOT EVER look at the BIOS memory size location.
-+ * It does not work on many machines.
-+ */
-+#define LOWMEMSIZE()	(0x9f000)
-+
-+static void __init parse_cmdline_early (char ** cmdline_p)
-+{
-+	char c = ' ', *to = command_line, *from = saved_command_line;
-+	int len = 0, max_cmdline;
-+	int userdef = 0;
-+
-+	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
-+		max_cmdline = COMMAND_LINE_SIZE;
-+	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
-+	/* Save unparsed command line copy for /proc/cmdline */
-+	saved_command_line[max_cmdline-1] = '\0';
-+
-+	for (;;) {
-+		if (c != ' ')
-+			goto next_char;
-+		/*
-+		 * "mem=nopentium" disables the 4MB page tables.
-+		 * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
-+		 * to <mem>, overriding the bios size.
-+		 * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
-+		 * <start> to <start>+<mem>, overriding the bios size.
-+		 *
-+		 * HPA tells me bootloaders need to parse mem=, so no new
-+		 * option should be mem=  [also see Documentation/i386/boot.txt]
-+		 */
-+		if (!memcmp(from, "mem=", 4)) {
-+			if (to != command_line)
-+				to--;
-+			if (!memcmp(from+4, "nopentium", 9)) {
-+				from += 9+4;
-+				clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
-+				disable_pse = 1;
-+			} else {
-+				/* If the user specifies memory size, we
-+				 * limit the BIOS-provided memory map to
-+				 * that size. exactmap can be used to specify
-+				 * the exact map. mem=number can be used to
-+				 * trim the existing memory map.
-+				 */
-+				unsigned long long mem_size;
-+ 
-+				mem_size = memparse(from+4, &from);
-+#if 0
-+				limit_regions(mem_size);
-+				userdef=1;
-+#else
-+				xen_override_max_pfn =
-+					(unsigned long)(mem_size>>PAGE_SHIFT);
-+#endif
-+			}
-+		}
-+
-+		else if (!memcmp(from, "memmap=", 7)) {
-+			if (to != command_line)
-+				to--;
-+			if (!memcmp(from+7, "exactmap", 8)) {
-+				from += 8+7;
-+				e820.nr_map = 0;
-+				userdef = 1;
-+			} else {
-+				/* If the user specifies memory size, we
-+				 * limit the BIOS-provided memory map to
-+				 * that size. exactmap can be used to specify
-+				 * the exact map. mem=number can be used to
-+				 * trim the existing memory map.
-+				 */
-+				unsigned long long start_at, mem_size;
-+ 
-+				mem_size = memparse(from+7, &from);
-+				if (*from == '@') {
-+					start_at = memparse(from+1, &from);
-+					add_memory_region(start_at, mem_size, E820_RAM);
-+				} else if (*from == '#') {
-+					start_at = memparse(from+1, &from);
-+					add_memory_region(start_at, mem_size, E820_ACPI);
-+				} else if (*from == '$') {
-+					start_at = memparse(from+1, &from);
-+					add_memory_region(start_at, mem_size, E820_RESERVED);
-+				} else {
-+					limit_regions(mem_size);
-+					userdef=1;
-+				}
-+			}
-+		}
-+
-+		else if (!memcmp(from, "noexec=", 7))
-+			noexec_setup(from + 7);
-+
-+
-+#ifdef  CONFIG_X86_MPPARSE
-+		/*
-+		 * If the BIOS enumerates physical processors before logical,
-+		 * maxcpus=N at enumeration-time can be used to disable HT.
-+		 */
-+		else if (!memcmp(from, "maxcpus=", 8)) {
-+			extern unsigned int maxcpus;
-+
-+			maxcpus = simple_strtoul(from + 8, NULL, 0);
-+		}
-+#endif
-+
-+#ifdef CONFIG_ACPI_BOOT
-+		/* "acpi=off" disables both ACPI table parsing and interpreter */
-+		else if (!memcmp(from, "acpi=off", 8)) {
-+			disable_acpi();
-+		}
-+
-+		/* acpi=force to over-ride black-list */
-+		else if (!memcmp(from, "acpi=force", 10)) {
-+			acpi_force = 1;
-+			acpi_ht = 1;
-+			acpi_disabled = 0;
-+		}
-+
-+		/* acpi=strict disables out-of-spec workarounds */
-+		else if (!memcmp(from, "acpi=strict", 11)) {
-+			acpi_strict = 1;
-+		}
-+
-+		/* Limit ACPI just to boot-time to enable HT */
-+		else if (!memcmp(from, "acpi=ht", 7)) {
-+			if (!acpi_force)
-+				disable_acpi();
-+			acpi_ht = 1;
-+		}
-+		
-+		/* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
-+		else if (!memcmp(from, "pci=noacpi", 10)) {
-+			acpi_disable_pci();
-+		}
-+		/* "acpi=noirq" disables ACPI interrupt routing */
-+		else if (!memcmp(from, "acpi=noirq", 10)) {
-+			acpi_noirq_set();
-+		}
-+
-+		else if (!memcmp(from, "acpi_sci=edge", 13))
-+			acpi_sci_flags.trigger =  1;
-+
-+		else if (!memcmp(from, "acpi_sci=level", 14))
-+			acpi_sci_flags.trigger = 3;
-+
-+		else if (!memcmp(from, "acpi_sci=high", 13))
-+			acpi_sci_flags.polarity = 1;
-+
-+		else if (!memcmp(from, "acpi_sci=low", 12))
-+			acpi_sci_flags.polarity = 3;
-+
-+#ifdef CONFIG_X86_IO_APIC
-+		else if (!memcmp(from, "acpi_skip_timer_override", 24))
-+			acpi_skip_timer_override = 1;
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		/* disable IO-APIC */
-+		else if (!memcmp(from, "noapic", 6))
-+			disable_ioapic_setup();
-+#endif /* CONFIG_X86_LOCAL_APIC */
-+#endif /* CONFIG_ACPI_BOOT */
-+
-+		/*
-+		 * highmem=size forces highmem to be exactly 'size' bytes.
-+		 * This works even on boxes that have no highmem otherwise.
-+		 * This also works to reduce highmem size on bigger boxes.
-+		 */
-+		else if (!memcmp(from, "highmem=", 8))
-+			highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
-+	
-+		/*
-+		 * vmalloc=size forces the vmalloc area to be exactly 'size'
-+		 * bytes. This can be used to increase (or decrease) the
-+		 * vmalloc area - the default is 128m.
-+		 */
-+		else if (!memcmp(from, "vmalloc=", 8))
-+			__VMALLOC_RESERVE = memparse(from+8, &from);
-+
-+	next_char:
-+		c = *(from++);
-+		if (!c)
-+			break;
-+		if (COMMAND_LINE_SIZE <= ++len)
-+			break;
-+		*(to++) = c;
-+	}
-+	*to = '\0';
-+	*cmdline_p = command_line;
-+	if (userdef) {
-+		printk(KERN_INFO "user-defined physical RAM map:\n");
-+		print_memory_map("user");
-+	}
-+}
-+
-+#if 0 /* !XEN */
-+/*
-+ * Callback for efi_memory_walk.
-+ */
-+static int __init
-+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
-+{
-+	unsigned long *max_pfn = arg, pfn;
-+
-+	if (start < end) {
-+		pfn = PFN_UP(end -1);
-+		if (pfn > *max_pfn)
-+			*max_pfn = pfn;
-+	}
-+	return 0;
-+}
-+
-+
-+/*
-+ * Find the highest page frame number we have available
-+ */
-+void __init find_max_pfn(void)
-+{
-+	int i;
-+
-+	max_pfn = 0;
-+	if (efi_enabled) {
-+		efi_memmap_walk(efi_find_max_pfn, &max_pfn);
-+		return;
-+	}
-+
-+	for (i = 0; i < e820.nr_map; i++) {
-+		unsigned long start, end;
-+		/* RAM? */
-+		if (e820.map[i].type != E820_RAM)
-+			continue;
-+		start = PFN_UP(e820.map[i].addr);
-+		end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-+		if (start >= end)
-+			continue;
-+		if (end > max_pfn)
-+			max_pfn = end;
-+	}
-+}
-+#else
-+/* We don't use the fake e820 because we need to respond to user override. */
-+void __init find_max_pfn(void)
-+{
-+	if (xen_override_max_pfn == 0) {
-+		max_pfn = xen_start_info->nr_pages;
-+		/* Default 8MB slack (to balance backend allocations). */
-+		max_pfn += 8 << (20 - PAGE_SHIFT);
-+	} else if (xen_override_max_pfn > xen_start_info->nr_pages) {
-+		max_pfn = xen_override_max_pfn;
-+	} else {
-+		max_pfn = xen_start_info->nr_pages;
-+	}
-+}
-+#endif /* XEN */
-+
-+/*
-+ * Determine low and high memory ranges:
-+ */
-+unsigned long __init find_max_low_pfn(void)
-+{
-+	unsigned long max_low_pfn;
-+
-+	max_low_pfn = max_pfn;
-+	if (max_low_pfn > MAXMEM_PFN) {
-+		if (highmem_pages == -1)
-+			highmem_pages = max_pfn - MAXMEM_PFN;
-+		if (highmem_pages + MAXMEM_PFN < max_pfn)
-+			max_pfn = MAXMEM_PFN + highmem_pages;
-+		if (highmem_pages + MAXMEM_PFN > max_pfn) {
-+			printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
-+			highmem_pages = 0;
-+		}
-+		max_low_pfn = MAXMEM_PFN;
-+#ifndef CONFIG_HIGHMEM
-+		/* Maximum memory usable is what is directly addressable */
-+		printk(KERN_WARNING "Warning only %ldMB will be used.\n",
-+					MAXMEM>>20);
-+		if (max_pfn > MAX_NONPAE_PFN)
-+			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-+		else
-+			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-+		max_pfn = MAXMEM_PFN;
-+#else /* !CONFIG_HIGHMEM */
-+#ifndef CONFIG_X86_PAE
-+		if (max_pfn > MAX_NONPAE_PFN) {
-+			max_pfn = MAX_NONPAE_PFN;
-+			printk(KERN_WARNING "Warning only 4GB will be used.\n");
-+			printk(KERN_WARNING "Use a PAE enabled kernel.\n");
-+		}
-+#endif /* !CONFIG_X86_PAE */
-+#endif /* !CONFIG_HIGHMEM */
-+	} else {
-+		if (highmem_pages == -1)
-+			highmem_pages = 0;
-+#ifdef CONFIG_HIGHMEM
-+		if (highmem_pages >= max_pfn) {
-+			printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
-+			highmem_pages = 0;
-+		}
-+		if (highmem_pages) {
-+			if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
-+				printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
-+				highmem_pages = 0;
-+			}
-+			max_low_pfn -= highmem_pages;
-+		}
-+#else
-+		if (highmem_pages)
-+			printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
-+#endif
-+	}
-+	return max_low_pfn;
-+}
-+
-+/*
-+ * Free all available memory for boot time allocation.  Used
-+ * as a callback function by efi_memory_walk()
-+ */
-+
-+static int __init
-+free_available_memory(unsigned long start, unsigned long end, void *arg)
-+{
-+	/* check max_low_pfn */
-+	if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
-+		return 0;
-+	if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
-+		end = (max_low_pfn + 1) << PAGE_SHIFT;
-+	if (start < end)
-+		free_bootmem(start, end - start);
-+
-+	return 0;
-+}
-+/*
-+ * Register fully available low RAM pages with the bootmem allocator.
-+ */
-+static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
-+{
-+	int i;
-+
-+	if (efi_enabled) {
-+		efi_memmap_walk(free_available_memory, NULL);
-+		return;
-+	}
-+	for (i = 0; i < e820.nr_map; i++) {
-+		unsigned long curr_pfn, last_pfn, size;
-+		/*
-+		 * Reserve usable low memory
-+		 */
-+		if (e820.map[i].type != E820_RAM)
-+			continue;
-+		/*
-+		 * We are rounding up the start address of usable memory:
-+		 */
-+		curr_pfn = PFN_UP(e820.map[i].addr);
-+		if (curr_pfn >= max_low_pfn)
-+			continue;
-+		/*
-+		 * ... and at the end of the usable range downwards:
-+		 */
-+		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-+
-+		if (last_pfn > max_low_pfn)
-+			last_pfn = max_low_pfn;
-+
-+		/*
-+		 * .. finally, did all the rounding and playing
-+		 * around just make the area go away?
-+		 */
-+		if (last_pfn <= curr_pfn)
-+			continue;
-+
-+		size = last_pfn - curr_pfn;
-+		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
-+	}
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * workaround for Dell systems that neglect to reserve EBDA
-+ */
-+static void __init reserve_ebda_region(void)
-+{
-+	unsigned int addr;
-+	addr = get_bios_ebda();
-+	if (addr)
-+		reserve_bootmem(addr, PAGE_SIZE);	
-+}
-+#endif
-+
-+#ifndef CONFIG_DISCONTIGMEM
-+void __init setup_bootmem_allocator(void);
-+static unsigned long __init setup_memory(void)
-+{
-+	/*
-+	 * partially used pages are not usable - thus
-+	 * we are rounding upwards:
-+	 */
-+ 	min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
-+		xen_start_info->nr_pt_frames;
-+
-+	find_max_pfn();
-+
-+	max_low_pfn = find_max_low_pfn();
-+
-+#ifdef CONFIG_HIGHMEM
-+	highstart_pfn = highend_pfn = max_pfn;
-+	if (max_pfn > max_low_pfn) {
-+		highstart_pfn = max_low_pfn;
-+	}
-+	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
-+		pages_to_mb(highend_pfn - highstart_pfn));
-+#endif
-+	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
-+			pages_to_mb(max_low_pfn));
-+
-+	setup_bootmem_allocator();
-+
-+	return max_low_pfn;
-+}
-+
-+void __init zone_sizes_init(void)
-+{
-+	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
-+	unsigned int max_dma, low;
-+
-+	/*
-+	 * XEN: Our notion of "DMA memory" is fake when running over Xen.
-+	 * We simply put all RAM in the DMA zone so that those drivers which
-+	 * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
-+	 * Those drivers that *do* require lowmem are screwed anyway when
-+	 * running over Xen!
-+	 */
-+	max_dma = max_low_pfn;
-+	low = max_low_pfn;
-+
-+	if (low < max_dma)
-+		zones_size[ZONE_DMA] = low;
-+	else {
-+		zones_size[ZONE_DMA] = max_dma;
-+		zones_size[ZONE_NORMAL] = low - max_dma;
-+#ifdef CONFIG_HIGHMEM
-+		zones_size[ZONE_HIGHMEM] = highend_pfn - low;
-+#endif
-+	}
-+	free_area_init(zones_size);
-+}
-+#else
-+extern unsigned long setup_memory(void);
-+extern void zone_sizes_init(void);
-+#endif /* !CONFIG_DISCONTIGMEM */
-+
-+void __init setup_bootmem_allocator(void)
-+{
-+	unsigned long bootmap_size;
-+	/*
-+	 * Initialize the boot-time allocator (with low memory only):
-+	 */
-+	bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
-+
-+	register_bootmem_low_pages(max_low_pfn);
-+
-+	/*
-+	 * Reserve the bootmem bitmap itself as well. We do this in two
-+	 * steps (first step was init_bootmem()) because this catches
-+	 * the (very unlikely) case of us accidentally initializing the
-+	 * bootmem allocator with an invalid RAM area.
-+	 */
-+	reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(min_low_pfn) +
-+			 bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
-+
-+#ifndef CONFIG_XEN
-+	/*
-+	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-+	 * enabling clean reboots, SMP operation, laptop functions.
-+	 */
-+	reserve_bootmem(0, PAGE_SIZE);
-+
-+	/* reserve EBDA region, it's a 4K region */
-+	reserve_ebda_region();
-+
-+    /* could be an AMD 768MPX chipset. Reserve a page  before VGA to prevent
-+       PCI prefetch into it (errata #56). Usually the page is reserved anyways,
-+       unless you have no PS/2 mouse plugged in. */
-+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+	    boot_cpu_data.x86 == 6)
-+	     reserve_bootmem(0xa0000 - 4096, 4096);
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * But first pinch a few for the stack/trampoline stuff
-+	 * FIXME: Don't need the extra page at 4K, but need to fix
-+	 * trampoline before removing it. (see the GDT stuff)
-+	 */
-+	reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
-+#endif
-+#ifdef CONFIG_ACPI_SLEEP
-+	/*
-+	 * Reserve low memory region for sleep support.
-+	 */
-+	acpi_reserve_bootmem();
-+#endif
-+#endif /* !CONFIG_XEN */
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (xen_start_info->mod_start) {
-+		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
-+			/*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
-+			initrd_start = INITRD_START + PAGE_OFFSET;
-+			initrd_end = initrd_start+INITRD_SIZE;
-+			initrd_below_start_ok = 1;
-+		}
-+		else {
-+			printk(KERN_ERR "initrd extends beyond end of memory "
-+			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+			    INITRD_START + INITRD_SIZE,
-+			    max_low_pfn << PAGE_SHIFT);
-+			initrd_start = 0;
-+		}
-+	}
-+#endif
-+
-+	phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;
-+}
-+
-+/*
-+ * The node 0 pgdat is initialized before all of these because
-+ * it's needed for bootmem.  node>0 pgdats have their virtual
-+ * space allocated before the pagetables are in place to access
-+ * them, so they can't be cleared then.
-+ *
-+ * This should all compile down to nothing when NUMA is off.
-+ */
-+void __init remapped_pgdat_init(void)
-+{
-+	int nid;
-+
-+	for_each_online_node(nid) {
-+		if (nid != 0)
-+			memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
-+	}
-+}
-+
-+/*
-+ * Request address space for all standard RAM and ROM resources
-+ * and also for regions reported as reserved by the e820.
-+ */
-+static void __init
-+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
-+{
-+	int i;
-+#ifdef CONFIG_XEN
-+	dom0_op_t op;
-+	struct dom0_memory_map_entry *map;
-+	unsigned long gapstart, gapsize;
-+	unsigned long long last;
-+#endif
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+	probe_roms();
-+#endif
-+
-+#ifdef CONFIG_XEN
-+	map = alloc_bootmem_low_pages(PAGE_SIZE);
-+	op.cmd = DOM0_PHYSICAL_MEMORY_MAP;
-+	op.u.physical_memory_map.memory_map = map;
-+	op.u.physical_memory_map.max_map_entries =
-+		PAGE_SIZE / sizeof(struct dom0_memory_map_entry);
-+	BUG_ON(HYPERVISOR_dom0_op(&op));
-+
-+	last = 0x100000000ULL;
-+	gapstart = 0x10000000;
-+	gapsize = 0x400000;
-+
-+	for (i = op.u.physical_memory_map.nr_map_entries - 1; i >= 0; i--) {
-+		struct resource *res;
-+
-+		if ((last > map[i].end) && ((last - map[i].end) > gapsize)) {
-+			gapsize = last - map[i].end;
-+			gapstart = map[i].end;
-+		}
-+		if (map[i].start < last)
-+			last = map[i].start;
-+
-+		if (map[i].end > 0x100000000ULL)
-+			continue;
-+		res = alloc_bootmem_low(sizeof(struct resource));
-+		res->name = map[i].is_ram ? "System RAM" : "reserved";
-+		res->start = map[i].start;
-+		res->end = map[i].end - 1;
-+		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+		request_resource(&iomem_resource, res);
-+	}
-+
-+	free_bootmem(__pa(map), PAGE_SIZE);
-+
-+	/*
-+	 * Start allocating dynamic PCI memory a bit into the gap,
-+	 * aligned up to the nearest megabyte.
-+	 *
-+	 * Question: should we try to pad it up a bit (do something
-+	 * like " + (gapsize >> 3)" in there too?). We now have the
-+	 * technology.
-+	 */
-+	pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
-+
-+	printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
-+		pci_mem_start, gapstart, gapsize);
-+#else
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct resource *res;
-+		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
-+			continue;
-+		res = alloc_bootmem_low(sizeof(struct resource));
-+		switch (e820.map[i].type) {
-+		case E820_RAM:	res->name = "System RAM"; break;
-+		case E820_ACPI:	res->name = "ACPI Tables"; break;
-+		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
-+		default:	res->name = "reserved";
-+		}
-+		res->start = e820.map[i].addr;
-+		res->end = res->start + e820.map[i].size - 1;
-+		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+		request_resource(&iomem_resource, res);
-+		if (e820.map[i].type == E820_RAM) {
-+			/*
-+			 *  We don't know which RAM region contains kernel data,
-+			 *  so we try it repeatedly and let the resource manager
-+			 *  test it.
-+			 */
-+			request_resource(res, code_resource);
-+			request_resource(res, data_resource);
-+		}
-+	}
-+#endif
-+}
-+
-+/*
-+ * Request address space for all standard resources
-+ */
-+static void __init register_memory(void)
-+{
-+#ifndef CONFIG_XEN
-+	unsigned long gapstart, gapsize;
-+	unsigned long long last;
-+#endif
-+	int	      i;
-+
-+	/* Nothing to do if not running in dom0. */
-+	if (!(xen_start_info->flags & SIF_INITDOMAIN))
-+		return;
-+
-+	if (efi_enabled)
-+		efi_initialize_iomem_resources(&code_resource, &data_resource);
-+	else
-+		legacy_init_iomem_resources(&code_resource, &data_resource);
-+
-+	/* EFI systems may still have VGA */
-+	request_resource(&iomem_resource, &video_ram_resource);
-+
-+	/* request I/O space for devices used on all i[345]86 PCs */
-+	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-+		request_resource(&ioport_resource, &standard_io_resources[i]);
-+
-+#ifndef CONFIG_XEN
-+	/*
-+	 * Search for the biggest gap in the low 32 bits of the e820
-+	 * memory space.
-+	 */
-+	last = 0x100000000ull;
-+	gapstart = 0x10000000;
-+	gapsize = 0x400000;
-+	i = e820.nr_map;
-+	while (--i >= 0) {
-+		unsigned long long start = e820.map[i].addr;
-+		unsigned long long end = start + e820.map[i].size;
-+
-+		/*
-+		 * Since "last" is at most 4GB, we know we'll
-+		 * fit in 32 bits if this condition is true
-+		 */
-+		if (last > end) {
-+			unsigned long gap = last - end;
-+
-+			if (gap > gapsize) {
-+				gapsize = gap;
-+				gapstart = end;
-+			}
-+		}
-+		if (start < last)
-+			last = start;
-+	}
-+
-+	/*
-+	 * Start allocating dynamic PCI memory a bit into the gap,
-+	 * aligned up to the nearest megabyte.
-+	 *
-+	 * Question: should we try to pad it up a bit (do something
-+	 * like " + (gapsize >> 3)" in there too?). We now have the
-+	 * technology.
-+	 */
-+	pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
-+
-+	printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
-+		pci_mem_start, gapstart, gapsize);
-+#endif
-+}
-+
-+/* Use inline assembly to define this because the nops are defined 
-+   as inline assembly strings in the include files and we cannot 
-+   get them easily into strings. */
-+asm("\t.data\nintelnops: " 
-+    GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
-+    GENERIC_NOP7 GENERIC_NOP8); 
-+asm("\t.data\nk8nops: " 
-+    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-+    K8_NOP7 K8_NOP8); 
-+asm("\t.data\nk7nops: " 
-+    K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
-+    K7_NOP7 K7_NOP8); 
-+    
-+extern unsigned char intelnops[], k8nops[], k7nops[];
-+static unsigned char *intel_nops[ASM_NOP_MAX+1] = { 
-+     NULL,
-+     intelnops,
-+     intelnops + 1,
-+     intelnops + 1 + 2,
-+     intelnops + 1 + 2 + 3,
-+     intelnops + 1 + 2 + 3 + 4,
-+     intelnops + 1 + 2 + 3 + 4 + 5,
-+     intelnops + 1 + 2 + 3 + 4 + 5 + 6,
-+     intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-+}; 
-+static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 
-+     NULL,
-+     k8nops,
-+     k8nops + 1,
-+     k8nops + 1 + 2,
-+     k8nops + 1 + 2 + 3,
-+     k8nops + 1 + 2 + 3 + 4,
-+     k8nops + 1 + 2 + 3 + 4 + 5,
-+     k8nops + 1 + 2 + 3 + 4 + 5 + 6,
-+     k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-+}; 
-+static unsigned char *k7_nops[ASM_NOP_MAX+1] = { 
-+     NULL,
-+     k7nops,
-+     k7nops + 1,
-+     k7nops + 1 + 2,
-+     k7nops + 1 + 2 + 3,
-+     k7nops + 1 + 2 + 3 + 4,
-+     k7nops + 1 + 2 + 3 + 4 + 5,
-+     k7nops + 1 + 2 + 3 + 4 + 5 + 6,
-+     k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-+}; 
-+static struct nop { 
-+     int cpuid; 
-+     unsigned char **noptable; 
-+} noptypes[] = { 
-+     { X86_FEATURE_K8, k8_nops }, 
-+     { X86_FEATURE_K7, k7_nops }, 
-+     { -1, NULL }
-+}; 
-+
-+/* Replace instructions with better alternatives for this CPU type.
-+
-+   This runs before SMP is initialized to avoid SMP problems with
-+   self-modifying code. This implies that asymmetric systems where
-+   APs have fewer capabilities than the boot processor are not handled.
-+   In this case boot with "noreplacement". */ 
-+void apply_alternatives(void *start, void *end) 
-+{ 
-+	struct alt_instr *a; 
-+	int diff, i, k;
-+        unsigned char **noptable = intel_nops; 
-+	for (i = 0; noptypes[i].cpuid >= 0; i++) { 
-+		if (boot_cpu_has(noptypes[i].cpuid)) { 
-+			noptable = noptypes[i].noptable;
-+			break;
-+		}
-+	} 
-+	for (a = start; (void *)a < end; a++) { 
-+		if (!boot_cpu_has(a->cpuid))
-+			continue;
-+		BUG_ON(a->replacementlen > a->instrlen); 
-+		memcpy(a->instr, a->replacement, a->replacementlen); 
-+		diff = a->instrlen - a->replacementlen; 
-+		/* Pad the rest with nops */
-+		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-+			k = diff;
-+			if (k > ASM_NOP_MAX)
-+				k = ASM_NOP_MAX;
-+			memcpy(a->instr + i, noptable[k], k); 
-+		} 
-+	}
-+} 
-+
-+static int no_replacement __initdata = 0; 
-+ 
-+void __init alternative_instructions(void)
-+{
-+	extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-+	if (no_replacement) 
-+		return;
-+	apply_alternatives(__alt_instructions, __alt_instructions_end);
-+}
-+
-+static int __init noreplacement_setup(char *s)
-+{ 
-+     no_replacement = 1; 
-+     return 0; 
-+} 
-+
-+__setup("noreplacement", noreplacement_setup); 
-+
-+static char * __init machine_specific_memory_setup(void);
-+
-+#ifdef CONFIG_MCA
-+static void set_mca_bus(int x)
-+{
-+	MCA_bus = x;
-+}
-+#else
-+static void set_mca_bus(int x) { }
-+#endif
-+
-+/*
-+ * Determine if we were loaded by an EFI loader.  If so, then we have also been
-+ * passed the efi memmap, systab, etc., so we should use these data structures
-+ * for initialization.  Note, the efi init code path is determined by the
-+ * global efi_enabled. This allows the same kernel image to be used on existing
-+ * systems (with a traditional BIOS) as well as on EFI systems.
-+ */
-+void __init setup_arch(char **cmdline_p)
-+{
-+	int i, j, k, fpp;
-+	physdev_op_t op;
-+	unsigned long max_low_pfn;
-+
-+	/* Force a quick death if the kernel panics. */
-+	extern int panic_timeout;
-+	if (panic_timeout == 0)
-+		panic_timeout = 1;
-+
-+	/* Register a call for panic conditions. */
-+	notifier_chain_register(&panic_notifier_list, &xen_panic_block);
-+
-+	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
-+	HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+			     VMASST_TYPE_writable_pagetables);
-+
-+	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
-+	early_cpu_init();
-+
-+	/*
-+	 * FIXME: This isn't an official loader_type right
-+	 * now but does currently work with elilo.
-+	 * If we were configured as an EFI kernel, check to make
-+	 * sure that we were loaded correctly from elilo and that
-+	 * the system table is valid.  If not, then initialize normally.
-+	 */
-+#ifdef CONFIG_EFI
-+	if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
-+		efi_enabled = 1;
-+#endif
-+
-+	/* This must be initialized to UNNAMED_MAJOR for ipconfig to work
-+	   properly.  Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
-+	*/
-+	ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
-+ 	drive_info = DRIVE_INFO;
-+ 	screen_info = SCREEN_INFO;
-+	edid_info = EDID_INFO;
-+	apm_info.bios = APM_BIOS_INFO;
-+	ist_info = IST_INFO;
-+	saved_videomode = VIDEO_MODE;
-+	if( SYS_DESC_TABLE.length != 0 ) {
-+		set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
-+		machine_id = SYS_DESC_TABLE.table[0];
-+		machine_submodel_id = SYS_DESC_TABLE.table[1];
-+		BIOS_revision = SYS_DESC_TABLE.table[2];
-+	}
-+	bootloader_type = LOADER_TYPE;
-+
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+	/* This is drawn from a dump from vgacon:startup in standard Linux. */
-+	screen_info.orig_video_mode = 3; 
-+	screen_info.orig_video_isVGA = 1;
-+	screen_info.orig_video_lines = 25;
-+	screen_info.orig_video_cols = 80;
-+	screen_info.orig_video_ega_bx = 3;
-+	screen_info.orig_video_points = 16;
-+#endif
-+
-+#ifdef CONFIG_BLK_DEV_RAM
-+	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+#endif
-+
-+	setup_xen_features();
-+
-+	ARCH_SETUP
-+	if (efi_enabled)
-+		efi_init();
-+	else {
-+		printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+		print_memory_map(machine_specific_memory_setup());
-+	}
-+
-+	copy_edd();
-+
-+	if (!MOUNT_ROOT_RDONLY)
-+		root_mountflags &= ~MS_RDONLY;
-+	init_mm.start_code = (unsigned long) _text;
-+	init_mm.end_code = (unsigned long) _etext;
-+	init_mm.end_data = (unsigned long) _edata;
-+	init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
-+		       xen_start_info->nr_pt_frames) << PAGE_SHIFT;
-+
-+	/* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
-+	/*code_resource.start = virt_to_phys(_text);*/
-+	/*code_resource.end = virt_to_phys(_etext)-1;*/
-+	/*data_resource.start = virt_to_phys(_etext);*/
-+	/*data_resource.end = virt_to_phys(_edata)-1;*/
-+
-+	parse_cmdline_early(cmdline_p);
-+
-+	max_low_pfn = setup_memory();
-+
-+	/*
-+	 * NOTE: before this point _nobody_ is allowed to allocate
-+	 * any memory using the bootmem allocator.  Although the
-+	 * allocator is now initialised, only the first 8MB of the kernel
-+	 * virtual address space has been mapped.  All allocations before
-+	 * paging_init() has completed must use the alloc_bootmem_low_pages()
-+	 * variant (which allocates DMA'able memory) and care must be taken
-+	 * not to exceed the 8MB limit.
-+	 */
-+
-+#ifdef CONFIG_SMP
-+	smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
-+#endif
-+	paging_init();
-+	remapped_pgdat_init();
-+	zone_sizes_init();
-+
-+#ifdef CONFIG_X86_FIND_SMP_CONFIG
-+	/*
-+	 * Find and reserve possible boot-time SMP configuration:
-+	 */
-+	find_smp_config();
-+#endif
-+
-+	/* Make sure we have a correctly sized P->M table. */
-+	phys_to_machine_mapping = alloc_bootmem_low_pages(
-+		max_pfn * sizeof(unsigned long));
-+	memset(phys_to_machine_mapping, ~0,
-+		max_pfn * sizeof(unsigned long));
-+	memcpy(phys_to_machine_mapping,
-+		(unsigned long *)xen_start_info->mfn_list,
-+		xen_start_info->nr_pages * sizeof(unsigned long));
-+	free_bootmem(
-+		__pa(xen_start_info->mfn_list), 
-+		PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-+		sizeof(unsigned long))));
-+
-+	/* 
-+	 * Initialise the list of the frames that specify the list of 
-+	 * frames that make up the p2m table. Used by save/restore
-+	 */
-+	pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
-+	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+	  virt_to_mfn(pfn_to_mfn_frame_list_list);
-+	       
-+	fpp = PAGE_SIZE/sizeof(unsigned long);
-+	for ( i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++ )
-+	{
-+	    if ( (j % fpp) == 0 )
-+	    {
-+	        k++;
-+		BUG_ON(k>=16);
-+		pfn_to_mfn_frame_list[k] = alloc_bootmem_low_pages(PAGE_SIZE);
-+		pfn_to_mfn_frame_list_list[k] = 
-+		    virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+		j=0;
-+	    }
-+	    pfn_to_mfn_frame_list[k][j] = 
-+	        virt_to_mfn(&phys_to_machine_mapping[i]);
-+	}
-+	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
-+
-+	/*
-+	 * NOTE: at this point the bootmem allocator is fully available.
-+	 */
-+
-+#ifdef CONFIG_EARLY_PRINTK
-+	{
-+		char *s = strstr(*cmdline_p, "earlyprintk=");
-+		if (s) {
-+			extern void setup_early_printk(char *);
-+
-+			setup_early_printk(s);
-+			printk("early console enabled\n");
-+		}
-+	}
-+#endif
-+
-+	if (xen_start_info->flags & SIF_INITDOMAIN)
-+		dmi_scan_machine();
-+
-+#ifdef CONFIG_X86_GENERICARCH
-+	generic_apic_probe(*cmdline_p);
-+#endif	
-+	if (efi_enabled)
-+		efi_map_memmap();
-+
-+	op.cmd             = PHYSDEVOP_SET_IOPL;
-+	op.u.set_iopl.iopl = 1;
-+	HYPERVISOR_physdev_op(&op);
-+
-+#ifdef CONFIG_ACPI_BOOT
-+	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
-+		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
-+		acpi_disabled = 1;
-+		acpi_ht = 0;
-+	}
-+#endif
-+
-+#ifdef CONFIG_ACPI_BOOT
-+	/*
-+	 * Parse the ACPI tables for possible boot-time SMP configuration.
-+	 */
-+	acpi_boot_table_init();
-+	acpi_boot_init();
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	if (smp_found_config)
-+		get_smp_config();
-+#endif
-+
-+	/* XXX Disable irqdebug until we have a way to avoid interrupt
-+	 * conflicts. */
-+	noirqdebug_setup("");
-+
-+	register_memory();
-+
-+	if (xen_start_info->flags & SIF_INITDOMAIN) {
-+		if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+			panic("Xen granted us console access "
-+			      "but not privileged status");
-+
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+		if (!efi_enabled ||
-+		    (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
-+			conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+		conswitchp = &dummy_con;
-+#endif
-+#endif
-+	} else {
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+		extern const struct consw xennull_con;
-+		extern int console_use_vt;
-+#if defined(CONFIG_VGA_CONSOLE)
-+		/* disable VGA driver */
-+		ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
-+#endif
-+		conswitchp = &xennull_con;
-+		console_use_vt = 0;
-+#endif
-+	}
-+}
-+
-+static int
-+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
-+{
-+	HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_crash);
-+	/* we're never actually going to get here... */
-+	return NOTIFY_DONE;
-+}
-+
-+#include "setup_arch_post.h"
-+/*
-+ * Local Variables:
-+ * mode:c
-+ * c-file-style:"k&r"
-+ * c-basic-offset:8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/signal.c linux-2.6.12-xen/arch/xen/i386/kernel/signal.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/signal.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/signal.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,665 @@
-+/*
-+ *  linux/arch/i386/kernel/signal.c
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *
-+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
-+ *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel.h>
-+#include <linux/signal.h>
-+#include <linux/errno.h>
-+#include <linux/wait.h>
-+#include <linux/unistd.h>
-+#include <linux/stddef.h>
-+#include <linux/personality.h>
-+#include <linux/suspend.h>
-+#include <linux/ptrace.h>
-+#include <linux/elf.h>
-+#include <asm/processor.h>
-+#include <asm/ucontext.h>
-+#include <asm/uaccess.h>
-+#include <asm/i387.h>
-+#include "sigframe.h"
-+
-+#define DEBUG_SIG 0
-+
-+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-+
-+/*
-+ * Atomically swap in the new signal mask, and wait for a signal.
-+ */
-+asmlinkage int
-+sys_sigsuspend(int history0, int history1, old_sigset_t mask)
-+{
-+	struct pt_regs * regs = (struct pt_regs *) &history0;
-+	sigset_t saveset;
-+
-+	mask &= _BLOCKABLE;
-+	spin_lock_irq(&current->sighand->siglock);
-+	saveset = current->blocked;
-+	siginitset(&current->blocked, mask);
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
-+
-+	regs->eax = -EINTR;
-+	while (1) {
-+		current->state = TASK_INTERRUPTIBLE;
-+		schedule();
-+		if (do_signal(regs, &saveset))
-+			return -EINTR;
-+	}
-+}
-+
-+asmlinkage int
-+sys_rt_sigsuspend(struct pt_regs regs)
-+{
-+	sigset_t saveset, newset;
-+
-+	/* XXX: Don't preclude handling different sized sigset_t's.  */
-+	if (regs.ecx != sizeof(sigset_t))
-+		return -EINVAL;
-+
-+	if (copy_from_user(&newset, (sigset_t __user *)regs.ebx, sizeof(newset)))
-+		return -EFAULT;
-+	sigdelsetmask(&newset, ~_BLOCKABLE);
-+
-+	spin_lock_irq(&current->sighand->siglock);
-+	saveset = current->blocked;
-+	current->blocked = newset;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
-+
-+	regs.eax = -EINTR;
-+	while (1) {
-+		current->state = TASK_INTERRUPTIBLE;
-+		schedule();
-+		if (do_signal(&regs, &saveset))
-+			return -EINTR;
-+	}
-+}
-+
-+asmlinkage int 
-+sys_sigaction(int sig, const struct old_sigaction __user *act,
-+	      struct old_sigaction __user *oact)
-+{
-+	struct k_sigaction new_ka, old_ka;
-+	int ret;
-+
-+	if (act) {
-+		old_sigset_t mask;
-+		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-+		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
-+			return -EFAULT;
-+		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
-+		__get_user(mask, &act->sa_mask);
-+		siginitset(&new_ka.sa.sa_mask, mask);
-+	}
-+
-+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-+
-+	if (!ret && oact) {
-+		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-+		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
-+			return -EFAULT;
-+		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-+		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
-+	}
-+
-+	return ret;
-+}
-+
-+asmlinkage int
-+sys_sigaltstack(unsigned long ebx)
-+{
-+	/* This is needed to make gcc realize it doesn't own the "struct pt_regs" */
-+	struct pt_regs *regs = (struct pt_regs *)&ebx;
-+	const stack_t __user *uss = (const stack_t __user *)ebx;
-+	stack_t __user *uoss = (stack_t __user *)regs->ecx;
-+
-+	return do_sigaltstack(uss, uoss, regs->esp);
-+}
-+
-+
-+/*
-+ * Do a signal return; undo the signal stack.
-+ */
-+
-+static int
-+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax)
-+{
-+	unsigned int err = 0;
-+
-+	/* Always make any pending restarted system calls return -EINTR */
-+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-+
-+#define COPY(x)		err |= __get_user(regs->x, &sc->x)
-+
-+#define COPY_SEG(seg)							\
-+	{ unsigned short tmp;						\
-+	  err |= __get_user(tmp, &sc->seg);				\
-+	  regs->x##seg = tmp; }
-+
-+#define COPY_SEG_STRICT(seg)						\
-+	{ unsigned short tmp;						\
-+	  err |= __get_user(tmp, &sc->seg);				\
-+	  regs->x##seg = tmp|3; }
-+
-+#define GET_SEG(seg)							\
-+	{ unsigned short tmp;						\
-+	  err |= __get_user(tmp, &sc->seg);				\
-+	  loadsegment(seg,tmp); }
-+
-+#define	FIX_EFLAGS	(X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \
-+			 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
-+			 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
-+
-+	GET_SEG(gs);
-+	GET_SEG(fs);
-+	COPY_SEG(es);
-+	COPY_SEG(ds);
-+	COPY(edi);
-+	COPY(esi);
-+	COPY(ebp);
-+	COPY(esp);
-+	COPY(ebx);
-+	COPY(edx);
-+	COPY(ecx);
-+	COPY(eip);
-+	COPY_SEG_STRICT(cs);
-+	COPY_SEG_STRICT(ss);
-+	
-+	{
-+		unsigned int tmpflags;
-+		err |= __get_user(tmpflags, &sc->eflags);
-+		regs->eflags = (regs->eflags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-+		regs->orig_eax = -1;		/* disable syscall checks */
-+	}
-+
-+	{
-+		struct _fpstate __user * buf;
-+		err |= __get_user(buf, &sc->fpstate);
-+		if (buf) {
-+			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
-+				goto badframe;
-+			err |= restore_i387(buf);
-+		} else {
-+			struct task_struct *me = current;
-+			if (used_math()) {
-+				clear_fpu(me);
-+				clear_used_math();
-+			}
-+		}
-+	}
-+
-+	err |= __get_user(*peax, &sc->eax);
-+	return err;
-+
-+badframe:
-+	return 1;
-+}
-+
-+asmlinkage int sys_sigreturn(unsigned long __unused)
-+{
-+	struct pt_regs *regs = (struct pt_regs *) &__unused;
-+	struct sigframe __user *frame = (struct sigframe __user *)(regs->esp - 8);
-+	sigset_t set;
-+	int eax;
-+
-+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-+		goto badframe;
-+	if (__get_user(set.sig[0], &frame->sc.oldmask)
-+	    || (_NSIG_WORDS > 1
-+		&& __copy_from_user(&set.sig[1], &frame->extramask,
-+				    sizeof(frame->extramask))))
-+		goto badframe;
-+
-+	sigdelsetmask(&set, ~_BLOCKABLE);
-+	spin_lock_irq(&current->sighand->siglock);
-+	current->blocked = set;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
-+	
-+	if (restore_sigcontext(regs, &frame->sc, &eax))
-+		goto badframe;
-+	return eax;
-+
-+badframe:
-+	force_sig(SIGSEGV, current);
-+	return 0;
-+}	
-+
-+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
-+{
-+	struct pt_regs *regs = (struct pt_regs *) &__unused;
-+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(regs->esp - 4);
-+	sigset_t set;
-+	int eax;
-+
-+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-+		goto badframe;
-+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-+		goto badframe;
-+
-+	sigdelsetmask(&set, ~_BLOCKABLE);
-+	spin_lock_irq(&current->sighand->siglock);
-+	current->blocked = set;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
-+	
-+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
-+		goto badframe;
-+
-+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->esp) == -EFAULT)
-+		goto badframe;
-+
-+	return eax;
-+
-+badframe:
-+	force_sig(SIGSEGV, current);
-+	return 0;
-+}	
-+
-+/*
-+ * Set up a signal frame.
-+ */
-+
-+static int
-+setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
-+		 struct pt_regs *regs, unsigned long mask)
-+{
-+	int tmp, err = 0;
-+
-+	tmp = 0;
-+	__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
-+	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-+	__asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
-+	err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
-+
-+	err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
-+	err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
-+	err |= __put_user(regs->edi, &sc->edi);
-+	err |= __put_user(regs->esi, &sc->esi);
-+	err |= __put_user(regs->ebp, &sc->ebp);
-+	err |= __put_user(regs->esp, &sc->esp);
-+	err |= __put_user(regs->ebx, &sc->ebx);
-+	err |= __put_user(regs->edx, &sc->edx);
-+	err |= __put_user(regs->ecx, &sc->ecx);
-+	err |= __put_user(regs->eax, &sc->eax);
-+	err |= __put_user(current->thread.trap_no, &sc->trapno);
-+	err |= __put_user(current->thread.error_code, &sc->err);
-+	err |= __put_user(regs->eip, &sc->eip);
-+	err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
-+	err |= __put_user(regs->eflags, &sc->eflags);
-+	err |= __put_user(regs->esp, &sc->esp_at_signal);
-+	err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);
-+
-+	tmp = save_i387(fpstate);
-+	if (tmp < 0)
-+	  err = 1;
-+	else
-+	  err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
-+
-+	/* non-iBCS2 extensions.. */
-+	err |= __put_user(mask, &sc->oldmask);
-+	err |= __put_user(current->thread.cr2, &sc->cr2);
-+
-+	return err;
-+}
-+
-+/*
-+ * Determine which stack to use..
-+ */
-+static inline void __user *
-+get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
-+{
-+	unsigned long esp;
-+
-+	/* Default to using normal stack */
-+	esp = regs->esp;
-+
-+	/* This is the X/Open sanctioned signal stack switching.  */
-+	if (ka->sa.sa_flags & SA_ONSTACK) {
-+		if (sas_ss_flags(esp) == 0)
-+			esp = current->sas_ss_sp + current->sas_ss_size;
-+	}
-+
-+	/* This is the legacy signal stack switching. */
-+	else if ((regs->xss & 0xffff) != __USER_DS &&
-+		 !(ka->sa.sa_flags & SA_RESTORER) &&
-+		 ka->sa.sa_restorer) {
-+		esp = (unsigned long) ka->sa.sa_restorer;
-+	}
-+
-+	return (void __user *)((esp - frame_size) & -8ul);
-+}
-+
-+/* These symbols are defined with the addresses in the vsyscall page.
-+   See vsyscall-sigreturn.S.  */
-+extern void __user __kernel_sigreturn;
-+extern void __user __kernel_rt_sigreturn;
-+
-+static void setup_frame(int sig, struct k_sigaction *ka,
-+			sigset_t *set, struct pt_regs * regs)
-+{
-+	void __user *restorer;
-+	struct sigframe __user *frame;
-+	int err = 0;
-+	int usig;
-+
-+	frame = get_sigframe(ka, regs, sizeof(*frame));
-+
-+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-+		goto give_sigsegv;
-+
-+	usig = current_thread_info()->exec_domain
-+		&& current_thread_info()->exec_domain->signal_invmap
-+		&& sig < 32
-+		? current_thread_info()->exec_domain->signal_invmap[sig]
-+		: sig;
-+
-+	err = __put_user(usig, &frame->sig);
-+	if (err)
-+		goto give_sigsegv;
-+
-+	err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]);
-+	if (err)
-+		goto give_sigsegv;
-+
-+	if (_NSIG_WORDS > 1) {
-+		err = __copy_to_user(&frame->extramask, &set->sig[1],
-+				      sizeof(frame->extramask));
-+		if (err)
-+			goto give_sigsegv;
-+	}
-+
-+	restorer = &__kernel_sigreturn;
-+	if (ka->sa.sa_flags & SA_RESTORER)
-+		restorer = ka->sa.sa_restorer;
-+
-+	/* Set up to return from userspace.  */
-+	err |= __put_user(restorer, &frame->pretcode);
-+	 
-+	/*
-+	 * This is popl %eax ; movl $,%eax ; int $0x80
-+	 *
-+	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
-+	 * reasons and because gdb uses it as a signature to notice
-+	 * signal handler stack frames.
-+	 */
-+	err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
-+	err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2));
-+	err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
-+
-+	if (err)
-+		goto give_sigsegv;
-+
-+	/* Set up registers for signal handler */
-+	regs->esp = (unsigned long) frame;
-+	regs->eip = (unsigned long) ka->sa.sa_handler;
-+	regs->eax = (unsigned long) sig;
-+	regs->edx = (unsigned long) 0;
-+	regs->ecx = (unsigned long) 0;
-+
-+	set_fs(USER_DS);
-+	regs->xds = __USER_DS;
-+	regs->xes = __USER_DS;
-+	regs->xss = __USER_DS;
-+	regs->xcs = __USER_CS;
-+
-+	/*
-+	 * Clear TF when entering the signal handler, but
-+	 * notify any tracer that was single-stepping it.
-+	 * The tracer may want to single-step inside the
-+	 * handler too.
-+	 */
-+	regs->eflags &= ~TF_MASK;
-+	if (test_thread_flag(TIF_SINGLESTEP))
-+		ptrace_notify(SIGTRAP);
-+
-+#if DEBUG_SIG
-+	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
-+		current->comm, current->pid, frame, regs->eip, frame->pretcode);
-+#endif
-+
-+	return;
-+
-+give_sigsegv:
-+	force_sigsegv(sig, current);
-+}
-+
-+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-+			   sigset_t *set, struct pt_regs * regs)
-+{
-+	void __user *restorer;
-+	struct rt_sigframe __user *frame;
-+	int err = 0;
-+	int usig;
-+
-+	frame = get_sigframe(ka, regs, sizeof(*frame));
-+
-+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-+		goto give_sigsegv;
-+
-+	usig = current_thread_info()->exec_domain
-+		&& current_thread_info()->exec_domain->signal_invmap
-+		&& sig < 32
-+		? current_thread_info()->exec_domain->signal_invmap[sig]
-+		: sig;
-+
-+	err |= __put_user(usig, &frame->sig);
-+	err |= __put_user(&frame->info, &frame->pinfo);
-+	err |= __put_user(&frame->uc, &frame->puc);
-+	err |= copy_siginfo_to_user(&frame->info, info);
-+	if (err)
-+		goto give_sigsegv;
-+
-+	/* Create the ucontext.  */
-+	err |= __put_user(0, &frame->uc.uc_flags);
-+	err |= __put_user(0, &frame->uc.uc_link);
-+	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-+	err |= __put_user(sas_ss_flags(regs->esp),
-+			  &frame->uc.uc_stack.ss_flags);
-+	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-+	err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
-+			        regs, set->sig[0]);
-+	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-+	if (err)
-+		goto give_sigsegv;
-+
-+	/* Set up to return from userspace.  */
-+	restorer = &__kernel_rt_sigreturn;
-+	if (ka->sa.sa_flags & SA_RESTORER)
-+		restorer = ka->sa.sa_restorer;
-+	err |= __put_user(restorer, &frame->pretcode);
-+	 
-+	/*
-+	 * This is movl $,%eax ; int $0x80
-+	 *
-+	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
-+	 * reasons and because gdb uses it as a signature to notice
-+	 * signal handler stack frames.
-+	 */
-+	err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
-+	err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1));
-+	err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
-+
-+	if (err)
-+		goto give_sigsegv;
-+
-+	/* Set up registers for signal handler */
-+	regs->esp = (unsigned long) frame;
-+	regs->eip = (unsigned long) ka->sa.sa_handler;
-+	regs->eax = (unsigned long) usig;
-+	regs->edx = (unsigned long) &frame->info;
-+	regs->ecx = (unsigned long) &frame->uc;
-+
-+	set_fs(USER_DS);
-+	regs->xds = __USER_DS;
-+	regs->xes = __USER_DS;
-+	regs->xss = __USER_DS;
-+	regs->xcs = __USER_CS;
-+
-+	/*
-+	 * Clear TF when entering the signal handler, but
-+	 * notify any tracer that was single-stepping it.
-+	 * The tracer may want to single-step inside the
-+	 * handler too.
-+	 */
-+	regs->eflags &= ~TF_MASK;
-+	if (test_thread_flag(TIF_SINGLESTEP))
-+		ptrace_notify(SIGTRAP);
-+
-+#if DEBUG_SIG
-+	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
-+		current->comm, current->pid, frame, regs->eip, frame->pretcode);
-+#endif
-+
-+	return;
-+
-+give_sigsegv:
-+	force_sigsegv(sig, current);
-+}
-+
-+/*
-+ * OK, we're invoking a handler
-+ */	
-+
-+static void
-+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-+	      sigset_t *oldset,	struct pt_regs * regs)
-+{
-+	/* Are we from a system call? */
-+	if (regs->orig_eax >= 0) {
-+		/* If so, check system call restarting.. */
-+		switch (regs->eax) {
-+		        case -ERESTART_RESTARTBLOCK:
-+			case -ERESTARTNOHAND:
-+				regs->eax = -EINTR;
-+				break;
-+
-+			case -ERESTARTSYS:
-+				if (!(ka->sa.sa_flags & SA_RESTART)) {
-+					regs->eax = -EINTR;
-+					break;
-+				}
-+			/* fallthrough */
-+			case -ERESTARTNOINTR:
-+				regs->eax = regs->orig_eax;
-+				regs->eip -= 2;
-+		}
-+	}
-+
-+	/*
-+	 * If TF is set due to a debugger (PT_DTRACE), clear the TF flag so
-+	 * that register information in the sigcontext is correct.
-+	 */
-+	if (unlikely(regs->eflags & TF_MASK)
-+	    && likely(current->ptrace & PT_DTRACE)) {
-+		current->ptrace &= ~PT_DTRACE;
-+		regs->eflags &= ~TF_MASK;
-+	}
-+
-+	/* Set up the stack frame */
-+	if (ka->sa.sa_flags & SA_SIGINFO)
-+		setup_rt_frame(sig, ka, info, oldset, regs);
-+	else
-+		setup_frame(sig, ka, oldset, regs);
-+
-+	if (!(ka->sa.sa_flags & SA_NODEFER)) {
-+		spin_lock_irq(&current->sighand->siglock);
-+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-+		sigaddset(&current->blocked,sig);
-+		recalc_sigpending();
-+		spin_unlock_irq(&current->sighand->siglock);
-+	}
-+}
-+
-+/*
-+ * Note that 'init' is a special process: it doesn't get signals it doesn't
-+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
-+ * mistake.
-+ */
-+int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
-+{
-+	siginfo_t info;
-+	int signr;
-+	struct k_sigaction ka;
-+
-+	/*
-+	 * We want the common case to go fast, which
-+	 * is why we may in certain cases get here from
-+	 * kernel mode. Just return without doing anything
-+	 * if so.
-+	 */
-+	if ((regs->xcs & 2) != 2)
-+		return 1;
-+
-+	if (current->flags & PF_FREEZE) {
-+		refrigerator(0);
-+		goto no_signal;
-+	}
-+
-+	if (!oldset)
-+		oldset = &current->blocked;
-+
-+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-+	if (signr > 0) {
-+		/* Reenable any watchpoints before delivering the
-+		 * signal to user space. The processor register will
-+		 * have been cleared if the watchpoint triggered
-+		 * inside the kernel.
-+		 */
-+		if (unlikely(current->thread.debugreg[7])) {
-+			loaddebug(&current->thread, 7);
-+		}
-+
-+		/* Whee!  Actually deliver the signal.  */
-+		handle_signal(signr, &info, &ka, oldset, regs);
-+		return 1;
-+	}
-+
-+ no_signal:
-+	/* Did we come from a system call? */
-+	if (regs->orig_eax >= 0) {
-+		/* Restart the system call - no handlers present */
-+		if (regs->eax == -ERESTARTNOHAND ||
-+		    regs->eax == -ERESTARTSYS ||
-+		    regs->eax == -ERESTARTNOINTR) {
-+			regs->eax = regs->orig_eax;
-+			regs->eip -= 2;
-+		}
-+		if (regs->eax == -ERESTART_RESTARTBLOCK){
-+			regs->eax = __NR_restart_syscall;
-+			regs->eip -= 2;
-+		}
-+	}
-+	return 0;
-+}
-+
-+/*
-+ * notification of userspace execution resumption
-+ * - triggered by current->work.notify_resume
-+ */
-+__attribute__((regparm(3)))
-+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
-+		      __u32 thread_info_flags)
-+{
-+	/* Pending single-step? */
-+	if (thread_info_flags & _TIF_SINGLESTEP) {
-+		regs->eflags |= TF_MASK;
-+		clear_thread_flag(TIF_SINGLESTEP);
-+	}
-+	/* deal with pending signal delivery */
-+	if (thread_info_flags & _TIF_SIGPENDING)
-+		do_signal(regs,oldset);
-+	
-+	clear_thread_flag(TIF_IRET);
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/smp.c linux-2.6.12-xen/arch/xen/i386/kernel/smp.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/smp.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/smp.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,606 @@
-+/*
-+ *	Intel SMP support routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	This code is released under the GNU General Public License version 2 or
-+ *	later.
-+ */
-+
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/irq.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/cache.h>
-+#include <linux/interrupt.h>
-+#include <linux/cpu.h>
-+
-+#include <asm/mtrr.h>
-+#include <asm/tlbflush.h>
-+#if 0
-+#include <mach_apic.h>
-+#endif
-+#include <asm-xen/evtchn.h>
-+
-+/*
-+ *	Some notes on x86 processor bugs affecting SMP operation:
-+ *
-+ *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
-+ *	The Linux implications for SMP are handled as follows:
-+ *
-+ *	Pentium III / [Xeon]
-+ *		None of the E1AP-E3AP errata are visible to the user.
-+ *
-+ *	E1AP.	see PII A1AP
-+ *	E2AP.	see PII A2AP
-+ *	E3AP.	see PII A3AP
-+ *
-+ *	Pentium II / [Xeon]
-+ *		None of the A1AP-A3AP errata are visible to the user.
-+ *
-+ *	A1AP.	see PPro 1AP
-+ *	A2AP.	see PPro 2AP
-+ *	A3AP.	see PPro 7AP
-+ *
-+ *	Pentium Pro
-+ *		None of 1AP-9AP errata are visible to the normal user,
-+ *	except occasional delivery of 'spurious interrupt' as trap #15.
-+ *	This is very rare and a non-problem.
-+ *
-+ *	1AP.	Linux maps APIC as non-cacheable
-+ *	2AP.	worked around in hardware
-+ *	3AP.	fixed in C0 and above steppings microcode update.
-+ *		Linux does not use excessive STARTUP_IPIs.
-+ *	4AP.	worked around in hardware
-+ *	5AP.	symmetric IO mode (normal Linux operation) not affected.
-+ *		'noapic' mode has vector 0xf filled out properly.
-+ *	6AP.	'noapic' mode might be affected - fixed in later steppings
-+ *	7AP.	We do not assume writes to the LVT deasserting IRQs
-+ *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
-+ *	9AP.	We do not use mixed mode
-+ *
-+ *	Pentium
-+ *		There is a marginal case where REP MOVS on 100MHz SMP
-+ *	machines with B stepping processors can fail. XXX should provide
-+ *	an L1cache=Writethrough or L1cache=off option.
-+ *
-+ *		B stepping CPUs may hang. There are hardware work arounds
-+ *	for this. We warn about it in case your board doesn't have the work
-+ *	arounds. Basically that's so I can tell anyone with a B stepping
-+ *	CPU and SMP problems "tough".
-+ *
-+ *	Specific items [From Pentium Processor Specification Update]
-+ *
-+ *	1AP.	Linux doesn't use remote read
-+ *	2AP.	Linux doesn't trust APIC errors
-+ *	3AP.	We work around this
-+ *	4AP.	Linux never generated 3 interrupts of the same priority
-+ *		to cause a lost local interrupt.
-+ *	5AP.	Remote read is never used
-+ *	6AP.	not affected - worked around in hardware
-+ *	7AP.	not affected - worked around in hardware
-+ *	8AP.	worked around in hardware - we get explicit CS errors if not
-+ *	9AP.	only 'noapic' mode affected. Might generate spurious
-+ *		interrupts, we log only the first one and count the
-+ *		rest silently.
-+ *	10AP.	not affected - worked around in hardware
-+ *	11AP.	Linux reads the APIC between writes to avoid this, as per
-+ *		the documentation. Make sure you preserve this as it affects
-+ *		the C stepping chips too.
-+ *	12AP.	not affected - worked around in hardware
-+ *	13AP.	not affected - worked around in hardware
-+ *	14AP.	we always deassert INIT during bootup
-+ *	15AP.	not affected - worked around in hardware
-+ *	16AP.	not affected - worked around in hardware
-+ *	17AP.	not affected - worked around in hardware
-+ *	18AP.	not affected - worked around in hardware
-+ *	19AP.	not affected - worked around in BIOS
-+ *
-+ *	If this sounds worrying believe me these bugs are either ___RARE___,
-+ *	or are signal timing bugs worked around in hardware and there's
-+ *	about nothing of note with C stepping upwards.
-+ */
-+
-+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
-+
-+/*
-+ * the following functions deal with sending IPIs between CPUs.
-+ *
-+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
-+ */
-+
-+static inline int __prepare_ICR (unsigned int shortcut, int vector)
-+{
-+	return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
-+}
-+
-+static inline int __prepare_ICR2 (unsigned int mask)
-+{
-+	return SET_APIC_DEST_FIELD(mask);
-+}
-+
-+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-+
-+static inline void __send_IPI_one(unsigned int cpu, int vector)
-+{
-+	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-+	BUG_ON(irq < 0);
-+	notify_remote_via_irq(irq);
-+}
-+
-+void __send_IPI_shortcut(unsigned int shortcut, int vector)
-+{
-+	int cpu;
-+
-+	switch (shortcut) {
-+	case APIC_DEST_SELF:
-+		__send_IPI_one(smp_processor_id(), vector);
-+		break;
-+	case APIC_DEST_ALLBUT:
-+		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+			if (cpu == smp_processor_id())
-+				continue;
-+			if (cpu_isset(cpu, cpu_online_map)) {
-+				__send_IPI_one(cpu, vector);
-+			}
-+		}
-+		break;
-+	default:
-+		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
-+		       vector);
-+		break;
-+	}
-+}
-+
-+void fastcall send_IPI_self(int vector)
-+{
-+	__send_IPI_shortcut(APIC_DEST_SELF, vector);
-+}
-+
-+/*
-+ * This is only used on smaller machines.
-+ */
-+void send_IPI_mask_bitmask(cpumask_t mask, int vector)
-+{
-+	unsigned long flags;
-+	unsigned int cpu;
-+
-+	local_irq_save(flags);
-+	WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
-+
-+	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+		if (cpu_isset(cpu, mask)) {
-+			__send_IPI_one(cpu, vector);
-+		}
-+	}
-+
-+	local_irq_restore(flags);
-+}
-+
-+void send_IPI_mask_sequence(cpumask_t mask, int vector)
-+{
-+
-+	send_IPI_mask_bitmask(mask, vector);
-+}
-+
-+#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
-+
-+#if 0 /* XEN */
-+/*
-+ *	Smarter SMP flushing macros. 
-+ *		c/o Linus Torvalds.
-+ *
-+ *	These mean you can really definitely utterly forget about
-+ *	writing to user space from interrupts. (Its not allowed anyway).
-+ *
-+ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
-+ */
-+
-+static cpumask_t flush_cpumask;
-+static struct mm_struct * flush_mm;
-+static unsigned long flush_va;
-+static DEFINE_SPINLOCK(tlbstate_lock);
-+#define FLUSH_ALL	0xffffffff
-+
-+/*
-+ * We cannot call mmdrop() because we are in interrupt context, 
-+ * instead update mm->cpu_vm_mask.
-+ *
-+ * We need to reload %cr3 since the page tables may be going
-+ * away from under us..
-+ */
-+static inline void leave_mm (unsigned long cpu)
-+{
-+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-+		BUG();
-+	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
-+	load_cr3(swapper_pg_dir);
-+}
-+
-+/*
-+ *
-+ * The flush IPI assumes that a thread switch happens in this order:
-+ * [cpu0: the cpu that switches]
-+ * 1) switch_mm() either 1a) or 1b)
-+ * 1a) thread switch to a different mm
-+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
-+ * 	Stop ipi delivery for the old mm. This is not synchronized with
-+ * 	the other cpus, but smp_invalidate_interrupt ignore flush ipis
-+ * 	for the wrong mm, and in the worst case we perform a superfluous
-+ * 	tlb flush.
-+ * 1a2) set cpu_tlbstate to TLBSTATE_OK
-+ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ *	was in lazy tlb mode.
-+ * 1a3) update cpu_tlbstate[].active_mm
-+ * 	Now cpu0 accepts tlb flushes for the new mm.
-+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
-+ * 	Now the other cpus will send tlb flush ipis.
-+ * 1a4) change cr3.
-+ * 1b) thread switch without mm change
-+ *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
-+ *	flush ipis.
-+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
-+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-+ * 	Atomically set the bit [other cpus will start sending flush ipis],
-+ * 	and test the bit.
-+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-+ * 2) switch %%esp, ie current
-+ *
-+ * The interrupt must handle 2 special cases:
-+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
-+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
-+ *   runs in kernel space, the cpu could load tlb entries for user space
-+ *   pages.
-+ *
-+ * The good news is that cpu_tlbstate is local to each cpu, no
-+ * write/read ordering problems.
-+ */
-+
-+/*
-+ * TLB flush IPI:
-+ *
-+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
-+ * 2) Leave the mm if we are in the lazy tlb mode.
-+ */
-+
-+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
-+				     struct pt_regs *regs)
-+{
-+	unsigned long cpu;
-+
-+	cpu = get_cpu();
-+
-+	if (!cpu_isset(cpu, flush_cpumask))
-+		goto out;
-+		/* 
-+		 * This was a BUG() but until someone can quote me the
-+		 * line from the intel manual that guarantees an IPI to
-+		 * multiple CPUs is retried _only_ on the erroring CPUs
-+		 * its staying as a return
-+		 *
-+		 * BUG();
-+		 */
-+		 
-+	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-+		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-+			if (flush_va == FLUSH_ALL)
-+				local_flush_tlb();
-+			else
-+				__flush_tlb_one(flush_va);
-+		} else
-+			leave_mm(cpu);
-+	}
-+	smp_mb__before_clear_bit();
-+	cpu_clear(cpu, flush_cpumask);
-+	smp_mb__after_clear_bit();
-+out:
-+	put_cpu_no_resched();
-+
-+	return IRQ_HANDLED;
-+}
-+
-+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-+						unsigned long va)
-+{
-+	/*
-+	 * A couple of (to be removed) sanity checks:
-+	 *
-+	 * - current CPU must not be in mask
-+	 * - mask must exist :)
-+	 */
-+	BUG_ON(cpus_empty(cpumask));
-+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-+	BUG_ON(!mm);
-+
-+	/* If a CPU which we ran on has gone down, OK. */
-+	cpus_and(cpumask, cpumask, cpu_online_map);
-+	if (cpus_empty(cpumask))
-+		return;
-+
-+	/*
-+	 * i'm not happy about this global shared spinlock in the
-+	 * MM hot path, but we'll see how contended it is.
-+	 * Temporarily this turns IRQs off, so that lockups are
-+	 * detected by the NMI watchdog.
-+	 */
-+	spin_lock(&tlbstate_lock);
-+	
-+	flush_mm = mm;
-+	flush_va = va;
-+#if NR_CPUS <= BITS_PER_LONG
-+	atomic_set_mask(cpumask, &flush_cpumask);
-+#else
-+	{
-+		int k;
-+		unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
-+		unsigned long *cpu_mask = (unsigned long *)&cpumask;
-+		for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
-+			atomic_set_mask(cpu_mask[k], &flush_mask[k]);
-+	}
-+#endif
-+	/*
-+	 * We have to send the IPI only to
-+	 * CPUs affected.
-+	 */
-+	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
-+
-+	while (!cpus_empty(flush_cpumask))
-+		/* nothing. lockup detection does not belong here */
-+		mb();
-+
-+	flush_mm = NULL;
-+	flush_va = 0;
-+	spin_unlock(&tlbstate_lock);
-+}
-+	
-+void flush_tlb_current_task(void)
-+{
-+	struct mm_struct *mm = current->mm;
-+	cpumask_t cpu_mask;
-+
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
-+
-+	local_flush_tlb();
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+	preempt_enable();
-+}
-+
-+void flush_tlb_mm (struct mm_struct * mm)
-+{
-+	cpumask_t cpu_mask;
-+
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
-+
-+	if (current->active_mm == mm) {
-+		if (current->mm)
-+			local_flush_tlb();
-+		else
-+			leave_mm(smp_processor_id());
-+	}
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+
-+	preempt_enable();
-+}
-+
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{
-+	struct mm_struct *mm = vma->vm_mm;
-+	cpumask_t cpu_mask;
-+
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
-+
-+	if (current->active_mm == mm) {
-+		if(current->mm)
-+			__flush_tlb_one(va);
-+		else
-+		 	leave_mm(smp_processor_id());
-+	}
-+
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, va);
-+
-+	preempt_enable();
-+}
-+
-+static void do_flush_tlb_all(void* info)
-+{
-+	unsigned long cpu = smp_processor_id();
-+
-+	__flush_tlb_all();
-+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
-+		leave_mm(cpu);
-+}
-+
-+void flush_tlb_all(void)
-+{
-+	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
-+}
-+
-+#else
-+
-+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
-+				     struct pt_regs *regs)
-+{ return 0; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm(struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+
-+#endif /* XEN */
-+
-+/*
-+ * this function sends a 'reschedule' IPI to another CPU.
-+ * it goes straight through and wastes no time serializing
-+ * anything. Worst case is that we lose a reschedule ...
-+ */
-+void smp_send_reschedule(int cpu)
-+{
-+	WARN_ON(cpu_is_offline(cpu));
-+	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-+}
-+
-+/*
-+ * Structure and data for smp_call_function(). This is designed to minimise
-+ * static memory requirements. It also looks cleaner.
-+ */
-+static DEFINE_SPINLOCK(call_lock);
-+
-+struct call_data_struct {
-+	void (*func) (void *info);
-+	void *info;
-+	atomic_t started;
-+	atomic_t finished;
-+	int wait;
-+};
-+
-+static struct call_data_struct * call_data;
-+
-+/*
-+ * this function sends a 'generic call function' IPI to all other CPUs
-+ * in the system.
-+ */
-+
-+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-+			int wait)
-+/*
-+ * [SUMMARY] Run a function on all other CPUs.
-+ * <func> The function to run. This must be fast and non-blocking.
-+ * <info> An arbitrary pointer to pass to the function.
-+ * <nonatomic> currently unused.
-+ * <wait> If true, wait (atomically) until function has completed on other CPUs.
-+ * [RETURNS] 0 on success, else a negative status code. Does not return until
-+ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
-+ */
-+{
-+	struct call_data_struct data;
-+	int cpus;
-+
-+	/* Holding any lock stops cpus from going down. */
-+	spin_lock(&call_lock);
-+	cpus = num_online_cpus()-1;
-+
-+	if (!cpus) {
-+		spin_unlock(&call_lock);
-+		return 0;
-+	}
-+
-+	/* Can deadlock when called with interrupts disabled */
-+	WARN_ON(irqs_disabled());
-+
-+	data.func = func;
-+	data.info = info;
-+	atomic_set(&data.started, 0);
-+	data.wait = wait;
-+	if (wait)
-+		atomic_set(&data.finished, 0);
-+
-+	call_data = &data;
-+	mb();
-+	
-+	/* Send a message to all other CPUs and wait for them to respond */
-+	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-+
-+	/* Wait for response */
-+	while (atomic_read(&data.started) != cpus)
-+		barrier();
-+
-+	if (wait)
-+		while (atomic_read(&data.finished) != cpus)
-+			barrier();
-+	spin_unlock(&call_lock);
-+
-+	return 0;
-+}
-+
-+static void stop_this_cpu (void * dummy)
-+{
-+	/*
-+	 * Remove this CPU:
-+	 */
-+	cpu_clear(smp_processor_id(), cpu_online_map);
-+	local_irq_disable();
-+#if 0
-+	disable_local_APIC();
-+#endif
-+	if (cpu_data[smp_processor_id()].hlt_works_ok)
-+		for(;;) __asm__("hlt");
-+	for (;;);
-+}
-+
-+/*
-+ * this function calls the 'stop' function on all other CPUs in the system.
-+ */
-+
-+void smp_send_stop(void)
-+{
-+	smp_call_function(stop_this_cpu, NULL, 1, 0);
-+
-+	local_irq_disable();
-+#if 0
-+	disable_local_APIC();
-+#endif
-+	local_irq_enable();
-+}
-+
-+/*
-+ * Reschedule call back. Nothing to do,
-+ * all the work is done automatically when
-+ * we return from the interrupt.
-+ */
-+irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
-+				     struct pt_regs *regs)
-+{
-+
-+	return IRQ_HANDLED;
-+}
-+
-+#include <linux/kallsyms.h>
-+irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
-+					struct pt_regs *regs)
-+{
-+	void (*func) (void *info) = call_data->func;
-+	void *info = call_data->info;
-+	int wait = call_data->wait;
-+
-+	/*
-+	 * Notify initiating CPU that I've grabbed the data and am
-+	 * about to execute the function
-+	 */
-+	mb();
-+	atomic_inc(&call_data->started);
-+	/*
-+	 * At this point the info structure may be out of scope unless wait==1
-+	 */
-+	irq_enter();
-+	(*func)(info);
-+	irq_exit();
-+
-+	if (wait) {
-+		mb();
-+		atomic_inc(&call_data->finished);
-+	}
-+
-+	return IRQ_HANDLED;
-+}
-+
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/swiotlb.c linux-2.6.12-xen/arch/xen/i386/kernel/swiotlb.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/swiotlb.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/swiotlb.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,671 @@
-+/*
-+ * Dynamic DMA mapping support.
-+ *
-+ * This implementation is a fallback for platforms that do not support
-+ * I/O TLBs (aka DMA address translation hardware).
-+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
-+ * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
-+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
-+ *	David Mosberger-Tang <davidm at hpl.hp.com>
-+ * Copyright (C) 2005 Keir Fraser <keir at xensource.com>
-+ */
-+
-+#include <linux/cache.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
-+#include <linux/spinlock.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#include <linux/bootmem.h>
-+#include <linux/highmem.h>
-+#include <asm/io.h>
-+#include <asm/pci.h>
-+#include <asm/dma.h>
-+#include <asm/uaccess.h>
-+#include <asm-xen/xen-public/memory.h>
-+
-+#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
-+
-+#define SG_ENT_PHYS_ADDRESS(sg)	(page_to_phys((sg)->page) + (sg)->offset)
-+
-+/*
-+ * Maximum allowable number of contiguous slabs to map,
-+ * must be a power of 2.  What is the appropriate value?
-+ * The complexity of {map,unmap}_single is linearly dependent on this value.
-+ */
-+#define IO_TLB_SEGSIZE	128
-+
-+/*
-+ * log of the size of each IO TLB slab.  The number of slabs is command line
-+ * controllable.
-+ */
-+#define IO_TLB_SHIFT 11
-+
-+static int swiotlb_force;
-+static char *iotlb_virt_start;
-+static unsigned long iotlb_nslabs;
-+
-+/*
-+ * Used to do a quick range check in swiotlb_unmap_single and
-+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
-+ * API.
-+ */
-+static dma_addr_t iotlb_bus_start, iotlb_bus_end, iotlb_bus_mask;
-+
-+/* Does the given dma address reside within the swiotlb aperture? */
-+#define in_swiotlb_aperture(a) (!(((a) ^ iotlb_bus_start) & iotlb_bus_mask))
-+
-+/*
-+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
-+ */
-+static unsigned long io_tlb_overflow = 32*1024;
-+
-+void *io_tlb_overflow_buffer;
-+
-+/*
-+ * This is a free list describing the number of free entries available from
-+ * each index
-+ */
-+static unsigned int *io_tlb_list;
-+static unsigned int io_tlb_index;
-+
-+/*
-+ * We need to save away the original address corresponding to a mapped entry
-+ * for the sync operations.
-+ */
-+static struct phys_addr {
-+	struct page *page;
-+	unsigned int offset;
-+} *io_tlb_orig_addr;
-+
-+/*
-+ * Protect the above data structures in the map and unmap calls
-+ */
-+static DEFINE_SPINLOCK(io_tlb_lock);
-+
-+static int __init
-+setup_io_tlb_npages(char *str)
-+{
-+	/* Unlike ia64, the size is the aperture in megabytes, not 'slabs'! */
-+	if (isdigit(*str)) {
-+		iotlb_nslabs = simple_strtoul(str, &str, 0) <<
-+			(20 - IO_TLB_SHIFT);
-+		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+		/* Round up to power of two (xen_create_contiguous_region). */
-+		while (iotlb_nslabs & (iotlb_nslabs-1))
-+			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
-+	}
-+	if (*str == ',')
-+		++str;
-+	/*
-+         * NB. 'force' enables the swiotlb, but doesn't force its use for
-+         * every DMA like it does on native Linux. 'off' forcibly disables
-+         * use of the swiotlb.
-+         */
-+	if (!strcmp(str, "force"))
-+		swiotlb_force = 1;
-+	else if (!strcmp(str, "off"))
-+		swiotlb_force = -1;
-+	return 1;
-+}
-+__setup("swiotlb=", setup_io_tlb_npages);
-+/* make io_tlb_overflow tunable too? */
-+
-+/*
-+ * Statically reserve bounce buffer space and initialize bounce buffer data
-+ * structures for the software IO TLB used to implement the PCI DMA API.
-+ */
-+void
-+swiotlb_init_with_default_size (size_t default_size)
-+{
-+	unsigned long i, bytes;
-+	int rc;
-+
-+	if (!iotlb_nslabs) {
-+		iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
-+		iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
-+		/* Round up to power of two (xen_create_contiguous_region). */
-+		while (iotlb_nslabs & (iotlb_nslabs-1))
-+			iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
-+	}
-+
-+	bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
-+
-+	/*
-+	 * Get IO TLB memory from the low pages
-+	 */
-+	iotlb_virt_start = alloc_bootmem_low_pages(bytes);
-+	if (!iotlb_virt_start)
-+		panic("Cannot allocate SWIOTLB buffer!\n"
-+		      "Use dom0_mem Xen boot parameter to reserve\n"
-+		      "some DMA memory (e.g., dom0_mem=-128M).\n");
-+
-+	/* Hardcode 31 address bits for now: aacraid limitation. */
-+	rc = xen_create_contiguous_region(
-+		(unsigned long)iotlb_virt_start, get_order(bytes), 31);
-+	BUG_ON(rc);
-+
-+	/*
-+	 * Allocate and initialize the free list array.  This array is used
-+	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
-+	 */
-+	io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
-+	for (i = 0; i < iotlb_nslabs; i++)
-+ 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-+	io_tlb_index = 0;
-+	io_tlb_orig_addr = alloc_bootmem(
-+		iotlb_nslabs * sizeof(*io_tlb_orig_addr));
-+
-+	/*
-+	 * Get the overflow emergency buffer
-+	 */
-+	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
-+
-+	iotlb_bus_start = virt_to_bus(iotlb_virt_start);
-+	iotlb_bus_end   = iotlb_bus_start + bytes;
-+	iotlb_bus_mask  = ~(dma_addr_t)(bytes - 1);
-+
-+	printk(KERN_INFO "Software IO TLB enabled: \n"
-+	       " Aperture:     %lu megabytes\n"
-+	       " Bus range:    0x%016lx - 0x%016lx\n"
-+	       " Kernel range: 0x%016lx - 0x%016lx\n",
-+	       bytes >> 20,
-+	       (unsigned long)iotlb_bus_start,
-+	       (unsigned long)iotlb_bus_end,
-+	       (unsigned long)iotlb_virt_start,
-+	       (unsigned long)iotlb_virt_start + bytes);
-+}
-+
-+void
-+swiotlb_init(void)
-+{
-+	long ram_end;
-+	size_t defsz = 64 * (1 << 20); /* 64MB default size */
-+
-+	if (swiotlb_force == 1) {
-+		swiotlb = 1;
-+	} else if ((swiotlb_force != -1) &&
-+		   (xen_start_info->flags & SIF_INITDOMAIN)) {
-+		/* Domain 0 always has a swiotlb. */
-+		ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
-+		if (ram_end <= 0x7ffff)
-+			defsz = 2 * (1 << 20); /* 2MB on <2GB systems. */
-+		swiotlb = 1;
-+	}
-+
-+	if (swiotlb)
-+		swiotlb_init_with_default_size(defsz);
-+	else
-+		printk(KERN_INFO "Software IO TLB disabled\n");
-+}
-+
-+/*
-+ * We use __copy_to_user to transfer to the host buffer because the buffer
-+ * may be mapped read-only (e.g, in blkback driver) but lower-level
-+ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
-+ * unnecessary copy from the aperture to the host buffer, and a page fault.
-+ */
-+static void
-+__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
-+{
-+	if (PageHighMem(buffer.page)) {
-+		size_t len, bytes;
-+		char *dev, *host, *kmp;
-+		len = size;
-+		while (len != 0) {
-+			if (((bytes = len) + buffer.offset) > PAGE_SIZE)
-+				bytes = PAGE_SIZE - buffer.offset;
-+			kmp  = kmap_atomic(buffer.page, KM_SWIOTLB);
-+			dev  = dma_addr + size - len;
-+			host = kmp + buffer.offset;
-+			if (dir == DMA_FROM_DEVICE) {
-+				if (__copy_to_user(host, dev, bytes))
-+					/* inaccessible */;
-+			} else
-+				memcpy(dev, host, bytes);
-+			kunmap_atomic(kmp, KM_SWIOTLB);
-+			len -= bytes;
-+			buffer.page++;
-+			buffer.offset = 0;
-+		}
-+	} else {
-+		char *host = (char *)phys_to_virt(
-+			page_to_pseudophys(buffer.page)) + buffer.offset;
-+		if (dir == DMA_FROM_DEVICE) {
-+			if (__copy_to_user(host, dma_addr, size))
-+				/* inaccessible */;
-+		} else if (dir == DMA_TO_DEVICE)
-+			memcpy(dma_addr, host, size);
-+	}
-+}
-+
-+/*
-+ * Allocates bounce buffer and returns its kernel virtual address.
-+ */
-+static void *
-+map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
-+{
-+	unsigned long flags;
-+	char *dma_addr;
-+	unsigned int nslots, stride, index, wrap;
-+	int i;
-+
-+	/*
-+	 * For mappings greater than a page, we limit the stride (and
-+	 * hence alignment) to a page size.
-+	 */
-+	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+	if (size > PAGE_SIZE)
-+		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
-+	else
-+		stride = 1;
-+
-+	BUG_ON(!nslots);
-+
-+	/*
-+	 * Find suitable number of IO TLB entries size that will fit this
-+	 * request and allocate a buffer from that IO TLB pool.
-+	 */
-+	spin_lock_irqsave(&io_tlb_lock, flags);
-+	{
-+		wrap = index = ALIGN(io_tlb_index, stride);
-+
-+		if (index >= iotlb_nslabs)
-+			wrap = index = 0;
-+
-+		do {
-+			/*
-+			 * If we find a slot that indicates we have 'nslots'
-+			 * number of contiguous buffers, we allocate the
-+			 * buffers from that slot and mark the entries as '0'
-+			 * indicating unavailable.
-+			 */
-+			if (io_tlb_list[index] >= nslots) {
-+				int count = 0;
-+
-+				for (i = index; i < (int)(index + nslots); i++)
-+					io_tlb_list[i] = 0;
-+				for (i = index - 1;
-+				     (OFFSET(i, IO_TLB_SEGSIZE) !=
-+				      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+				     i--)
-+					io_tlb_list[i] = ++count;
-+				dma_addr = iotlb_virt_start +
-+					(index << IO_TLB_SHIFT);
-+
-+				/*
-+				 * Update the indices to avoid searching in
-+				 * the next round.
-+				 */
-+				io_tlb_index = 
-+					((index + nslots) < iotlb_nslabs
-+					 ? (index + nslots) : 0);
-+
-+				goto found;
-+			}
-+			index += stride;
-+			if (index >= iotlb_nslabs)
-+				index = 0;
-+		} while (index != wrap);
-+
-+		spin_unlock_irqrestore(&io_tlb_lock, flags);
-+		return NULL;
-+	}
-+  found:
-+	spin_unlock_irqrestore(&io_tlb_lock, flags);
-+
-+	/*
-+	 * Save away the mapping from the original address to the DMA address.
-+	 * This is needed when we sync the memory.  Then we sync the buffer if
-+	 * needed.
-+	 */
-+	io_tlb_orig_addr[index] = buffer;
-+	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
-+
-+	return dma_addr;
-+}
-+
-+/*
-+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
-+ */
-+static void
-+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-+{
-+	unsigned long flags;
-+	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-+	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+	struct phys_addr buffer = io_tlb_orig_addr[index];
-+
-+	/*
-+	 * First, sync the memory before unmapping the entry
-+	 */
-+	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-+		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
-+
-+	/*
-+	 * Return the buffer to the free list by setting the corresponding
-+	 * entries to indicate the number of contiguous entries available.
-+	 * While returning the entries to the free list, we merge the entries
-+	 * with slots below and above the pool being returned.
-+	 */
-+	spin_lock_irqsave(&io_tlb_lock, flags);
-+	{
-+		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
-+			 io_tlb_list[index + nslots] : 0);
-+		/*
-+		 * Step 1: return the slots to the free list, merging the
-+		 * slots with succeeding slots
-+		 */
-+		for (i = index + nslots - 1; i >= index; i--)
-+			io_tlb_list[i] = ++count;
-+		/*
-+		 * Step 2: merge the returned slots with the preceding slots,
-+		 * if available (non zero)
-+		 */
-+		for (i = index - 1;
-+		     (OFFSET(i, IO_TLB_SEGSIZE) !=
-+		      IO_TLB_SEGSIZE -1) && io_tlb_list[i];
-+		     i--)
-+			io_tlb_list[i] = ++count;
-+	}
-+	spin_unlock_irqrestore(&io_tlb_lock, flags);
-+}
-+
-+static void
-+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-+{
-+	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-+	struct phys_addr buffer = io_tlb_orig_addr[index];
-+	BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
-+	__sync_single(buffer, dma_addr, size, dir);
-+}
-+
-+static void
-+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
-+{
-+	/*
-+	 * Ran out of IOMMU space for this operation. This is very bad.
-+	 * Unfortunately the drivers cannot handle this operation properly
-+	 * unless they check for pci_dma_mapping_error (most don't).
-+	 * When the mapping is small enough return a static buffer to limit
-+	 * the damage, or panic when the transfer is too big.
-+	 */
-+	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
-+	       "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
-+
-+	if (size > io_tlb_overflow && do_panic) {
-+		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+			panic("PCI-DMA: Memory would be corrupted\n");
-+		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-+			panic("PCI-DMA: Random memory would be DMAed\n");
-+	}
-+}
-+
-+/*
-+ * Map a single buffer of the indicated size for DMA in streaming mode.  The
-+ * PCI address to use is returned.
-+ *
-+ * Once the device is given the dma address, the device owns this memory until
-+ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
-+ */
-+dma_addr_t
-+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
-+{
-+	dma_addr_t dev_addr = virt_to_bus(ptr);
-+	void *map;
-+	struct phys_addr buffer;
-+
-+	BUG_ON(dir == DMA_NONE);
-+
-+	/*
-+	 * If the pointer passed in happens to be in the device's DMA window,
-+	 * we can safely return the device addr and not worry about bounce
-+	 * buffering it.
-+	 */
-+	if (!range_straddles_page_boundary(ptr, size) &&
-+	    !address_needs_mapping(hwdev, dev_addr))
-+		return dev_addr;
-+
-+	/*
-+	 * Oh well, have to allocate and map a bounce buffer.
-+	 */
-+	buffer.page   = virt_to_page(ptr);
-+	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
-+	map = map_single(hwdev, buffer, size, dir);
-+	if (!map) {
-+		swiotlb_full(hwdev, size, dir, 1);
-+		map = io_tlb_overflow_buffer;
-+	}
-+
-+	dev_addr = virt_to_bus(map);
-+	return dev_addr;
-+}
-+
-+/*
-+ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
-+ * match what was provided for in a previous swiotlb_map_single call.  All
-+ * other usages are undefined.
-+ *
-+ * After this call, reads by the cpu to the buffer are guaranteed to see
-+ * whatever the device wrote there.
-+ */
-+void
-+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-+		     int dir)
-+{
-+	BUG_ON(dir == DMA_NONE);
-+	if (in_swiotlb_aperture(dev_addr))
-+		unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
-+
-+/*
-+ * Make physical memory consistent for a single streaming mode DMA translation
-+ * after a transfer.
-+ *
-+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
-+ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
-+ * call this function before doing so.  At the next point you give the PCI dma
-+ * address back to the card, you must first perform a
-+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
-+ */
-+void
-+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-+			    size_t size, int dir)
-+{
-+	BUG_ON(dir == DMA_NONE);
-+	if (in_swiotlb_aperture(dev_addr))
-+		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
-+
-+void
-+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-+			       size_t size, int dir)
-+{
-+	BUG_ON(dir == DMA_NONE);
-+	if (in_swiotlb_aperture(dev_addr))
-+		sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
-+}
-+
-+/*
-+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
-+ * This is the scatter-gather version of the above swiotlb_map_single
-+ * interface.  Here the scatter gather list elements are each tagged with the
-+ * appropriate dma address and length.  They are obtained via
-+ * sg_dma_{address,length}(SG).
-+ *
-+ * NOTE: An implementation may be able to use a smaller number of
-+ *       DMA address/length pairs than there are SG table elements.
-+ *       (for example via virtual mapping capabilities)
-+ *       The routine returns the number of addr/length pairs actually
-+ *       used, at most nents.
-+ *
-+ * Device ownership issues as mentioned above for swiotlb_map_single are the
-+ * same here.
-+ */
-+int
-+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+	       int dir)
-+{
-+	struct phys_addr buffer;
-+	dma_addr_t dev_addr;
-+	char *map;
-+	int i;
-+
-+	BUG_ON(dir == DMA_NONE);
-+
-+	for (i = 0; i < nelems; i++, sg++) {
-+		dev_addr = SG_ENT_PHYS_ADDRESS(sg);
-+		if (address_needs_mapping(hwdev, dev_addr)) {
-+			buffer.page   = sg->page;
-+			buffer.offset = sg->offset;
-+			map = map_single(hwdev, buffer, sg->length, dir);
-+			if (!map) {
-+				/* Don't panic here, we expect map_sg users
-+				   to do proper error handling. */
-+				swiotlb_full(hwdev, sg->length, dir, 0);
-+				swiotlb_unmap_sg(hwdev, sg - i, i, dir);
-+				sg[0].dma_length = 0;
-+				return 0;
-+			}
-+			sg->dma_address = (dma_addr_t)virt_to_bus(map);
-+		} else
-+			sg->dma_address = dev_addr;
-+		sg->dma_length = sg->length;
-+	}
-+	return nelems;
-+}
-+
-+/*
-+ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
-+ * concerning calls here are the same as for swiotlb_unmap_single() above.
-+ */
-+void
-+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
-+		 int dir)
-+{
-+	int i;
-+
-+	BUG_ON(dir == DMA_NONE);
-+
-+	for (i = 0; i < nelems; i++, sg++)
-+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+			unmap_single(hwdev, 
-+				     (void *)bus_to_virt(sg->dma_address),
-+				     sg->dma_length, dir);
-+}
-+
-+/*
-+ * Make physical memory consistent for a set of streaming mode DMA translations
-+ * after a transfer.
-+ *
-+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
-+ * and usage.
-+ */
-+void
-+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-+			int nelems, int dir)
-+{
-+	int i;
-+
-+	BUG_ON(dir == DMA_NONE);
-+
-+	for (i = 0; i < nelems; i++, sg++)
-+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+			sync_single(hwdev,
-+				    (void *)bus_to_virt(sg->dma_address),
-+				    sg->dma_length, dir);
-+}
-+
-+void
-+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-+			   int nelems, int dir)
-+{
-+	int i;
-+
-+	BUG_ON(dir == DMA_NONE);
-+
-+	for (i = 0; i < nelems; i++, sg++)
-+		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-+			sync_single(hwdev,
-+				    (void *)bus_to_virt(sg->dma_address),
-+				    sg->dma_length, dir);
-+}
-+
-+dma_addr_t
-+swiotlb_map_page(struct device *hwdev, struct page *page,
-+		 unsigned long offset, size_t size,
-+		 enum dma_data_direction direction)
-+{
-+	struct phys_addr buffer;
-+	dma_addr_t dev_addr;
-+	char *map;
-+
-+	dev_addr = page_to_phys(page) + offset;
-+	if (address_needs_mapping(hwdev, dev_addr)) {
-+		buffer.page   = page;
-+		buffer.offset = offset;
-+		map = map_single(hwdev, buffer, size, direction);
-+		if (!map) {
-+			swiotlb_full(hwdev, size, direction, 1);
-+			map = io_tlb_overflow_buffer;
-+		}
-+		dev_addr = (dma_addr_t)virt_to_bus(map);
-+	}
-+
-+	return dev_addr;
-+}
-+
-+void
-+swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+		   size_t size, enum dma_data_direction direction)
-+{
-+	BUG_ON(direction == DMA_NONE);
-+	if (in_swiotlb_aperture(dma_address))
-+		unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
-+}
-+
-+int
-+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
-+{
-+	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
-+}
-+
-+/*
-+ * Return whether the given PCI device DMA address mask can be supported
-+ * properly.  For example, if your device can only drive the low 24-bits
-+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
-+ * this function.
-+ */
-+int
-+swiotlb_dma_supported (struct device *hwdev, u64 mask)
-+{
-+	return (mask >= (iotlb_bus_end - 1));
-+}
-+
-+EXPORT_SYMBOL(swiotlb_init);
-+EXPORT_SYMBOL(swiotlb_map_single);
-+EXPORT_SYMBOL(swiotlb_unmap_single);
-+EXPORT_SYMBOL(swiotlb_map_sg);
-+EXPORT_SYMBOL(swiotlb_unmap_sg);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
-+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
-+EXPORT_SYMBOL(swiotlb_map_page);
-+EXPORT_SYMBOL(swiotlb_unmap_page);
-+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-+EXPORT_SYMBOL(swiotlb_dma_supported);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/time.c linux-2.6.12-xen/arch/xen/i386/kernel/time.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/time.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/time.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,927 @@
-+/*
-+ *  linux/arch/i386/kernel/time.c
-+ *
-+ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
-+ *
-+ * This file contains the PC-specific time handling details:
-+ * reading the RTC at bootup, etc..
-+ * 1994-07-02    Alan Modra
-+ *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
-+ * 1995-03-26    Markus Kuhn
-+ *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
-+ *      precision CMOS clock update
-+ * 1996-05-03    Ingo Molnar
-+ *      fixed time warps in do_[slow|fast]_gettimeoffset()
-+ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
-+ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
-+ * 1998-09-05    (Various)
-+ *	More robust do_fast_gettimeoffset() algorithm implemented
-+ *	(works with APM, Cyrix 6x86MX and Centaur C6),
-+ *	monotonic gettimeofday() with fast_get_timeoffset(),
-+ *	drift-proof precision TSC calibration on boot
-+ *	(C. Scott Ananian <cananian at alumni.princeton.edu>, Andrew D.
-+ *	Balsa <andrebalsa at altern.org>, Philip Gladstone <philip at raptor.com>;
-+ *	ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause at tu-harburg.de>).
-+ * 1998-12-16    Andrea Arcangeli
-+ *	Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
-+ *	because was not accounting lost_ticks.
-+ * 1998-12-24 Copyright (C) 1998  Andrea Arcangeli
-+ *	Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
-+ *	serialize accesses to xtime/lost_ticks).
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/param.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/interrupt.h>
-+#include <linux/time.h>
-+#include <linux/delay.h>
-+#include <linux/init.h>
-+#include <linux/smp.h>
-+#include <linux/module.h>
-+#include <linux/sysdev.h>
-+#include <linux/bcd.h>
-+#include <linux/efi.h>
-+#include <linux/mca.h>
-+#include <linux/sysctl.h>
-+#include <linux/percpu.h>
-+
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/irq.h>
-+#include <asm/msr.h>
-+#include <asm/delay.h>
-+#include <asm/mpspec.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#include <asm/timer.h>
-+
-+#include "mach_time.h"
-+
-+#include <linux/timex.h>
-+#include <linux/config.h>
-+
-+#include <asm/hpet.h>
-+
-+#include <asm/arch_hooks.h>
-+
-+#include "io_ports.h"
-+
-+#include <asm-xen/evtchn.h>
-+
-+extern spinlock_t i8259A_lock;
-+int pit_latch_buggy;              /* extern */
-+
-+u64 jiffies_64 = INITIAL_JIFFIES;
-+
-+EXPORT_SYMBOL(jiffies_64);
-+
-+#if defined(__x86_64__)
-+unsigned long vxtime_hz = PIT_TICK_RATE;
-+struct vxtime_data __vxtime __section_vxtime;   /* for vsyscalls */
-+volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
-+unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
-+struct timespec __xtime __section_xtime;
-+struct timezone __sys_tz __section_sys_tz;
-+#endif
-+
-+#if defined(__x86_64__)
-+unsigned int cpu_khz;	/* Detected as we calibrate the TSC */
-+#else
-+unsigned long cpu_khz;	/* Detected as we calibrate the TSC */
-+#endif
-+
-+extern unsigned long wall_jiffies;
-+
-+DEFINE_SPINLOCK(rtc_lock);
-+
-+DEFINE_SPINLOCK(i8253_lock);
-+EXPORT_SYMBOL(i8253_lock);
-+
-+extern struct init_timer_opts timer_tsc_init;
-+extern struct timer_opts timer_tsc;
-+struct timer_opts *cur_timer = &timer_tsc;
-+
-+/* These are periodically updated in shared_info, and then copied here. */
-+struct shadow_time_info {
-+	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
-+	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
-+	u32 tsc_to_nsec_mul;
-+	u32 tsc_to_usec_mul;
-+	int tsc_shift;
-+	u32 version;
-+};
-+static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
-+static struct timespec shadow_tv;
-+static u32 shadow_tv_version;
-+
-+/* Keep track of last time we did processing/updating of jiffies and xtime. */
-+static u64 processed_system_time;   /* System time (ns) at last processing. */
-+static DEFINE_PER_CPU(u64, processed_system_time);
-+
-+/* Must be signed, as it's compared with s64 quantities which can be -ve. */
-+#define NS_PER_TICK (1000000000LL/HZ)
-+
-+static inline void __normalize_time(time_t *sec, s64 *nsec)
-+{
-+	while (*nsec >= NSEC_PER_SEC) {
-+		(*nsec) -= NSEC_PER_SEC;
-+		(*sec)++;
-+	}
-+	while (*nsec < 0) {
-+		(*nsec) += NSEC_PER_SEC;
-+		(*sec)--;
-+	}
-+}
-+
-+/* Does this guest OS track Xen time, or set its wall clock independently? */
-+static int independent_wallclock = 0;
-+static int __init __independent_wallclock(char *str)
-+{
-+	independent_wallclock = 1;
-+	return 1;
-+}
-+__setup("independent_wallclock", __independent_wallclock);
-+
-+int tsc_disable __initdata = 0;
-+
-+static void delay_tsc(unsigned long loops)
-+{
-+	unsigned long bclock, now;
-+	
-+	rdtscl(bclock);
-+	do
-+	{
-+		rep_nop();
-+		rdtscl(now);
-+	} while ((now-bclock) < loops);
-+}
-+
-+struct timer_opts timer_tsc = {
-+	.name = "tsc",
-+	.delay = delay_tsc,
-+};
-+
-+/*
-+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
-+ * yielding a 64-bit result.
-+ */
-+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-+{
-+	u64 product;
-+#ifdef __i386__
-+	u32 tmp1, tmp2;
-+#endif
-+
-+	if ( shift < 0 )
-+		delta >>= -shift;
-+	else
-+		delta <<= shift;
-+
-+#ifdef __i386__
-+	__asm__ (
-+		"mul  %5       ; "
-+		"mov  %4,%%eax ; "
-+		"mov  %%edx,%4 ; "
-+		"mul  %5       ; "
-+		"xor  %5,%5    ; "
-+		"add  %4,%%eax ; "
-+		"adc  %5,%%edx ; "
-+		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
-+		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-+#else
-+	__asm__ (
-+		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-+		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-+#endif
-+
-+	return product;
-+}
-+
-+void init_cpu_khz(void)
-+{
-+	u64 __cpu_khz = 1000000ULL << 32;
-+	struct vcpu_time_info *info;
-+	info = &HYPERVISOR_shared_info->vcpu_info[0].time;
-+	do_div(__cpu_khz, info->tsc_to_system_mul);
-+	if ( info->tsc_shift < 0 )
-+		cpu_khz = __cpu_khz << -info->tsc_shift;
-+	else
-+		cpu_khz = __cpu_khz >> info->tsc_shift;
-+}
-+
-+static u64 get_nsec_offset(struct shadow_time_info *shadow)
-+{
-+	u64 now, delta;
-+	rdtscll(now);
-+	delta = now - shadow->tsc_timestamp;
-+	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
-+}
-+
-+static unsigned long get_usec_offset(struct shadow_time_info *shadow)
-+{
-+	u64 now, delta;
-+	rdtscll(now);
-+	delta = now - shadow->tsc_timestamp;
-+	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
-+}
-+
-+static void __update_wallclock(time_t sec, long nsec)
-+{
-+	long wtm_nsec, xtime_nsec;
-+	time_t wtm_sec, xtime_sec;
-+	u64 tmp, wc_nsec;
-+
-+	/* Adjust wall-clock time base based on wall_jiffies ticks. */
-+	wc_nsec = processed_system_time;
-+	wc_nsec += sec * (u64)NSEC_PER_SEC;
-+	wc_nsec += nsec;
-+	wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
-+
-+	/* Split wallclock base into seconds and nanoseconds. */
-+	tmp = wc_nsec;
-+	xtime_nsec = do_div(tmp, 1000000000);
-+	xtime_sec  = (time_t)tmp;
-+
-+	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
-+	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
-+
-+	set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
-+	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-+
-+	time_adjust = 0;		/* stop active adjtime() */
-+	time_status |= STA_UNSYNC;
-+	time_maxerror = NTP_PHASE_LIMIT;
-+	time_esterror = NTP_PHASE_LIMIT;
-+}
-+
-+static void update_wallclock(void)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+
-+	do {
-+		shadow_tv_version = s->wc_version;
-+		rmb();
-+		shadow_tv.tv_sec  = s->wc_sec;
-+		shadow_tv.tv_nsec = s->wc_nsec;
-+		rmb();
-+	}
-+	while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
-+
-+	if (!independent_wallclock)
-+		__update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
-+}
-+
-+/*
-+ * Reads a consistent set of time-base values from Xen, into a shadow data
-+ * area.
-+ */
-+static void get_time_values_from_xen(void)
-+{
-+	shared_info_t           *s = HYPERVISOR_shared_info;
-+	struct vcpu_time_info   *src;
-+	struct shadow_time_info *dst;
-+
-+	src = &s->vcpu_info[smp_processor_id()].time;
-+	dst = &per_cpu(shadow_time, smp_processor_id());
-+
-+	do {
-+		dst->version = src->version;
-+		rmb();
-+		dst->tsc_timestamp     = src->tsc_timestamp;
-+		dst->system_timestamp  = src->system_time;
-+		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
-+		dst->tsc_shift         = src->tsc_shift;
-+		rmb();
-+	}
-+	while ((src->version & 1) | (dst->version ^ src->version));
-+
-+	dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
-+}
-+
-+static inline int time_values_up_to_date(int cpu)
-+{
-+	struct vcpu_time_info   *src;
-+	struct shadow_time_info *dst;
-+
-+	src = &HYPERVISOR_shared_info->vcpu_info[cpu].time;
-+	dst = &per_cpu(shadow_time, cpu); 
-+
-+	return (dst->version == src->version);
-+}
-+
-+/*
-+ * This is a special lock that is owned by the CPU and holds the index
-+ * register we are working with.  It is required for NMI access to the
-+ * CMOS/RTC registers.  See include/asm-i386/mc146818rtc.h for details.
-+ */
-+volatile unsigned long cmos_lock = 0;
-+EXPORT_SYMBOL(cmos_lock);
-+
-+/* Routines for accessing the CMOS RAM/RTC. */
-+unsigned char rtc_cmos_read(unsigned char addr)
-+{
-+	unsigned char val;
-+	lock_cmos_prefix(addr);
-+	outb_p(addr, RTC_PORT(0));
-+	val = inb_p(RTC_PORT(1));
-+	lock_cmos_suffix(addr);
-+	return val;
-+}
-+EXPORT_SYMBOL(rtc_cmos_read);
-+
-+void rtc_cmos_write(unsigned char val, unsigned char addr)
-+{
-+	lock_cmos_prefix(addr);
-+	outb_p(addr, RTC_PORT(0));
-+	outb_p(val, RTC_PORT(1));
-+	lock_cmos_suffix(addr);
-+}
-+EXPORT_SYMBOL(rtc_cmos_write);
-+
-+/*
-+ * This version of gettimeofday has microsecond resolution
-+ * and better than microsecond precision on fast x86 machines with TSC.
-+ */
-+void do_gettimeofday(struct timeval *tv)
-+{
-+	unsigned long seq;
-+	unsigned long usec, sec;
-+	unsigned long max_ntp_tick;
-+	s64 nsec;
-+	unsigned int cpu;
-+	struct shadow_time_info *shadow;
-+	u32 local_time_version;
-+
-+	cpu = get_cpu();
-+	shadow = &per_cpu(shadow_time, cpu);
-+
-+	do {
-+		unsigned long lost;
-+
-+		local_time_version = shadow->version;
-+		seq = read_seqbegin(&xtime_lock);
-+
-+		usec = get_usec_offset(shadow);
-+		lost = jiffies - wall_jiffies;
-+
-+		/*
-+		 * If time_adjust is negative then NTP is slowing the clock
-+		 * so make sure not to go into next possible interval.
-+		 * Better to lose some accuracy than have time go backwards..
-+		 */
-+		if (unlikely(time_adjust < 0)) {
-+			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
-+			usec = min(usec, max_ntp_tick);
-+
-+			if (lost)
-+				usec += lost * max_ntp_tick;
-+		}
-+		else if (unlikely(lost))
-+			usec += lost * (USEC_PER_SEC / HZ);
-+
-+		sec = xtime.tv_sec;
-+		usec += (xtime.tv_nsec / NSEC_PER_USEC);
-+
-+		nsec = shadow->system_timestamp - processed_system_time;
-+		__normalize_time(&sec, &nsec);
-+		usec += (long)nsec / NSEC_PER_USEC;
-+
-+		if (unlikely(!time_values_up_to_date(cpu))) {
-+			/*
-+			 * We may have blocked for a long time,
-+			 * rendering our calculations invalid
-+			 * (e.g. the time delta may have
-+			 * overflowed). Detect that and recalculate
-+			 * with fresh values.
-+			 */
-+			get_time_values_from_xen();
-+			continue;
-+		}
-+	} while (read_seqretry(&xtime_lock, seq) ||
-+		 (local_time_version != shadow->version));
-+
-+	put_cpu();
-+
-+	while (usec >= USEC_PER_SEC) {
-+		usec -= USEC_PER_SEC;
-+		sec++;
-+	}
-+
-+	tv->tv_sec = sec;
-+	tv->tv_usec = usec;
-+}
-+
-+EXPORT_SYMBOL(do_gettimeofday);
-+
-+int do_settimeofday(struct timespec *tv)
-+{
-+	time_t sec;
-+	s64 nsec;
-+	unsigned int cpu;
-+	struct shadow_time_info *shadow;
-+	dom0_op_t op;
-+
-+	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-+		return -EINVAL;
-+
-+	cpu = get_cpu();
-+	shadow = &per_cpu(shadow_time, cpu);
-+
-+	write_seqlock_irq(&xtime_lock);
-+
-+	/*
-+	 * Ensure we don't get blocked for so long that our time delta
-+	 * overflows. If that were to happen then our shadow time values would
-+	 * be stale, so we can retry with fresh ones.
-+	 */
-+	for ( ; ; ) {
-+		nsec = tv->tv_nsec - get_nsec_offset(shadow);
-+		if (time_values_up_to_date(cpu))
-+			break;
-+		get_time_values_from_xen();
-+	}
-+	sec = tv->tv_sec;
-+	__normalize_time(&sec, &nsec);
-+
-+	if ((xen_start_info->flags & SIF_INITDOMAIN) &&
-+	    !independent_wallclock) {
-+		op.cmd = DOM0_SETTIME;
-+		op.u.settime.secs        = sec;
-+		op.u.settime.nsecs       = nsec;
-+		op.u.settime.system_time = shadow->system_timestamp;
-+		HYPERVISOR_dom0_op(&op);
-+		update_wallclock();
-+	} else if (independent_wallclock) {
-+		nsec -= shadow->system_timestamp;
-+		__normalize_time(&sec, &nsec);
-+		__update_wallclock(sec, nsec);
-+	}
-+
-+	write_sequnlock_irq(&xtime_lock);
-+
-+	put_cpu();
-+
-+	clock_was_set();
-+	return 0;
-+}
-+
-+EXPORT_SYMBOL(do_settimeofday);
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+static int set_rtc_mmss(unsigned long nowtime)
-+{
-+	int retval;
-+
-+	WARN_ON(irqs_disabled());
-+
-+	if (!(xen_start_info->flags & SIF_INITDOMAIN))
-+		return 0;
-+
-+	/* gets recalled with irq locally disabled */
-+	spin_lock_irq(&rtc_lock);
-+	if (efi_enabled)
-+		retval = efi_set_rtc_mmss(nowtime);
-+	else
-+		retval = mach_set_rtc_mmss(nowtime);
-+	spin_unlock_irq(&rtc_lock);
-+
-+	return retval;
-+}
-+#else
-+static int set_rtc_mmss(unsigned long nowtime)
-+{
-+	return 0;
-+}
-+#endif
-+
-+/* monotonic_clock(): returns # of nanoseconds passed since time_init()
-+ *		Note: This function is required to return accurate
-+ *		time even in the absence of multiple timer ticks.
-+ */
-+unsigned long long monotonic_clock(void)
-+{
-+	int cpu = get_cpu();
-+	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+	u64 time;
-+	u32 local_time_version;
-+
-+	do {
-+		local_time_version = shadow->version;
-+		smp_rmb();
-+		time = shadow->system_timestamp + get_nsec_offset(shadow);
-+		if (!time_values_up_to_date(cpu))
-+			get_time_values_from_xen();
-+		smp_rmb();
-+	} while (local_time_version != shadow->version);
-+
-+	put_cpu();
-+
-+	return time;
-+}
-+EXPORT_SYMBOL(monotonic_clock);
-+
-+unsigned long long sched_clock(void)
-+{
-+	return monotonic_clock();
-+}
-+
-+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+unsigned long profile_pc(struct pt_regs *regs)
-+{
-+	unsigned long pc = instruction_pointer(regs);
-+
-+	if (in_lock_functions(pc))
-+		return *(unsigned long *)(regs->ebp + 4);
-+
-+	return pc;
-+}
-+EXPORT_SYMBOL(profile_pc);
-+#endif
-+
-+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	s64 delta, delta_cpu;
-+	int i, cpu = smp_processor_id();
-+	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
-+
-+	write_seqlock(&xtime_lock);
-+
-+	do {
-+		get_time_values_from_xen();
-+
-+		delta = delta_cpu = 
-+			shadow->system_timestamp + get_nsec_offset(shadow);
-+		delta     -= processed_system_time;
-+		delta_cpu -= per_cpu(processed_system_time, cpu);
-+	}
-+	while (!time_values_up_to_date(cpu));
-+
-+	if ((unlikely(delta < -1000000LL) || unlikely(delta_cpu < 0))
-+	    && printk_ratelimit()) {
-+		printk("Timer ISR/%d: Time went backwards: "
-+		       "delta=%lld cpu_delta=%lld shadow=%lld "
-+		       "off=%lld processed=%lld cpu_processed=%lld\n",
-+		       cpu, delta, delta_cpu, shadow->system_timestamp,
-+		       (s64)get_nsec_offset(shadow),
-+		       processed_system_time,
-+		       per_cpu(processed_system_time, cpu));
-+		for (i = 0; i < num_online_cpus(); i++)
-+			printk(" %d: %lld\n", i,
-+			       per_cpu(processed_system_time, i));
-+	}
-+
-+	/* System-wide jiffy work. */
-+	while (delta >= NS_PER_TICK) {
-+		delta -= NS_PER_TICK;
-+		processed_system_time += NS_PER_TICK;
-+		do_timer(regs);
-+	}
-+
-+	if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
-+		update_wallclock();
-+		clock_was_set();
-+	}
-+
-+	write_sequnlock(&xtime_lock);
-+
-+	/*
-+	 * Local CPU jiffy work. No need to hold xtime_lock, and I'm not sure
-+	 * if there is risk of deadlock if we do (since update_process_times
-+	 * may do scheduler rebalancing work and thus acquire runqueue locks).
-+	 */
-+	while (delta_cpu >= NS_PER_TICK) {
-+		delta_cpu -= NS_PER_TICK;
-+		per_cpu(processed_system_time, cpu) += NS_PER_TICK;
-+		update_process_times(user_mode(regs));
-+		profile_tick(CPU_PROFILING, regs);
-+	}
-+
-+	return IRQ_HANDLED;
-+}
-+
-+/* not static: needed by APM */
-+unsigned long get_cmos_time(void)
-+{
-+	unsigned long retval;
-+
-+	spin_lock(&rtc_lock);
-+
-+	if (efi_enabled)
-+		retval = efi_get_time();
-+	else
-+		retval = mach_get_cmos_time();
-+
-+	spin_unlock(&rtc_lock);
-+
-+	return retval;
-+}
-+static void sync_cmos_clock(unsigned long dummy);
-+
-+static struct timer_list sync_cmos_timer =
-+                                      TIMER_INITIALIZER(sync_cmos_clock, 0, 0);
-+
-+static void sync_cmos_clock(unsigned long dummy)
-+{
-+	struct timeval now, next;
-+	int fail = 1;
-+
-+	/*
-+	 * If we have an externally synchronized Linux clock, then update
-+	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-+	 * called as close as possible to 500 ms before the new second starts.
-+	 * This code is run on a timer.  If the clock is set, that timer
-+	 * may not expire at the correct time.  Thus, we adjust...
-+	 */
-+	if ((time_status & STA_UNSYNC) != 0)
-+		/*
-+		 * Not synced, exit, do not restart a timer (if one is
-+		 * running, let it run out).
-+		 */
-+		return;
-+
-+	do_gettimeofday(&now);
-+	if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
-+	    now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
-+		fail = set_rtc_mmss(now.tv_sec);
-+
-+	next.tv_usec = USEC_AFTER - now.tv_usec;
-+	if (next.tv_usec <= 0)
-+		next.tv_usec += USEC_PER_SEC;
-+
-+	if (!fail)
-+		next.tv_sec = 659;
-+	else
-+		next.tv_sec = 0;
-+
-+	if (next.tv_usec >= USEC_PER_SEC) {
-+		next.tv_sec++;
-+		next.tv_usec -= USEC_PER_SEC;
-+	}
-+	mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
-+}
-+
-+void notify_arch_cmos_timer(void)
-+{
-+	mod_timer(&sync_cmos_timer, jiffies + 1);
-+}
-+
-+static long clock_cmos_diff, sleep_start;
-+
-+static int timer_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+	/*
-+	 * Estimate time zone so that set_time can update the clock
-+	 */
-+	clock_cmos_diff = -get_cmos_time();
-+	clock_cmos_diff += get_seconds();
-+	sleep_start = get_cmos_time();
-+	return 0;
-+}
-+
-+static int timer_resume(struct sys_device *dev)
-+{
-+	unsigned long flags;
-+	unsigned long sec;
-+	unsigned long sleep_length;
-+
-+#ifdef CONFIG_HPET_TIMER
-+	if (is_hpet_enabled())
-+		hpet_reenable();
-+#endif
-+	sec = get_cmos_time() + clock_cmos_diff;
-+	sleep_length = (get_cmos_time() - sleep_start) * HZ;
-+	write_seqlock_irqsave(&xtime_lock, flags);
-+	xtime.tv_sec = sec;
-+	xtime.tv_nsec = 0;
-+	write_sequnlock_irqrestore(&xtime_lock, flags);
-+	jiffies += sleep_length;
-+	wall_jiffies += sleep_length;
-+	return 0;
-+}
-+
-+static struct sysdev_class timer_sysclass = {
-+	.resume = timer_resume,
-+	.suspend = timer_suspend,
-+	set_kset_name("timer"),
-+};
-+
-+
-+/* XXX this driverfs stuff should probably go elsewhere later -john */
-+static struct sys_device device_timer = {
-+	.id	= 0,
-+	.cls	= &timer_sysclass,
-+};
-+
-+static int time_init_device(void)
-+{
-+	int error = sysdev_class_register(&timer_sysclass);
-+	if (!error)
-+		error = sysdev_register(&device_timer);
-+	return error;
-+}
-+
-+device_initcall(time_init_device);
-+
-+#ifdef CONFIG_HPET_TIMER
-+extern void (*late_time_init)(void);
-+/* Duplicate of time_init() below, with hpet_enable part added */
-+static void __init hpet_time_init(void)
-+{
-+	xtime.tv_sec = get_cmos_time();
-+	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
-+	set_normalized_timespec(&wall_to_monotonic,
-+		-xtime.tv_sec, -xtime.tv_nsec);
-+
-+	if ((hpet_enable() >= 0) && hpet_use_timer) {
-+		printk("Using HPET for base-timer\n");
-+	}
-+
-+	cur_timer = select_timer();
-+	printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
-+
-+	time_init_hook();
-+}
-+#endif
-+
-+/* Dynamically-mapped IRQ. */
-+DEFINE_PER_CPU(int, timer_irq);
-+
-+extern void (*late_time_init)(void);
-+static void setup_cpu0_timer_irq(void)
-+{
-+	per_cpu(timer_irq, 0) =
-+		bind_virq_to_irqhandler(
-+			VIRQ_TIMER,
-+			0,
-+			timer_interrupt,
-+			SA_INTERRUPT,
-+			"timer0",
-+			NULL);
-+	BUG_ON(per_cpu(timer_irq, 0) < 0);
-+}
-+
-+void __init time_init(void)
-+{
-+#ifdef CONFIG_HPET_TIMER
-+	if (is_hpet_capable()) {
-+		/*
-+		 * HPET initialization needs to do memory-mapped io. So, let
-+		 * us do a late initialization after mem_init().
-+		 */
-+		late_time_init = hpet_time_init;
-+		return;
-+	}
-+#endif
-+	get_time_values_from_xen();
-+
-+	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+	per_cpu(processed_system_time, 0) = processed_system_time;
-+
-+	update_wallclock();
-+
-+	init_cpu_khz();
-+	printk(KERN_INFO "Xen reported: %lu.%03lu MHz processor.\n",
-+	       cpu_khz / 1000, cpu_khz % 1000);
-+
-+#if defined(__x86_64__)
-+	vxtime.mode = VXTIME_TSC;
-+	vxtime.quot = (1000000L << 32) / vxtime_hz;
-+	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
-+	vxtime.hz = vxtime_hz;
-+	sync_core();
-+	rdtscll(vxtime.last_tsc);
-+#endif
-+
-+	/* Cannot request_irq() until kmem is initialised. */
-+	late_time_init = setup_cpu0_timer_irq;
-+}
-+
-+/* Convert jiffies to system time. */
-+static inline u64 jiffies_to_st(unsigned long j) 
-+{
-+	unsigned long seq;
-+	long delta;
-+	u64 st;
-+
-+	do {
-+		seq = read_seqbegin(&xtime_lock);
-+		delta = j - jiffies;
-+		/* NB. The next check can trigger in some wrap-around cases,
-+		 * but that's ok: we'll just end up with a shorter timeout. */
-+		if (delta < 1) 
-+			delta = 1;
-+		st = processed_system_time + (delta * (u64)NS_PER_TICK);
-+	} while (read_seqretry(&xtime_lock, seq));
-+
-+	return st;
-+}
-+
-+/*
-+ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
-+ * These functions are based on implementations from arch/s390/kernel/time.c
-+ */
-+void stop_hz_timer(void)
-+{
-+	unsigned int cpu = smp_processor_id();
-+	unsigned long j;
-+	
-+	/* s390 does this /before/ checking rcu_pending(). We copy them. */
-+	cpu_set(cpu, nohz_cpu_mask);
-+
-+	/* Leave ourselves in 'tick mode' if rcu or softirq pending. */
-+	if (rcu_pending(cpu) || local_softirq_pending()) {
-+		cpu_clear(cpu, nohz_cpu_mask);
-+		j = jiffies + 1;
-+	} else {
-+		j = next_timer_interrupt();
-+	}
-+
-+	BUG_ON(HYPERVISOR_set_timer_op(jiffies_to_st(j)) != 0);
-+}
-+
-+void start_hz_timer(void)
-+{
-+	cpu_clear(smp_processor_id(), nohz_cpu_mask);
-+}
-+
-+/* No locking required. We are the only CPU running, and interrupts are off. */
-+void time_resume(void)
-+{
-+	init_cpu_khz();
-+
-+	get_time_values_from_xen();
-+
-+	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-+	per_cpu(processed_system_time, 0) = processed_system_time;
-+
-+	update_wallclock();
-+}
-+
-+#ifdef CONFIG_SMP
-+static char timer_name[NR_CPUS][15];
-+
-+void local_setup_timer(unsigned int cpu)
-+{
-+	int seq;
-+
-+	BUG_ON(cpu == 0);
-+
-+	do {
-+		seq = read_seqbegin(&xtime_lock);
-+		/* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
-+		per_cpu(processed_system_time, cpu) = 
-+			per_cpu(shadow_time, 0).system_timestamp;
-+	} while (read_seqretry(&xtime_lock, seq));
-+
-+	sprintf(timer_name[cpu], "timer%d", cpu);
-+	per_cpu(timer_irq, cpu) =
-+		bind_virq_to_irqhandler(
-+			VIRQ_TIMER,
-+			cpu,
-+			timer_interrupt,
-+			SA_INTERRUPT,
-+			timer_name[cpu],
-+			NULL);
-+	BUG_ON(per_cpu(timer_irq, cpu) < 0);
-+}
-+
-+void local_teardown_timer(unsigned int cpu)
-+{
-+	BUG_ON(cpu == 0);
-+	unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-+}
-+#endif
-+
-+/*
-+ * /proc/sys/xen: This really belongs in another file. It can stay here for
-+ * now however.
-+ */
-+static ctl_table xen_subtable[] = {
-+	{1, "independent_wallclock", &independent_wallclock,
-+	 sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
-+	{0}
-+};
-+static ctl_table xen_table[] = {
-+	{123, "xen", NULL, 0, 0555, xen_subtable},
-+	{0}
-+};
-+static int __init xen_sysctl_init(void)
-+{
-+	(void)register_sysctl_table(xen_table, 0);
-+	return 0;
-+}
-+__initcall(xen_sysctl_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/traps.c linux-2.6.12-xen/arch/xen/i386/kernel/traps.c
---- pristine-linux-2.6.12/arch/xen/i386/kernel/traps.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/traps.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1029 @@
-+/*
-+ *  linux/arch/i386/traps.c
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
-+ */
-+
-+/*
-+ * 'Traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'asm.s'.
-+ */
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/timer.h>
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/interrupt.h>
-+#include <linux/highmem.h>
-+#include <linux/kallsyms.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/kprobes.h>
-+
-+#ifdef CONFIG_EISA
-+#include <linux/ioport.h>
-+#include <linux/eisa.h>
-+#endif
-+
-+#ifdef CONFIG_MCA
-+#include <linux/mca.h>
-+#endif
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/debugreg.h>
-+#include <asm/desc.h>
-+#include <asm/i387.h>
-+#include <asm/nmi.h>
-+
-+#include <asm/smp.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/kdebug.h>
-+
-+#include <linux/irq.h>
-+#include <linux/module.h>
-+
-+#include "mach_traps.h"
-+
-+asmlinkage int system_call(void);
-+
-+/* Do we ignore FPU interrupts ? */
-+char ignore_fpu_irq = 0;
-+
-+/*
-+ * The IDT has to be page-aligned to simplify the Pentium
-+ * F0 0F bug workaround.. We have a special link segment
-+ * for this.
-+ */
-+struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
-+
-+asmlinkage void divide_error(void);
-+asmlinkage void debug(void);
-+asmlinkage void nmi(void);
-+asmlinkage void int3(void);
-+asmlinkage void overflow(void);
-+asmlinkage void bounds(void);
-+asmlinkage void invalid_op(void);
-+asmlinkage void device_not_available(void);
-+asmlinkage void coprocessor_segment_overrun(void);
-+asmlinkage void invalid_TSS(void);
-+asmlinkage void segment_not_present(void);
-+asmlinkage void stack_segment(void);
-+asmlinkage void general_protection(void);
-+asmlinkage void page_fault(void);
-+asmlinkage void coprocessor_error(void);
-+asmlinkage void simd_coprocessor_error(void);
-+asmlinkage void alignment_check(void);
-+asmlinkage void fixup_4gb_segment(void);
-+asmlinkage void machine_check(void);
-+
-+static int kstack_depth_to_print = 24;
-+struct notifier_block *i386die_chain;
-+static DEFINE_SPINLOCK(die_notifier_lock);
-+
-+int register_die_notifier(struct notifier_block *nb)
-+{
-+	int err = 0;
-+	unsigned long flags;
-+	spin_lock_irqsave(&die_notifier_lock, flags);
-+	err = notifier_chain_register(&i386die_chain, nb);
-+	spin_unlock_irqrestore(&die_notifier_lock, flags);
-+	return err;
-+}
-+
-+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
-+{
-+	return	p > (void *)tinfo &&
-+		p < (void *)tinfo + THREAD_SIZE - 3;
-+}
-+
-+static inline unsigned long print_context_stack(struct thread_info *tinfo,
-+				unsigned long *stack, unsigned long ebp)
-+{
-+	unsigned long addr;
-+
-+#ifdef	CONFIG_FRAME_POINTER
-+	while (valid_stack_ptr(tinfo, (void *)ebp)) {
-+		addr = *(unsigned long *)(ebp + 4);
-+		printk(" [<%08lx>] ", addr);
-+		print_symbol("%s", addr);
-+		printk("\n");
-+		ebp = *(unsigned long *)ebp;
-+	}
-+#else
-+	while (valid_stack_ptr(tinfo, stack)) {
-+		addr = *stack++;
-+		if (__kernel_text_address(addr)) {
-+			printk(" [<%08lx>]", addr);
-+			print_symbol(" %s", addr);
-+			printk("\n");
-+		}
-+	}
-+#endif
-+	return ebp;
-+}
-+
-+void show_trace(struct task_struct *task, unsigned long * stack)
-+{
-+	unsigned long ebp;
-+
-+	if (!task)
-+		task = current;
-+
-+	if (task == current) {
-+		/* Grab ebp right from our regs */
-+		asm ("movl %%ebp, %0" : "=r" (ebp) : );
-+	} else {
-+		/* ebp is the last reg pushed by switch_to */
-+		ebp = *(unsigned long *) task->thread.esp;
-+	}
-+
-+	while (1) {
-+		struct thread_info *context;
-+		context = (struct thread_info *)
-+			((unsigned long)stack & (~(THREAD_SIZE - 1)));
-+		ebp = print_context_stack(context, stack, ebp);
-+		stack = (unsigned long*)context->previous_esp;
-+		if (!stack)
-+			break;
-+		printk(" =======================\n");
-+	}
-+}
-+
-+void show_stack(struct task_struct *task, unsigned long *esp)
-+{
-+	unsigned long *stack;
-+	int i;
-+
-+	if (esp == NULL) {
-+		if (task)
-+			esp = (unsigned long*)task->thread.esp;
-+		else
-+			esp = (unsigned long *)&esp;
-+	}
-+
-+	stack = esp;
-+	for(i = 0; i < kstack_depth_to_print; i++) {
-+		if (kstack_end(stack))
-+			break;
-+		if (i && ((i % 8) == 0))
-+			printk("\n       ");
-+		printk("%08lx ", *stack++);
-+	}
-+	printk("\nCall Trace:\n");
-+	show_trace(task, esp);
-+}
-+
-+/*
-+ * The architecture-independent dump_stack generator
-+ */
-+void dump_stack(void)
-+{
-+	unsigned long stack;
-+
-+	show_trace(current, &stack);
-+}
-+
-+EXPORT_SYMBOL(dump_stack);
-+
-+void show_registers(struct pt_regs *regs)
-+{
-+	int i;
-+	int in_kernel = 1;
-+	unsigned long esp;
-+	unsigned short ss;
-+
-+	esp = (unsigned long) (&regs->esp);
-+	ss = __KERNEL_DS;
-+	if (regs->xcs & 2) {
-+		in_kernel = 0;
-+		esp = regs->esp;
-+		ss = regs->xss & 0xffff;
-+	}
-+	print_modules();
-+	printk("CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\nEFLAGS: %08lx"
-+			"   (%s) \n",
-+		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
-+		print_tainted(), regs->eflags, system_utsname.release);
-+	print_symbol("EIP is at %s\n", regs->eip);
-+	printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
-+		regs->eax, regs->ebx, regs->ecx, regs->edx);
-+	printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
-+		regs->esi, regs->edi, regs->ebp, esp);
-+	printk("ds: %04x   es: %04x   ss: %04x\n",
-+		regs->xds & 0xffff, regs->xes & 0xffff, ss);
-+	printk("Process %s (pid: %d, threadinfo=%p task=%p)",
-+		current->comm, current->pid, current_thread_info(), current);
-+	/*
-+	 * When in-kernel, we also print out the stack and code at the
-+	 * time of the fault..
-+	 */
-+	if (in_kernel) {
-+		u8 *eip;
-+
-+		printk("\nStack: ");
-+		show_stack(NULL, (unsigned long*)esp);
-+
-+		printk("Code: ");
-+
-+		eip = (u8 *)regs->eip - 43;
-+		for (i = 0; i < 64; i++, eip++) {
-+			unsigned char c;
-+
-+			if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
-+				printk(" Bad EIP value.");
-+				break;
-+			}
-+			if (eip == (u8 *)regs->eip)
-+				printk("<%02x> ", c);
-+			else
-+				printk("%02x ", c);
-+		}
-+	}
-+	printk("\n");
-+}	
-+
-+static void handle_BUG(struct pt_regs *regs)
-+{
-+	unsigned short ud2;
-+	unsigned short line;
-+	char *file;
-+	char c;
-+	unsigned long eip;
-+
-+	if (regs->xcs & 2)
-+		goto no_bug;		/* Not in kernel */
-+
-+	eip = regs->eip;
-+
-+	if (eip < PAGE_OFFSET)
-+		goto no_bug;
-+	if (__get_user(ud2, (unsigned short *)eip))
-+		goto no_bug;
-+	if (ud2 != 0x0b0f)
-+		goto no_bug;
-+	if (__get_user(line, (unsigned short *)(eip + 2)))
-+		goto bug;
-+	if (__get_user(file, (char **)(eip + 4)) ||
-+		(unsigned long)file < PAGE_OFFSET || __get_user(c, file))
-+		file = "<bad filename>";
-+
-+	printk("------------[ cut here ]------------\n");
-+	printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
-+
-+no_bug:
-+	return;
-+
-+	/* Here we know it was a BUG but file-n-line is unavailable */
-+bug:
-+	printk("Kernel BUG\n");
-+}
-+
-+void die(const char * str, struct pt_regs * regs, long err)
-+{
-+	static struct {
-+		spinlock_t lock;
-+		u32 lock_owner;
-+		int lock_owner_depth;
-+	} die = {
-+		.lock =			SPIN_LOCK_UNLOCKED,
-+		.lock_owner =		-1,
-+		.lock_owner_depth =	0
-+	};
-+	static int die_counter;
-+
-+	if (die.lock_owner != _smp_processor_id()) {
-+		console_verbose();
-+		spin_lock_irq(&die.lock);
-+		die.lock_owner = smp_processor_id();
-+		die.lock_owner_depth = 0;
-+		bust_spinlocks(1);
-+	}
-+
-+	if (++die.lock_owner_depth < 3) {
-+		int nl = 0;
-+		handle_BUG(regs);
-+		printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-+#ifdef CONFIG_PREEMPT
-+		printk("PREEMPT ");
-+		nl = 1;
-+#endif
-+#ifdef CONFIG_SMP
-+		printk("SMP ");
-+		nl = 1;
-+#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+		printk("DEBUG_PAGEALLOC");
-+		nl = 1;
-+#endif
-+		if (nl)
-+			printk("\n");
-+	notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
-+		show_registers(regs);
-+  	} else
-+		printk(KERN_ERR "Recursive die() failure, output suppressed\n");
-+
-+	bust_spinlocks(0);
-+	die.lock_owner = -1;
-+	spin_unlock_irq(&die.lock);
-+	if (in_interrupt())
-+		panic("Fatal exception in interrupt");
-+
-+	if (panic_on_oops) {
-+		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
-+		ssleep(5);
-+		panic("Fatal exception");
-+	}
-+	do_exit(SIGSEGV);
-+}
-+
-+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-+{
-+	if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
-+		die(str, regs, err);
-+}
-+
-+static void do_trap(int trapnr, int signr, char *str, int vm86,
-+			   struct pt_regs * regs, long error_code, siginfo_t *info)
-+{
-+	if (regs->eflags & VM_MASK) {
-+		if (vm86)
-+			goto vm86_trap;
-+		goto trap_signal;
-+	}
-+
-+	if (!(regs->xcs & 2))
-+		goto kernel_trap;
-+
-+	trap_signal: {
-+		struct task_struct *tsk = current;
-+		tsk->thread.error_code = error_code;
-+		tsk->thread.trap_no = trapnr;
-+		if (info)
-+			force_sig_info(signr, info, tsk);
-+		else
-+			force_sig(signr, tsk);
-+		return;
-+	}
-+
-+	kernel_trap: {
-+		if (!fixup_exception(regs))
-+			die(str, regs, error_code);
-+		return;
-+	}
-+
-+	vm86_trap: {
-+		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
-+		if (ret) goto trap_signal;
-+		return;
-+	}
-+}
-+
-+#define DO_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
-+}
-+
-+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	siginfo_t info; \
-+	info.si_signo = signr; \
-+	info.si_errno = 0; \
-+	info.si_code = sicode; \
-+	info.si_addr = (void __user *)siaddr; \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
-+}
-+
-+#define DO_VM86_ERROR(trapnr, signr, str, name) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
-+}
-+
-+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+fastcall void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	siginfo_t info; \
-+	info.si_signo = signr; \
-+	info.si_errno = 0; \
-+	info.si_code = sicode; \
-+	info.si_addr = (void __user *)siaddr; \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+						== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
-+}
-+
-+DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
-+#ifndef CONFIG_KPROBES
-+DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
-+#endif
-+DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
-+DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
-+DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
-+DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-+DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
-+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-+DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
-+DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
-+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-+#ifdef CONFIG_X86_MCE
-+DO_ERROR(18, SIGBUS, "machine check", machine_check)
-+#endif
-+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
-+
-+fastcall void do_general_protection(struct pt_regs * regs, long error_code)
-+{
-+	/*
-+	 * If we trapped on an LDT access then ensure that the default_ldt is
-+	 * loaded, if nothing else. We load default_ldt lazily because LDT
-+	 * switching costs time and many applications don't need it.
-+	 */
-+	if (unlikely((error_code & 6) == 4)) {
-+		unsigned long ldt;
-+		__asm__ __volatile__ ("sldt %0" : "=r" (ldt));
-+		if (ldt == 0) {
-+			xen_set_ldt((unsigned long)&default_ldt[0], 5);
-+			return;
-+		}
-+	}
-+
-+	if (regs->eflags & VM_MASK)
-+		goto gp_in_vm86;
-+
-+	if (!(regs->xcs & 2))
-+		goto gp_in_kernel;
-+
-+	current->thread.error_code = error_code;
-+	current->thread.trap_no = 13;
-+	force_sig(SIGSEGV, current);
-+	return;
-+
-+gp_in_vm86:
-+	local_irq_enable();
-+	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-+	return;
-+
-+gp_in_kernel:
-+	if (!fixup_exception(regs)) {
-+		if (notify_die(DIE_GPF, "general protection fault", regs,
-+				error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+			return;
-+		die("general protection fault", regs, error_code);
-+	}
-+}
-+
-+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
-+{
-+	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
-+	printk("You probably have a hardware problem with your RAM chips\n");
-+
-+	/* Clear and disable the memory parity error line. */
-+	clear_mem_error(reason);
-+}
-+
-+static void io_check_error(unsigned char reason, struct pt_regs * regs)
-+{
-+	printk("NMI: IOCK error (debug interrupt?)\n");
-+	show_registers(regs);
-+
-+	/* Re-enable the IOCK line, wait for a few seconds */
-+	clear_io_check_error(reason);
-+}
-+
-+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-+{
-+#ifdef CONFIG_MCA
-+	/* Might actually be able to figure out what the guilty party
-+	* is. */
-+	if( MCA_bus ) {
-+		mca_handle_nmi();
-+		return;
-+	}
-+#endif
-+	printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-+		reason, smp_processor_id());
-+	printk("Dazed and confused, but trying to continue\n");
-+	printk("Do you have a strange power saving mode enabled?\n");
-+}
-+
-+static DEFINE_SPINLOCK(nmi_print_lock);
-+
-+void die_nmi (struct pt_regs *regs, const char *msg)
-+{
-+	spin_lock(&nmi_print_lock);
-+	/*
-+	* We are in trouble anyway, let's at least try
-+	* to get a message out.
-+	*/
-+	bust_spinlocks(1);
-+	printk(msg);
-+	printk(" on CPU%d, eip %08lx, registers:\n",
-+		smp_processor_id(), regs->eip);
-+	show_registers(regs);
-+	printk("console shuts up ...\n");
-+	console_silent();
-+	spin_unlock(&nmi_print_lock);
-+	bust_spinlocks(0);
-+	do_exit(SIGSEGV);
-+}
-+
-+static void default_do_nmi(struct pt_regs * regs)
-+{
-+	unsigned char reason = 0;
-+
-+	/* Only the BSP gets external NMIs from the system.  */
-+	if (!smp_processor_id())
-+		reason = get_nmi_reason();
-+ 
-+	if (!(reason & 0xc0)) {
-+		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
-+							== NOTIFY_STOP)
-+			return;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		/*
-+		 * Ok, so this is none of the documented NMI sources,
-+		 * so it must be the NMI watchdog.
-+		 */
-+		if (nmi_watchdog) {
-+			nmi_watchdog_tick(regs);
-+			return;
-+		}
-+#endif
-+		unknown_nmi_error(reason, regs);
-+		return;
-+	}
-+	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
-+		return;
-+	if (reason & 0x80)
-+		mem_parity_error(reason, regs);
-+	if (reason & 0x40)
-+		io_check_error(reason, regs);
-+	/*
-+	 * Reassert NMI in case it became active meanwhile
-+	 * as it's edge-triggered.
-+	 */
-+	reassert_nmi();
-+}
-+
-+static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
-+{
-+	return 0;
-+}
-+ 
-+static nmi_callback_t nmi_callback = dummy_nmi_callback;
-+ 
-+fastcall void do_nmi(struct pt_regs * regs, long error_code)
-+{
-+	int cpu;
-+
-+	nmi_enter();
-+
-+	cpu = smp_processor_id();
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+	if (!cpu_online(cpu)) {
-+		nmi_exit();
-+		return;
-+	}
-+#endif
-+
-+	++nmi_count(cpu);
-+
-+	if (!nmi_callback(regs, cpu))
-+		default_do_nmi(regs);
-+
-+	nmi_exit();
-+}
-+
-+void set_nmi_callback(nmi_callback_t callback)
-+{
-+	nmi_callback = callback;
-+}
-+
-+void unset_nmi_callback(void)
-+{
-+	nmi_callback = dummy_nmi_callback;
-+}
-+
-+#ifdef CONFIG_KPROBES
-+fastcall void do_int3(struct pt_regs *regs, long error_code)
-+{
-+	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-+			== NOTIFY_STOP)
-+		return;
-+	/* This is an interrupt gate, because kprobes wants interrupts
-+	disabled.  Normal trap handlers don't. */
-+	restore_interrupts(regs);
-+	do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
-+}
-+#endif
-+
-+/*
-+ * Our handling of the processor debug registers is non-trivial.
-+ * We do not clear them on entry and exit from the kernel. Therefore
-+ * it is possible to get a watchpoint trap here from inside the kernel.
-+ * However, the code in ./ptrace.c has ensured that the user can
-+ * only set watchpoints on userspace addresses. Therefore the in-kernel
-+ * watchpoint trap can only occur in code which is reading/writing
-+ * from user space. Such code must not hold kernel locks (since it
-+ * can equally take a page fault), therefore it is safe to call
-+ * force_sig_info even though that claims and releases locks.
-+ * 
-+ * Code in ./signal.c ensures that the debug control register
-+ * is restored before we deliver any signal, and therefore that
-+ * user code runs with the correct debug control register even though
-+ * we clear it here.
-+ *
-+ * Being careful here means that we don't have to be as careful in a
-+ * lot of more complicated places (task switching can be a bit lazy
-+ * about restoring all the debug state, and ptrace doesn't have to
-+ * find every occurrence of the TF bit that could be saved away even
-+ * by user code)
-+ */
-+fastcall void do_debug(struct pt_regs * regs, long error_code)
-+{
-+	unsigned int condition;
-+	struct task_struct *tsk = current;
-+
-+	condition = HYPERVISOR_get_debugreg(6);
-+
-+	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-+					SIGTRAP) == NOTIFY_STOP)
-+		return;
-+	/* It's safe to allow irq's after DR6 has been saved */
-+	if (regs->eflags & X86_EFLAGS_IF)
-+		local_irq_enable();
-+
-+	/* Mask out spurious debug traps due to lazy DR7 setting */
-+	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-+		if (!tsk->thread.debugreg[7])
-+			goto clear_dr7;
-+	}
-+
-+	if (regs->eflags & VM_MASK)
-+		goto debug_vm86;
-+
-+	/* Save debug status register where ptrace can see it */
-+	tsk->thread.debugreg[6] = condition;
-+
-+	/*
-+	 * Single-stepping through TF: make sure we ignore any events in
-+	 * kernel space (but re-enable TF when returning to user mode).
-+	 */
-+	if (condition & DR_STEP) {
-+		/*
-+		 * We already checked v86 mode above, so we can
-+		 * check for kernel mode by just checking the CPL
-+		 * of CS.
-+		 */
-+		if ((regs->xcs & 2) == 0)
-+			goto clear_TF_reenable;
-+	}
-+
-+	/* Ok, finally something we can handle */
-+	send_sigtrap(tsk, regs, error_code);
-+
-+	/* Disable additional traps. They'll be re-enabled when
-+	 * the signal is delivered.
-+	 */
-+clear_dr7:
-+	HYPERVISOR_set_debugreg(7, 0);
-+	return;
-+
-+debug_vm86:
-+	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
-+	return;
-+
-+clear_TF_reenable:
-+	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-+	regs->eflags &= ~TF_MASK;
-+	return;
-+}
-+
-+/*
-+ * Note that we play around with the 'TS' bit in an attempt to get
-+ * the correct behaviour even in the presence of the asynchronous
-+ * IRQ13 behaviour
-+ */
-+void math_error(void __user *eip)
-+{
-+	struct task_struct * task;
-+	siginfo_t info;
-+	unsigned short cwd, swd;
-+
-+	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 16;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
-+	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = eip;
-+	/*
-+	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
-+	 * status.  0x3f is the exception bits in these regs, 0x200 is the
-+	 * C1 reg you need in case of a stack fault, 0x040 is the stack
-+	 * fault bit.  We should only be taking one exception at a time,
-+	 * so if this combination doesn't produce any single exception,
-+	 * then we have a bad program that isn't synchronizing its FPU usage
-+	 * and it will suffer the consequences since we won't be able to
-+	 * fully reproduce the context of the exception
-+	 */
-+	cwd = get_fpu_cwd(task);
-+	swd = get_fpu_swd(task);
-+	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
-+		case 0x000:
-+		default:
-+			break;
-+		case 0x001: /* Invalid Op */
-+		case 0x041: /* Stack Fault */
-+		case 0x241: /* Stack Fault | Direction */
-+			info.si_code = FPE_FLTINV;
-+			/* Should we clear the SF or let user space do it ???? */
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
-+			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
-+			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
-+			break;
-+	}
-+	force_sig_info(SIGFPE, &info, task);
-+}
-+
-+fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
-+{
-+	ignore_fpu_irq = 1;
-+	math_error((void __user *)regs->eip);
-+}
-+
-+static void simd_math_error(void __user *eip)
-+{
-+	struct task_struct * task;
-+	siginfo_t info;
-+	unsigned short mxcsr;
-+
-+	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 19;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
-+	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = eip;
-+	/*
-+	 * The SIMD FPU exceptions are handled a little differently, as there
-+	 * is only a single status/control register.  Thus, to determine which
-+	 * unmasked exception was caught we must mask the exception mask bits
-+	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-+	 */
-+	mxcsr = get_fpu_mxcsr(task);
-+	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-+		case 0x000:
-+		default:
-+			break;
-+		case 0x001: /* Invalid Op */
-+			info.si_code = FPE_FLTINV;
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
-+			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
-+			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
-+			break;
-+	}
-+	force_sig_info(SIGFPE, &info, task);
-+}
-+
-+fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
-+					  long error_code)
-+{
-+	if (cpu_has_xmm) {
-+		/* Handle SIMD FPU exceptions on PIII+ processors. */
-+		ignore_fpu_irq = 1;
-+		simd_math_error((void __user *)regs->eip);
-+	} else {
-+		/*
-+		 * Handle strange cache flush from user space exception
-+		 * in all other cases.  This is undocumented behaviour.
-+		 */
-+		if (regs->eflags & VM_MASK) {
-+			handle_vm86_fault((struct kernel_vm86_regs *)regs,
-+					  error_code);
-+			return;
-+		}
-+		die_if_kernel("cache flush denied", regs, error_code);
-+		current->thread.trap_no = 19;
-+		current->thread.error_code = error_code;
-+		force_sig(SIGSEGV, current);
-+	}
-+}
-+
-+#ifndef CONFIG_XEN
-+fastcall void setup_x86_bogus_stack(unsigned char * stk)
-+{
-+	unsigned long *switch16_ptr, *switch32_ptr;
-+	struct pt_regs *regs;
-+	unsigned long stack_top, stack_bot;
-+	unsigned short iret_frame16_off;
-+	int cpu = smp_processor_id();
-+	/* reserve the space on 32bit stack for the magic switch16 pointer */
-+	memmove(stk, stk + 8, sizeof(struct pt_regs));
-+	switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
-+	regs = (struct pt_regs *)stk;
-+	/* now the switch32 on 16bit stack */
-+	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-+	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
-+	switch32_ptr = (unsigned long *)(stack_top - 8);
-+	iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
-+	/* copy iret frame on 16bit stack */
-+	memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
-+	/* fill in the switch pointers */
-+	switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
-+	switch16_ptr[1] = __ESPFIX_SS;
-+	switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
-+		8 - CPU_16BIT_STACK_SIZE;
-+	switch32_ptr[1] = __KERNEL_DS;
-+}
-+
-+fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
-+{
-+	unsigned long *switch32_ptr;
-+	unsigned char *stack16, *stack32;
-+	unsigned long stack_top, stack_bot;
-+	int len;
-+	int cpu = smp_processor_id();
-+	stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-+	stack_top = stack_bot +	CPU_16BIT_STACK_SIZE;
-+	switch32_ptr = (unsigned long *)(stack_top - 8);
-+	/* copy the data from 16bit stack to 32bit stack */
-+	len = CPU_16BIT_STACK_SIZE - 8 - sp;
-+	stack16 = (unsigned char *)(stack_bot + sp);
-+	stack32 = (unsigned char *)
-+		(switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
-+	memcpy(stack32, stack16, len);
-+	return stack32;
-+}
-+#endif
-+
-+/*
-+ *  'math_state_restore()' saves the current math information in the
-+ * old math state array, and gets the new ones from the current task
-+ *
-+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
-+ * Don't touch unless you *really* know how it works.
-+ *
-+ * Must be called with kernel preemption disabled (in this case,
-+ * local interrupts are disabled at the call-site in entry.S).
-+ */
-+asmlinkage void math_state_restore(struct pt_regs regs)
-+{
-+	struct thread_info *thread = current_thread_info();
-+	struct task_struct *tsk = thread->task;
-+
-+	/* NB. 'clts' is done for us by Xen during virtual trap. */
-+	if (!tsk_used_math(tsk))
-+		init_fpu(tsk);
-+	restore_fpu(tsk);
-+	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
-+}
-+
-+#ifndef CONFIG_MATH_EMULATION
-+
-+asmlinkage void math_emulate(long arg)
-+{
-+	printk("math-emulation not enabled and no coprocessor found.\n");
-+	printk("killing %s.\n",current->comm);
-+	force_sig(SIGFPE,current);
-+	schedule();
-+}
-+
-+#endif /* CONFIG_MATH_EMULATION */
-+
-+#ifdef CONFIG_X86_F00F_BUG
-+void __init trap_init_f00f_bug(void)
-+{
-+	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
-+
-+	/*
-+	 * Update the IDT descriptor and reload the IDT so that
-+	 * it uses the read-only mapped virtual address.
-+	 */
-+	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-+	__asm__ __volatile__("lidt %0" : : "m" (idt_descr));
-+}
-+#endif
-+
-+
-+/*
-+ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
-+ * for those that specify <dpl>|4 in the second field.
-+ */
-+static trap_info_t trap_table[] = {
-+	{  0, 0, __KERNEL_CS, (unsigned long)divide_error		},
-+	{  1, 0|4, __KERNEL_CS, (unsigned long)debug			},
-+	{  3, 3|4, __KERNEL_CS, (unsigned long)int3			},
-+	{  4, 3, __KERNEL_CS, (unsigned long)overflow			},
-+	{  5, 3, __KERNEL_CS, (unsigned long)bounds			},
-+	{  6, 0, __KERNEL_CS, (unsigned long)invalid_op			},
-+	{  7, 0|4, __KERNEL_CS, (unsigned long)device_not_available	},
-+	{  9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
-+	{ 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS		},
-+	{ 11, 0, __KERNEL_CS, (unsigned long)segment_not_present	},
-+	{ 12, 0, __KERNEL_CS, (unsigned long)stack_segment		},
-+	{ 13, 0, __KERNEL_CS, (unsigned long)general_protection		},
-+	{ 14, 0|4, __KERNEL_CS, (unsigned long)page_fault		},
-+	{ 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment		},
-+	{ 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error		},
-+	{ 17, 0, __KERNEL_CS, (unsigned long)alignment_check		},
-+#ifdef CONFIG_X86_MCE
-+	{ 18, 0, __KERNEL_CS, (unsigned long)machine_check		},
-+#endif
-+	{ 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error	},
-+	{ SYSCALL_VECTOR,  3, __KERNEL_CS, (unsigned long)system_call	},
-+	{  0, 0,	   0, 0						}
-+};
-+
-+void __init trap_init(void)
-+{
-+	HYPERVISOR_set_trap_table(trap_table);
-+
-+	/*
-+	 * default LDT is a single-entry callgate to lcall7 for iBCS
-+	 * and a callgate to lcall27 for Solaris/x86 binaries
-+	 */
-+	make_lowmem_page_readonly(
-+		&default_ldt[0], XENFEAT_writable_descriptor_tables);
-+
-+	/*
-+	 * Should be a barrier for any external CPU state.
-+	 */
-+	cpu_init();
-+}
-+
-+void smp_trap_init(trap_info_t *trap_ctxt)
-+{
-+	trap_info_t *t = trap_table;
-+	int i;
-+
-+	for (i = 0; i < 256; i++) {
-+		trap_ctxt[i].vector = i;
-+		trap_ctxt[i].cs     = FLAT_KERNEL_CS;
-+	}
-+
-+	for (t = trap_table; t->address; t++) {
-+		trap_ctxt[t->vector].flags = t->flags;
-+		trap_ctxt[t->vector].cs = t->cs;
-+		trap_ctxt[t->vector].address = t->address;
-+	}
-+}
-+
-+static int __init kstack_setup(char *s)
-+{
-+	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
-+	return 0;
-+}
-+__setup("kstack=", kstack_setup);
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/vsyscall-note.S linux-2.6.12-xen/arch/xen/i386/kernel/vsyscall-note.S
---- pristine-linux-2.6.12/arch/xen/i386/kernel/vsyscall-note.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/vsyscall-note.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,32 @@
-+/*
-+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
-+ * Here we can supply some information useful to userland.
-+ * First we get the vanilla i386 note that supplies the kernel version info.
-+ */
-+
-+#include "../../../i386/kernel/vsyscall-note.S"
-+
-+/*
-+ * Now we add a special note telling glibc's dynamic linker a fake hardware
-+ * flavor that it will use to choose the search path for libraries in the
-+ * same way it uses real hardware capabilities like "mmx".
-+ * We supply "nosegneg" as the fake capability, to indicate that we
-+ * do not like negative offsets in instructions using segment overrides,
-+ * since we implement those inefficiently.  This makes it possible to
-+ * install libraries optimized to avoid those access patterns in someplace
-+ * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/ file
-+ * corresponding to the bits here is needed to make ldconfig work right.
-+ * It should contain:
-+ *	hwcap 0 nosegneg
-+ * to match the mapping of bit to name that we give here.
-+ */
-+#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
-+	ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
-+	.long ncaps, mask
-+#define NOTE_KERNELCAP(bit, name) \
-+	.byte bit; .asciz name
-+#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
-+
-+NOTE_KERNELCAP_BEGIN(1, 1)
-+NOTE_KERNELCAP(1, "nosegneg")  /* Change 1 back to 0 when glibc is fixed! */
-+NOTE_KERNELCAP_END
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/kernel/vsyscall.S linux-2.6.12-xen/arch/xen/i386/kernel/vsyscall.S
---- pristine-linux-2.6.12/arch/xen/i386/kernel/vsyscall.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/kernel/vsyscall.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,15 @@
-+#include <linux/init.h>
-+
-+__INITDATA
-+
-+	.globl vsyscall_int80_start, vsyscall_int80_end
-+vsyscall_int80_start:
-+	.incbin "arch/xen/i386/kernel/vsyscall-int80.so"
-+vsyscall_int80_end:
-+
-+	.globl vsyscall_sysenter_start, vsyscall_sysenter_end
-+vsyscall_sysenter_start:
-+	.incbin "arch/xen/i386/kernel/vsyscall-sysenter.so"
-+vsyscall_sysenter_end:
-+
-+__FINIT
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mach-default/Makefile linux-2.6.12-xen/arch/xen/i386/mach-default/Makefile
---- pristine-linux-2.6.12/arch/xen/i386/mach-default/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/mach-default/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,12 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+c-obj-y				:= topology.o
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y)):
-+	@ln -fsn $(srctree)/arch/i386/mach-default/$(notdir $@) $@
-+
-+obj-y	+= $(c-obj-y)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-))
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/Makefile linux-2.6.12-xen/arch/xen/i386/Makefile
---- pristine-linux-2.6.12/arch/xen/i386/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,108 @@
-+#
-+# i386/Makefile
-+#
-+# This file is included by the global makefile so that you can add your own
-+# architecture-specific flags and dependencies. Remember to have actions
-+# for "archclean" cleaning up for this architecture.
-+#
-+# This file is subject to the terms and conditions of the GNU General Public
-+# License.  See the file "COPYING" in the main directory of this archive
-+# for more details.
-+#
-+# Copyright (C) 1994 by Linus Torvalds
-+#
-+# 19990713  Artur Skawina <skawina at geocities.com>
-+#           Added '-march' and '-mpreferred-stack-boundary' support
-+#
-+# 20050320  Kianusch Sayah Karadji <kianusch at sk-tech.net>
-+#           Added support for GEODE CPU
-+
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+LDFLAGS		:= -m elf_i386
-+LDFLAGS_vmlinux :=
-+CHECK		:= $(CHECK) -D__i386__=1
-+
-+CFLAGS += -m32
-+AFLAGS += -m32
-+
-+CFLAGS += -pipe -msoft-float
-+
-+# prevent gcc from keeping the stack 16 byte aligned
-+CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2,)
-+
-+align := $(cc-option-align)
-+cflags-$(CONFIG_M386)		+= -march=i386
-+cflags-$(CONFIG_M486)		+= -march=i486
-+cflags-$(CONFIG_M586)		+= -march=i586
-+cflags-$(CONFIG_M586TSC)	+= -march=i586
-+cflags-$(CONFIG_M586MMX)	+= $(call cc-option,-march=pentium-mmx,-march=i586)
-+cflags-$(CONFIG_M686)		+= -march=i686
-+cflags-$(CONFIG_MPENTIUMII)	+= -march=i686 $(call cc-option,-mtune=pentium2)
-+cflags-$(CONFIG_MPENTIUMIII)	+= -march=i686 $(call cc-option,-mtune=pentium3)
-+cflags-$(CONFIG_MPENTIUMM)	+= -march=i686 $(call cc-option,-mtune=pentium3)
-+cflags-$(CONFIG_MPENTIUM4)	+= -march=i686 $(call cc-option,-mtune=pentium4)
-+cflags-$(CONFIG_MK6)		+= -march=k6
-+# Please note that patches that add -march=athlon-xp and friends are pointless.
-+# They make zero difference whatsoever to performance at this time.
-+cflags-$(CONFIG_MK7)		+= $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
-+cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
-+cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
-+cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call cc-option,-mtune=pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
-+cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
-+cflags-$(CONFIG_MWINCHIP2)	+= $(call cc-option,-march=winchip2,-march=i586)
-+cflags-$(CONFIG_MWINCHIP3D)	+= $(call cc-option,-march=winchip2,-march=i586)
-+cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
-+cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
-+
-+# AMD Elan support
-+cflags-$(CONFIG_X86_ELAN)	+= -march=i486
-+
-+# Geode GX1 support
-+cflags-$(CONFIG_MGEODEGX1)		+= $(call cc-option,-march=pentium-mmx,-march=i486)
-+
-+# -mregparm=3 works ok on gcc-3.0 and later
-+#
-+GCC_VERSION			:= $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
-+cflags-$(CONFIG_REGPARM) 	+= $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
-+
-+# Disable unit-at-a-time mode, it makes gcc use a lot more stack
-+# due to the lack of sharing of stacklots.
-+CFLAGS += $(call cc-option,-fno-unit-at-a-time,)
-+
-+CFLAGS += $(cflags-y)
-+
-+head-y := arch/xen/i386/kernel/head.o arch/xen/i386/kernel/init_task.o
-+
-+libs-y 					+= arch/i386/lib/
-+core-y					+= arch/xen/i386/kernel/ \
-+					   arch/xen/i386/mm/ \
-+					   arch/xen/i386/mach-default/ \
-+					   arch/i386/crypto/
-+# \
-+#					   arch/xen/$(mcore-y)/
-+drivers-$(CONFIG_MATH_EMULATION)	+= arch/i386/math-emu/
-+drivers-$(CONFIG_PCI)			+= arch/xen/i386/pci/
-+# must be linked after kernel/
-+drivers-$(CONFIG_OPROFILE)		+= arch/i386/oprofile/
-+drivers-$(CONFIG_PM)			+= arch/i386/power/
-+
-+# for clean
-+obj-	+= kernel/ mm/ pci/
-+#obj-	+= ../../i386/lib/ ../../i386/mm/ 
-+#../../i386/$(mcore-y)/
-+#obj-	+= ../../i386/pci/ ../../i386/oprofile/ ../../i386/power/
-+
-+xenflags-y += -Iinclude/asm-xen/asm-i386/mach-xen \
-+		-Iinclude/asm-i386/mach-default
-+CFLAGS += $(xenflags-y)
-+AFLAGS += $(xenflags-y)
-+
-+prepare: include/asm-$(XENARCH)/asm_offsets.h
-+CLEAN_FILES += include/asm-$(XENARCH)/asm_offsets.h
-+
-+arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
-+	include/linux/version.h include/config/MARKER
-+
-+include/asm-$(XENARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
-+	$(call filechk,gen-asm-offsets)
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/fault.c linux-2.6.12-xen/arch/xen/i386/mm/fault.c
---- pristine-linux-2.6.12/arch/xen/i386/mm/fault.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/mm/fault.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,620 @@
-+/*
-+ *  linux/arch/i386/mm/fault.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ */
-+
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h>		/* For unblank_screen() */
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/desc.h>
-+#include <asm/kdebug.h>
-+
-+extern void die(const char *,struct pt_regs *,long);
-+
-+DEFINE_PER_CPU(pgd_t *, cur_pgd);
-+
-+/*
-+ * Unlock any spinlocks which will prevent us from getting the
-+ * message out 
-+ */
-+void bust_spinlocks(int yes)
-+{
-+	int loglevel_save = console_loglevel;
-+
-+	if (yes) {
-+		oops_in_progress = 1;
-+		return;
-+	}
-+#ifdef CONFIG_VT
-+	unblank_screen();
-+#endif
-+	oops_in_progress = 0;
-+	/*
-+	 * OK, the message is on the console.  Now we call printk()
-+	 * without oops_in_progress set so that printk will give klogd
-+	 * a poke.  Hold onto your hats...
-+	 */
-+	console_loglevel = 15;		/* NMI oopser may have shut the console up */
-+	printk(" ");
-+	console_loglevel = loglevel_save;
-+}
-+
-+/*
-+ * Return EIP plus the CS segment base.  The segment limit is also
-+ * adjusted, clamped to the kernel/user address space (whichever is
-+ * appropriate), and returned in *eip_limit.
-+ *
-+ * The segment is checked, because it might have been changed by another
-+ * task between the original faulting instruction and here.
-+ *
-+ * If CS is no longer a valid code segment, or if EIP is beyond the
-+ * limit, or if it is a kernel address when CS is not a kernel segment,
-+ * then the returned value will be greater than *eip_limit.
-+ * 
-+ * This is slow, but is very rarely executed.
-+ */
-+static inline unsigned long get_segment_eip(struct pt_regs *regs,
-+					    unsigned long *eip_limit)
-+{
-+	unsigned long eip = regs->eip;
-+	unsigned seg = regs->xcs & 0xffff;
-+	u32 seg_ar, seg_limit, base, *desc;
-+
-+	/* The standard kernel/user address space limit. */
-+	*eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
-+
-+	/* Unlikely, but must come before segment checks. */
-+	if (unlikely((regs->eflags & VM_MASK) != 0))
-+		return eip + (seg << 4);
-+	
-+	/* By far the most common cases. */
-+	if (likely(seg == __USER_CS || seg == __KERNEL_CS))
-+		return eip;
-+
-+	/* Check the segment exists, is within the current LDT/GDT size,
-+	   that kernel/user (ring 0..3) has the appropriate privilege,
-+	   that it's a code segment, and get the limit. */
-+	__asm__ ("larl %3,%0; lsll %3,%1"
-+		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
-+	if ((~seg_ar & 0x9800) || eip > seg_limit) {
-+		*eip_limit = 0;
-+		return 1;	 /* So that returned eip > *eip_limit. */
-+	}
-+
-+	/* Get the GDT/LDT descriptor base. 
-+	   When you look for races in this code remember that
-+	   LDT and other horrors are only used in user space. */
-+	if (seg & (1<<2)) {
-+		/* Must lock the LDT while reading it. */
-+		down(&current->mm->context.sem);
-+		desc = current->mm->context.ldt;
-+		desc = (void *)desc + (seg & ~7);
-+	} else {
-+		/* Must disable preemption while reading the GDT. */
-+		desc = (u32 *)get_cpu_gdt_table(get_cpu());
-+		desc = (void *)desc + (seg & ~7);
-+	}
-+
-+	/* Decode the code segment base from the descriptor */
-+	base = get_desc_base((unsigned long *)desc);
-+
-+	if (seg & (1<<2)) { 
-+		up(&current->mm->context.sem);
-+	} else
-+		put_cpu();
-+
-+	/* Adjust EIP and segment limit, and clamp at the kernel limit.
-+	   It's legitimate for segments to wrap at 0xffffffff. */
-+	seg_limit += base;
-+	if (seg_limit < *eip_limit && seg_limit >= base)
-+		*eip_limit = seg_limit;
-+	return eip + base;
-+}
-+
-+/* 
-+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
-+ * Check that here and ignore it.
-+ */
-+static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
-+{ 
-+	unsigned long limit;
-+	unsigned long instr = get_segment_eip (regs, &limit);
-+	int scan_more = 1;
-+	int prefetch = 0; 
-+	int i;
-+
-+	for (i = 0; scan_more && i < 15; i++) { 
-+		unsigned char opcode;
-+		unsigned char instr_hi;
-+		unsigned char instr_lo;
-+
-+		if (instr > limit)
-+			break;
-+		if (__get_user(opcode, (unsigned char *) instr))
-+			break; 
-+
-+		instr_hi = opcode & 0xf0; 
-+		instr_lo = opcode & 0x0f; 
-+		instr++;
-+
-+		switch (instr_hi) { 
-+		case 0x20:
-+		case 0x30:
-+			/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
-+			scan_more = ((instr_lo & 7) == 0x6);
-+			break;
-+			
-+		case 0x60:
-+			/* 0x64 thru 0x67 are valid prefixes in all modes. */
-+			scan_more = (instr_lo & 0xC) == 0x4;
-+			break;		
-+		case 0xF0:
-+			/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
-+			scan_more = !instr_lo || (instr_lo>>1) == 1;
-+			break;			
-+		case 0x00:
-+			/* Prefetch instruction is 0x0F0D or 0x0F18 */
-+			scan_more = 0;
-+			if (instr > limit)
-+				break;
-+			if (__get_user(opcode, (unsigned char *) instr)) 
-+				break;
-+			prefetch = (instr_lo == 0xF) &&
-+				(opcode == 0x0D || opcode == 0x18);
-+			break;			
-+		default:
-+			scan_more = 0;
-+			break;
-+		} 
-+	}
-+	return prefetch;
-+}
-+
-+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+			      unsigned long error_code)
-+{
-+	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-+		     boot_cpu_data.x86 >= 6)) {
-+		/* Catch an obscure case of prefetch inside an NX page. */
-+		if (nx_enabled && (error_code & 16))
-+			return 0;
-+		return __is_prefetch(regs, addr);
-+	}
-+	return 0;
-+} 
-+
-+fastcall void do_invalid_op(struct pt_regs *, unsigned long);
-+
-+#ifdef CONFIG_X86_PAE
-+static void dump_fault_path(unsigned long address)
-+{
-+	unsigned long *p, page;
-+	unsigned long mfn; 
-+
-+	preempt_disable();
-+	page = __pa(per_cpu(cur_pgd, smp_processor_id()));
-+	preempt_enable();
-+
-+	p  = (unsigned long *)__va(page);
-+	p += (address >> 30) * 2;
-+	printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
-+	if (p[0] & 1) {
-+		mfn  = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20); 
-+		page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
-+		p  = (unsigned long *)__va(page);
-+		address &= 0x3fffffff;
-+		p += (address >> 21) * 2;
-+		printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n", 
-+		       page, p[1], p[0]);
-+#ifndef CONFIG_HIGHPTE
-+		if (p[0] & 1) {
-+			mfn  = (p[0] >> PAGE_SHIFT) | ((p[1] & 0x7) << 20); 
-+			page = mfn_to_pfn(mfn) << PAGE_SHIFT; 
-+			p  = (unsigned long *) __va(page);
-+			address &= 0x001fffff;
-+			p += (address >> 12) * 2;
-+			printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
-+			       page, p[1], p[0]);
-+		}
-+#endif
-+	}
-+}
-+#else
-+static void dump_fault_path(unsigned long address)
-+{
-+	unsigned long page;
-+
-+	preempt_disable();
-+	page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
-+	    [address >> 22];
-+	preempt_enable();
-+
-+	printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
-+	       machine_to_phys(page));
-+	/*
-+	 * We must not directly access the pte in the highpte
-+	 * case, the page table might be allocated in highmem.
-+	 * And lets rather not kmap-atomic the pte, just in case
-+	 * it's allocated already.
-+	 */
-+#ifndef CONFIG_HIGHPTE
-+	if (page & 1) {
-+		page &= PAGE_MASK;
-+		address &= 0x003ff000;
-+		page = machine_to_phys(page);
-+		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
-+		printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
-+		       machine_to_phys(page));
-+	}
-+#endif
-+}
-+#endif
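In the PAE variant of dump_fault_path() above, each directory entry is 64 bits wide and is read as two 32-bit halves, so the machine frame number is reassembled as (p[0] >> PAGE_SHIFT), giving MFN bits 0-19, plus ((p[1] & 0x7) << 20), which contributes three more bits and therefore covers machine addresses up to 2^35.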
-+
-+
-+/*
-+ * This routine handles page faults.  It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ *
-+ * error_code:
-+ *	bit 0 == 0 means no page found, 1 means protection fault
-+ *	bit 1 == 0 means read, 1 means write
-+ *	bit 2 == 0 means kernel, 1 means user-mode
-+ */
-+fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
-+{
-+	struct task_struct *tsk;
-+	struct mm_struct *mm;
-+	struct vm_area_struct * vma;
-+	unsigned long address;
-+	int write;
-+	siginfo_t info;
-+
-+	address = HYPERVISOR_shared_info->vcpu_info[
-+		smp_processor_id()].arch.cr2;
-+
-+	/* Set the "privileged fault" bit to something sane. */
-+	error_code &= ~4;
-+	error_code |= (regs->xcs & 2) << 1;
-+	if (regs->eflags & X86_EFLAGS_VM)
-+		error_code |= 4;
-+
-+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+					SIGSEGV) == NOTIFY_STOP)
-+		return;
-+	/* It's safe to allow irq's after cr2 has been saved */
-+	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
-+		local_irq_enable();
-+
-+	tsk = current;
-+
-+	info.si_code = SEGV_MAPERR;
-+
-+	/*
-+	 * We fault-in kernel-space virtual memory on-demand. The
-+	 * 'reference' page table is init_mm.pgd.
-+	 *
-+	 * NOTE! We MUST NOT take any locks for this case. We may
-+	 * be in an interrupt or a critical region, and should
-+	 * only copy the information from the master page table,
-+	 * nothing more.
-+	 *
-+	 * This verifies that the fault happens in kernel space
-+	 * (error_code & 4) == 0, and that the fault was not a
-+	 * protection error (error_code & 1) == 0.
-+	 */
-+	if (unlikely(address >= TASK_SIZE)) { 
-+		if (!(error_code & 5))
-+			goto vmalloc_fault;
-+		/* 
-+		 * Don't take the mm semaphore here. If we fixup a prefetch
-+		 * fault we could otherwise deadlock.
-+		 */
-+		goto bad_area_nosemaphore;
-+	} 
-+
-+	mm = tsk->mm;
-+
-+	/*
-+	 * If we're in an interrupt, have no user context or are running in an
-+	 * atomic region then we must not take the fault..
-+	 */
-+	if (in_atomic() || !mm)
-+		goto bad_area_nosemaphore;
-+
-+	/* When running in the kernel we expect faults to occur only to
-+	 * addresses in user space.  All other faults represent errors in the
-+	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
-+	 * erroneous fault occurring in a code path which already holds mmap_sem
-+	 * we will deadlock attempting to validate the fault against the
-+	 * address space.  Luckily the kernel only validly references user
-+	 * space from well defined areas of code, which are listed in the
-+	 * exceptions table.
-+	 *
-+	 * As the vast majority of faults will be valid we will only perform
-+	 * the source reference check when there is a possibility of a deadlock.
-+	 * Attempt to lock the address space, if we cannot we then validate the
-+	 * source.  If this is invalid we can skip the address space check,
-+	 * thus avoiding the deadlock.
-+	 */
-+	if (!down_read_trylock(&mm->mmap_sem)) {
-+		if ((error_code & 4) == 0 &&
-+		    !search_exception_tables(regs->eip))
-+			goto bad_area_nosemaphore;
-+		down_read(&mm->mmap_sem);
-+	}
-+
-+	vma = find_vma(mm, address);
-+	if (!vma)
-+		goto bad_area;
-+	if (vma->vm_start <= address)
-+		goto good_area;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		goto bad_area;
-+	if (error_code & 4) {
-+		/*
-+		 * accessing the stack below %esp is always a bug.
-+		 * The "+ 32" is there due to some instructions (like
-+		 * pusha) doing post-decrement on the stack and that
-+		 * doesn't show up until later..
-+		 */
-+		if (address + 32 < regs->esp)
-+			goto bad_area;
-+	}
-+	if (expand_stack(vma, address))
-+		goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+	info.si_code = SEGV_ACCERR;
-+	write = 0;
-+	switch (error_code & 3) {
-+		default:	/* 3: write, present */
-+#ifdef TEST_VERIFY_AREA
-+			if (regs->cs == KERNEL_CS)
-+				printk("WP fault at %08lx\n", regs->eip);
-+#endif
-+			/* fall through */
-+		case 2:		/* write, not present */
-+			if (!(vma->vm_flags & VM_WRITE))
-+				goto bad_area;
-+			write++;
-+			break;
-+		case 1:		/* read, present */
-+			goto bad_area;
-+		case 0:		/* read, not present */
-+			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-+				goto bad_area;
-+	}
-+
-+ survive:
-+	/*
-+	 * If for any reason at all we couldn't handle the fault,
-+	 * make sure we exit gracefully rather than endlessly redo
-+	 * the fault.
-+	 */
-+	switch (handle_mm_fault(mm, vma, address, write)) {
-+		case VM_FAULT_MINOR:
-+			tsk->min_flt++;
-+			break;
-+		case VM_FAULT_MAJOR:
-+			tsk->maj_flt++;
-+			break;
-+		case VM_FAULT_SIGBUS:
-+			goto do_sigbus;
-+		case VM_FAULT_OOM:
-+			goto out_of_memory;
-+		default:
-+			BUG();
-+	}
-+
-+	/*
-+	 * Did it hit the DOS screen memory VA from vm86 mode?
-+	 */
-+	if (regs->eflags & VM_MASK) {
-+		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
-+		if (bit < 32)
-+			tsk->thread.screen_bitmap |= 1 << bit;
-+	}
-+	up_read(&mm->mmap_sem);
-+	return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+	up_read(&mm->mmap_sem);
-+
-+bad_area_nosemaphore:
-+	/* User mode accesses just cause a SIGSEGV */
-+	if (error_code & 4) {
-+		/* 
-+		 * Valid to do another page fault here because this one came 
-+		 * from user space.
-+		 */
-+		if (is_prefetch(regs, address, error_code))
-+			return;
-+
-+		tsk->thread.cr2 = address;
-+		/* Kernel addresses are always protection faults */
-+		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+		tsk->thread.trap_no = 14;
-+		info.si_signo = SIGSEGV;
-+		info.si_errno = 0;
-+		/* info.si_code has been set above */
-+		info.si_addr = (void __user *)address;
-+		force_sig_info(SIGSEGV, &info, tsk);
-+		return;
-+	}
-+
-+#ifdef CONFIG_X86_F00F_BUG
-+	/*
-+	 * Pentium F0 0F C7 C8 bug workaround.
-+	 */
-+	if (boot_cpu_data.f00f_bug) {
-+		unsigned long nr;
-+		
-+		nr = (address - idt_descr.address) >> 3;
-+
-+		if (nr == 6) {
-+			do_invalid_op(regs, 0);
-+			return;
-+		}
-+	}
-+#endif
-+
-+no_context:
-+	/* Are we prepared to handle this kernel fault?  */
-+	if (fixup_exception(regs))
-+		return;
-+
-+	/* 
-+	 * Valid to do another page fault here, because if this fault
-+	 * had been triggered by is_prefetch fixup_exception would have 
-+	 * handled it.
-+	 */
-+ 	if (is_prefetch(regs, address, error_code))
-+ 		return;
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ */
-+
-+	bust_spinlocks(1);
-+
-+#ifdef CONFIG_X86_PAE
-+	if (error_code & 16) {
-+		pte_t *pte = lookup_address(address);
-+
-+		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-+			printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
-+	}
-+#endif
-+	if (address < PAGE_SIZE)
-+		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-+	else
-+		printk(KERN_ALERT "Unable to handle kernel paging request");
-+	printk(" at virtual address %08lx\n",address);
-+	printk(KERN_ALERT " printing eip:\n");
-+	printk("%08lx\n", regs->eip);
-+	dump_fault_path(address);
-+	die("Oops", regs, error_code);
-+	bust_spinlocks(0);
-+	do_exit(SIGKILL);
-+
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+	up_read(&mm->mmap_sem);
-+	if (tsk->pid == 1) {
-+		yield();
-+		down_read(&mm->mmap_sem);
-+		goto survive;
-+	}
-+	printk("VM: killing process %s\n", tsk->comm);
-+	if (error_code & 4)
-+		do_exit(SIGKILL);
-+	goto no_context;
-+
-+do_sigbus:
-+	up_read(&mm->mmap_sem);
-+
-+	/* Kernel mode? Handle exceptions or die */
-+	if (!(error_code & 4))
-+		goto no_context;
-+
-+	/* User space => ok to do another page fault */
-+	if (is_prefetch(regs, address, error_code))
-+		return;
-+
-+	tsk->thread.cr2 = address;
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = 14;
-+	info.si_signo = SIGBUS;
-+	info.si_errno = 0;
-+	info.si_code = BUS_ADRERR;
-+	info.si_addr = (void __user *)address;
-+	force_sig_info(SIGBUS, &info, tsk);
-+	return;
-+
-+vmalloc_fault:
-+	{
-+		/*
-+		 * Synchronize this task's top level page-table
-+		 * with the 'reference' page table.
-+		 *
-+		 * Do _not_ use "tsk" here. We might be inside
-+		 * an interrupt in the middle of a task switch..
-+		 */
-+		int index = pgd_index(address);
-+		pgd_t *pgd, *pgd_k;
-+		pud_t *pud, *pud_k;
-+		pmd_t *pmd, *pmd_k;
-+		pte_t *pte_k;
-+
-+		preempt_disable();
-+		pgd = index + per_cpu(cur_pgd, smp_processor_id());
-+		preempt_enable();
-+		pgd_k = init_mm.pgd + index;
-+
-+		if (!pgd_present(*pgd_k))
-+			goto no_context;
-+
-+		/*
-+		 * set_pgd(pgd, *pgd_k); here would be useless on PAE
-+		 * and redundant with the set_pmd() on non-PAE. As would
-+		 * set_pud.
-+		 */
-+
-+		pud = pud_offset(pgd, address);
-+		pud_k = pud_offset(pgd_k, address);
-+		if (!pud_present(*pud_k))
-+			goto no_context;
-+		
-+		pmd = pmd_offset(pud, address);
-+		pmd_k = pmd_offset(pud_k, address);
-+		if (!pmd_present(*pmd_k))
-+			goto no_context;
-+#ifndef CONFIG_XEN
-+		set_pmd(pmd, *pmd_k);
-+#else
-+		/*
-+		 * When running on Xen we must launder *pmd_k through
-+		 * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
-+		 */
-+		set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
-+#endif
-+
-+		pte_k = pte_offset_kernel(pmd_k, address);
-+		if (!pte_present(*pte_k))
-+			goto no_context;
-+		return;
-+	}
-+}
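A small sketch (not part of the patch) of the error_code convention documented in the comment above do_page_fault(): bit 0 distinguishes not-present from protection faults, bit 1 read from write, and bit 2 kernel from user mode.  The helper name is made up for illustration.

	#include <linux/kernel.h>

	/* Pretty-print the three architectural page-fault error_code bits. */
	static inline void describe_fault(unsigned long error_code)
	{
		printk(KERN_DEBUG "fault: %s %s access from %s mode\n",
		       (error_code & 1) ? "protection" : "not-present",
		       (error_code & 2) ? "write" : "read",
		       (error_code & 4) ? "user" : "kernel");
	}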
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/highmem.c linux-2.6.12-xen/arch/xen/i386/mm/highmem.c
---- pristine-linux-2.6.12/arch/xen/i386/mm/highmem.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/mm/highmem.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,99 @@
-+#include <linux/highmem.h>
-+
-+void *kmap(struct page *page)
-+{
-+	might_sleep();
-+	if (!PageHighMem(page))
-+		return page_address(page);
-+	return kmap_high(page);
-+}
-+
-+void kunmap(struct page *page)
-+{
-+	if (in_interrupt())
-+		BUG();
-+	if (!PageHighMem(page))
-+		return;
-+	kunmap_high(page);
-+}
-+
-+/*
-+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
-+ * no global lock is needed and because the kmap code must perform a global TLB
-+ * invalidation when the kmap pool wraps.
-+ *
-+ * However, when holding an atomic kmap it is not legal to sleep, so atomic
-+ * kmaps are appropriate for short, tight code paths only.
-+ */
-+static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
-+{
-+	enum fixed_addresses idx;
-+	unsigned long vaddr;
-+
-+	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+	inc_preempt_count();
-+	if (!PageHighMem(page))
-+		return page_address(page);
-+
-+	idx = type + KM_TYPE_NR*smp_processor_id();
-+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+	if (!pte_none(*(kmap_pte-idx)))
-+		BUG();
-+#endif
-+	set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
-+
-+	return (void*) vaddr;
-+}
-+
-+void *kmap_atomic(struct page *page, enum km_type type)
-+{
-+	return __kmap_atomic(page, type, kmap_prot);
-+}
-+
-+/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
-+void *kmap_atomic_pte(struct page *page, enum km_type type)
-+{
-+	return __kmap_atomic(page, type, PAGE_KERNEL_RO);
-+}
-+
-+void kunmap_atomic(void *kvaddr, enum km_type type)
-+{
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-+
-+	if (vaddr < FIXADDR_START) { // FIXME
-+		dec_preempt_count();
-+		preempt_check_resched();
-+		return;
-+	}
-+
-+	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-+		BUG();
-+
-+	/*
-+	 * force other mappings to Oops if they try to access
-+	 * this pte without first remapping it
-+	 */
-+	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-+	__flush_tlb_one(vaddr);
-+#endif
-+
-+	dec_preempt_count();
-+	preempt_check_resched();
-+}
-+
-+struct page *kmap_atomic_to_page(void *ptr)
-+{
-+	unsigned long idx, vaddr = (unsigned long)ptr;
-+	pte_t *pte;
-+
-+	if (vaddr < FIXADDR_START)
-+		return virt_to_page(ptr);
-+
-+	idx = virt_to_fix(vaddr);
-+	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-+	return pte_page(*pte);
-+}
-+
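A minimal usage sketch for the atomic kmap interface above (not part of the patch): between kmap_atomic() and kunmap_atomic() the code must not sleep, and the 2.6.12-era API takes an explicit km_type slot.  The helper name and the choice of KM_USER0 are illustrative only.

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Copy one (possibly highmem) page into a low-memory buffer. */
	static void copy_page_to_buf(void *dst, struct page *page)
	{
		void *src = kmap_atomic(page, KM_USER0);	/* no sleeping from here on */

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}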
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/hypervisor.c linux-2.6.12-xen/arch/xen/i386/mm/hypervisor.c
---- pristine-linux-2.6.12/arch/xen/i386/mm/hypervisor.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/mm/hypervisor.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,459 @@
-+/******************************************************************************
-+ * mm/hypervisor.c
-+ * 
-+ * Update page tables via the hypervisor.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/vmalloc.h>
-+#include <asm/page.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/balloon.h>
-+#include <asm-xen/xen-public/memory.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+#include <asm/tlbflush.h>
-+
-+#ifdef CONFIG_X86_64
-+#define pmd_val_ma(v) (v).pmd
-+#else
-+#ifdef CONFIG_X86_PAE
-+# define pmd_val_ma(v) ((v).pmd)
-+# define pud_val_ma(v) ((v).pgd.pgd)
-+#else
-+# define pmd_val_ma(v) ((v).pud.pgd.pgd)
-+#endif
-+#endif
-+
-+#ifndef CONFIG_XEN_SHADOW_MODE
-+void xen_l1_entry_update(pte_t *ptr, pte_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = pte_val_ma(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = pmd_val_ma(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#ifdef CONFIG_X86_PAE
-+void xen_l3_entry_update(pud_t *ptr, pud_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = pud_val_ma(val);
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif
-+
-+#ifdef CONFIG_X86_64
-+void xen_l3_entry_update(pud_t *ptr, pud_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = val.pud;
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
-+{
-+	mmu_update_t u;
-+	u.ptr = virt_to_machine(ptr);
-+	u.val = val.pgd;
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif /* CONFIG_X86_64 */
-+#endif /* CONFIG_XEN_SHADOW_MODE */
-+
-+void xen_machphys_update(unsigned long mfn, unsigned long pfn)
-+{
-+	mmu_update_t u;
-+	u.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-+	u.val = pfn;
-+	BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pt_switch(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_NEW_BASEPTR;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_new_user_pt(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_NEW_USER_BASEPTR;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_tlb_flush(void)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_invlpg(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_INVLPG_LOCAL;
-+	op.arg1.linear_addr = ptr & PAGE_MASK;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#ifdef CONFIG_SMP
-+
-+void xen_tlb_flush_all(void)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_TLB_FLUSH_ALL;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_tlb_flush_mask(cpumask_t *mask)
-+{
-+	struct mmuext_op op;
-+	if ( cpus_empty(*mask) )
-+		return;
-+	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-+	op.arg2.vcpumask = mask->bits;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_invlpg_all(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_INVLPG_ALL;
-+	op.arg1.linear_addr = ptr & PAGE_MASK;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	if ( cpus_empty(*mask) )
-+		return;
-+	op.cmd = MMUEXT_INVLPG_MULTI;
-+	op.arg1.linear_addr = ptr & PAGE_MASK;
-+	op.arg2.vcpumask    = mask->bits;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#endif /* CONFIG_SMP */
-+
-+#ifndef CONFIG_XEN_SHADOW_MODE
-+void xen_pgd_pin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+#ifdef CONFIG_X86_64
-+	op.cmd = MMUEXT_PIN_L4_TABLE;
-+#elif defined(CONFIG_X86_PAE)
-+	op.cmd = MMUEXT_PIN_L3_TABLE;
-+#else
-+	op.cmd = MMUEXT_PIN_L2_TABLE;
-+#endif
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pgd_unpin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_UNPIN_TABLE;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pte_pin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_PIN_L1_TABLE;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pte_unpin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_UNPIN_TABLE;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+#ifdef CONFIG_X86_64
-+void xen_pud_pin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_PIN_L3_TABLE;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pud_unpin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_UNPIN_TABLE;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pmd_pin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_PIN_L2_TABLE;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+void xen_pmd_unpin(unsigned long ptr)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_UNPIN_TABLE;
-+	op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+#endif /* CONFIG_X86_64 */
-+#endif /* CONFIG_XEN_SHADOW_MODE */
-+
-+void xen_set_ldt(unsigned long ptr, unsigned long len)
-+{
-+	struct mmuext_op op;
-+	op.cmd = MMUEXT_SET_LDT;
-+	op.arg1.linear_addr = ptr;
-+	op.arg2.nr_ents     = len;
-+	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
-+}
-+
-+/*
-+ * Bitmap is indexed by page number. If bit is set, the page is part of a
-+ * xen_create_contiguous_region() area of memory.
-+ */
-+unsigned long *contiguous_bitmap;
-+
-+static void contiguous_bitmap_set(
-+	unsigned long first_page, unsigned long nr_pages)
-+{
-+	unsigned long start_off, end_off, curr_idx, end_idx;
-+
-+	curr_idx  = first_page / BITS_PER_LONG;
-+	start_off = first_page & (BITS_PER_LONG-1);
-+	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
-+	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
-+
-+	if (curr_idx == end_idx) {
-+		contiguous_bitmap[curr_idx] |=
-+			((1UL<<end_off)-1) & -(1UL<<start_off);
-+	} else {
-+		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
-+		while ( ++curr_idx < end_idx )
-+			contiguous_bitmap[curr_idx] = ~0UL;
-+		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
-+	}
-+}
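A quick worked example of the single-word case above, assuming 32-bit longs: for first_page = 3 and nr_pages = 2 we get start_off = 3 and end_off = 5, so -(1UL<<start_off) is 0xfffffff8 and (1UL<<end_off)-1 is 0x0000001f; their AND is 0x00000018, i.e. exactly bits 3 and 4 of word 0, one bit per page in the region.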
-+
-+static void contiguous_bitmap_clear(
-+	unsigned long first_page, unsigned long nr_pages)
-+{
-+	unsigned long start_off, end_off, curr_idx, end_idx;
-+
-+	curr_idx  = first_page / BITS_PER_LONG;
-+	start_off = first_page & (BITS_PER_LONG-1);
-+	end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
-+	end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
-+
-+	if (curr_idx == end_idx) {
-+		contiguous_bitmap[curr_idx] &=
-+			-(1UL<<end_off) | ((1UL<<start_off)-1);
-+	} else {
-+		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
-+		while ( ++curr_idx != end_idx )
-+			contiguous_bitmap[curr_idx] = 0;
-+		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
-+	}
-+}
-+
-+/* Ensure multi-page extents are contiguous in machine memory. */
-+int xen_create_contiguous_region(
-+	unsigned long vstart, unsigned int order, unsigned int address_bits)
-+{
-+	pgd_t         *pgd; 
-+	pud_t         *pud; 
-+	pmd_t         *pmd;
-+	pte_t         *pte;
-+	unsigned long  frame, i, flags;
-+	struct xen_memory_reservation reservation = {
-+		.extent_start = &frame,
-+		.nr_extents   = 1,
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
-+
-+	scrub_pages(vstart, 1 << order);
-+
-+	balloon_lock(flags);
-+
-+	/* 1. Zap current PTEs, giving away the underlying pages. */
-+	for (i = 0; i < (1<<order); i++) {
-+		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-+		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-+		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-+		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-+		frame = pte_mfn(*pte);
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+			INVALID_P2M_ENTRY);
-+		BUG_ON(HYPERVISOR_memory_op(
-+			XENMEM_decrease_reservation, &reservation) != 1);
-+	}
-+
-+	/* 2. Get a new contiguous memory extent. */
-+	reservation.extent_order = order;
-+	reservation.address_bits = address_bits;
-+	frame = __pa(vstart) >> PAGE_SHIFT;
-+	if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-+				 &reservation) != 1)
-+		goto fail;
-+
-+	/* 3. Map the new extent in place of old pages. */
-+	for (i = 0; i < (1<<order); i++) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			vstart + (i*PAGE_SIZE),
-+			pfn_pte_ma(frame+i, PAGE_KERNEL), 0));
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame+i);
-+	}
-+
-+	flush_tlb_all();
-+
-+	contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
-+
-+	balloon_unlock(flags);
-+
-+	return 0;
-+
-+ fail:
-+	reservation.extent_order = 0;
-+	reservation.address_bits = 0;
-+
-+	for (i = 0; i < (1<<order); i++) {
-+		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
-+		BUG_ON(HYPERVISOR_memory_op(
-+			XENMEM_populate_physmap, &reservation) != 1);
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			vstart + (i*PAGE_SIZE),
-+			pfn_pte_ma(frame, PAGE_KERNEL), 0));
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+	}
-+
-+	flush_tlb_all();
-+
-+	balloon_unlock(flags);
-+
-+	return -ENOMEM;
-+}
-+
-+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
-+{
-+	pgd_t         *pgd; 
-+	pud_t         *pud; 
-+	pmd_t         *pmd;
-+	pte_t         *pte;
-+	unsigned long  frame, i, flags;
-+	struct xen_memory_reservation reservation = {
-+		.extent_start = &frame,
-+		.nr_extents   = 1,
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
-+
-+	scrub_pages(vstart, 1 << order);
-+
-+	balloon_lock(flags);
-+
-+	contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
-+
-+	/* 1. Zap current PTEs, giving away the underlying pages. */
-+	for (i = 0; i < (1<<order); i++) {
-+		pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-+		pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-+		pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-+		pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-+		frame = pte_mfn(*pte);
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
-+			INVALID_P2M_ENTRY);
-+		BUG_ON(HYPERVISOR_memory_op(
-+			XENMEM_decrease_reservation, &reservation) != 1);
-+	}
-+
-+	/* 2. Map new pages in place of old pages. */
-+	for (i = 0; i < (1<<order); i++) {
-+		frame = (__pa(vstart) >> PAGE_SHIFT) + i;
-+		BUG_ON(HYPERVISOR_memory_op(
-+			XENMEM_populate_physmap, &reservation) != 1);
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			vstart + (i*PAGE_SIZE),
-+			pfn_pte_ma(frame, PAGE_KERNEL), 0));
-+		set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
-+	}
-+
-+	flush_tlb_all();
-+
-+	balloon_unlock(flags);
-+}
-+
-+#ifdef __i386__
-+int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-+{
-+	__u32 *lp = (__u32 *)((char *)ldt + entry * 8);
-+	maddr_t mach_lp = arbitrary_virt_to_machine(lp);
-+	return HYPERVISOR_update_descriptor(
-+		mach_lp, (u64)entry_a | ((u64)entry_b<<32));
-+}
-+#endif
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
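The update helpers above each submit a single mmu_update_t, but HYPERVISOR_mmu_update() takes an array and a count, so several page-table writes can share one hypercall.  A minimal sketch under the same includes as the file above; the function name is invented.

	/* Update two PTEs with a single mmu_update hypercall. */
	static void xen_set_two_ptes(pte_t *p0, pte_t v0, pte_t *p1, pte_t v1)
	{
		mmu_update_t u[2];

		u[0].ptr = virt_to_machine(p0);
		u[0].val = pte_val_ma(v0);
		u[1].ptr = virt_to_machine(p1);
		u[1].val = pte_val_ma(v1);
		BUG_ON(HYPERVISOR_mmu_update(u, 2, NULL, DOMID_SELF) < 0);
	}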
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/init.c linux-2.6.12-xen/arch/xen/i386/mm/init.c
---- pristine-linux-2.6.12/arch/xen/i386/mm/init.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/mm/init.c	2006-02-25 00:05:34.156176000 +0100
-@@ -0,0 +1,780 @@
-+/*
-+ *  linux/arch/i386/mm/init.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/hugetlb.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/highmem.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/slab.h>
-+#include <linux/proc_fs.h>
-+#include <linux/efi.h>
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/dma.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/apic.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/sections.h>
-+#include <asm/hypervisor.h>
-+
-+extern unsigned long *contiguous_bitmap;
-+
-+#if defined(CONFIG_SWIOTLB)
-+extern void swiotlb_init(void);
-+int swiotlb;
-+EXPORT_SYMBOL(swiotlb);
-+#endif
-+
-+unsigned int __VMALLOC_RESERVE = 128 << 20;
-+
-+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-+unsigned long highstart_pfn, highend_pfn;
-+
-+static int noinline do_test_wp_bit(void);
-+
-+/*
-+ * Creates a middle page table and puts a pointer to it in the
-+ * given global directory entry. This only returns the gd entry
-+ * in non-PAE compilation mode, since the middle layer is folded.
-+ */
-+static pmd_t * __init one_md_table_init(pgd_t *pgd)
-+{
-+	pud_t *pud;
-+	pmd_t *pmd_table;
-+		
-+#ifdef CONFIG_X86_PAE
-+	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+	make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
-+	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-+	pud = pud_offset(pgd, 0);
-+	if (pmd_table != pmd_offset(pud, 0)) 
-+		BUG();
-+#else
-+	pud = pud_offset(pgd, 0);
-+	pmd_table = pmd_offset(pud, 0);
-+#endif
-+
-+	return pmd_table;
-+}
-+
-+/*
-+ * Create a page table and place a pointer to it in a middle page
-+ * directory entry.
-+ */
-+static pte_t * __init one_page_table_init(pmd_t *pmd)
-+{
-+	if (pmd_none(*pmd)) {
-+		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-+		make_lowmem_page_readonly(page_table,
-+					  XENFEAT_writable_page_tables);
-+		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
-+		if (page_table != pte_offset_kernel(pmd, 0))
-+			BUG();	
-+
-+		return page_table;
-+	}
-+	
-+	return pte_offset_kernel(pmd, 0);
-+}
-+
-+/*
-+ * This function initializes a certain range of kernel virtual memory 
-+ * with new bootmem page tables, wherever page tables are missing in
-+ * the given range.
-+ */
-+
-+/*
-+ * NOTE: The pagetables are allocated contiguous on the physical space 
-+ * so we can cache the place of the first one and move around without 
-+ * checking the pgd every time.
-+ */
-+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	int pgd_idx, pmd_idx;
-+	unsigned long vaddr;
-+
-+	vaddr = start;
-+	pgd_idx = pgd_index(vaddr);
-+	pmd_idx = pmd_index(vaddr);
-+	pgd = pgd_base + pgd_idx;
-+
-+	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-+		if (pgd_none(*pgd)) 
-+			one_md_table_init(pgd);
-+		pud = pud_offset(pgd, vaddr);
-+		pmd = pmd_offset(pud, vaddr);
-+		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
-+			if (vaddr < HYPERVISOR_VIRT_START && pmd_none(*pmd)) 
-+				one_page_table_init(pmd);
-+
-+			vaddr += PMD_SIZE;
-+		}
-+		pmd_idx = 0;
-+	}
-+}
-+
-+static inline int is_kernel_text(unsigned long addr)
-+{
-+	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
-+		return 1;
-+	return 0;
-+}
-+
-+/*
-+ * This maps the physical memory to kernel virtual address space, a total 
-+ * of max_low_pfn pages, by creating page tables starting from address 
-+ * PAGE_OFFSET.
-+ */
-+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
-+{
-+	unsigned long pfn;
-+	pgd_t *pgd;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	int pgd_idx, pmd_idx, pte_ofs;
-+
-+	unsigned long max_ram_pfn = xen_start_info->nr_pages;
-+	if (max_ram_pfn > max_low_pfn)
-+		max_ram_pfn = max_low_pfn;
-+
-+	pgd_idx = pgd_index(PAGE_OFFSET);
-+	pgd = pgd_base + pgd_idx;
-+	pfn = 0;
-+	pmd_idx = pmd_index(PAGE_OFFSET);
-+	pte_ofs = pte_index(PAGE_OFFSET);
-+
-+	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
-+#ifdef CONFIG_XEN
-+		/*
-+		 * Native Linux hasn't enabled PAE paging yet at this
-+		 * point.  When running as a Xen domain we are in PAE
-+		 * mode already, thus we can't simply hook in an empty
-+		 * pmd.  That would kill the mappings we are currently
-+		 * using ...
-+		 */
-+		pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
-+#else
-+		pmd = one_md_table_init(pgd);
-+#endif
-+		if (pfn >= max_low_pfn)
-+			continue;
-+		pmd += pmd_idx;
-+		for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
-+			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
-+			if (address >= HYPERVISOR_VIRT_START)
-+				continue;
-+
-+			/* Map with big pages if possible, otherwise create normal page tables. */
-+			if (cpu_has_pse) {
-+				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
-+
-+				if (is_kernel_text(address) || is_kernel_text(address2))
-+					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
-+				else
-+					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
-+				pfn += PTRS_PER_PTE;
-+			} else {
-+				pte = one_page_table_init(pmd);
-+
-+				pte += pte_ofs;
-+				for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
-+						/* XEN: Only map initial RAM allocation. */
-+						if ((pfn >= max_ram_pfn) || pte_present(*pte))
-+							continue;
-+						if (is_kernel_text(address))
-+							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
-+						else
-+							set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
-+				}
-+				pte_ofs = 0;
-+			}
-+		}
-+		pmd_idx = 0;
-+	}
-+}
-+
-+#ifndef CONFIG_XEN
-+
-+static inline int page_kills_ppro(unsigned long pagenr)
-+{
-+	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
-+		return 1;
-+	return 0;
-+}
-+
-+extern int is_available_memory(efi_memory_desc_t *);
-+
-+static inline int page_is_ram(unsigned long pagenr)
-+{
-+	int i;
-+	unsigned long addr, end;
-+
-+	if (efi_enabled) {
-+		efi_memory_desc_t *md;
-+
-+		for (i = 0; i < memmap.nr_map; i++) {
-+			md = &memmap.map[i];
-+			if (!is_available_memory(md))
-+				continue;
-+			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
-+
-+			if ((pagenr >= addr) && (pagenr < end))
-+				return 1;
-+		}
-+		return 0;
-+	}
-+
-+	for (i = 0; i < e820.nr_map; i++) {
-+
-+		if (e820.map[i].type != E820_RAM)	/* not usable memory */
-+			continue;
-+		/*
-+		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
-+		 *	are not. Notably the 640->1Mb area. We need a sanity
-+		 *	check here.
-+		 */
-+		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-+		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
-+		if  ((pagenr >= addr) && (pagenr < end))
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+#else /* CONFIG_XEN */
-+
-+#define page_kills_ppro(p)	0
-+#define page_is_ram(p)		1
-+
-+#endif
-+
-+#ifdef CONFIG_HIGHMEM
-+pte_t *kmap_pte;
-+pgprot_t kmap_prot;
-+
-+#define kmap_get_fixmap_pte(vaddr)					\
-+	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
-+
-+static void __init kmap_init(void)
-+{
-+	unsigned long kmap_vstart;
-+
-+	/* cache the first kmap pte */
-+	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-+	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-+
-+	kmap_prot = PAGE_KERNEL;
-+}
-+
-+static void __init permanent_kmaps_init(pgd_t *pgd_base)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	unsigned long vaddr;
-+
-+	vaddr = PKMAP_BASE;
-+	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
-+
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	pud = pud_offset(pgd, vaddr);
-+	pmd = pmd_offset(pud, vaddr);
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	pkmap_page_table = pte;	
-+}
-+
-+void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
-+{
-+	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
-+		ClearPageReserved(page);
-+		set_bit(PG_highmem, &page->flags);
-+		set_page_count(page, 1);
-+		if (pfn < xen_start_info->nr_pages)
-+			__free_page(page);
-+		totalhigh_pages++;
-+	} else
-+		SetPageReserved(page);
-+}
-+
-+#ifndef CONFIG_DISCONTIGMEM
-+static void __init set_highmem_pages_init(int bad_ppro)
-+{
-+	int pfn;
-+	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-+		one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
-+	totalram_pages += totalhigh_pages;
-+}
-+#else
-+extern void set_highmem_pages_init(int);
-+#endif /* !CONFIG_DISCONTIGMEM */
-+
-+#else
-+#define kmap_init() do { } while (0)
-+#define permanent_kmaps_init(pgd_base) do { } while (0)
-+#define set_highmem_pages_init(bad_ppro) do { } while (0)
-+#endif /* CONFIG_HIGHMEM */
-+
-+unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
-+unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-+
-+#ifndef CONFIG_DISCONTIGMEM
-+#define remap_numa_kva() do {} while (0)
-+#else
-+extern void __init remap_numa_kva(void);
-+#endif
-+
-+pgd_t *swapper_pg_dir;
-+
-+static void __init pagetable_init (void)
-+{
-+	unsigned long vaddr;
-+	pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
-+	int i;
-+
-+	swapper_pg_dir = pgd_base;
-+	init_mm.pgd    = pgd_base;
-+	for (i = 0; i < NR_CPUS; i++)
-+		per_cpu(cur_pgd, i) = pgd_base;
-+
-+	/* Enable PSE if available */
-+	if (cpu_has_pse) {
-+		set_in_cr4(X86_CR4_PSE);
-+	}
-+
-+	/* Enable PGE if available */
-+	if (cpu_has_pge) {
-+		set_in_cr4(X86_CR4_PGE);
-+		__PAGE_KERNEL |= _PAGE_GLOBAL;
-+		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
-+	}
-+
-+	kernel_physical_mapping_init(pgd_base);
-+	remap_numa_kva();
-+
-+	/*
-+	 * Fixed mappings, only the page table structure has to be
-+	 * created - mappings will be set by set_fixmap():
-+	 */
-+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-+	page_table_range_init(vaddr, 0, pgd_base);
-+
-+	permanent_kmaps_init(pgd_base);
-+}
-+
-+#if defined(CONFIG_PM_DISK) || defined(CONFIG_SOFTWARE_SUSPEND)
-+/*
-+ * Swap suspend & friends need this for resume because things like the intel-agp
-+ * driver might have split up a kernel 4MB mapping.
-+ */
-+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
-+	__attribute__ ((aligned (PAGE_SIZE)));
-+
-+static inline void save_pg_dir(void)
-+{
-+	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
-+}
-+#else
-+static inline void save_pg_dir(void)
-+{
-+}
-+#endif
-+
-+void zap_low_mappings (void)
-+{
-+	int i;
-+
-+	save_pg_dir();
-+
-+	/*
-+	 * Zap initial low-memory mappings.
-+	 *
-+	 * Note that "pgd_clear()" doesn't do it for
-+	 * us, because pgd_clear() is a no-op on i386.
-+	 */
-+	for (i = 0; i < USER_PTRS_PER_PGD; i++)
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
-+#else
-+		set_pgd(swapper_pg_dir+i, __pgd(0));
-+#endif
-+	flush_tlb_all();
-+}
-+
-+static int disable_nx __initdata = 0;
-+u64 __supported_pte_mask = ~_PAGE_NX;
-+
-+/*
-+ * noexec = on|off
-+ *
-+ * Control non executable mappings.
-+ *
-+ * on      Enable
-+ * off     Disable
-+ */
-+void __init noexec_setup(const char *str)
-+{
-+	if (!strncmp(str, "on",2) && cpu_has_nx) {
-+		__supported_pte_mask |= _PAGE_NX;
-+		disable_nx = 0;
-+	} else if (!strncmp(str,"off",3)) {
-+		disable_nx = 1;
-+		__supported_pte_mask &= ~_PAGE_NX;
-+	}
-+}
-+
-+int nx_enabled = 0;
-+#ifdef CONFIG_X86_PAE
-+
-+static void __init set_nx(void)
-+{
-+	unsigned int v[4], l, h;
-+
-+	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
-+		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
-+		if ((v[3] & (1 << 20)) && !disable_nx) {
-+			rdmsr(MSR_EFER, l, h);
-+			l |= EFER_NX;
-+			wrmsr(MSR_EFER, l, h);
-+			nx_enabled = 1;
-+			__supported_pte_mask |= _PAGE_NX;
-+		}
-+	}
-+}
-+
-+/*
-+ * Enables/disables executability of a given kernel page and
-+ * returns the previous setting.
-+ */
-+int __init set_kernel_exec(unsigned long vaddr, int enable)
-+{
-+	pte_t *pte;
-+	int ret = 1;
-+
-+	if (!nx_enabled)
-+		goto out;
-+
-+	pte = lookup_address(vaddr);
-+	BUG_ON(!pte);
-+
-+	if (!pte_exec_kernel(*pte))
-+		ret = 0;
-+
-+	if (enable)
-+		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-+	else
-+		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
-+	__flush_tlb_all();
-+out:
-+	return ret;
-+}
-+
-+#endif
-+
-+/*
-+ * paging_init() sets up the page tables - note that the first 8MB are
-+ * already mapped by head.S.
-+ *
-+ * This routine also unmaps the page at virtual kernel address 0, so
-+ * that we can trap those pesky NULL-reference errors in the kernel.
-+ */
-+void __init paging_init(void)
-+{
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+	int i;
-+#endif
-+
-+#ifdef CONFIG_X86_PAE
-+	set_nx();
-+	if (nx_enabled)
-+		printk("NX (Execute Disable) protection: active\n");
-+#endif
-+
-+	pagetable_init();
-+
-+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
-+	/*
-+	 * We will bail out later - printk doesn't work right now so
-+	 * the user would just see a hanging kernel.
-+	 * When running as a Xen domain we are already in PAE mode at
-+	 * this point.
-+	 */
-+	if (cpu_has_pae)
-+		set_in_cr4(X86_CR4_PAE);
-+#endif
-+	__flush_tlb_all();
-+
-+	kmap_init();
-+
-+	/* Switch to the real shared_info page, and clear the dummy page. */
-+	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+	memset(empty_zero_page, 0, sizeof(empty_zero_page));
-+
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+	/* Setup mapping of lower 1st MB */
-+	for (i = 0; i < NR_FIX_ISAMAPS; i++)
-+		if (xen_start_info->flags & SIF_PRIVILEGED)
-+			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-+		else
-+			__set_fixmap(FIX_ISAMAP_BEGIN - i,
-+				     virt_to_machine(empty_zero_page),
-+				     PAGE_KERNEL_RO);
-+#endif
-+}
-+
-+/*
-+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
-+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
-+ * used to involve black magic jumps to work around some nasty CPU bugs,
-+ * but fortunately the switch to using exceptions got rid of all that.
-+ */
-+
-+static void __init test_wp_bit(void)
-+{
-+	printk("Checking if this processor honours the WP bit even in supervisor mode... ");
-+
-+	/* Any page-aligned address will do, the test is non-destructive */
-+	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
-+	boot_cpu_data.wp_works_ok = do_test_wp_bit();
-+	clear_fixmap(FIX_WP_TEST);
-+
-+	if (!boot_cpu_data.wp_works_ok) {
-+		printk("No.\n");
-+#ifdef CONFIG_X86_WP_WORKS_OK
-+		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
-+#endif
-+	} else {
-+		printk("Ok.\n");
-+	}
-+}
-+
-+static void __init set_max_mapnr_init(void)
-+{
-+#ifdef CONFIG_HIGHMEM
-+	num_physpages = highend_pfn;
-+#else
-+	num_physpages = max_low_pfn;
-+#endif
-+#ifndef CONFIG_DISCONTIGMEM
-+	max_mapnr = num_physpages;
-+#endif
-+}
-+
-+static struct kcore_list kcore_mem, kcore_vmalloc; 
-+
-+void __init mem_init(void)
-+{
-+	extern int ppro_with_ram_bug(void);
-+	int codesize, reservedpages, datasize, initsize;
-+	int tmp;
-+	int bad_ppro;
-+	unsigned long pfn;
-+
-+	contiguous_bitmap = alloc_bootmem_low_pages(
-+		(max_low_pfn + 2*BITS_PER_LONG) >> 3);
-+	BUG_ON(!contiguous_bitmap);
-+	memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
-+
-+#if defined(CONFIG_SWIOTLB)
-+	swiotlb_init();	
-+#endif
-+
-+#ifndef CONFIG_DISCONTIGMEM
-+	if (!mem_map)
-+		BUG();
-+#endif
-+	
-+	bad_ppro = ppro_with_ram_bug();
-+
-+#ifdef CONFIG_HIGHMEM
-+	/* check that fixmap and pkmap do not overlap */
-+	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
-+		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
-+		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
-+				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
-+		BUG();
-+	}
-+#endif
-+ 
-+	set_max_mapnr_init();
-+
-+#ifdef CONFIG_HIGHMEM
-+	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
-+#else
-+	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
-+#endif
-+	printk("vmalloc area: %lx-%lx, maxmem %lx\n",
-+	       VMALLOC_START,VMALLOC_END,MAXMEM);
-+	BUG_ON(VMALLOC_START > VMALLOC_END);
-+	
-+	/* this will put all low memory onto the freelists */
-+	totalram_pages += free_all_bootmem();
-+	/* XEN: init and count low-mem pages outside initial allocation. */
-+	for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
-+		ClearPageReserved(&mem_map[pfn]);
-+		set_page_count(&mem_map[pfn], 1);
-+		totalram_pages++;
-+	}
-+
-+	reservedpages = 0;
-+	for (tmp = 0; tmp < max_low_pfn; tmp++)
-+		/*
-+		 * Only count reserved RAM pages
-+		 */
-+		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
-+			reservedpages++;
-+
-+	set_highmem_pages_init(bad_ppro);
-+
-+	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-+	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-+	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
-+
-+	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
-+	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
-+		   VMALLOC_END-VMALLOC_START);
-+
-+	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
-+		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-+		num_physpages << (PAGE_SHIFT-10),
-+		codesize >> 10,
-+		reservedpages << (PAGE_SHIFT-10),
-+		datasize >> 10,
-+		initsize >> 10,
-+		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
-+	       );
-+
-+#ifdef CONFIG_X86_PAE
-+	if (!cpu_has_pae)
-+		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
-+#endif
-+	if (boot_cpu_data.wp_works_ok < 0)
-+		test_wp_bit();
-+
-+	/*
-+	 * Subtle. SMP is doing its boot stuff late (because it has to
-+	 * fork idle threads) - but it also needs low mappings for the
-+	 * protected-mode entry to work. We zap these entries only after
-+	 * the WP-bit has been tested.
-+	 */
-+#ifndef CONFIG_SMP
-+	zap_low_mappings();
-+#endif
-+
-+	set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
-+}
-+
-+kmem_cache_t *pgd_cache;
-+kmem_cache_t *pmd_cache;
-+
-+void __init pgtable_cache_init(void)
-+{
-+	if (PTRS_PER_PMD > 1) {
-+		pmd_cache = kmem_cache_create("pmd",
-+					PTRS_PER_PMD*sizeof(pmd_t),
-+					PTRS_PER_PMD*sizeof(pmd_t),
-+					0,
-+					pmd_ctor,
-+					NULL);
-+		if (!pmd_cache)
-+			panic("pgtable_cache_init(): cannot create pmd cache");
-+	}
-+	pgd_cache = kmem_cache_create("pgd",
-+#ifndef CONFIG_XEN
-+				PTRS_PER_PGD*sizeof(pgd_t),
-+				PTRS_PER_PGD*sizeof(pgd_t),
-+#else
-+				PAGE_SIZE,
-+				PAGE_SIZE,
-+#endif
-+				0,
-+				pgd_ctor,
-+				pgd_dtor);
-+	if (!pgd_cache)
-+		panic("pgtable_cache_init(): Cannot create pgd cache");
-+}
-+
-+/*
-+ * This function cannot be __init, since exceptions don't work in that
-+ * section.  Put this after the callers, so that it cannot be inlined.
-+ */
-+static int noinline do_test_wp_bit(void)
-+{
-+	char tmp_reg;
-+	int flag;
-+
-+	__asm__ __volatile__(
-+		"	movb %0,%1	\n"
-+		"1:	movb %1,%0	\n"
-+		"	xorl %2,%2	\n"
-+		"2:			\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4	\n"
-+		"	.long 1b,2b	\n"
-+		".previous		\n"
-+		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
-+		 "=q" (tmp_reg),
-+		 "=r" (flag)
-+		:"2" (1)
-+		:"memory");
-+	
-+	return flag;
-+}
-+
-+void free_initmem(void)
-+{
-+	unsigned long addr;
-+
-+	addr = (unsigned long)(&__init_begin);
-+	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-+		ClearPageReserved(virt_to_page(addr));
-+		set_page_count(virt_to_page(addr), 1);
-+		memset((void *)addr, 0xcc, PAGE_SIZE);
-+		free_page(addr);
-+		totalram_pages++;
-+	}
-+	printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
-+}
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+void free_initrd_mem(unsigned long start, unsigned long end)
-+{
-+	if (start < end)
-+		printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-+	for (; start < end; start += PAGE_SIZE) {
-+		ClearPageReserved(virt_to_page(start));
-+		set_page_count(virt_to_page(start), 1);
-+		free_page(start);
-+		totalram_pages++;
-+	}
-+}
-+#endif
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/ioremap.c linux-2.6.12-xen/arch/xen/i386/mm/ioremap.c
---- pristine-linux-2.6.12/arch/xen/i386/mm/ioremap.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/mm/ioremap.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,470 @@
-+/*
-+ * arch/i386/mm/ioremap.c
-+ *
-+ * Re-map IO memory to kernel address space so that we can access it.
-+ * This is needed for high PCI addresses that aren't mapped in the
-+ * 640k-1MB IO memory area on PC's
-+ *
-+ * (C) Copyright 1995 1996 Linus Torvalds
-+ */
-+
-+#include <linux/vmalloc.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/module.h>
-+#include <asm/io.h>
-+#include <asm/fixmap.h>
-+#include <asm/cacheflush.h>
-+#include <asm/tlbflush.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+
-+#define ISA_START_ADDRESS	0x0
-+#define ISA_END_ADDRESS		0x100000
-+
-+#if 0 /* not PAE safe */
-+/* These hacky macros avoid phys->machine translations. */
-+#define __direct_pte(x) ((pte_t) { (x) } )
-+#define __direct_mk_pte(page_nr,pgprot) \
-+  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-+#define direct_mk_pte_phys(physpage, pgprot) \
-+  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-+#endif
-+
-+static int direct_remap_area_pte_fn(pte_t *pte, 
-+				    struct page *pte_page,
-+				    unsigned long address, 
-+				    void *data)
-+{
-+	mmu_update_t **v = (mmu_update_t **)data;
-+
-+	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pte_page)) <<
-+		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
-+	(*v)++;
-+
-+	return 0;
-+}
-+
-+static int __direct_remap_pfn_range(struct mm_struct *mm,
-+				    unsigned long address, 
-+				    unsigned long mfn,
-+				    unsigned long size, 
-+				    pgprot_t prot,
-+				    domid_t  domid)
-+{
-+	int rc;
-+	unsigned long i, start_address;
-+	mmu_update_t *u, *v, *w;
-+
-+	u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-+	if (u == NULL)
-+		return -ENOMEM;
-+
-+	start_address = address;
-+
-+	flush_cache_all();
-+
-+	for (i = 0; i < size; i += PAGE_SIZE) {
-+		if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
-+			/* Fill in the PTE pointers. */
-+			rc = generic_page_range(mm, start_address, 
-+						address - start_address,
-+						direct_remap_area_pte_fn, &w);
-+			if (rc)
-+				goto out;
-+			w = u;
-+			rc = -EFAULT;
-+			if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
-+				goto out;
-+			v = u;
-+			start_address = address;
-+		}
-+
-+		/*
-+		 * Fill in the machine address; the PTE pointer is filled
-+		 * in later by direct_remap_area_pte_fn().
-+		 */
-+		v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
-+
-+		mfn++;
-+		address += PAGE_SIZE; 
-+		v++;
-+	}
-+
-+	if (v != u) {
-+		/* get the ptep's filled in */
-+		rc = generic_page_range(mm, start_address, address - start_address,
-+				   direct_remap_area_pte_fn, &w);
-+		if (rc)
-+			goto out;
-+		rc = -EFAULT;
-+		if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
-+			goto out;
-+	}
-+
-+	rc = 0;
-+
-+ out:
-+	flush_tlb_all();
-+
-+	free_page((unsigned long)u);
-+
-+	return rc;
-+}
-+
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+			   unsigned long address, 
-+			   unsigned long mfn,
-+			   unsigned long size, 
-+			   pgprot_t prot,
-+			   domid_t  domid)
-+{
-+	/* Same as remap_pfn_range(). */
-+	vma->vm_flags |= VM_IO | VM_RESERVED;
-+
-+	return __direct_remap_pfn_range(
-+		vma->vm_mm, address, mfn, size, prot, domid);
-+}
-+EXPORT_SYMBOL(direct_remap_pfn_range);
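The loop in __direct_remap_pfn_range() above queues PTE updates into a single page's worth of mmu_update_t entries and only issues the hypercall once that buffer fills, plus one final flush for the remainder. Below is a minimal, self-contained user-space sketch of that batching pattern; apply_batch() and BATCH are made-up stand-ins for HYPERVISOR_mmu_update() and the per-page entry count, not code from the patch.

#include <stdio.h>

#define BATCH 4	/* stand-in for PAGE_SIZE / sizeof(mmu_update_t) */

struct update { unsigned long ptr, val; };

/* Stub for the hypercall that applies a queued batch of updates. */
static void apply_batch(const struct update *u, int n)
{
	(void)u;
	printf("flushing %d queued update(s)\n", n);
}

int main(void)
{
	struct update buf[BATCH];
	int queued = 0, i;

	for (i = 0; i < 10; i++) {
		if (queued == BATCH) {	/* buffer full: flush before queuing more */
			apply_batch(buf, queued);
			queued = 0;
		}
		buf[queued].ptr = i;	/* queue one update */
		buf[queued].val = i * 2;
		queued++;
	}
	if (queued)			/* flush whatever is left at the end */
		apply_batch(buf, queued);
	return 0;
}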
-+
-+int direct_kernel_remap_pfn_range(unsigned long address, 
-+				  unsigned long mfn,
-+				  unsigned long size, 
-+				  pgprot_t prot,
-+				  domid_t  domid)
-+{
-+	return __direct_remap_pfn_range(
-+		&init_mm, address, mfn, size, prot, domid);
-+}
-+EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
-+
-+static int lookup_pte_fn(
-+	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-+{
-+	uint64_t *ptep = (uint64_t *)data;
-+	if (ptep)
-+		*ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
-+			 PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
-+	return 0;
-+}
-+
-+int create_lookup_pte_addr(struct mm_struct *mm, 
-+			   unsigned long address,
-+			   uint64_t *ptep)
-+{
-+	return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
-+}
-+
-+EXPORT_SYMBOL(create_lookup_pte_addr);
-+
-+static int noop_fn(
-+	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-+{
-+	return 0;
-+}
-+
-+int touch_pte_range(struct mm_struct *mm,
-+		    unsigned long address,
-+		    unsigned long size)
-+{
-+	return generic_page_range(mm, address, size, noop_fn, NULL);
-+} 
-+
-+EXPORT_SYMBOL(touch_pte_range);
-+
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+
-+/*
-+ * Does @address reside within a non-highmem page that is local to this virtual
-+ * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
-+ * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
-+ * why this works.
-+ */
-+static inline int is_local_lowmem(unsigned long address)
-+{
-+	extern unsigned long max_low_pfn;
-+	unsigned long mfn = address >> PAGE_SHIFT;
-+	unsigned long pfn = mfn_to_pfn(mfn);
-+	return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
-+}
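is_local_lowmem() above relies on the mfn -> pfn -> mfn round trip: only frames this domain actually owns map back to themselves, so I/O space and frames belonging to other domains fail the test. A toy user-space model of that check follows; the arrays and helper names are invented for illustration and are not the kernel's real translation tables.

#include <stdio.h>

#define MAX_LOW_PFN 4

/* pfn -> mfn table of the (toy) domain */
static unsigned long phys_to_machine[MAX_LOW_PFN] = { 100, 101, 102, 103 };

/* Reverse lookup standing in for the global machine -> physical table;
 * frames we do not own come back as an out-of-range pfn. */
static unsigned long toy_mfn_to_pfn(unsigned long mfn)
{
	unsigned long pfn;

	for (pfn = 0; pfn < MAX_LOW_PFN; pfn++)
		if (phys_to_machine[pfn] == mfn)
			return pfn;
	return (unsigned long)-1;
}

static int is_local(unsigned long mfn)
{
	unsigned long pfn = toy_mfn_to_pfn(mfn);

	return pfn < MAX_LOW_PFN && phys_to_machine[pfn] == mfn;
}

int main(void)
{
	printf("mfn 102: local=%d\n", is_local(102));	/* owned -> 1 */
	printf("mfn 999: local=%d\n", is_local(999));	/* foreign/I/O -> 0 */
	return 0;
}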
-+
-+/*
-+ * Generic mapping function (not visible outside):
-+ */
-+
-+/*
-+ * Remap an arbitrary physical address space into the kernel virtual
-+ * address space. Needed when the kernel wants to access high addresses
-+ * directly.
-+ *
-+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
-+ * have to convert them into an offset in a page-aligned mapping, but the
-+ * caller shouldn't need to know that small detail.
-+ */
-+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-+{
-+	void __iomem * addr;
-+	struct vm_struct * area;
-+	unsigned long offset, last_addr;
-+	domid_t domid = DOMID_IO;
-+
-+	/* Don't allow wraparound or zero size */
-+	last_addr = phys_addr + size - 1;
-+	if (!size || last_addr < phys_addr)
-+		return NULL;
-+
-+	/*
-+	 * Don't remap the low PCI/ISA area, it's always mapped..
-+	 */
-+	if (xen_start_info->flags & SIF_PRIVILEGED &&
-+	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-+		return (void __iomem *) isa_bus_to_virt(phys_addr);
-+
-+	/*
-+	 * Don't allow anybody to remap normal RAM that we're using..
-+	 */
-+	if (is_local_lowmem(phys_addr)) {
-+		char *t_addr, *t_end;
-+		struct page *page;
-+
-+		t_addr = bus_to_virt(phys_addr);
-+		t_end = t_addr + (size - 1);
-+	   
-+		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-+			if(!PageReserved(page))
-+				return NULL;
-+
-+		domid = DOMID_SELF;
-+	}
-+
-+	/*
-+	 * Mappings have to be page-aligned
-+	 */
-+	offset = phys_addr & ~PAGE_MASK;
-+	phys_addr &= PAGE_MASK;
-+	size = PAGE_ALIGN(last_addr+1) - phys_addr;
-+
-+	/*
-+	 * Ok, go for it..
-+	 */
-+	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
-+	if (!area)
-+		return NULL;
-+	area->phys_addr = phys_addr;
-+	addr = (void __iomem *) area->addr;
-+	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
-+#ifdef __x86_64__
-+	flags |= _PAGE_USER;
-+#endif
-+	if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
-+				     phys_addr>>PAGE_SHIFT,
-+				     size, __pgprot(flags), domid)) {
-+		vunmap((void __force *) addr);
-+		return NULL;
-+	}
-+	return (void __iomem *) (offset + (char __iomem *)addr);
-+}
-+
-+
-+/**
-+ * ioremap_nocache     -   map bus memory into CPU space
-+ * @offset:    bus address of the memory
-+ * @size:      size of the resource to map
-+ *
-+ * ioremap_nocache performs a platform specific sequence of operations to
-+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
-+ * writew/writel functions and the other mmio helpers. The returned
-+ * address is not guaranteed to be usable directly as a virtual
-+ * address. 
-+ *
-+ * This version of ioremap ensures that the memory is marked uncachable
-+ * on the CPU as well as honouring existing caching rules from things like
-+ * the PCI bus. Note that there are other caches and buffers on many 
-+ * busses. In particular driver authors should read up on PCI writes
-+ *
-+ * It's useful if some control registers are in such an area and
-+ * write combining or read caching is not desirable:
-+ * 
-+ * Must be freed with iounmap.
-+ */
-+
-+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-+{
-+	unsigned long last_addr;
-+	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
-+	if (!p) 
-+		return p; 
-+
-+	/* Guaranteed to be > phys_addr, as per __ioremap() */
-+	last_addr = phys_addr + size - 1;
-+
-+	if (is_local_lowmem(last_addr)) { 
-+		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
-+		unsigned long npages;
-+
-+		phys_addr &= PAGE_MASK;
-+
-+		/* This might overflow and become zero.. */
-+		last_addr = PAGE_ALIGN(last_addr);
-+
-+		/* .. but that's ok, because modulo-2**n arithmetic will make
-+	 	* the page-aligned "last - first" come out right.
-+	 	*/
-+		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
-+
-+		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
-+			iounmap(p); 
-+			p = NULL;
-+		}
-+		global_flush_tlb();
-+	}
-+
-+	return p;					
-+}
-+
-+void iounmap(volatile void __iomem *addr)
-+{
-+	struct vm_struct *p;
-+	if ((void __force *) addr <= high_memory) 
-+		return;
-+
-+	/*
-+	 * __ioremap special-cases the PCI/ISA range by not instantiating a
-+	 * vm_area and by simply returning an address into the kernel mapping
-+	 * of ISA space.   So handle that here.
-+	 */
-+	if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-+		return;
-+
-+	write_lock(&vmlist_lock);
-+	p = __remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
-+	if (!p) { 
-+		printk("iounmap: bad address %p\n", addr);
-+		goto out_unlock;
-+	}
-+
-+	if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
-+		/* p->size includes the guard page, but cpa doesn't like that */
-+		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
-+				 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
-+				 PAGE_KERNEL);
-+		global_flush_tlb();
-+	} 
-+out_unlock:
-+	write_unlock(&vmlist_lock);
-+	kfree(p); 
-+}
-+
-+#ifdef __i386__
-+
-+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-+{
-+	unsigned long offset, last_addr;
-+	unsigned int nrpages;
-+	enum fixed_addresses idx;
-+
-+	/* Don't allow wraparound or zero size */
-+	last_addr = phys_addr + size - 1;
-+	if (!size || last_addr < phys_addr)
-+		return NULL;
-+
-+	/*
-+	 * Don't remap the low PCI/ISA area, it's always mapped..
-+	 */
-+	if (xen_start_info->flags & SIF_PRIVILEGED &&
-+	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-+		return isa_bus_to_virt(phys_addr);
-+
-+	/*
-+	 * Mappings have to be page-aligned
-+	 */
-+	offset = phys_addr & ~PAGE_MASK;
-+	phys_addr &= PAGE_MASK;
-+	size = PAGE_ALIGN(last_addr) - phys_addr;
-+
-+	/*
-+	 * Mappings have to fit in the FIX_BTMAP area.
-+	 */
-+	nrpages = size >> PAGE_SHIFT;
-+	if (nrpages > NR_FIX_BTMAPS)
-+		return NULL;
-+
-+	/*
-+	 * Ok, go for it..
-+	 */
-+	idx = FIX_BTMAP_BEGIN;
-+	while (nrpages > 0) {
-+		set_fixmap(idx, phys_addr);
-+		phys_addr += PAGE_SIZE;
-+		--idx;
-+		--nrpages;
-+	}
-+	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
-+}
-+
-+void __init bt_iounmap(void *addr, unsigned long size)
-+{
-+	unsigned long virt_addr;
-+	unsigned long offset;
-+	unsigned int nrpages;
-+	enum fixed_addresses idx;
-+
-+	virt_addr = (unsigned long)addr;
-+	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
-+		return;
-+	if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
-+		return;
-+	offset = virt_addr & ~PAGE_MASK;
-+	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
-+
-+	idx = FIX_BTMAP_BEGIN;
-+	while (nrpages > 0) {
-+		clear_fixmap(idx);
-+		--idx;
-+		--nrpages;
-+	}
-+}
-+
-+#endif /* __i386__ */
-+
-+#else /* CONFIG_XEN_PHYSDEV_ACCESS */
-+
-+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size,
-+			 unsigned long flags)
-+{
-+	return NULL;
-+}
-+
-+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-+{
-+	return NULL;
-+}
-+
-+void iounmap(volatile void __iomem *addr)
-+{
-+}
-+
-+#ifdef __i386__
-+
-+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-+{
-+	return NULL;
-+}
-+
-+void __init bt_iounmap(void *addr, unsigned long size)
-+{
-+}
-+
-+#endif /* __i386__ */
-+
-+#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/Makefile linux-2.6.12-xen/arch/xen/i386/mm/Makefile
---- pristine-linux-2.6.12/arch/xen/i386/mm/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/mm/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,24 @@
-+#
-+# Makefile for the linux i386-specific parts of the memory manager.
-+#
-+
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+CFLAGS	+= -Iarch/$(XENARCH)/mm
-+
-+obj-y	:= init.o pgtable.o fault.o ioremap.o hypervisor.o
-+c-obj-y	:= extable.o mmap.o pageattr.o
-+
-+c-obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
-+c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-+obj-$(CONFIG_HIGHMEM) += highmem.o
-+c-obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
-+
-+c-link	:=
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-+	@ln -fsn $(srctree)/arch/i386/mm/$(notdir $@) $@
-+
-+obj-y	+= $(c-obj-y)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/mm/pgtable.c linux-2.6.12-xen/arch/xen/i386/mm/pgtable.c
---- pristine-linux-2.6.12/arch/xen/i386/mm/pgtable.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/mm/pgtable.c	2006-02-25 00:05:34.159175000 +0100
-@@ -0,0 +1,622 @@
-+/*
-+ *  linux/arch/i386/mm/pgtable.c
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/highmem.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/spinlock.h>
-+
-+#include <asm/system.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/tlb.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+#include <asm/mmu_context.h>
-+
-+#include <asm-xen/features.h>
-+#include <asm-xen/foreign_page.h>
-+#include <asm/hypervisor.h>
-+
-+static void pgd_test_and_unpin(pgd_t *pgd);
-+
-+void show_mem(void)
-+{
-+	int total = 0, reserved = 0;
-+	int shared = 0, cached = 0;
-+	int highmem = 0;
-+	struct page *page;
-+	pg_data_t *pgdat;
-+	unsigned long i;
-+
-+	printk("Mem-info:\n");
-+	show_free_areas();
-+	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-+	for_each_pgdat(pgdat) {
-+		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-+			page = pgdat->node_mem_map + i;
-+			total++;
-+			if (PageHighMem(page))
-+				highmem++;
-+			if (PageReserved(page))
-+				reserved++;
-+			else if (PageSwapCache(page))
-+				cached++;
-+			else if (page_count(page))
-+				shared += page_count(page) - 1;
-+		}
-+	}
-+	printk("%d pages of RAM\n", total);
-+	printk("%d pages of HIGHMEM\n",highmem);
-+	printk("%d reserved pages\n",reserved);
-+	printk("%d pages shared\n",shared);
-+	printk("%d pages swap cached\n",cached);
-+}
-+
-+/*
-+ * Associate a virtual page frame with a given physical page frame 
-+ * and protection flags for that frame.
-+ */ 
-+static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	if (pgd_none(*pgd)) {
-+		BUG();
-+		return;
-+	}
-+	pud = pud_offset(pgd, vaddr);
-+	if (pud_none(*pud)) {
-+		BUG();
-+		return;
-+	}
-+	pmd = pmd_offset(pud, vaddr);
-+	if (pmd_none(*pmd)) {
-+		BUG();
-+		return;
-+	}
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	/* <pfn,flags> stored as-is, to permit clearing entries */
-+	set_pte(pte, pfn_pte(pfn, flags));
-+
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
-+}
-+
-+/*
-+ * Associate a virtual page frame with a given physical page frame 
-+ * and protection flags for that frame.
-+ */ 
-+static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
-+			   pgprot_t flags)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	if (pgd_none(*pgd)) {
-+		BUG();
-+		return;
-+	}
-+	pud = pud_offset(pgd, vaddr);
-+	if (pud_none(*pud)) {
-+		BUG();
-+		return;
-+	}
-+	pmd = pmd_offset(pud, vaddr);
-+	if (pmd_none(*pmd)) {
-+		BUG();
-+		return;
-+	}
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	/* <pfn,flags> stored as-is, to permit clearing entries */
-+	set_pte(pte, pfn_pte_ma(pfn, flags));
-+
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
-+}
-+
-+/*
-+ * Associate a large virtual page frame with a given physical page frame 
-+ * and protection flags for that frame. pfn is for the base of the page,
-+ * vaddr is what the page gets mapped to - both must be properly aligned. 
-+ * The pmd must already be instantiated. Assumes PAE mode.
-+ */ 
-+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+
-+	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
-+		printk ("set_pmd_pfn: vaddr misaligned\n");
-+		return; /* BUG(); */
-+	}
-+	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
-+		printk ("set_pmd_pfn: pfn misaligned\n");
-+		return; /* BUG(); */
-+	}
-+	pgd = swapper_pg_dir + pgd_index(vaddr);
-+	if (pgd_none(*pgd)) {
-+		printk ("set_pmd_pfn: pgd_none\n");
-+		return; /* BUG(); */
-+	}
-+	pud = pud_offset(pgd, vaddr);
-+	pmd = pmd_offset(pud, vaddr);
-+	set_pmd(pmd, pfn_pmd(pfn, flags));
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
-+}
-+
-+void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
-+{
-+	unsigned long address = __fix_to_virt(idx);
-+
-+	if (idx >= __end_of_fixed_addresses) {
-+		BUG();
-+		return;
-+	}
-+	switch (idx) {
-+	case FIX_WP_TEST:
-+	case FIX_VSYSCALL:
-+#ifdef CONFIG_X86_F00F_BUG
-+	case FIX_F00F_IDT:
-+#endif
-+		set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
-+		break;
-+	default:
-+		set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
-+		break;
-+	}
-+}
-+
-+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-+{
-+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-+	if (pte)
-+		make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
-+	return pte;
-+}
-+
-+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-+{
-+	struct page *pte;
-+
-+#ifdef CONFIG_HIGHPTE
-+	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
-+#else
-+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-+	if (pte) {
-+		SetPageForeign(pte, pte_free);
-+		set_page_count(pte, 1);
-+	}
-+#endif
-+
-+	return pte;
-+}
-+
-+void pte_free(struct page *pte)
-+{
-+	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
-+
-+	if (!pte_write(*virt_to_ptep(va)))
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
-+
-+	ClearPageForeign(pte);
-+	set_page_count(pte, 1);
-+
-+	__free_page(pte);
-+}
-+
-+void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
-+{
-+	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+}
-+
-+/*
-+ * List of all pgd's needed for non-PAE so it can invalidate entries
-+ * in both cached and uncached pgd's; not needed for PAE since the
-+ * kernel pmd is shared. If PAE were not to share the pmd a similar
-+ * tactic would be needed. This is essentially codepath-based locking
-+ * against pageattr.c; it is the unique case in which a valid change
-+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
-+ * vmalloc faults work because attached pagetables are never freed.
-+ * The locking scheme was chosen on the basis of manfred's
-+ * recommendations and having no core impact whatsoever.
-+ * -- wli
-+ */
-+DEFINE_SPINLOCK(pgd_lock);
-+struct page *pgd_list;
-+
-+static inline void pgd_list_add(pgd_t *pgd)
-+{
-+	struct page *page = virt_to_page(pgd);
-+	page->index = (unsigned long)pgd_list;
-+	if (pgd_list)
-+		pgd_list->private = (unsigned long)&page->index;
-+	pgd_list = page;
-+	page->private = (unsigned long)&pgd_list;
-+}
-+
-+static inline void pgd_list_del(pgd_t *pgd)
-+{
-+	struct page *next, **pprev, *page = virt_to_page(pgd);
-+	next = (struct page *)page->index;
-+	pprev = (struct page **)page->private;
-+	*pprev = next;
-+	if (next)
-+		next->private = (unsigned long)pprev;
-+}
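pgd_list_add() and pgd_list_del() above implement a doubly linked list directly in struct page: page->index holds the next pointer and page->private holds the address of whatever field points at this entry (the list head or the previous entry's index field), which lets deletion unlink in O(1) without walking the list. Here is a standalone sketch of that idiom with explicit types; struct node and its field names are invented for illustration.

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;	/* plays the role of page->index */
	struct node **pprev;	/* plays the role of page->private */
};

static struct node *list_head;

static void node_add(struct node *n)
{
	n->next = list_head;
	if (list_head)
		list_head->pprev = &n->next;
	list_head = n;
	n->pprev = &list_head;
}

static void node_del(struct node *n)
{
	*n->pprev = n->next;		/* unhook from whoever pointed at us */
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a, b;

	node_add(&a);
	node_add(&b);			/* list is now: b -> a */
	node_del(&b);			/* O(1) unlink, no list walk */
	assert(list_head == &a);
	node_del(&a);
	assert(list_head == NULL);
	return 0;
}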
-+
-+void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
-+{
-+	unsigned long flags;
-+
-+	if (PTRS_PER_PMD > 1) {
-+		/* Ensure pgd resides below 4GB. */
-+		int rc = xen_create_contiguous_region(
-+			(unsigned long)pgd, 0, 32);
-+		BUG_ON(rc);
-+		if (HAVE_SHARED_KERNEL_PMD)
-+			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+			       swapper_pg_dir + USER_PTRS_PER_PGD,
-+			       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-+	} else {
-+		spin_lock_irqsave(&pgd_lock, flags);
-+		memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+		       swapper_pg_dir + USER_PTRS_PER_PGD,
-+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-+		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+		pgd_list_add(pgd);
-+		spin_unlock_irqrestore(&pgd_lock, flags);
-+	}
-+}
-+
-+void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
-+{
-+	unsigned long flags; /* can be called from interrupt context */
-+
-+	if (PTRS_PER_PMD > 1) {
-+		xen_destroy_contiguous_region((unsigned long)pgd, 0);
-+	} else {
-+		spin_lock_irqsave(&pgd_lock, flags);
-+		pgd_list_del(pgd);
-+		spin_unlock_irqrestore(&pgd_lock, flags);
-+
-+		pgd_test_and_unpin(pgd);
-+	}
-+}
-+
-+pgd_t *pgd_alloc(struct mm_struct *mm)
-+{
-+	int i;
-+	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
-+
-+	pgd_test_and_unpin(pgd);
-+
-+	if (PTRS_PER_PMD == 1 || !pgd)
-+		return pgd;
-+
-+	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-+		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+		if (!pmd)
-+			goto out_oom;
-+		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
-+	}
-+
-+	if (!HAVE_SHARED_KERNEL_PMD) {
-+		unsigned long flags;
-+
-+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+			if (!pmd)
-+				goto out_oom;
-+			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
-+		}
-+
-+		spin_lock_irqsave(&pgd_lock, flags);
-+		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
-+			pgd_t *kpgd = pgd_offset_k(v);
-+			pud_t *kpud = pud_offset(kpgd, v);
-+			pmd_t *kpmd = pmd_offset(kpud, v);
-+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+			memcpy(pmd, kpmd, PAGE_SIZE);
-+			make_lowmem_page_readonly(
-+				pmd, XENFEAT_writable_page_tables);
-+		}
-+		pgd_list_add(pgd);
-+		spin_unlock_irqrestore(&pgd_lock, flags);
-+	}
-+
-+	return pgd;
-+
-+out_oom:
-+	for (i--; i >= 0; i--)
-+		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-+	kmem_cache_free(pgd_cache, pgd);
-+	return NULL;
-+}
-+
-+void pgd_free(pgd_t *pgd)
-+{
-+	int i;
-+
-+	pgd_test_and_unpin(pgd);
-+
-+	/* in the PAE case user pgd entries are overwritten before usage */
-+	if (PTRS_PER_PMD > 1) {
-+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
-+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+			kmem_cache_free(pmd_cache, pmd);
-+		}
-+		if (!HAVE_SHARED_KERNEL_PMD) {
-+			unsigned long flags;
-+			spin_lock_irqsave(&pgd_lock, flags);
-+			pgd_list_del(pgd);
-+			spin_unlock_irqrestore(&pgd_lock, flags);
-+			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
-+				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
-+				make_lowmem_page_writable(
-+					pmd, XENFEAT_writable_page_tables);
-+				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+				kmem_cache_free(pmd_cache, pmd);
-+			}
-+		}
-+	}
-+	/* in the non-PAE case, free_pgtables() clears user pgd entries */
-+	kmem_cache_free(pgd_cache, pgd);
-+}
-+
-+#ifndef CONFIG_XEN_SHADOW_MODE
-+void make_lowmem_page_readonly(void *va, unsigned int feature)
-+{
-+	pte_t *pte;
-+	int rc;
-+
-+	if (xen_feature(feature))
-+		return;
-+
-+	pte = virt_to_ptep(va);
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)va, pte_wrprotect(*pte), 0);
-+	BUG_ON(rc);
-+}
-+
-+void make_lowmem_page_writable(void *va, unsigned int feature)
-+{
-+	pte_t *pte;
-+	int rc;
-+
-+	if (xen_feature(feature))
-+		return;
-+
-+	pte = virt_to_ptep(va);
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)va, pte_mkwrite(*pte), 0);
-+	BUG_ON(rc);
-+}
-+
-+void make_page_readonly(void *va, unsigned int feature)
-+{
-+	pte_t *pte;
-+	int rc;
-+
-+	if (xen_feature(feature))
-+		return;
-+
-+	pte = virt_to_ptep(va);
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)va, pte_wrprotect(*pte), 0);
-+	if (rc) /* fallback? */
-+		xen_l1_entry_update(pte, pte_wrprotect(*pte));
-+	if ((unsigned long)va >= (unsigned long)high_memory) {
-+		unsigned long pfn = pte_pfn(*pte);
-+#ifdef CONFIG_HIGHMEM
-+		if (pfn >= highstart_pfn)
-+			kmap_flush_unused(); /* flush stale writable kmaps */
-+		else
-+#endif
-+			make_lowmem_page_readonly(
-+				phys_to_virt(pfn << PAGE_SHIFT), feature); 
-+	}
-+}
-+
-+void make_page_writable(void *va, unsigned int feature)
-+{
-+	pte_t *pte;
-+	int rc;
-+
-+	if (xen_feature(feature))
-+		return;
-+
-+	pte = virt_to_ptep(va);
-+	rc = HYPERVISOR_update_va_mapping(
-+		(unsigned long)va, pte_mkwrite(*pte), 0);
-+	if (rc) /* fallback? */
-+		xen_l1_entry_update(pte, pte_mkwrite(*pte));
-+	if ((unsigned long)va >= (unsigned long)high_memory) {
-+		unsigned long pfn = pte_pfn(*pte); 
-+#ifdef CONFIG_HIGHMEM
-+		if (pfn < highstart_pfn)
-+#endif
-+			make_lowmem_page_writable(
-+				phys_to_virt(pfn << PAGE_SHIFT), feature);
-+	}
-+}
-+
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
-+{
-+	if (xen_feature(feature))
-+		return;
-+
-+	while (nr-- != 0) {
-+		make_page_readonly(va, feature);
-+		va = (void *)((unsigned long)va + PAGE_SIZE);
-+	}
-+}
-+
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
-+{
-+	if (xen_feature(feature))
-+		return;
-+
-+	while (nr-- != 0) {
-+		make_page_writable(va, feature);
-+		va = (void *)((unsigned long)va + PAGE_SIZE);
-+	}
-+}
-+#endif /* CONFIG_XEN_SHADOW_MODE */
-+
-+static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
-+{
-+	struct page *page = virt_to_page(pt);
-+	unsigned long pfn = page_to_pfn(page);
-+
-+	if (PageHighMem(page))
-+		return;
-+	BUG_ON(HYPERVISOR_update_va_mapping(
-+		(unsigned long)__va(pfn << PAGE_SHIFT),
-+		pfn_pte(pfn, flags), 0));
-+}
-+
-+static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
-+{
-+	pgd_t *pgd = pgd_base;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	int    g, u, m;
-+
-+	for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
-+		if (pgd_none(*pgd))
-+			continue;
-+		pud = pud_offset(pgd, 0);
-+		if (PTRS_PER_PUD > 1) /* not folded */
-+			pgd_walk_set_prot(pud,flags);
-+		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+			if (pud_none(*pud))
-+				continue;
-+			pmd = pmd_offset(pud, 0);
-+			if (PTRS_PER_PMD > 1) /* not folded */
-+				pgd_walk_set_prot(pmd,flags);
-+			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+				if (pmd_none(*pmd))
-+					continue;
-+				pte = pte_offset_kernel(pmd,0);
-+				pgd_walk_set_prot(pte,flags);
-+			}
-+		}
-+	}
-+
-+	BUG_ON(HYPERVISOR_update_va_mapping(
-+		(unsigned long)pgd_base,
-+		pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
-+		UVMF_TLB_FLUSH));
-+}
-+
-+static void __pgd_pin(pgd_t *pgd)
-+{
-+	pgd_walk(pgd, PAGE_KERNEL_RO);
-+	xen_pgd_pin(__pa(pgd));
-+	set_bit(PG_pinned, &virt_to_page(pgd)->flags);
-+}
-+
-+static void __pgd_unpin(pgd_t *pgd)
-+{
-+	xen_pgd_unpin(__pa(pgd));
-+	pgd_walk(pgd, PAGE_KERNEL);
-+	clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
-+}
-+
-+static void pgd_test_and_unpin(pgd_t *pgd)
-+{
-+	if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
-+		__pgd_unpin(pgd);
-+}
-+
-+void mm_pin(struct mm_struct *mm)
-+{
-+	spin_lock(&mm->page_table_lock);
-+	__pgd_pin(mm->pgd);
-+	spin_unlock(&mm->page_table_lock);
-+}
-+
-+void mm_unpin(struct mm_struct *mm)
-+{
-+	spin_lock(&mm->page_table_lock);
-+	__pgd_unpin(mm->pgd);
-+	spin_unlock(&mm->page_table_lock);
-+}
-+
-+void mm_pin_all(void)
-+{
-+	struct page *page;
-+	for (page = pgd_list; page; page = (struct page *)page->index) {
-+		if (!test_bit(PG_pinned, &page->flags))
-+			__pgd_pin((pgd_t *)page_address(page));
-+	}
-+}
-+
-+void _arch_exit_mmap(struct mm_struct *mm)
-+{
-+	struct task_struct *tsk = current;
-+
-+	task_lock(tsk);
-+
-+	/*
-+	 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-+	 * *much* faster this way, as avoiding TLB flushes means bigger wrpt batches.
-+	 */
-+	if (tsk->active_mm == mm) {
-+		tsk->active_mm = &init_mm;
-+		atomic_inc(&init_mm.mm_count);
-+
-+		switch_mm(mm, &init_mm, tsk);
-+
-+		atomic_dec(&mm->mm_count);
-+		BUG_ON(atomic_read(&mm->mm_count) == 0);
-+	}
-+
-+	task_unlock(tsk);
-+
-+	if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
-+	    (atomic_read(&mm->mm_count) == 1))
-+		mm_unpin(mm);
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/pci/i386.c linux-2.6.12-xen/arch/xen/i386/pci/i386.c
---- pristine-linux-2.6.12/arch/xen/i386/pci/i386.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/pci/i386.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,304 @@
-+/*
-+ *	Low-Level PCI Access for i386 machines
-+ *
-+ * Copyright 1993, 1994 Drew Eckhardt
-+ *      Visionary Computing
-+ *      (Unix and Linux consulting and custom programming)
-+ *      Drew@Colorado.EDU
-+ *      +1 (303) 786-7975
-+ *
-+ * Drew's work was sponsored by:
-+ *	iX Multiuser Multitasking Magazine
-+ *	Hannover, Germany
-+ *	hm@ix.de
-+ *
-+ * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
-+ *
-+ * For more information, please consult the following manuals (look at
-+ * http://www.pcisig.com/ for how to get them):
-+ *
-+ * PCI BIOS Specification
-+ * PCI Local Bus Specification
-+ * PCI to PCI Bridge Specification
-+ * PCI System Design Guide
-+ *
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include <linux/init.h>
-+#include <linux/ioport.h>
-+#include <linux/errno.h>
-+
-+#include "pci.h"
-+
-+/*
-+ * We need to avoid collisions with `mirrored' VGA ports
-+ * and other strange ISA hardware, so we always want the
-+ * addresses to be allocated in the 0x000-0x0ff region
-+ * modulo 0x400.
-+ *
-+ * Why? Because some silly external IO cards only decode
-+ * the low 10 bits of the IO address. The 0x00-0xff region
-+ * is reserved for motherboard devices that decode all 16
-+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
-+ * but we want to try to avoid allocating at 0x2900-0x2bff
-+ * which might have been mirrored at 0x0100-0x03ff..
-+ */
-+void
-+pcibios_align_resource(void *data, struct resource *res,
-+		       unsigned long size, unsigned long align)
-+{
-+	if (res->flags & IORESOURCE_IO) {
-+		unsigned long start = res->start;
-+
-+		if (start & 0x300) {
-+			start = (start + 0x3ff) & ~0x3ff;
-+			res->start = start;
-+		}
-+	}
-+}
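In other words, pcibios_align_resource() leaves starts whose offset within a 1 KiB block is below 0x100 alone and rounds everything else up to the next 1 KiB boundary, keeping I/O allocations in the 0x000-0x0ff region modulo 0x400. A small user-space sketch of that arithmetic (align_io_start() is an invented name, illustration only):

#include <stdio.h>

/* Round an I/O start up to the next 0x400 boundary whenever bits 8-9 of
 * the address are set, mirroring the check in pcibios_align_resource(). */
static unsigned long align_io_start(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;
	return start;
}

int main(void)
{
	printf("0x2910 -> 0x%lx\n", align_io_start(0x2910UL));	/* 0x2c00 */
	printf("0x2800 -> 0x%lx\n", align_io_start(0x2800UL));	/* unchanged */
	return 0;
}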
-+
-+
-+/*
-+ *  Handle resources of PCI devices.  If the world were perfect, we could
-+ *  just allocate all the resource regions and do nothing more.  It isn't.
-+ *  On the other hand, we cannot just re-allocate all devices, as it would
-+ *  require us to know lots of host bridge internals.  So we attempt to
-+ *  keep as much of the original configuration as possible, but tweak it
-+ *  when it's found to be wrong.
-+ *
-+ *  Known BIOS problems we have to work around:
-+ *	- I/O or memory regions not configured
-+ *	- regions configured, but not enabled in the command register
-+ *	- bogus I/O addresses above 64K used
-+ *	- expansion ROMs left enabled (this may sound harmless, but given
-+ *	  the fact the PCI specs explicitly allow address decoders to be
-+ *	  shared between expansion ROMs and other resource regions, it's
-+ *	  at least dangerous)
-+ *
-+ *  Our solution:
-+ *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
-+ *	    This gives us fixed barriers on where we can allocate.
-+ *	(2) Allocate resources for all enabled devices.  If there is
-+ *	    a collision, just mark the resource as unallocated. Also
-+ *	    disable expansion ROMs during this step.
-+ *	(3) Try to allocate resources for disabled devices.  If the
-+ *	    resources were assigned correctly, everything goes well,
-+ *	    if they weren't, they won't disturb allocation of other
-+ *	    resources.
-+ *	(4) Assign new addresses to resources which were either
-+ *	    not configured at all or misconfigured.  If explicitly
-+ *	    requested by the user, configure expansion ROM address
-+ *	    as well.
-+ */
-+
-+static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
-+{
-+	struct pci_bus *bus;
-+	struct pci_dev *dev;
-+	int idx;
-+	struct resource *r, *pr;
-+
-+	/* Depth-First Search on bus tree */
-+	list_for_each_entry(bus, bus_list, node) {
-+		if ((dev = bus->self)) {
-+			for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
-+				r = &dev->resource[idx];
-+				if (!r->start)
-+					continue;
-+				pr = pci_find_parent_resource(dev, r);
-+				if (!pr || request_resource(pr, r) < 0)
-+					printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
-+			}
-+		}
-+		pcibios_allocate_bus_resources(&bus->children);
-+	}
-+}
-+
-+static void __init pcibios_allocate_resources(int pass)
-+{
-+	struct pci_dev *dev = NULL;
-+	int idx, disabled;
-+	u16 command;
-+	struct resource *r, *pr;
-+
-+	for_each_pci_dev(dev) {
-+		pci_read_config_word(dev, PCI_COMMAND, &command);
-+		for(idx = 0; idx < 6; idx++) {
-+			r = &dev->resource[idx];
-+			if (r->parent)		/* Already allocated */
-+				continue;
-+			if (!r->start)		/* Address not assigned at all */
-+				continue;
-+			if (r->flags & IORESOURCE_IO)
-+				disabled = !(command & PCI_COMMAND_IO);
-+			else
-+				disabled = !(command & PCI_COMMAND_MEMORY);
-+			if (pass == disabled) {
-+				DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
-+				    r->start, r->end, r->flags, disabled, pass);
-+				pr = pci_find_parent_resource(dev, r);
-+				if (!pr || request_resource(pr, r) < 0) {
-+					printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
-+					/* We'll assign a new address later */
-+					r->end -= r->start;
-+					r->start = 0;
-+				}
-+			}
-+		}
-+		if (!pass) {
-+			r = &dev->resource[PCI_ROM_RESOURCE];
-+			if (r->flags & IORESOURCE_ROM_ENABLE) {
-+				/* Turn the ROM off, leave the resource region, but keep it unregistered. */
-+				u32 reg;
-+				DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
-+				r->flags &= ~IORESOURCE_ROM_ENABLE;
-+				pci_read_config_dword(dev, dev->rom_base_reg, &reg);
-+				pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
-+			}
-+		}
-+	}
-+}
-+
-+static int __init pcibios_assign_resources(void)
-+{
-+	struct pci_dev *dev = NULL;
-+	int idx;
-+	struct resource *r;
-+
-+	for_each_pci_dev(dev) {
-+		int class = dev->class >> 8;
-+
-+		/* Don't touch classless devices and host bridges */
-+		if (!class || class == PCI_CLASS_BRIDGE_HOST)
-+			continue;
-+
-+		for(idx=0; idx<6; idx++) {
-+			r = &dev->resource[idx];
-+
-+			/*
-+			 *  Don't touch IDE controllers and I/O ports of video cards!
-+			 */
-+			if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
-+			    (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
-+				continue;
-+
-+			/*
-+			 *  We shall assign a new address to this resource, either because
-+			 *  the BIOS forgot to do so or because we have decided the old
-+			 *  address was unusable for some reason.
-+			 */
-+			if (!r->start && r->end)
-+				pci_assign_resource(dev, idx);
-+		}
-+
-+		if (pci_probe & PCI_ASSIGN_ROMS) {
-+			r = &dev->resource[PCI_ROM_RESOURCE];
-+			r->end -= r->start;
-+			r->start = 0;
-+			if (r->end)
-+				pci_assign_resource(dev, PCI_ROM_RESOURCE);
-+		}
-+	}
-+	return 0;
-+}
-+
-+void __init pcibios_resource_survey(void)
-+{
-+	DBG("PCI: Allocating resources\n");
-+	pcibios_allocate_bus_resources(&pci_root_buses);
-+	pcibios_allocate_resources(0);
-+	pcibios_allocate_resources(1);
-+}
-+
-+/**
-+ * called in fs_initcall (one below subsys_initcall),
-+ * give a chance for motherboard reserve resources
-+ */
-+fs_initcall(pcibios_assign_resources);
-+
-+int pcibios_enable_resources(struct pci_dev *dev, int mask)
-+{
-+	u16 cmd, old_cmd;
-+	int idx;
-+	struct resource *r;
-+
-+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
-+	old_cmd = cmd;
-+	for(idx=0; idx<6; idx++) {
-+		/* Only set up the requested stuff */
-+		if (!(mask & (1<<idx)))
-+			continue;
-+
-+		r = &dev->resource[idx];
-+		if (!r->start && r->end) {
-+			printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
-+			return -EINVAL;
-+		}
-+		if (r->flags & IORESOURCE_IO)
-+			cmd |= PCI_COMMAND_IO;
-+		if (r->flags & IORESOURCE_MEM)
-+			cmd |= PCI_COMMAND_MEMORY;
-+	}
-+	if (dev->resource[PCI_ROM_RESOURCE].start)
-+		cmd |= PCI_COMMAND_MEMORY;
-+	if (cmd != old_cmd) {
-+		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
-+		pci_write_config_word(dev, PCI_COMMAND, cmd);
-+	}
-+	return 0;
-+}
-+
-+/*
-+ *  If we set up a device for bus mastering, we need to check the latency
-+ *  timer as certain crappy BIOSes forget to set it properly.
-+ */
-+unsigned int pcibios_max_latency = 255;
-+
-+void pcibios_set_master(struct pci_dev *dev)
-+{
-+	u8 lat;
-+	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
-+	if (lat < 16)
-+		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
-+	else if (lat > pcibios_max_latency)
-+		lat = pcibios_max_latency;
-+	else
-+		return;
-+	printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
-+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
-+}
-+
-+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-+			enum pci_mmap_state mmap_state, int write_combine)
-+{
-+	unsigned long prot;
-+
-+	/* I/O space cannot be accessed via normal processor loads and
-+	 * stores on this platform.
-+	 */
-+	if (mmap_state == pci_mmap_io)
-+		return -EINVAL;
-+
-+	/* Leave vm_pgoff as-is, the PCI space address is the physical
-+	 * address on this platform.
-+	 */
-+	vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
-+
-+	prot = pgprot_val(vma->vm_page_prot);
-+	if (boot_cpu_data.x86 > 3)
-+		prot |= _PAGE_PCD | _PAGE_PWT;
-+	vma->vm_page_prot = __pgprot(prot);
-+
-+	/* Write-combine setting is ignored, it is changed via the mtrr
-+	 * interfaces on this platform.
-+	 */
-+	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-+				   vma->vm_end - vma->vm_start,
-+				   vma->vm_page_prot, DOMID_IO))
-+		return -EAGAIN;
-+
-+	return 0;
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/pci/irq.c linux-2.6.12-xen/arch/xen/i386/pci/irq.c
---- pristine-linux-2.6.12/arch/xen/i386/pci/irq.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/pci/irq.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1120 @@
-+/*
-+ *	Low-Level PCI Support for PC -- Routing of Interrupts
-+ *
-+ *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/pci.h>
-+#include <linux/init.h>
-+#include <linux/slab.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/dmi.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/io_apic.h>
-+#include <asm/hw_irq.h>
-+#include <linux/acpi.h>
-+
-+#include "pci.h"
-+
-+#define PIRQ_SIGNATURE	(('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
-+#define PIRQ_VERSION 0x0100
-+
-+static int broken_hp_bios_irq9;
-+static int acer_tm360_irqrouting;
-+
-+static struct irq_routing_table *pirq_table;
-+
-+static int pirq_enable_irq(struct pci_dev *dev);
-+
-+/*
-+ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
-+ * Avoid using: 13, 14 and 15 (FP error and IDE).
-+ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
-+ */
-+unsigned int pcibios_irq_mask = 0xfff8;
-+
-+static int pirq_penalty[16] = {
-+	1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
-+	0, 0, 0, 0, 1000, 100000, 100000, 100000
-+};
-+
-+struct irq_router {
-+	char *name;
-+	u16 vendor, device;
-+	int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
-+	int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
-+};
-+
-+struct irq_router_handler {
-+	u16 vendor;
-+	int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
-+};
-+
-+int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
-+
-+/*
-+ *  Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
-+ */
-+
-+static struct irq_routing_table * __init pirq_find_routing_table(void)
-+{
-+	u8 *addr;
-+	struct irq_routing_table *rt;
-+	int i;
-+	u8 sum;
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+	for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
-+		rt = (struct irq_routing_table *) addr;
-+		if (rt->signature != PIRQ_SIGNATURE ||
-+		    rt->version != PIRQ_VERSION ||
-+		    rt->size % 16 ||
-+		    rt->size < sizeof(struct irq_routing_table))
-+			continue;
-+		sum = 0;
-+		for(i=0; i<rt->size; i++)
-+			sum += addr[i];
-+		if (!sum) {
-+			DBG("PCI: Interrupt Routing Table found at 0x%p\n", rt);
-+			return rt;
-+		}
-+	}
-+#endif
-+	
-+	return NULL;
-+}
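The scan above only accepts a candidate $PIR table when every byte of the structure sums to zero modulo 256, i.e. the BIOS picks the checksum byte so that the whole table cancels out. A tiny user-space sketch of producing and verifying such a checksum (the 16-byte toy table and its layout are invented; the real table keeps its checksum at a fixed header offset):

#include <stdio.h>

static unsigned char make_checksum(const unsigned char *p, unsigned int len)
{
	unsigned char sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		sum += p[i];
	return (unsigned char)(0x100 - sum);	/* makes the total wrap to 0 */
}

static int checksum_ok(const unsigned char *p, unsigned int len)
{
	unsigned char sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		sum += p[i];
	return sum == 0;			/* same test as the table scan */
}

int main(void)
{
	unsigned char table[16] = { '$', 'P', 'I', 'R', 0x00, 0x01, 16, 0 };

	table[15] = make_checksum(table, 15);	/* last byte acts as the checksum */
	printf("checksum byte = 0x%02x, valid = %d\n",
	       table[15], checksum_ok(table, 16));
	return 0;
}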
-+
-+/*
-+ *  If we have a IRQ routing table, use it to search for peer host
-+ *  bridges.  It's a gross hack, but since there are no other known
-+ *  ways how to get a list of buses, we have to go this way.
-+ */
-+
-+static void __init pirq_peer_trick(void)
-+{
-+	struct irq_routing_table *rt = pirq_table;
-+	u8 busmap[256];
-+	int i;
-+	struct irq_info *e;
-+
-+	memset(busmap, 0, sizeof(busmap));
-+	for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
-+		e = &rt->slots[i];
-+#ifdef DEBUG
-+		{
-+			int j;
-+			DBG("%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
-+			for(j=0; j<4; j++)
-+				DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
-+			DBG("\n");
-+		}
-+#endif
-+		busmap[e->bus] = 1;
-+	}
-+	for(i = 1; i < 256; i++) {
-+		if (!busmap[i] || pci_find_bus(0, i))
-+			continue;
-+		if (pci_scan_bus(i, &pci_root_ops, NULL))
-+			printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
-+	}
-+	pcibios_last_bus = -1;
-+}
-+
-+/*
-+ *  Code for querying and setting of IRQ routes on various interrupt routers.
-+ */
-+
-+void eisa_set_level_irq(unsigned int irq)
-+{
-+	unsigned char mask = 1 << (irq & 7);
-+	unsigned int port = 0x4d0 + (irq >> 3);
-+	unsigned char val;
-+	static u16 eisa_irq_mask;
-+
-+	if (irq >= 16 || (1 << irq) & eisa_irq_mask)
-+		return;
-+
-+	eisa_irq_mask |= (1 << irq);
-+	printk("PCI: setting IRQ %u as level-triggered\n", irq);
-+	val = inb(port);
-+	if (!(val & mask)) {
-+		DBG(" -> edge");
-+		outb(val | mask, port);
-+	}
-+}
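The two ports touched above are the ELCR (edge/level control) registers: 0x4d0 carries one bit per IRQ for lines 0-7 and 0x4d1 for lines 8-15, with bit (irq & 7) selecting the line and a set bit meaning level-triggered. A quick sketch of just the port/mask arithmetic (no real port I/O, illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int irq;

	/* Same arithmetic as eisa_set_level_irq(): which ELCR port and
	 * which bit describe a given ISA IRQ. */
	for (irq = 0; irq < 16; irq++)
		printf("IRQ %2u -> port 0x%x, mask 0x%02x\n",
		       irq, 0x4d0 + (irq >> 3), 1u << (irq & 7));
	return 0;
}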
-+
-+/*
-+ * Common IRQ routing practice: nybbles in config space,
-+ * offset by some magic constant.
-+ */
-+static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
-+{
-+	u8 x;
-+	unsigned reg = offset + (nr >> 1);
-+
-+	pci_read_config_byte(router, reg, &x);
-+	return (nr & 1) ? (x >> 4) : (x & 0xf);
-+}
-+
-+static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
-+{
-+	u8 x;
-+	unsigned reg = offset + (nr >> 1);
-+
-+	pci_read_config_byte(router, reg, &x);
-+	x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
-+	pci_write_config_byte(router, reg, x);
-+}
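Most of the router-specific get/set routines below pack one 4-bit IRQ value per link, two links per config byte, which is exactly what these two nybble helpers implement: even-numbered entries use the low nybble, odd-numbered ones the high nybble. A user-space sketch of that packing, with a plain array standing in for the router's config space (the 0x55 offset is only an example):

#include <stdio.h>

static unsigned char cfg[256];	/* stand-in for the router's config space */

static unsigned int read_nybble(unsigned int offset, unsigned int nr)
{
	unsigned char x = cfg[offset + (nr >> 1)];

	return (nr & 1) ? (x >> 4) : (x & 0xf);
}

static void write_nybble(unsigned int offset, unsigned int nr, unsigned int val)
{
	unsigned char *x = &cfg[offset + (nr >> 1)];

	*x = (nr & 1) ? ((*x & 0x0f) | (val << 4)) : ((*x & 0xf0) | val);
}

int main(void)
{
	write_nybble(0x55, 0, 0xb);	/* link 0 -> IRQ 11, low nybble  */
	write_nybble(0x55, 1, 0x5);	/* link 1 -> IRQ 5,  high nybble */
	printf("config byte 0x55 = 0x%02x\n", cfg[0x55]);	/* 0x5b */
	printf("link 0 = %u, link 1 = %u\n",
	       read_nybble(0x55, 0), read_nybble(0x55, 1));
	return 0;
}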
-+
-+/*
-+ * ALI pirq entries are damn ugly, and completely undocumented.
-+ * This has been figured out from pirq tables, and it's not a pretty
-+ * picture.
-+ */
-+static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
-+
-+	return irqmap[read_config_nybble(router, 0x48, pirq-1)];
-+}
-+
-+static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
-+	unsigned int val = irqmap[irq];
-+		
-+	if (val) {
-+		write_config_nybble(router, 0x48, pirq-1, val);
-+		return 1;
-+	}
-+	return 0;
-+}
-+
-+/*
-+ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
-+ * just a pointer to the config space.
-+ */
-+static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	u8 x;
-+
-+	pci_read_config_byte(router, pirq, &x);
-+	return (x < 16) ? x : 0;
-+}
-+
-+static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	pci_write_config_byte(router, pirq, irq);
-+	return 1;
-+}
-+
-+/*
-+ * The VIA pirq rules are nibble-based, like ALI,
-+ * but without the ugly irq number munging.
-+ * However, PIRQD is in the upper instead of lower 4 bits.
-+ */
-+static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
-+}
-+
-+static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
-+	return 1;
-+}
-+
-+/*
-+ * ITE 8330G pirq rules are nibble-based
-+ * FIXME: pirqmap may be { 1, 0, 3, 2 },
-+ * 	  2+3 are both mapped to irq 9 on my system
-+ */
-+static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
-+	return read_config_nybble(router,0x43, pirqmap[pirq-1]);
-+}
-+
-+static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
-+	write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
-+	return 1;
-+}
-+
-+/*
-+ * OPTI: high four bits are nibble pointer..
-+ * I wonder what the low bits do?
-+ */
-+static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	return read_config_nybble(router, 0xb8, pirq >> 4);
-+}
-+
-+static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	write_config_nybble(router, 0xb8, pirq >> 4, irq);
-+	return 1;
-+}
-+
-+/*
-+ * Cyrix: nibble offset 0x5C
-+ * 0x5C bits 7:4 is INTB bits 3:0 is INTA 
-+ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
-+ */
-+static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	return read_config_nybble(router, 0x5C, (pirq-1)^1);
-+}
-+
-+static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
-+	return 1;
-+}
-+
-+/*
-+ *	PIRQ routing for SiS 85C503 router used in several SiS chipsets.
-+ *	We have to deal with the following issues here:
-+ *	- vendors have different ideas about the meaning of link values
-+ *	- some onboard devices (integrated in the chipset) have special
-+ *	  links and are thus routed differently (i.e. not via PCI INTA-INTD)
-+ *	- different revision of the router have a different layout for
-+ *	  the routing registers, particularly for the onchip devices
-+ *
-+ *	For all routing registers the common thing is we have one byte
-+ *	per routeable link which is defined as:
-+ *		 bit 7      IRQ mapping enabled (0) or disabled (1)
-+ *		 bits [6:4] reserved (sometimes used for onchip devices)
-+ *		 bits [3:0] IRQ to map to
-+ *		     allowed: 3-7, 9-12, 14-15
-+ *		     reserved: 0, 1, 2, 8, 13
-+ *
-+ *	The config-space registers located at 0x41/0x42/0x43/0x44 are
-+ *	always used to route the normal PCI INT A/B/C/D respectively.
-+ *	Apparently there are systems implementing PCI routing table using
-+ *	link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
-+ *	We try our best to handle both link mappings.
-+ *	
-+ *	Currently (2003-05-21) it appears most SiS chipsets follow the
-+ *	definition of routing registers from the SiS-5595 southbridge.
-+ *	According to the SiS 5595 datasheets the revision id's of the
-+ *	router (ISA-bridge) should be 0x01 or 0xb0.
-+ *
-+ *	Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
-+ *	Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
-+ *	They seem to work with the current routing code. However there is
-+ *	some concern because of the two USB-OHCI HCs (original SiS 5595
-+ *	had only one). YMMV.
-+ *
-+ *	Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
-+ *
-+ *	0x61:	IDEIRQ:
-+ *		bits [6:5] must be written 01
-+ *		bit 4 channel-select primary (0), secondary (1)
-+ *
-+ *	0x62:	USBIRQ:
-+ *		bit 6 OHCI function disabled (0), enabled (1)
-+ *	
-+ *	0x6a:	ACPI/SCI IRQ: bits 4-6 reserved
-+ *
-+ *	0x7e:	Data Acq. Module IRQ - bits 4-6 reserved
-+ *
-+ *	We support USBIRQ (in addition to INTA-INTD) and keep the
-+ *	IDE, ACPI and DAQ routing untouched as set by the BIOS.
-+ *
-+ *	Currently the only reported exception is the new SiS 65x chipset
-+ *	which includes the SiS 69x southbridge. Here we have the 85C503
-+ *	router revision 0x04 and there are changes in the register layout
-+ *	mostly related to the different USB HCs with USB 2.0 support.
-+ *
-+ *	Onchip routing for router rev-id 0x04 (try-and-error observation)
-+ *
-+ *	0x60/0x61/0x62/0x63:	1xEHCI and 3xOHCI (companion) USB-HCs
-+ *				bit 6-4 are probably unused, not like 5595
-+ */
-+
-+#define PIRQ_SIS_IRQ_MASK	0x0f
-+#define PIRQ_SIS_IRQ_DISABLE	0x80
-+#define PIRQ_SIS_USB_ENABLE	0x40
-+
-+static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	u8 x;
-+	int reg;
-+
-+	reg = pirq;
-+	if (reg >= 0x01 && reg <= 0x04)
-+		reg += 0x40;
-+	pci_read_config_byte(router, reg, &x);
-+	return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
-+}
-+
-+static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	u8 x;
-+	int reg;
-+
-+	reg = pirq;
-+	if (reg >= 0x01 && reg <= 0x04)
-+		reg += 0x40;
-+	pci_read_config_byte(router, reg, &x);
-+	x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
-+	x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE;
-+	pci_write_config_byte(router, reg, x);
-+	return 1;
-+}
-+
-+
-+/*
-+ * VLSI: nibble offset 0x74 - educated guess due to routing table and
-+ *       config space of VLSI 82C534 PCI-bridge/router (1004:0102)
-+ *       Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
-+ *       devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
-+ *       for the busbridge to the docking station.
-+ */
-+
-+static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	if (pirq > 8) {
-+		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
-+		return 0;
-+	}
-+	return read_config_nybble(router, 0x74, pirq-1);
-+}
-+
-+static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	if (pirq > 8) {
-+		printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
-+		return 0;
-+	}
-+	write_config_nybble(router, 0x74, pirq-1, irq);
-+	return 1;
-+}
-+
-+/*
-+ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
-+ * and Redirect I/O registers (0x0c00 and 0x0c01).  The Index register
-+ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a.  The Redirect
-+ * register is a straight binary coding of desired PIC IRQ (low nibble).
-+ *
-+ * The 'link' value in the PIRQ table is already in the correct format
-+ * for the Index register.  There are some special index values:
-+ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
-+ * and 0x03 for SMBus.
-+ */
-+static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	outb_p(pirq, 0xc00);
-+	return inb(0xc01) & 0xf;
-+}
-+
-+static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	outb_p(pirq, 0xc00);
-+	outb_p(irq, 0xc01);
-+	return 1;
-+}
-+
-+/* Support for AMD756 PCI IRQ Routing
-+ * Jhon H. Caicedo <jhcaiced at osso.org.co>
-+ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
-+ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
-+ * The AMD756 pirq rules are nibble-based
-+ * offset 0x56 0-3 PIRQA  4-7  PIRQB
-+ * offset 0x57 0-3 PIRQC  4-7  PIRQD
-+ */
-+static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
-+{
-+	u8 irq;
-+	irq = 0;
-+	if (pirq <= 4)
-+	{
-+		irq = read_config_nybble(router, 0x56, pirq - 1);
-+	}
-+	printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
-+		dev->vendor, dev->device, pirq, irq);
-+	return irq;
-+}
-+
-+static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n", 
-+		dev->vendor, dev->device, pirq, irq);
-+	if (pirq <= 4)
-+	{
-+		write_config_nybble(router, 0x56, pirq - 1, irq);
-+	}
-+	return 1;
-+}
-+
-+#ifdef CONFIG_PCI_BIOS
-+
-+static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
-+{
-+	struct pci_dev *bridge;
-+	int pin = pci_get_interrupt_pin(dev, &bridge);
-+	return pcibios_set_irq_routing(bridge, pin, irq);
-+}
-+
-+#endif
-+
-+static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	static struct pci_device_id pirq_440gx[] = {
-+		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
-+		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
-+		{ },
-+	};
-+
-+	/* 440GX has a proprietary PIRQ router -- don't use it */
-+	if (pci_dev_present(pirq_440gx))
-+		return 0;
-+
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_INTEL_82371FB_0:
-+		case PCI_DEVICE_ID_INTEL_82371SB_0:
-+		case PCI_DEVICE_ID_INTEL_82371AB_0:
-+		case PCI_DEVICE_ID_INTEL_82371MX:
-+		case PCI_DEVICE_ID_INTEL_82443MX_0:
-+		case PCI_DEVICE_ID_INTEL_82801AA_0:
-+		case PCI_DEVICE_ID_INTEL_82801AB_0:
-+		case PCI_DEVICE_ID_INTEL_82801BA_0:
-+		case PCI_DEVICE_ID_INTEL_82801BA_10:
-+		case PCI_DEVICE_ID_INTEL_82801CA_0:
-+		case PCI_DEVICE_ID_INTEL_82801CA_12:
-+		case PCI_DEVICE_ID_INTEL_82801DB_0:
-+		case PCI_DEVICE_ID_INTEL_82801E_0:
-+		case PCI_DEVICE_ID_INTEL_82801EB_0:
-+		case PCI_DEVICE_ID_INTEL_ESB_1:
-+		case PCI_DEVICE_ID_INTEL_ICH6_0:
-+		case PCI_DEVICE_ID_INTEL_ICH6_1:
-+		case PCI_DEVICE_ID_INTEL_ICH7_0:
-+		case PCI_DEVICE_ID_INTEL_ICH7_1:
-+		case PCI_DEVICE_ID_INTEL_ICH7_30:
-+		case PCI_DEVICE_ID_INTEL_ICH7_31:
-+		case PCI_DEVICE_ID_INTEL_ESB2_0:
-+			r->name = "PIIX/ICH";
-+			r->get = pirq_piix_get;
-+			r->set = pirq_piix_set;
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	/* FIXME: We should move some of the quirk fixup stuff here */
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_VIA_82C586_0:
-+		case PCI_DEVICE_ID_VIA_82C596:
-+		case PCI_DEVICE_ID_VIA_82C686:
-+		case PCI_DEVICE_ID_VIA_8231:
-+		/* FIXME: add new ones for 8233/5 */
-+			r->name = "VIA";
-+			r->get = pirq_via_get;
-+			r->set = pirq_via_set;
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_VLSI_82C534:
-+			r->name = "VLSI 82C534";
-+			r->get = pirq_vlsi_get;
-+			r->set = pirq_vlsi_set;
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+
-+static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_SERVERWORKS_OSB4:
-+		case PCI_DEVICE_ID_SERVERWORKS_CSB5:
-+			r->name = "ServerWorks";
-+			r->get = pirq_serverworks_get;
-+			r->set = pirq_serverworks_set;
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	if (device != PCI_DEVICE_ID_SI_503)
-+		return 0;
-+		
-+	r->name = "SIS";
-+	r->get = pirq_sis_get;
-+	r->set = pirq_sis_set;
-+	return 1;
-+}
-+
-+static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_CYRIX_5520:
-+			r->name = "NatSemi";
-+			r->get = pirq_cyrix_get;
-+			r->set = pirq_cyrix_set;
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_OPTI_82C700:
-+			r->name = "OPTI";
-+			r->get = pirq_opti_get;
-+			r->set = pirq_opti_set;
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_ITE_IT8330G_0:
-+			r->name = "ITE";
-+			r->get = pirq_ite_get;
-+			r->set = pirq_ite_set;
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+	case PCI_DEVICE_ID_AL_M1533:
-+	case PCI_DEVICE_ID_AL_M1563:
-+		printk("PCI: Using ALI IRQ Router\n");
-+		r->name = "ALI";
-+		r->get = pirq_ali_get;
-+		r->set = pirq_ali_set;
-+		return 1;
-+	}
-+	return 0;
-+}
-+
-+static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
-+{
-+	switch(device)
-+	{
-+		case PCI_DEVICE_ID_AMD_VIPER_740B:
-+			r->name = "AMD756";
-+			break;
-+		case PCI_DEVICE_ID_AMD_VIPER_7413:
-+			r->name = "AMD766";
-+			break;
-+		case PCI_DEVICE_ID_AMD_VIPER_7443:
-+			r->name = "AMD768";
-+			break;
-+		default:
-+			return 0;
-+	}
-+	r->get = pirq_amd756_get;
-+	r->set = pirq_amd756_set;
-+	return 1;
-+}
-+		
-+static __initdata struct irq_router_handler pirq_routers[] = {
-+	{ PCI_VENDOR_ID_INTEL, intel_router_probe },
-+	{ PCI_VENDOR_ID_AL, ali_router_probe },
-+	{ PCI_VENDOR_ID_ITE, ite_router_probe },
-+	{ PCI_VENDOR_ID_VIA, via_router_probe },
-+	{ PCI_VENDOR_ID_OPTI, opti_router_probe },
-+	{ PCI_VENDOR_ID_SI, sis_router_probe },
-+	{ PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
-+	{ PCI_VENDOR_ID_VLSI, vlsi_router_probe },
-+	{ PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
-+	{ PCI_VENDOR_ID_AMD, amd_router_probe },
-+	/* Someone with docs needs to add the ATI Radeon IGP */
-+	{ 0, NULL }
-+};
-+static struct irq_router pirq_router;
-+static struct pci_dev *pirq_router_dev;
-+
-+
-+/*
-+ *	FIXME: should we have an option to say "generic for
-+ *	chipset" ?
-+ */
-+ 
-+static void __init pirq_find_router(struct irq_router *r)
-+{
-+	struct irq_routing_table *rt = pirq_table;
-+	struct irq_router_handler *h;
-+
-+#ifdef CONFIG_PCI_BIOS
-+	if (!rt->signature) {
-+		printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
-+		r->set = pirq_bios_set;
-+		r->name = "BIOS";
-+		return;
-+	}
-+#endif
-+
-+	/* Default unless a driver reloads it */
-+	r->name = "default";
-+	r->get = NULL;
-+	r->set = NULL;
-+	
-+	DBG("PCI: Attempting to find IRQ router for %04x:%04x\n",
-+	    rt->rtr_vendor, rt->rtr_device);
-+
-+	pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
-+	if (!pirq_router_dev) {
-+		DBG("PCI: Interrupt router not found at %02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
-+		return;
-+	}
-+
-+	for( h = pirq_routers; h->vendor; h++) {
-+		/* First look for a router match */
-+		if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
-+			break;
-+		/* Fall back to a device match */
-+		if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
-+			break;
-+	}
-+	printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
-+		pirq_router.name,
-+		pirq_router_dev->vendor,
-+		pirq_router_dev->device,
-+		pci_name(pirq_router_dev));
-+}
-+
-+static struct irq_info *pirq_get_info(struct pci_dev *dev)
-+{
-+	struct irq_routing_table *rt = pirq_table;
-+	int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
-+	struct irq_info *info;
-+
-+	for (info = rt->slots; entries--; info++)
-+		if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
-+			return info;
-+	return NULL;
-+}
-+
-+static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
-+{
-+	u8 pin;
-+	struct irq_info *info;
-+	int i, pirq, newirq;
-+	int irq = 0;
-+	u32 mask;
-+	struct irq_router *r = &pirq_router;
-+	struct pci_dev *dev2 = NULL;
-+	char *msg = NULL;
-+
-+	/* Find IRQ pin */
-+	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+	if (!pin) {
-+		DBG(" -> no interrupt pin\n");
-+		return 0;
-+	}
-+	pin = pin - 1;
-+
-+	/* Find IRQ routing entry */
-+
-+	if (!pirq_table)
-+		return 0;
-+	
-+	DBG("IRQ for %s[%c]", pci_name(dev), 'A' + pin);
-+	info = pirq_get_info(dev);
-+	if (!info) {
-+		DBG(" -> not found in routing table\n");
-+		return 0;
-+	}
-+	pirq = info->irq[pin].link;
-+	mask = info->irq[pin].bitmap;
-+	if (!pirq) {
-+		DBG(" -> not routed\n");
-+		return 0;
-+	}
-+	DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
-+	mask &= pcibios_irq_mask;
-+
-+	/* Work around broken HP Pavilion Notebooks which assign USB to
-+	   IRQ 9 even though it is actually wired to IRQ 11 */
-+
-+	if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
-+		dev->irq = 11;
-+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
-+		r->set(pirq_router_dev, dev, pirq, 11);
-+	}
-+
-+	/* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
-+	if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
-+		pirq = 0x68;
-+		mask = 0x400;
-+		dev->irq = r->get(pirq_router_dev, dev, pirq);
-+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
-+	}
-+
-+	/*
-+	 * Find the best IRQ to assign: use the one
-+	 * reported by the device if possible.
-+	 */
-+	newirq = dev->irq;
-+	if (!((1 << newirq) & mask)) {
-+		if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
-+		else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev));
-+	}
-+	if (!newirq && assign) {
-+		for (i = 0; i < 16; i++) {
-+			if (!(mask & (1 << i)))
-+				continue;
-+			if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, SA_SHIRQ))
-+				newirq = i;
-+		}
-+	}
-+	DBG(" -> newirq=%d", newirq);
-+
-+	/* Check if it is hardcoded */
-+	if ((pirq & 0xf0) == 0xf0) {
-+		irq = pirq & 0xf;
-+		DBG(" -> hardcoded IRQ %d\n", irq);
-+		msg = "Hardcoded";
-+	} else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
-+	((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
-+		DBG(" -> got IRQ %d\n", irq);
-+		msg = "Found";
-+	} else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
-+		DBG(" -> assigning IRQ %d", newirq);
-+		if (r->set(pirq_router_dev, dev, pirq, newirq)) {
-+			eisa_set_level_irq(newirq);
-+			DBG(" ... OK\n");
-+			msg = "Assigned";
-+			irq = newirq;
-+		}
-+	}
-+
-+	if (!irq) {
-+		DBG(" ... failed\n");
-+		if (newirq && mask == (1 << newirq)) {
-+			msg = "Guessed";
-+			irq = newirq;
-+		} else
-+			return 0;
-+	}
-+	printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
-+
-+	/* Update IRQ for all devices with the same pirq value */
-+	while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
-+		pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
-+		if (!pin)
-+			continue;
-+		pin--;
-+		info = pirq_get_info(dev2);
-+		if (!info)
-+			continue;
-+		if (info->irq[pin].link == pirq) {
-+			/* We refuse to override the dev->irq information. Give a warning! */
-+		    	if ( dev2->irq && dev2->irq != irq && \
-+			(!(pci_probe & PCI_USE_PIRQ_MASK) || \
-+			((1 << dev2->irq) & mask)) ) {
-+#ifndef CONFIG_PCI_MSI
-+		    		printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
-+				       pci_name(dev2), dev2->irq, irq);
-+#endif
-+		    		continue;
-+		    	}
-+			dev2->irq = irq;
-+			pirq_penalty[irq]++;
-+			if (dev != dev2)
-+				printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
-+		}
-+	}
-+	return 1;
-+}
-+
-+static void __init pcibios_fixup_irqs(void)
-+{
-+	struct pci_dev *dev = NULL;
-+	u8 pin;
-+
-+	DBG("PCI: IRQ fixup\n");
-+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-+		/*
-+		 * If the BIOS has set an out of range IRQ number, just ignore it.
-+		 * Also keep track of which IRQ's are already in use.
-+		 */
-+		if (dev->irq >= 16) {
-+			DBG("%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
-+			dev->irq = 0;
-+		}
-+		/* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
-+		if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
-+			pirq_penalty[dev->irq] = 0;
-+		pirq_penalty[dev->irq]++;
-+	}
-+
-+	dev = NULL;
-+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-+		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+#ifdef CONFIG_X86_IO_APIC
-+		/*
-+		 * Recalculate IRQ numbers if we use the I/O APIC.
-+		 */
-+		if (io_apic_assign_pci_irqs)
-+		{
-+			int irq;
-+
-+			if (pin) {
-+				pin--;		/* interrupt pins are numbered starting from 1 */
-+				irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
-+	/*
-+	 * Busses behind bridges are typically not listed in the MP-table.
-+	 * In this case we have to look up the IRQ based on the parent bus,
-+	 * parent slot, and pin number. The SMP code detects such bridged
-+	 * busses itself so we should get into this branch reliably.
-+	 */
-+				if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
-+					struct pci_dev * bridge = dev->bus->self;
-+
-+					pin = (pin + PCI_SLOT(dev->devfn)) % 4;
-+					irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 
-+							PCI_SLOT(bridge->devfn), pin);
-+					if (irq >= 0)
-+						printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
-+							pci_name(bridge), 'A' + pin, irq);
-+				}
-+				if (irq >= 0) {
-+					if (use_pci_vector() &&
-+						!platform_legacy_irq(irq))
-+						irq = IO_APIC_VECTOR(irq);
-+
-+					printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
-+						pci_name(dev), 'A' + pin, irq);
-+					dev->irq = irq;
-+				}
-+			}
-+		}
-+#endif
-+		/*
-+		 * Still no IRQ? Try to lookup one...
-+		 */
-+		if (pin && !dev->irq)
-+			pcibios_lookup_irq(dev, 0);
-+	}
-+}
-+
-+/*
-+ * Work around broken HP Pavilion Notebooks which assign USB to
-+ * IRQ 9 even though it is actually wired to IRQ 11
-+ */
-+static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
-+{
-+	if (!broken_hp_bios_irq9) {
-+		broken_hp_bios_irq9 = 1;
-+		printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
-+	}
-+	return 0;
-+}
-+
-+/*
-+ * Work around broken Acer TravelMate 360 Notebooks which assign
-+ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
-+ */
-+static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
-+{
-+	if (!acer_tm360_irqrouting) {
-+		acer_tm360_irqrouting = 1;
-+		printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
-+	}
-+	return 0;
-+}
-+
-+static struct dmi_system_id __initdata pciirq_dmi_table[] = {
-+	{
-+		.callback = fix_broken_hp_bios_irq9,
-+		.ident = "HP Pavilion N5400 Series Laptop",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-+			DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
-+			DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
-+			DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
-+		},
-+	},
-+	{
-+		.callback = fix_acer_tm360_irqrouting,
-+		.ident = "Acer TravelMate 36x Laptop",
-+		.matches = {
-+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-+			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
-+		},
-+	},
-+	{ }
-+};
-+
-+static int __init pcibios_irq_init(void)
-+{
-+	DBG("PCI: IRQ init\n");
-+
-+	if (pcibios_enable_irq || raw_pci_ops == NULL)
-+		return 0;
-+
-+	dmi_check_system(pciirq_dmi_table);
-+
-+	pirq_table = pirq_find_routing_table();
-+
-+#ifdef CONFIG_PCI_BIOS
-+	if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
-+		pirq_table = pcibios_get_irq_routing_table();
-+#endif
-+	if (pirq_table) {
-+		pirq_peer_trick();
-+		pirq_find_router(&pirq_router);
-+		if (pirq_table->exclusive_irqs) {
-+			int i;
-+			for (i=0; i<16; i++)
-+				if (!(pirq_table->exclusive_irqs & (1 << i)))
-+					pirq_penalty[i] += 100;
-+		}
-+		/* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
-+		if (io_apic_assign_pci_irqs)
-+			pirq_table = NULL;
-+	}
-+
-+	pcibios_enable_irq = pirq_enable_irq;
-+
-+	pcibios_fixup_irqs();
-+	return 0;
-+}
-+
-+subsys_initcall(pcibios_irq_init);
-+
-+
-+static void pirq_penalize_isa_irq(int irq)
-+{
-+	/*
-+	 *  If any ISAPnP device reports an IRQ in its list of possible
-+	 *  IRQ's, we try to avoid assigning it to PCI devices.
-+	 */
-+	if (irq < 16)
-+		pirq_penalty[irq] += 100;
-+}
-+
-+void pcibios_penalize_isa_irq(int irq)
-+{
-+#ifdef CONFIG_ACPI_PCI
-+	if (!acpi_noirq)
-+		acpi_penalize_isa_irq(irq);
-+	else
-+#endif
-+		pirq_penalize_isa_irq(irq);
-+}
-+
-+static int pirq_enable_irq(struct pci_dev *dev)
-+{
-+	u8 pin;
-+	struct pci_dev *temp_dev;
-+
-+	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-+	if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
-+		char *msg = "";
-+
-+		pin--;		/* interrupt pins are numbered starting from 1 */
-+
-+		if (io_apic_assign_pci_irqs) {
-+			int irq;
-+
-+			irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
-+			/*
-+			 * Busses behind bridges are typically not listed in the MP-table.
-+			 * In this case we have to look up the IRQ based on the parent bus,
-+			 * parent slot, and pin number. The SMP code detects such bridged
-+			 * busses itself so we should get into this branch reliably.
-+			 */
-+			temp_dev = dev;
-+			while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
-+				struct pci_dev * bridge = dev->bus->self;
-+
-+				pin = (pin + PCI_SLOT(dev->devfn)) % 4;
-+				irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 
-+						PCI_SLOT(bridge->devfn), pin);
-+				if (irq >= 0)
-+					printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
-+						pci_name(bridge), 'A' + pin, irq);
-+				dev = bridge;
-+			}
-+			dev = temp_dev;
-+			if (irq >= 0) {
-+#ifdef CONFIG_PCI_MSI
-+				if (!platform_legacy_irq(irq))
-+					irq = IO_APIC_VECTOR(irq);
-+#endif
-+				printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
-+					pci_name(dev), 'A' + pin, irq);
-+				dev->irq = irq;
-+				return 0;
-+			} else
-+				msg = " Probably buggy MP table.";
-+		} else if (pci_probe & PCI_BIOS_IRQ_SCAN)
-+			msg = "";
-+		else
-+			msg = " Please try using pci=biosirq.";
-+
-+		/* With IDE legacy devices, the IRQ lookup failure is not a problem. */
-+		if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
-+			return 0;
-+
-+		printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
-+		       'A' + pin, pci_name(dev), msg);
-+	}
-+	return 0;
-+}
-+
-+int pci_vector_resources(int last, int nr_released)
-+{
-+	int count = nr_released;
-+
-+	int next = last;
-+	int offset = (last % 8);
-+
-+	while (next < FIRST_SYSTEM_VECTOR) {
-+		next += 8;
-+#ifdef CONFIG_X86_64
-+		if (next == IA32_SYSCALL_VECTOR)
-+			continue;
-+#else
-+		if (next == SYSCALL_VECTOR)
-+			continue;
-+#endif
-+		count++;
-+		if (next >= FIRST_SYSTEM_VECTOR) {
-+			if (offset%8) {
-+				next = FIRST_DEVICE_VECTOR + offset;
-+				offset++;
-+				continue;
-+			}
-+			count--;
-+		}
-+	}
-+
-+	return count;
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/i386/pci/Makefile linux-2.6.12-xen/arch/xen/i386/pci/Makefile
---- pristine-linux-2.6.12/arch/xen/i386/pci/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/i386/pci/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,33 @@
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+CFLAGS	+= -Iarch/$(XENARCH)/pci
-+
-+obj-y				:= i386.o
-+
-+#c-obj-$(CONFIG_PCI_BIOS)		+= pcbios.o
-+c-obj-$(CONFIG_PCI_MMCONFIG)	+= mmconfig.o
-+c-obj-$(CONFIG_PCI_DIRECT)	+= direct.o
-+
-+c-pci-y				:= fixup.o
-+c-pci-$(CONFIG_ACPI_PCI)	+= acpi.o
-+c-pci-y				+= legacy.o
-+# Make sure irq.o gets linked in after legacy.o
-+l-pci-y				+= irq.o
-+
-+c-pci-$(CONFIG_X86_VISWS)	:= visws.o fixup.o
-+pci-$(CONFIG_X86_VISWS)		:=
-+c-pci-$(CONFIG_X86_NUMAQ)	:= numa.o
-+l-pci-$(CONFIG_X86_NUMAQ)	:= irq.o
-+
-+obj-y				+= $(pci-y)
-+c-obj-y				+= $(c-pci-y) common.o
-+
-+c-link	:=
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-+	@ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
-+
-+# Make sure irq.o gets linked in before common.o
-+obj-y	+= $(patsubst common.o,$(l-pci-y) common.o,$(c-obj-y))
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-diff -Nurp pristine-linux-2.6.12/arch/xen/Kconfig linux-2.6.12-xen/arch/xen/Kconfig
---- pristine-linux-2.6.12/arch/xen/Kconfig	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/Kconfig	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,212 @@
-+#
-+# For a description of the syntax of this configuration file,
-+# see Documentation/kbuild/kconfig-language.txt.
-+#
-+
-+mainmenu "Linux Kernel Configuration"
-+
-+config XEN
-+	bool
-+	default y
-+	help
-+	  This is the Linux Xen port.
-+
-+config ARCH_XEN
-+	bool
-+	default y
-+
-+
-+config NO_IDLE_HZ
-+	bool
-+	default y
-+
-+
-+menu "XEN"
-+
-+config XEN_PRIVILEGED_GUEST
-+	bool "Privileged Guest (domain 0)"
-+	default n
-+	select XEN_PHYSDEV_ACCESS
-+	help
-+	  Support for privileged operation (domain 0)
-+
-+config XEN_PHYSDEV_ACCESS
-+	bool "Physical device access"
-+	default XEN_PRIVILEGED_GUEST
-+	help
-+	  Assume access is available to physical hardware devices
-+	  (e.g., hard drives, network cards). This allows you to configure
-+	  such devices and also includes some low-level support that is
-+	  otherwise not compiled into the kernel.
-+
-+config XEN_BLKDEV_BACKEND
-+	bool "Block-device backend driver"
-+	depends on XEN_PHYSDEV_ACCESS
-+	default y
-+	help
-+	  The block-device backend driver allows the kernel to export its
-+	  block devices to other guests via a high-performance shared-memory
-+	  interface.
-+
-+config XEN_BLKDEV_TAP_BE
-+        bool "Block Tap support for backend driver (DANGEROUS)"
-+        depends on XEN_BLKDEV_BACKEND
-+        default n
-+        help
-+          If you intend to use the block tap driver, the backend domain will
-+          not know the domain id of the real frontend, and so will not be able
-+          to map its data pages.  This modifies the backend to attempt to map
-+          from both the tap domain and the real frontend.  This presents a
-+          security risk, and so should ONLY be used for development
-+          with the blktap.  This option will be removed as the block drivers are
-+          modified to use grant tables.
-+
-+config XEN_NETDEV_BACKEND
-+	bool "Network-device backend driver"
-+	depends on XEN_PHYSDEV_ACCESS
-+	default y
-+	help
-+	  The network-device backend driver allows the kernel to export its
-+	  network devices to other guests via a high-performance shared-memory
-+	  interface.
-+
-+config XEN_NETDEV_PIPELINED_TRANSMITTER
-+	bool "Pipelined transmitter (DANGEROUS)"
-+	depends on XEN_NETDEV_BACKEND
-+	default n
-+	help
-+	  If the net backend is a dumb domain, such as a transparent Ethernet
-+	  bridge with no local IP interface, it is safe to say Y here to get
-+	  slightly lower network overhead.
-+	  If the backend has a local IP interface; or may be doing smart things
-+	  like reassembling packets to perform firewall filtering; or if you
-+	  are unsure; or if you experience network hangs when this option is
-+	  enabled; then you must say N here.
-+
-+config XEN_TPMDEV_FRONTEND
-+        bool "TPM-device frontend driver"
-+        default n
-+	select TCG_TPM
-+	select TCG_XEN
-+        help
-+          The TPM-device frontend driver.
-+
-+config XEN_TPMDEV_BACKEND
-+        bool "TPM-device backend driver"
-+        default n
-+        help
-+          The TPM-device backend driver.
-+
-+config XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
-+        bool "TPM backend closes upon vTPM failure"
-+        depends on XEN_TPMDEV_BACKEND
-+        default n
-+        help
-+          The TPM backend closes the channel if the vTPM in userspace indicates
-+          a failure. The corresponding domain's channel will be closed.
-+          Say Y if you want this feature.
-+
-+config XEN_BLKDEV_FRONTEND
-+	tristate "Block-device frontend driver"
-+	default y
-+	help
-+	  The block-device frontend driver allows the kernel to access block
-+	  devices mounted within another guest OS. Unless you are building a
-+	  dedicated device-driver domain, or your master control domain
-+	  (domain 0), you almost certainly want to say Y here.
-+
-+config XEN_NETDEV_FRONTEND
-+	tristate "Network-device frontend driver"
-+	default y
-+	help
-+	  The network-device frontend driver allows the kernel to access
-+	  network interfaces within another guest OS. Unless you are building a
-+	  dedicated device-driver domain, or your master control domain
-+	  (domain 0), you almost certainly want to say Y here.
-+
-+config XEN_BLKDEV_TAP
-+	bool "Block device tap driver"
-+	default n
-+	help
-+	  This driver allows a VM to interact on block device channels
-+	  to other VMs.  Block messages may be passed through or redirected
-+	  to a character device, allowing device prototyping in application
-+	  space.  Odds are that you want to say N here.
-+
-+config XEN_SHADOW_MODE
-+	bool "Fake shadow mode"
-+	default n
-+	help
-+	  Fakes out a shadow mode kernel.
-+
-+
-+config XEN_SCRUB_PAGES
-+	bool "Scrub memory before freeing it to Xen"
-+	default y
-+	help
-+	  Erase memory contents before freeing it back to Xen's global
-+	  pool. This ensures that any secrets contained within that
-+	  memory (e.g., private keys) cannot be found by other guests that
-+	  may be running on the machine. Most people will want to say Y here.
-+	  If security is not a concern then you may increase performance by
-+	  saying N.
-+
-+choice
-+	prompt "Processor Type"
-+	default XEN_X86
-+
-+config XEN_X86
-+	bool "X86"
-+	help
-+	  Choose this option if your computer is an X86 system.
-+
-+config XEN_X86_64
-+	bool "X86_64"
-+	help
-+	  Choose this option if your computer is an X86_64 system.
-+
-+endchoice
-+
-+endmenu
-+
-+config HAVE_ARCH_ALLOC_SKB
-+	bool
-+	default y
-+
-+config HAVE_ARCH_DEV_ALLOC_SKB
-+	bool
-+	default y
-+
-+source "init/Kconfig"
-+
-+if XEN_X86
-+source "arch/xen/i386/Kconfig"
-+endif
-+
-+if XEN_X86_64
-+source "arch/xen/x86_64/Kconfig"
-+endif
-+
-+menu "Executable file formats"
-+
-+source "fs/Kconfig.binfmt"
-+
-+endmenu
-+
-+source "arch/xen/Kconfig.drivers"
-+
-+if XEN_PRIVILEGED_GUEST
-+menu "Power management options"
-+source "drivers/acpi/Kconfig"
-+endmenu
-+endif
-+
-+source "fs/Kconfig"
-+
-+source "security/Kconfig"
-+
-+source "crypto/Kconfig"
-+
-+source "lib/Kconfig"
-+
-+source "arch/xen/Kconfig.debug"
-diff -Nurp pristine-linux-2.6.12/arch/xen/Kconfig.debug linux-2.6.12-xen/arch/xen/Kconfig.debug
---- pristine-linux-2.6.12/arch/xen/Kconfig.debug	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/Kconfig.debug	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,129 @@
-+menu "Kernel hacking"
-+
-+source "lib/Kconfig.debug"
-+
-+# X86
-+config EARLY_PRINTK
-+	bool "Early printk" if EMBEDDED && DEBUG_KERNEL
-+	default y
-+	depends on X86
-+	help
-+	  Write kernel log output directly into the VGA buffer or to a serial
-+	  port.
-+
-+	  This is useful for kernel debugging when your machine crashes very
-+	  early before the console code is initialized. For normal operation
-+	  it is not recommended because it looks ugly and doesn't cooperate
-+	  with klogd/syslogd or the X server. You should normally say N here,
-+	  unless you want to debug such a crash.
-+
-+config DEBUG_STACKOVERFLOW
-+	bool "Check for stack overflows"
-+	depends on DEBUG_KERNEL && X86
-+
-+config KPROBES
-+	bool "Kprobes"
-+	depends on DEBUG_KERNEL && X86
-+	help
-+	  Kprobes allows you to trap at almost any kernel address and
-+	  execute a callback function.  register_kprobe() establishes
-+	  a probepoint and specifies the callback.  Kprobes is useful
-+	  for kernel debugging, non-intrusive instrumentation and testing.
-+	  If in doubt, say "N".
-+
-+config DEBUG_STACK_USAGE
-+	bool "Stack utilization instrumentation"
-+	depends on DEBUG_KERNEL && X86
-+	help
-+	  Enables the display of the minimum amount of free stack which each
-+	  task has ever had available in the sysrq-T and sysrq-P debug output.
-+
-+	  This option will slow down process creation somewhat.
-+
-+comment "Page alloc debug is incompatible with Software Suspend on i386"
-+	depends on DEBUG_KERNEL && SOFTWARE_SUSPEND && X86
-+
-+config DEBUG_PAGEALLOC
-+	bool "Page alloc debugging"
-+	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && X86
-+	help
-+	  Unmap pages from the kernel linear mapping after free_pages().
-+	  This results in a large slowdown, but helps to find certain types
-+	  of memory corruptions.
-+
-+config 4KSTACKS
-+	bool "Use 4Kb for kernel stacks instead of 8Kb"
-+	depends on DEBUG_KERNEL && X86
-+	help
-+	  If you say Y here the kernel will use a 4Kb stacksize for the
-+	  kernel stack attached to each process/thread. This facilitates
-+	  running more threads on a system and also reduces the pressure
-+	  on the VM subsystem for higher order allocations. This option
-+	  will also use IRQ stacks to compensate for the reduced stackspace.
-+
-+config X86_FIND_SMP_CONFIG
-+	bool
-+	depends on X86_LOCAL_APIC || X86_VOYAGER && X86
-+	default y
-+
-+config X86_MPPARSE
-+	bool
-+	depends on X86_LOCAL_APIC && !X86_VISWS && X86
-+	default y
-+
-+# X86_64
-+
-+# !SMP for now because the context switch early causes GPF in segment reloading
-+# and the GS base checking does the wrong thing then, causing a hang.
-+config CHECKING
-+	bool "Additional run-time checks"
-+	depends on DEBUG_KERNEL && !SMP && X86_64
-+	help
-+	  Enables some internal consistency checks for kernel debugging.
-+	  You should normally say N.
-+
-+config INIT_DEBUG
-+	bool "Debug __init statements"
-+	depends on DEBUG_KERNEL && X86_64
-+	help
-+	  Fill __init and __initdata at the end of boot. This helps debugging
-+	  illegal uses of __init and __initdata after initialization.
-+
-+config IOMMU_DEBUG
-+       depends on GART_IOMMU && DEBUG_KERNEL && X86_64
-+       bool "Enable IOMMU debugging"
-+       help
-+         Force the IOMMU to on even when you have less than 4GB of
-+         Force the IOMMU on even when you have less than 4GB of
-+	 memory, and add debugging code. On overflow it always panics, and
-+	 it allows IOMMU leak tracing to be enabled. Can be disabled at boot
-+	 time with iommu=noforce. This will also enable scatter-gather
-+	 list merging.  Currently not recommended for production
-+	 code. When you use it, make sure you have a big enough
-+	 IOMMU/AGP aperture.  Most of the options enabled by this can
-+	 be set more fine-grained using the iommu= command line
-+	 options. See Documentation/x86_64/boot-options.txt for more
-+
-+config IOMMU_LEAK
-+       bool "IOMMU leak tracing"
-+       depends on DEBUG_KERNEL && X86_64
-+       depends on IOMMU_DEBUG
-+       help
-+         Add a simple leak tracer to the IOMMU code. This is useful when you
-+	 are debugging a buggy device driver that leaks IOMMU mappings.
-+
-+#config X86_REMOTE_DEBUG
-+#       bool "kgdb debugging stub"
-+
-+# X86 & X86_64
-+config KPROBES
-+	bool "Kprobes"
-+	depends on DEBUG_KERNEL
-+	help
-+	  Kprobes allows you to trap at almost any kernel address and
-+	  execute a callback function.  register_kprobe() establishes
-+	  a probepoint and specifies the callback.  Kprobes is useful
-+	  for kernel debugging, non-intrusive instrumentation and testing.
-+	  If in doubt, say "N".
-+
-+endmenu
-diff -Nurp pristine-linux-2.6.12/arch/xen/Kconfig.drivers linux-2.6.12-xen/arch/xen/Kconfig.drivers
---- pristine-linux-2.6.12/arch/xen/Kconfig.drivers	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/Kconfig.drivers	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,98 @@
-+# arch/xen/Kconfig.drivers
-+
-+menu "Device Drivers"
-+
-+source "drivers/base/Kconfig"
-+
-+if XEN_PHYSDEV_ACCESS
-+source "drivers/mtd/Kconfig"
-+source "drivers/parport/Kconfig"
-+source "drivers/pnp/Kconfig"
-+endif
-+
-+source "drivers/block/Kconfig"
-+
-+if XEN_PHYSDEV_ACCESS
-+source "drivers/ide/Kconfig"
-+endif
-+
-+source "drivers/scsi/Kconfig"
-+
-+if XEN_PHYSDEV_ACCESS
-+source "drivers/cdrom/Kconfig"
-+endif
-+
-+source "drivers/md/Kconfig"
-+
-+if XEN_PHYSDEV_ACCESS
-+source "drivers/message/fusion/Kconfig"
-+source "drivers/ieee1394/Kconfig"
-+source "drivers/message/i2o/Kconfig"
-+endif
-+
-+source "net/Kconfig"
-+
-+if XEN_PHYSDEV_ACCESS
-+source "drivers/isdn/Kconfig"
-+source "drivers/telephony/Kconfig"
-+source "drivers/input/Kconfig"
-+source "drivers/char/Kconfig"
-+source "drivers/i2c/Kconfig"
-+source "drivers/w1/Kconfig"
-+source "drivers/misc/Kconfig"
-+source "drivers/media/Kconfig"
-+source "drivers/video/Kconfig"
-+source "sound/Kconfig"
-+source "drivers/usb/Kconfig"
-+source "drivers/mmc/Kconfig"
-+source "drivers/infiniband/Kconfig"
-+endif
-+
-+if !XEN_PHYSDEV_ACCESS
-+source "drivers/char/tpm/Kconfig.domU"
-+endif
-+
-+if !XEN_PHYSDEV_ACCESS
-+
-+menu "Character devices"
-+
-+config UNIX98_PTYS
-+	bool
-+	default y
-+
-+config LEGACY_PTYS
-+	bool "Legacy (BSD) PTY support"
-+	default y
-+	---help---
-+	  A pseudo terminal (PTY) is a software device consisting of two
-+	  halves: a master and a slave. The slave device behaves identically to
-+	  a physical terminal; the master device is used by a process to
-+	  read data from and write data to the slave, thereby emulating a
-+	  terminal. Typical programs for the master side are telnet servers
-+	  and xterms.
-+
-+	  Linux has traditionally used the BSD-like names /dev/ptyxx
-+	  for masters and /dev/ttyxx for slaves of pseudo
-+	  terminals. This scheme has a number of problems, including
-+	  security.  This option enables these legacy devices; on most
-+	  systems, it is safe to say N.
-+
-+
-+config LEGACY_PTY_COUNT
-+	int "Maximum number of legacy PTYs in use"
-+	depends on LEGACY_PTYS
-+	range 1 256
-+	default "256"
-+	---help---
-+	  The maximum number of legacy PTYs that can be used at any one time.
-+	  The default is 256, and should be more than enough.  Embedded
-+	  systems may want to reduce this to save memory.
-+
-+	  When not in use, each legacy PTY occupies 12 bytes on 32-bit
-+	  architectures and 24 bytes on 64-bit architectures.
-+
-+endmenu
-+
-+endif
-+
-+endmenu
-diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/evtchn.c linux-2.6.12-xen/arch/xen/kernel/evtchn.c
---- pristine-linux-2.6.12/arch/xen/kernel/evtchn.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/kernel/evtchn.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,820 @@
-+/******************************************************************************
-+ * evtchn.c
-+ * 
-+ * Communication via Xen event channels.
-+ * 
-+ * Copyright (c) 2002-2005, K A Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/version.h>
-+#include <asm/atomic.h>
-+#include <asm/system.h>
-+#include <asm/ptrace.h>
-+#include <asm/synch_bitops.h>
-+#include <asm-xen/xen-public/event_channel.h>
-+#include <asm-xen/xen-public/physdev.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/evtchn.h>
-+#include <linux/mc146818rtc.h> /* RTC_IRQ */
-+
-+/*
-+ * This lock protects updates to the following mapping and reference-count
-+ * arrays. The lock does not need to be acquired to read the mapping tables.
-+ */
-+static spinlock_t irq_mapping_update_lock;
-+
-+/* IRQ <-> event-channel mappings. */
-+static int evtchn_to_irq[NR_EVENT_CHANNELS];
-+
-+/* Packed IRQ information: binding type, sub-type index, and event channel. */
-+static u32 irq_info[NR_IRQS];
-+/* Binding types. */
-+enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
-+/* Constructor for packed IRQ information. */
-+#define mk_irq_info(type, index, evtchn)				\
-+	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
-+/* Convenient shorthand for packed representation of an unbound IRQ. */
-+#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
-+/* Accessor macros for packed IRQ information. */
-+#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
-+#define index_from_irq(irq)  ((u8)(irq_info[irq] >> 16))
-+#define type_from_irq(irq)   ((u8)(irq_info[irq] >> 24))
-+
-+/* IRQ <-> VIRQ mapping. */
-+DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
-+
-+/* IRQ <-> IPI mapping. */
-+#ifndef NR_IPIS
-+#define NR_IPIS 1 
-+#endif
-+DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-+
-+/* Reference counts for bindings to IRQs. */
-+static int irq_bindcount[NR_IRQS];
-+
-+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
-+static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
-+
-+#ifdef CONFIG_SMP
-+
-+static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-+static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
-+
-+#define active_evtchns(cpu,sh,idx)		\
-+	((sh)->evtchn_pending[idx] &		\
-+	 cpu_evtchn_mask[cpu][idx] &		\
-+	 ~(sh)->evtchn_mask[idx])
-+
-+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
-+{
-+	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
-+	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
-+	cpu_evtchn[chn] = cpu;
-+}
-+
-+static void init_evtchn_cpu_bindings(void)
-+{
-+	/* By default all event channels notify CPU#0. */
-+	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-+	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
-+}
-+
-+#define cpu_from_evtchn(evtchn)		(cpu_evtchn[evtchn])
-+
-+#else
-+
-+#define active_evtchns(cpu,sh,idx)		\
-+	((sh)->evtchn_pending[idx] &		\
-+	 ~(sh)->evtchn_mask[idx])
-+#define bind_evtchn_to_cpu(chn,cpu)	((void)0)
-+#define init_evtchn_cpu_bindings()	((void)0)
-+#define cpu_from_evtchn(evtchn)		(0)
-+
-+#endif
-+
-+/* Upcall to generic IRQ layer. */
-+#ifdef CONFIG_X86
-+extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
-+#if defined (__i386__)
-+#define IRQ_REG orig_eax
-+#elif defined (__x86_64__)
-+#define IRQ_REG orig_rax
-+#endif
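-+/*
-+ * Route event-channel interrupts through the generic do_IRQ() by stashing
-+ * the IRQ number in the saved orig_eax/orig_rax slot, where the generic
-+ * handler expects to find it.
-+ */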
-+#define do_IRQ(irq, regs) do {			\
-+	(regs)->IRQ_REG = (irq);		\
-+	do_IRQ((regs));				\
-+} while (0)
-+#endif
-+
-+/* Xen will never allocate port zero for any purpose. */
-+#define VALID_EVTCHN(chn)	((chn) != 0)
-+
-+/*
-+ * Force a proper event-channel callback from Xen after clearing the
-+ * callback mask. We do this in a very simple manner, by making a call
-+ * down into Xen. The pending flag will be checked by Xen on return.
-+ */
-+void force_evtchn_callback(void)
-+{
-+	(void)HYPERVISOR_xen_version(0, NULL);
-+}
-+EXPORT_SYMBOL(force_evtchn_callback);
-+
-+/* NB. Interrupts are disabled on entry. */
-+asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
-+{
-+	unsigned long  l1, l2;
-+	unsigned int   l1i, l2i, port;
-+	int            irq, cpu = smp_processor_id();
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];
-+
-+	vcpu_info->evtchn_upcall_pending = 0;
-+
-+	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
-+	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
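-+	/*
-+	 * Two-level scan: each set bit in the selector word (l1) identifies
-+	 * a word of the shared evtchn_pending bitmap, and l2 then holds the
-+	 * pending, unmasked events within that word.
-+	 */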
-+	while (l1 != 0) {
-+		l1i = __ffs(l1);
-+		l1 &= ~(1UL << l1i);
-+
-+		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
-+			l2i = __ffs(l2);
-+
-+			port = (l1i * BITS_PER_LONG) + l2i;
-+			if ((irq = evtchn_to_irq[port]) != -1)
-+				do_IRQ(irq, regs);
-+			else
-+				evtchn_device_upcall(port);
-+		}
-+	}
-+}
-+EXPORT_SYMBOL(evtchn_do_upcall);
-+
-+static int find_unbound_irq(void)
-+{
-+	int irq;
-+
-+	for (irq = 0; irq < NR_IRQS; irq++)
-+		if (irq_bindcount[irq] == 0)
-+			break;
-+
-+	if (irq == NR_IRQS)
-+		panic("No available IRQ to bind to: increase NR_IRQS!\n");
-+
-+	return irq;
-+}
-+
-+static int bind_evtchn_to_irq(unsigned int evtchn)
-+{
-+	int irq;
-+
-+	spin_lock(&irq_mapping_update_lock);
-+
-+	if ((irq = evtchn_to_irq[evtchn]) == -1) {
-+		irq = find_unbound_irq();
-+		evtchn_to_irq[evtchn] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
-+	}
-+
-+	irq_bindcount[irq]++;
-+
-+	spin_unlock(&irq_mapping_update_lock);
-+    
-+	return irq;
-+}
-+
-+static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
-+{
-+	evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
-+	int evtchn, irq;
-+
-+	spin_lock(&irq_mapping_update_lock);
-+
-+	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
-+		op.u.bind_virq.virq = virq;
-+		op.u.bind_virq.vcpu = cpu;
-+		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+		evtchn = op.u.bind_virq.port;
-+
-+		irq = find_unbound_irq();
-+		evtchn_to_irq[evtchn] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-+
-+		per_cpu(virq_to_irq, cpu)[virq] = irq;
-+
-+		bind_evtchn_to_cpu(evtchn, cpu);
-+	}
-+
-+	irq_bindcount[irq]++;
-+
-+	spin_unlock(&irq_mapping_update_lock);
-+    
-+	return irq;
-+}
-+
-+static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
-+{
-+	evtchn_op_t op = { .cmd = EVTCHNOP_bind_ipi };
-+	int evtchn, irq;
-+
-+	spin_lock(&irq_mapping_update_lock);
-+
-+	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
-+		op.u.bind_ipi.vcpu = cpu;
-+		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+		evtchn = op.u.bind_ipi.port;
-+
-+		irq = find_unbound_irq();
-+		evtchn_to_irq[evtchn] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-+
-+		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
-+
-+		bind_evtchn_to_cpu(evtchn, cpu);
-+	}
-+
-+	irq_bindcount[irq]++;
-+
-+	spin_unlock(&irq_mapping_update_lock);
-+
-+	return irq;
-+}
-+
-+static void unbind_from_irq(unsigned int irq)
-+{
-+	evtchn_op_t op = { .cmd = EVTCHNOP_close };
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	spin_lock(&irq_mapping_update_lock);
-+
-+	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
-+		op.u.close.port = evtchn;
-+		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+
-+		switch (type_from_irq(irq)) {
-+		case IRQT_VIRQ:
-+			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-+				[index_from_irq(irq)] = -1;
-+			break;
-+		case IRQT_IPI:
-+			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-+				[index_from_irq(irq)] = -1;
-+			break;
-+		default:
-+			break;
-+		}
-+
-+		/* Closed ports are implicitly re-bound to VCPU0. */
-+		bind_evtchn_to_cpu(evtchn, 0);
-+
-+		evtchn_to_irq[evtchn] = -1;
-+		irq_info[irq] = IRQ_UNBOUND;
-+	}
-+
-+	spin_unlock(&irq_mapping_update_lock);
-+}
-+
-+int bind_evtchn_to_irqhandler(
-+	unsigned int evtchn,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+	unsigned int irq;
-+	int retval;
-+
-+	irq = bind_evtchn_to_irq(evtchn);
-+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+	if (retval != 0) {
-+		unbind_from_irq(irq);
-+		return retval;
-+	}
-+
-+	return irq;
-+}
-+EXPORT_SYMBOL(bind_evtchn_to_irqhandler);
-+
-+int bind_virq_to_irqhandler(
-+	unsigned int virq,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+	unsigned int irq;
-+	int retval;
-+
-+	irq = bind_virq_to_irq(virq, cpu);
-+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+	if (retval != 0) {
-+		unbind_from_irq(irq);
-+		return retval;
-+	}
-+
-+	return irq;
-+}
-+EXPORT_SYMBOL(bind_virq_to_irqhandler);
-+
-+int bind_ipi_to_irqhandler(
-+	unsigned int ipi,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id)
-+{
-+	unsigned int irq;
-+	int retval;
-+
-+	irq = bind_ipi_to_irq(ipi, cpu);
-+	retval = request_irq(irq, handler, irqflags, devname, dev_id);
-+	if (retval != 0) {
-+		unbind_from_irq(irq);
-+		return retval;
-+	}
-+
-+	return irq;
-+}
-+EXPORT_SYMBOL(bind_ipi_to_irqhandler);
-+
-+void unbind_from_irqhandler(unsigned int irq, void *dev_id)
-+{
-+	free_irq(irq, dev_id);
-+	unbind_from_irq(irq);
-+}
-+EXPORT_SYMBOL(unbind_from_irqhandler);
-+
-+#ifdef CONFIG_SMP
-+static void do_nothing_function(void *ign)
-+{
-+}
-+#endif
-+
-+/* Rebind an evtchn so that it gets delivered to a specific cpu */
-+static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
-+{
-+	evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
-+	int evtchn;
-+
-+	spin_lock(&irq_mapping_update_lock);
-+
-+	evtchn = evtchn_from_irq(irq);
-+	if (!VALID_EVTCHN(evtchn)) {
-+		spin_unlock(&irq_mapping_update_lock);
-+		return;
-+	}
-+
-+	/* Send future instances of this interrupt to other vcpu. */
-+	op.u.bind_vcpu.port = evtchn;
-+	op.u.bind_vcpu.vcpu = tcpu;
-+
-+	/*
-+	 * If this fails, it usually just indicates that we're dealing with a 
-+	 * virq or IPI channel, which don't actually need to be rebound. Ignore
-+	 * it, but don't do the xenlinux-level rebind in that case.
-+	 */
-+	if (HYPERVISOR_event_channel_op(&op) >= 0)
-+		bind_evtchn_to_cpu(evtchn, tcpu);
-+
-+	spin_unlock(&irq_mapping_update_lock);
-+
-+	/*
-+	 * Now send the new target processor a NOP IPI. When this returns, it
-+	 * will check for any pending interrupts, and so service any that got 
-+	 * delivered to the wrong processor by mistake.
-+	 * 
-+	 * XXX: The only time this is called with interrupts disabled is from
-+	 * the hotplug/hotunplug path. In that case, all cpus are stopped with 
-+	 * interrupts disabled, and the missed interrupts will be picked up
-+	 * when they start again. This is kind of a hack.
-+	 */
-+	if (!irqs_disabled())
-+		smp_call_function(do_nothing_function, NULL, 0, 0);
-+}
-+
-+
-+static void set_affinity_irq(unsigned irq, cpumask_t dest)
-+{
-+	unsigned tcpu = first_cpu(dest);
-+	rebind_irq_to_cpu(irq, tcpu);
-+}
-+
-+/*
-+ * Interface to generic handling in irq.c
-+ */
-+
-+static unsigned int startup_dynirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn))
-+		unmask_evtchn(evtchn);
-+	return 0;
-+}
-+
-+static void shutdown_dynirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn))
-+		mask_evtchn(evtchn);
-+}
-+
-+static void enable_dynirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn))
-+		unmask_evtchn(evtchn);
-+}
-+
-+static void disable_dynirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn))
-+		mask_evtchn(evtchn);
-+}
-+
-+static void ack_dynirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn)) {
-+		mask_evtchn(evtchn);
-+		clear_evtchn(evtchn);
-+	}
-+}
-+
-+static void end_dynirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
-+		unmask_evtchn(evtchn);
-+}
-+
-+static struct hw_interrupt_type dynirq_type = {
-+	"Dynamic-irq",
-+	startup_dynirq,
-+	shutdown_dynirq,
-+	enable_dynirq,
-+	disable_dynirq,
-+	ack_dynirq,
-+	end_dynirq,
-+	set_affinity_irq
-+};
-+
-+static inline void pirq_unmask_notify(int pirq)
-+{
-+	physdev_op_t op;
-+	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
-+		op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
-+		(void)HYPERVISOR_physdev_op(&op);
-+	}
-+}
-+
-+static inline void pirq_query_unmask(int pirq)
-+{
-+	physdev_op_t op;
-+	op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
-+	op.u.irq_status_query.irq = pirq;
-+	(void)HYPERVISOR_physdev_op(&op);
-+	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
-+	if (op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
-+		set_bit(pirq, &pirq_needs_unmask_notify[0]);
-+}
-+
-+/*
-+ * On startup, if there is no action associated with the IRQ then we are
-+ * probing. In this case we should not share with others as it will confuse us.
-+ */
-+#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
-+
-+static unsigned int startup_pirq(unsigned int irq)
-+{
-+	evtchn_op_t op = { .cmd = EVTCHNOP_bind_pirq };
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn))
-+		goto out;
-+
-+	op.u.bind_pirq.pirq  = irq;
-+	/* NB. We are happy to share unless we are probing. */
-+	op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
-+	if (HYPERVISOR_event_channel_op(&op) != 0) {
-+		if ( !probing_irq(irq) )
-+			printk(KERN_INFO "Failed to obtain physical "
-+			       "IRQ %d\n", irq);
-+		return 0;
-+	}
-+	evtchn = op.u.bind_pirq.port;
-+
-+	pirq_query_unmask(irq_to_pirq(irq));
-+
-+	bind_evtchn_to_cpu(evtchn, 0);
-+	evtchn_to_irq[evtchn] = irq;
-+	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
-+
-+ out:
-+	unmask_evtchn(evtchn);
-+	pirq_unmask_notify(irq_to_pirq(irq));
-+
-+	return 0;
-+}
-+
-+static void shutdown_pirq(unsigned int irq)
-+{
-+	evtchn_op_t op = { .cmd = EVTCHNOP_close };
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (!VALID_EVTCHN(evtchn))
-+		return;
-+
-+	mask_evtchn(evtchn);
-+
-+	op.u.close.port = evtchn;
-+	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+
-+	bind_evtchn_to_cpu(evtchn, 0);
-+	evtchn_to_irq[evtchn] = -1;
-+	irq_info[irq] = IRQ_UNBOUND;
-+}
-+
-+static void enable_pirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn)) {
-+		unmask_evtchn(evtchn);
-+		pirq_unmask_notify(irq_to_pirq(irq));
-+	}
-+}
-+
-+static void disable_pirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn))
-+		mask_evtchn(evtchn);
-+}
-+
-+static void ack_pirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn)) {
-+		mask_evtchn(evtchn);
-+		clear_evtchn(evtchn);
-+	}
-+}
-+
-+static void end_pirq(unsigned int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
-+		unmask_evtchn(evtchn);
-+		pirq_unmask_notify(irq_to_pirq(irq));
-+	}
-+}
-+
-+static struct hw_interrupt_type pirq_type = {
-+	"Phys-irq",
-+	startup_pirq,
-+	shutdown_pirq,
-+	enable_pirq,
-+	disable_pirq,
-+	ack_pirq,
-+	end_pirq,
-+	set_affinity_irq
-+};
-+
-+void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
-+{
-+	int evtchn = evtchn_from_irq(i);
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	if (!VALID_EVTCHN(evtchn))
-+		return;
-+	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
-+	synch_set_bit(evtchn, &s->evtchn_pending[0]);
-+}
-+
-+void notify_remote_via_irq(int irq)
-+{
-+	int evtchn = evtchn_from_irq(irq);
-+
-+	if (VALID_EVTCHN(evtchn))
-+		notify_remote_via_evtchn(evtchn);
-+}
-+EXPORT_SYMBOL(notify_remote_via_irq);
-+
-+void mask_evtchn(int port)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	synch_set_bit(port, &s->evtchn_mask[0]);
-+}
-+EXPORT_SYMBOL(mask_evtchn);
-+
-+void unmask_evtchn(int port)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	unsigned int cpu = smp_processor_id();
-+	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
-+
-+	/* Slow path (hypercall) if this is a non-local port. */
-+	if (unlikely(cpu != cpu_from_evtchn(port))) {
-+		evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
-+				   .u.unmask.port = port };
-+		(void)HYPERVISOR_event_channel_op(&op);
-+		return;
-+	}
-+
-+	synch_clear_bit(port, &s->evtchn_mask[0]);
-+
-+	/*
-+	 * The following is basically the equivalent of 'hw_resend_irq'. Just
-+	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
-+	 * masked.
-+	 */
-+	if (synch_test_bit(port, &s->evtchn_pending[0]) && 
-+	    !synch_test_and_set_bit(port / BITS_PER_LONG,
-+				    &vcpu_info->evtchn_pending_sel)) {
-+		vcpu_info->evtchn_upcall_pending = 1;
-+		if (!vcpu_info->evtchn_upcall_mask)
-+			force_evtchn_callback();
-+	}
-+}
-+EXPORT_SYMBOL(unmask_evtchn);
-+
-+void irq_resume(void)
-+{
-+	evtchn_op_t op;
-+	int         cpu, pirq, virq, ipi, irq, evtchn;
-+
-+	init_evtchn_cpu_bindings();
-+
-+	/* New event-channel space is not 'live' yet. */
-+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
-+		mask_evtchn(evtchn);
-+
-+	/* Check that no PIRQs are still bound. */
-+	for (pirq = 0; pirq < NR_PIRQS; pirq++)
-+		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
-+
-+	/* Secondary CPUs must have no VIRQ or IPI bindings. */
-+	for (cpu = 1; cpu < NR_CPUS; cpu++) {
-+		for (virq = 0; virq < NR_VIRQS; virq++)
-+			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
-+		for (ipi = 0; ipi < NR_IPIS; ipi++)
-+			BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
-+	}
-+
-+	/* No IRQ <-> event-channel mappings. */
-+	for (irq = 0; irq < NR_IRQS; irq++)
-+		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
-+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
-+		evtchn_to_irq[evtchn] = -1;
-+
-+	/* Primary CPU: rebind VIRQs automatically. */
-+	for (virq = 0; virq < NR_VIRQS; virq++) {
-+		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
-+			continue;
-+
-+		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
-+
-+		/* Get a new binding from Xen. */
-+		memset(&op, 0, sizeof(op));
-+		op.cmd              = EVTCHNOP_bind_virq;
-+		op.u.bind_virq.virq = virq;
-+		op.u.bind_virq.vcpu = 0;
-+		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+		evtchn = op.u.bind_virq.port;
-+        
-+		/* Record the new mapping. */
-+		evtchn_to_irq[evtchn] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
-+
-+		/* Ready for use. */
-+		unmask_evtchn(evtchn);
-+	}
-+
-+	/* Primary CPU: rebind IPIs automatically. */
-+	for (ipi = 0; ipi < NR_IPIS; ipi++) {
-+		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
-+			continue;
-+
-+		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
-+
-+		/* Get a new binding from Xen. */
-+		memset(&op, 0, sizeof(op));
-+		op.cmd = EVTCHNOP_bind_ipi;
-+		op.u.bind_ipi.vcpu = 0;
-+		BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
-+		evtchn = op.u.bind_ipi.port;
-+        
-+		/* Record the new mapping. */
-+		evtchn_to_irq[evtchn] = irq;
-+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-+
-+		/* Ready for use. */
-+		unmask_evtchn(evtchn);
-+	}
-+}
-+
-+void __init init_IRQ(void)
-+{
-+	int i;
-+	int cpu;
-+
-+	irq_ctx_init(0);
-+
-+	spin_lock_init(&irq_mapping_update_lock);
-+
-+	init_evtchn_cpu_bindings();
-+
-+	/* No VIRQ or IPI bindings. */
-+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+		for (i = 0; i < NR_VIRQS; i++)
-+			per_cpu(virq_to_irq, cpu)[i] = -1;
-+		for (i = 0; i < NR_IPIS; i++)
-+			per_cpu(ipi_to_irq, cpu)[i] = -1;
-+	}
-+
-+	/* No event-channel -> IRQ mappings. */
-+	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-+		evtchn_to_irq[i] = -1;
-+		mask_evtchn(i); /* No event channels are 'live' right now. */
-+	}
-+
-+	/* No IRQ -> event-channel mappings. */
-+	for (i = 0; i < NR_IRQS; i++)
-+		irq_info[i] = IRQ_UNBOUND;
-+
-+	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-+	for (i = 0; i < NR_DYNIRQS; i++) {
-+		irq_bindcount[dynirq_to_irq(i)] = 0;
-+
-+		irq_desc[dynirq_to_irq(i)].status  = IRQ_DISABLED;
-+		irq_desc[dynirq_to_irq(i)].action  = NULL;
-+		irq_desc[dynirq_to_irq(i)].depth   = 1;
-+		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
-+	}
-+
-+	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
-+	for (i = 0; i < NR_PIRQS; i++)
-+	{
-+		irq_bindcount[pirq_to_irq(i)] = 1;
-+
-+#ifdef RTC_IRQ
-+		/* If not domain 0, force our RTC driver to fail its probe. */
-+		if ((i == RTC_IRQ) &&
-+		    !(xen_start_info->flags & SIF_INITDOMAIN))
-+			continue;
-+#endif
-+
-+		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
-+		irq_desc[pirq_to_irq(i)].action  = NULL;
-+		irq_desc[pirq_to_irq(i)].depth   = 1;
-+		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
-+	}
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
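
The resume path above depends on irq_info[] carrying the bound event channel in its low 16 bits, which is why irq_resume() can drop every binding with a single irq_info[irq] &= ~0xFFFF and then re-record it via mk_irq_info(). A small user-space sketch of that packing follows; the event-channel field is implied by the hunk, but the type/index layout chosen here is only an illustrative assumption, since the real mk_irq_info() definition is not part of this excerpt.

/* Illustrative model of the irq_info packing used by irq_resume() above.
 * Only "event channel in the low 16 bits" is implied by the patch
 * (irq_info[irq] &= ~0xFFFF); the type/index field widths below are
 * assumptions chosen for the sketch, not taken from the real header. */
#include <stdio.h>
#include <stdint.h>

#define IRQT_VIRQ 1
#define IRQT_IPI  2

/* hypothetical layout: [31:24] type, [23:16] index, [15:0] event channel */
static uint32_t mk_irq_info(uint8_t type, uint8_t index, uint16_t evtchn)
{
	return ((uint32_t)type << 24) | ((uint32_t)index << 16) | evtchn;
}

static uint16_t info_to_evtchn(uint32_t info)
{
	return info & 0xFFFF;
}

int main(void)
{
	uint32_t info = mk_irq_info(IRQT_VIRQ, 3, 42);

	printf("evtchn = %u\n", (unsigned)info_to_evtchn(info));

	info &= ~0xFFFF;                      /* "zap event-channel binding" */
	printf("after zap, evtchn = %u\n", (unsigned)info_to_evtchn(info));

	info = mk_irq_info(IRQT_VIRQ, 3, 57); /* rebind, as irq_resume() does */
	printf("rebound evtchn = %u\n", (unsigned)info_to_evtchn(info));
	return 0;
}
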
-diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/features.c linux-2.6.12-xen/arch/xen/kernel/features.c
---- pristine-linux-2.6.12/arch/xen/kernel/features.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/kernel/features.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,38 @@
-+/******************************************************************************
-+ * features.c
-+ *
-+ * Xen feature flags.
-+ *
-+ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
-+ */
-+#include <linux/types.h>
-+#include <linux/cache.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/features.h>
-+
-+/* When we rebase to a more recent Linux we can use __read_mostly here. */
-+unsigned long xen_features[XENFEAT_NR_SUBMAPS] __cacheline_aligned;
-+
-+void setup_xen_features(void)
-+{
-+	uint32_t *flags = (uint32_t *)&xen_features[0];
-+	xen_feature_info_t fi;
-+	int i;
-+
-+	for (i=0; i<XENFEAT_NR_SUBMAPS; i++) {
-+		fi.submap_idx = i;
-+		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
-+			break;
-+		flags[i] = fi.submap;
-+	}
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
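
setup_xen_features() above caches each 32-bit feature submap reported by the hypervisor in xen_features[]. The per-flag accessor is not shown in this hunk, but given that storage it can only be a divide/modulo bit test; the sketch below models that, with an invented flag number used purely for the demonstration.

/* Minimal user-space model of testing a flag against the bitmap that
 * setup_xen_features() fills in above.  The accessor below is an
 * assumption consistent with the 32-bit submap storage shown in the
 * patch; the flag number is illustrative, not the real value. */
#include <stdio.h>
#include <stdint.h>

#define XENFEAT_NR_SUBMAPS 1
#define XENFEAT_writable_descriptor_tables 2   /* illustrative flag number */

static uint32_t xen_features_bits[XENFEAT_NR_SUBMAPS]; /* flags[i] = fi.submap */

static int xen_feature_test(unsigned int flag)
{
	return (xen_features_bits[flag / 32] >> (flag % 32)) & 1;
}

int main(void)
{
	/* pretend the hypervisor reported submap 0 with this bit set */
	xen_features_bits[0] = 1u << XENFEAT_writable_descriptor_tables;

	printf("writable descriptor tables: %d\n",
	       xen_feature_test(XENFEAT_writable_descriptor_tables));
	return 0;
}
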
-diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/gnttab.c linux-2.6.12-xen/arch/xen/kernel/gnttab.c
---- pristine-linux-2.6.12/arch/xen/kernel/gnttab.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/kernel/gnttab.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,411 @@
-+/******************************************************************************
-+ * gnttab.c
-+ * 
-+ * Granting foreign access to our memory reservation.
-+ * 
-+ * Copyright (c) 2005, Christopher Clark
-+ * Copyright (c) 2004-2005, K A Fraser
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <asm/pgtable.h>
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm/fixmap.h>
-+#include <asm/uaccess.h>
-+#include <asm-xen/linux-public/privcmd.h>
-+#include <asm-xen/gnttab.h>
-+#include <asm/synch_bitops.h>
-+
-+#if 1
-+#define ASSERT(_p)							      \
-+	if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
-+	#_p , __LINE__, __FILE__); *(int*)0=0; }
-+#else
-+#define ASSERT(_p) ((void)0)
-+#endif
-+
-+#define WPRINTK(fmt, args...)				\
-+	printk(KERN_WARNING "xen_grant: " fmt, ##args)
-+
-+
-+EXPORT_SYMBOL(gnttab_grant_foreign_access);
-+EXPORT_SYMBOL(gnttab_end_foreign_access_ref);
-+EXPORT_SYMBOL(gnttab_end_foreign_access);
-+EXPORT_SYMBOL(gnttab_query_foreign_access);
-+EXPORT_SYMBOL(gnttab_grant_foreign_transfer);
-+EXPORT_SYMBOL(gnttab_end_foreign_transfer_ref);
-+EXPORT_SYMBOL(gnttab_end_foreign_transfer);
-+EXPORT_SYMBOL(gnttab_alloc_grant_references);
-+EXPORT_SYMBOL(gnttab_free_grant_references);
-+EXPORT_SYMBOL(gnttab_free_grant_reference);
-+EXPORT_SYMBOL(gnttab_claim_grant_reference);
-+EXPORT_SYMBOL(gnttab_release_grant_reference);
-+EXPORT_SYMBOL(gnttab_request_free_callback);
-+EXPORT_SYMBOL(gnttab_grant_foreign_access_ref);
-+EXPORT_SYMBOL(gnttab_grant_foreign_transfer_ref);
-+
-+/* External tools reserve first few grant table entries. */
-+#define NR_RESERVED_ENTRIES 8
-+
-+#define NR_GRANT_ENTRIES (NR_GRANT_FRAMES * PAGE_SIZE / sizeof(grant_entry_t))
-+#define GNTTAB_LIST_END (NR_GRANT_ENTRIES + 1)
-+
-+static grant_ref_t gnttab_list[NR_GRANT_ENTRIES];
-+static int gnttab_free_count;
-+static grant_ref_t gnttab_free_head;
-+static spinlock_t gnttab_list_lock = SPIN_LOCK_UNLOCKED;
-+
-+static grant_entry_t *shared;
-+
-+static struct gnttab_free_callback *gnttab_free_callback_list = NULL;
-+
-+static int
-+get_free_entries(int count)
-+{
-+	unsigned long flags;
-+	int ref;
-+	grant_ref_t head;
-+	spin_lock_irqsave(&gnttab_list_lock, flags);
-+	if (gnttab_free_count < count) {
-+		spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+		return -1;
-+	}
-+	ref = head = gnttab_free_head;
-+	gnttab_free_count -= count;
-+	while (count-- > 1)
-+		head = gnttab_list[head];
-+	gnttab_free_head = gnttab_list[head];
-+	gnttab_list[head] = GNTTAB_LIST_END;
-+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+	return ref;
-+}
-+
-+#define get_free_entry() get_free_entries(1)
-+
-+static void
-+do_free_callbacks(void)
-+{
-+	struct gnttab_free_callback *callback, *next;
-+
-+	callback = gnttab_free_callback_list;
-+	gnttab_free_callback_list = NULL;
-+
-+	while (callback != NULL) {
-+		next = callback->next;
-+		if (gnttab_free_count >= callback->count) {
-+			callback->next = NULL;
-+			callback->fn(callback->arg);
-+		} else {
-+			callback->next = gnttab_free_callback_list;
-+			gnttab_free_callback_list = callback;
-+		}
-+		callback = next;
-+	}
-+}
-+
-+static inline void
-+check_free_callbacks(void)
-+{
-+	if (unlikely(gnttab_free_callback_list))
-+		do_free_callbacks();
-+}
-+
-+static void
-+put_free_entry(grant_ref_t ref)
-+{
-+	unsigned long flags;
-+	spin_lock_irqsave(&gnttab_list_lock, flags);
-+	gnttab_list[ref] = gnttab_free_head;
-+	gnttab_free_head = ref;
-+	gnttab_free_count++;
-+	check_free_callbacks();
-+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+
-+/*
-+ * Public grant-issuing interface functions
-+ */
-+
-+int
-+gnttab_grant_foreign_access(domid_t domid, unsigned long frame, int readonly)
-+{
-+	int ref;
-+    
-+	if (unlikely((ref = get_free_entry()) == -1))
-+		return -ENOSPC;
-+
-+	shared[ref].frame = frame;
-+	shared[ref].domid = domid;
-+	wmb();
-+	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
-+
-+	return ref;
-+}
-+
-+void
-+gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
-+				unsigned long frame, int readonly)
-+{
-+	shared[ref].frame = frame;
-+	shared[ref].domid = domid;
-+	wmb();
-+	shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
-+}
-+
-+
-+int
-+gnttab_query_foreign_access(grant_ref_t ref)
-+{
-+	u16 nflags;
-+
-+	nflags = shared[ref].flags;
-+
-+	return (nflags & (GTF_reading|GTF_writing));
-+}
-+
-+int
-+gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
-+{
-+	u16 flags, nflags;
-+
-+	nflags = shared[ref].flags;
-+	do {
-+		if ( (flags = nflags) & (GTF_reading|GTF_writing) ) {
-+			printk(KERN_ALERT "WARNING: g.e. still in use!\n");
-+			return 0;
-+		}
-+	}
-+	while ((nflags = synch_cmpxchg(&shared[ref].flags, flags, 0)) !=
-+	       flags);
-+
-+	return 1;
-+}
-+
-+void
-+gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page)
-+{
-+	if (gnttab_end_foreign_access_ref(ref, readonly)) {
-+		put_free_entry(ref);
-+		if (page != 0) {
-+			free_page(page);
-+		}
-+	}
-+	else {
-+		/* XXX This needs to be fixed so that the ref and page are
-+		   placed on a list to be freed up later. */
-+		printk(KERN_WARNING
-+		       "WARNING: leaking g.e. and page still in use!\n");
-+	}
-+}
-+
-+int
-+gnttab_grant_foreign_transfer(domid_t domid)
-+{
-+	int ref;
-+
-+	if (unlikely((ref = get_free_entry()) == -1))
-+		return -ENOSPC;
-+
-+	shared[ref].frame = 0;
-+	shared[ref].domid = domid;
-+	wmb();
-+	shared[ref].flags = GTF_accept_transfer;
-+
-+	return ref;
-+}
-+
-+void
-+gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid)
-+{
-+	shared[ref].frame = 0;
-+	shared[ref].domid = domid;
-+	wmb();
-+	shared[ref].flags = GTF_accept_transfer;
-+}
-+
-+unsigned long
-+gnttab_end_foreign_transfer_ref(grant_ref_t ref)
-+{
-+	unsigned long frame;
-+	u16           flags;
-+
-+	/*
-+         * If a transfer is not even yet started, try to reclaim the grant
-+         * reference and return failure (== 0).
-+         */
-+	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
-+		if ( synch_cmpxchg(&shared[ref].flags, flags, 0) == flags )
-+			return 0;
-+		cpu_relax();
-+	}
-+
-+	/* If a transfer is in progress then wait until it is completed. */
-+	while (!(flags & GTF_transfer_completed)) {
-+		flags = shared[ref].flags;
-+		cpu_relax();
-+	}
-+
-+	/* Read the frame number /after/ reading completion status. */
-+	rmb();
-+	frame = shared[ref].frame;
-+	BUG_ON(frame == 0);
-+
-+	return frame;
-+}
-+
-+unsigned long
-+gnttab_end_foreign_transfer(grant_ref_t ref)
-+{
-+	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
-+	put_free_entry(ref);
-+	return frame;
-+}
-+
-+void
-+gnttab_free_grant_reference(grant_ref_t ref)
-+{
-+
-+	put_free_entry(ref);
-+}
-+
-+void
-+gnttab_free_grant_references(grant_ref_t head)
-+{
-+	grant_ref_t ref;
-+	unsigned long flags;
-+	int count = 1;
-+	if (head == GNTTAB_LIST_END)
-+		return;
-+	spin_lock_irqsave(&gnttab_list_lock, flags);
-+	ref = head;
-+	while (gnttab_list[ref] != GNTTAB_LIST_END) {
-+		ref = gnttab_list[ref];
-+		count++;
-+	}
-+	gnttab_list[ref] = gnttab_free_head;
-+	gnttab_free_head = head;
-+	gnttab_free_count += count;
-+	check_free_callbacks();
-+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+
-+int
-+gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
-+{
-+	int h = get_free_entries(count);
-+
-+	if (h == -1)
-+		return -ENOSPC;
-+
-+	*head = h;
-+
-+	return 0;
-+}
-+
-+int
-+gnttab_claim_grant_reference(grant_ref_t *private_head)
-+{
-+	grant_ref_t g = *private_head;
-+	if (unlikely(g == GNTTAB_LIST_END))
-+		return -ENOSPC;
-+	*private_head = gnttab_list[g];
-+	return g;
-+}
-+
-+void
-+gnttab_release_grant_reference(grant_ref_t *private_head, grant_ref_t  release)
-+{
-+	gnttab_list[release] = *private_head;
-+	*private_head = release;
-+}
-+
-+void
-+gnttab_request_free_callback(struct gnttab_free_callback *callback,
-+			     void (*fn)(void *), void *arg, u16 count)
-+{
-+	unsigned long flags;
-+	spin_lock_irqsave(&gnttab_list_lock, flags);
-+	if (callback->next)
-+		goto out;
-+	callback->fn = fn;
-+	callback->arg = arg;
-+	callback->count = count;
-+	callback->next = gnttab_free_callback_list;
-+	gnttab_free_callback_list = callback;
-+	check_free_callbacks();
-+ out:
-+	spin_unlock_irqrestore(&gnttab_list_lock, flags);
-+}
-+
-+int
-+gnttab_resume(void)
-+{
-+	gnttab_setup_table_t setup;
-+	unsigned long        frames[NR_GRANT_FRAMES];
-+	int                  i;
-+
-+	setup.dom        = DOMID_SELF;
-+	setup.nr_frames  = NR_GRANT_FRAMES;
-+	setup.frame_list = frames;
-+
-+	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
-+	BUG_ON(setup.status != 0);
-+
-+#ifdef __ia64__
-+	shared = __va(frames[0] << PAGE_SHIFT);
-+	printk("grant table at %p\n", shared);
-+#else
-+	for (i = 0; i < NR_GRANT_FRAMES; i++)
-+		set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
-+#endif
-+
-+	return 0;
-+}
-+
-+int
-+gnttab_suspend(void)
-+{
-+	int i;
-+
-+	for (i = 0; i < NR_GRANT_FRAMES; i++)
-+		clear_fixmap(FIX_GNTTAB_END - i);
-+
-+	return 0;
-+}
-+
-+static int __init
-+gnttab_init(void)
-+{
-+	int i;
-+
-+	if (xen_init() < 0)
-+		return -ENODEV;
-+
-+	BUG_ON(gnttab_resume());
-+
-+#ifndef __ia64__
-+	shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
-+#endif
-+
-+	for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
-+		gnttab_list[i] = i + 1;
-+	gnttab_free_count = NR_GRANT_ENTRIES - NR_RESERVED_ENTRIES;
-+	gnttab_free_head  = NR_RESERVED_ENTRIES;
-+
-+	printk("Grant table initialized\n");
-+	return 0;
-+}
-+
-+core_initcall(gnttab_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
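
The grant-reference allocator above keeps its free list inside gnttab_list[] itself: gnttab_list[ref] is the next free reference, gnttab_free_head is the first one, and a chunk handed out by get_free_entries() stays chained through the same array until each reference is returned with put_free_entry(). A stand-alone sketch of that scheme, with names and sizes local to the sketch:

/* Array-encoded free list in the style of gnttab.c above.  Purely
 * illustrative; no reserved entries, no locking. */
#include <stdio.h>

#define NR_ENTRIES 16
#define LIST_END   (NR_ENTRIES + 1)

static int list[NR_ENTRIES];
static int free_head;
static int free_count;

static void init_list(void)
{
	int i;

	for (i = 0; i < NR_ENTRIES; i++)
		list[i] = i + 1;
	free_head  = 0;
	free_count = NR_ENTRIES;
}

/* Pop 'count' references; return the first, keep the rest chained. */
static int get_free_entries(int count)
{
	int ref, head;

	if (free_count < count)
		return -1;
	ref = head = free_head;
	free_count -= count;
	while (count-- > 1)
		head = list[head];
	free_head = list[head];
	list[head] = LIST_END;
	return ref;
}

/* Push one reference back onto the free list. */
static void put_free_entry(int ref)
{
	list[ref] = free_head;
	free_head = ref;
	free_count++;
}

int main(void)
{
	int ref;

	init_list();
	ref = get_free_entries(3);           /* grabs refs 0, 1, 2 */
	printf("first ref %d, next %d, next %d\n",
	       ref, list[ref], list[list[ref]]);
	put_free_entry(ref);                 /* hand one back */
	printf("free_head now %d, free_count %d\n", free_head, free_count);
	return 0;
}
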
-diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/Makefile linux-2.6.12-xen/arch/xen/kernel/Makefile
---- pristine-linux-2.6.12/arch/xen/kernel/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/kernel/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,18 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+CPPFLAGS_vmlinux.lds += -U$(XENARCH)
-+
-+$(obj)/vmlinux.lds.S:
-+	@ln -fsn $(srctree)/arch/$(XENARCH)/kernel/vmlinux.lds.S $@
-+
-+extra-y += vmlinux.lds
-+
-+obj-y   := evtchn.o reboot.o gnttab.o features.o
-+
-+obj-$(CONFIG_PROC_FS) += xen_proc.o
-+obj-$(CONFIG_NET)     += skbuff.o
-+obj-$(CONFIG_SMP)     += smpboot.o
-diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/reboot.c linux-2.6.12-xen/arch/xen/kernel/reboot.c
---- pristine-linux-2.6.12/arch/xen/kernel/reboot.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/kernel/reboot.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,430 @@
-+#define __KERNEL_SYSCALLS__
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/unistd.h>
-+#include <linux/module.h>
-+#include <linux/reboot.h>
-+#include <linux/sysrq.h>
-+#include <linux/stringify.h>
-+#include <asm/irq.h>
-+#include <asm/mmu_context.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xen-public/dom0_ops.h>
-+#include <asm-xen/xenbus.h>
-+#include <linux/cpu.h>
-+#include <linux/kthread.h>
-+#include <asm-xen/xencons.h>
-+
-+#if defined(__i386__) || defined(__x86_64__)
-+/*
-+ * Power off function, if any
-+ */
-+void (*pm_power_off)(void);
-+#endif
-+
-+#define SHUTDOWN_INVALID  -1
-+#define SHUTDOWN_POWEROFF  0
-+#define SHUTDOWN_REBOOT    1
-+#define SHUTDOWN_SUSPEND   2
-+// Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
-+// report a crash, not be instructed to crash!
-+// HALT is the same as POWEROFF, as far as we're concerned.  The tools use
-+// the distinction when we return the reason code to them.
-+#define SHUTDOWN_HALT      4
-+
-+void machine_restart(char * __unused)
-+{
-+	/* We really want to get pending console data out before we die. */
-+	xencons_force_flush();
-+	HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_reboot);
-+}
-+
-+void machine_halt(void)
-+{
-+	machine_power_off();
-+}
-+
-+void machine_power_off(void)
-+{
-+	/* We really want to get pending console data out before we die. */
-+	xencons_force_flush();
-+	HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_poweroff);
-+}
-+
-+int reboot_thru_bios = 0;	/* for dmi_scan.c */
-+EXPORT_SYMBOL(machine_restart);
-+EXPORT_SYMBOL(machine_halt);
-+EXPORT_SYMBOL(machine_power_off);
-+
-+
-+/******************************************************************************
-+ * Stop/pickle callback handling.
-+ */
-+
-+/* Ignore multiple shutdown requests. */
-+static int shutting_down = SHUTDOWN_INVALID;
-+static void __shutdown_handler(void *unused);
-+static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
-+
-+#ifndef CONFIG_HOTPLUG_CPU
-+#define cpu_down(x) (-EOPNOTSUPP)
-+#define cpu_up(x) (-EOPNOTSUPP)
-+#endif
-+
-+
-+static int __do_suspend(void *ignore)
-+{
-+	int i, j, k, fpp;
-+
-+	extern int gnttab_suspend(void);
-+	extern int gnttab_resume(void);
-+
-+	extern void time_resume(void);
-+	extern unsigned long max_pfn;
-+	extern unsigned long *pfn_to_mfn_frame_list_list;
-+	extern unsigned long *pfn_to_mfn_frame_list[];
-+
-+#ifdef CONFIG_SMP
-+	cpumask_t prev_online_cpus;
-+	int vcpu_prepare(int vcpu);
-+#endif
-+
-+	int err = 0;
-+
-+	BUG_ON(smp_processor_id() != 0);
-+	BUG_ON(in_interrupt());
-+
-+#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
-+	if (num_online_cpus() > 1) {
-+		printk(KERN_WARNING "Can't suspend SMP guests "
-+		       "without CONFIG_HOTPLUG_CPU\n");
-+		return -EOPNOTSUPP;
-+	}
-+#endif
-+
-+	xenbus_suspend();
-+
-+	lock_cpu_hotplug();
-+#ifdef CONFIG_SMP
-+	/*
-+	 * Take all other CPUs offline. We hold the hotplug semaphore to
-+	 * avoid other processes bringing up CPUs under our feet.
-+	 */
-+	cpus_clear(prev_online_cpus);
-+	while (num_online_cpus() > 1) {
-+		for_each_online_cpu(i) {
-+			if (i == 0)
-+				continue;
-+			unlock_cpu_hotplug();
-+			err = cpu_down(i);
-+			lock_cpu_hotplug();
-+			if (err != 0) {
-+				printk(KERN_CRIT "Failed to take all CPUs "
-+				       "down: %d.\n", err);
-+				goto out_reenable_cpus;
-+			}
-+			cpu_set(i, prev_online_cpus);
-+		}
-+	}
-+#endif
-+
-+	preempt_disable();
-+
-+#ifdef __i386__
-+	kmem_cache_shrink(pgd_cache);
-+	mm_pin_all();
-+#endif
-+
-+	__cli();
-+	preempt_enable();
-+	unlock_cpu_hotplug();
-+
-+	gnttab_suspend();
-+
-+	HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+	clear_fixmap(FIX_SHARED_INFO);
-+
-+	xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
-+	xen_start_info->console_mfn = mfn_to_pfn(xen_start_info->console_mfn);
-+
-+	/*
-+	 * We'll stop somewhere inside this hypercall. When it returns,
-+	 * we'll start resuming after the restore.
-+	 */
-+	HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
-+
-+	shutting_down = SHUTDOWN_INVALID; 
-+
-+	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+
-+	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+
-+	memset(empty_zero_page, 0, PAGE_SIZE);
-+	     
-+	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+		virt_to_mfn(pfn_to_mfn_frame_list_list);
-+  
-+	fpp = PAGE_SIZE/sizeof(unsigned long);
-+	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
-+		if ((j % fpp) == 0) {
-+			k++;
-+			pfn_to_mfn_frame_list_list[k] = 
-+				virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+			j = 0;
-+		}
-+		pfn_to_mfn_frame_list[k][j] = 
-+			virt_to_mfn(&phys_to_machine_mapping[i]);
-+	}
-+	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
-+
-+	gnttab_resume();
-+
-+	irq_resume();
-+
-+	time_resume();
-+
-+	__sti();
-+
-+	xencons_resume();
-+
-+#ifdef CONFIG_SMP
-+	for_each_cpu(i)
-+		vcpu_prepare(i);
-+
-+#endif
-+
-+	/* 
-+	 * Only resume xenbus /after/ we've prepared our VCPUs; otherwise
-+	 * the VCPU hotplug callback can race with our vcpu_prepare
-+	 */
-+	xenbus_resume();
-+
-+#ifdef CONFIG_SMP
-+ out_reenable_cpus:
-+	for_each_cpu_mask(i, prev_online_cpus) {
-+		j = cpu_up(i);
-+		if ((j != 0) && !cpu_online(i)) {
-+			printk(KERN_CRIT "Failed to bring cpu "
-+			       "%d back up (%d).\n",
-+			       i, j);
-+			err = j;
-+		}
-+	}
-+#endif
-+
-+	return err;
-+}
-+
-+static int shutdown_process(void *__unused)
-+{
-+	static char *envp[] = { "HOME=/", "TERM=linux", 
-+				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
-+	static char *restart_argv[]  = { "/sbin/reboot", NULL };
-+	static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
-+
-+	extern asmlinkage long sys_reboot(int magic1, int magic2,
-+					  unsigned int cmd, void *arg);
-+
-+	daemonize("shutdown");
-+
-+	switch (shutting_down) {
-+	case SHUTDOWN_POWEROFF:
-+	case SHUTDOWN_HALT:
-+		if (execve("/sbin/poweroff", poweroff_argv, envp) < 0) {
-+			sys_reboot(LINUX_REBOOT_MAGIC1,
-+				   LINUX_REBOOT_MAGIC2,
-+				   LINUX_REBOOT_CMD_POWER_OFF,
-+				   NULL);
-+		}
-+		break;
-+
-+	case SHUTDOWN_REBOOT:
-+		if (execve("/sbin/reboot", restart_argv, envp) < 0) {
-+			sys_reboot(LINUX_REBOOT_MAGIC1,
-+				   LINUX_REBOOT_MAGIC2,
-+				   LINUX_REBOOT_CMD_RESTART,
-+				   NULL);
-+		}
-+		break;
-+	}
-+
-+	shutting_down = SHUTDOWN_INVALID; /* could try again */
-+
-+	return 0;
-+}
-+
-+static int kthread_create_on_cpu(int (*f)(void *arg),
-+				 void *arg,
-+				 const char *name,
-+				 int cpu)
-+{
-+	struct task_struct *p;
-+	p = kthread_create(f, arg, name);
-+	if (IS_ERR(p))
-+		return PTR_ERR(p);
-+	kthread_bind(p, cpu);
-+	wake_up_process(p);
-+	return 0;
-+}
-+
-+static void __shutdown_handler(void *unused)
-+{
-+	int err;
-+
-+	if (shutting_down != SHUTDOWN_SUSPEND)
-+		err = kernel_thread(shutdown_process, NULL,
-+				    CLONE_FS | CLONE_FILES);
-+	else
-+		err = kthread_create_on_cpu(__do_suspend, NULL, "suspend", 0);
-+
-+	if ( err < 0 ) {
-+		printk(KERN_WARNING "Error creating shutdown process (%d): "
-+		       "retrying...\n", -err);
-+		schedule_delayed_work(&shutdown_work, HZ/2);
-+	}
-+}
-+
-+static void shutdown_handler(struct xenbus_watch *watch,
-+			     const char **vec, unsigned int len)
-+{
-+	char *str;
-+	xenbus_transaction_t xbt;
-+	int err;
-+
-+	if (shutting_down != SHUTDOWN_INVALID)
-+		return;
-+
-+ again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err)
-+		return;
-+	str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
-+	/* Ignore read errors and empty reads. */
-+	if (XENBUS_IS_ERR_READ(str)) {
-+		xenbus_transaction_end(xbt, 1);
-+		return;
-+	}
-+
-+	xenbus_write(xbt, "control", "shutdown", "");
-+
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN) {
-+		kfree(str);
-+		goto again;
-+	}
-+
-+	if (strcmp(str, "poweroff") == 0)
-+		shutting_down = SHUTDOWN_POWEROFF;
-+	else if (strcmp(str, "reboot") == 0)
-+		shutting_down = SHUTDOWN_REBOOT;
-+	else if (strcmp(str, "suspend") == 0)
-+		shutting_down = SHUTDOWN_SUSPEND;
-+	else if (strcmp(str, "halt") == 0)
-+		shutting_down = SHUTDOWN_HALT;
-+	else {
-+		printk("Ignoring shutdown request: %s\n", str);
-+		shutting_down = SHUTDOWN_INVALID;
-+	}
-+
-+	if (shutting_down != SHUTDOWN_INVALID)
-+		schedule_work(&shutdown_work);
-+
-+	kfree(str);
-+}
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
-+			  unsigned int len)
-+{
-+	char sysrq_key = '\0';
-+	xenbus_transaction_t xbt;
-+	int err;
-+
-+ again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err)
-+		return;
-+	if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
-+		printk(KERN_ERR "Unable to read sysrq code in "
-+		       "control/sysrq\n");
-+		xenbus_transaction_end(xbt, 1);
-+		return;
-+	}
-+
-+	if (sysrq_key != '\0')
-+		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
-+
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN)
-+		goto again;
-+
-+	if (sysrq_key != '\0') {
-+		handle_sysrq(sysrq_key, NULL, NULL);
-+	}
-+}
-+#endif
-+
-+static struct xenbus_watch shutdown_watch = {
-+	.node = "control/shutdown",
-+	.callback = shutdown_handler
-+};
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+static struct xenbus_watch sysrq_watch = {
-+	.node ="control/sysrq",
-+	.callback = sysrq_handler
-+};
-+#endif
-+
-+static struct notifier_block xenstore_notifier;
-+
-+static int setup_shutdown_watcher(struct notifier_block *notifier,
-+                                  unsigned long event,
-+                                  void *data)
-+{
-+	int err1 = 0;
-+#ifdef CONFIG_MAGIC_SYSRQ
-+	int err2 = 0;
-+#endif
-+
-+	err1 = register_xenbus_watch(&shutdown_watch);
-+#ifdef CONFIG_MAGIC_SYSRQ
-+	err2 = register_xenbus_watch(&sysrq_watch);
-+#endif
-+
-+	if (err1) {
-+		printk(KERN_ERR "Failed to set shutdown watcher\n");
-+	}
-+    
-+#ifdef CONFIG_MAGIC_SYSRQ
-+	if (err2) {
-+		printk(KERN_ERR "Failed to set sysrq watcher\n");
-+	}
-+#endif
-+
-+	return NOTIFY_DONE;
-+}
-+
-+static int __init setup_shutdown_event(void)
-+{
-+    
-+	xenstore_notifier.notifier_call = setup_shutdown_watcher;
-+
-+	register_xenstore_notifier(&xenstore_notifier);
-+    
-+	return 0;
-+}
-+
-+subsys_initcall(setup_shutdown_event);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
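
The loop in __do_suspend() that repopulates pfn_to_mfn_frame_list_list works in units of fpp = PAGE_SIZE/sizeof(unsigned long): each frame-list page indexes fpp pages of the phys-to-machine table, and each of those pages covers fpp pfns. The snippet below just reproduces that arithmetic for i386-sized pages; the guest size is an arbitrary example, not taken from the patch.

/* Sanity check of the frame-list bookkeeping rebuilt in __do_suspend()
 * above, using i386 assumptions (4096-byte pages, 4-byte unsigned long). */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, longsz = 4;     /* i386 assumptions */
	unsigned long fpp = page_size / longsz;         /* entries per page */
	unsigned long max_pfn = 262144;                 /* e.g. a 1 GiB guest */

	unsigned long p2m_pages   = (max_pfn + fpp - 1) / fpp;
	unsigned long frame_pages = (p2m_pages + fpp - 1) / fpp;

	printf("fpp = %lu\n", fpp);
	printf("p2m pages: %lu, frame-list pages: %lu\n",
	       p2m_pages, frame_pages);
	printf("one frame-list page covers %lu pfns (%llu MiB)\n",
	       fpp * fpp,
	       (unsigned long long)fpp * fpp * page_size >> 20);
	return 0;
}
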
-diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/skbuff.c linux-2.6.12-xen/arch/xen/kernel/skbuff.c
---- pristine-linux-2.6.12/arch/xen/kernel/skbuff.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/kernel/skbuff.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,142 @@
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/init.h>
-+#include <asm/io.h>
-+#include <asm/page.h>
-+#include <asm/hypervisor.h>
-+
-+/* Referenced in netback.c. */
-+/*static*/ kmem_cache_t *skbuff_cachep;
-+
-+#define MAX_SKBUFF_ORDER 4
-+static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
-+
-+static struct {
-+	int size;
-+	kmem_cache_t *cachep;
-+} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
-+
-+struct sk_buff *alloc_skb(unsigned int length, int gfp_mask)
-+{
-+	int order, i;
-+	kmem_cache_t *cachep;
-+
-+	length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);
-+
-+	if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
-+		for (i = 0; skbuff_small[i].size < length; i++)
-+			continue;
-+		cachep = skbuff_small[i].cachep;
-+	} else {
-+		order = get_order(length);
-+		if (order > MAX_SKBUFF_ORDER) {
-+			printk(KERN_ALERT "Attempt to allocate order %d "
-+			       "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
-+			return NULL;
-+		}
-+		cachep = skbuff_order_cachep[order];
-+	}
-+
-+	length -= sizeof(struct skb_shared_info);
-+
-+	return alloc_skb_from_cache(cachep, length, gfp_mask);
-+}
-+
-+struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
-+{
-+	struct sk_buff *skb;
-+	int order;
-+
-+	length = SKB_DATA_ALIGN(length + 16);
-+	order = get_order(length + sizeof(struct skb_shared_info));
-+	if (order > MAX_SKBUFF_ORDER) {
-+		printk(KERN_ALERT "Attempt to allocate order %d skbuff. "
-+		       "Increase MAX_SKBUFF_ORDER.\n", order);
-+		return NULL;
-+	}
-+
-+	skb = alloc_skb_from_cache(
-+		skbuff_order_cachep[order], length, gfp_mask);
-+	if (skb != NULL)
-+		skb_reserve(skb, 16);
-+
-+	return skb;
-+}
-+
-+static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
-+{
-+	int order = 0;
-+
-+	while (skbuff_order_cachep[order] != cachep)
-+		order++;
-+
-+	/* Do our best to allocate contiguous memory but fall back to IOMMU. */
-+	if (order != 0)
-+		(void)xen_create_contiguous_region(
-+			(unsigned long)buf, order, 0);
-+
-+	scrub_pages(buf, 1 << order);
-+}
-+
-+static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
-+{
-+	int order = 0;
-+
-+	while (skbuff_order_cachep[order] != cachep)
-+		order++;
-+
-+	if (order != 0)
-+		xen_destroy_contiguous_region((unsigned long)buf, order);
-+}
-+
-+static int __init skbuff_init(void)
-+{
-+	static char name[MAX_SKBUFF_ORDER + 1][20];
-+	static char small_name[ARRAY_SIZE(skbuff_small)][20];
-+	unsigned long size;
-+	int i, order;
-+
-+	for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
-+		size = skbuff_small[i].size;
-+		sprintf(small_name[i], "xen-skb-%lu", size);
-+		/*
-+		 * No ctor/dtor: objects do not span page boundaries, and they
-+		 * are only used on transmit path so no need for scrubbing.
-+		 */
-+		skbuff_small[i].cachep = kmem_cache_create(
-+			small_name[i], size, size, 0, NULL, NULL);
-+	}
-+
-+	for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
-+		size = PAGE_SIZE << order;
-+		sprintf(name[order], "xen-skb-%lu", size);
-+		skbuff_order_cachep[order] = kmem_cache_create(
-+			name[order], size, size, 0, skbuff_ctor, skbuff_dtor);
-+	}
-+
-+	skbuff_cachep = skbuff_order_cachep[0];
-+
-+	return 0;
-+}
-+core_initcall(skbuff_init);
-+
-+EXPORT_SYMBOL(__dev_alloc_skb);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
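
alloc_skb() above routes small requests to the fixed 512- and 2048-byte pools and everything else to a power-of-two order cache, refusing anything beyond MAX_SKBUFF_ORDER. The following user-space model walks through that selection; SKB_DATA_ALIGN and the skb_shared_info size are stand-in values here, not the kernel's.

/* User-space model of the cache selection in the Xen alloc_skb() above. */
#include <stdio.h>

#define CACHE_BYTES      32                 /* placeholder alignment */
#define SHINFO_SIZE      128                /* placeholder shared-info size */
#define PAGE_SIZE        4096
#define MAX_SKBUFF_ORDER 4

static unsigned long align_up(unsigned long len)
{
	return (len + CACHE_BYTES - 1) & ~(unsigned long)(CACHE_BYTES - 1);
}

static int get_order(unsigned long size)    /* smallest order with room */
{
	int order = 0;

	while (((unsigned long)PAGE_SIZE << order) < size)
		order++;
	return order;
}

static const unsigned long small_sizes[] = { 512, 2048 };

static void pick_cache(unsigned long request)
{
	unsigned long len = align_up(request) + SHINFO_SIZE;
	unsigned int i;

	for (i = 0; i < sizeof(small_sizes) / sizeof(small_sizes[0]); i++) {
		if (len <= small_sizes[i]) {
			printf("%5lu bytes -> xen-skb-%lu\n",
			       request, small_sizes[i]);
			return;
		}
	}
	if (get_order(len) > MAX_SKBUFF_ORDER) {
		printf("%5lu bytes -> too big, would need a larger order\n",
		       request);
		return;
	}
	printf("%5lu bytes -> xen-skb-%lu (order %d)\n",
	       request, (unsigned long)PAGE_SIZE << get_order(len),
	       get_order(len));
}

int main(void)
{
	pick_cache(200);      /* lands in the 512-byte pool */
	pick_cache(1500);     /* lands in the 2048-byte pool */
	pick_cache(9000);     /* order-2 pool */
	pick_cache(70000);    /* exceeds MAX_SKBUFF_ORDER */
	return 0;
}
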
-diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/smpboot.c linux-2.6.12-xen/arch/xen/kernel/smpboot.c
---- pristine-linux-2.6.12/arch/xen/kernel/smpboot.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/kernel/smpboot.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,439 @@
-+/*
-+ *	Xen SMP booting functions
-+ *
-+ *	See arch/i386/kernel/smpboot.c for copyright and credits for derived
-+ *	portions of this file.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/smp_lock.h>
-+#include <linux/irq.h>
-+#include <linux/bootmem.h>
-+#include <linux/notifier.h>
-+#include <linux/cpu.h>
-+#include <linux/percpu.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/pgalloc.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/xen-public/vcpu.h>
-+#include <asm-xen/xenbus.h>
-+
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+#include <asm/smp_alt.h>
-+#endif
-+
-+extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
-+extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
-+
-+extern void local_setup_timer(unsigned int cpu);
-+extern void local_teardown_timer(unsigned int cpu);
-+
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void system_call(void);
-+extern void smp_trap_init(trap_info_t *);
-+
-+extern cpumask_t cpu_initialized;
-+
-+/* Number of siblings per CPU package */
-+int smp_num_siblings = 1;
-+int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
-+EXPORT_SYMBOL(phys_proc_id);
-+int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
-+EXPORT_SYMBOL(cpu_core_id);
-+
-+cpumask_t cpu_online_map;
-+EXPORT_SYMBOL(cpu_online_map);
-+cpumask_t cpu_possible_map;
-+EXPORT_SYMBOL(cpu_possible_map);
-+
-+struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-+EXPORT_SYMBOL(cpu_data);
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+DEFINE_PER_CPU(int, cpu_state) = { 0 };
-+#endif
-+
-+static DEFINE_PER_CPU(int, resched_irq);
-+static DEFINE_PER_CPU(int, callfunc_irq);
-+static char resched_name[NR_CPUS][15];
-+static char callfunc_name[NR_CPUS][15];
-+
-+u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+void *xquad_portio;
-+
-+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
-+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-+EXPORT_SYMBOL(cpu_core_map);
-+
-+#if defined(__i386__)
-+u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
-+EXPORT_SYMBOL(x86_cpu_to_apicid);
-+#elif !defined(CONFIG_X86_IO_APIC)
-+unsigned int maxcpus = NR_CPUS;
-+#endif
-+
-+void __init smp_alloc_memory(void)
-+{
-+}
-+
-+static void xen_smp_intr_init(unsigned int cpu)
-+{
-+	sprintf(resched_name[cpu], "resched%d", cpu);
-+	per_cpu(resched_irq, cpu) =
-+		bind_ipi_to_irqhandler(
-+			RESCHEDULE_VECTOR,
-+			cpu,
-+			smp_reschedule_interrupt,
-+			SA_INTERRUPT,
-+			resched_name[cpu],
-+			NULL);
-+	BUG_ON(per_cpu(resched_irq, cpu) < 0);
-+
-+	sprintf(callfunc_name[cpu], "callfunc%d", cpu);
-+	per_cpu(callfunc_irq, cpu) =
-+		bind_ipi_to_irqhandler(
-+			CALL_FUNCTION_VECTOR,
-+			cpu,
-+			smp_call_function_interrupt,
-+			SA_INTERRUPT,
-+			callfunc_name[cpu],
-+			NULL);
-+	BUG_ON(per_cpu(callfunc_irq, cpu) < 0);
-+
-+	if (cpu != 0)
-+		local_setup_timer(cpu);
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+static void xen_smp_intr_exit(unsigned int cpu)
-+{
-+	if (cpu != 0)
-+		local_teardown_timer(cpu);
-+
-+	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-+	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-+}
-+#endif
-+
-+static void cpu_bringup(void)
-+{
-+	if (!cpu_isset(smp_processor_id(), cpu_initialized))
-+		cpu_init();
-+	local_irq_enable();
-+	cpu_idle();
-+}
-+
-+void vcpu_prepare(int vcpu)
-+{
-+	vcpu_guest_context_t ctxt;
-+	struct task_struct *idle = idle_task(vcpu);
-+
-+	if (vcpu == 0)
-+		return;
-+
-+	memset(&ctxt, 0, sizeof(ctxt));
-+
-+	ctxt.flags = VGCF_IN_KERNEL;
-+	ctxt.user_regs.ds = __USER_DS;
-+	ctxt.user_regs.es = __USER_DS;
-+	ctxt.user_regs.fs = 0;
-+	ctxt.user_regs.gs = 0;
-+	ctxt.user_regs.ss = __KERNEL_DS;
-+	ctxt.user_regs.eip = (unsigned long)cpu_bringup;
-+	ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
-+
-+	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
-+
-+	smp_trap_init(ctxt.trap_ctxt);
-+
-+	ctxt.ldt_ents = 0;
-+
-+	ctxt.gdt_frames[0] = virt_to_mfn(cpu_gdt_descr[vcpu].address);
-+	ctxt.gdt_ents      = cpu_gdt_descr[vcpu].size / 8;
-+
-+#ifdef __i386__
-+	ctxt.user_regs.cs = __KERNEL_CS;
-+	ctxt.user_regs.esp = idle->thread.esp;
-+
-+	ctxt.kernel_ss = __KERNEL_DS;
-+	ctxt.kernel_sp = idle->thread.esp0;
-+
-+	ctxt.event_callback_cs     = __KERNEL_CS;
-+	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
-+	ctxt.failsafe_callback_cs  = __KERNEL_CS;
-+	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-+
-+	ctxt.ctrlreg[3] = virt_to_mfn(swapper_pg_dir) << PAGE_SHIFT;
-+#else
-+	ctxt.user_regs.cs = __KERNEL_CS | 3;
-+	ctxt.user_regs.esp = idle->thread.rsp;
-+
-+	ctxt.kernel_ss = __KERNEL_DS;
-+	ctxt.kernel_sp = idle->thread.rsp0;
-+
-+	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
-+	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
-+	ctxt.syscall_callback_eip  = (unsigned long)system_call;
-+
-+	ctxt.ctrlreg[3] = virt_to_mfn(init_level4_pgt) << PAGE_SHIFT;
-+
-+	ctxt.gs_base_kernel = (unsigned long)(cpu_pda + vcpu);
-+#endif
-+
-+	BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu, &ctxt));
-+}
-+
-+void __init smp_prepare_cpus(unsigned int max_cpus)
-+{
-+	int cpu, rc;
-+	struct task_struct *idle;
-+
-+	cpu_data[0] = boot_cpu_data;
-+
-+	cpu_2_logical_apicid[0] = 0;
-+	x86_cpu_to_apicid[0] = 0;
-+
-+	current_thread_info()->cpu = 0;
-+	cpu_sibling_map[0] = cpumask_of_cpu(0);
-+	cpu_core_map[0]    = cpumask_of_cpu(0);
-+
-+	if (max_cpus != 0)
-+		xen_smp_intr_init(0);
-+
-+	for (cpu = 1; cpu < max_cpus; cpu++) {
-+		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
-+		if (rc == -ENOENT)
-+			break;
-+		BUG_ON(rc != 0);
-+
-+		cpu_data[cpu] = boot_cpu_data;
-+		cpu_2_logical_apicid[cpu] = cpu;
-+		x86_cpu_to_apicid[cpu] = cpu;
-+
-+		idle = fork_idle(cpu);
-+		if (IS_ERR(idle))
-+			panic("failed fork for CPU %d", cpu);
-+
-+#ifdef __x86_64__
-+		cpu_pda[cpu].pcurrent = idle;
-+		cpu_pda[cpu].cpunumber = cpu;
-+		per_cpu(init_tss,cpu).rsp0 = idle->thread.rsp;
-+		clear_ti_thread_flag(idle->thread_info, TIF_FORK);
-+#endif
-+
-+		irq_ctx_init(cpu);
-+
-+		cpu_gdt_descr[cpu].address =
-+			__get_free_page(GFP_KERNEL|__GFP_ZERO);
-+		BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
-+		cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
-+		memcpy((void *)cpu_gdt_descr[cpu].address,
-+		       (void *)cpu_gdt_descr[0].address,
-+		       cpu_gdt_descr[0].size);
-+		make_page_readonly(
-+			(void *)cpu_gdt_descr[cpu].address,
-+			XENFEAT_writable_descriptor_tables);
-+
-+		cpu_set(cpu, cpu_possible_map);
-+#ifdef CONFIG_HOTPLUG_CPU
-+		if (xen_start_info->flags & SIF_INITDOMAIN)
-+			cpu_set(cpu, cpu_present_map);
-+#else
-+		cpu_set(cpu, cpu_present_map);
-+#endif
-+
-+		vcpu_prepare(cpu);
-+	}
-+
-+	/* Currently, Xen gives no dynamic NUMA/HT info. */
-+	for (cpu = 1; cpu < NR_CPUS; cpu++) {
-+		cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
-+		cpu_core_map[cpu]    = cpumask_of_cpu(cpu);
-+	}
-+
-+#ifdef CONFIG_X86_IO_APIC
-+	/*
-+	 * Here we can be sure that there is an IO-APIC in the system. Let's
-+	 * go and set it up:
-+	 */
-+	if (!skip_ioapic_setup && nr_ioapics)
-+		setup_IO_APIC();
-+#endif
-+}
-+
-+void __devinit smp_prepare_boot_cpu(void)
-+{
-+	cpu_possible_map = cpumask_of_cpu(0);
-+	cpu_present_map  = cpumask_of_cpu(0);
-+	cpu_online_map   = cpumask_of_cpu(0);
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+
-+/*
-+ * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
-+ * But do it early enough to catch critical for_each_present_cpu() loops
-+ * in i386-specific code.
-+ */
-+static int __init initialize_cpu_present_map(void)
-+{
-+	cpu_present_map = cpu_possible_map;
-+	return 0;
-+}
-+core_initcall(initialize_cpu_present_map);
-+
-+static void vcpu_hotplug(unsigned int cpu)
-+{
-+	int err;
-+	char dir[32], state[32];
-+
-+	if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
-+		return;
-+
-+	sprintf(dir, "cpu/%d", cpu);
-+	err = xenbus_scanf(XBT_NULL, dir, "availability", "%s", state);
-+	if (err != 1) {
-+		printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
-+		return;
-+	}
-+
-+	if (strcmp(state, "online") == 0) {
-+		(void)cpu_up(cpu);
-+	} else if (strcmp(state, "offline") == 0) {
-+		(void)cpu_down(cpu);
-+	} else {
-+		printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
-+		       state, cpu);
-+	}
-+}
-+
-+static void handle_vcpu_hotplug_event(
-+	struct xenbus_watch *watch, const char **vec, unsigned int len)
-+{
-+	int cpu;
-+	char *cpustr;
-+	const char *node = vec[XS_WATCH_PATH];
-+
-+	if ((cpustr = strstr(node, "cpu/")) != NULL) {
-+		sscanf(cpustr, "cpu/%d", &cpu);
-+		vcpu_hotplug(cpu);
-+	}
-+}
-+
-+static int setup_cpu_watcher(struct notifier_block *notifier,
-+			      unsigned long event, void *data)
-+{
-+	int i;
-+
-+	static struct xenbus_watch cpu_watch = {
-+		.node = "cpu",
-+		.callback = handle_vcpu_hotplug_event };
-+	(void)register_xenbus_watch(&cpu_watch);
-+
-+	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
-+		for_each_cpu(i)
-+			vcpu_hotplug(i);
-+		printk(KERN_INFO "Brought up %ld CPUs\n",
-+		       (long)num_online_cpus());
-+	}
-+
-+	return NOTIFY_DONE;
-+}
-+
-+static int __init setup_vcpu_hotplug_event(void)
-+{
-+	static struct notifier_block xsn_cpu = {
-+		.notifier_call = setup_cpu_watcher };
-+	register_xenstore_notifier(&xsn_cpu);
-+	return 0;
-+}
-+
-+arch_initcall(setup_vcpu_hotplug_event);
-+
-+int __cpu_disable(void)
-+{
-+	cpumask_t map = cpu_online_map;
-+	int cpu = smp_processor_id();
-+
-+	if (cpu == 0)
-+		return -EBUSY;
-+
-+	cpu_clear(cpu, map);
-+	fixup_irqs(map);
-+	cpu_clear(cpu, cpu_online_map);
-+
-+	return 0;
-+}
-+
-+void __cpu_die(unsigned int cpu)
-+{
-+	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
-+		current->state = TASK_UNINTERRUPTIBLE;
-+		schedule_timeout(HZ/10);
-+	}
-+
-+	xen_smp_intr_exit(cpu);
-+
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+	if (num_online_cpus() == 1)
-+		unprepare_for_smp();
-+#endif
-+}
-+
-+#else /* !CONFIG_HOTPLUG_CPU */
-+
-+int __cpu_disable(void)
-+{
-+	return -ENOSYS;
-+}
-+
-+void __cpu_die(unsigned int cpu)
-+{
-+	BUG();
-+}
-+
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+int __devinit __cpu_up(unsigned int cpu)
-+{
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+	if (num_online_cpus() == 1)
-+		prepare_for_smp();
-+#endif
-+
-+	xen_smp_intr_init(cpu);
-+	cpu_set(cpu, cpu_online_map);
-+	if (HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL) != 0)
-+		BUG();
-+
-+	return 0;
-+}
-+
-+void __init smp_cpus_done(unsigned int max_cpus)
-+{
-+}
-+
-+int setup_profiling_timer(unsigned int multiplier)
-+{
-+	/* Dummy function. */
-+	return 0;
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
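
handle_vcpu_hotplug_event() above recovers the CPU number from the xenstore watch path with strstr() and sscanf(). A trivial stand-alone check of that parse, with made-up sample paths:

/* Stand-alone check of the "cpu/%d" path parsing used above. */
#include <stdio.h>
#include <string.h>

static int parse_cpu(const char *node)
{
	const char *cpustr = strstr(node, "cpu/");
	int cpu = -1;

	if (cpustr != NULL)
		sscanf(cpustr, "cpu/%d", &cpu);
	return cpu;
}

int main(void)
{
	printf("%d\n", parse_cpu("cpu/3/availability"));   /* 3 */
	printf("%d\n", parse_cpu("cpu/12/availability"));  /* 12 */
	printf("%d\n", parse_cpu("control/shutdown"));     /* -1: not a cpu node */
	return 0;
}
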
-diff -Nurp pristine-linux-2.6.12/arch/xen/kernel/xen_proc.c linux-2.6.12-xen/arch/xen/kernel/xen_proc.c
---- pristine-linux-2.6.12/arch/xen/kernel/xen_proc.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/kernel/xen_proc.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,29 @@
-+
-+#include <linux/config.h>
-+#include <linux/proc_fs.h>
-+#include <asm-xen/xen_proc.h>
-+
-+static struct proc_dir_entry *xen_base;
-+
-+struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
-+{
-+	if ( xen_base == NULL )
-+		if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
-+			panic("Couldn't create /proc/xen");
-+	return create_proc_entry(name, mode, xen_base);
-+}
-+
-+void remove_xen_proc_entry(const char *name)
-+{
-+	remove_proc_entry(name, xen_base);
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/arch/xen/Makefile linux-2.6.12-xen/arch/xen/Makefile
---- pristine-linux-2.6.12/arch/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,93 @@
-+#
-+# xen/Makefile
-+#
-+# This file is included by the global makefile so that you can add your own
-+# architecture-specific flags and dependencies. Remember to do have actions
-+# for "archclean" cleaning up for this architecture.
-+#
-+# This file is subject to the terms and conditions of the GNU General Public
-+# License.  See the file "COPYING" in the main directory of this archive
-+# for more details.
-+#
-+# Copyright (C) 2004 by Christian Limpach
-+#
-+
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+# pick up headers from include/asm-xen/asm in preference over include/asm
-+NOSTDINC_FLAGS  = -nostdinc -iwithprefix include/asm-xen -Iinclude/asm-xen -iwithprefix include
-+ifneq ($(KBUILD_SRC),)
-+NOSTDINC_FLAGS += -I$(srctree)/include/asm-xen
-+endif
-+
-+# make uname return the processor arch
-+UTS_MACHINE := $(XENARCH)
-+
-+core-y	+= arch/xen/kernel/
-+
-+.PHONY: include2/asm
-+include2/asm:
-+ifneq ($(KBUILD_SRC),)
-+	@echo '  SYMLINK ../include/asm-$(XENARCH) -> include2/asm'
-+	$(Q)ln -fsn ../include/asm-$(XENARCH) include2/asm
-+endif
-+
-+include/.asm-ignore: include/asm
-+	@rm -f include/.asm-ignore
-+	@mv include/asm include/.asm-ignore
-+	@echo '  SYMLINK include/asm -> include/asm-$(XENARCH)'
-+	$(Q)if [ ! -d include ]; then mkdir -p include; fi;
-+	@ln -fsn $(srctree)/include/asm-$(XENARCH) include/asm
-+
-+include/asm-xen/asm:
-+	@echo '  SYMLINK $@ -> include/asm-xen/asm-$(XENARCH)'
-+	@mkdir -p include/asm-xen
-+	@ln -fsn $(srctree)/include/asm-xen/asm-$(XENARCH) $@
-+
-+arch/xen/arch:
-+	@rm -f $@
-+	@mkdir -p arch/xen
-+	@ln -fsn $(srctree)/arch/xen/$(XENARCH) $@
-+
-+arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
-+				   include/config/MARKER
-+
-+include/asm-$(ARCH)/asm_offsets.h: arch/$(XENARCH)/kernel/asm-offsets.s
-+	$(call filechk,gen-asm-offsets)
-+
-+prepare: include/.asm-ignore include/asm-xen/asm \
-+	arch/xen/arch include/asm-$(ARCH)/asm_offsets.h include2/asm ;
-+
-+all: vmlinuz
-+
-+vmlinuz: vmlinux
-+	$(Q)$(MAKE) $(build)=arch/xen/boot vmlinuz
-+
-+bzImage: vmlinuz
-+	$(Q)$(MAKE) $(build)=arch/xen/boot bzImage
-+
-+XINSTALL_NAME ?= $(KERNELRELEASE)
-+install: vmlinuz
-+install kernel_install:
-+	mkdir -p $(INSTALL_PATH)/boot
-+	ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-+	rm -f $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	install -m0644 vmlinuz $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	install -m0644 vmlinux $(INSTALL_PATH)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	install -m0664 .config $(INSTALL_PATH)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	install -m0664 System.map $(INSTALL_PATH)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
-+	ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
-+
-+archclean:
-+	@if [ -e arch/xen/arch ]; then $(MAKE) $(clean)=arch/xen/arch; fi;
-+	@rm -f arch/xen/arch include/.asm-ignore include/asm-xen/asm
-+	@rm -f vmlinux-stripped vmlinuz
-+
-+define archhelp
-+  echo  '* vmlinuz	- Compressed kernel image'
-+  echo  '  install	- Install kernel image and config file'
-+endef
-+
-+ifneq ($(XENARCH),)
-+include	$(srctree)/arch/xen/$(XENARCH)/Makefile
-+endif
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/ia32/ia32entry.S linux-2.6.12-xen/arch/xen/x86_64/ia32/ia32entry.S
---- pristine-linux-2.6.12/arch/xen/x86_64/ia32/ia32entry.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/ia32/ia32entry.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,629 @@
-+/*
-+ * Compatibility mode system call entry point for x86-64. 
-+ * 		
-+ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
-+ */		 
-+
-+#include <asm/dwarf2.h>
-+#include <asm/calling.h>
-+#include <asm/offset.h>
-+#include <asm/current.h>
-+#include <asm/errno.h>
-+#include <asm/ia32_unistd.h>	
-+#include <asm/thread_info.h>	
-+#include <asm/segment.h>
-+#include <asm/vsyscall32.h>
-+#include <linux/linkage.h>
-+
-+#define __XEN_X86_64 1
-+	
-+	.macro IA32_ARG_FIXUP noebp=0
-+	movl	%edi,%r8d
-+	.if \noebp
-+	.else
-+	movl	%ebp,%r9d
-+	.endif
-+	xchg	%ecx,%esi
-+	movl	%ebx,%edi
-+	movl	%edx,%edx	/* zero extension */
-+	.endm 
-+
-+	/* clobbers %eax */	
-+	.macro  CLEAR_RREGS
-+	xorl 	%eax,%eax
-+	movq	%rax,R11(%rsp)
-+	movq	%rax,R10(%rsp)
-+	movq	%rax,R9(%rsp)
-+	movq	%rax,R8(%rsp)
-+	.endm
-+
-+#if defined (__XEN_X86_64)
-+#include "../kernel/xen_entry.S"
-+		
-+#define	__swapgs
-+#define __cli
-+#define __sti	
-+#else
-+/*
-+ * Use the native instructions
-+ */	
-+#define	__swapgs	swapgs
-+#define __cli		cli
-+#define __sti		sti	
-+#endif			
-+
-+/*
-+ * 32bit SYSENTER instruction entry.
-+ *
-+ * Arguments:
-+ * %eax	System call number.
-+ * %ebx Arg1
-+ * %ecx Arg2
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp user stack
-+ * 0(%ebp) Arg6	
-+ * 	
-+ * Interrupts off.
-+ *	
-+ * This is purely a fast path. For anything complicated we use the int 0x80
-+ * path below.	Set up a complete hardware stack frame to share code
-+ * with the int 0x80 path.
-+ */ 	
-+ENTRY(ia32_sysenter_target)
-+	CFI_STARTPROC
-+	__swapgs 
-+	movq	%gs:pda_kernelstack, %rsp
-+	addq	$(PDA_STACKOFFSET),%rsp
-+	XEN_UNBLOCK_EVENTS(%r11)	
-+	__sti
-+ 	movl	%ebp,%ebp		/* zero extension */
-+	pushq	$__USER32_DS
-+	pushq	%rbp
-+	pushfq
-+	movl	$VSYSCALL32_SYSEXIT, %r10d
-+	pushq	$__USER32_CS
-+	movl	%eax, %eax
-+	pushq	%r10
-+	pushq	%rax
-+	cld
-+	SAVE_ARGS 0,0,1
-+ 	/* no need to do an access_ok check here because rbp has been
-+ 	   32bit zero extended */ 
-+1:	movl	(%rbp),%r9d
-+ 	.section __ex_table,"a"
-+ 	.quad 1b,ia32_badarg
-+ 	.previous	
-+	GET_THREAD_INFO(%r10)
-+	testl  $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+	jnz  sysenter_tracesys
-+sysenter_do_call:	
-+	cmpl	$(IA32_NR_syscalls),%eax
-+	jae	ia32_badsys
-+	IA32_ARG_FIXUP 1
-+	call	*ia32_sys_call_table(,%rax,8)
-+	movq	%rax,RAX-ARGOFFSET(%rsp)
-+	GET_THREAD_INFO(%r10)
-+	XEN_BLOCK_EVENTS(%r11)	
-+	__cli
-+	testl	$_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-+	jnz	int_ret_from_sys_call
-+	/* clear IF, that popfq doesn't enable interrupts early */
-+	andl  $~0x200,EFLAGS-R11(%rsp) 
-+	RESTORE_ARGS 1,24,1,1,1,1
-+	popfq
-+	popq	%rcx				/* User %esp */
-+	movl	$VSYSCALL32_SYSEXIT,%edx	/* User %eip */
-+	__swapgs
-+	XEN_UNBLOCK_EVENTS(%r11)		
-+	__sti		/* sti only takes effect after the next instruction */
-+	/* sysexit */
-+	.byte	0xf, 0x35  /* TBD */
-+
-+sysenter_tracesys:
-+	SAVE_REST
-+	CLEAR_RREGS
-+	movq	$-ENOSYS,RAX(%rsp)	/* really needed? */
-+	movq	%rsp,%rdi        /* &pt_regs -> arg1 */
-+	call	syscall_trace_enter
-+	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
-+	RESTORE_REST
-+	movl	%ebp, %ebp
-+	/* no need to do an access_ok check here because rbp has been
-+	   32bit zero extended */ 
-+1:	movl	(%rbp),%r9d
-+	.section __ex_table,"a"
-+	.quad 1b,ia32_badarg
-+	.previous
-+	jmp	sysenter_do_call
-+	CFI_ENDPROC
-+
-+/*
-+ * 32bit SYSCALL instruction entry.
-+ *
-+ * Arguments:
-+ * %eax	System call number.
-+ * %ebx Arg1
-+ * %ecx return EIP 
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp Arg2    [note: not saved in the stack frame, should not be touched]
-+ * %esp user stack 
-+ * 0(%esp) Arg6
-+ * 	
-+ * Interrupts off.
-+ *	
-+ * This is purely a fast path. For anything complicated we use the int 0x80
-+ * path below.	Set up a complete hardware stack frame to share code
-+ * with the int 0x80 path.	
-+ */ 	
-+ENTRY(ia32_cstar_target)
-+	CFI_STARTPROC
-+	__swapgs
-+	movl	%esp,%r8d
-+	movq	%gs:pda_kernelstack,%rsp
-+	XEN_UNBLOCK_EVENTS(%r11)	
-+	__sti
-+	SAVE_ARGS 8,1,1
-+	movl 	%eax,%eax	/* zero extension */
-+	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
-+	movq	%rcx,RIP-ARGOFFSET(%rsp)
-+	movq	%rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
-+	movl	%ebp,%ecx
-+	movq	$__USER32_CS,CS-ARGOFFSET(%rsp)
-+	movq	$__USER32_DS,SS-ARGOFFSET(%rsp)
-+	movq	%r11,EFLAGS-ARGOFFSET(%rsp)
-+	movq	%r8,RSP-ARGOFFSET(%rsp)	
-+	/* no need to do an access_ok check here because r8 has been
-+	   32bit zero extended */ 
-+	/* hardware stack frame is complete now */	
-+1:	movl	(%r8),%r9d
-+	.section __ex_table,"a"
-+	.quad 1b,ia32_badarg
-+	.previous	
-+	GET_THREAD_INFO(%r10)
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+	jnz   cstar_tracesys
-+cstar_do_call:	
-+	cmpl $IA32_NR_syscalls,%eax
-+	jae  ia32_badsys
-+	IA32_ARG_FIXUP 1
-+	call *ia32_sys_call_table(,%rax,8)
-+	movq %rax,RAX-ARGOFFSET(%rsp)
-+	GET_THREAD_INFO(%r10)
-+	XEN_BLOCK_EVENTS(%r11)		
-+	__cli
-+	testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-+	jnz  int_ret_from_sys_call
-+	RESTORE_ARGS 1,-ARG_SKIP,1,1,1
-+	movl RIP-ARGOFFSET(%rsp),%ecx
-+	movl EFLAGS-ARGOFFSET(%rsp),%r11d	
-+	movl RSP-ARGOFFSET(%rsp),%esp
-+	__swapgs
-+	sysretl  /* TBD */
-+	
-+cstar_tracesys:	
-+	SAVE_REST
-+	CLEAR_RREGS
-+	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
-+	movq %rsp,%rdi        /* &pt_regs -> arg1 */
-+	call syscall_trace_enter
-+	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
-+	RESTORE_REST
-+	movl RSP-ARGOFFSET(%rsp), %r8d
-+	/* no need to do an access_ok check here because r8 has been
-+	   32bit zero extended */ 
-+1:	movl	(%r8),%r9d
-+	.section __ex_table,"a"
-+	.quad 1b,ia32_badarg
-+	.previous
-+	jmp cstar_do_call
-+				
-+ia32_badarg:
-+	movq $-EFAULT,%rax
-+	jmp ia32_sysret
-+	CFI_ENDPROC
-+
-+/* 
-+ * Emulated IA32 system calls via int 0x80. 
-+ *
-+ * Arguments:	 
-+ * %eax	System call number.
-+ * %ebx Arg1
-+ * %ecx Arg2
-+ * %edx Arg3
-+ * %esi Arg4
-+ * %edi Arg5
-+ * %ebp Arg6    [note: not saved in the stack frame, should not be touched]
-+ *
-+ * Notes:
-+ * Uses the same stack frame as the x86-64 version.	
-+ * All registers except %eax must be saved (but ptrace may violate that)
-+ * Arguments are zero extended. For system calls that want sign extension and
-+ * take long arguments a wrapper is needed. Most calls can just be called
-+ * directly.
-+ * Assumes it is only called from user space and entered with interrupts off.	
-+ */ 				
-+
-+ENTRY(ia32_syscall)
-+	CFI_STARTPROC
-+	__swapgs
-+	XEN_UNBLOCK_EVENTS(%r11)
-+	__sti
-+	movq (%rsp),%rcx
-+	movq 8(%rsp),%r11
-+        addq $0x10,%rsp /* skip rcx and r11 */
-+	movl %eax,%eax
-+	pushq %rax
-+	cld
-+/* 1:	jmp 1b	 */
-+	/* note the registers are not zero extended to the sf.
-+	   this could be a problem. */
-+	SAVE_ARGS 0,0,1
-+	GET_THREAD_INFO(%r10)
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
-+	jnz ia32_tracesys
-+ia32_do_syscall:	
-+	cmpl $(IA32_NR_syscalls),%eax
-+	jae  ia32_badsys
-+	IA32_ARG_FIXUP
-+	call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
-+ia32_sysret:
-+	movq %rax,RAX-ARGOFFSET(%rsp)
-+	jmp int_ret_from_sys_call 
-+
-+ia32_tracesys:			 
-+	SAVE_REST
-+	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
-+	movq %rsp,%rdi        /* &pt_regs -> arg1 */
-+	call syscall_trace_enter
-+	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
-+	RESTORE_REST
-+	jmp ia32_do_syscall
-+
-+ia32_badsys:
-+	movq $0,ORIG_RAX-ARGOFFSET(%rsp)
-+	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
-+	jmp int_ret_from_sys_call
-+
-+ni_syscall:
-+	movq %rax,%rdi
-+	jmp  sys32_ni_syscall			
-+
-+quiet_ni_syscall:
-+	movq $-ENOSYS,%rax
-+	ret
-+	CFI_ENDPROC
-+	
-+	.macro PTREGSCALL label, func, arg
-+	.globl \label
-+\label:
-+	leaq \func(%rip),%rax
-+	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
-+	jmp  ia32_ptregs_common	
-+	.endm
-+
-+	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
-+	PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
-+	PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
-+	PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
-+	PTREGSCALL stub32_execve, sys32_execve, %rcx
-+	PTREGSCALL stub32_fork, sys_fork, %rdi
-+	PTREGSCALL stub32_clone, sys32_clone, %rdx
-+	PTREGSCALL stub32_vfork, sys_vfork, %rdi
-+	PTREGSCALL stub32_iopl, sys_iopl, %rsi
-+	PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
-+
-+ENTRY(ia32_ptregs_common)
-+	CFI_STARTPROC
-+	popq %r11
-+	SAVE_REST
-+	call *%rax
-+	RESTORE_REST
-+	jmp  ia32_sysret	/* misbalances the return cache */
-+	CFI_ENDPROC
-+
-+	.data
-+	.align 8
-+	.globl ia32_sys_call_table
-+ia32_sys_call_table:
-+	.quad sys_restart_syscall
-+	.quad sys_exit
-+	.quad stub32_fork
-+	.quad sys_read
-+	.quad sys_write
-+	.quad sys32_open		/* 5 */
-+	.quad sys_close
-+	.quad sys32_waitpid
-+	.quad sys_creat
-+	.quad sys_link
-+	.quad sys_unlink		/* 10 */
-+	.quad stub32_execve
-+	.quad sys_chdir
-+	.quad compat_sys_time
-+	.quad sys_mknod
-+	.quad sys_chmod		/* 15 */
-+	.quad sys_lchown16
-+	.quad quiet_ni_syscall			/* old break syscall holder */
-+	.quad sys_stat
-+	.quad sys32_lseek
-+	.quad sys_getpid		/* 20 */
-+	.quad compat_sys_mount	/* mount  */
-+	.quad sys_oldumount	/* old_umount  */
-+	.quad sys_setuid16
-+	.quad sys_getuid16
-+	.quad compat_sys_stime	/* stime */		/* 25 */
-+	.quad sys32_ptrace	/* ptrace */
-+	.quad sys_alarm
-+	.quad sys_fstat	/* (old)fstat */
-+	.quad sys_pause
-+	.quad compat_sys_utime	/* 30 */
-+	.quad quiet_ni_syscall	/* old stty syscall holder */
-+	.quad quiet_ni_syscall	/* old gtty syscall holder */
-+	.quad sys_access
-+	.quad sys_nice	
-+	.quad quiet_ni_syscall	/* 35 */	/* old ftime syscall holder */
-+	.quad sys_sync
-+	.quad sys32_kill
-+	.quad sys_rename
-+	.quad sys_mkdir
-+	.quad sys_rmdir		/* 40 */
-+	.quad sys_dup
-+	.quad sys32_pipe
-+	.quad compat_sys_times
-+	.quad quiet_ni_syscall			/* old prof syscall holder */
-+	.quad sys_brk		/* 45 */
-+	.quad sys_setgid16
-+	.quad sys_getgid16
-+	.quad sys_signal
-+	.quad sys_geteuid16
-+	.quad sys_getegid16	/* 50 */
-+	.quad sys_acct
-+	.quad sys_umount			/* new_umount */
-+	.quad quiet_ni_syscall			/* old lock syscall holder */
-+	.quad compat_sys_ioctl
-+	.quad compat_sys_fcntl64		/* 55 */
-+	.quad quiet_ni_syscall			/* old mpx syscall holder */
-+	.quad sys_setpgid
-+	.quad quiet_ni_syscall			/* old ulimit syscall holder */
-+	.quad sys32_olduname
-+	.quad sys_umask		/* 60 */
-+	.quad sys_chroot
-+	.quad sys32_ustat
-+	.quad sys_dup2
-+	.quad sys_getppid
-+	.quad sys_getpgrp		/* 65 */
-+	.quad sys_setsid
-+	.quad sys32_sigaction
-+	.quad sys_sgetmask
-+	.quad sys_ssetmask
-+	.quad sys_setreuid16	/* 70 */
-+	.quad sys_setregid16
-+	.quad stub32_sigsuspend
-+	.quad compat_sys_sigpending
-+	.quad sys_sethostname
-+	.quad compat_sys_setrlimit	/* 75 */
-+	.quad compat_sys_old_getrlimit	/* old_getrlimit */
-+	.quad compat_sys_getrusage
-+	.quad sys32_gettimeofday
-+	.quad sys32_settimeofday
-+	.quad sys_getgroups16	/* 80 */
-+	.quad sys_setgroups16
-+	.quad sys32_old_select
-+	.quad sys_symlink
-+	.quad sys_lstat
-+	.quad sys_readlink		/* 85 */
-+#ifdef CONFIG_IA32_AOUT
-+	.quad sys_uselib
-+#else
-+	.quad quiet_ni_syscall
-+#endif
-+	.quad sys_swapon
-+	.quad sys_reboot
-+	.quad compat_sys_old_readdir
-+	.quad sys32_mmap		/* 90 */
-+	.quad sys_munmap
-+	.quad sys_truncate
-+	.quad sys_ftruncate
-+	.quad sys_fchmod
-+	.quad sys_fchown16		/* 95 */
-+	.quad sys_getpriority
-+	.quad sys_setpriority
-+	.quad quiet_ni_syscall			/* old profil syscall holder */
-+	.quad compat_sys_statfs
-+	.quad compat_sys_fstatfs		/* 100 */
-+	.quad sys_ioperm
-+	.quad compat_sys_socketcall
-+	.quad sys_syslog
-+	.quad compat_sys_setitimer
-+	.quad compat_sys_getitimer	/* 105 */
-+	.quad compat_sys_newstat
-+	.quad compat_sys_newlstat
-+	.quad compat_sys_newfstat
-+	.quad sys32_uname
-+	.quad stub32_iopl		/* 110 */
-+	.quad sys_vhangup
-+	.quad quiet_ni_syscall	/* old "idle" system call */
-+	.quad sys32_vm86_warning	/* vm86old */ 
-+	.quad compat_sys_wait4
-+	.quad sys_swapoff		/* 115 */
-+	.quad sys32_sysinfo
-+	.quad sys32_ipc
-+	.quad sys_fsync
-+	.quad stub32_sigreturn
-+	.quad stub32_clone		/* 120 */
-+	.quad sys_setdomainname
-+	.quad sys_uname
-+	.quad sys_modify_ldt
-+	.quad sys32_adjtimex
-+	.quad sys32_mprotect		/* 125 */
-+	.quad compat_sys_sigprocmask
-+	.quad quiet_ni_syscall		/* create_module */
-+	.quad sys_init_module
-+	.quad sys_delete_module
-+	.quad quiet_ni_syscall		/* 130  get_kernel_syms */
-+	.quad sys_quotactl
-+	.quad sys_getpgid
-+	.quad sys_fchdir
-+	.quad quiet_ni_syscall	/* bdflush */
-+	.quad sys_sysfs		/* 135 */
-+	.quad sys_personality
-+	.quad quiet_ni_syscall	/* for afs_syscall */
-+	.quad sys_setfsuid16
-+	.quad sys_setfsgid16
-+	.quad sys_llseek		/* 140 */
-+	.quad compat_sys_getdents
-+	.quad compat_sys_select
-+	.quad sys_flock
-+	.quad sys_msync
-+	.quad compat_sys_readv		/* 145 */
-+	.quad compat_sys_writev
-+	.quad sys_getsid
-+	.quad sys_fdatasync
-+	.quad sys32_sysctl	/* sysctl */
-+	.quad sys_mlock		/* 150 */
-+	.quad sys_munlock
-+	.quad sys_mlockall
-+	.quad sys_munlockall
-+	.quad sys_sched_setparam
-+	.quad sys_sched_getparam   /* 155 */
-+	.quad sys_sched_setscheduler
-+	.quad sys_sched_getscheduler
-+	.quad sys_sched_yield
-+	.quad sys_sched_get_priority_max
-+	.quad sys_sched_get_priority_min  /* 160 */
-+	.quad sys_sched_rr_get_interval
-+	.quad compat_sys_nanosleep
-+	.quad sys_mremap
-+	.quad sys_setresuid16
-+	.quad sys_getresuid16	/* 165 */
-+	.quad sys32_vm86_warning	/* vm86 */ 
-+	.quad quiet_ni_syscall	/* query_module */
-+	.quad sys_poll
-+	.quad compat_sys_nfsservctl
-+	.quad sys_setresgid16	/* 170 */
-+	.quad sys_getresgid16
-+	.quad sys_prctl
-+	.quad stub32_rt_sigreturn
-+	.quad sys32_rt_sigaction
-+	.quad sys32_rt_sigprocmask	/* 175 */
-+	.quad sys32_rt_sigpending
-+	.quad compat_sys_rt_sigtimedwait
-+	.quad sys32_rt_sigqueueinfo
-+	.quad stub32_rt_sigsuspend
-+	.quad sys32_pread		/* 180 */
-+	.quad sys32_pwrite
-+	.quad sys_chown16
-+	.quad sys_getcwd
-+	.quad sys_capget
-+	.quad sys_capset
-+	.quad stub32_sigaltstack
-+	.quad sys32_sendfile
-+	.quad quiet_ni_syscall		/* streams1 */
-+	.quad quiet_ni_syscall		/* streams2 */
-+	.quad stub32_vfork            /* 190 */
-+	.quad compat_sys_getrlimit
-+	.quad sys32_mmap2
-+	.quad sys32_truncate64
-+	.quad sys32_ftruncate64
-+	.quad sys32_stat64		/* 195 */
-+	.quad sys32_lstat64
-+	.quad sys32_fstat64
-+	.quad sys_lchown
-+	.quad sys_getuid
-+	.quad sys_getgid		/* 200 */
-+	.quad sys_geteuid
-+	.quad sys_getegid
-+	.quad sys_setreuid
-+	.quad sys_setregid
-+	.quad sys_getgroups	/* 205 */
-+	.quad sys_setgroups
-+	.quad sys_fchown
-+	.quad sys_setresuid
-+	.quad sys_getresuid
-+	.quad sys_setresgid	/* 210 */
-+	.quad sys_getresgid
-+	.quad sys_chown
-+	.quad sys_setuid
-+	.quad sys_setgid
-+	.quad sys_setfsuid		/* 215 */
-+	.quad sys_setfsgid
-+	.quad sys_pivot_root
-+	.quad sys_mincore
-+	.quad sys_madvise
-+	.quad compat_sys_getdents64	/* 220 getdents64 */
-+	.quad compat_sys_fcntl64	
-+	.quad quiet_ni_syscall		/* tux */
-+	.quad quiet_ni_syscall    	/* security */
-+	.quad sys_gettid	
-+	.quad sys_readahead	/* 225 */
-+	.quad sys_setxattr
-+	.quad sys_lsetxattr
-+	.quad sys_fsetxattr
-+	.quad sys_getxattr
-+	.quad sys_lgetxattr	/* 230 */
-+	.quad sys_fgetxattr
-+	.quad sys_listxattr
-+	.quad sys_llistxattr
-+	.quad sys_flistxattr
-+	.quad sys_removexattr	/* 235 */
-+	.quad sys_lremovexattr
-+	.quad sys_fremovexattr
-+	.quad sys_tkill
-+	.quad sys_sendfile64 
-+	.quad compat_sys_futex		/* 240 */
-+	.quad compat_sys_sched_setaffinity
-+	.quad compat_sys_sched_getaffinity
-+	.quad sys32_set_thread_area
-+	.quad sys32_get_thread_area
-+	.quad compat_sys_io_setup	/* 245 */
-+	.quad sys_io_destroy
-+	.quad compat_sys_io_getevents
-+	.quad compat_sys_io_submit
-+	.quad sys_io_cancel
-+	.quad sys_fadvise64		/* 250 */
-+	.quad quiet_ni_syscall 	/* free_huge_pages */
-+	.quad sys_exit_group
-+	.quad sys32_lookup_dcookie
-+	.quad sys_epoll_create
-+	.quad sys_epoll_ctl		/* 255 */
-+	.quad sys_epoll_wait
-+	.quad sys_remap_file_pages
-+	.quad sys_set_tid_address
-+	.quad sys32_timer_create
-+	.quad compat_sys_timer_settime	/* 260 */
-+	.quad compat_sys_timer_gettime
-+	.quad sys_timer_getoverrun
-+	.quad sys_timer_delete
-+	.quad compat_sys_clock_settime
-+	.quad compat_sys_clock_gettime	/* 265 */
-+	.quad compat_sys_clock_getres
-+	.quad compat_sys_clock_nanosleep
-+	.quad compat_sys_statfs64
-+	.quad compat_sys_fstatfs64
-+	.quad sys_tgkill		/* 270 */
-+	.quad compat_sys_utimes
-+	.quad sys32_fadvise64_64
-+	.quad quiet_ni_syscall	/* sys_vserver */
-+	.quad sys_mbind
-+	.quad compat_sys_get_mempolicy	/* 275 */
-+	.quad sys_set_mempolicy
-+	.quad compat_sys_mq_open
-+	.quad sys_mq_unlink
-+	.quad compat_sys_mq_timedsend
-+	.quad compat_sys_mq_timedreceive	/* 280 */
-+	.quad compat_sys_mq_notify
-+	.quad compat_sys_mq_getsetattr
-+	.quad quiet_ni_syscall		/* reserved for kexec */
-+	.quad compat_sys_waitid
-+	.quad quiet_ni_syscall		/* sys_altroot */
-+	.quad sys_add_key
-+	.quad sys_request_key
-+	.quad sys_keyctl
-+	/* don't forget to change IA32_NR_syscalls */
-+ia32_syscall_end:		
-+	.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
-+		.quad ni_syscall
-+	.endr
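
The .rept/.endr block pads ia32_sys_call_table out to IA32_NR_syscalls entries, so any in-range 32-bit syscall number without a real handler still lands on an ni_syscall slot. A minimal C sketch of that dispatch behaviour (illustrative only, under the assumption that ni_syscall behaves like the stock sys_ni_syscall() and returns -ENOSYS; this is not the entry-path code from the patch):

    #include <errno.h>

    typedef long (*ia32_handler_t)(void);

    /* Dispatch against a table padded the same way as ia32_sys_call_table. */
    static long dispatch_ia32(unsigned int nr, ia32_handler_t table[],
                              unsigned int nr_syscalls)
    {
            if (nr >= nr_syscalls)
                    return -ENOSYS;     /* number beyond the table entirely */
            return table[nr]();         /* padded slots act like sys_ni_syscall() */
    }
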
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/ia32/Makefile linux-2.6.12-xen/arch/xen/x86_64/ia32/Makefile
---- pristine-linux-2.6.12/arch/xen/x86_64/ia32/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/ia32/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,66 @@
-+#
-+# Makefile for the ia32 kernel emulation subsystem.
-+#
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+CFLAGS	+= -Iarch/$(XENARCH)/kernel
-+
-+obj-$(CONFIG_IA32_EMULATION) := ia32entry.o syscall32.o
-+
-+c-obj-$(CONFIG_IA32_EMULATION) := sys_ia32.o ia32_ioctl.o \
-+	ia32_signal.o tls32.o \
-+	ia32_binfmt.o fpu32.o ptrace32.o 
-+
-+s-obj-y :=
-+
-+sysv-$(CONFIG_SYSVIPC) := ipc32.o
-+c-obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
-+
-+c-obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
-+
-+$(obj)/syscall32.o: $(src)/syscall32.c \
-+	$(foreach F,int80 sysenter syscall,$(obj)/vsyscall-$F.so)
-+
-+# syscall32.c currently contains inline asm which has .incbin directives.
-+# This defeats ccache's signature checks, and also breaks distcc.
-+# Make sure neither ccache nor distcc compiles this file.
-+#
-+$(obj)/syscall32.o: override CC := env CCACHE_DISABLE=1 DISTCC_HOSTS=localhost $(CC)
-+
-+# Teach kbuild about targets
-+targets := $(foreach F,int80 sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
-+
-+# The DSO images are built using a special linker script
-+quiet_cmd_syscall = SYSCALL $@
-+      cmd_syscall = $(CC) -m32 -nostdlib -shared -s \
-+			   -Wl,-soname=linux-gate.so.1 -o $@ \
-+			   -Wl,-T,$(filter-out FORCE,$^)
-+
-+
-+$(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
-+$(obj)/vsyscall-%.so: $(obj)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
-+	$(call if_changed,syscall)
-+
-+AFLAGS_vsyscall-int80.o = -m32 -I$(obj)
-+AFLAGS_vsyscall-sysenter.o = -m32 -I$(obj)
-+AFLAGS_vsyscall-syscall.o = -m32 -I$(obj)
-+CFLAGS_ia32_ioctl.o += -Ifs/
-+
-+s-link	:= vsyscall-syscall.o vsyscall-sysenter.o vsyscall-sigreturn.o
-+
-+$(obj)/vsyscall.lds:
-+	@ln -fsn $(srctree)/arch/x86_64/ia32/$(notdir $@) $@
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
-+	@ln -fsn $(srctree)/arch/x86_64/ia32/$(notdir $@) $@
-+
-+$(obj)/vsyscall-int80.o $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-syscall.o: \
-+	$(obj)/vsyscall-sigreturn.S $(obj)/../../i386/kernel/vsyscall-note.S
-+
-+$(obj)/../../i386/kernel/vsyscall-note.S:
-+	@ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
-+
-+obj-y	+= $(c-obj-y) $(s-obj-y)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/ia32/syscall32.c linux-2.6.12-xen/arch/xen/x86_64/ia32/syscall32.c
---- pristine-linux-2.6.12/arch/xen/x86_64/ia32/syscall32.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/ia32/syscall32.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,153 @@
-+/* Copyright 2002,2003 Andi Kleen, SuSE Labs */
-+
-+/* vsyscall handling for 32bit processes. Map a stub page into it 
-+   on demand because 32bit cannot reach the kernel's fixmaps */
-+
-+#include <linux/mm.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/gfp.h>
-+#include <linux/init.h>
-+#include <linux/stringify.h>
-+#include <linux/security.h>
-+#include <asm/proto.h>
-+#include <asm/tlbflush.h>
-+#include <asm/ia32_unistd.h>
-+
-+#define USE_INT80
-+
-+#ifdef USE_INT80
-+/* 32bit VDSOs mapped into user space. */ 
-+asm(".section \".init.data\",\"aw\"\n"
-+    "syscall32_int80:\n"
-+    ".incbin \"arch/xen/x86_64/ia32/vsyscall-int80.so\"\n"
-+    "syscall32_int80_end:\n"
-+    "syscall32_syscall:\n"
-+    ".incbin \"arch/xen/x86_64/ia32/vsyscall-syscall.so\"\n"
-+    "syscall32_syscall_end:\n"
-+    "syscall32_sysenter:\n"
-+    ".incbin \"arch/xen/x86_64/ia32/vsyscall-sysenter.so\"\n"
-+    "syscall32_sysenter_end:\n"
-+    ".previous");
-+
-+extern unsigned char syscall32_int80[], syscall32_int80_end[];
-+#else
-+/* 32bit VDSOs mapped into user space. */ 
-+asm(".section \".init.data\",\"aw\"\n"
-+    "syscall32_syscall:\n"
-+    ".incbin \"arch/xen/x86_64/ia32/vsyscall-syscall.so\"\n"
-+    "syscall32_syscall_end:\n"
-+    "syscall32_sysenter:\n"
-+    ".incbin \"arch/xen/x86_64/ia32/vsyscall-sysenter.so\"\n"
-+    "syscall32_sysenter_end:\n"
-+    ".previous");
-+
-+static int use_sysenter = -1;
-+#endif
-+
-+extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
-+extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
-+extern int sysctl_vsyscall32;
-+
-+char *syscall32_page; 
-+
-+static struct page *
-+syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
-+{
-+	struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
-+	get_page(p);
-+	return p;
-+}
-+
-+/* Prevent VMA merging */
-+static void syscall32_vma_close(struct vm_area_struct *vma)
-+{
-+}
-+
-+static struct vm_operations_struct syscall32_vm_ops = {
-+	.close = syscall32_vma_close,
-+	.nopage = syscall32_nopage,
-+};
-+
-+struct linux_binprm;
-+
-+/* Setup a VMA at program startup for the vsyscall page */
-+int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
-+{
-+	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
-+	struct vm_area_struct *vma;
-+	struct mm_struct *mm = current->mm;
-+
-+	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
-+	if (!vma)
-+		return -ENOMEM;
-+	if (security_vm_enough_memory(npages)) {
-+		kmem_cache_free(vm_area_cachep, vma);
-+		return -ENOMEM;
-+	}
-+
-+	memset(vma, 0, sizeof(struct vm_area_struct));
-+	/* Could randomize here */
-+	vma->vm_start = VSYSCALL32_BASE;
-+	vma->vm_end = VSYSCALL32_END;
-+	/* MAYWRITE to allow gdb to COW and set breakpoints */
-+	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
-+	vma->vm_flags |= mm->def_flags;
-+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
-+	vma->vm_ops = &syscall32_vm_ops;
-+	vma->vm_mm = mm;
-+
-+	down_write(&mm->mmap_sem);
-+	insert_vm_struct(mm, vma);
-+	mm->total_vm += npages;
-+	up_write(&mm->mmap_sem);
-+	return 0;
-+}
-+
-+static int __init init_syscall32(void)
-+{ 
-+	syscall32_page = (void *)get_zeroed_page(GFP_KERNEL); 
-+	if (!syscall32_page) 
-+		panic("Cannot allocate syscall32 page"); 
-+
-+#ifdef USE_INT80
-+	/*
-+	 * At this point we use int 0x80.
-+	 */
-+	memcpy(syscall32_page, syscall32_int80,
-+	       syscall32_int80_end - syscall32_int80);
-+#else
-+
-+ 	if (use_sysenter > 0) {
-+ 		memcpy(syscall32_page, syscall32_sysenter,
-+ 		       syscall32_sysenter_end - syscall32_sysenter);
-+ 	} else {
-+  		memcpy(syscall32_page, syscall32_syscall,
-+  		       syscall32_syscall_end - syscall32_syscall);
-+  	}	
-+#endif
-+	return 0;
-+} 
-+
-+/*
-+ * This must be done early in case we have an initrd containing 32-bit
-+ * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
-+ */	
-+core_initcall(init_syscall32); 
-+
-+/* May not be __init: called during resume */
-+void syscall32_cpu_init(void)
-+{
-+#ifndef USE_INT80
-+	if (use_sysenter < 0)
-+ 		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);
-+
-+	/* Load these always in case some future AMD CPU supports
-+	   SYSENTER from compat mode too. */
-+	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)(__KERNEL_CS | 3));
-+	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-+	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
-+
-+	wrmsrl(MSR_CSTAR, ia32_cstar_target);
-+#endif
-+}
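
As a side note on syscall32_nopage() above: the fault address is translated into the same offset within the single page allocated by init_syscall32(), which suffices because VSYSCALL32_BASE..VSYSCALL32_END covers just that one stub page (an assumption read off the one-page allocation; the names below are illustrative, not from the patch):

    /* fault_addr lies inside the vsyscall VMA starting at vma_start;
     * stub_page is the kernel address returned by get_zeroed_page(). */
    static char *stub_address_for_fault(unsigned long fault_addr,
                                        unsigned long vma_start, char *stub_page)
    {
            unsigned long offset = fault_addr - vma_start;
            return stub_page + offset;  /* same arithmetic as syscall32_nopage() */
    }
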
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/ia32/vsyscall-int80.S linux-2.6.12-xen/arch/xen/x86_64/ia32/vsyscall-int80.S
---- pristine-linux-2.6.12/arch/xen/x86_64/ia32/vsyscall-int80.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/ia32/vsyscall-int80.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,57 @@
-+/*
-+ * Code for the vsyscall page.  This version uses the old int $0x80 method.
-+ *
-+ * NOTE:
-+ * 1) __kernel_vsyscall _must_ be first in this page.
-+ * 2) there are alignment constraints on this stub, see vsyscall-sigreturn.S
-+ *    for details.
-+ */
-+#include <asm/ia32_unistd.h>
-+#include <asm/offset.h>
-+
-+	.text
-+	.section .text.vsyscall,"ax"
-+	.globl __kernel_vsyscall
-+	.type __kernel_vsyscall,@function
-+__kernel_vsyscall:
-+.LSTART_vsyscall:
-+	int $0x80
-+	ret
-+.LEND_vsyscall:
-+	.size __kernel_vsyscall,.-.LSTART_vsyscall
-+	.previous
-+
-+	.section .eh_frame,"a", at progbits
-+.LSTARTFRAME:
-+	.long .LENDCIE-.LSTARTCIE
-+.LSTARTCIE:
-+	.long 0			/* CIE ID */
-+	.byte 1			/* Version number */
-+	.string "zR"		/* NUL-terminated augmentation string */
-+	.uleb128 1		/* Code alignment factor */
-+	.sleb128 -4		/* Data alignment factor */
-+	.byte 8			/* Return address register column */
-+	.uleb128 1		/* Augmentation value length */
-+	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-+	.byte 0x0c		/* DW_CFA_def_cfa */
-+	.uleb128 4
-+	.uleb128 4
-+	.byte 0x88		/* DW_CFA_offset, column 0x8 */
-+	.uleb128 1
-+	.align 4
-+.LENDCIE:
-+
-+	.long .LENDFDE1-.LSTARTFDE1	/* Length FDE */
-+.LSTARTFDE1:
-+	.long .LSTARTFDE1-.LSTARTFRAME	/* CIE pointer */
-+	.long .LSTART_vsyscall-.	/* PC-relative start address */
-+	.long .LEND_vsyscall-.LSTART_vsyscall
-+	.uleb128 0			/* Augmentation length */
-+	.align 4
-+.LENDFDE1:
-+		
-+/*
-+ * Get the common code for the sigreturn entry points.
-+ */
-+#define SYSCALL_ENTER_KERNEL    int $0x80
-+#include "vsyscall-sigreturn.S"
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/Kconfig linux-2.6.12-xen/arch/xen/x86_64/Kconfig
---- pristine-linux-2.6.12/arch/xen/x86_64/Kconfig	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/Kconfig	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,480 @@
-+#
-+# For a description of the syntax of this configuration file,
-+# see Documentation/kbuild/kconfig-language.txt.
-+#
-+# Note: ISA is disabled and will hopefully never be enabled.
-+# If you managed to buy an ISA x86-64 box you'll have to fix all the
-+# ISA drivers you need yourself.
-+#
-+
-+menu "X86_64 processor configuration"
-+
-+config XENARCH
-+	string
-+	default x86_64
-+
-+config X86_64
-+	bool
-+	default y
-+	help
-+	  Port to the x86-64 architecture. x86-64 is a 64-bit extension to the
-+	  classical 32-bit x86 architecture. For details see
-+	  <http://www.x86-64.org/>.
-+
-+config 64BIT
-+	def_bool y
-+
-+config X86
-+	bool
-+	default y
-+
-+config MMU
-+	bool
-+	default y
-+
-+config ISA
-+	bool
-+
-+config SBUS
-+	bool
-+
-+config RWSEM_GENERIC_SPINLOCK
-+	bool
-+	default y
-+
-+config RWSEM_XCHGADD_ALGORITHM
-+	bool
-+
-+config GENERIC_CALIBRATE_DELAY
-+	bool
-+	default y
-+
-+config X86_CMPXCHG
-+	bool
-+	default y
-+
-+config EARLY_PRINTK
-+	bool "Early Printk"
-+	default n
-+	help
-+	  Write kernel log output directly into the VGA buffer or to a serial
-+	  port.
-+
-+	  This is useful for kernel debugging when your machine crashes very
-+	  early before the console code is initialized. For normal operation
-+	  it is not recommended because it looks ugly and doesn't cooperate
-+	  with klogd/syslogd or the X server. You should normally say N here,
-+	  unless you want to debug such a crash.
-+
-+config GENERIC_ISA_DMA
-+	bool
-+	default y
-+
-+config GENERIC_IOMAP
-+	bool
-+	default y
-+
-+#source "init/Kconfig"
-+
-+
-+menu "Processor type and features"
-+
-+choice
-+	prompt "Processor family"
-+	default MK8
-+
-+#config MK8
-+#	bool "AMD-Opteron/Athlon64"
-+#	help
-+#	  Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
-+
-+config MPSC
-+       bool "Intel EM64T"
-+       help
-+	  Optimize for Intel Pentium 4 and Xeon CPUs with Intel
-+	  Extended Memory 64 Technology(EM64T). For details see
-+	  Extended Memory 64 Technology (EM64T). For details see
-+
-+config GENERIC_CPU
-+	bool "Generic-x86-64"
-+	help
-+	  Generic x86-64 CPU.
-+
-+endchoice
-+
-+#
-+# Define implied options from the CPU selection here
-+#
-+config X86_L1_CACHE_BYTES
-+	int
-+	default "128" if GENERIC_CPU || MPSC
-+	default "64" if MK8
-+
-+config X86_L1_CACHE_SHIFT
-+	int
-+	default "7" if GENERIC_CPU || MPSC
-+	default "6" if MK8
-+
-+config X86_TSC
-+	bool
-+	default n
-+
-+config X86_GOOD_APIC
-+	bool
-+	default y
-+
-+config X86_IO_APIC
-+	bool
-+	default XEN_PRIVILEGED_GUEST
-+
-+config X86_XEN_GENAPIC
-+	bool
-+	default XEN_PRIVILEGED_GUEST || SMP
-+
-+config X86_LOCAL_APIC
-+	bool
-+	default XEN_PRIVILEGED_GUEST
-+
-+config MICROCODE
-+	tristate "/dev/cpu/microcode - Intel CPU microcode support"
-+	---help---
-+	  If you say Y here and also enable the /dev file system support in
-+	  the 'File systems' section, you will be able to update the
-+	  microcode on Intel processors. You will obviously need the actual
-+	  microcode binary data itself which is
-+	  not shipped with the Linux kernel.
-+
-+	  For latest news and information on obtaining all the required
-+	  ingredients for this driver, check:
-+	  <http://www.urbanmyth.org/microcode/>.
-+
-+	  To compile this driver as a module, choose M here: the
-+	  module will be called microcode.
-+	  If you use modprobe or kmod you may also want to add the line
-+	  'alias char-major-10-184 microcode' to your /etc/modules.conf file.
-+
-+config X86_MSR
-+	tristate "/dev/cpu/*/msr - Model-specific register support"
-+	help
-+	  This device gives privileged processes access to the x86
-+	  Model-Specific Registers (MSRs).  It is a character device with
-+	  major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
-+	  MSR accesses are directed to a specific CPU on multi-processor
-+	  systems.
-+
-+config X86_CPUID
-+	tristate "/dev/cpu/*/cpuid - CPU information support"
-+	help
-+	  This device gives processes access to the x86 CPUID instruction to
-+	  be executed on a specific processor.  It is a character device
-+	  with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
-+	  /dev/cpu/31/cpuid.
-+
-+# disable it for opteron optimized builds because it pulls in ACPI_BOOT
-+config X86_HT
-+	bool
-+	depends on SMP && !MK8
-+	default y
-+
-+config MATH_EMULATION
-+	bool
-+
-+config MCA
-+	bool
-+
-+config EISA
-+	bool
-+
-+config MTRR
-+	bool "MTRR (Memory Type Range Register) support"
-+	---help---
-+	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
-+	  the Memory Type Range Registers (MTRRs) may be used to control
-+	  processor access to memory ranges. This is most useful if you have
-+	  a video (VGA) card on a PCI or AGP bus. Enabling write-combining
-+	  allows bus write transfers to be combined into a larger transfer
-+	  before bursting over the PCI/AGP bus. This can increase performance
-+	  of image write operations 2.5 times or more. Saying Y here creates a
-+	  /proc/mtrr file which may be used to manipulate your processor's
-+	  MTRRs. Typically the X server should use this.
-+
-+	  This code has a reasonably generic interface so that similar
-+	  control registers on other processors can be easily supported
-+	  as well.
-+
-+	  Saying Y here also fixes a problem with buggy SMP BIOSes which only
-+	  set the MTRRs for the boot CPU and not for the secondary CPUs. This
-+	  can lead to all sorts of problems, so it's good to say Y here.
-+
-+	  Just say Y here, all x86-64 machines support MTRRs.
-+
-+	  See <file:Documentation/mtrr.txt> for more information.
-+
-+config SMP
-+	bool "Symmetric multi-processing support"
-+	---help---
-+	  This enables support for systems with more than one CPU. If you have
-+	  a system with only one CPU, like most personal computers, say N. If
-+	  you have a system with more than one CPU, say Y.
-+
-+	  If you say N here, the kernel will run on single and multiprocessor
-+	  machines, but will use only one CPU of a multiprocessor machine. If
-+	  you say Y here, the kernel will run on many, but not all,
-+	  singleprocessor machines. On a singleprocessor machine, the kernel
-+	  will run faster if you say N here.
-+
-+	  If you don't know what to do here, say N.
-+
-+#config PREEMPT
-+#	bool "Preemptible Kernel"
-+#	---help---
-+#	  This option reduces the latency of the kernel when reacting to
-+#	  real-time or interactive events by allowing a low priority process to
-+#	  be preempted even if it is in kernel mode executing a system call.
-+#	  This allows applications to run more reliably even when the system is
-+#	  under load. On the other hand it may also break your drivers and add
-+#	  priority inheritance problems to your system. Don't select it if
-+#	  you rely on a stable system or have slightly obscure hardware.
-+#	  It's also not very well tested on x86-64 currently.
-+#	  You have been warned.
-+#
-+#	  Say Y here if you are feeling brave and building a kernel for a
-+#	  desktop, embedded or real-time system.  Say N if you are unsure.
-+
-+config SCHED_SMT
-+	bool "SMT (Hyperthreading) scheduler support"
-+	depends on SMP
-+	default n
-+	help
-+	  SMT scheduler support improves the CPU scheduler's decision making
-+	  when dealing with Intel Pentium 4 chips with HyperThreading at a
-+	  cost of slightly increased overhead in some places. If unsure say
-+	  N here.
-+
-+config K8_NUMA
-+       bool "K8 NUMA support"
-+       select NUMA
-+       depends on SMP
-+       help
-+	  Enable NUMA (Non-Uniform Memory Access) support for
-+	  AMD Opteron Multiprocessor systems. The kernel will try to allocate
-+	  memory used by a CPU on the local memory controller of the CPU
-+	  and add some more NUMA awareness to the kernel.
-+	  This code is recommended on all multiprocessor Opteron systems
-+	  and normally doesn't hurt on others.
-+
-+config NUMA_EMU
-+	bool "NUMA emulation support"
-+	select NUMA
-+	depends on SMP
-+	help
-+	  Enable NUMA emulation. A flat machine will be split
-+	  into virtual nodes when booted with "numa=fake=N", where N is the
-+	  number of nodes. This is only useful for debugging.
-+
-+config DISCONTIGMEM
-+       bool
-+       depends on NUMA
-+       default y
-+
-+config NUMA
-+       bool
-+       default n
-+
-+config HAVE_DEC_LOCK
-+	bool
-+	depends on SMP
-+	default y
-+
-+# actually 64 maximum, but you need to fix the APIC code first
-+# to use clustered mode or whatever your big iron needs
-+config NR_CPUS
-+	int "Maximum number of CPUs (2-255)"
-+	range 2 255
-+	depends on SMP
-+	default "16"
-+	help
-+	  This allows you to specify the maximum number of CPUs which this
-+	  kernel will support.  The maximum supported value is 255 and the
-+	  minimum value which makes sense is 2.
-+
-+	  This is purely to save memory - each supported CPU requires
-+	  memory in the static kernel configuration.
-+
-+config HPET_TIMER
-+	bool
-+	default n
-+	help
-+	  Use the IA-PC HPET (High Precision Event Timer) to manage
-+	  time in preference to the PIT and RTC, if a HPET is
-+	  present.  The HPET provides a stable time base on SMP
-+	  systems, unlike the RTC, but it is more expensive to access,
-+	  as it is off-chip.  You can find the HPET spec at
-+	  <http://www.intel.com/labs/platcomp/hpet/hpetspec.htm>.
-+
-+	  If unsure, say Y.
-+
-+config HPET_EMULATE_RTC
-+	bool "Provide RTC interrupt"
-+	depends on HPET_TIMER && RTC=y
-+
-+config GART_IOMMU
-+	bool "IOMMU support"
-+	depends on PCI
-+	help
-+	  Support the K8 IOMMU. Needed to run systems with more than 4GB of memory
-+	  properly with 32-bit PCI devices that do not support DAC (Double Address
-+	  Cycle). The IOMMU can be turned off at runtime with the iommu=off parameter.
-+	  Normally the kernel will make the right choice by itself.
-+	  If unsure, say Y.
-+
-+# need this always enabled with GART_IOMMU for the VIA workaround
-+config SWIOTLB
-+       bool
-+       depends on PCI
-+       default y
-+
-+config DUMMY_IOMMU
-+	bool
-+	depends on !GART_IOMMU
-+	default y
-+	help
-+	  Don't use IOMMU code. This will cause problems when you have more than 4GB
-+	  of memory and any 32-bit devices. Don't turn on unless you know what you
-+	  are doing.
-+
-+config X86_MCE
-+	bool "Machine check support" if EMBEDDED
-+	default n
-+	help
-+	   Include a machine check error handler to report hardware errors.
-+	   This version will require the mcelog utility to decode some
-+	   machine check error logs. See
-+	   ftp://ftp.x86-64.org/pub/linux/tools/mcelog
-+
-+config SECCOMP
-+	bool "Enable seccomp to safely compute untrusted bytecode"
-+	depends on PROC_FS
-+	default y
-+	help
-+	  This kernel feature is useful for number crunching applications
-+	  that may need to compute untrusted bytecode during their
-+	  execution. By using pipes or other transports made available to
-+	  the process as file descriptors supporting the read/write
-+	  syscalls, it's possible to isolate those applications in
-+	  their own address space using seccomp. Once seccomp is
-+	  enabled via /proc/<pid>/seccomp, it cannot be disabled
-+	  and the task is only allowed to execute a few safe syscalls
-+	  defined by each seccomp mode.
-+
-+	  If unsure, say Y. Only embedded should say N here.
-+
-+endmenu
-+
-+#
-+# Use the generic interrupt handling code in kernel/irq/:
-+#
-+config GENERIC_HARDIRQS
-+	bool
-+	default y
-+
-+config GENERIC_IRQ_PROBE
-+	bool
-+	default y
-+
-+# we have no ISA slots, but we do have ISA-style DMA.
-+config ISA_DMA_API
-+	bool
-+	default y
-+
-+menu "Power management options"
-+
-+source kernel/power/Kconfig
-+
-+source "arch/x86_64/kernel/cpufreq/Kconfig"
-+
-+endmenu
-+
-+menu "Bus options (PCI etc.)"
-+
-+config PCI
-+	bool "PCI support"
-+
-+# x86-64 doesn't support PCI BIOS access from long mode so always go direct.
-+config PCI_DIRECT
-+	bool
-+	depends on PCI
-+	default y
-+
-+config PCI_MMCONFIG
-+	bool "Support mmconfig PCI config space access"
-+	depends on PCI && ACPI
-+	select ACPI_BOOT
-+
-+config UNORDERED_IO
-+       bool "Unordered IO mapping access"
-+       depends on EXPERIMENTAL
-+       help
-+         Use unordered stores to access IO memory mappings in device drivers.
-+	 Still very experimental. When a driver works on IA64/ppc64/pa-risc it should
-+	 work with this option, but it makes the drivers behave differently
-+	 from i386. Requires that the driver writer used memory barriers
-+	 properly.
-+
-+#source "drivers/pci/pcie/Kconfig"
-+
-+#source "drivers/pci/Kconfig"
-+
-+#source "drivers/pcmcia/Kconfig"
-+
-+#source "drivers/pci/hotplug/Kconfig"
-+
-+endmenu
-+
-+
-+menu "Executable file formats / Emulations"
-+
-+# source "fs/Kconfig.binfmt"
-+
-+config IA32_EMULATION
-+	bool "IA32 Emulation"
-+	help
-+	  Include code to run 32-bit programs under a 64-bit kernel. You should likely
-+	  turn this on, unless you're 100% sure that you don't have any 32-bit programs
-+	  left.
-+
-+config IA32_AOUT
-+       bool "IA32 a.out support"
-+       depends on IA32_EMULATION
-+       help
-+         Support old a.out binaries in the 32bit emulation.
-+
-+config COMPAT
-+	bool
-+	depends on IA32_EMULATION
-+	default y
-+
-+config SYSVIPC_COMPAT
-+	bool
-+	depends on COMPAT && SYSVIPC
-+	default y
-+
-+config UID16
-+	bool
-+	depends on IA32_EMULATION
-+	default y
-+
-+endmenu
-+
-+# source drivers/Kconfig
-+
-+# source "drivers/firmware/Kconfig"
-+
-+# source fs/Kconfig
-+
-+#source "arch/x86_64/oprofile/Kconfig"
-+
-+# source "security/Kconfig"
-+
-+# source "crypto/Kconfig"
-+
-+endmenu
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/acpi/Makefile linux-2.6.12-xen/arch/xen/x86_64/kernel/acpi/Makefile
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/acpi/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/acpi/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,20 @@
-+i386-obj-$(CONFIG_ACPI_BOOT)		:= boot.o
-+c-obj-$(CONFIG_X86_IO_APIC)	        := earlyquirk.o
-+c-obj-$(CONFIG_ACPI_SLEEP)	        += sleep.o
-+s-obj-$(CONFIG_ACPI_SLEEP)	        += wakeup.o
-+
-+c-link                                  :=
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-+	@ln -fsn $(srctree)/arch/i386/kernel/acpi/$(notdir $@) $@
-+
-+$(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
-+	@ln -fsn $(srctree)/arch/x86_64/kernel/acpi/$(notdir $@) $@
-+
-+$(patsubst %.o,$(obj)/%.c,$(i386-obj-y)):
-+	@ln -fsn $(srctree)/arch/xen/i386/kernel/acpi/$(notdir $@) $@
-+
-+obj-y	+= $(c-obj-y) $(s-obj-y) $(i386-obj-y)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/apic.c linux-2.6.12-xen/arch/xen/x86_64/kernel/apic.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/apic.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/apic.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,201 @@
-+/*
-+ *	Local APIC handling, local APIC timers
-+ *
-+ *	(c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
-+ *
-+ *	Fixes
-+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
-+ *					thanks to Eric Gilmore
-+ *					and Rolf G. Tews
-+ *					for testing these extensively.
-+ *	Maciej W. Rozycki	:	Various updates and fixes.
-+ *	Mikael Pettersson	:	Power Management for UP-APIC.
-+ *	Pavel Machek and
-+ *	Mikael Pettersson	:	PM converted to driver model.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/irq.h>
-+#include <linux/delay.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/sysdev.h>
-+
-+#include <asm/atomic.h>
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/desc.h>
-+#include <asm/arch_hooks.h>
-+#include <asm/hpet.h>
-+
-+#include "io_ports.h"
-+
-+/*
-+ * Debug level
-+ */
-+int apic_verbosity;
-+int disable_apic;
-+
-+void smp_local_timer_interrupt(struct pt_regs *regs)
-+{
-+
-+	profile_tick(CPU_PROFILING, regs);
-+#ifndef CONFIG_XEN
-+	int cpu = smp_processor_id();
-+
-+	if (--per_cpu(prof_counter, cpu) <= 0) {
-+		/*
-+		 * The multiplier may have changed since the last time we got
-+		 * to this point as a result of the user writing to
-+		 * /proc/profile. In this case we need to adjust the APIC
-+		 * timer accordingly.
-+		 *
-+		 * Interrupts are already masked off at this point.
-+		 */
-+		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
-+		if (per_cpu(prof_counter, cpu) != 
-+		    per_cpu(prof_old_multiplier, cpu)) {
-+			__setup_APIC_LVTT(calibration_result/
-+					per_cpu(prof_counter, cpu));
-+			per_cpu(prof_old_multiplier, cpu) =
-+				per_cpu(prof_counter, cpu);
-+		}
-+
-+#ifdef CONFIG_SMP
-+		update_process_times(user_mode(regs));
-+#endif
-+	}
-+#endif
-+
-+	/*
-+	 * We take the 'long' return path, and there every subsystem
-+	 * grabs the appropriate locks (kernel lock/ irq lock).
-+	 *
-+	 * we might want to decouple profiling from the 'long path',
-+	 * and do the profiling totally in assembly.
-+	 *
-+	 * Currently this isn't too much of an issue (performance wise),
-+	 * we can take more than 100K local irqs per second on a 100 MHz P5.
-+	 */
-+}
-+
-+/*
-+ * Local APIC timer interrupt. This is the most natural way for doing
-+ * local interrupts, but local timer interrupts can be emulated by
-+ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
-+ *
-+ * [ if a single-CPU system runs an SMP kernel then we call the local
-+ *   interrupt as well. Thus we cannot inline the local irq ... ]
-+ */
-+void smp_apic_timer_interrupt(struct pt_regs *regs)
-+{
-+	/*
-+	 * the NMI deadlock-detector uses this.
-+	 */
-+	add_pda(apic_timer_irqs, 1);
-+
-+	/*
-+	 * NOTE! We'd better ACK the irq immediately,
-+	 * because timer handling can be slow.
-+	 */
-+	ack_APIC_irq();
-+	/*
-+	 * update_process_times() expects us to have done irq_enter().
-+	 * Besides, if we don't, timer interrupts ignore the global
-+	 * interrupt lock, which is the WrongThing (tm) to do.
-+	 */
-+	irq_enter();
-+	smp_local_timer_interrupt(regs);
-+	irq_exit();
-+}
-+
-+/*
-+ * This interrupt should _never_ happen with our APIC/SMP architecture
-+ */
-+asmlinkage void smp_spurious_interrupt(void)
-+{
-+	unsigned int v;
-+	irq_enter();
-+	/*
-+	 * Check if this really is a spurious interrupt and ACK it
-+	 * if it is a vectored one.  Just in case...
-+	 * Spurious interrupts should not be ACKed.
-+	 */
-+	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
-+	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
-+		ack_APIC_irq();
-+
-+#if 0
-+	static unsigned long last_warning; 
-+	static unsigned long skipped; 
-+
-+	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
-+	if (time_before(last_warning+30*HZ,jiffies)) { 
-+		printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
-+		       smp_processor_id(), skipped);
-+		last_warning = jiffies; 
-+		skipped = 0;
-+	} else { 
-+		skipped++; 
-+	} 
-+#endif 
-+	irq_exit();
-+}
-+
-+/*
-+ * This interrupt should never happen with our APIC/SMP architecture
-+ */
-+
-+asmlinkage void smp_error_interrupt(void)
-+{
-+	unsigned int v, v1;
-+
-+	irq_enter();
-+	/* First tickle the hardware, only then report what went on. -- REW */
-+	v = apic_read(APIC_ESR);
-+	apic_write(APIC_ESR, 0);
-+	v1 = apic_read(APIC_ESR);
-+	ack_APIC_irq();
-+	atomic_inc(&irq_err_count);
-+
-+	/* Here is what the APIC error bits mean:
-+	   0: Send CS error
-+	   1: Receive CS error
-+	   2: Send accept error
-+	   3: Receive accept error
-+	   4: Reserved
-+	   5: Send illegal vector
-+	   6: Received illegal vector
-+	   7: Illegal register address
-+	*/
-+	printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
-+	        smp_processor_id(), v , v1);
-+	irq_exit();
-+}
-+
-+int get_physical_broadcast(void)
-+{
-+        return 0xff;
-+}
-+
-+/*
-+ * This initializes the IO-APIC and APIC hardware if this is
-+ * a UP kernel.
-+ */
-+int __init APIC_init_uniprocessor (void)
-+{
-+#ifdef CONFIG_X86_IO_APIC
-+	if (smp_found_config)
-+		if (!skip_ioapic_setup && nr_ioapics)
-+			setup_IO_APIC();
-+#endif
-+
-+	return 0;
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/e820.c linux-2.6.12-xen/arch/xen/x86_64/kernel/e820.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/e820.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/e820.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,676 @@
-+/* 
-+ * Handle the memory map.
-+ * The functions here do the job until bootmem takes over.
-+ * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
-+ *
-+ *  Getting sanitize_e820_map() in sync with i386 version by applying change:
-+ *  -  Provisions for empty E820 memory regions (reported by certain BIOSes).
-+ *     Alex Achenbach <xela at slit.de>, December 2002.
-+ *  Venkatesh Pallipadi <venkatesh.pallipadi at intel.com>
-+ *
-+ */
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/types.h>
-+#include <linux/init.h>
-+#include <linux/bootmem.h>
-+#include <linux/ioport.h>
-+#include <linux/string.h>
-+#include <asm/page.h>
-+#include <asm/e820.h>
-+#include <asm/proto.h>
-+#include <asm/bootsetup.h>
-+#include <asm-xen/xen-public/memory.h>
-+
-+unsigned long pci_mem_start = 0xaeedbabe;
-+
-+/* 
-+ * PFN of last memory page.
-+ */
-+unsigned long end_pfn; 
-+unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;  
-+unsigned long end_pfn_map; 
-+
-+/* 
-+ * Add a memory region to the kernel e820 map.
-+ */ 
-+void __init add_memory_region(unsigned long start, unsigned long size, int type)
-+{
-+	int x = e820.nr_map;
-+
-+	if (x == E820MAX) {
-+		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-+		return;
-+	}
-+
-+	e820.map[x].addr = start;
-+	e820.map[x].size = size;
-+	e820.map[x].type = type;
-+	e820.nr_map++;
-+}
-+
-+#ifndef CONFIG_XEN
-+extern char _end[];
-+
-+/* 
-+ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
-+ * The direct mapping extends to end_pfn_map, so that we can directly access
-+ * apertures, ACPI and other tables without having to play with fixmaps.
-+ */ 
-+
-+/* 
-+ * Last pfn which the user wants to use.
-+ */
-+
-+extern struct resource code_resource, data_resource;
-+
-+/* Check for some hardcoded bad areas that early boot is not allowed to touch */ 
-+static inline int bad_addr(unsigned long *addrp, unsigned long size)
-+{ 
-+	unsigned long addr = *addrp, last = addr + size; 
-+
-+	/* various gunk below that is needed for SMP startup */
-+	if (addr < 0x8000) { 
-+		*addrp = 0x8000;
-+		return 1; 
-+	}
-+
-+	/* direct mapping tables of the kernel */
-+	if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) { 
-+		*addrp = table_end << PAGE_SHIFT; 
-+		return 1;
-+	} 
-+
-+	/* initrd */ 
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (LOADER_TYPE && INITRD_START && last >= INITRD_START && 
-+	    addr < INITRD_START+INITRD_SIZE) { 
-+		*addrp = INITRD_START + INITRD_SIZE; 
-+		return 1;
-+	} 
-+#endif
-+	/* kernel code + 640k memory hole (later should not be needed, but 
-+	   be paranoid for now) */
-+	if (last >= 640*1024 && addr < __pa_symbol(&_end)) { 
-+		*addrp = __pa_symbol(&_end);
-+		return 1;
-+	}
-+	/* XXX ramdisk image here? */ 
-+	return 0;
-+} 
-+
-+int __init e820_mapped(unsigned long start, unsigned long end, unsigned type) 
-+{ 
-+	int i;
-+	for (i = 0; i < e820.nr_map; i++) { 
-+		struct e820entry *ei = &e820.map[i]; 
-+		if (type && ei->type != type) 
-+			continue;
-+		if (ei->addr >= end || ei->addr + ei->size < start) 
-+			continue; 
-+		return 1; 
-+	} 
-+	return 0;
-+}
-+
-+/* 
-+ * Find a free area in a specific range. 
-+ */ 
-+unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size) 
-+{ 
-+	int i; 
-+	for (i = 0; i < e820.nr_map; i++) { 
-+		struct e820entry *ei = &e820.map[i]; 
-+		unsigned long addr = ei->addr, last; 
-+		if (ei->type != E820_RAM) 
-+			continue; 
-+		if (addr < start) 
-+			addr = start;
-+		if (addr > ei->addr + ei->size) 
-+			continue; 
-+		while (bad_addr(&addr, size) && addr+size < ei->addr + ei->size)
-+			;
-+		last = addr + size;
-+		if (last > ei->addr + ei->size)
-+			continue;
-+		if (last > end) 
-+			continue;
-+		return addr; 
-+	} 
-+	return -1UL;		
-+} 
-+
-+/* 
-+ * Free bootmem based on the e820 table for a node.
-+ */
-+void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
-+{
-+	int i;
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i]; 
-+		unsigned long last, addr;
-+
-+		if (ei->type != E820_RAM || 
-+		    ei->addr+ei->size <= start || 
-+		    ei->addr > end)
-+			continue;
-+
-+		addr = round_up(ei->addr, PAGE_SIZE);
-+		if (addr < start) 
-+			addr = start;
-+
-+		last = round_down(ei->addr + ei->size, PAGE_SIZE); 
-+		if (last >= end)
-+			last = end; 
-+
-+		if (last > addr && last-addr >= PAGE_SIZE)
-+			free_bootmem_node(pgdat, addr, last-addr);
-+	}
-+}
-+
-+/*
-+ * Find the highest page frame number we have available
-+ */
-+unsigned long __init e820_end_of_ram(void)
-+{
-+	int i;
-+	unsigned long end_pfn = 0;
-+	
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct e820entry *ei = &e820.map[i]; 
-+		unsigned long start, end;
-+
-+		start = round_up(ei->addr, PAGE_SIZE); 
-+		end = round_down(ei->addr + ei->size, PAGE_SIZE); 
-+		if (start >= end)
-+			continue;
-+		if (ei->type == E820_RAM) { 
-+		if (end > end_pfn<<PAGE_SHIFT)
-+			end_pfn = end>>PAGE_SHIFT;
-+		} else { 
-+			if (end > end_pfn_map<<PAGE_SHIFT) 
-+				end_pfn_map = end>>PAGE_SHIFT;
-+		} 
-+	}
-+
-+	if (end_pfn > end_pfn_map) 
-+		end_pfn_map = end_pfn;
-+	if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
-+		end_pfn_map = MAXMEM>>PAGE_SHIFT;
-+	if (end_pfn > end_user_pfn)
-+		end_pfn = end_user_pfn;
-+	if (end_pfn > end_pfn_map) 
-+		end_pfn = end_pfn_map; 
-+
-+	return end_pfn;	
-+}
-+
-+/* 
-+ * Mark e820 reserved areas as busy for the resource manager.
-+ */
-+void __init e820_reserve_resources(void)
-+{
-+	int i;
-+	for (i = 0; i < e820.nr_map; i++) {
-+		struct resource *res;
-+		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
-+			continue;
-+		res = alloc_bootmem_low(sizeof(struct resource));
-+		switch (e820.map[i].type) {
-+		case E820_RAM:	res->name = "System RAM"; break;
-+		case E820_ACPI:	res->name = "ACPI Tables"; break;
-+		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
-+		default:	res->name = "reserved";
-+		}
-+		res->start = e820.map[i].addr;
-+		res->end = res->start + e820.map[i].size - 1;
-+		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+		request_resource(&iomem_resource, res);
-+		if (e820.map[i].type == E820_RAM) {
-+			/*
-+			 *  We don't know which RAM region contains kernel data,
-+			 *  so we try it repeatedly and let the resource manager
-+			 *  test it.
-+			 */
-+			request_resource(res, &code_resource);
-+			request_resource(res, &data_resource);
-+		}
-+	}
-+}
-+
-+void __init e820_print_map(char *who)
-+{
-+	int i;
-+
-+	for (i = 0; i < e820.nr_map; i++) {
-+		printk(" %s: %016Lx - %016Lx ", who,
-+			(unsigned long long) e820.map[i].addr,
-+			(unsigned long long) (e820.map[i].addr + e820.map[i].size));
-+		switch (e820.map[i].type) {
-+		case E820_RAM:	printk("(usable)\n");
-+				break;
-+		case E820_RESERVED:
-+				printk("(reserved)\n");
-+				break;
-+		case E820_ACPI:
-+				printk("(ACPI data)\n");
-+				break;
-+		case E820_NVS:
-+				printk("(ACPI NVS)\n");
-+				break;
-+		default:	printk("type %u\n", e820.map[i].type);
-+				break;
-+		}
-+	}
-+}
-+
-+/*
-+ * Sanitize the BIOS e820 map.
-+ *
-+ * Some e820 responses include overlapping entries.  The following 
-+ * replaces the original e820 map with a new one, removing overlaps.
-+ *
-+ */
-+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-+{
-+	struct change_member {
-+		struct e820entry *pbios; /* pointer to original bios entry */
-+		unsigned long long addr; /* address for this change point */
-+	};
-+	static struct change_member change_point_list[2*E820MAX] __initdata;
-+	static struct change_member *change_point[2*E820MAX] __initdata;
-+	static struct e820entry *overlap_list[E820MAX] __initdata;
-+	static struct e820entry new_bios[E820MAX] __initdata;
-+	struct change_member *change_tmp;
-+	unsigned long current_type, last_type;
-+	unsigned long long last_addr;
-+	int chgidx, still_changing;
-+	int overlap_entries;
-+	int new_bios_entry;
-+	int old_nr, new_nr, chg_nr;
-+	int i;
-+
-+	/*
-+		Visually we're performing the following (1,2,3,4 = memory types)...
-+
-+		Sample memory map (w/overlaps):
-+		   ____22__________________
-+		   ______________________4_
-+		   ____1111________________
-+		   _44_____________________
-+		   11111111________________
-+		   ____________________33__
-+		   ___________44___________
-+		   __________33333_________
-+		   ______________22________
-+		   ___________________2222_
-+		   _________111111111______
-+		   _____________________11_
-+		   _________________4______
-+
-+		Sanitized equivalent (no overlap):
-+		   1_______________________
-+		   _44_____________________
-+		   ___1____________________
-+		   ____22__________________
-+		   ______11________________
-+		   _________1______________
-+		   __________3_____________
-+		   ___________44___________
-+		   _____________33_________
-+		   _______________2________
-+		   ________________1_______
-+		   _________________4______
-+		   ___________________2____
-+		   ____________________33__
-+		   ______________________4_
-+	*/
-+
-+	/* if there's only one memory region, don't bother */
-+	if (*pnr_map < 2)
-+		return -1;
-+
-+	old_nr = *pnr_map;
-+
-+	/* bail out if we find any unreasonable addresses in bios map */
-+	for (i=0; i<old_nr; i++)
-+		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-+			return -1;
-+
-+	/* create pointers for initial change-point information (for sorting) */
-+	for (i=0; i < 2*old_nr; i++)
-+		change_point[i] = &change_point_list[i];
-+
-+	/* record all known change-points (starting and ending addresses),
-+	   omitting those that are for empty memory regions */
-+	chgidx = 0;
-+	for (i=0; i < old_nr; i++)	{
-+		if (biosmap[i].size != 0) {
-+			change_point[chgidx]->addr = biosmap[i].addr;
-+			change_point[chgidx++]->pbios = &biosmap[i];
-+			change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-+			change_point[chgidx++]->pbios = &biosmap[i];
-+		}
-+	}
-+	chg_nr = chgidx;
-+
-+	/* sort change-point list by memory addresses (low -> high) */
-+	still_changing = 1;
-+	while (still_changing)	{
-+		still_changing = 0;
-+		for (i=1; i < chg_nr; i++)  {
-+			/* if <current_addr> > <last_addr>, swap */
-+			/* or, if current=<start_addr> & last=<end_addr>, swap */
-+			if ((change_point[i]->addr < change_point[i-1]->addr) ||
-+				((change_point[i]->addr == change_point[i-1]->addr) &&
-+				 (change_point[i]->addr == change_point[i]->pbios->addr) &&
-+				 (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-+			   )
-+			{
-+				change_tmp = change_point[i];
-+				change_point[i] = change_point[i-1];
-+				change_point[i-1] = change_tmp;
-+				still_changing=1;
-+			}
-+		}
-+	}
-+
-+	/* create a new bios memory map, removing overlaps */
-+	overlap_entries=0;	 /* number of entries in the overlap table */
-+	new_bios_entry=0;	 /* index for creating new bios map entries */
-+	last_type = 0;		 /* start with undefined memory type */
-+	last_addr = 0;		 /* start with 0 as last starting address */
-+	/* loop through change-points, determining effect on the new bios map */
-+	for (chgidx=0; chgidx < chg_nr; chgidx++)
-+	{
-+		/* keep track of all overlapping bios entries */
-+		if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-+		{
-+			/* add map entry to overlap list (> 1 entry implies an overlap) */
-+			overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-+		}
-+		else
-+		{
-+			/* remove entry from list (order independent, so swap with last) */
-+			for (i=0; i<overlap_entries; i++)
-+			{
-+				if (overlap_list[i] == change_point[chgidx]->pbios)
-+					overlap_list[i] = overlap_list[overlap_entries-1];
-+			}
-+			overlap_entries--;
-+		}
-+		/* if there are overlapping entries, decide which "type" to use */
-+		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-+		current_type = 0;
-+		for (i=0; i<overlap_entries; i++)
-+			if (overlap_list[i]->type > current_type)
-+				current_type = overlap_list[i]->type;
-+		/* continue building up new bios map based on this information */
-+		if (current_type != last_type)	{
-+			if (last_type != 0)	 {
-+				new_bios[new_bios_entry].size =
-+					change_point[chgidx]->addr - last_addr;
-+				/* move forward only if the new size was non-zero */
-+				if (new_bios[new_bios_entry].size != 0)
-+					if (++new_bios_entry >= E820MAX)
-+						break; 	/* no more space left for new bios entries */
-+			}
-+			if (current_type != 0)	{
-+				new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-+				new_bios[new_bios_entry].type = current_type;
-+				last_addr=change_point[chgidx]->addr;
-+			}
-+			last_type = current_type;
-+		}
-+	}
-+	new_nr = new_bios_entry;   /* retain count for new bios entries */
-+
-+	/* copy new bios mapping into original location */
-+	memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-+	*pnr_map = new_nr;
-+
-+	return 0;
-+}
-+
-+/*
-+ * Copy the BIOS e820 map into a safe place.
-+ *
-+ * Sanity-check it while we're at it..
-+ *
-+ * If we're lucky and live on a modern system, the setup code
-+ * will have given us a memory map that we can use to properly
-+ * set up memory.  If we aren't, we'll fake a memory map.
-+ *
-+ * We check to see that the memory map contains at least 2 elements
-+ * before we'll use it, because the detection code in setup.S may
-+ * not be perfect and most every PC known to man has two memory
-+ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
-+ * thinkpad 560x, for example, does not cooperate with the memory
-+ * detection code.)
-+ */
-+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-+{
-+	/* Only one memory region (or negative)? Ignore it */
-+	if (nr_map < 2)
-+		return -1;
-+
-+	do {
-+		unsigned long start = biosmap->addr;
-+		unsigned long size = biosmap->size;
-+		unsigned long end = start + size;
-+		unsigned long type = biosmap->type;
-+
-+		/* Overflow in 64 bits? Ignore the memory map. */
-+		if (start > end)
-+			return -1;
-+
-+		/*
-+		 * Some BIOSes claim RAM in the 640k - 1M region.
-+		 * Not right. Fix it up.
-+		 * 
-+		 * This should be removed on Hammer which is supposed to not
-+		 * have non e820 covered ISA mappings there, but I had some strange
-+		 * problems so it stays for now.  -AK
-+		 */
-+		if (type == E820_RAM) {
-+			if (start < 0x100000ULL && end > 0xA0000ULL) {
-+				if (start < 0xA0000ULL)
-+					add_memory_region(start, 0xA0000ULL-start, type);
-+				if (end <= 0x100000ULL)
-+					continue;
-+				start = 0x100000ULL;
-+				size = end - start;
-+			}
-+		}
-+
-+		add_memory_region(start, size, type);
-+	} while (biosmap++,--nr_map);
-+	return 0;
-+}
-+
-+void __init setup_memory_region(void)
-+{
-+	char *who = "BIOS-e820";
-+
-+	/*
-+	 * Try to copy the BIOS-supplied E820-map.
-+	 *
-+	 * Otherwise fake a memory map; one section from 0k->640k,
-+	 * the next section from 1mb->appropriate_mem_k
-+	 */
-+	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
-+	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
-+		unsigned long mem_size;
-+
-+		/* compare results from other methods and take the greater */
-+		if (ALT_MEM_K < EXT_MEM_K) {
-+			mem_size = EXT_MEM_K;
-+			who = "BIOS-88";
-+		} else {
-+			mem_size = ALT_MEM_K;
-+			who = "BIOS-e801";
-+		}
-+
-+		e820.nr_map = 0;
-+		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
-+		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
-+  	}
-+	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-+	e820_print_map(who);
-+}
-+
-+#else  /* CONFIG_XEN */
-+
-+extern unsigned long xen_override_max_pfn;
-+extern union xen_start_info_union xen_start_info_union;
-+
-+unsigned long __init e820_end_of_ram(void)
-+{
-+	unsigned long max_end_pfn;
-+
-+	if (xen_override_max_pfn == 0) {
-+		max_end_pfn = xen_start_info->nr_pages;
-+		/* Default 8MB slack (to balance backend allocations). */
-+		max_end_pfn += 8 << (20 - PAGE_SHIFT);
-+	} else if (xen_override_max_pfn > xen_start_info->nr_pages) {
-+		max_end_pfn = xen_override_max_pfn;
-+	} else {
-+		max_end_pfn = xen_start_info->nr_pages;
-+	}
-+
-+	return max_end_pfn;
-+}
-+
-+void __init e820_reserve_resources(void) 
-+{
-+	dom0_op_t op;
-+	struct dom0_memory_map_entry *map;
-+	unsigned long gapstart, gapsize, last;
-+	int i, found = 0;
-+
-+	if (!(xen_start_info->flags & SIF_INITDOMAIN))
-+		return;
-+
-+	map = alloc_bootmem_low_pages(PAGE_SIZE);
-+	op.cmd = DOM0_PHYSICAL_MEMORY_MAP;
-+	op.u.physical_memory_map.memory_map = map;
-+	op.u.physical_memory_map.max_map_entries =
-+		PAGE_SIZE / sizeof(struct dom0_memory_map_entry);
-+	BUG_ON(HYPERVISOR_dom0_op(&op));
-+
-+	last = 0x100000000ULL;
-+	gapstart = 0x10000000;
-+	gapsize = 0x400000;
-+
-+	for (i = op.u.physical_memory_map.nr_map_entries - 1; i >= 0; i--) {
-+		struct resource *res;
-+
-+		if ((last > map[i].end) && ((last - map[i].end) > gapsize)) {
-+			gapsize = last - map[i].end;
-+			gapstart = map[i].end;
-+			found = 1;
-+		}
-+		if (map[i].start < last)
-+			last = map[i].start;
-+
-+		if (map[i].end > 0x100000000ULL)
-+			continue;
-+		res = alloc_bootmem_low(sizeof(struct resource));
-+		res->name = map[i].is_ram ? "System RAM" : "reserved";
-+		res->start = map[i].start;
-+		res->end = map[i].end - 1;
-+		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+		request_resource(&iomem_resource, res);
-+	}
-+
-+	free_bootmem(__pa(map), PAGE_SIZE);
-+
-+	if (!found) {
-+		gapstart = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
-+		gapstart = (gapstart << PAGE_SHIFT) + 1024*1024;
-+		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
-+		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
-+	}
-+
-+	/*
-+	 * Start allocating dynamic PCI memory a bit into the gap,
-+	 * aligned up to the nearest megabyte.
-+	 *
-+	 * Question: should we try to pad it up a bit (do something
-+	 * like " + (gapsize >> 3)" in there too?). We now have the
-+	 * technology.
-+	 */
-+	pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
-+
-+	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
-+		pci_mem_start, gapstart, gapsize);
-+}
-+
-+#endif
-+
-+void __init parse_memopt(char *p, char **from) 
-+{ 
-+	end_user_pfn = memparse(p, from);
-+	end_user_pfn >>= PAGE_SHIFT;	
-+	xen_override_max_pfn = (unsigned long) end_user_pfn;
-+} 
-+
-+/*
-+ * Search for the biggest gap in the low 32 bits of the e820
-+ * memory space.  We pass this space to PCI to assign MMIO resources
-+ * for hotplug or unconfigured devices in.
-+ * Hopefully the BIOS left enough space.
-+ */
-+__init void e820_setup_gap(void)
-+{
-+#ifndef CONFIG_XEN
-+	unsigned long gapstart, gapsize;
-+	unsigned long last;
-+	int i;
-+	int found = 0;
-+
-+	last = 0x100000000ull;
-+	gapstart = 0x10000000;
-+	gapsize = 0x400000;
-+	i = e820.nr_map;
-+	while (--i >= 0) {
-+		unsigned long long start = e820.map[i].addr;
-+		unsigned long long end = start + e820.map[i].size;
-+
-+		/*
-+		 * Since "last" is at most 4GB, we know we'll
-+		 * fit in 32 bits if this condition is true
-+		 */
-+		if (last > end) {
-+			unsigned long gap = last - end;
-+
-+			if (gap > gapsize) {
-+				gapsize = gap;
-+				gapstart = end;
-+				found = 1;
-+			}
-+		}
-+		if (start < last)
-+			last = start;
-+	}
-+
-+	if (!found) {
-+		gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
-+		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
-+		       KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
-+	}
-+
-+	/*
-+	 * Start allocating dynamic PCI memory a bit into the gap,
-+	 * aligned up to the nearest megabyte.
-+	 *
-+	 * Question: should we try to pad it up a bit (do something
-+	 * like " + (gapsize >> 3)" in there too?). We now have the
-+	 * technology.
-+	 */
-+	pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
-+
-+	printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
-+		pci_mem_start, gapstart, gapsize);
-+#endif
-+}
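
Both the Xen and native paths above finish with the same rounding step: pci_mem_start is the gap start rounded up to the next 1 MiB boundary. A quick userspace illustration with a hypothetical gap start (the value is assumed, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long gapstart = 0xcfef0000UL;  /* assumed example value */
            unsigned long pci_mem_start = (gapstart + 0xfffffUL) & ~0xfffffUL;

            /* prints 0xcff00000, the next 1 MiB boundary at or above gapstart */
            printf("%#lx\n", pci_mem_start);
            return 0;
    }
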
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/early_printk.c linux-2.6.12-xen/arch/xen/x86_64/kernel/early_printk.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/early_printk.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/early_printk.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,251 @@
-+#include <linux/config.h>
-+#include <linux/console.h>
-+#include <linux/kernel.h>
-+#include <linux/init.h>
-+#include <linux/string.h>
-+#include <asm/io.h>
-+#include <asm/processor.h>
-+
-+#ifndef CONFIG_XEN
-+
-+/* Simple VGA output */
-+
-+#ifdef __i386__
-+#define VGABASE		(__ISA_IO_base + 0xb8000)
-+#else
-+#define VGABASE		((void __iomem *)0xffffffff800b8000UL)
-+#endif
-+
-+#define MAX_YPOS	25
-+#define MAX_XPOS	80
-+
-+static int current_ypos = 1, current_xpos = 0; 
-+
-+static void early_vga_write(struct console *con, const char *str, unsigned n)
-+{
-+	char c;
-+	int  i, k, j;
-+
-+	while ((c = *str++) != '\0' && n-- > 0) {
-+		if (current_ypos >= MAX_YPOS) {
-+			/* scroll 1 line up */
-+			for (k = 1, j = 0; k < MAX_YPOS; k++, j++) {
-+				for (i = 0; i < MAX_XPOS; i++) {
-+					writew(readw(VGABASE + 2*(MAX_XPOS*k + i)),
-+					       VGABASE + 2*(MAX_XPOS*j + i));
-+				}
-+			}
-+			for (i = 0; i < MAX_XPOS; i++)
-+				writew(0x720, VGABASE + 2*(MAX_XPOS*j + i));
-+			current_ypos = MAX_YPOS-1;
-+		}
-+		if (c == '\n') {
-+			current_xpos = 0;
-+			current_ypos++;
-+		} else if (c != '\r')  {
-+			writew(((0x7 << 8) | (unsigned short) c),
-+			       VGABASE + 2*(MAX_XPOS*current_ypos +
-+						current_xpos++));
-+			if (current_xpos >= MAX_XPOS) {
-+				current_xpos = 0;
-+				current_ypos++;
-+			}
-+		}
-+	}
-+}
-+
-+static struct console early_vga_console = {
-+	.name =		"earlyvga",
-+	.write =	early_vga_write,
-+	.flags =	CON_PRINTBUFFER,
-+	.index =	-1,
-+};
-+
-+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */ 
-+
-+static int early_serial_base = 0x3f8;  /* ttyS0 */
-+
-+#define XMTRDY          0x20
-+
-+#define DLAB		0x80
-+
-+#define TXR             0       /*  Transmit register (WRITE) */
-+#define RXR             0       /*  Receive register  (READ)  */
-+#define IER             1       /*  Interrupt Enable          */
-+#define IIR             2       /*  Interrupt ID              */
-+#define FCR             2       /*  FIFO control              */
-+#define LCR             3       /*  Line control              */
-+#define MCR             4       /*  Modem control             */
-+#define LSR             5       /*  Line Status               */
-+#define MSR             6       /*  Modem Status              */
-+#define DLL             0       /*  Divisor Latch Low         */
-+#define DLH             1       /*  Divisor latch High        */
-+
-+static int early_serial_putc(unsigned char ch) 
-+{ 
-+	unsigned timeout = 0xffff; 
-+	while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout) 
-+		cpu_relax();
-+	outb(ch, early_serial_base + TXR);
-+	return timeout ? 0 : -1;
-+} 
-+
-+static void early_serial_write(struct console *con, const char *s, unsigned n)
-+{
-+	while (*s && n-- > 0) { 
-+		early_serial_putc(*s); 
-+		if (*s == '\n') 
-+			early_serial_putc('\r'); 
-+		s++; 
-+	} 
-+} 
-+
-+#define DEFAULT_BAUD 9600
-+
-+static __init void early_serial_init(char *s)
-+{
-+	unsigned char c; 
-+	unsigned divisor;
-+	unsigned baud = DEFAULT_BAUD;
-+	char *e;
-+
-+	if (*s == ',')
-+		++s;
-+
-+	if (*s) {
-+		unsigned port; 
-+		if (!strncmp(s,"0x",2)) {
-+			early_serial_base = simple_strtoul(s, &e, 16);
-+		} else {
-+			static int bases[] = { 0x3f8, 0x2f8 };
-+
-+			if (!strncmp(s,"ttyS",4))
-+				s += 4;
-+			port = simple_strtoul(s, &e, 10);
-+			if (port > 1 || s == e)
-+				port = 0;
-+			early_serial_base = bases[port];
-+		}
-+		s += strcspn(s, ",");
-+		if (*s == ',')
-+			s++;
-+	}
-+
-+	outb(0x3, early_serial_base + LCR);	/* 8n1 */
-+	outb(0, early_serial_base + IER);	/* no interrupt */
-+	outb(0, early_serial_base + FCR);	/* no fifo */
-+	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */
-+
-+	if (*s) {
-+		baud = simple_strtoul(s, &e, 0); 
-+		if (baud == 0 || s == e) 
-+			baud = DEFAULT_BAUD;
-+	} 
-+	
-+	divisor = 115200 / baud; 
-+	c = inb(early_serial_base + LCR); 
-+	outb(c | DLAB, early_serial_base + LCR); 
-+	outb(divisor & 0xff, early_serial_base + DLL); 
-+	outb((divisor >> 8) & 0xff, early_serial_base + DLH); 
-+	outb(c & ~DLAB, early_serial_base + LCR);
-+}
-+
-+#else /* CONFIG_XEN */
-+
-+static void
-+early_serial_write(struct console *con, const char *s, unsigned count)
-+{
-+	int n;
-+
-+	while (count > 0) {
-+		n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
-+		if (n <= 0)
-+			break;
-+		count -= n;
-+		s += n;
-+	}
-+} 
-+
-+static __init void early_serial_init(char *s)
-+{
-+}
-+
-+/*
-+ * No early VGA console on Xen, as we do not have convenient ISA-space
-+ * mappings. Someone should fix this for domain 0. For now, use fake serial.
-+ */
-+#define early_vga_console early_serial_console
-+
-+#endif
-+
-+static struct console early_serial_console = {
-+	.name =		"earlyser",
-+	.write =	early_serial_write,
-+	.flags =	CON_PRINTBUFFER,
-+	.index =	-1,
-+};
-+
-+/* Direct interface for emergencies */
-+struct console *early_console = &early_vga_console;
-+static int early_console_initialized = 0;
-+
-+void early_printk(const char *fmt, ...)
-+{ 
-+	char buf[512]; 
-+	int n; 
-+	va_list ap;
-+
-+	va_start(ap,fmt); 
-+	n = vscnprintf(buf,512,fmt,ap);
-+	early_console->write(early_console,buf,n);
-+	va_end(ap); 
-+} 
-+
-+static int keep_early; 
-+
-+int __init setup_early_printk(char *opt) 
-+{  
-+	char *space;
-+	char buf[256]; 
-+
-+	if (early_console_initialized)
-+		return -1;
-+
-+	opt = strchr(opt, '=') + 1;
-+
-+	strlcpy(buf,opt,sizeof(buf)); 
-+	space = strchr(buf, ' '); 
-+	if (space)
-+		*space = 0; 
-+
-+	if (strstr(buf,"keep"))
-+		keep_early = 1; 
-+
-+	if (!strncmp(buf, "serial", 6)) { 
-+		early_serial_init(buf + 6);
-+		early_console = &early_serial_console;
-+	} else if (!strncmp(buf, "ttyS", 4)) { 
-+		early_serial_init(buf);
-+		early_console = &early_serial_console;		
-+	} else if (!strncmp(buf, "vga", 3)) {
-+		early_console = &early_vga_console; 
-+	}
-+	early_console_initialized = 1;
-+	register_console(early_console);       
-+	return 0;
-+}
-+
-+void __init disable_early_printk(void)
-+{ 
-+	if (!early_console_initialized || !early_console)
-+		return;
-+	if (!keep_early) {
-+		printk("disabling early console\n");
-+		unregister_console(early_console);
-+		early_console_initialized = 0;
-+	} else { 
-+		printk("keeping early console\n");
-+	}
-+} 
-+
-+__setup("earlyprintk=", setup_early_printk);
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/entry.S linux-2.6.12-xen/arch/xen/x86_64/kernel/entry.S
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/entry.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/entry.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1019 @@
-+/*
-+ *  linux/arch/x86_64/entry.S
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
-+ *  Copyright (C) 2000  Pavel Machek <pavel at suse.cz>
-+ * 
-+ *  $Id$
-+ *
-+ *  Jun Nakajima <jun.nakajima at intel.com>
-+ *  Asit Mallick <asit.k.mallick at intel.com>
-+ *      Modified for Xen
-+ */
-+
-+/*
-+ * entry.S contains the system-call and fault low-level handling routines.
-+ *
-+ * NOTE: This code handles signal-recognition, which happens every time
-+ * after an interrupt and after each system call.
-+ * 
-+ * Normal syscalls and interrupts don't save a full stack frame; this is 
-+ * only done for syscall tracing, signals or fork/exec et al.
-+ * 
-+ * A note on terminology:	 
-+ * - top of stack: Architecture defined interrupt frame from SS to RIP 
-+ * at the top of the kernel process stack.	
-+ * - partial stack frame: partially saved registers up to R11.
-+ * - full stack frame: Like partial stack frame, but all register saved. 
-+ *	
-+ * TODO:	 
-+ * - schedule it carefully for the final hardware.
-+ */
-+
-+#define ASSEMBLY 1
-+#include <linux/config.h>
-+#include <linux/linkage.h>
-+#include <asm/segment.h>
-+#include <asm/smp.h>
-+#include <asm/cache.h>
-+#include <asm/errno.h>
-+#include <asm/dwarf2.h>
-+#include <asm/calling.h>
-+#include <asm/asm_offset.h>
-+#include <asm/msr.h>
-+#include <asm/unistd.h>
-+#include <asm/thread_info.h>
-+#include <asm/hw_irq.h>
-+#include <asm/errno.h>
-+#include <asm-xen/xen-public/arch-x86_64.h>
-+
-+#include "irq_vectors.h"
-+
-+#include "xen_entry.S"
-+	
-+	.code64
-+
-+#ifndef CONFIG_PREEMPT
-+#define retint_kernel retint_restore_args
-+#endif	
-+	
-+/*
-+ * C code is not supposed to know about undefined top of stack. Every time 
-+ * a C function with an pt_regs argument is called from the SYSCALL based 
-+ * fast path FIXUP_TOP_OF_STACK is needed.
-+ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
-+ * manipulation.
-+ */        	
-+		
-+	/* %rsp:at FRAMEEND */ 
-+	.macro FIXUP_TOP_OF_STACK tmp
-+	movq    $__USER_CS,CS(%rsp)
-+	movq 	$-1,RCX(%rsp)
-+	.endm
-+
-+	.macro RESTORE_TOP_OF_STACK tmp,offset=0
-+	.endm
-+
-+	.macro FAKE_STACK_FRAME child_rip
-+	/* push in order ss, rsp, eflags, cs, rip */
-+	xorq %rax, %rax
-+	pushq %rax /* ss */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	pushq %rax /* rsp */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	CFI_OFFSET	rip,0
-+	pushq $(1<<9) /* eflags - interrupts on */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	pushq $__KERNEL_CS /* cs */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	pushq \child_rip /* rip */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	CFI_OFFSET	rip,0
-+	pushq	%rax /* orig rax */
-+	CFI_ADJUST_CFA_OFFSET	8
-+	.endm
-+
-+	.macro UNFAKE_STACK_FRAME
-+	addq $8*6, %rsp
-+	CFI_ADJUST_CFA_OFFSET	-(6*8)
-+	.endm
-+
-+	.macro	CFI_DEFAULT_STACK
-+	CFI_ADJUST_CFA_OFFSET  (SS)
-+	CFI_OFFSET	r15,R15-SS
-+	CFI_OFFSET	r14,R14-SS
-+	CFI_OFFSET	r13,R13-SS
-+	CFI_OFFSET	r12,R12-SS
-+	CFI_OFFSET	rbp,RBP-SS
-+	CFI_OFFSET	rbx,RBX-SS
-+	CFI_OFFSET	r11,R11-SS
-+	CFI_OFFSET	r10,R10-SS
-+	CFI_OFFSET	r9,R9-SS
-+	CFI_OFFSET	r8,R8-SS
-+	CFI_OFFSET	rax,RAX-SS
-+	CFI_OFFSET	rcx,RCX-SS
-+	CFI_OFFSET	rdx,RDX-SS
-+	CFI_OFFSET	rsi,RSI-SS
-+	CFI_OFFSET	rdi,RDI-SS
-+	CFI_OFFSET	rsp,RSP-SS
-+	CFI_OFFSET	rip,RIP-SS
-+	.endm
-+
-+        /*
-+         * Must be consistent with the definition in arch-x86_64.h:    
-+         *     struct iret_context {
-+         *        u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+         *     };
-+         * #define VGCF_IN_SYSCALL (1<<8) 
-+         */
-+	.macro HYPERVISOR_IRET flag
-+	pushq $\flag
-+	jmp  hypercall_page + (__HYPERVISOR_iret * 32)
-+	.endm
-+
-+        .macro SWITCH_TO_KERNEL ssoff,adjust=0
-+	jc  1f
-+	orb  $1,\ssoff-\adjust+4(%rsp)
-+1:
-+        .endm
-+
-+/*
-+ * A newly forked process directly context switches into this.
-+ */ 	
-+/* rdi:	prev */	
-+ENTRY(ret_from_fork)
-+	CFI_STARTPROC
-+	CFI_DEFAULT_STACK
-+	call schedule_tail
-+	GET_THREAD_INFO(%rcx)
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-+	jnz rff_trace
-+rff_action:	
-+	RESTORE_REST
-+	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
-+	je   int_ret_from_sys_call
-+	testl $_TIF_IA32,threadinfo_flags(%rcx)
-+	jnz  int_ret_from_sys_call
-+	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
-+	jmp ret_from_sys_call
-+rff_trace:
-+	movq %rsp,%rdi
-+	call syscall_trace_leave
-+	GET_THREAD_INFO(%rcx)	
-+	jmp rff_action
-+	CFI_ENDPROC
-+
-+/*
-+ * System call entry. Up to 6 arguments in registers are supported.
-+ *
-+ * SYSCALL does not save anything on the stack and does not change the
-+ * stack pointer.
-+ */
-+		
-+/*
-+ * Register setup:	
-+ * rax  system call number
-+ * rdi  arg0
-+ * rcx  return address for syscall/sysret, C arg3 
-+ * rsi  arg1
-+ * rdx  arg2	
-+ * r10  arg3 	(--> moved to rcx for C)
-+ * r8   arg4
-+ * r9   arg5
-+ * r11  eflags for syscall/sysret, temporary for C
-+ * r12-r15,rbp,rbx saved by C code, not touched. 		
-+ * 
-+ * Interrupts are off on entry.
-+ * Only called from user space.
-+ *
-+ * XXX	if we had a free scratch register we could save the RSP into the stack frame
-+ *      and report it properly in ps. Unfortunately we haven't.
-+ */ 			 		
-+
-+ENTRY(system_call)
-+	CFI_STARTPROC
-+	SAVE_ARGS -8,0
-+	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
-+        XEN_UNBLOCK_EVENTS(%r11)        
-+	GET_THREAD_INFO(%rcx)
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-+	jnz tracesys
-+	cmpq $__NR_syscall_max,%rax
-+	ja badsys
-+	movq %r10,%rcx
-+	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
-+	movq %rax,RAX-ARGOFFSET(%rsp)
-+/*
-+ * Syscall return path ending with SYSRET (fast path)
-+ * Has incomplete stack frame and undefined top of stack. 
-+ */		
-+	.globl ret_from_sys_call
-+ret_from_sys_call:
-+	movl $_TIF_ALLWORK_MASK,%edi
-+	/* edi:	flagmask */
-+sysret_check:		
-+	GET_THREAD_INFO(%rcx)
-+        XEN_BLOCK_EVENTS(%rsi)        
-+	movl threadinfo_flags(%rcx),%edx
-+	andl %edi,%edx
-+	jnz  sysret_careful 
-+        XEN_UNBLOCK_EVENTS(%rsi)                
-+	RESTORE_ARGS 0,8,0
-+        HYPERVISOR_IRET VGCF_IN_SYSCALL
-+
-+	/* Handle reschedules */
-+	/* edx:	work, edi: workmask */	
-+sysret_careful:
-+	bt $TIF_NEED_RESCHED,%edx
-+	jnc sysret_signal
-+        XEN_BLOCK_EVENTS(%rsi)        
-+	pushq %rdi
-+	call schedule
-+	popq  %rdi
-+	jmp sysret_check
-+
-+	/* Handle a signal */ 
-+sysret_signal:
-+/*	sti */
-+        XEN_UNBLOCK_EVENTS(%rsi)        
-+	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
-+	jz    1f
-+
-+	/* Really a signal */
-+	/* edx:	work flags (arg3) */
-+	leaq do_notify_resume(%rip),%rax
-+	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
-+	xorl %esi,%esi # oldset -> arg2
-+	call ptregscall_common
-+1:	movl $_TIF_NEED_RESCHED,%edi
-+	jmp sysret_check
-+	
-+	/* Do syscall tracing */
-+tracesys:			 
-+	SAVE_REST
-+	movq $-ENOSYS,RAX(%rsp)
-+	FIXUP_TOP_OF_STACK %rdi
-+	movq %rsp,%rdi
-+	call syscall_trace_enter
-+	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
-+	RESTORE_REST
-+	cmpq $__NR_syscall_max,%rax
-+	ja  1f
-+	movq %r10,%rcx	/* fixup for C */
-+	call *sys_call_table(,%rax,8)
-+	movq %rax,RAX-ARGOFFSET(%rsp)
-+1:	SAVE_REST
-+	movq %rsp,%rdi
-+	call syscall_trace_leave
-+	RESTORE_TOP_OF_STACK %rbx
-+	RESTORE_REST
-+	jmp ret_from_sys_call
-+		
-+badsys:
-+	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)	
-+	jmp ret_from_sys_call
-+
-+/* 
-+ * Syscall return path ending with IRET.
-+ * Has correct top of stack, but partial stack frame.
-+ */ 	
-+ENTRY(int_ret_from_sys_call)	
-+        XEN_BLOCK_EVENTS(%rsi)
-+	testb $3,CS-ARGOFFSET(%rsp)
-+        jnz 1f
-+        /* Need to set the proper %ss (not NULL) for ring 3 iretq */
-+        movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
-+        jmp retint_restore_args   # return from ring3 kernel
-+1:              
-+	movl $_TIF_ALLWORK_MASK,%edi
-+	/* edi:	mask to check */
-+int_with_check:
-+	GET_THREAD_INFO(%rcx)
-+	movl threadinfo_flags(%rcx),%edx
-+	andl %edi,%edx
-+	jnz   int_careful
-+	jmp   retint_restore_args
-+
-+	/* Either reschedule or signal or syscall exit tracking needed. */
-+	/* First do a reschedule test. */
-+	/* edx:	work, edi: workmask */
-+int_careful:
-+	bt $TIF_NEED_RESCHED,%edx
-+	jnc  int_very_careful
-+/*	sti */
-+        XEN_UNBLOCK_EVENTS(%rsi)
-+	pushq %rdi
-+	call schedule
-+	popq %rdi
-+	cli
-+	jmp int_with_check
-+
-+	/* handle signals and tracing -- both require a full stack frame */
-+int_very_careful:
-+/*	sti */
-+        XEN_UNBLOCK_EVENTS(%rsi)
-+	SAVE_REST
-+	/* Check for syscall exit trace */	
-+	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
-+	jz int_signal
-+	pushq %rdi
-+	leaq 8(%rsp),%rdi	# &ptregs -> arg1	
-+	call syscall_trace_leave
-+	popq %rdi
-+	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
-+	cli
-+	jmp int_restore_rest
-+	
-+int_signal:
-+	testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
-+	jz 1f
-+	movq %rsp,%rdi		# &ptregs -> arg1
-+	xorl %esi,%esi		# oldset -> arg2
-+	call do_notify_resume
-+1:	movl $_TIF_NEED_RESCHED,%edi	
-+int_restore_rest:
-+	RESTORE_REST
-+	cli
-+	jmp int_with_check
-+	CFI_ENDPROC
-+		
-+/* 
-+ * Certain special system calls that need to save a complete full stack frame.
-+ */ 								
-+	
-+	.macro PTREGSCALL label,func,arg
-+	.globl \label
-+\label:
-+	leaq	\func(%rip),%rax
-+	leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
-+	jmp	ptregscall_common
-+	.endm
-+
-+	PTREGSCALL stub_clone, sys_clone, %r8
-+	PTREGSCALL stub_fork, sys_fork, %rdi
-+	PTREGSCALL stub_vfork, sys_vfork, %rdi
-+	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
-+	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
-+	PTREGSCALL stub_iopl, sys_iopl, %rsi
-+
-+ENTRY(ptregscall_common)
-+	CFI_STARTPROC
-+	popq %r11
-+	CFI_ADJUST_CFA_OFFSET	-8
-+	SAVE_REST
-+	movq %r11, %r15
-+	FIXUP_TOP_OF_STACK %r11
-+	call *%rax
-+	RESTORE_TOP_OF_STACK %r11
-+	movq %r15, %r11
-+	RESTORE_REST
-+	pushq %r11
-+	CFI_ADJUST_CFA_OFFSET	8
-+	ret
-+	CFI_ENDPROC
-+	
-+ENTRY(stub_execve)
-+	CFI_STARTPROC
-+	popq %r11
-+	CFI_ADJUST_CFA_OFFSET	-8
-+	SAVE_REST
-+	movq %r11, %r15
-+	FIXUP_TOP_OF_STACK %r11
-+	call sys_execve
-+	GET_THREAD_INFO(%rcx)
-+	bt $TIF_IA32,threadinfo_flags(%rcx)
-+	jc exec_32bit
-+	RESTORE_TOP_OF_STACK %r11
-+	movq %r15, %r11
-+	RESTORE_REST
-+	push %r11
-+	ret
-+
-+exec_32bit:
-+	CFI_ADJUST_CFA_OFFSET	REST_SKIP
-+	movq %rax,RAX(%rsp)
-+	RESTORE_REST
-+	jmp int_ret_from_sys_call
-+	CFI_ENDPROC
-+	
-+/*
-+ * sigreturn is special because it needs to restore all registers on return.
-+ * This cannot be done with SYSRET, so use the IRET return path instead.
-+ */                
-+ENTRY(stub_rt_sigreturn)
-+	CFI_STARTPROC
-+	addq $8, %rsp		
-+	SAVE_REST
-+	movq %rsp,%rdi
-+	FIXUP_TOP_OF_STACK %r11
-+	call sys_rt_sigreturn
-+	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
-+	RESTORE_REST
-+	jmp int_ret_from_sys_call
-+	CFI_ENDPROC
-+
-+/* 
-+ * Interrupt entry/exit.
-+ *
-+ * Interrupt entry points save only callee clobbered registers in fast path.
-+ *	
-+ * Entry runs with interrupts off.	
-+ */ 
-+
-+/* 0(%rsp): interrupt number */ 
-+	.macro interrupt func
-+	CFI_STARTPROC	simple
-+	CFI_DEF_CFA	rsp,(SS-RDI)
-+	CFI_REL_OFFSET	rsp,(RSP-ORIG_RAX)
-+	CFI_REL_OFFSET	rip,(RIP-ORIG_RAX)
-+	cld
-+#ifdef CONFIG_DEBUG_INFO
-+	SAVE_ALL	
-+	movq %rsp,%rdi
-+	/*
-+	 * Setup a stack frame pointer.  This allows gdb to trace
-+	 * back to the original stack.
-+	 */
-+	movq %rsp,%rbp
-+	CFI_DEF_CFA_REGISTER	rbp
-+#else		
-+	SAVE_ARGS
-+	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
-+#endif	
-+#if 0 /* For Xen we don't need to do this */       
-+	testl $3,CS(%rdi)
-+	je 1f
-+	swapgs	
-+#endif        
-+1:	addl $1,%gs:pda_irqcount	# RED-PEN should check preempt count
-+	movq %gs:pda_irqstackptr,%rax
-+	cmoveq %rax,%rsp							
-+	pushq %rdi			# save old stack	
-+	call \func
-+	.endm
-+
-+retint_check:			
-+	movl threadinfo_flags(%rcx),%edx
-+	andl %edi,%edx
-+	jnz  retint_careful
-+retint_restore_args:
-+        movb EVENT_MASK-REST_SKIP(%rsp), %al
-+        notb %al			# %al == ~saved_mask
-+        XEN_GET_VCPU_INFO(%rsi)
-+        andb evtchn_upcall_mask(%rsi),%al
-+	andb $1,%al			# %al == mask & ~saved_mask
-+	jnz restore_all_enable_events	# != 0 => reenable event delivery      
-+        XEN_PUT_VCPU_INFO(%rsi)
-+		
-+	RESTORE_ARGS 0,8,0						
-+	testb $3,8(%rsp)                # check CS
-+	jnz  user_mode
-+kernel_mode:
-+        orb   $3,1*8(%rsp)
-+	iretq
-+user_mode:
-+	HYPERVISOR_IRET 0
-+	
-+	/* edi: workmask, edx: work */	
-+retint_careful:
-+	bt    $TIF_NEED_RESCHED,%edx
-+	jnc   retint_signal
-+	XEN_UNBLOCK_EVENTS(%rsi)
-+/*	sti */        
-+	pushq %rdi
-+	call  schedule
-+	popq %rdi		
-+	XEN_BLOCK_EVENTS(%rsi)		
-+	GET_THREAD_INFO(%rcx)
-+/*	cli */
-+	jmp retint_check
-+	
-+retint_signal:
-+	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
-+	jz    retint_restore_args
-+        XEN_UNBLOCK_EVENTS(%rsi)
-+	SAVE_REST
-+	movq $-1,ORIG_RAX(%rsp) 			
-+	xorq %rsi,%rsi		# oldset
-+	movq %rsp,%rdi		# &pt_regs
-+	call do_notify_resume
-+	RESTORE_REST
-+        XEN_BLOCK_EVENTS(%rsi)		
-+	movl $_TIF_NEED_RESCHED,%edi
-+	GET_THREAD_INFO(%rcx)
-+	jmp retint_check
-+
-+#ifdef CONFIG_PREEMPT
-+	/* Returning to kernel space. Check if we need preemption */
-+	/* rcx:	 threadinfo. interrupts off. */
-+	.p2align
-+retint_kernel:	
-+	cmpl $0,threadinfo_preempt_count(%rcx)
-+	jnz  retint_restore_args
-+	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
-+	jnc  retint_restore_args
-+	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
-+	jnc  retint_restore_args
-+	call preempt_schedule_irq
-+	jmp retint_kernel       /* check again */
-+#endif	
-+	CFI_ENDPROC
-+	
-+/*
-+ * APIC interrupts.
-+ */		
-+	.macro apicinterrupt num,func
-+	pushq $\num-256
-+	interrupt \func
-+	jmp error_entry
-+	CFI_ENDPROC
-+	.endm
-+
-+#if 0
-+ENTRY(reschedule_interrupt)
-+	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
-+
-+ENTRY(invalidate_interrupt)
-+	apicinterrupt INVALIDATE_TLB_VECTOR,smp_invalidate_interrupt
-+
-+ENTRY(call_function_interrupt)
-+	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC	
-+ENTRY(apic_timer_interrupt)
-+	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
-+
-+ENTRY(error_interrupt)
-+	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
-+
-+ENTRY(spurious_interrupt)
-+	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
-+#endif
-+				
-+/*
-+ * Exception entry points.
-+ */ 		
-+	.macro zeroentry sym
-+        movq (%rsp),%rcx
-+        movq 8(%rsp),%r11
-+        addq $0x10,%rsp /* skip rcx and r11 */
-+	pushq $0	/* push error code/oldrax */ 
-+	pushq %rax	/* push real oldrax to the rdi slot */ 
-+	leaq  \sym(%rip),%rax
-+	jmp error_entry
-+	.endm	
-+
-+	.macro errorentry sym
-+        movq (%rsp),%rcx
-+        movq 8(%rsp),%r11
-+        addq $0x10,%rsp /* rsp points to the error code */
-+	pushq %rax
-+	leaq  \sym(%rip),%rax
-+	jmp error_entry
-+	.endm
-+
-+#if 0
-+	/* error code is on the stack already */
-+	/* handle NMI like exceptions that can happen everywhere */
-+	.macro paranoidentry sym
-+        movq (%rsp),%rcx
-+        movq 8(%rsp),%r11
-+        addq $0x10,%rsp /* skip rcx and r11 */        
-+	SAVE_ALL
-+	cld
-+	movl $1,%ebx
-+	movl  $MSR_GS_BASE,%ecx
-+	rdmsr
-+	testl %edx,%edx
-+	js    1f
-+/*	swapgs */
-+	xorl  %ebx,%ebx
-+1:	movq %rsp,%rdi
-+	movq ORIG_RAX(%rsp),%rsi
-+	movq $-1,ORIG_RAX(%rsp)
-+	call \sym
-+	cli
-+	.endm
-+#endif
-+	
-+/*
-+ * Exception entry point. This expects an error code/orig_rax on the stack
-+ * and the exception handler in %rax.	
-+ */ 		  				
-+ENTRY(error_entry)
-+	CFI_STARTPROC	simple
-+	CFI_DEF_CFA	rsp,(SS-RDI)
-+	CFI_REL_OFFSET	rsp,(RSP-RDI)
-+	CFI_REL_OFFSET	rip,(RIP-RDI)
-+	/* rdi slot contains rax, oldrax contains error code */
-+	cld	
-+	subq  $14*8,%rsp
-+	CFI_ADJUST_CFA_OFFSET	(14*8)
-+	movq %rsi,13*8(%rsp)
-+	CFI_REL_OFFSET	rsi,RSI
-+	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
-+	movq %rdx,12*8(%rsp)
-+	CFI_REL_OFFSET	rdx,RDX
-+	movq %rcx,11*8(%rsp)
-+	CFI_REL_OFFSET	rcx,RCX
-+	movq %rsi,10*8(%rsp)	/* store rax */ 
-+	CFI_REL_OFFSET	rax,RAX
-+	movq %r8, 9*8(%rsp)
-+	CFI_REL_OFFSET	r8,R8
-+	movq %r9, 8*8(%rsp)
-+	CFI_REL_OFFSET	r9,R9
-+	movq %r10,7*8(%rsp)
-+	CFI_REL_OFFSET	r10,R10
-+	movq %r11,6*8(%rsp)
-+	CFI_REL_OFFSET	r11,R11
-+	movq %rbx,5*8(%rsp) 
-+	CFI_REL_OFFSET	rbx,RBX
-+	movq %rbp,4*8(%rsp) 
-+	CFI_REL_OFFSET	rbp,RBP
-+	movq %r12,3*8(%rsp) 
-+	CFI_REL_OFFSET	r12,R12
-+	movq %r13,2*8(%rsp) 
-+	CFI_REL_OFFSET	r13,R13
-+	movq %r14,1*8(%rsp) 
-+	CFI_REL_OFFSET	r14,R14
-+	movq %r15,(%rsp) 
-+	CFI_REL_OFFSET	r15,R15
-+#if 0        
-+	cmpl $__KERNEL_CS,CS(%rsp)
-+	je  error_kernelspace
-+#endif        
-+error_call_handler:
-+	movq %rdi, RDI(%rsp)            
-+	movq %rsp,%rdi
-+	movq ORIG_RAX(%rsp),%rsi	# get error code 
-+	movq $-1,ORIG_RAX(%rsp)
-+	call *%rax
-+error_exit:		
-+	RESTORE_REST
-+/*	cli */
-+	XEN_BLOCK_EVENTS(%rsi)		
-+	GET_THREAD_INFO(%rcx)	
-+	testb $3,CS-ARGOFFSET(%rsp)
-+	jz retint_kernel
-+	movl  threadinfo_flags(%rcx),%edx
-+	movl  $_TIF_WORK_MASK,%edi	
-+	andl  %edi,%edx
-+	jnz   retint_careful
-+	jmp   retint_restore_args
-+
-+error_kernelspace:
-+         /*
-+         * We need to re-write the logic here because we don't do iretq
-+         * to return to user mode. It's still possible that we get a trap/fault
-+         * in the kernel (when accessing buffers pointed to by system calls, 
-+         * for example).
-+         *
-+         */           
-+#if 0
-+	incl %ebx
-+       /* There are two places in the kernel that can potentially fault with
-+          usergs. Handle them here. The exception handlers after
-+	   iret run with kernel gs again, so don't set the user space flag.
-+	   B stepping K8s sometimes report a truncated RIP for IRET 
-+	   exceptions returning to compat mode. Check for these here too. */
-+	leaq iret_label(%rip),%rbp
-+	cmpq %rbp,RIP(%rsp) 
-+	je   error_swapgs
-+	movl %ebp,%ebp	/* zero extend */
-+	cmpq %rbp,RIP(%rsp) 
-+	je   error_swapgs
-+	cmpq $gs_change,RIP(%rsp)
-+        je   error_swapgs
-+	jmp  error_sti
-+#endif        
-+	
-+ENTRY(hypervisor_callback)
-+	zeroentry do_hypervisor_callback
-+        
-+/*
-+ * Copied from arch/xen/i386/kernel/entry.S
-+ */               
-+# A note on the "critical region" in our callback handler.
-+# We want to avoid stacking callback handlers due to events occurring
-+# during handling of the last event. To do this, we keep events disabled
-+# until we've done all processing. HOWEVER, we must enable events before
-+# popping the stack frame (can't be done atomically) and so it would still
-+# be possible to get enough handler activations to overflow the stack.
-+# Although unlikely, bugs of that kind are hard to track down, so we'd
-+# like to avoid the possibility.
-+# So, on entry to the handler we detect whether we interrupted an
-+# existing activation in its critical region -- if so, we pop the current
-+# activation and restart the handler using the previous one.
-+ENTRY(do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
-+# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
-+# see the correct pointer to the pt_regs
-+        addq $8, %rsp            # we don't return, adjust the stack frame
-+11:	movb $0, EVENT_MASK(%rsp)         
-+	call evtchn_do_upcall
-+        jmp  error_exit
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+ENTRY(nmi)
-+	zeroentry do_nmi_callback
-+ENTRY(do_nmi_callback)
-+        addq $8, %rsp
-+        call do_nmi
-+        RESTORE_REST
-+        XEN_BLOCK_EVENTS(%rsi)
-+        GET_THREAD_INFO(%rcx)
-+        jmp  retint_restore_args
-+#endif
-+
-+        ALIGN
-+restore_all_enable_events:  
-+	XEN_UNBLOCK_EVENTS(%rsi)        # %rsi is already set up...
-+
-+scrit:	/**** START OF CRITICAL REGION ****/
-+	XEN_TEST_PENDING(%rsi)
-+	jnz  14f			# process more events if necessary...
-+	XEN_PUT_VCPU_INFO(%rsi)
-+        RESTORE_ARGS 0,8,0
-+        testb $3,8(%rsp)                # check CS
-+        jnz  crit_user_mode
-+        orb   $3,1*8(%rsp)
-+        iretq
-+crit_user_mode:
-+        HYPERVISOR_IRET 0
-+        
-+14:	XEN_LOCKED_BLOCK_EVENTS(%rsi)
-+	XEN_PUT_VCPU_INFO(%rsi)
-+	SAVE_REST
-+        movq %rsp,%rdi                  # set the argument again
-+	jmp  11b
-+ecrit:  /**** END OF CRITICAL REGION ****/
-+# At this point, unlike on x86-32, we don't do the critical-region fixup; this
-+# keeps the code simpler, and the stack frame is more complex on x86-64 anyway.
-+# When the kernel is interrupted in the critical section, it will simply do an
-+# IRET, and everything is restored at that point, i.e. execution resumes at the
-+# interrupted instruction with the same context.
-+	
-+# Hypervisor uses this for application faults while it executes.
-+ENTRY(failsafe_callback)
-+	addq $0x10,%rsp /* skip rcx and r11 */	
-+1:	mov  (%rsp),%ds
-+2:	mov  8(%rsp),%es
-+3:	mov  16(%rsp),%fs
-+4:	mov  24(%rsp),%gs
-+	addq $0x20,%rsp /* skip the above selectors */		
-+	SAVE_ALL
-+	jmp  error_exit
-+.section .fixup,"ax";	\
-+6:	movq $0,(%rsp);	\
-+	jmp 1b;		\
-+7:	movq $0,8(%rsp);	\
-+	jmp 2b;		\
-+8:	movq $0,16(%rsp);	\
-+	jmp 3b;		\
-+9:	movq $0,24(%rsp);	\
-+	jmp 4b;		\
-+.previous;		\
-+.section __ex_table,"a";\
-+	.align 16;	\
-+	.quad 1b,6b;	\
-+	.quad 2b,7b;	\
-+	.quad 3b,8b;	\
-+	.quad 4b,9b;	\
-+.previous
-+ 
-+#if 0	      
-+        .section __ex_table,"a"
-+        .align 8
-+        .quad gs_change,bad_gs
-+        .previous
-+        .section .fixup,"ax"
-+	/* running with kernelgs */
-+bad_gs: 
-+/*	swapgs		*/	/* switch back to user gs */
-+	xorl %eax,%eax
-+        movl %eax,%gs
-+        jmp  2b
-+        .previous       
-+#endif
-+	
-+/*
-+ * Create a kernel thread.
-+ *
-+ * C extern interface:
-+ *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-+ *
-+ * asm input arguments:
-+ *	rdi: fn, rsi: arg, rdx: flags
-+ */
-+ENTRY(kernel_thread)
-+	CFI_STARTPROC
-+	FAKE_STACK_FRAME $child_rip
-+	SAVE_ALL
-+
-+	# rdi: flags, rsi: usp, rdx: will be &pt_regs
-+	movq %rdx,%rdi
-+	orq  kernel_thread_flags(%rip),%rdi
-+	movq $-1, %rsi
-+	movq %rsp, %rdx
-+
-+	xorl %r8d,%r8d
-+	xorl %r9d,%r9d
-+	
-+	# clone now
-+	call do_fork
-+	movq %rax,RAX(%rsp)
-+	xorl %edi,%edi
-+
-+	/*
-+	 * It isn't worth checking for a reschedule here,
-+	 * so internally to the x86_64 port you can rely on kernel_thread()
-+	 * not rescheduling the child before returning; this avoids the need
-+	 * for hacks, for example to fork off the per-CPU idle tasks.
-+         * [Hopefully no generic code relies on the reschedule -AK]	
-+	 */
-+	RESTORE_ALL
-+	UNFAKE_STACK_FRAME
-+	ret
-+	CFI_ENDPROC
-+
-+	
-+child_rip:
-+	/*
-+	 * Here we are in the child and the registers are set as they were
-+	 * at kernel_thread() invocation in the parent.
-+	 */
-+	movq %rdi, %rax
-+	movq %rsi, %rdi
-+	call *%rax
-+	# exit
-+	xorq %rdi, %rdi
-+	call do_exit
-+
-+/*
-+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
-+ *
-+ * C extern interface:
-+ *	 extern long execve(char *name, char **argv, char **envp)
-+ *
-+ * asm input arguments:
-+ *	rdi: name, rsi: argv, rdx: envp
-+ *
-+ * We want to fallback into:
-+ *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
-+ *
-+ * do_sys_execve asm fallback arguments:
-+ *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
-+ */
-+ENTRY(execve)
-+	CFI_STARTPROC
-+	FAKE_STACK_FRAME $0
-+	SAVE_ALL	
-+	call sys_execve
-+	movq %rax, RAX(%rsp)	
-+	RESTORE_REST
-+	testq %rax,%rax
-+	jne 1f
-+        jmp int_ret_from_sys_call
-+1:      RESTORE_ARGS
-+	UNFAKE_STACK_FRAME
-+	ret
-+	CFI_ENDPROC
-+
-+ENTRY(page_fault)
-+	errorentry do_page_fault
-+
-+ENTRY(coprocessor_error)
-+	zeroentry do_coprocessor_error
-+
-+ENTRY(simd_coprocessor_error)
-+	zeroentry do_simd_coprocessor_error	
-+
-+ENTRY(device_not_available)
-+	zeroentry math_state_restore
-+
-+	/* runs on exception stack */
-+ENTRY(debug)
-+	CFI_STARTPROC
-+/*	pushq $0
-+	CFI_ADJUST_CFA_OFFSET 8	*/
-+	zeroentry do_debug
-+/*	jmp paranoid_exit */
-+	CFI_ENDPROC
-+
-+#if 0
-+	/* runs on exception stack */	
-+ENTRY(nmi)
-+	CFI_STARTPROC
-+	pushq $-1
-+	CFI_ADJUST_CFA_OFFSET 8		
-+	paranoidentry do_nmi
-+	/*
-+ 	 * "Paranoid" exit path from exception stack.
-+  	 * Paranoid because this is used by NMIs and cannot take
-+	 * any kernel state for granted.
-+	 * We don't do kernel preemption checks here, because only
-+	 * NMI should be common and it does not enable IRQs and
-+	 * cannot get reschedule ticks.
-+	 */
-+	/* ebx:	no swapgs flag */
-+paranoid_exit:
-+	testl %ebx,%ebx				/* swapgs needed? */
-+	jnz paranoid_restore
-+	testl $3,CS(%rsp)
-+	jnz   paranoid_userspace
-+paranoid_swapgs:	
-+	swapgs
-+paranoid_restore:	
-+	RESTORE_ALL 8
-+	iretq
-+paranoid_userspace:	
-+	GET_THREAD_INFO(%rcx)
-+	movl threadinfo_flags(%rcx),%ebx
-+	andl $_TIF_WORK_MASK,%ebx
-+	jz paranoid_swapgs
-+	movq %rsp,%rdi			/* &pt_regs */
-+	call sync_regs
-+	movq %rax,%rsp			/* switch stack for scheduling */
-+	testl $_TIF_NEED_RESCHED,%ebx
-+	jnz paranoid_schedule
-+	movl %ebx,%edx			/* arg3: thread flags */
-+	sti
-+	xorl %esi,%esi 			/* arg2: oldset */
-+	movq %rsp,%rdi 			/* arg1: &pt_regs */
-+	call do_notify_resume
-+	cli
-+	jmp paranoid_userspace
-+paranoid_schedule:
-+	sti
-+	call schedule
-+	cli
-+	jmp paranoid_userspace
-+	CFI_ENDPROC
-+#endif        
-+
-+ENTRY(int3)
-+	zeroentry do_int3	
-+
-+ENTRY(overflow)
-+	zeroentry do_overflow
-+
-+ENTRY(bounds)
-+	zeroentry do_bounds
-+
-+ENTRY(invalid_op)
-+	zeroentry do_invalid_op	
-+
-+ENTRY(coprocessor_segment_overrun)
-+	zeroentry do_coprocessor_segment_overrun
-+
-+ENTRY(reserved)
-+	zeroentry do_reserved
-+
-+#if 0
-+	/* runs on exception stack */
-+ENTRY(double_fault)
-+	CFI_STARTPROC
-+	paranoidentry do_double_fault
-+	jmp paranoid_exit
-+	CFI_ENDPROC
-+#endif
-+
-+ENTRY(invalid_TSS)
-+	errorentry do_invalid_TSS
-+
-+ENTRY(segment_not_present)
-+	errorentry do_segment_not_present
-+
-+	/* runs on exception stack */
-+ENTRY(stack_segment)
-+	CFI_STARTPROC
-+	errorentry do_stack_segment
-+	CFI_ENDPROC
-+
-+ENTRY(general_protection)
-+	errorentry do_general_protection
-+
-+ENTRY(alignment_check)
-+	errorentry do_alignment_check
-+
-+ENTRY(divide_error)
-+	zeroentry do_divide_error
-+
-+ENTRY(spurious_interrupt_bug)
-+	zeroentry do_spurious_interrupt_bug
-+
-+#ifdef CONFIG_X86_MCE
-+	/* runs on exception stack */
-+ENTRY(machine_check)
-+	CFI_STARTPROC
-+	pushq $0
-+	CFI_ADJUST_CFA_OFFSET 8	
-+	paranoidentry do_machine_check
-+	jmp paranoid_exit
-+	CFI_ENDPROC
-+#endif
-+
-+ENTRY(call_debug)
-+       zeroentry do_call_debug
-+
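The "critical region" comment in do_hypervisor_callback above is easier to follow in C.
Very roughly: events stay masked while the interrupted frame is being restored, and if
new events became pending in the meantime the handler loops back instead of unmasking
and risking a stacked activation. The sketch below is illustration only; the struct and
helper names are invented stand-ins for the vcpu_info fields and evtchn_do_upcall() that
the assembly actually uses.

#include <stdio.h>

struct fake_vcpu_info {
	unsigned char evtchn_upcall_pending;
	unsigned char evtchn_upcall_mask;
};

static struct fake_vcpu_info vcpu;

static void process_pending_events(void)
{
	/* stand-in for evtchn_do_upcall() */
	vcpu.evtchn_upcall_pending = 0;
	puts("handled pending events");
}

static void restore_and_return(void)
{
	for (;;) {
		vcpu.evtchn_upcall_mask = 0;		/* XEN_UNBLOCK_EVENTS */
		if (!vcpu.evtchn_upcall_pending)	/* XEN_TEST_PENDING */
			return;				/* safe to pop the frame */
		vcpu.evtchn_upcall_mask = 1;		/* re-mask, like "jmp 11b" */
		process_pending_events();
	}
}

int main(void)
{
	vcpu.evtchn_upcall_pending = 1;
	restore_and_return();
	return 0;
}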
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/genapic.c linux-2.6.12-xen/arch/xen/x86_64/kernel/genapic.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/genapic.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/genapic.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,123 @@
-+/*
-+ * Copyright 2004 James Cleverdon, IBM.
-+ * Subject to the GNU Public License, v.2
-+ *
-+ * Generic APIC sub-arch probe layer.
-+ *
-+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
-+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
-+ * James Cleverdon.
-+ */
-+#include <linux/config.h>
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+
-+#include <asm/smp.h>
-+#include <asm/ipi.h>
-+
-+#if defined(CONFIG_ACPI_BUS)
-+#include <acpi/acpi_bus.h>
-+#endif
-+
-+/* which logical CPU number maps to which CPU (physical APIC ID) */
-+u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+EXPORT_SYMBOL(x86_cpu_to_apicid);
-+u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+extern struct genapic apic_cluster;
-+extern struct genapic apic_flat;
-+
-+#ifndef CONFIG_XEN
-+struct genapic *genapic = &apic_flat;
-+#else
-+extern struct genapic apic_xen;
-+struct genapic *genapic = &apic_xen;
-+#endif
-+
-+
-+/*
-+ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
-+ */
-+void __init clustered_apic_check(void)
-+{
-+#ifndef CONFIG_XEN
-+	long i;
-+	u8 clusters, max_cluster;
-+	u8 id;
-+	u8 cluster_cnt[NUM_APIC_CLUSTERS];
-+
-+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-+		/* AMD always uses flat mode right now */
-+		genapic = &apic_flat;
-+		goto print;
-+	}
-+
-+#if defined(CONFIG_ACPI_BUS)
-+	/*
-+	 * Some x86_64 machines use physical APIC mode regardless of how many
-+	 * procs/clusters are present (x86_64 ES7000 is an example).
-+	 */
-+	if (acpi_fadt.revision > FADT2_REVISION_ID)
-+		if (acpi_fadt.force_apic_physical_destination_mode) {
-+			genapic = &apic_cluster;
-+			goto print;
-+		}
-+#endif
-+
-+	memset(cluster_cnt, 0, sizeof(cluster_cnt));
-+
-+	for (i = 0; i < NR_CPUS; i++) {
-+		id = bios_cpu_apicid[i];
-+		if (id != BAD_APICID)
-+			cluster_cnt[APIC_CLUSTERID(id)]++;
-+	}
-+
-+	clusters = 0;
-+	max_cluster = 0;
-+	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
-+		if (cluster_cnt[i] > 0) {
-+			++clusters;
-+			if (cluster_cnt[i] > max_cluster)
-+				max_cluster = cluster_cnt[i];
-+		}
-+	}
-+
-+	/*
-+	 * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
-+	 * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
-+	 * else physical mode.
-+	 * (We don't use lowest priority delivery + HW APIC IRQ steering, so
-+	 * can ignore the clustered logical case and go straight to physical.)
-+	 */
-+	if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster)
-+		genapic = &apic_flat;
-+	else
-+		genapic = &apic_cluster;
-+
-+print:
-+#else
-+	/* hardcode to xen apic functions */
-+	genapic = &apic_xen;
-+#endif
-+	printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
-+}
-+
-+/* Same for both flat and clustered. */
-+
-+#ifdef CONFIG_XEN
-+extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
-+#endif
-+
-+void send_IPI_self(int vector)
-+{
-+#ifndef CONFIG_XEN
-+	__send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
-+#else
-+	xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
-+#endif
-+}
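The genapic "probe layer" above boils down to picking one table of function pointers at
boot (clustered_apic_check() hard-wires apic_xen when CONFIG_XEN is set) and dispatching
through it afterwards. A compressed user-space sketch of that pattern follows; the
operations and names are made up and do not reflect the real struct genapic layout.

#include <stdio.h>

struct genapic_ops {
	const char *name;
	void (*send_ipi_self)(int vector);
};

static void flat_send_ipi_self(int vector) { printf("flat IPI, vector %d\n", vector); }
static void xen_send_ipi_self(int vector)  { printf("event-channel IPI, vector %d\n", vector); }

static const struct genapic_ops apic_flat_ops = { "flat", flat_send_ipi_self };
static const struct genapic_ops apic_xen_ops  = { "xen",  xen_send_ipi_self };

static const struct genapic_ops *genapic = &apic_flat_ops;

int main(void)
{
	int running_on_xen = 1;			/* decided once at boot */

	if (running_on_xen)
		genapic = &apic_xen_ops;
	printf("Setting APIC routing to %s\n", genapic->name);
	genapic->send_ipi_self(0xf0);		/* later callers just use the table */
	return 0;
}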
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/genapic_xen.c linux-2.6.12-xen/arch/xen/x86_64/kernel/genapic_xen.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/genapic_xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/genapic_xen.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,162 @@
-+/*
-+ * Copyright 2004 James Cleverdon, IBM.
-+ * Subject to the GNU Public License, v.2
-+ *
-+ * Xen APIC subarch code.  Maximum 8 CPUs, logical delivery.
-+ *
-+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
-+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
-+ * James Cleverdon.
-+ *
-+ * Hacked to pieces for Xen by Chris Wright.
-+ */
-+#include <linux/config.h>
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/string.h>
-+#include <linux/kernel.h>
-+#include <linux/ctype.h>
-+#include <linux/init.h>
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+#include <asm/smp.h>
-+#include <asm/ipi.h>
-+#else
-+#include <asm/apic.h>
-+#include <asm/apicdef.h>
-+#include <asm/genapic.h>
-+#endif
-+#include <asm-xen/evtchn.h>
-+
-+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-+
-+static inline void __send_IPI_one(unsigned int cpu, int vector)
-+{
-+	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-+	BUG_ON(irq < 0);
-+	notify_remote_via_irq(irq);
-+}
-+
-+void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
-+{
-+	int cpu;
-+
-+	switch (shortcut) {
-+	case APIC_DEST_SELF:
-+		__send_IPI_one(smp_processor_id(), vector);
-+		break;
-+	case APIC_DEST_ALLBUT:
-+		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+			if (cpu == smp_processor_id())
-+				continue;
-+			if (cpu_isset(cpu, cpu_online_map)) {
-+				__send_IPI_one(cpu, vector);
-+			}
-+		}
-+		break;
-+	case APIC_DEST_ALLINC:
-+		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+			if (cpu_isset(cpu, cpu_online_map)) {
-+				__send_IPI_one(cpu, vector);
-+			}
-+		}
-+		break;
-+	default:
-+		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
-+		       vector);
-+		break;
-+	}
-+}
-+
-+static cpumask_t xen_target_cpus(void)
-+{
-+	return cpu_online_map;
-+}
-+
-+/*
-+ * Set up the logical destination ID.
-+ * Do nothing, not called now.
-+ */
-+static void xen_init_apic_ldr(void)
-+{
-+	Dprintk("%s\n", __FUNCTION__);
-+	return;
-+}
-+
-+static void xen_send_IPI_allbutself(int vector)
-+{
-+	/*
-+	 * if there are no other CPUs in the system then
-+	 * we get an APIC send error if we try to broadcast.
-+	 * thus we have to avoid sending IPIs in this case.
-+	 */
-+	Dprintk("%s\n", __FUNCTION__);
-+	if (num_online_cpus() > 1)
-+		xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
-+}
-+
-+static void xen_send_IPI_all(int vector)
-+{
-+	Dprintk("%s\n", __FUNCTION__);
-+	xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
-+}
-+
-+static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
-+{
-+	unsigned long mask = cpus_addr(cpumask)[0];
-+	unsigned int cpu;
-+	unsigned long flags;
-+
-+	Dprintk("%s\n", __FUNCTION__);
-+	local_irq_save(flags);
-+	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
-+
-+	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
-+		if (cpu_isset(cpu, cpumask)) {
-+			__send_IPI_one(cpu, vector);
-+		}
-+	}
-+	local_irq_restore(flags);
-+}
-+
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+static int xen_apic_id_registered(void)
-+{
-+	/* better be set */
-+	Dprintk("%s\n", __FUNCTION__);
-+	return physid_isset(smp_processor_id(), phys_cpu_present_map);
-+}
-+#endif
-+
-+static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
-+{
-+	Dprintk("%s\n", __FUNCTION__);
-+	return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
-+}
-+
-+static unsigned int phys_pkg_id(int index_msb)
-+{
-+	u32 ebx;
-+
-+	Dprintk("%s\n", __FUNCTION__);
-+	ebx = cpuid_ebx(1);
-+	return ((ebx >> 24) & 0xFF) >> index_msb;
-+}
-+
-+struct genapic apic_xen =  {
-+	.name = "xen",
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+	.int_delivery_mode = dest_LowestPrio,
-+#endif
-+	.int_dest_mode = (APIC_DEST_LOGICAL != 0),
-+	.int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
-+	.target_cpus = xen_target_cpus,
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+	.apic_id_registered = xen_apic_id_registered,
-+#endif
-+	.init_apic_ldr = xen_init_apic_ldr,
-+	.send_IPI_all = xen_send_IPI_all,
-+	.send_IPI_allbutself = xen_send_IPI_allbutself,
-+	.send_IPI_mask = xen_send_IPI_mask,
-+	.cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
-+	.phys_pkg_id = phys_pkg_id,
-+};
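Note that the Xen IPI path above never programs a local APIC: __send_IPI_one() looks up
the event-channel IRQ bound for the (cpu, IPI vector) pair and notifies it. A small
stand-alone sketch of that lookup-and-notify shape (illustration only; the table contents
are fake and printf stands in for notify_remote_via_irq()):

#include <stdio.h>

#define FAKE_NR_CPUS 4
#define FAKE_NR_IPIS 2

static int ipi_to_irq[FAKE_NR_CPUS][FAKE_NR_IPIS] = {
	{ 16, 17 }, { 18, 19 }, { 20, 21 }, { 22, 23 },
};

static void notify_irq(int irq)
{
	printf("notify event-channel irq %d\n", irq);	/* notify_remote_via_irq() stand-in */
}

static void send_ipi_one(int cpu, int vector)
{
	int irq = ipi_to_irq[cpu][vector];

	if (irq < 0)
		return;			/* the real code BUG()s on an unbound IPI */
	notify_irq(irq);
}

static void send_ipi_mask(unsigned long mask, int vector)
{
	int cpu;

	for (cpu = 0; cpu < FAKE_NR_CPUS; cpu++)
		if (mask & (1UL << cpu))
			send_ipi_one(cpu, vector);
}

int main(void)
{
	send_ipi_mask(0xb, 1);		/* CPUs 0, 1 and 3 */
	return 0;
}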
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/head64.c linux-2.6.12-xen/arch/xen/x86_64/kernel/head64.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/head64.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/head64.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,127 @@
-+/*
-+ *  linux/arch/x86_64/kernel/head64.c -- prepare to run common code
-+ *
-+ *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
-+ *
-+ *  $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
-+ *
-+ *  Jun Nakajima <jun.nakajima at intel.com>
-+ *	Modified for Xen.
-+ */
-+
-+#include <linux/init.h>
-+#include <linux/linkage.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/percpu.h>
-+
-+#include <asm/processor.h>
-+#include <asm/proto.h>
-+#include <asm/smp.h>
-+#include <asm/bootsetup.h>
-+#include <asm/setup.h>
-+#include <asm/desc.h>
-+
-+unsigned long start_pfn;
-+
-+/* Don't add a printk in there. printk relies on the PDA which is not initialized 
-+   yet. */
-+#if 0
-+static void __init clear_bss(void)
-+{
-+	extern char __bss_start[], __bss_end[];
-+	memset(__bss_start, 0,
-+	       (unsigned long) __bss_end - (unsigned long) __bss_start);
-+}
-+#endif
-+
-+#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
-+#define OLD_CL_MAGIC_ADDR	0x90020
-+#define OLD_CL_MAGIC            0xA33F
-+#define OLD_CL_BASE_ADDR        0x90000
-+#define OLD_CL_OFFSET           0x90022
-+
-+extern char saved_command_line[];
-+
-+#if 0
-+static void __init copy_bootdata(char *real_mode_data)
-+{
-+	int new_data;
-+	char * command_line;
-+
-+	memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
-+	new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
-+	if (!new_data) {
-+		if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
-+			printk("so old bootloader that it does not support commandline?!\n");
-+			return;
-+		}
-+		new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
-+		printk("old bootloader convention, maybe loadlin?\n");
-+	}
-+	command_line = (char *) ((u64)(new_data));
-+	memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
-+	printk("Bootdata ok (command line is %s)\n", saved_command_line);	
-+}
-+#endif
-+
-+static void __init setup_boot_cpu_data(void)
-+{
-+	unsigned int dummy, eax;
-+
-+	/* get vendor info */
-+	cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
-+	      (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
-+	      (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
-+	      (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
-+
-+	/* get cpu type */
-+	cpuid(1, &eax, &dummy, &dummy,
-+		(unsigned int *) &boot_cpu_data.x86_capability);
-+	boot_cpu_data.x86 = (eax >> 8) & 0xf;
-+	boot_cpu_data.x86_model = (eax >> 4) & 0xf;
-+	boot_cpu_data.x86_mask = eax & 0xf;
-+}
-+
-+extern char _end[];
-+
-+void __init x86_64_start_kernel(char * real_mode_data)
-+{
-+	int i;
-+
-+        phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;
-+        start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) + 
-+		xen_start_info->nr_pt_frames;
-+
-+	for (i = 0; i < 256; i++)
-+		set_intr_gate(i, early_idt_handler);
-+#if 0
-+	asm volatile("lidt %0" :: "m" (idt_descr));
-+#endif
-+	pda_init(0);
-+        /*	copy_bootdata(real_mode_data); */
-+#ifdef CONFIG_SMP
-+	cpu_set(0, cpu_online_map);
-+#endif
-+#if 0
-+	s = strstr(saved_command_line, "earlyprintk=");
-+	if (s != NULL)
-+		setup_early_printk(s);
-+#endif
-+#ifdef CONFIG_DISCONTIGMEM
-+	s = strstr(saved_command_line, "numa=");
-+	if (s != NULL)
-+		numa_setup(s+5);
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
-+	if (strstr(saved_command_line, "disableapic"))
-+		disable_apic = 1;
-+#endif
-+	/* You need early console to see that */
-+	if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
-+		panic("Kernel too big for kernel mapping\n");
-+
-+	setup_boot_cpu_data();
-+	start_kernel();
-+}
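For reference, setup_boot_cpu_data() above slices the CPUID leaf 1 signature into family,
model and stepping. The same bit arithmetic on a sample EAX value, as a stand-alone
sketch (not part of the patch; extended family/model bits are ignored, just as in the
code above):

#include <stdio.h>

int main(void)
{
	unsigned int eax = 0x0f48;		/* example CPUID.1 signature */
	unsigned int family   = (eax >> 8) & 0xf;
	unsigned int model    = (eax >> 4) & 0xf;
	unsigned int stepping = eax & 0xf;

	printf("family %u, model %u, stepping %u\n", family, model, stepping);
	return 0;
}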
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/head.S linux-2.6.12-xen/arch/xen/x86_64/kernel/head.S
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/head.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/head.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,275 @@
-+/*
-+ *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
-+ *
-+ *  Copyright (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
-+ *  Copyright (C) 2000 Pavel Machek <pavel at suse.cz>
-+ *  Copyright (C) 2000 Karsten Keil <kkeil at suse.de>
-+ *  Copyright (C) 2001,2002 Andi Kleen <ak at suse.de>
-+ *
-+ *  $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
-+ *
-+ *  Jun Nakajima <jun.nakajima at intel.com>
-+ *    Modified for Xen                                
-+ */
-+
-+
-+#include <linux/linkage.h>
-+
-+.section __xen_guest
-+	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
-+	.ascii	",XEN_VER=xen-3.0"
-+	.ascii	",VIRT_BASE=0xffffffff80000000"
-+	.ascii	",HYPERCALL_PAGE=0x10d" /* __pa(hypercall_page) >> 12 */
-+	.ascii	",LOADER=generic"
-+	.byte	0
-+                
-+      
-+#include <linux/threads.h>
-+#include <asm/desc.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/msr.h>
-+#include <asm/cache.h>
-+	
-+/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
-+ * because we need identity-mapped pages on setup so define __START_KERNEL to
-+ * 0x100000 for this stage
-+ * 
-+ */
-+
-+	.text
-+	.code64
-+	.globl startup_64
-+startup_64:
-+ENTRY(_start)
-+	movq %rsi,xen_start_info(%rip)
-+
-+#ifdef CONFIG_SMP
-+ENTRY(startup_64_smp)
-+#endif /* CONFIG_SMP */
-+
-+	cld
-+
-+	movq init_rsp(%rip),%rsp
-+	/* zero EFLAGS after setting rsp */
-+	pushq $0
-+	popfq
-+	movq	initial_code(%rip),%rax
-+	jmp	*%rax
-+
-+	/* SMP bootup changes these two */	
-+	.globl	initial_code
-+initial_code:
-+	.quad	x86_64_start_kernel
-+	.globl init_rsp
-+init_rsp:
-+	.quad  init_thread_union+THREAD_SIZE-8
-+
-+ENTRY(early_idt_handler)
-+	xorl %eax,%eax
-+	movq 8(%rsp),%rsi	# get rip
-+	movq (%rsp),%rdx
-+	leaq early_idt_msg(%rip),%rdi
-+1:	hlt                     # generate #GP
-+	jmp 1b
-+
-+early_idt_msg:
-+	.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
-+
-+#if 0
-+ENTRY(lgdt_finish)
-+        movl $(__USER_DS),%eax          # DS/ES contains default USER segment
-+        movw %ax,%ds
-+        movw %ax,%es
-+        movl $(__KERNEL_DS),%eax        
-+        movw %ax,%ss                    # after changing gdt.
-+        popq %rax                       # get the return address
-+        pushq $(__KERNEL_CS)
-+        pushq %rax
-+        lretq
-+#endif 
-+
-+ENTRY(stext)
-+ENTRY(_stext)
-+
-+	/*
-+	 * This default setting generates an ident mapping at address 0x100000
-+	 * and a mapping for the kernel that precisely maps virtual address
-+	 * 0xffffffff80000000 to physical address 0x000000. (always using
-+	 * 2Mbyte large pages provided by PAE mode)
-+	 */
-+.org 0x1000
-+ENTRY(init_level4_pgt)
-+	.fill	512,8,0
-+
-+        /*
-+         * We update two pgd entries to make kernel and user pgd consistent
-+         * at pgd_populate(). It can be used for kernel modules. So we place 
-+         * this page here for those cases to avoid memory corruption.
-+         * We also use this page to establish the initial mapping for
-+         * vsyscall area.
-+         */
-+.org 0x2000
-+ENTRY(init_level4_user_pgt)
-+	.fill	512,8,0
-+
-+	/*
-+	 * In Xen the following pre-initialized pgt entries are re-initialized.
-+	 */
-+.org 0x3000
-+ENTRY(level3_kernel_pgt)
-+	.fill	510,8,0
-+	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
-+	.quad	0x0000000000105007		/* -> level2_kernel_pgt */
-+	.fill	1,8,0
-+
-+.org 0x4000
-+ENTRY(level2_ident_pgt)
-+	/* 40MB for bootup. 	*/
-+	.quad	0x0000000000000283
-+	.quad	0x0000000000200183
-+	.quad	0x0000000000400183
-+	.quad	0x0000000000600183
-+	.quad	0x0000000000800183
-+	.quad	0x0000000000A00183
-+	.quad	0x0000000000C00183
-+	.quad	0x0000000000E00183
-+	.quad	0x0000000001000183
-+	.quad	0x0000000001200183
-+	.quad	0x0000000001400183
-+	.quad	0x0000000001600183
-+	.quad	0x0000000001800183
-+	.quad	0x0000000001A00183
-+	.quad	0x0000000001C00183
-+	.quad	0x0000000001E00183
-+	.quad	0x0000000002000183
-+	.quad	0x0000000002200183
-+	.quad	0x0000000002400183
-+	.quad	0x0000000002600183
-+	/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
-+	.globl temp_boot_pmds
-+temp_boot_pmds:
-+	.fill	492,8,0
-+
-+.org 0x5000
-+ENTRY(level2_kernel_pgt)
-+	/* 40MB kernel mapping. The kernel code cannot be bigger than that.
-+	   When you change this change KERNEL_TEXT_SIZE in page.h too. */
-+	/* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
-+	.quad	0x0000000000000183
-+	.quad	0x0000000000200183
-+	.quad	0x0000000000400183
-+	.quad	0x0000000000600183
-+	.quad	0x0000000000800183
-+	.quad	0x0000000000A00183
-+	.quad	0x0000000000C00183
-+	.quad	0x0000000000E00183
-+	.quad	0x0000000001000183
-+	.quad	0x0000000001200183
-+	.quad	0x0000000001400183
-+	.quad	0x0000000001600183
-+	.quad	0x0000000001800183
-+	.quad	0x0000000001A00183
-+	.quad	0x0000000001C00183
-+	.quad	0x0000000001E00183
-+	.quad	0x0000000002000183
-+	.quad	0x0000000002200183
-+	.quad	0x0000000002400183
-+	.quad	0x0000000002600183
-+	/* Module mapping starts here */
-+	.fill	492,8,0
-+	
-+        /*
-+         * This is used for vsyscall area mapping as we have a different
-+         * level4 page table for user.
-+         */
-+.org 0x6000
-+ENTRY(level3_user_pgt)
-+        .fill	512,8,0
-+
-+.org 0x7000
-+ENTRY(cpu_gdt_table)
-+/* The TLS descriptors are currently at a different place compared to i386.
-+   Hopefully nobody expects them at a fixed place (Wine?) */
-+	.quad	0x0000000000000000	/* NULL descriptor */
-+	.quad	0x008ffa000000ffff	/* __KERNEL_COMPAT32_CS */	
-+	.quad	0x00affa000000ffff	/* __KERNEL_CS */
-+	.quad	0x00cff2000000ffff	/* __KERNEL_DS */
-+	
-+       	.quad	0x00cffa000000ffff	/* __USER32_CS */
-+	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS  */		
-+	.quad	0x00affa000000ffff	/* __USER_CS */
-+	.quad	0x00cffa000000ffff	/* __KERNEL32_CS */        
-+	.quad	0,0			/* TSS */
-+	.quad	0,0			/* LDT */
-+	.quad   0,0,0			/* three TLS descriptors */ 
-+	.quad	0			/* unused now?   __KERNEL16_CS - 16bit PM for S3 wakeup. */
-+
-+gdt_end:
-+#if 0
-+	/* asm/segment.h:GDT_ENTRIES must match this */	
-+	/* This should be a multiple of the cache line size */
-+	/* GDTs of other CPUs: */	
-+	.fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
-+#endif
-+
-+.org 0x8000
-+ENTRY(empty_zero_page)
-+
-+.org 0x9000
-+ENTRY(empty_bad_page)
-+
-+.org 0xa000
-+ENTRY(empty_bad_pte_table)
-+
-+.org 0xb000
-+ENTRY(empty_bad_pmd_table)
-+
-+.org 0xc000
-+ENTRY(level3_physmem_pgt)
-+	.quad	0x0000000000105007		/* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
-+
-+.org 0xd000
-+ENTRY(hypercall_page)
-+
-+.org 0xe000
-+#ifdef CONFIG_ACPI_SLEEP
-+ENTRY(wakeup_level4_pgt)
-+	.quad	0x0000000000102007		/* -> level3_ident_pgt */
-+	.fill	255,8,0
-+	.quad	0x000000000010a007
-+	.fill	254,8,0
-+	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-+	.quad	0x0000000000103007		/* -> level3_kernel_pgt */
-+#endif
-+
-+	.data
-+
-+	.align 16
-+	.globl cpu_gdt_descr
-+cpu_gdt_descr:
-+	.word	gdt_end-cpu_gdt_table
-+gdt:
-+	.quad	cpu_gdt_table
-+#ifdef CONFIG_SMP
-+	.rept	NR_CPUS-1
-+	.word	0
-+	.quad	0
-+	.endr
-+#endif
-+
-+/* We need valid kernel segments for data and code in long mode too
-+ * IRET will check the segment types  kkeil 2000/10/28
-+ * Also sysret mandates a special GDT layout 
-+ */
-+		 		
-+#if 0        		 		
-+.align L1_CACHE_BYTES
-+#endif
-+	.align  L1_CACHE_BYTES
-+ENTRY(idt_table)	
-+	.rept   256
-+	.quad   0
-+	.quad 	0
-+	.endr
-+
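Two details of head.S above are worth spelling out. First, the __xen_guest section
advertises the hypercall page by physical frame number: HYPERCALL_PAGE=0x10d matches the
.org 0xd000 placement on top of the 0x100000 load address mentioned in the comment at the
top of the file. Second, entry.S invokes individual hypercalls by jumping to
hypercall_page + nr*32, so each hypercall gets a 32-byte stub inside that page. A trivial
sketch of the address arithmetic (illustration only; the __HYPERVISOR_iret number is
assumed from the Xen 3.0 public headers):

#include <stdio.h>

int main(void)
{
	/* VIRT_BASE 0xffffffff80000000 + phys 0x10d000, per the strings above */
	unsigned long long hypercall_page = 0xffffffff8010d000ULL;
	unsigned int nr_iret = 23;	/* __HYPERVISOR_iret (assumed value) */

	printf("iret stub at %#llx\n", hypercall_page + nr_iret * 32);
	return 0;
}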
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/io_apic.c linux-2.6.12-xen/arch/xen/x86_64/kernel/io_apic.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/io_apic.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/io_apic.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1992 @@
-+/*
-+ *	Intel IO-APIC support for multi-Pentium hosts.
-+ *
-+ *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
-+ *
-+ *	Many thanks to Stig Venaas for trying out countless experimental
-+ *	patches and reporting/debugging problems patiently!
-+ *
-+ *	(c) 1999, Multiple IO-APIC support, developed by
-+ *	Ken-ichi Yaku <yaku at css1.kbnes.nec.co.jp> and
-+ *      Hidemi Kishimoto <kisimoto at css1.kbnes.nec.co.jp>,
-+ *	further tested and cleaned up by Zach Brown <zab at redhat.com>
-+ *	and Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
-+ *					thanks to Eric Gilmore
-+ *					and Rolf G. Tews
-+ *					for testing these extensively
-+ *	Paul Diefenbaugh	:	Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/irq.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/sched.h>
-+#include <linux/config.h>
-+#include <linux/smp_lock.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/acpi.h>
-+#include <linux/sysdev.h>
-+
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/mach_apic.h>
-+#include <asm/acpi.h>
-+
-+#define __apicdebuginit  __init
-+
-+int sis_apic_bug; /* not actually supported, dummy for compile */
-+
-+static int no_timer_check;
-+
-+static DEFINE_SPINLOCK(ioapic_lock);
-+
-+/*
-+ * # of IRQ routing registers
-+ */
-+int nr_ioapic_registers[MAX_IO_APICS];
-+
-+/*
-+ * Rough estimation of how many shared IRQs there are, can
-+ * be changed anytime.
-+ */
-+#define MAX_PLUS_SHARED_IRQS NR_IRQS
-+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
-+
-+/*
-+ * This is performance-critical, we want to do it O(1)
-+ *
-+ * the indexing order of this array favors 1:1 mappings
-+ * between pins and IRQs.
-+ */
-+
-+static struct irq_pin_list {
-+	short apic, pin, next;
-+} irq_2_pin[PIN_MAP_SIZE];
-+
-+int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
-+#ifdef CONFIG_PCI_MSI
-+#define vector_to_irq(vector) 	\
-+	(platform_legacy_irq(vector) ? vector : vector_irq[vector])
-+#else
-+#define vector_to_irq(vector)	(vector)
-+#endif
-+
-+#ifdef CONFIG_XEN
-+
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/physdev.h>
-+
-+/* Fake i8259 */
-+#define make_8259A_irq(_irq)     (io_apic_irqs &= ~(1UL<<(_irq)))
-+#define disable_8259A_irq(_irq)  ((void)0)
-+#define i8259A_irq_pending(_irq) (0)
-+
-+unsigned long io_apic_irqs;
-+
-+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
-+{
-+	physdev_op_t op;
-+	int ret;
-+
-+	op.cmd = PHYSDEVOP_APIC_READ;
-+	op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
-+	op.u.apic_op.offset = reg;
-+	ret = HYPERVISOR_physdev_op(&op);
-+	if (ret)
-+		return ret;
-+	return op.u.apic_op.value;
-+}
-+
-+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-+{
-+	physdev_op_t op;
-+
-+	op.cmd = PHYSDEVOP_APIC_WRITE;
-+	op.u.apic_op.apic = mp_ioapics[apic].mpc_apicid;
-+	op.u.apic_op.offset = reg;
-+	op.u.apic_op.value = value;
-+	HYPERVISOR_physdev_op(&op);
-+}
-+
-+#define io_apic_read(a,r)    xen_io_apic_read(a,r)
-+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
-+
-+#define clear_IO_APIC() ((void)0)
-+
-+#endif /* !CONFIG_XEN */
-+
-+/*
-+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
-+ * shared ISA-space IRQs, so we have to support them. We are super
-+ * fast in the common case, and fast for shared ISA-space IRQs.
-+ */
-+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
-+{
-+	static int first_free_entry = NR_IRQS;
-+	struct irq_pin_list *entry = irq_2_pin + irq;
-+
-+	while (entry->next)
-+		entry = irq_2_pin + entry->next;
-+
-+	if (entry->pin != -1) {
-+		entry->next = first_free_entry;
-+		entry = irq_2_pin + entry->next;
-+		if (++first_free_entry >= PIN_MAP_SIZE)
-+			panic("io_apic.c: whoops");
-+	}
-+	entry->apic = apic;
-+	entry->pin = pin;
-+}
-+
-+#ifndef CONFIG_XEN
-+#define __DO_ACTION(R, ACTION, FINAL)					\
-+									\
-+{									\
-+	int pin;							\
-+	struct irq_pin_list *entry = irq_2_pin + irq;			\
-+									\
-+	for (;;) {							\
-+		unsigned int reg;					\
-+		pin = entry->pin;					\
-+		if (pin == -1)						\
-+			break;						\
-+		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
-+		reg ACTION;						\
-+		io_apic_modify(entry->apic, reg);			\
-+		if (!entry->next)					\
-+			break;						\
-+		entry = irq_2_pin + entry->next;			\
-+	}								\
-+	FINAL;								\
-+}
-+
-+#define DO_ACTION(name,R,ACTION, FINAL)					\
-+									\
-+	static void name##_IO_APIC_irq (unsigned int irq)		\
-+	__DO_ACTION(R, ACTION, FINAL)
-+
-+DO_ACTION( __mask,             0, |= 0x00010000, io_apic_sync(entry->apic) )
-+						/* mask = 1 */
-+DO_ACTION( __unmask,           0, &= 0xfffeffff, )
-+						/* mask = 0 */
-+
-+static void mask_IO_APIC_irq (unsigned int irq)
-+{
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__mask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void unmask_IO_APIC_irq (unsigned int irq)
-+{
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__unmask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
-+
-+	/* Check delivery_mode to be sure we're not clearing an SMI pin */
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
-+	*(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+	if (entry.delivery_mode == dest_SMI)
-+		return;
-+	/*
-+	 * Disable it in the IO-APIC irq-routing table:
-+	 */
-+	memset(&entry, 0, sizeof(entry));
-+	entry.mask = 1;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
-+	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+static void clear_IO_APIC (void)
-+{
-+	int apic, pin;
-+
-+	for (apic = 0; apic < nr_ioapics; apic++)
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-+			clear_IO_APIC_pin(apic, pin);
-+}
-+
-+#endif /* !CONFIG_XEN */
-+
-+/*
-+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
-+ * specific CPU-side IRQs.
-+ */
-+
-+#define MAX_PIRQS 8
-+static int pirq_entries [MAX_PIRQS];
-+static int pirqs_enabled;
-+int skip_ioapic_setup;
-+int ioapic_force;
-+
-+/* dummy parsing: see setup.c */
-+
-+static int __init disable_ioapic_setup(char *str)
-+{
-+	skip_ioapic_setup = 1;
-+	return 1;
-+}
-+
-+static int __init enable_ioapic_setup(char *str)
-+{
-+	ioapic_force = 1;
-+	skip_ioapic_setup = 0;
-+	return 1;
-+}
-+
-+__setup("noapic", disable_ioapic_setup);
-+__setup("apic", enable_ioapic_setup);
-+
-+#include <asm/pci-direct.h>
-+#include <linux/pci_ids.h>
-+#include <linux/pci.h>
-+
-+/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
-+   off. Check for an Nvidia or VIA PCI bridge and turn it off.
-+   Use pci direct infrastructure because this runs before the PCI subsystem. 
-+
-+   Can be overridden with "apic"
-+
-+   And another hack to disable the IOMMU on VIA chipsets.
-+
-+   Kludge-O-Rama. */
-+void __init check_ioapic(void) 
-+{ 
-+	int num,slot,func; 
-+	if (ioapic_force) 
-+		return; 
-+
-+	/* Poor man's PCI discovery */
-+	for (num = 0; num < 32; num++) { 
-+		for (slot = 0; slot < 32; slot++) { 
-+			for (func = 0; func < 8; func++) { 
-+				u32 class;
-+				u32 vendor;
-+				u8 type;
-+				class = read_pci_config(num,slot,func,
-+							PCI_CLASS_REVISION);
-+				if (class == 0xffffffff)
-+					break; 
-+
-+		       		if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
-+					continue; 
-+
-+				vendor = read_pci_config(num, slot, func, 
-+							 PCI_VENDOR_ID);
-+				vendor &= 0xffff;
-+				switch (vendor) { 
-+				case PCI_VENDOR_ID_VIA:
-+#ifdef CONFIG_GART_IOMMU
-+					if ((end_pfn >= (0xffffffff>>PAGE_SHIFT) ||
-+					     force_iommu) &&
-+					    !iommu_aperture_allowed) {
-+						printk(KERN_INFO
-+    "Looks like a VIA chipset. Disabling IOMMU. Overwrite with \"iommu=allowed\"\n");
-+						iommu_aperture_disabled = 1;
-+					}
-+#endif
-+					return;
-+				case PCI_VENDOR_ID_NVIDIA:
-+#ifdef CONFIG_ACPI
-+					/* All timer overrides on Nvidia
-+				           seem to be wrong. Skip them. */
-+					acpi_skip_timer_override = 1;
-+					printk(KERN_INFO 
-+	     "Nvidia board detected. Ignoring ACPI timer override.\n");
-+#endif
-+					/* RED-PEN skip them on mptables too? */
-+					return;
-+				} 
-+
-+				/* No multi-function device? */
-+				type = read_pci_config_byte(num,slot,func,
-+							    PCI_HEADER_TYPE);
-+				if (!(type & 0x80))
-+					break;
-+			} 
-+		}
-+	}
-+} 
-+
-+static int __init ioapic_pirq_setup(char *str)
-+{
-+	int i, max;
-+	int ints[MAX_PIRQS+1];
-+
-+	get_options(str, ARRAY_SIZE(ints), ints);
-+
-+	for (i = 0; i < MAX_PIRQS; i++)
-+		pirq_entries[i] = -1;
-+
-+	pirqs_enabled = 1;
-+	apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
-+	max = MAX_PIRQS;
-+	if (ints[0] < MAX_PIRQS)
-+		max = ints[0];
-+
-+	for (i = 0; i < max; i++) {
-+		apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-+		/*
-+		 * PIRQs are mapped upside down, usually.
-+		 */
-+		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-+	}
-+	return 1;
-+}
-+
-+__setup("pirq=", ioapic_pirq_setup);
-+
-+/*
-+ * Find the IRQ entry number of a certain pin.
-+ */
-+static int find_irq_entry(int apic, int pin, int type)
-+{
-+	int i;
-+
-+	for (i = 0; i < mp_irq_entries; i++)
-+		if (mp_irqs[i].mpc_irqtype == type &&
-+		    (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
-+		     mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
-+		    mp_irqs[i].mpc_dstirq == pin)
-+			return i;
-+
-+	return -1;
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Find the pin to which IRQ[irq] (ISA) is connected
-+ */
-+static int __init find_isa_irq_pin(int irq, int type)
-+{
-+	int i;
-+
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
-+
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-+		     mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
-+		    (mp_irqs[i].mpc_irqtype == type) &&
-+		    (mp_irqs[i].mpc_srcbusirq == irq))
-+
-+			return mp_irqs[i].mpc_dstirq;
-+	}
-+	return -1;
-+}
-+#endif
-+
-+/*
-+ * Find a specific PCI IRQ entry.
-+ * Not an __init, possibly needed by modules
-+ */
-+static int pin_2_irq(int idx, int apic, int pin);
-+
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-+{
-+	int apic, i, best_guess = -1;
-+
-+	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
-+		bus, slot, pin);
-+	if (mp_bus_id_to_pci_bus[bus] == -1) {
-+		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-+		return -1;
-+	}
-+	for (i = 0; i < mp_irq_entries; i++) {
-+		int lbus = mp_irqs[i].mpc_srcbus;
-+
-+		for (apic = 0; apic < nr_ioapics; apic++)
-+			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
-+			    mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
-+				break;
-+
-+		if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
-+		    !mp_irqs[i].mpc_irqtype &&
-+		    (bus == lbus) &&
-+		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
-+			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
-+
-+			if (!(apic || IO_APIC_IRQ(irq)))
-+				continue;
-+
-+			if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
-+				return irq;
-+			/*
-+			 * Use the first all-but-pin matching entry as a
-+			 * best-guess fuzzy result for broken mptables.
-+			 */
-+			if (best_guess < 0)
-+				best_guess = irq;
-+		}
-+	}
-+	return best_guess;
-+}
-+
-+/*
-+ * EISA Edge/Level control register, ELCR
-+ */
-+static int EISA_ELCR(unsigned int irq)
-+{
-+	if (irq < 16) {
-+		unsigned int port = 0x4d0 + (irq >> 3);
-+		return (inb(port) >> (irq & 7)) & 1;
-+	}
-+	apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
-+	return 0;
-+}
-+
-+/* EISA interrupts are always polarity zero and can be edge or level
-+ * trigger depending on the ELCR value.  If an interrupt is listed as
-+ * EISA conforming in the MP table, that means its trigger type must
-+ * be read in from the ELCR */
-+
-+#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
-+#define default_EISA_polarity(idx)	(0)
-+
-+/* ISA interrupts are always polarity zero edge triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_ISA_trigger(idx)	(0)
-+#define default_ISA_polarity(idx)	(0)
-+
-+/* PCI interrupts are always polarity one level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_PCI_trigger(idx)	(1)
-+#define default_PCI_polarity(idx)	(1)
-+
-+/* MCA interrupts are always polarity zero level triggered,
-+ * when listed as conforming in the MP table. */
-+
-+#define default_MCA_trigger(idx)	(1)
-+#define default_MCA_polarity(idx)	(0)
-+
-+static int __init MPBIOS_polarity(int idx)
-+{
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+	int polarity;
-+
-+	/*
-+	 * Determine IRQ line polarity (high active or low active):
-+	 */
-+	switch (mp_irqs[idx].mpc_irqflag & 3)
-+	{
-+		case 0: /* conforms, i.e. bus-type dependent polarity */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					polarity = default_ISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					polarity = default_EISA_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					polarity = default_PCI_polarity(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					polarity = default_MCA_polarity(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					polarity = 1;
-+					break;
-+				}
-+			}
-+			break;
-+		}
-+		case 1: /* high active */
-+		{
-+			polarity = 0;
-+			break;
-+		}
-+		case 2: /* reserved */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			polarity = 1;
-+			break;
-+		}
-+		case 3: /* low active */
-+		{
-+			polarity = 1;
-+			break;
-+		}
-+		default: /* invalid */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			polarity = 1;
-+			break;
-+		}
-+	}
-+	return polarity;
-+}
-+
-+static int MPBIOS_trigger(int idx)
-+{
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+	int trigger;
-+
-+	/*
-+	 * Determine IRQ trigger mode (edge or level sensitive):
-+	 */
-+	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
-+	{
-+		case 0: /* conforms, i.e. bus-type dependent */
-+		{
-+			switch (mp_bus_id_to_type[bus])
-+			{
-+				case MP_BUS_ISA: /* ISA pin */
-+				{
-+					trigger = default_ISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_EISA: /* EISA pin */
-+				{
-+					trigger = default_EISA_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_PCI: /* PCI pin */
-+				{
-+					trigger = default_PCI_trigger(idx);
-+					break;
-+				}
-+				case MP_BUS_MCA: /* MCA pin */
-+				{
-+					trigger = default_MCA_trigger(idx);
-+					break;
-+				}
-+				default:
-+				{
-+					printk(KERN_WARNING "broken BIOS!!\n");
-+					trigger = 1;
-+					break;
-+				}
-+			}
-+			break;
-+		}
-+		case 1: /* edge */
-+		{
-+			trigger = 0;
-+			break;
-+		}
-+		case 2: /* reserved */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			trigger = 1;
-+			break;
-+		}
-+		case 3: /* level */
-+		{
-+			trigger = 1;
-+			break;
-+		}
-+		default: /* invalid */
-+		{
-+			printk(KERN_WARNING "broken BIOS!!\n");
-+			trigger = 0;
-+			break;
-+		}
-+	}
-+	return trigger;
-+}
-+
-+static inline int irq_polarity(int idx)
-+{
-+	return MPBIOS_polarity(idx);
-+}
-+
-+static inline int irq_trigger(int idx)
-+{
-+	return MPBIOS_trigger(idx);
-+}
-+
-+static int pin_2_irq(int idx, int apic, int pin)
-+{
-+	int irq, i;
-+	int bus = mp_irqs[idx].mpc_srcbus;
-+
-+	/*
-+	 * Debugging check, we are in big trouble if this message pops up!
-+	 */
-+	if (mp_irqs[idx].mpc_dstirq != pin)
-+		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
-+
-+	switch (mp_bus_id_to_type[bus])
-+	{
-+		case MP_BUS_ISA: /* ISA pin */
-+		case MP_BUS_EISA:
-+		case MP_BUS_MCA:
-+		{
-+			irq = mp_irqs[idx].mpc_srcbusirq;
-+			break;
-+		}
-+		case MP_BUS_PCI: /* PCI pin */
-+		{
-+			/*
-+			 * PCI IRQs are mapped in order
-+			 */
-+			i = irq = 0;
-+			while (i < apic)
-+				irq += nr_ioapic_registers[i++];
-+			irq += pin;
-+			break;
-+		}
-+		default:
-+		{
-+			printk(KERN_ERR "unknown bus type %d.\n",bus); 
-+			irq = 0;
-+			break;
-+		}
-+	}
-+
-+	/*
-+	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
-+	 */
-+	if ((pin >= 16) && (pin <= 23)) {
-+		if (pirq_entries[pin-16] != -1) {
-+			if (!pirq_entries[pin-16]) {
-+				apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
-+			} else {
-+				irq = pirq_entries[pin-16];
-+				apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
-+						pin-16, irq);
-+			}
-+		}
-+	}
-+	return irq;
-+}
-+
-+static inline int IO_APIC_irq_trigger(int irq)
-+{
-+	int apic, idx, pin;
-+
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+			idx = find_irq_entry(apic,pin,mp_INT);
-+			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
-+				return irq_trigger(idx);
-+		}
-+	}
-+	/*
-+	 * nonexistent IRQs are edge default
-+	 */
-+	return 0;
-+}
-+
-+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-+u8 irq_vector[NR_IRQ_VECTORS];
-+
-+int assign_irq_vector(int irq)
-+{
-+	static int current_vector = FIRST_DEVICE_VECTOR;
-+	physdev_op_t op;
-+  
-+  	BUG_ON(irq >= NR_IRQ_VECTORS);
-+  	if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
-+  		return IO_APIC_VECTOR(irq);
-+
-+	op.cmd = PHYSDEVOP_ASSIGN_VECTOR;
-+	op.u.irq_op.irq = irq;
-+	if (HYPERVISOR_physdev_op(&op))
-+		return -ENOSPC;
-+	current_vector = op.u.irq_op.vector;
-+
-+	vector_irq[current_vector] = irq;
-+	if (irq != AUTO_ASSIGN)
-+		IO_APIC_VECTOR(irq) = current_vector;
-+
-+	return current_vector;
-+}
-+
-+extern void (*interrupt[NR_IRQS])(void);
-+#ifndef CONFIG_XEN
-+static struct hw_interrupt_type ioapic_level_type;
-+static struct hw_interrupt_type ioapic_edge_type;
-+
-+#define IOAPIC_AUTO	-1
-+#define IOAPIC_EDGE	0
-+#define IOAPIC_LEVEL	1
-+
-+static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
-+{
-+	if (use_pci_vector() && !platform_legacy_irq(irq)) {
-+		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+				trigger == IOAPIC_LEVEL)
-+			irq_desc[vector].handler = &ioapic_level_type;
-+		else
-+			irq_desc[vector].handler = &ioapic_edge_type;
-+		set_intr_gate(vector, interrupt[vector]);
-+	} else	{
-+		if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-+				trigger == IOAPIC_LEVEL)
-+			irq_desc[irq].handler = &ioapic_level_type;
-+		else
-+			irq_desc[irq].handler = &ioapic_edge_type;
-+		set_intr_gate(vector, interrupt[irq]);
-+	}
-+}
-+#else
-+#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
-+#endif /* !CONFIG_XEN */
-+
-+static void __init setup_IO_APIC_irqs(void)
-+{
-+	struct IO_APIC_route_entry entry;
-+	int apic, pin, idx, irq, first_notcon = 1, vector;
-+	unsigned long flags;
-+
-+	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-+
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+	for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-+
-+		/*
-+		 * add it to the IO-APIC irq-routing table:
-+		 */
-+		memset(&entry,0,sizeof(entry));
-+
-+		entry.delivery_mode = INT_DELIVERY_MODE;
-+		entry.dest_mode = INT_DEST_MODE;
-+		entry.mask = 0;				/* enable IRQ */
-+		entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+
-+		idx = find_irq_entry(apic,pin,mp_INT);
-+		if (idx == -1) {
-+			if (first_notcon) {
-+				apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
-+				first_notcon = 0;
-+			} else
-+				apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
-+			continue;
-+		}
-+
-+		entry.trigger = irq_trigger(idx);
-+		entry.polarity = irq_polarity(idx);
-+
-+		if (irq_trigger(idx)) {
-+			entry.trigger = 1;
-+			entry.mask = 1;
-+			entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+		}
-+
-+		irq = pin_2_irq(idx, apic, pin);
-+		add_pin_to_irq(irq, apic, pin);
-+
-+		if (/* !apic && */ !IO_APIC_IRQ(irq))
-+			continue;
-+
-+		if (IO_APIC_IRQ(irq)) {
-+			vector = assign_irq_vector(irq);
-+			entry.vector = vector;
-+
-+			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-+			if (!apic && (irq < 16))
-+				disable_8259A_irq(irq);
-+		}
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
-+		io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+	}
-+	}
-+
-+	if (!first_notcon)
-+		apic_printk(APIC_VERBOSE," not connected.\n");
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * Set up the 8259A-master output pin as broadcast to all
-+ * CPUs.
-+ */
-+static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
-+
-+	memset(&entry,0,sizeof(entry));
-+
-+	disable_8259A_irq(0);
-+
-+	/* mask LVT0 */
-+	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+
-+	/*
-+	 * We use logical delivery to get the timer IRQ
-+	 * to the first CPU.
-+	 */
-+	entry.dest_mode = INT_DEST_MODE;
-+	entry.mask = 0;					/* unmask IRQ now */
-+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+	entry.delivery_mode = INT_DELIVERY_MODE;
-+	entry.polarity = 0;
-+	entry.trigger = 0;
-+	entry.vector = vector;
-+
-+	/*
-+	 * The timer IRQ doesn't have to know that behind the
-+	 * scene we have a 8259A-master in AEOI mode ...
-+	 */
-+	irq_desc[0].handler = &ioapic_edge_type;
-+
-+	/*
-+	 * Add it to the IO-APIC irq-routing table:
-+	 */
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
-+	io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	enable_8259A_irq(0);
-+}
-+
-+void __init UNEXPECTED_IO_APIC(void)
-+{
-+}
-+
-+void __apicdebuginit print_IO_APIC(void)
-+{
-+	int apic, i;
-+	union IO_APIC_reg_00 reg_00;
-+	union IO_APIC_reg_01 reg_01;
-+	union IO_APIC_reg_02 reg_02;
-+	unsigned long flags;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-+	for (i = 0; i < nr_ioapics; i++)
-+		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-+		       mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
-+
-+	/*
-+	 * We are a bit conservative about what we expect.  We have to
-+	 * know about every hardware change ASAP.
-+	 */
-+	printk(KERN_INFO "testing the IO APIC.......................\n");
-+
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(apic, 0);
-+	reg_01.raw = io_apic_read(apic, 1);
-+	if (reg_01.bits.version >= 0x10)
-+		reg_02.raw = io_apic_read(apic, 2);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	printk("\n");
-+	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
-+	printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-+	printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
-+	if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
-+		UNEXPECTED_IO_APIC();
-+
-+	printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
-+	printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
-+	if (	(reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
-+		(reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
-+		(reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
-+		(reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
-+		(reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
-+		(reg_01.bits.entries != 0x2E) &&
-+		(reg_01.bits.entries != 0x3F) &&
-+		(reg_01.bits.entries != 0x03) 
-+	)
-+		UNEXPECTED_IO_APIC();
-+
-+	printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
-+	printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
-+	if (	(reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
-+		(reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
-+		(reg_01.bits.version != 0x10) && /* oldest IO-APICs */
-+		(reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
-+		(reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
-+		(reg_01.bits.version != 0x20)    /* Intel P64H (82806 AA) */
-+	)
-+		UNEXPECTED_IO_APIC();
-+	if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
-+		UNEXPECTED_IO_APIC();
-+
-+	if (reg_01.bits.version >= 0x10) {
-+		printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-+		printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
-+		if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
-+			UNEXPECTED_IO_APIC();
-+	}
-+
-+	printk(KERN_DEBUG ".... IRQ redirection table:\n");
-+
-+	printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
-+			  " Stat Dest Deli Vect:   \n");
-+
-+	for (i = 0; i <= reg_01.bits.entries; i++) {
-+		struct IO_APIC_route_entry entry;
-+
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		*(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
-+		*(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+		printk(KERN_DEBUG " %02x %03X %02X  ",
-+			i,
-+			entry.dest.logical.logical_dest,
-+			entry.dest.physical.physical_dest
-+		);
-+
-+		printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
-+			entry.mask,
-+			entry.trigger,
-+			entry.irr,
-+			entry.polarity,
-+			entry.delivery_status,
-+			entry.dest_mode,
-+			entry.delivery_mode,
-+			entry.vector
-+		);
-+	}
-+	}
-+	if (use_pci_vector())
-+		printk(KERN_INFO "Using vector-based indexing\n");
-+	printk(KERN_DEBUG "IRQ to pin mappings:\n");
-+	for (i = 0; i < NR_IRQS; i++) {
-+		struct irq_pin_list *entry = irq_2_pin + i;
-+		if (entry->pin < 0)
-+			continue;
-+ 		if (use_pci_vector() && !platform_legacy_irq(i))
-+			printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
-+		else
-+			printk(KERN_DEBUG "IRQ%d ", i);
-+		for (;;) {
-+			printk("-> %d:%d", entry->apic, entry->pin);
-+			if (!entry->next)
-+				break;
-+			entry = irq_2_pin + entry->next;
-+		}
-+		printk("\n");
-+	}
-+
-+	printk(KERN_INFO ".................................... done.\n");
-+
-+	return;
-+}
-+
-+#if 0
-+
-+static __apicdebuginit void print_APIC_bitfield (int base)
-+{
-+	unsigned int v;
-+	int i, j;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-+	for (i = 0; i < 8; i++) {
-+		v = apic_read(base + i*0x10);
-+		for (j = 0; j < 32; j++) {
-+			if (v & (1<<j))
-+				printk("1");
-+			else
-+				printk("0");
-+		}
-+		printk("\n");
-+	}
-+}
-+
-+void __apicdebuginit print_local_APIC(void * dummy)
-+{
-+	unsigned int v, ver, maxlvt;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-+		smp_processor_id(), hard_smp_processor_id());
-+	v = apic_read(APIC_ID);
-+	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, GET_APIC_ID(v));
-+	v = apic_read(APIC_LVR);
-+	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-+	ver = GET_APIC_VERSION(v);
-+	maxlvt = get_maxlvt();
-+
-+	v = apic_read(APIC_TASKPRI);
-+	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-+
-+	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
-+		v = apic_read(APIC_ARBPRI);
-+		printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-+			v & APIC_ARBPRI_MASK);
-+		v = apic_read(APIC_PROCPRI);
-+		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-+	}
-+
-+	v = apic_read(APIC_EOI);
-+	printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
-+	v = apic_read(APIC_RRR);
-+	printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-+	v = apic_read(APIC_LDR);
-+	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-+	v = apic_read(APIC_DFR);
-+	printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-+	v = apic_read(APIC_SPIV);
-+	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-+
-+	printk(KERN_DEBUG "... APIC ISR field:\n");
-+	print_APIC_bitfield(APIC_ISR);
-+	printk(KERN_DEBUG "... APIC TMR field:\n");
-+	print_APIC_bitfield(APIC_TMR);
-+	printk(KERN_DEBUG "... APIC IRR field:\n");
-+	print_APIC_bitfield(APIC_IRR);
-+
-+	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
-+		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
-+			apic_write(APIC_ESR, 0);
-+		v = apic_read(APIC_ESR);
-+		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-+	}
-+
-+	v = apic_read(APIC_ICR);
-+	printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
-+	v = apic_read(APIC_ICR2);
-+	printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
-+
-+	v = apic_read(APIC_LVTT);
-+	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-+
-+	if (maxlvt > 3) {                       /* PC is LVT#4. */
-+		v = apic_read(APIC_LVTPC);
-+		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-+	}
-+	v = apic_read(APIC_LVT0);
-+	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-+	v = apic_read(APIC_LVT1);
-+	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-+
-+	if (maxlvt > 2) {			/* ERR is LVT#3. */
-+		v = apic_read(APIC_LVTERR);
-+		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-+	}
-+
-+	v = apic_read(APIC_TMICT);
-+	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-+	v = apic_read(APIC_TMCCT);
-+	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-+	v = apic_read(APIC_TDCR);
-+	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-+	printk("\n");
-+}
-+
-+void print_all_local_APICs (void)
-+{
-+	on_each_cpu(print_local_APIC, NULL, 1, 1);
-+}
-+
-+void __apicdebuginit print_PIC(void)
-+{
-+	extern spinlock_t i8259A_lock;
-+	unsigned int v;
-+	unsigned long flags;
-+
-+	if (apic_verbosity == APIC_QUIET)
-+		return;
-+
-+	printk(KERN_DEBUG "\nprinting PIC contents\n");
-+
-+	spin_lock_irqsave(&i8259A_lock, flags);
-+
-+	v = inb(0xa1) << 8 | inb(0x21);
-+	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
-+
-+	v = inb(0xa0) << 8 | inb(0x20);
-+	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
-+
-+	outb(0x0b,0xa0);
-+	outb(0x0b,0x20);
-+	v = inb(0xa0) << 8 | inb(0x20);
-+	outb(0x0a,0xa0);
-+	outb(0x0a,0x20);
-+
-+	spin_unlock_irqrestore(&i8259A_lock, flags);
-+
-+	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
-+
-+	v = inb(0x4d1) << 8 | inb(0x4d0);
-+	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-+}
-+
-+#endif  /*  0  */
-+
-+#else
-+void __init print_IO_APIC(void) { }
-+#endif /* !CONFIG_XEN */
-+
-+static void __init enable_IO_APIC(void)
-+{
-+	union IO_APIC_reg_01 reg_01;
-+	int i;
-+	unsigned long flags;
-+
-+	for (i = 0; i < PIN_MAP_SIZE; i++) {
-+		irq_2_pin[i].pin = -1;
-+		irq_2_pin[i].next = 0;
-+	}
-+	if (!pirqs_enabled)
-+		for (i = 0; i < MAX_PIRQS; i++)
-+			pirq_entries[i] = -1;
-+
-+	/*
-+	 * The number of IO-APIC IRQ registers (== #pins):
-+	 */
-+	for (i = 0; i < nr_ioapics; i++) {
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_01.raw = io_apic_read(i, 1);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		nr_ioapic_registers[i] = reg_01.bits.entries+1;
-+	}
-+
-+	/*
-+	 * Do not trust the IO-APIC being empty at bootup
-+	 */
-+	clear_IO_APIC();
-+}
-+
-+/*
-+ * Not an __init, needed by the reboot code
-+ */
-+void disable_IO_APIC(void)
-+{
-+	/*
-+	 * Clear the IO-APIC before rebooting:
-+	 */
-+	clear_IO_APIC();
-+
-+#ifndef CONFIG_XEN
-+	disconnect_bsp_APIC();
-+#endif
-+}
-+
-+/*
-+ * function to set the IO-APIC physical IDs based on the
-+ * values stored in the MPC table.
-+ *
-+ * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
-+ */
-+
-+#ifndef CONFIG_XEN
-+static void __init setup_ioapic_ids_from_mpc (void)
-+{
-+	union IO_APIC_reg_00 reg_00;
-+	int apic;
-+	int i;
-+	unsigned char old_id;
-+	unsigned long flags;
-+
-+	/*
-+	 * Set the IOAPIC ID to the value stored in the MPC table.
-+	 */
-+	for (apic = 0; apic < nr_ioapics; apic++) {
-+
-+		/* Read the register 0 value */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		
-+		old_id = mp_ioapics[apic].mpc_apicid;
-+
-+
-+		printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
-+
-+
-+		/*
-+		 * We need to adjust the IRQ routing table
-+		 * if the ID changed.
-+		 */
-+		if (old_id != mp_ioapics[apic].mpc_apicid)
-+			for (i = 0; i < mp_irq_entries; i++)
-+				if (mp_irqs[i].mpc_dstapic == old_id)
-+					mp_irqs[i].mpc_dstapic
-+						= mp_ioapics[apic].mpc_apicid;
-+
-+		/*
-+		 * Read the right value from the MPC table and
-+		 * write it into the ID register.
-+	 	 */
-+		apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
-+				mp_ioapics[apic].mpc_apicid);
-+
-+		reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		io_apic_write(apic, 0, reg_00.raw);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+		/*
-+		 * Sanity check
-+		 */
-+		spin_lock_irqsave(&ioapic_lock, flags);
-+		reg_00.raw = io_apic_read(apic, 0);
-+		spin_unlock_irqrestore(&ioapic_lock, flags);
-+		if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
-+			printk("could not set ID!\n");
-+		else
-+			apic_printk(APIC_VERBOSE," ok.\n");
-+	}
-+}
-+#else
-+static void __init setup_ioapic_ids_from_mpc(void) { }
-+#endif
-+
-+/*
-+ * There is a nasty bug in some older SMP boards, their mptable lies
-+ * about the timer IRQ. We do the following to work around the situation:
-+ *
-+ *	- timer IRQ defaults to IO-APIC IRQ
-+ *	- if this function detects that timer IRQs are defunct, then we fall
-+ *	  back to ISA timer IRQs
-+ */
-+#ifndef CONFIG_XEN
-+static int __init timer_irq_works(void)
-+{
-+	unsigned long t1 = jiffies;
-+
-+	local_irq_enable();
-+	/* Let ten ticks pass... */
-+	mdelay((10 * 1000) / HZ);
-+
-+	/*
-+	 * Expect a few ticks at least, to be sure some possible
-+	 * glue logic does not lock up after one or two first
-+	 * ticks in a non-ExtINT mode.  Also the local APIC
-+	 * might have cached one ExtINT interrupt.  Finally, at
-+	 * least one tick may be lost due to delays.
-+	 */
-+
-+	/* jiffies wrap? */
-+	if (jiffies - t1 > 4)
-+		return 1;
-+	return 0;
-+}
-+
-+/*
-+ * In the SMP+IOAPIC case it might happen that there are an unspecified
-+ * number of pending IRQ events unhandled. These cases are very rare,
-+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
-+ * better to do it this way, as then we do not have to be aware of
-+ * 'pending' interrupts in the IRQ path, except at this point.
-+ */
-+/*
-+ * Edge triggered needs to resend any interrupt
-+ * that was delayed but this is now handled in the device
-+ * independent code.
-+ */
-+
-+/*
-+ * Starting up an edge-triggered IO-APIC interrupt is
-+ * nasty - we need to make sure that we get the edge.
-+ * If it is already asserted for some reason, we need to
-+ * return 1 to indicate that it was pending.
-+ *
-+ * This is not complete - we should be able to fake
-+ * an edge even if it isn't on the 8259A...
-+ */
-+
-+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
-+{
-+	int was_pending = 0;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	if (irq < 16) {
-+		disable_8259A_irq(irq);
-+		if (i8259A_irq_pending(irq))
-+			was_pending = 1;
-+	}
-+	__unmask_IO_APIC_irq(irq);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return was_pending;
-+}
-+
-+/*
-+ * Once we have recorded IRQ_PENDING already, we can mask the
-+ * interrupt for real. This prevents IRQ storms from unhandled
-+ * devices.
-+ */
-+static void ack_edge_ioapic_irq(unsigned int irq)
-+{
-+	if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
-+					== (IRQ_PENDING | IRQ_DISABLED))
-+		mask_IO_APIC_irq(irq);
-+	ack_APIC_irq();
-+}
-+
-+/*
-+ * Level triggered interrupts can just be masked,
-+ * and shutting down and starting up the interrupt
-+ * is the same as enabling and disabling them -- except
-+ * with a startup need to return a "was pending" value.
-+ *
-+ * Level triggered interrupts are special because we
-+ * do not touch any IO-APIC register while handling
-+ * them. We ack the APIC in the end-IRQ handler, not
-+ * in the start-IRQ-handler. Protection against reentrance
-+ * from the same interrupt is still provided, both by the
-+ * generic IRQ layer and by the fact that an unacked local
-+ * APIC does not accept IRQs.
-+ */
-+static unsigned int startup_level_ioapic_irq (unsigned int irq)
-+{
-+	unmask_IO_APIC_irq(irq);
-+
-+	return 0; /* don't check for pending */
-+}
-+
-+static void end_level_ioapic_irq (unsigned int irq)
-+{
-+	ack_APIC_irq();
-+}
-+
-+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
-+{
-+	unsigned long flags;
-+	unsigned int dest;
-+
-+	dest = cpu_mask_to_apicid(mask);
-+
-+	/*
-+	 * Only the high 8 bits are valid.
-+	 */
-+	dest = SET_APIC_LOGICAL_ID(dest);
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	__DO_ACTION(1, = dest, )
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+#ifdef CONFIG_PCI_MSI
-+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	return startup_edge_ioapic_irq(irq);
-+}
-+
-+static void ack_edge_ioapic_vector(unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	ack_edge_ioapic_irq(irq);
-+}
-+
-+static unsigned int startup_level_ioapic_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	return startup_level_ioapic_irq (irq);
-+}
-+
-+static void end_level_ioapic_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	end_level_ioapic_irq(irq);
-+}
-+
-+static void mask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	mask_IO_APIC_irq(irq);
-+}
-+
-+static void unmask_IO_APIC_vector (unsigned int vector)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	unmask_IO_APIC_irq(irq);
-+}
-+
-+static void set_ioapic_affinity_vector (unsigned int vector,
-+					cpumask_t cpu_mask)
-+{
-+	int irq = vector_to_irq(vector);
-+
-+	set_ioapic_affinity_irq(irq, cpu_mask);
-+}
-+#endif
-+
-+/*
-+ * Level and edge triggered IO-APIC interrupts need different handling,
-+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
-+ * handled with the level-triggered descriptor, but that one has slightly
-+ * more overhead. Level-triggered interrupts cannot be handled with the
-+ * edge-triggered handler, without risking IRQ storms and other ugly
-+ * races.
-+ */
-+
-+static struct hw_interrupt_type ioapic_edge_type = {
-+	.typename = "IO-APIC-edge",
-+	.startup 	= startup_edge_ioapic,
-+	.shutdown 	= shutdown_edge_ioapic,
-+	.enable 	= enable_edge_ioapic,
-+	.disable 	= disable_edge_ioapic,
-+	.ack 		= ack_edge_ioapic,
-+	.end 		= end_edge_ioapic,
-+	.set_affinity = set_ioapic_affinity,
-+};
-+
-+static struct hw_interrupt_type ioapic_level_type = {
-+	.typename = "IO-APIC-level",
-+	.startup 	= startup_level_ioapic,
-+	.shutdown 	= shutdown_level_ioapic,
-+	.enable 	= enable_level_ioapic,
-+	.disable 	= disable_level_ioapic,
-+	.ack 		= mask_and_ack_level_ioapic,
-+	.end 		= end_level_ioapic,
-+	.set_affinity = set_ioapic_affinity,
-+};
-+#endif /* !CONFIG_XEN */
-+
-+static inline void init_IO_APIC_traps(void)
-+{
-+	int irq;
-+
-+	/*
-+	 * NOTE! The local APIC isn't very good at handling
-+	 * multiple interrupts at the same interrupt level.
-+	 * As the interrupt level is determined by taking the
-+	 * vector number and shifting that right by 4, we
-+	 * want to spread these out a bit so that they don't
-+	 * all fall in the same interrupt level.
-+	 *
-+	 * Also, we've got to be careful not to trash gate
-+	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
-+	 */
-+	for (irq = 0; irq < NR_IRQS ; irq++) {
-+		int tmp = irq;
-+		if (use_pci_vector()) {
-+			if (!platform_legacy_irq(tmp))
-+				if ((tmp = vector_to_irq(tmp)) == -1)
-+					continue;
-+		}
-+		if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
-+			/*
-+			 * Hmm.. We don't have an entry for this,
-+			 * so default to an old-fashioned 8259
-+			 * interrupt if we can..
-+			 */
-+			if (irq < 16)
-+				make_8259A_irq(irq);
-+#ifndef CONFIG_XEN
-+			else
-+				/* Strange. Oh, well.. */
-+				irq_desc[irq].handler = &no_irq_type;
-+#endif
-+		}
-+	}
-+}
-+
-+#ifndef CONFIG_XEN
-+static void enable_lapic_irq (unsigned int irq)
-+{
-+	unsigned long v;
-+
-+	v = apic_read(APIC_LVT0);
-+	apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
-+}
-+
-+static void disable_lapic_irq (unsigned int irq)
-+{
-+	unsigned long v;
-+
-+	v = apic_read(APIC_LVT0);
-+	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
-+}
-+
-+static void ack_lapic_irq (unsigned int irq)
-+{
-+	ack_APIC_irq();
-+}
-+
-+static void end_lapic_irq (unsigned int i) { /* nothing */ }
-+
-+static struct hw_interrupt_type lapic_irq_type = {
-+	.typename = "local-APIC-edge",
-+	.startup = NULL, /* startup_irq() not used for IRQ0 */
-+	.shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
-+	.enable = enable_lapic_irq,
-+	.disable = disable_lapic_irq,
-+	.ack = ack_lapic_irq,
-+	.end = end_lapic_irq,
-+};
-+
-+static void setup_nmi (void)
-+{
-+	/*
-+ 	 * Dirty trick to enable the NMI watchdog ...
-+	 * We put the 8259A master into AEOI mode and
-+	 * unmask on all local APICs LVT0 as NMI.
-+	 *
-+	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-+	 * is from Maciej W. Rozycki - so we do not have to EOI from
-+	 * the NMI handler or the timer interrupt.
-+	 */ 
-+	printk(KERN_INFO "activating NMI Watchdog ...");
-+
-+	enable_NMI_through_LVT0(NULL);
-+
-+	printk(" done.\n");
-+}
-+
-+/*
-+ * This looks a bit hackish but it's about the only way of sending
-+ * a few INTA cycles to 8259As and any associated glue logic.  ICR does
-+ * not support the ExtINT mode, unfortunately.  We need to send these
-+ * cycles as some i82489DX-based boards have glue logic that keeps the
-+ * 8259A interrupt line asserted until INTA.  --macro
-+ */
-+static inline void unlock_ExtINT_logic(void)
-+{
-+	int pin, i;
-+	struct IO_APIC_route_entry entry0, entry1;
-+	unsigned char save_control, save_freq_select;
-+	unsigned long flags;
-+
-+	pin = find_isa_irq_pin(8, mp_INT);
-+	if (pin == -1)
-+		return;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	*(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
-+	*(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+	clear_IO_APIC_pin(0, pin);
-+
-+	memset(&entry1, 0, sizeof(entry1));
-+
-+	entry1.dest_mode = 0;			/* physical delivery */
-+	entry1.mask = 0;			/* unmask IRQ now */
-+	entry1.dest.physical.physical_dest = hard_smp_processor_id();
-+	entry1.delivery_mode = dest_ExtINT;
-+	entry1.polarity = entry0.polarity;
-+	entry1.trigger = 0;
-+	entry1.vector = 0;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
-+	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	save_control = CMOS_READ(RTC_CONTROL);
-+	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-+	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-+		   RTC_FREQ_SELECT);
-+	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
-+
-+	i = 100;
-+	while (i-- > 0) {
-+		mdelay(10);
-+		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-+			i -= 10;
-+	}
-+
-+	CMOS_WRITE(save_control, RTC_CONTROL);
-+	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+	clear_IO_APIC_pin(0, pin);
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
-+	io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+}
-+
-+/*
-+ * This code may look a bit paranoid, but it's supposed to cooperate with
-+ * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
-+ * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
-+ * fanatically on his truly buggy board.
-+ */
-+static inline void check_timer(void)
-+{
-+	int pin1, pin2;
-+	int vector;
-+
-+	/*
-+	 * get/set the timer IRQ vector:
-+	 */
-+	disable_8259A_irq(0);
-+	vector = assign_irq_vector(0);
-+	set_intr_gate(vector, interrupt[0]);
-+
-+	/*
-+	 * Subtle, code in do_timer_interrupt() expects an AEOI
-+	 * mode for the 8259A whenever interrupts are routed
-+	 * through I/O APICs.  Also IRQ0 has to be enabled in
-+	 * the 8259A which implies the virtual wire has to be
-+	 * disabled in the local APIC.
-+	 */
-+	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-+	init_8259A(1);
-+	enable_8259A_irq(0);
-+
-+	pin1 = find_isa_irq_pin(0, mp_INT);
-+	pin2 = find_isa_irq_pin(0, mp_ExtINT);
-+
-+	apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
-+
-+	if (pin1 != -1) {
-+		/*
-+		 * Ok, does IRQ0 through the IOAPIC work?
-+		 */
-+		unmask_IO_APIC_irq(0);
-+		if (!no_timer_check && timer_irq_works()) {
-+			nmi_watchdog_default();
-+			if (nmi_watchdog == NMI_IO_APIC) {
-+				disable_8259A_irq(0);
-+				setup_nmi();
-+				enable_8259A_irq(0);
-+			}
-+			return;
-+		}
-+		clear_IO_APIC_pin(0, pin1);
-+		apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
-+	}
-+
-+	apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
-+	if (pin2 != -1) {
-+		apic_printk(APIC_VERBOSE,"\n..... (found pin %d) ...", pin2);
-+		/*
-+		 * legacy devices should be connected to IO APIC #0
-+		 */
-+		setup_ExtINT_IRQ0_pin(pin2, vector);
-+		if (timer_irq_works()) {
-+			printk("works.\n");
-+			nmi_watchdog_default();
-+			if (nmi_watchdog == NMI_IO_APIC) {
-+				setup_nmi();
-+			}
-+			return;
-+		}
-+		/*
-+		 * Cleanup, just in case ...
-+		 */
-+		clear_IO_APIC_pin(0, pin2);
-+	}
-+	printk(" failed.\n");
-+
-+	if (nmi_watchdog) {
-+		printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
-+		nmi_watchdog = 0;
-+	}
-+
-+	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
-+
-+	disable_8259A_irq(0);
-+	irq_desc[0].handler = &lapic_irq_type;
-+	apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);	/* Fixed mode */
-+	enable_8259A_irq(0);
-+
-+	if (timer_irq_works()) {
-+		apic_printk(APIC_QUIET, " works.\n");
-+		return;
-+	}
-+	apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
-+	apic_printk(APIC_VERBOSE," failed.\n");
-+
-+	apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
-+
-+	init_8259A(0);
-+	make_8259A_irq(0);
-+	apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
-+
-+	unlock_ExtINT_logic();
-+
-+	if (timer_irq_works()) {
-+		apic_printk(APIC_VERBOSE," works.\n");
-+		return;
-+	}
-+	apic_printk(APIC_VERBOSE," failed :(.\n");
-+	panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
-+}
-+#else
-+#define check_timer() ((void)0)
-+#endif /* !CONFIG_XEN */
-+
-+static int __init notimercheck(char *s)
-+{
-+	no_timer_check = 1;
-+	return 1;
-+}
-+__setup("no_timer_check", notimercheck);
-+
-+/*
-+ *
-+ * IRQ's that are handled by the PIC in the MPS IOAPIC case.
-+ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
-+ *   Linux doesn't really care, as it's not actually used
-+ *   for any interrupt handling anyway.
-+ */
-+#define PIC_IRQS	(1<<2)
-+
-+void __init setup_IO_APIC(void)
-+{
-+	enable_IO_APIC();
-+
-+	if (acpi_ioapic)
-+		io_apic_irqs = ~0;	/* all IRQs go through IOAPIC */
-+	else
-+		io_apic_irqs = ~PIC_IRQS;
-+
-+	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
-+
-+	/*
-+	 * Set up the IO-APIC IRQ routing table.
-+	 */
-+	if (!acpi_ioapic)
-+		setup_ioapic_ids_from_mpc();
-+#ifndef CONFIG_XEN
-+	sync_Arb_IDs();
-+#endif /* !CONFIG_XEN */
-+	setup_IO_APIC_irqs();
-+	init_IO_APIC_traps();
-+	check_timer();
-+	if (!acpi_ioapic)
-+		print_IO_APIC();
-+}
-+
-+struct sysfs_ioapic_data {
-+	struct sys_device dev;
-+	struct IO_APIC_route_entry entry[0];
-+};
-+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
-+
-+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-+{
-+	struct IO_APIC_route_entry *entry;
-+	struct sysfs_ioapic_data *data;
-+	unsigned long flags;
-+	int i;
-+
-+	data = container_of(dev, struct sysfs_ioapic_data, dev);
-+	entry = data->entry;
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		*(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
-+		*(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
-+	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return 0;
-+}
-+
-+static int ioapic_resume(struct sys_device *dev)
-+{
-+	struct IO_APIC_route_entry *entry;
-+	struct sysfs_ioapic_data *data;
-+	unsigned long flags;
-+	union IO_APIC_reg_00 reg_00;
-+	int i;
-+
-+	data = container_of(dev, struct sysfs_ioapic_data, dev);
-+	entry = data->entry;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_00.raw = io_apic_read(dev->id, 0);
-+	if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
-+		reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
-+		io_apic_write(dev->id, 0, reg_00.raw);
-+	}
-+	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
-+		io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
-+		io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
-+	}
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return 0;
-+}
-+
-+static struct sysdev_class ioapic_sysdev_class = {
-+	set_kset_name("ioapic"),
-+	.suspend = ioapic_suspend,
-+	.resume = ioapic_resume,
-+};
-+
-+static int __init ioapic_init_sysfs(void)
-+{
-+	struct sys_device * dev;
-+	int i, size, error = 0;
-+
-+	error = sysdev_class_register(&ioapic_sysdev_class);
-+	if (error)
-+		return error;
-+
-+	for (i = 0; i < nr_ioapics; i++ ) {
-+		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
-+			* sizeof(struct IO_APIC_route_entry);
-+		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
-+		if (!mp_ioapic_data[i]) {
-+			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+			continue;
-+		}
-+		memset(mp_ioapic_data[i], 0, size);
-+		dev = &mp_ioapic_data[i]->dev;
-+		dev->id = i;
-+		dev->cls = &ioapic_sysdev_class;
-+		error = sysdev_register(dev);
-+		if (error) {
-+			kfree(mp_ioapic_data[i]);
-+			mp_ioapic_data[i] = NULL;
-+			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-+			continue;
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+device_initcall(ioapic_init_sysfs);
-+
-+/* --------------------------------------------------------------------------
-+                          ACPI-based IOAPIC Configuration
-+   -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI_BOOT
-+
-+#define IO_APIC_MAX_ID		0xFE
-+
-+int __init io_apic_get_version (int ioapic)
-+{
-+	union IO_APIC_reg_01	reg_01;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_01.raw = io_apic_read(ioapic, 1);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return reg_01.bits.version;
-+}
-+
-+
-+int __init io_apic_get_redir_entries (int ioapic)
-+{
-+	union IO_APIC_reg_01	reg_01;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	reg_01.raw = io_apic_read(ioapic, 1);
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return reg_01.bits.entries;
-+}
-+
-+
-+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
-+{
-+	struct IO_APIC_route_entry entry;
-+	unsigned long flags;
-+
-+	if (!IO_APIC_IRQ(irq)) {
-+		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-+			ioapic);
-+		return -EINVAL;
-+	}
-+
-+	/*
-+	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
-+	 * Note that we mask (disable) IRQs now -- these get enabled when the
-+	 * corresponding device driver registers for this IRQ.
-+	 */
-+
-+	memset(&entry,0,sizeof(entry));
-+
-+	entry.delivery_mode = INT_DELIVERY_MODE;
-+	entry.dest_mode = INT_DEST_MODE;
-+	entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-+	entry.trigger = edge_level;
-+	entry.polarity = active_high_low;
-+	entry.mask = 1;					 /* Disabled (masked) */
-+
-+	/*
-+	 * IRQs < 16 are already in the irq_2_pin[] map
-+	 */
-+	if (irq >= 16)
-+		add_pin_to_irq(irq, ioapic, pin);
-+
-+	entry.vector = assign_irq_vector(irq);
-+
-+	apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
-+		"IRQ %d Mode:%i Active:%i)\n", ioapic, 
-+	       mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
-+	       edge_level, active_high_low);
-+
-+	ioapic_register_intr(irq, entry.vector, edge_level);
-+
-+	if (!ioapic && (irq < 16))
-+		disable_8259A_irq(irq);
-+
-+	spin_lock_irqsave(&ioapic_lock, flags);
-+	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
-+	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
-+	spin_unlock_irqrestore(&ioapic_lock, flags);
-+
-+	return 0;
-+}
-+
-+#endif /*CONFIG_ACPI_BOOT*/
-+
-+
-+#ifndef CONFIG_XEN
-+/*
-+ * This function currently is only a helper for the i386 smp boot process where
-+ * we need to reprogram the ioredtbls to cater for the cpus which have come online
-+ * so mask in all cases should simply be TARGET_CPUS
-+ */
-+void __init setup_ioapic_dest(void)
-+{
-+	int pin, ioapic, irq, irq_entry;
-+
-+	if (skip_ioapic_setup == 1)
-+		return;
-+
-+	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-+		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-+			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-+			if (irq_entry == -1)
-+				continue;
-+			irq = pin_2_irq(irq_entry, ioapic, pin);
-+			set_ioapic_affinity_irq(irq, TARGET_CPUS);
-+		}
-+
-+	}
-+}
-+#endif /* !CONFIG_XEN */
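The hunks above open-code the same redirection-table access sequence in several places (setup_IO_APIC_irqs, clear_IO_APIC_pin, ioapic_suspend/resume): take ioapic_lock, move the 64-bit IO_APIC_route_entry through the two 32-bit registers 0x10+2*pin and 0x11+2*pin, release the lock; under CONFIG_XEN each register access becomes a PHYSDEVOP_APIC_READ/WRITE hypercall via the macros at the top of the file. A minimal sketch of that pattern, not part of the patch (the helper names ioapic_read_rte/ioapic_write_rte are hypothetical):

static struct IO_APIC_route_entry ioapic_read_rte(unsigned int apic,
						  unsigned int pin)
{
	struct IO_APIC_route_entry entry;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	*(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin); /* low half */
	*(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin); /* high half */
	spin_unlock_irqrestore(&ioapic_lock, flags);
	return entry;
}

static void ioapic_write_rte(unsigned int apic, unsigned int pin,
			     struct IO_APIC_route_entry entry)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	/* high half (destination) first, then the half holding the vector
	 * and mask bit, matching the order used by setup_IO_APIC_irqs() above */
	io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
	io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
	spin_unlock_irqrestore(&ioapic_lock, flags);
}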
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/ioport.c linux-2.6.12-xen/arch/xen/x86_64/kernel/ioport.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/ioport.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/ioport.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,57 @@
-+/*
-+ *	linux/arch/x86_64/kernel/ioport.c
-+ *
-+ * This contains the io-permission bitmap code - written by obz, with changes
-+ * by Linus.
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/stddef.h>
-+#include <linux/slab.h>
-+#include <linux/thread_info.h>
-+#include <asm-xen/xen-public/physdev.h>
-+
-+/*
-+ * sys_iopl has to be used when you want to access the IO ports
-+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
-+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
-+ *
-+ */
-+
-+asmlinkage long sys_iopl(unsigned int new_io_pl, struct pt_regs *regs)
-+{
-+        unsigned int old_io_pl = current->thread.io_pl;
-+        physdev_op_t op;
-+
-+	if (new_io_pl > 3)
-+		return -EINVAL;
-+
-+	/* Need "raw I/O" privileges for direct port access. */
-+	if ((new_io_pl > old_io_pl) && !capable(CAP_SYS_RAWIO))
-+		return -EPERM;
-+
-+	/* Change our version of the privilege levels. */
-+	current->thread.io_pl = new_io_pl;
-+
-+	/* Force the change at ring 0. */
-+	op.cmd             = PHYSDEVOP_SET_IOPL;
-+	op.u.set_iopl.iopl = (new_io_pl == 0) ? 1 : new_io_pl;
-+	HYPERVISOR_physdev_op(&op);
-+
-+	return 0;
-+}
-+
-+/*
-+ * this changes the io permissions bitmap in the current task.
-+ */
-+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
-+{
-+  return turn_on ? sys_iopl(3, NULL) : 0;
-+}
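The Xen sys_iopl() above only records the requested level in current->thread.io_pl and forwards it to the hypervisor with PHYSDEVOP_SET_IOPL, and sys_ioperm() simply collapses any "turn on" request into iopl(3). A hedged user-space sketch of how this path is exercised through the ordinary glibc wrappers (nothing here is part of the patch):

#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	/* needs CAP_SYS_RAWIO; lands in the sys_iopl() shown above,
	 * which issues the PHYSDEVOP_SET_IOPL hypercall */
	if (iopl(3) != 0) {
		perror("iopl");
		return 1;
	}
	/* inb()/outb() port access is now permitted for this task */
	return 0;
}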
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/irq.c linux-2.6.12-xen/arch/xen/x86_64/kernel/irq.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/irq.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/irq.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,107 @@
-+/*
-+ *	linux/arch/x86_64/kernel/irq.c
-+ *
-+ *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
-+ *
-+ * This file contains the lowest level x86_64-specific interrupt
-+ * entry and irq statistics code. All the remaining irq logic is
-+ * done by the generic kernel/irq/ code and in the
-+ * x86_64-specific irq controller code. (e.g. i8259.c and
-+ * io_apic.c.)
-+ */
-+
-+#include <linux/kernel_stat.h>
-+#include <linux/interrupt.h>
-+#include <linux/seq_file.h>
-+#include <linux/module.h>
-+#include <asm/uaccess.h>
-+#include <asm/io_apic.h>
-+
-+atomic_t irq_err_count;
-+#ifdef CONFIG_X86_IO_APIC
-+#ifdef APIC_MISMATCH_DEBUG
-+atomic_t irq_mis_count;
-+#endif
-+#endif
-+
-+/*
-+ * Generic, controller-independent functions:
-+ */
-+
-+int show_interrupts(struct seq_file *p, void *v)
-+{
-+	int i = *(loff_t *) v, j;
-+	struct irqaction * action;
-+	unsigned long flags;
-+
-+	if (i == 0) {
-+		seq_printf(p, "           ");
-+		for (j=0; j<NR_CPUS; j++)
-+			if (cpu_online(j))
-+				seq_printf(p, "CPU%d       ",j);
-+		seq_putc(p, '\n');
-+	}
-+
-+	if (i < NR_IRQS) {
-+		spin_lock_irqsave(&irq_desc[i].lock, flags);
-+		action = irq_desc[i].action;
-+		if (!action) 
-+			goto skip;
-+		seq_printf(p, "%3d: ",i);
-+#ifndef CONFIG_SMP
-+		seq_printf(p, "%10u ", kstat_irqs(i));
-+#else
-+		for (j=0; j<NR_CPUS; j++)
-+			if (cpu_online(j))
-+			seq_printf(p, "%10u ",
-+				kstat_cpu(j).irqs[i]);
-+#endif
-+		seq_printf(p, " %14s", irq_desc[i].handler->typename);
-+
-+		seq_printf(p, "  %s", action->name);
-+		for (action=action->next; action; action = action->next)
-+			seq_printf(p, ", %s", action->name);
-+		seq_putc(p, '\n');
-+skip:
-+		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-+	} else if (i == NR_IRQS) {
-+		seq_printf(p, "NMI: ");
-+		for (j = 0; j < NR_CPUS; j++)
-+			if (cpu_online(j))
-+				seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
-+		seq_putc(p, '\n');
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		seq_printf(p, "LOC: ");
-+		for (j = 0; j < NR_CPUS; j++)
-+			if (cpu_online(j))
-+				seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
-+		seq_putc(p, '\n');
-+#endif
-+		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-+#ifdef CONFIG_X86_IO_APIC
-+#ifdef APIC_MISMATCH_DEBUG
-+		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
-+#endif
-+#endif
-+	}
-+	return 0;
-+}
-+
-+/*
-+ * do_IRQ handles all normal device IRQ's (the special
-+ * SMP cross-CPU interrupts have their own specific
-+ * handlers).
-+ */
-+asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
-+{	
-+	/* high bits used in ret_from_ code  */
-+        int irq = regs->orig_rax & __IRQ_MASK(HARDIRQ_BITS);
-+
-+	irq_enter();
-+
-+	__do_IRQ(irq, regs);
-+	irq_exit();
-+
-+	return 1;
-+}
-+
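show_interrupts() above is the seq_file handler that backs /proc/interrupts, so the per-CPU counters, handler type names and the NMI/LOC/ERR summary lines it formats can be inspected from user space simply by reading that file. A small illustrative reader, assuming a kernel built from this tree (not part of the patch):

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/interrupts", "r");

	if (!f) {
		perror("/proc/interrupts");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one line per IRQ, as emitted by show_interrupts() */
	fclose(f);
	return 0;
}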
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/ldt.c linux-2.6.12-xen/arch/xen/x86_64/kernel/ldt.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/ldt.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/ldt.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,282 @@
-+/*
-+ * linux/arch/x86_64/kernel/ldt.c
-+ *
-+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
-+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
-+ * Copyright (C) 2002 Andi Kleen
-+ * 
-+ * This handles calls from both 32bit and 64bit mode.
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/vmalloc.h>
-+#include <linux/slab.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/ldt.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/pgalloc.h>
-+
-+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
-+static void flush_ldt(void *null)
-+{
-+	if (current->active_mm)
-+               load_LDT(&current->active_mm->context);
-+}
-+#endif
-+
-+static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
-+{
-+	void *oldldt;
-+	void *newldt;
-+	unsigned oldsize;
-+
-+	if (mincount <= (unsigned)pc->size)
-+		return 0;
-+	oldsize = pc->size;
-+	mincount = (mincount+511)&(~511);
-+	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
-+		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
-+	else
-+		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-+
-+	if (!newldt)
-+		return -ENOMEM;
-+
-+	if (oldsize)
-+		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
-+	oldldt = pc->ldt;
-+	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
-+	wmb();
-+	pc->ldt = newldt;
-+	wmb();
-+	pc->size = mincount;
-+	wmb();
-+	if (reload) {
-+#ifdef CONFIG_SMP
-+		cpumask_t mask;
-+
-+		preempt_disable();
-+#endif
-+		make_pages_readonly(
-+			pc->ldt,
-+			(pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		load_LDT(pc);
-+#ifdef CONFIG_SMP
-+		mask = cpumask_of_cpu(smp_processor_id());
-+		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-+			smp_call_function(flush_ldt, NULL, 1, 1);
-+		preempt_enable();
-+#endif
-+	}
-+	if (oldsize) {
-+		make_pages_writable(
-+			oldldt,
-+			(oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(oldldt);
-+		else
-+			kfree(oldldt);
-+	}
-+	return 0;
-+}
-+
-+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
-+{
-+	int err = alloc_ldt(new, old->size, 0);
-+	if (err < 0)
-+		return err;
-+	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-+	make_pages_readonly(
-+		new->ldt,
-+		(new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+		XENFEAT_writable_descriptor_tables);
-+	return 0;
-+}
-+
-+/*
-+ * we do not have to muck with descriptors here, that is
-+ * done in switch_mm() as needed.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-+{
-+	struct mm_struct * old_mm;
-+	int retval = 0;
-+
-+	memset(&mm->context, 0, sizeof(mm->context));
-+	init_MUTEX(&mm->context.sem);
-+	old_mm = current->mm;
-+	if (old_mm && old_mm->context.size > 0) {
-+		down(&old_mm->context.sem);
-+		retval = copy_ldt(&mm->context, &old_mm->context);
-+		up(&old_mm->context.sem);
-+	}
-+	if (retval == 0) {
-+		spin_lock(&mm_unpinned_lock);
-+		list_add(&mm->context.unpinned, &mm_unpinned);
-+		spin_unlock(&mm_unpinned_lock);
-+	}
-+	return retval;
-+}
-+
-+/*
-+ * 
-+ * Don't touch the LDT register - we're already in the next thread.
-+ */
-+void destroy_context(struct mm_struct *mm)
-+{
-+	if (mm->context.size) {
-+		if (mm == current->active_mm)
-+			clear_LDT();
-+		make_pages_writable(
-+			mm->context.ldt,
-+			(mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
-+			XENFEAT_writable_descriptor_tables);
-+		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
-+			vfree(mm->context.ldt);
-+		else
-+			kfree(mm->context.ldt);
-+		mm->context.size = 0;
-+	}
-+	if (!mm->context.pinned) {
-+		spin_lock(&mm_unpinned_lock);
-+		list_del(&mm->context.unpinned);
-+		spin_unlock(&mm_unpinned_lock);
-+	}
-+}
-+
-+static int read_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+	int err;
-+	unsigned long size;
-+	struct mm_struct * mm = current->mm;
-+
-+	if (!mm->context.size)
-+		return 0;
-+	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
-+		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
-+
-+	down(&mm->context.sem);
-+	size = mm->context.size*LDT_ENTRY_SIZE;
-+	if (size > bytecount)
-+		size = bytecount;
-+
-+	err = 0;
-+	if (copy_to_user(ptr, mm->context.ldt, size))
-+		err = -EFAULT;
-+	up(&mm->context.sem);
-+	if (err < 0)
-+		goto error_return;
-+	if (size != bytecount) {
-+		/* zero-fill the rest */
-+		if (clear_user(ptr+size, bytecount-size) != 0) {
-+			err = -EFAULT;
-+			goto error_return;
-+		}
-+	}
-+	return bytecount;
-+error_return:
-+	return err;
-+}
-+
-+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
-+{
-+	/* Arbitrary number */ 
-+	/* x86-64 default LDT is all zeros */
-+	if (bytecount > 128) 
-+		bytecount = 128; 	
-+	if (clear_user(ptr, bytecount))
-+		return -EFAULT;
-+	return bytecount; 
-+}
-+
-+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
-+{
-+	struct task_struct *me = current;
-+	struct mm_struct * mm = me->mm;
-+	__u32 entry_1, entry_2, *lp;
-+	unsigned long mach_lp;
-+	int error;
-+	struct user_desc ldt_info;
-+
-+	error = -EINVAL;
-+
-+	if (bytecount != sizeof(ldt_info))
-+		goto out;
-+	error = -EFAULT; 	
-+	if (copy_from_user(&ldt_info, ptr, bytecount))
-+		goto out;
-+
-+	error = -EINVAL;
-+	if (ldt_info.entry_number >= LDT_ENTRIES)
-+		goto out;
-+	if (ldt_info.contents == 3) {
-+		if (oldmode)
-+			goto out;
-+		if (ldt_info.seg_not_present == 0)
-+			goto out;
-+	}
-+
-+	down(&mm->context.sem);
-+	if (ldt_info.entry_number >= (unsigned)mm->context.size) {
-+		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
-+		if (error < 0)
-+			goto out_unlock;
-+	}
-+
-+	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
-+ 	mach_lp = arbitrary_virt_to_machine(lp);
-+
-+   	/* Allow LDTs to be cleared by the user. */
-+   	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
-+		if (oldmode || LDT_empty(&ldt_info)) {
-+			entry_1 = 0;
-+			entry_2 = 0;
-+			goto install;
-+		}
-+	}
-+
-+	entry_1 = LDT_entry_a(&ldt_info);
-+	entry_2 = LDT_entry_b(&ldt_info);
-+	if (oldmode)
-+		entry_2 &= ~(1 << 20);
-+
-+	/* Install the new entry ...  */
-+install:
-+	error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
-+
-+out_unlock:
-+	up(&mm->context.sem);
-+out:
-+	return error;
-+}
-+
-+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
-+{
-+	int ret = -ENOSYS;
-+
-+	switch (func) {
-+	case 0:
-+		ret = read_ldt(ptr, bytecount);
-+		break;
-+	case 1:
-+		ret = write_ldt(ptr, bytecount, 1);
-+		break;
-+	case 2:
-+		ret = read_default_ldt(ptr, bytecount);
-+		break;
-+	case 0x11:
-+		ret = write_ldt(ptr, bytecount, 0);
-+		break;
-+	}
-+	return ret;
-+}
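
For reference, the sys_modify_ldt() entry point above dispatches on func: 0 reads the LDT, 1 (old format) and 0x11 (new format) write an entry, and 2 reads the all-zero default LDT. Below is a minimal userspace sketch of the read path only, assuming a Linux/x86-64 host with glibc and the uapi <asm/ldt.h> header; the size constants come from that header and the sketch is not part of the patch.

/* Read the current process's LDT via modify_ldt (func 0). */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/ldt.h>	/* LDT_ENTRIES, LDT_ENTRY_SIZE */

int main(void)
{
	unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
	/* func 0 maps to read_ldt() above; returns the number of bytes copied. */
	long n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));

	if (n < 0) {
		perror("modify_ldt");
		return 1;
	}
	printf("read %ld bytes of LDT (%ld entries)\n", n, n / LDT_ENTRY_SIZE);
	return 0;
}

Most processes have no LDT at all, in which case the call simply returns 0.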
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/Makefile linux-2.6.12-xen/arch/xen/x86_64/kernel/Makefile
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,71 @@
-+#
-+# Makefile for the linux kernel.
-+#
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+CFLAGS	+= -Iarch/$(XENARCH)/kernel
-+
-+extra-y 	:= head.o head64.o init_task.o
-+
-+obj-y	:= process.o signal.o entry.o traps.o  \
-+		ioport.o ldt.o setup.o \
-+		x8664_ksyms.o vsyscall.o \
-+		setup64.o e820.o irq.o early_printk.o
-+c-obj-y	:= semaphore.o i387.o sys_x86_64.o \
-+		ptrace.o quirks.o syscall.o bootflag.o
-+
-+i386-obj-y			:= time.o
-+#obj-y				+= ../../i386/kernel/timers/
-+
-+s-obj-y	:=
-+
-+#obj-$(CONFIG_X86_MCE)         += mce.o
-+#obj-$(CONFIG_MTRR)		+= ../../i386/kernel/cpu/mtrr/
-+obj-$(CONFIG_ACPI_BOOT)		+= acpi/
-+c-obj-$(CONFIG_X86_MSR)		+= msr.o
-+obj-$(CONFIG_MICROCODE)		+= microcode.o
-+obj-$(CONFIG_X86_CPUID)		+= cpuid.o
-+obj-$(CONFIG_SMP)		+= smp.o
-+obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o
-+c-obj-$(CONFIG_X86_LOCAL_APIC)	+= nmi.o
-+obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o mpparse.o
-+obj-$(CONFIG_X86_XEN_GENAPIC)	+= genapic.o genapic_xen.o
-+c-obj-$(CONFIG_X86_IO_APIC)	+= genapic_cluster.o genapic_flat.o
-+#obj-$(CONFIG_PM)		+= suspend.o
-+#obj-$(CONFIG_SOFTWARE_SUSPEND)	+= suspend_asm.o
-+#obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
-+#obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
-+#obj-$(CONFIG_GART_IOMMU)	+= pci-gart.o aperture.o
-+obj-$(CONFIG_DUMMY_IOMMU)	+= pci-nommu.o
-+i386-obj-$(CONFIG_DUMMY_IOMMU)	+= pci-dma.o
-+i386-obj-$(CONFIG_SWIOTLB)	+= swiotlb.o
-+obj-$(CONFIG_KPROBES)		+= kprobes.o
-+#obj-$(CONFIG_X86_PM_TIMER)	+= pmtimer.o
-+
-+c-obj-$(CONFIG_MODULES)		+= module.o
-+
-+obj-y				+= topology.o
-+c-obj-y				+= intel_cacheinfo.o
-+
-+bootflag-y			+= ../../../i386/kernel/bootflag.o
-+cpuid-$(subst m,y,$(CONFIG_X86_CPUID))  += ../../../i386/kernel/cpuid.o
-+topology-y                     += ../../../i386/mach-default/topology.o
-+#swiotlb-$(CONFIG_SWIOTLB)      += ../../../ia64/lib/swiotlb.o
-+microcode-$(subst m,y,$(CONFIG_MICROCODE))  += ../../i386/kernel/microcode.o
-+intel_cacheinfo-y		+= ../../../i386/kernel/cpu/intel_cacheinfo.o
-+quirks-y			+= ../../i386/kernel/quirks.o
-+
-+c-link	:= init_task.o
-+s-link	:= vsyscall.o 
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-obj-m) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
-+	@ln -fsn $(srctree)/arch/x86_64/kernel/$(notdir $@) $@
-+
-+$(patsubst %.o,$(obj)/%.c,$(i386-obj-y)):
-+	@ln -fsn $(srctree)/arch/xen/i386/kernel/$(notdir $@) $@
-+
-+obj-y	+= $(c-obj-y) $(s-obj-y) $(i386-obj-y)
-+obj-m	+= $(c-obj-m)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-m) $(c-obj-) $(c-link) $(i386-obj-y))
-+clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/mpparse.c linux-2.6.12-xen/arch/xen/x86_64/kernel/mpparse.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/mpparse.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/mpparse.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,963 @@
-+/*
-+ *	Intel Multiprocessor Specification 1.1 and 1.4
-+ *	compliant MP-table parsing routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998, 1999, 2000 Ingo Molnar <mingo at redhat.com>
-+ *
-+ *	Fixes
-+ *		Erich Boleyn	:	MP v1.4 and additional changes.
-+ *		Alan Cox	:	Added EBDA scanning
-+ *		Ingo Molnar	:	various cleanups and rewrites
-+ *		Maciej W. Rozycki:	Bits for default MP configurations
-+ *		Paul Diefenbaugh:	Added full ACPI support
-+ */
-+
-+#include <linux/mm.h>
-+#include <linux/irq.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/config.h>
-+#include <linux/bootmem.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/acpi.h>
-+
-+#include <asm/smp.h>
-+#include <asm/mtrr.h>
-+#include <asm/mpspec.h>
-+#include <asm/pgalloc.h>
-+#include <asm/io_apic.h>
-+#include <asm/proto.h>
-+#include <asm/acpi.h>
-+
-+/* Have we found an MP table */
-+int smp_found_config;
-+unsigned int __initdata maxcpus = NR_CPUS;
-+
-+int acpi_found_madt;
-+
-+/*
-+ * Various Linux-internal data structures created from the
-+ * MP-table.
-+ */
-+int apic_version [MAX_APICS];
-+unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
-+cpumask_t pci_bus_to_cpumask [256] = { [0 ... 255] = CPU_MASK_ALL };
-+
-+static int mp_current_pci_id = 0;
-+/* I/O APIC entries */
-+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
-+
-+/* # of MP IRQ source entries */
-+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-+
-+/* MP IRQ source entries */
-+int mp_irq_entries;
-+
-+int nr_ioapics;
-+int pic_mode;
-+unsigned long mp_lapic_addr = 0;
-+
-+
-+
-+/* Processor that is doing the boot up */
-+unsigned int boot_cpu_id = -1U;
-+/* Internal processor count */
-+static unsigned int num_processors = 0;
-+
-+/* Bitmask of physically existing CPUs */
-+physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
-+
-+/* ACPI MADT entry parsing functions */
-+#ifdef CONFIG_ACPI_BOOT
-+extern struct acpi_boot_flags acpi_boot;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+extern int acpi_parse_lapic (acpi_table_entry_header *header);
-+extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
-+extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
-+#endif /*CONFIG_X86_LOCAL_APIC*/
-+#ifdef CONFIG_X86_IO_APIC
-+extern int acpi_parse_ioapic (acpi_table_entry_header *header);
-+#endif /*CONFIG_X86_IO_APIC*/
-+#endif /*CONFIG_ACPI_BOOT*/
-+
-+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
-+
-+
-+/*
-+ * Intel MP BIOS table parsing routines:
-+ */
-+
-+/*
-+ * Checksum an MP configuration block.
-+ */
-+
-+static int __init mpf_checksum(unsigned char *mp, int len)
-+{
-+	int sum = 0;
-+
-+	while (len--)
-+		sum += *mp++;
-+
-+	return sum & 0xFF;
-+}
-+
-+#ifndef CONFIG_XEN
-+static void __init MP_processor_info (struct mpc_config_processor *m)
-+{
-+	int ver;
-+	static int found_bsp=0;
-+
-+	if (!(m->mpc_cpuflag & CPU_ENABLED))
-+		return;
-+
-+	printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
-+		m->mpc_apicid,
-+	       (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
-+	       (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
-+		m->mpc_apicver);
-+
-+	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+		Dprintk("    Bootup CPU\n");
-+		boot_cpu_id = m->mpc_apicid;
-+	}
-+	if (num_processors >= NR_CPUS) {
-+		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
-+			" Processor ignored.\n", NR_CPUS);
-+		return;
-+	}
-+
-+	num_processors++;
-+
-+	if (m->mpc_apicid > MAX_APICS) {
-+		printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
-+			m->mpc_apicid, MAX_APICS);
-+		return;
-+	}
-+	ver = m->mpc_apicver;
-+
-+	physid_set(m->mpc_apicid, phys_cpu_present_map);
-+	/*
-+	 * Validate version
-+	 */
-+	if (ver == 0x0) {
-+		printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
-+		ver = 0x10;
-+	}
-+	apic_version[m->mpc_apicid] = ver;
-+ 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
-+ 		/*
-+ 		 * bios_cpu_apicid is required to have processors listed
-+ 		 * in same order as logical cpu numbers. Hence the first
-+ 		 * entry is BSP, and so on.
-+ 		 */
-+ 		bios_cpu_apicid[0] = m->mpc_apicid;
-+ 		x86_cpu_to_apicid[0] = m->mpc_apicid;
-+ 		found_bsp = 1;
-+ 	} else {
-+ 		bios_cpu_apicid[num_processors - found_bsp] = m->mpc_apicid;
-+ 		x86_cpu_to_apicid[num_processors - found_bsp] = m->mpc_apicid;
-+ 	}
-+}
-+#else
-+void __init MP_processor_info (struct mpc_config_processor *m)
-+{
-+	num_processors++;
-+}
-+#endif /* CONFIG_XEN */
-+
-+static void __init MP_bus_info (struct mpc_config_bus *m)
-+{
-+	char str[7];
-+
-+	memcpy(str, m->mpc_bustype, 6);
-+	str[6] = 0;
-+	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
-+
-+	if (strncmp(str, "ISA", 3) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
-+	} else if (strncmp(str, "EISA", 4) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
-+	} else if (strncmp(str, "PCI", 3) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
-+		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
-+		mp_current_pci_id++;
-+	} else if (strncmp(str, "MCA", 3) == 0) {
-+		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-+	} else {
-+		printk(KERN_ERR "Unknown bustype %s\n", str);
-+	}
-+}
-+
-+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
-+{
-+	if (!(m->mpc_flags & MPC_APIC_USABLE))
-+		return;
-+
-+	printk("I/O APIC #%d Version %d at 0x%X.\n",
-+		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
-+			MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
-+	}
-+	if (!m->mpc_apicaddr) {
-+		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
-+			" found in MP table, skipping!\n");
-+		return;
-+	}
-+	mp_ioapics[nr_ioapics] = *m;
-+	nr_ioapics++;
-+}
-+
-+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
-+{
-+	mp_irqs [mp_irq_entries] = *m;
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
-+			m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
-+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!!\n");
-+}
-+
-+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
-+{
-+	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
-+		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
-+			m->mpc_irqtype, m->mpc_irqflag & 3,
-+			(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
-+			m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
-+	/*
-+	 * Well it seems all SMP boards in existence
-+	 * use ExtINT/LVT1 == LINT0 and
-+	 * NMI/LVT2 == LINT1 - the following check
-+	 * will show us if this assumption is false.
-+	 * Until then we do not have to add baggage.
-+	 */
-+	if ((m->mpc_irqtype == mp_ExtINT) &&
-+		(m->mpc_destapiclint != 0))
-+			BUG();
-+	if ((m->mpc_irqtype == mp_NMI) &&
-+		(m->mpc_destapiclint != 1))
-+			BUG();
-+}
-+
-+/*
-+ * Read/parse the MPC
-+ */
-+
-+static int __init smp_read_mpc(struct mp_config_table *mpc)
-+{
-+	char str[16];
-+	int count=sizeof(*mpc);
-+	unsigned char *mpt=((unsigned char *)mpc)+count;
-+
-+	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
-+		printk("SMP mptable: bad signature [%c%c%c%c]!\n",
-+			mpc->mpc_signature[0],
-+			mpc->mpc_signature[1],
-+			mpc->mpc_signature[2],
-+			mpc->mpc_signature[3]);
-+		return 0;
-+	}
-+	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
-+		printk("SMP mptable: checksum error!\n");
-+		return 0;
-+	}
-+	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
-+		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
-+			mpc->mpc_spec);
-+		return 0;
-+	}
-+	if (!mpc->mpc_lapic) {
-+		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
-+		return 0;
-+	}
-+	memcpy(str,mpc->mpc_oem,8);
-+	str[8]=0;
-+	printk(KERN_INFO "OEM ID: %s ",str);
-+
-+	memcpy(str,mpc->mpc_productid,12);
-+	str[12]=0;
-+	printk(KERN_INFO "Product ID: %s ",str);
-+
-+	printk(KERN_INFO "APIC at: 0x%X\n",mpc->mpc_lapic);
-+
-+	/* save the local APIC address, it might be non-default */
-+	if (!acpi_lapic)
-+	mp_lapic_addr = mpc->mpc_lapic;
-+
-+	/*
-+	 *	Now process the configuration blocks.
-+	 */
-+	while (count < mpc->mpc_length) {
-+		switch(*mpt) {
-+			case MP_PROCESSOR:
-+			{
-+				struct mpc_config_processor *m=
-+					(struct mpc_config_processor *)mpt;
-+				if (!acpi_lapic)
-+				MP_processor_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_BUS:
-+			{
-+				struct mpc_config_bus *m=
-+					(struct mpc_config_bus *)mpt;
-+				MP_bus_info(m);
-+				mpt += sizeof(*m);
-+				count += sizeof(*m);
-+				break;
-+			}
-+			case MP_IOAPIC:
-+			{
-+				struct mpc_config_ioapic *m=
-+					(struct mpc_config_ioapic *)mpt;
-+				MP_ioapic_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_INTSRC:
-+			{
-+				struct mpc_config_intsrc *m=
-+					(struct mpc_config_intsrc *)mpt;
-+
-+				MP_intsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+			case MP_LINTSRC:
-+			{
-+				struct mpc_config_lintsrc *m=
-+					(struct mpc_config_lintsrc *)mpt;
-+				MP_lintsrc_info(m);
-+				mpt+=sizeof(*m);
-+				count+=sizeof(*m);
-+				break;
-+			}
-+		}
-+	}
-+	clustered_apic_check();
-+	if (!num_processors)
-+		printk(KERN_ERR "SMP mptable: no processors registered!\n");
-+	return num_processors;
-+}
-+
-+static int __init ELCR_trigger(unsigned int irq)
-+{
-+	unsigned int port;
-+
-+	port = 0x4d0 + (irq >> 3);
-+	return (inb(port) >> (irq & 7)) & 1;
-+}
-+
-+static void __init construct_default_ioirq_mptable(int mpc_default_type)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int i;
-+	int ELCR_fallback = 0;
-+
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;			/* conforming */
-+	intsrc.mpc_srcbus = 0;
-+	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
-+
-+	intsrc.mpc_irqtype = mp_INT;
-+
-+	/*
-+	 *  If true, we have an ISA/PCI system with no IRQ entries
-+	 *  in the MP table. To prevent the PCI interrupts from being set up
-+	 *  incorrectly, we try to use the ELCR. The sanity check to see if
-+	 *  there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
-+	 *  never be level sensitive, so we simply see if the ELCR agrees.
-+	 *  If it does, we assume it's valid.
-+	 */
-+	if (mpc_default_type == 5) {
-+		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
-+
-+		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
-+			printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
-+		else {
-+			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
-+			ELCR_fallback = 1;
-+		}
-+	}
-+
-+	for (i = 0; i < 16; i++) {
-+		switch (mpc_default_type) {
-+		case 2:
-+			if (i == 0 || i == 13)
-+				continue;	/* IRQ0 & IRQ13 not connected */
-+			/* fall through */
-+		default:
-+			if (i == 2)
-+				continue;	/* IRQ2 is never connected */
-+		}
-+
-+		if (ELCR_fallback) {
-+			/*
-+			 *  If the ELCR indicates a level-sensitive interrupt, we
-+			 *  copy that information over to the MP table in the
-+			 *  irqflag field (level sensitive, active high polarity).
-+			 */
-+			if (ELCR_trigger(i))
-+				intsrc.mpc_irqflag = 13;
-+			else
-+				intsrc.mpc_irqflag = 0;
-+		}
-+
-+		intsrc.mpc_srcbusirq = i;
-+		intsrc.mpc_dstirq = i ? i : 2;		/* IRQ0 to INTIN2 */
-+		MP_intsrc_info(&intsrc);
-+	}
-+
-+	intsrc.mpc_irqtype = mp_ExtINT;
-+	intsrc.mpc_srcbusirq = 0;
-+	intsrc.mpc_dstirq = 0;				/* 8259A to INTIN0 */
-+	MP_intsrc_info(&intsrc);
-+}
-+
-+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
-+{
-+	struct mpc_config_processor processor;
-+	struct mpc_config_bus bus;
-+	struct mpc_config_ioapic ioapic;
-+	struct mpc_config_lintsrc lintsrc;
-+	int linttypes[2] = { mp_ExtINT, mp_NMI };
-+	int i;
-+
-+	/*
-+	 * local APIC has default address
-+	 */
-+	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-+
-+	/*
-+	 * 2 CPUs, numbered 0 & 1.
-+	 */
-+	processor.mpc_type = MP_PROCESSOR;
-+	/* Either an integrated APIC or a discrete 82489DX. */
-+	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	processor.mpc_cpuflag = CPU_ENABLED;
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
-+				   (boot_cpu_data.x86_model << 4) |
-+				   boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
-+	for (i = 0; i < 2; i++) {
-+		processor.mpc_apicid = i;
-+		MP_processor_info(&processor);
-+	}
-+
-+	bus.mpc_type = MP_BUS;
-+	bus.mpc_busid = 0;
-+	switch (mpc_default_type) {
-+		default:
-+			printk(KERN_ERR "???\nUnknown standard configuration %d\n",
-+				mpc_default_type);
-+			/* fall through */
-+		case 1:
-+		case 5:
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
-+			break;
-+		case 2:
-+		case 6:
-+		case 3:
-+			memcpy(bus.mpc_bustype, "EISA  ", 6);
-+			break;
-+		case 4:
-+		case 7:
-+			memcpy(bus.mpc_bustype, "MCA   ", 6);
-+	}
-+	MP_bus_info(&bus);
-+	if (mpc_default_type > 4) {
-+		bus.mpc_busid = 1;
-+		memcpy(bus.mpc_bustype, "PCI   ", 6);
-+		MP_bus_info(&bus);
-+	}
-+
-+	ioapic.mpc_type = MP_IOAPIC;
-+	ioapic.mpc_apicid = 2;
-+	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
-+	ioapic.mpc_flags = MPC_APIC_USABLE;
-+	ioapic.mpc_apicaddr = 0xFEC00000;
-+	MP_ioapic_info(&ioapic);
-+
-+	/*
-+	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
-+	 */
-+	construct_default_ioirq_mptable(mpc_default_type);
-+
-+	lintsrc.mpc_type = MP_LINTSRC;
-+	lintsrc.mpc_irqflag = 0;		/* conforming */
-+	lintsrc.mpc_srcbusid = 0;
-+	lintsrc.mpc_srcbusirq = 0;
-+	lintsrc.mpc_destapic = MP_APIC_ALL;
-+	for (i = 0; i < 2; i++) {
-+		lintsrc.mpc_irqtype = linttypes[i];
-+		lintsrc.mpc_destapiclint = i;
-+		MP_lintsrc_info(&lintsrc);
-+	}
-+}
-+
-+static struct intel_mp_floating *mpf_found;
-+
-+/*
-+ * Scan the memory blocks for an SMP configuration block.
-+ */
-+void __init get_smp_config (void)
-+{
-+	struct intel_mp_floating *mpf = mpf_found;
-+
-+	/*
-+ 	 * ACPI may be used to obtain the entire SMP configuration or just to 
-+ 	 * enumerate/configure processors (CONFIG_ACPI_BOOT).  Note that 
-+ 	 * ACPI supports both logical (e.g. Hyper-Threading) and physical 
-+ 	 * processors, where MPS only supports physical.
-+ 	 */
-+ 	if (acpi_lapic && acpi_ioapic) {
-+ 		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
-+ 		return;
-+	}
-+ 	else if (acpi_lapic)
-+ 		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
-+
-+	printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
-+	if (mpf->mpf_feature2 & (1<<7)) {
-+		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
-+		pic_mode = 1;
-+	} else {
-+		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
-+		pic_mode = 0;
-+	}
-+
-+	/*
-+	 * Now see if we need to read further.
-+	 */
-+	if (mpf->mpf_feature1 != 0) {
-+
-+		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
-+		construct_default_ISA_mptable(mpf->mpf_feature1);
-+
-+	} else if (mpf->mpf_physptr) {
-+
-+		/*
-+		 * Read the physical hardware table.  Anything here will
-+		 * override the defaults.
-+		 */
-+ 		if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
-+			smp_found_config = 0;
-+			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
-+			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
-+			return;
-+		}
-+		/*
-+		 * If there are no explicit MP IRQ entries, then we are
-+		 * broken.  We set up most of the low 16 IO-APIC pins to
-+		 * ISA defaults and hope it will work.
-+		 */
-+		if (!mp_irq_entries) {
-+			struct mpc_config_bus bus;
-+
-+			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
-+
-+			bus.mpc_type = MP_BUS;
-+			bus.mpc_busid = 0;
-+			memcpy(bus.mpc_bustype, "ISA   ", 6);
-+			MP_bus_info(&bus);
-+
-+			construct_default_ioirq_mptable(0);
-+		}
-+
-+	} else
-+		BUG();
-+
-+	printk(KERN_INFO "Processors: %d\n", num_processors);
-+	/*
-+	 * Only use the first configuration found.
-+	 */
-+}
-+
-+static int __init smp_scan_config (unsigned long base, unsigned long length)
-+{
-+	extern void __bad_mpf_size(void); 
-+	unsigned int *bp = isa_bus_to_virt(base);
-+	struct intel_mp_floating *mpf;
-+
-+	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
-+	if (sizeof(*mpf) != 16)
-+		__bad_mpf_size();
-+
-+	while (length > 0) {
-+		mpf = (struct intel_mp_floating *)bp;
-+		if ((*bp == SMP_MAGIC_IDENT) &&
-+			(mpf->mpf_length == 1) &&
-+			!mpf_checksum((unsigned char *)bp, 16) &&
-+			((mpf->mpf_specification == 1)
-+				|| (mpf->mpf_specification == 4)) ) {
-+
-+			smp_found_config = 1;
-+			mpf_found = mpf;
-+			return 1;
-+		}
-+		bp += 4;
-+		length -= 16;
-+	}
-+	return 0;
-+}
-+
-+void __init find_intel_smp (void)
-+{
-+	unsigned int address;
-+
-+	/*
-+	 * FIXME: Linux assumes you have 640K of base ram..
-+	 * this continues the error...
-+	 *
-+	 * 1) Scan the bottom 1K for a signature
-+	 * 2) Scan the top 1K of base RAM
-+	 * 3) Scan the 64K of bios
-+	 */
-+	if (smp_scan_config(0x0,0x400) ||
-+		smp_scan_config(639*0x400,0x400) ||
-+			smp_scan_config(0xF0000,0x10000))
-+		return;
-+	/*
-+	 * If it is an SMP machine we should know now, unless the
-+	 * configuration is in an EISA/MCA bus machine with an
-+	 * extended bios data area.
-+	 *
-+	 * there is a real-mode segmented pointer pointing to the
-+	 * 4K EBDA area at 0x40E, calculate and scan it here.
-+	 *
-+	 * NOTE! There are Linux loaders that will corrupt the EBDA
-+	 * area, and as such this kind of SMP config may be less
-+	 * trustworthy, simply because the SMP table may have been
-+	 * stomped on during early boot. These loaders are buggy and
-+	 * should be fixed.
-+	 */
-+
-+	address = *(unsigned short *)phys_to_virt(0x40E);
-+	address <<= 4;
-+	if (smp_scan_config(address, 0x1000))
-+		return;
-+
-+	/* If we have come this far, we did not find an MP table  */
-+	 printk(KERN_INFO "No mptable found.\n");
-+}
-+
-+/*
-+ * - Intel MP Configuration Table
-+ */
-+void __init find_smp_config (void)
-+{
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	find_intel_smp();
-+#endif
-+}
-+
-+
-+/* --------------------------------------------------------------------------
-+                            ACPI-based MP Configuration
-+   -------------------------------------------------------------------------- */
-+
-+#ifdef CONFIG_ACPI_BOOT
-+
-+void __init mp_register_lapic_address (
-+	u64			address)
-+{
-+#ifndef CONFIG_XEN
-+	mp_lapic_addr = (unsigned long) address;
-+
-+	if (boot_cpu_id == -1U)
-+		boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
-+
-+	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
-+#endif
-+}
-+
-+
-+void __init mp_register_lapic (
-+	u8			id, 
-+	u8			enabled)
-+{
-+	struct mpc_config_processor processor;
-+	int			boot_cpu = 0;
-+	
-+	if (id >= MAX_APICS) {
-+		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
-+			id, MAX_APICS);
-+		return;
-+	}
-+
-+	if (id == boot_cpu_physical_apicid)
-+		boot_cpu = 1;
-+
-+#ifndef CONFIG_XEN
-+	processor.mpc_type = MP_PROCESSOR;
-+	processor.mpc_apicid = id;
-+	processor.mpc_apicver = 0x10; /* TBD: lapic version */
-+	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
-+	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
-+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | 
-+		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
-+	processor.mpc_reserved[0] = 0;
-+	processor.mpc_reserved[1] = 0;
-+#endif
-+
-+	MP_processor_info(&processor);
-+}
-+
-+#ifdef CONFIG_X86_IO_APIC
-+
-+#define MP_ISA_BUS		0
-+#define MP_MAX_IOAPIC_PIN	127
-+
-+static struct mp_ioapic_routing {
-+	int			apic_id;
-+	int			gsi_start;
-+	int			gsi_end;
-+	u32			pin_programmed[4];
-+} mp_ioapic_routing[MAX_IO_APICS];
-+
-+
-+static int mp_find_ioapic (
-+	int			gsi)
-+{
-+	int			i = 0;
-+
-+	/* Find the IOAPIC that manages this GSI. */
-+	for (i = 0; i < nr_ioapics; i++) {
-+		if ((gsi >= mp_ioapic_routing[i].gsi_start)
-+			&& (gsi <= mp_ioapic_routing[i].gsi_end))
-+			return i;
-+	}
-+
-+	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
-+
-+	return -1;
-+}
-+	
-+
-+void __init mp_register_ioapic (
-+	u8			id, 
-+	u32			address,
-+	u32			gsi_base)
-+{
-+	int			idx = 0;
-+
-+	if (nr_ioapics >= MAX_IO_APICS) {
-+		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
-+			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
-+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
-+	}
-+	if (!address) {
-+		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
-+			" found in MADT table, skipping!\n");
-+		return;
-+	}
-+
-+	idx = nr_ioapics++;
-+
-+	mp_ioapics[idx].mpc_type = MP_IOAPIC;
-+	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
-+	mp_ioapics[idx].mpc_apicaddr = address;
-+
-+	mp_ioapics[idx].mpc_apicid = id;
-+	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
-+	
-+	/* 
-+	 * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
-+	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
-+	 */
-+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
-+	mp_ioapic_routing[idx].gsi_start = gsi_base;
-+	mp_ioapic_routing[idx].gsi_end = gsi_base + 
-+		io_apic_get_redir_entries(idx);
-+
-+	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
-+		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, 
-+		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
-+		mp_ioapic_routing[idx].gsi_start,
-+		mp_ioapic_routing[idx].gsi_end);
-+
-+	return;
-+}
-+
-+
-+void __init mp_override_legacy_irq (
-+	u8			bus_irq,
-+	u8			polarity, 
-+	u8			trigger, 
-+	u32			gsi)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int			ioapic = -1;
-+	int			pin = -1;
-+
-+	/* 
-+	 * Convert 'gsi' to 'ioapic.pin'.
-+	 */
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0)
-+		return;
-+	pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-+
-+	/*
-+	 * TBD: This check is for faulty timer entries, where the override
-+	 *      erroneously sets the trigger to level, resulting in a HUGE 
-+	 *      increase of timer interrupts!
-+	 */
-+	if ((bus_irq == 0) && (trigger == 3))
-+		trigger = 1;
-+
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqtype = mp_INT;
-+	intsrc.mpc_irqflag = (trigger << 2) | polarity;
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_srcbusirq = bus_irq;				       /* IRQ */
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	   /* APIC ID */
-+	intsrc.mpc_dstirq = pin;				    /* INTIN# */
-+
-+	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", 
-+		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
-+
-+	mp_irqs[mp_irq_entries] = intsrc;
-+	if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+		panic("Max # of irq sources exceeded!\n");
-+
-+	return;
-+}
-+
-+
-+void __init mp_config_acpi_legacy_irqs (void)
-+{
-+	struct mpc_config_intsrc intsrc;
-+	int			i = 0;
-+	int			ioapic = -1;
-+
-+	/* 
-+	 * Fabricate the legacy ISA bus (bus #31).
-+	 */
-+	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
-+	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
-+
-+	/* 
-+	 * Locate the IOAPIC that manages the ISA IRQs (0-15). 
-+	 */
-+	ioapic = mp_find_ioapic(0);
-+	if (ioapic < 0)
-+		return;
-+
-+	intsrc.mpc_type = MP_INTSRC;
-+	intsrc.mpc_irqflag = 0;					/* Conforming */
-+	intsrc.mpc_srcbus = MP_ISA_BUS;
-+	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
-+
-+	/* 
-+	 * Use the default configuration for the IRQs 0-15, unless
-+	 * overridden by (MADT) interrupt source override entries.
-+	 */
-+	for (i = 0; i < 16; i++) {
-+		int idx;
-+
-+		for (idx = 0; idx < mp_irq_entries; idx++) {
-+			struct mpc_config_intsrc *irq = mp_irqs + idx;
-+
-+			/* Do we already have a mapping for this ISA IRQ? */
-+			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
-+				break;
-+
-+			/* Do we already have a mapping for this IOAPIC pin */
-+			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
-+				(irq->mpc_dstirq == i))
-+				break;
-+		}
-+
-+		if (idx != mp_irq_entries) {
-+			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
-+			continue;			/* IRQ already used */
-+		}
-+
-+		intsrc.mpc_irqtype = mp_INT;
-+		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
-+		intsrc.mpc_dstirq = i;
-+
-+		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
-+			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, 
-+			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, 
-+			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, 
-+			intsrc.mpc_dstirq);
-+
-+		mp_irqs[mp_irq_entries] = intsrc;
-+		if (++mp_irq_entries == MAX_IRQ_SOURCES)
-+			panic("Max # of irq sources exceeded!\n");
-+	}
-+
-+	return;
-+}
-+
-+int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
-+{
-+	int			ioapic = -1;
-+	int			ioapic_pin = 0;
-+	int			idx, bit = 0;
-+
-+	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
-+		return gsi;
-+
-+#ifdef CONFIG_ACPI_BUS
-+	/* Don't set up the ACPI SCI because it's already set up */
-+	if (acpi_fadt.sci_int == gsi)
-+		return gsi;
-+#endif
-+
-+	ioapic = mp_find_ioapic(gsi);
-+	if (ioapic < 0) {
-+		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
-+		return gsi;
-+	}
-+
-+	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
-+
-+	/* 
-+	 * Avoid pin reprogramming.  PRTs typically include entries  
-+	 * with redundant pin->gsi mappings (but unique PCI devices);
-+	 * we only program the IOAPIC on the first.
-+	 */
-+	bit = ioapic_pin % 32;
-+	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
-+	if (idx > 3) {
-+		printk(KERN_ERR "Invalid reference to IOAPIC pin "
-+			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id, 
-+			ioapic_pin);
-+		return gsi;
-+	}
-+	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
-+		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
-+			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
-+		return gsi;
-+	}
-+
-+	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
-+
-+	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
-+		edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
-+		active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
-+	return gsi;
-+}
-+
-+#endif /*CONFIG_X86_IO_APIC*/
-+#endif /*CONFIG_ACPI_BOOT*/
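
For reference, smp_scan_config() above only accepts an MP floating pointer structure whose bytes sum to zero modulo 256, as computed by mpf_checksum(). The standalone sketch below shows that rule outside the kernel; the 16-byte sample table is invented purely for illustration and only its checksum byte placement (offset 10) follows the MP spec.

#include <stdio.h>

/* Same 8-bit checksum as mpf_checksum() in the patch: 0 means valid. */
static int mpf_checksum(const unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

int main(void)
{
	unsigned char table[16] = { '_', 'M', 'P', '_' };	/* rest zero */
	int i, sum = 0;

	/* Fix up the checksum byte so the whole block sums to zero,
	 * just as firmware would when emitting the structure. */
	for (i = 0; i < 16; i++)
		sum += table[i];
	table[10] = (unsigned char)(-sum);

	printf("checksum %s\n", mpf_checksum(table, 16) == 0 ? "ok" : "bad");
	return 0;
}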
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/pci-nommu.c linux-2.6.12-xen/arch/xen/x86_64/kernel/pci-nommu.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/pci-nommu.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/pci-nommu.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,98 @@
-+/* Fallback functions when the main IOMMU code is not compiled in. This
-+   code is roughly equivalent to i386. */
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/pci.h>
-+#include <linux/string.h>
-+#include <asm/proto.h>
-+#include <asm/processor.h>
-+
-+int iommu_merge = 0;
-+EXPORT_SYMBOL(iommu_merge);
-+
-+dma_addr_t bad_dma_address;
-+EXPORT_SYMBOL(bad_dma_address);
-+
-+int iommu_bio_merge = 0;
-+EXPORT_SYMBOL(iommu_bio_merge);
-+
-+int iommu_sac_force = 0;
-+EXPORT_SYMBOL(iommu_sac_force);
-+
-+#if 0
-+/* 
-+ * Dummy IO MMU functions
-+ */
-+
-+void *dma_alloc_coherent(struct device *hwdev, size_t size,
-+			 dma_addr_t *dma_handle, unsigned gfp)
-+{
-+	void *ret;
-+	u64 mask;
-+	int order = get_order(size);
-+
-+	if (hwdev)
-+		mask = hwdev->coherent_dma_mask & *hwdev->dma_mask;
-+	else
-+		mask = 0xffffffff;
-+	for (;;) {
-+		ret = (void *)__get_free_pages(gfp, order);
-+		if (ret == NULL)
-+			return NULL;
-+		*dma_handle = virt_to_bus(ret);
-+		if ((*dma_handle & ~mask) == 0)
-+			break;
-+		free_pages((unsigned long)ret, order);
-+		if (gfp & GFP_DMA)
-+			return NULL;
-+		gfp |= GFP_DMA;
-+	}
-+
-+	memset(ret, 0, size);
-+	return ret;
-+}
-+EXPORT_SYMBOL(dma_alloc_coherent);
-+
-+void dma_free_coherent(struct device *hwdev, size_t size,
-+			 void *vaddr, dma_addr_t dma_handle)
-+{
-+	free_pages((unsigned long)vaddr, get_order(size));
-+}
-+EXPORT_SYMBOL(dma_free_coherent);
-+#endif
-+
-+#if 0
-+int dma_supported(struct device *hwdev, u64 mask)
-+{
-+        /*
-+         * we fall back to GFP_DMA when the mask isn't all 1s,
-+         * so we can't guarantee allocations that must be
-+         * within a tighter range than GFP_DMA..
-+	 * RED-PEN this won't work for pci_map_single. Caller has to
-+	 * use GFP_DMA in the first place.
-+         */
-+        if (mask < 0x00ffffff)
-+                return 0;
-+
-+	return 1;
-+} 
-+EXPORT_SYMBOL(dma_supported);
-+#endif
-+
-+int dma_get_cache_alignment(void)
-+{
-+	return boot_cpu_data.x86_clflush_size;
-+}
-+EXPORT_SYMBOL(dma_get_cache_alignment);
-+
-+static int __init check_ram(void) 
-+{ 
-+	if (end_pfn >= 0xffffffff>>PAGE_SHIFT) { 
-+		printk(
-+		KERN_ERR "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
-+		KERN_ERR "WARNING 32bit PCI may malfunction.\n");
-+	} 
-+	return 0;
-+} 
-+__initcall(check_ram);
-+
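
For reference, the (currently #if 0'd) dma_alloc_coherent() fallback above keeps retrying the allocation until the returned bus address has no bits set outside the device's DMA mask, dropping to GFP_DMA if needed. Below is a small standalone sketch of just the mask test, with example addresses chosen only for illustration; it is not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* A buffer is usable only if its bus address fits entirely inside the mask. */
static int fits_dma_mask(uint64_t bus_addr, uint64_t mask)
{
	return (bus_addr & ~mask) == 0;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* typical 32-bit PCI device */

	printf("0x80000000 under 4GB:  %d\n", fits_dma_mask(0x80000000ULL, mask32));
	printf("0x100000000 under 4GB: %d\n", fits_dma_mask(0x100000000ULL, mask32));
	return 0;
}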
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/process.c linux-2.6.12-xen/arch/xen/x86_64/kernel/process.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/process.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/process.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,752 @@
-+/*
-+ *  linux/arch/x86-64/kernel/process.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
-+ * 
-+ *  X86-64 port
-+ *	Andi Kleen.
-+ * 
-+ *  $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
-+ * 
-+ *  Jun Nakajima <jun.nakajima at intel.com> 
-+ *     Modified for Xen
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of process handling..
-+ */
-+
-+#include <stdarg.h>
-+
-+#include <linux/cpu.h>
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/elfcore.h>
-+#include <linux/smp.h>
-+#include <linux/slab.h>
-+#include <linux/user.h>
-+#include <linux/module.h>
-+#include <linux/a.out.h>
-+#include <linux/interrupt.h>
-+#include <linux/delay.h>
-+#include <linux/irq.h>
-+#include <linux/ptrace.h>
-+#include <linux/utsname.h>
-+#include <linux/random.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/mmu_context.h>
-+#include <asm/pda.h>
-+#include <asm/prctl.h>
-+#include <asm/kdebug.h>
-+#include <asm-xen/xen-public/dom0_ops.h>
-+#include <asm-xen/xen-public/physdev.h>
-+#include <asm/desc.h>
-+#include <asm/proto.h>
-+#include <asm/hardirq.h>
-+#include <asm/ia32.h>
-+
-+asmlinkage extern void ret_from_fork(void);
-+
-+unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
-+
-+static atomic_t hlt_counter = ATOMIC_INIT(0);
-+
-+unsigned long boot_option_idle_override = 0;
-+EXPORT_SYMBOL(boot_option_idle_override);
-+
-+/*
-+ * Powermanagement idle function, if any..
-+ */
-+void (*pm_idle)(void);
-+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-+
-+void disable_hlt(void)
-+{
-+	atomic_inc(&hlt_counter);
-+}
-+
-+EXPORT_SYMBOL(disable_hlt);
-+
-+void enable_hlt(void)
-+{
-+	atomic_dec(&hlt_counter);
-+}
-+
-+EXPORT_SYMBOL(enable_hlt);
-+
-+/* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
-+extern void stop_hz_timer(void);
-+extern void start_hz_timer(void);
-+void xen_idle(void)
-+{
-+	local_irq_disable();
-+
-+	if (need_resched()) {
-+		local_irq_enable();
-+	} else {
-+		stop_hz_timer();
-+		/* Blocking includes an implicit local_irq_enable(). */
-+		HYPERVISOR_sched_op(SCHEDOP_block, 0);
-+		start_hz_timer();
-+	}
-+}
-+
-+#ifdef CONFIG_HOTPLUG_CPU
-+#include <asm/nmi.h>
-+/* We don't actually take the CPU down, just spin without interrupts. */
-+static inline void play_dead(void)
-+{
-+	/* Ack it */
-+	__get_cpu_var(cpu_state) = CPU_DEAD;
-+
-+	/* We shouldn't have to disable interrupts while dead, but
-+	 * some interrupts just don't seem to go away, and this makes
-+	 * it "work" for testing purposes. */
-+	/* Death loop */
-+	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
-+		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-+
-+	local_irq_disable();
-+	__flush_tlb_all();
-+	cpu_set(smp_processor_id(), cpu_online_map);
-+	local_irq_enable();
-+}
-+#else
-+static inline void play_dead(void)
-+{
-+	BUG();
-+}
-+#endif /* CONFIG_HOTPLUG_CPU */
-+
-+/*
-+ * The idle thread. There's no useful work to be
-+ * done, so just try to conserve power and have a
-+ * low exit latency (ie sit in a loop waiting for
-+ * somebody to say that they'd like to reschedule)
-+ */
-+void cpu_idle (void)
-+{
-+	int cpu = smp_processor_id();
-+
-+	/* endless idle loop with no priority at all */
-+	while (1) {
-+		while (!need_resched()) {
-+			if (__get_cpu_var(cpu_idle_state))
-+				__get_cpu_var(cpu_idle_state) = 0;
-+			rmb();
-+			
-+			if (cpu_is_offline(cpu))
-+				play_dead();
-+
-+			xen_idle();
-+		}
-+
-+		schedule();
-+	}
-+}
-+
-+void cpu_idle_wait(void)
-+{
-+	unsigned int cpu, this_cpu = get_cpu();
-+	cpumask_t map;
-+
-+	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-+	put_cpu();
-+
-+ 	cpus_clear(map);
-+	for_each_online_cpu(cpu) {
-+		per_cpu(cpu_idle_state, cpu) = 1;
-+		cpu_set(cpu, map);
-+	}
-+
-+	__get_cpu_var(cpu_idle_state) = 0;
-+
-+	wmb();
-+	do {
-+		ssleep(1);
-+		for_each_online_cpu(cpu) {
-+			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-+				cpu_clear(cpu, map);
-+		}
-+		cpus_and(map, map, cpu_online_map);
-+	} while (!cpus_empty(map));
-+}
-+EXPORT_SYMBOL_GPL(cpu_idle_wait);
-+
-+/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
-+/* Always use xen_idle() instead. */
-+void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
-+
-+/* Prints also some state that isn't saved in the pt_regs */ 
-+void __show_regs(struct pt_regs * regs)
-+{
-+	unsigned long fs, gs, shadowgs;
-+	unsigned int fsindex,gsindex;
-+	unsigned int ds,cs,es; 
-+
-+	printk("\n");
-+	print_modules();
-+	printk("Pid: %d, comm: %.20s %s %s\n", 
-+	       current->pid, current->comm, print_tainted(), system_utsname.release);
-+	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
-+	printk_address(regs->rip); 
-+	printk("\nRSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
-+	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
-+	       regs->rax, regs->rbx, regs->rcx);
-+	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
-+	       regs->rdx, regs->rsi, regs->rdi); 
-+	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
-+	       regs->rbp, regs->r8, regs->r9); 
-+	printk("R10: %016lx R11: %016lx R12: %016lx\n",
-+	       regs->r10, regs->r11, regs->r12); 
-+	printk("R13: %016lx R14: %016lx R15: %016lx\n",
-+	       regs->r13, regs->r14, regs->r15); 
-+
-+	asm("mov %%ds,%0" : "=r" (ds)); 
-+	asm("mov %%cs,%0" : "=r" (cs)); 
-+	asm("mov %%es,%0" : "=r" (es)); 
-+	asm("mov %%fs,%0" : "=r" (fsindex));
-+	asm("mov %%gs,%0" : "=r" (gsindex));
-+
-+	rdmsrl(MSR_FS_BASE, fs);
-+	rdmsrl(MSR_GS_BASE, gs); 
-+	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 
-+
-+	printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 
-+	       fs,fsindex,gs,gsindex,shadowgs); 
-+	printk("CS:  %04x DS: %04x ES: %04x\n", cs, ds, es); 
-+
-+}
-+
-+void show_regs(struct pt_regs *regs)
-+{
-+	__show_regs(regs);
-+	show_trace(&regs->rsp);
-+}
-+
-+/*
-+ * Free current thread data structures etc..
-+ */
-+void exit_thread(void)
-+{
-+	struct task_struct *me = current;
-+	struct thread_struct *t = &me->thread;
-+	if (me->thread.io_bitmap_ptr) { 
-+		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-+
-+		kfree(t->io_bitmap_ptr);
-+		t->io_bitmap_ptr = NULL;
-+		/*
-+		 * Careful, clear this in the TSS too:
-+		 */
-+		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-+		t->io_bitmap_max = 0;
-+		put_cpu();
-+	}
-+}
-+
-+void load_gs_index(unsigned gs)
-+{
-+	HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs);
-+}
-+
-+void flush_thread(void)
-+{
-+	struct task_struct *tsk = current;
-+	struct thread_info *t = current_thread_info();
-+
-+	if (t->flags & _TIF_ABI_PENDING)
-+		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
-+
-+	tsk->thread.debugreg0 = 0;
-+	tsk->thread.debugreg1 = 0;
-+	tsk->thread.debugreg2 = 0;
-+	tsk->thread.debugreg3 = 0;
-+	tsk->thread.debugreg6 = 0;
-+	tsk->thread.debugreg7 = 0;
-+	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
-+	/*
-+	 * Forget coprocessor state..
-+	 */
-+	clear_fpu(tsk);
-+	clear_used_math();
-+}
-+
-+void release_thread(struct task_struct *dead_task)
-+{
-+	if (dead_task->mm) {
-+		if (dead_task->mm->context.size) {
-+			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
-+					dead_task->comm,
-+					dead_task->mm->context.ldt,
-+					dead_task->mm->context.size);
-+			BUG();
-+		}
-+	}
-+}
-+
-+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
-+{
-+	struct user_desc ud = { 
-+		.base_addr = addr,
-+		.limit = 0xfffff,
-+		.contents = (3 << 3), /* user */
-+		.seg_32bit = 1,
-+		.limit_in_pages = 1,
-+		.useable = 1,
-+	};
-+	struct n_desc_struct *desc = (void *)t->thread.tls_array;
-+	desc += tls;
-+	desc->a = LDT_entry_a(&ud); 
-+	desc->b = LDT_entry_b(&ud); 
-+}
-+
-+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
-+{
-+	struct desc_struct *desc = (void *)t->thread.tls_array;
-+	desc += tls;
-+	return desc->base0 | 
-+		(((u32)desc->base1) << 16) | 
-+		(((u32)desc->base2) << 24);
-+}
-+
-+/*
-+ * This gets called before we allocate a new thread and copy
-+ * the current task into it.
-+ */
-+void prepare_to_copy(struct task_struct *tsk)
-+{
-+	unlazy_fpu(tsk);
-+}
-+
-+int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, 
-+		unsigned long unused,
-+	struct task_struct * p, struct pt_regs * regs)
-+{
-+	int err;
-+	struct pt_regs * childregs;
-+	struct task_struct *me = current;
-+
-+	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
-+
-+	*childregs = *regs;
-+
-+	childregs->rax = 0;
-+	childregs->rsp = rsp;
-+	if (rsp == ~0UL) {
-+		childregs->rsp = (unsigned long)childregs;
-+	}
-+
-+	p->thread.rsp = (unsigned long) childregs;
-+	p->thread.rsp0 = (unsigned long) (childregs+1);
-+	p->thread.userrsp = me->thread.userrsp; 
-+
-+	set_ti_thread_flag(p->thread_info, TIF_FORK);
-+
-+	p->thread.fs = me->thread.fs;
-+	p->thread.gs = me->thread.gs;
-+
-+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-+	asm("mov %%es,%0" : "=m" (p->thread.es));
-+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
-+
-+	if (unlikely(me->thread.io_bitmap_ptr != NULL)) { 
-+		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
-+		if (!p->thread.io_bitmap_ptr) {
-+			p->thread.io_bitmap_max = 0;
-+			return -ENOMEM;
-+		}
-+		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
-+	} 
-+
-+	/*
-+	 * Set a new TLS for the child thread?
-+	 */
-+	if (clone_flags & CLONE_SETTLS) {
-+#ifdef CONFIG_IA32_EMULATION
-+		if (test_thread_flag(TIF_IA32))
-+			err = ia32_child_tls(p, childregs); 
-+		else 			
-+#endif	 
-+			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 
-+		if (err) 
-+			goto out;
-+	}
-+	p->thread.io_pl = current->thread.io_pl;
-+
-+	err = 0;
-+out:
-+	if (err && p->thread.io_bitmap_ptr) {
-+		kfree(p->thread.io_bitmap_ptr);
-+		p->thread.io_bitmap_max = 0;
-+	}
-+	return err;
-+}
-+
-+/*
-+ * This special macro can be used to load a debugging register
-+ */
-+#define loaddebug(thread,register) \
-+		HYPERVISOR_set_debugreg((register),	\
-+			(thread->debugreg ## register))
-+
-+
-+static inline void __save_init_fpu( struct task_struct *tsk )
-+{
-+	asm volatile( "rex64 ; fxsave %0 ; fnclex"
-+		      : "=m" (tsk->thread.i387.fxsave));
-+	tsk->thread_info->status &= ~TS_USEDFPU;
-+}
-+
-+/*
-+ *	switch_to(x,y) should switch tasks from x to y.
-+ *
-+ * This could still be optimized: 
-+ * - fold all the options into a flag word and test it with a single test.
-+ * - could test fs/gs bitsliced
-+ */
-+struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+	struct thread_struct *prev = &prev_p->thread,
-+				 *next = &next_p->thread;
-+	int cpu = smp_processor_id();  
-+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+	physdev_op_t iopl_op, iobmp_op;
-+	multicall_entry_t _mcl[8], *mcl = _mcl;
-+
-+	/*
-+	 * This is basically '__unlazy_fpu', except that we queue a
-+	 * multicall to indicate FPU task switch, rather than
-+	 * synchronously trapping to Xen.
-+	 */
-+	if (prev_p->thread_info->status & TS_USEDFPU) {
-+		__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
-+		mcl->op      = __HYPERVISOR_fpu_taskswitch;
-+		mcl->args[0] = 1;
-+		mcl++;
-+	}
-+
-+	/*
-+	 * Reload esp0, LDT and the page table pointer:
-+	 */
-+	tss->rsp0 = next->rsp0;
-+	mcl->op      = __HYPERVISOR_stack_switch;
-+	mcl->args[0] = __KERNEL_DS;
-+	mcl->args[1] = tss->rsp0;
-+	mcl++;
-+
-+	/*
-+	 * Load the per-thread Thread-Local Storage descriptor.
-+	 * This is load_TLS(next, cpu) with multicalls.
-+	 */
-+#define C(i) do {							\
-+	if (unlikely(next->tls_array[i] != prev->tls_array[i])) {	\
-+		mcl->op      = __HYPERVISOR_update_descriptor;		\
-+		mcl->args[0] = virt_to_machine(				\
-+			&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
-+		mcl->args[1] = next->tls_array[i];			\
-+		mcl++;							\
-+	}								\
-+} while (0)
-+	C(0); C(1); C(2);
-+#undef C
-+
-+	if (unlikely(prev->io_pl != next->io_pl)) {
-+		iopl_op.cmd             = PHYSDEVOP_SET_IOPL;
-+		iopl_op.u.set_iopl.iopl = (next->io_pl == 0) ? 1 : next->io_pl;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = (unsigned long)&iopl_op;
-+		mcl++;
-+	}
-+
-+	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
-+		iobmp_op.cmd                     =
-+			PHYSDEVOP_SET_IOBITMAP;
-+		iobmp_op.u.set_iobitmap.bitmap   =
-+			(char *)next->io_bitmap_ptr;
-+		iobmp_op.u.set_iobitmap.nr_ports =
-+			next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
-+		mcl->op      = __HYPERVISOR_physdev_op;
-+		mcl->args[0] = (unsigned long)&iobmp_op;
-+		mcl++;
-+	}
-+
-+	(void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
-+	/* 
-+	 * Switch DS and ES.
-+	 * This won't pick up thread selector changes, but I guess that is ok.
-+	 */
-+	if (unlikely(next->es))
-+		loadsegment(es, next->es); 
-+	
-+	if (unlikely(next->ds))
-+		loadsegment(ds, next->ds);
-+
-+	/* 
-+	 * Switch FS and GS.
-+	 */
-+	if (unlikely(next->fsindex))
-+		loadsegment(fs, next->fsindex);
-+
-+	if (next->fs)
-+		HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs); 
-+	
-+	if (unlikely(next->gsindex))
-+		load_gs_index(next->gsindex);
-+
-+	if (next->gs)
-+		HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs); 
-+
-+	/* 
-+	 * Switch the PDA context.
-+	 */
-+	prev->userrsp = read_pda(oldrsp); 
-+	write_pda(oldrsp, next->userrsp); 
-+	write_pda(pcurrent, next_p); 
-+	write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
-+
-+	/*
-+	 * Now maybe reload the debug registers
-+	 */
-+	if (unlikely(next->debugreg7)) {
-+		loaddebug(next, 0);
-+		loaddebug(next, 1);
-+		loaddebug(next, 2);
-+		loaddebug(next, 3);
-+		/* no 4 and 5 */
-+		loaddebug(next, 6);
-+		loaddebug(next, 7);
-+	}
-+
-+	return prev_p;
-+}
-+
-+/*
-+ * sys_execve() executes a new program.
-+ */
-+asmlinkage 
-+long sys_execve(char __user *name, char __user * __user *argv,
-+		char __user * __user *envp, struct pt_regs regs)
-+{
-+	long error;
-+	char * filename;
-+
-+	filename = getname(name);
-+	error = PTR_ERR(filename);
-+	if (IS_ERR(filename)) 
-+		return error;
-+	error = do_execve(filename, argv, envp, &regs); 
-+	if (error == 0) {
-+		task_lock(current);
-+		current->ptrace &= ~PT_DTRACE;
-+		task_unlock(current);
-+	}
-+	putname(filename);
-+	return error;
-+}
-+
-+void set_personality_64bit(void)
-+{
-+	/* inherit personality from parent */
-+
-+	/* Make sure to be in 64bit mode */
-+	clear_thread_flag(TIF_IA32); 
-+
-+	/* TBD: overwrites user setup. Should have two bits.
-+	   But 64bit processes have always behaved this way,
-+	   so it's not too bad. The main problem is just that
-+	   32bit children are affected again. */
-+	current->personality &= ~READ_IMPLIES_EXEC;
-+}
-+
-+asmlinkage long sys_fork(struct pt_regs *regs)
-+{
-+	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
-+}
-+
-+asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
-+{
-+	if (!newsp)
-+		newsp = regs->rsp;
-+	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
-+}
-+
-+/*
-+ * This is trivial, and on the face of it looks like it
-+ * could equally well be done in user mode.
-+ *
-+ * Not so, for quite unobvious reasons - register pressure.
-+ * In user mode vfork() cannot have a stack frame, and if
-+ * done by calling the "clone()" system call directly, you
-+ * do not have enough call-clobbered registers to hold all
-+ * the information you need.
-+ */
-+asmlinkage long sys_vfork(struct pt_regs *regs)
-+{
-+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
-+		    NULL, NULL);
-+}
-+
-+unsigned long get_wchan(struct task_struct *p)
-+{
-+	unsigned long stack;
-+	u64 fp,rip;
-+	int count = 0;
-+
-+	if (!p || p == current || p->state==TASK_RUNNING)
-+		return 0; 
-+	stack = (unsigned long)p->thread_info; 
-+	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
-+		return 0;
-+	fp = *(u64 *)(p->thread.rsp);
-+	do { 
-+		if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
-+			return 0; 
-+		rip = *(u64 *)(fp+8); 
-+		if (!in_sched_functions(rip))
-+			return rip; 
-+		fp = *(u64 *)fp; 
-+	} while (count++ < 16); 
-+	return 0;
-+}
-+
-+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
-+{ 
-+	int ret = 0; 
-+	int doit = task == current;
-+	int cpu;
-+
-+	switch (code) { 
-+	case ARCH_SET_GS:
-+		if (addr >= TASK_SIZE) 
-+			return -EPERM; 
-+		cpu = get_cpu();
-+		/* handle small bases via the GDT because that's faster to 
-+		   switch. */
-+		if (addr <= 0xffffffff) {  
-+			set_32bit_tls(task, GS_TLS, addr); 
-+			if (doit) { 
-+				load_TLS(&task->thread, cpu);
-+				load_gs_index(GS_TLS_SEL); 
-+			}
-+			task->thread.gsindex = GS_TLS_SEL; 
-+			task->thread.gs = 0;
-+		} else { 
-+			task->thread.gsindex = 0;
-+			task->thread.gs = addr;
-+			if (doit) {
-+				load_gs_index(0);
-+				ret = HYPERVISOR_set_segment_base(SEGBASE_GS_USER, addr);
-+			} 
-+		}
-+		put_cpu();
-+		break;
-+	case ARCH_SET_FS:
-+		/* Not strictly needed for fs, but do it for symmetry
-+		   with gs */
-+		if (addr >= TASK_SIZE)
-+			return -EPERM; 
-+		cpu = get_cpu();
-+		/* handle small bases via the GDT because that's faster to 
-+		   switch. */
-+		if (addr <= 0xffffffff) { 
-+			set_32bit_tls(task, FS_TLS, addr);
-+			if (doit) { 
-+				load_TLS(&task->thread, cpu); 
-+				asm volatile("mov %0,%%fs" :: "r" (FS_TLS_SEL));
-+			}
-+			task->thread.fsindex = FS_TLS_SEL;
-+			task->thread.fs = 0;
-+		} else { 
-+			task->thread.fsindex = 0;
-+			task->thread.fs = addr;
-+			if (doit) {
-+				/* set the selector to 0 to not confuse
-+				   __switch_to */
-+				asm volatile("mov %0,%%fs" :: "r" (0));
-+				ret = HYPERVISOR_set_segment_base(SEGBASE_FS, addr);
-+
-+			}
-+		}
-+		put_cpu();
-+		break;
-+	case ARCH_GET_FS: { 
-+		unsigned long base; 
-+		if (task->thread.fsindex == FS_TLS_SEL)
-+			base = read_32bit_tls(task, FS_TLS);
-+		else if (doit) {
-+			rdmsrl(MSR_FS_BASE, base);
-+		} else
-+			base = task->thread.fs;
-+		ret = put_user(base, (unsigned long __user *)addr); 
-+		break; 
-+	}
-+	case ARCH_GET_GS: { 
-+		unsigned long base;
-+		if (task->thread.gsindex == GS_TLS_SEL)
-+			base = read_32bit_tls(task, GS_TLS);
-+		else if (doit) {
-+			rdmsrl(MSR_KERNEL_GS_BASE, base);
-+		} else
-+			base = task->thread.gs;
-+		ret = put_user(base, (unsigned long __user *)addr); 
-+		break;
-+	}
-+
-+	default:
-+		ret = -EINVAL;
-+		break;
-+	} 
-+
-+	return ret;	
-+} 
-+
-+long sys_arch_prctl(int code, unsigned long addr)
-+{
-+	return do_arch_prctl(current, code, addr);
-+} 
-+
-+/* 
-+ * Capture the user space registers if the task is not running (in user space)
-+ */
-+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-+{
-+	struct pt_regs *pp, ptregs;
-+
-+	pp = (struct pt_regs *)(tsk->thread.rsp0);
-+	--pp; 
-+
-+	ptregs = *pp; 
-+	ptregs.cs &= 0xffff;
-+	ptregs.ss &= 0xffff;
-+
-+	elf_core_copy_regs(regs, &ptregs);
-+ 
-+        boot_option_idle_override = 1;
-+	return 1;
-+}
-+
-+unsigned long arch_align_stack(unsigned long sp)
-+{
-+	if (randomize_va_space)
-+		sp -= get_random_int() % 8192;
-+	return sp & ~0xf;
-+}
-+
-+#ifndef CONFIG_SMP
-+void _restore_vcpu(void)
-+{
-+}
-+#endif
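
For reference, arch_align_stack() above subtracts up to 8 KB of random offset from the stack pointer and then restores the 16-byte alignment the x86-64 ABI expects. The userspace sketch below follows the same policy, with rand() standing in for the kernel's get_random_int(); it is an illustration, not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Same shape as the kernel helper: optional random slide, then align. */
static unsigned long align_stack(unsigned long sp, int randomize)
{
	if (randomize)
		sp -= (unsigned long)(rand() % 8192);
	return sp & ~0xfUL;	/* keep 16-byte alignment */
}

int main(void)
{
	unsigned long sp = 0x7ffffffff000UL;	/* arbitrary example value */

	srand((unsigned)time(NULL));
	printf("aligned sp: %#lx\n", align_stack(sp, 1));
	return 0;
}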
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/setup64.c linux-2.6.12-xen/arch/xen/x86_64/kernel/setup64.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/setup64.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/setup64.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,359 @@
-+/* 
-+ * X86-64 specific CPU setup.
-+ * Copyright (C) 1995  Linus Torvalds
-+ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
-+ * See setup.c for older changelog.
-+ * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
-+ *
-+ * Jun Nakajima <jun.nakajima at intel.com> 
-+ *   Modified for Xen
-+ *
-+ */ 
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/string.h>
-+#include <linux/bootmem.h>
-+#include <linux/bitops.h>
-+#include <asm/bootsetup.h>
-+#include <asm/pda.h>
-+#include <asm/pgtable.h>
-+#include <asm/processor.h>
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/smp.h>
-+#include <asm/i387.h>
-+#include <asm/percpu.h>
-+#include <asm/mtrr.h>
-+#include <asm/proto.h>
-+#include <asm/mman.h>
-+#include <asm/numa.h>
-+#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
-+#endif
-+
-+char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
-+
-+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-+
-+struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned; 
-+
-+extern struct task_struct init_task;
-+
-+extern unsigned char __per_cpu_start[], __per_cpu_end[]; 
-+
-+extern struct desc_ptr cpu_gdt_descr[];
-+struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table }; 
-+
-+char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
-+
-+unsigned long __supported_pte_mask = ~0UL;
-+static int do_not_nx __initdata = 0;
-+
-+/* noexec=on|off
-+Control non executable mappings for 64bit processes.
-+
-+on	Enable(default)
-+off	Disable
-+*/ 
-+int __init nonx_setup(char *str)
-+{
-+	if (!strncmp(str, "on", 2)) {
-+                __supported_pte_mask |= _PAGE_NX; 
-+ 		do_not_nx = 0; 
-+	} else if (!strncmp(str, "off", 3)) {
-+		do_not_nx = 1;
-+		__supported_pte_mask &= ~_PAGE_NX;
-+        }
-+	return 0;
-+} 
-+__setup("noexec=", nonx_setup);	/* parsed early actually */
-+
-+int force_personality32 = READ_IMPLIES_EXEC;
-+
-+/* noexec32=on|off
-+Control non executable heap for 32bit processes.
-+To control the stack too use noexec=off
-+
-+on	PROT_READ does not imply PROT_EXEC for 32bit processes
-+off	PROT_READ implies PROT_EXEC (default)
-+*/
-+static int __init nonx32_setup(char *str)
-+{
-+	if (!strcmp(str, "on"))
-+		force_personality32 &= ~READ_IMPLIES_EXEC;
-+	else if (!strcmp(str, "off"))
-+		force_personality32 |= READ_IMPLIES_EXEC;
-+	return 0;
-+}
-+__setup("noexec32=", nonx32_setup);
-+
-+/*
-+ * Great future plan:
-+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
-+ * Always point %gs to its beginning
-+ */
-+void __init setup_per_cpu_areas(void)
-+{ 
-+	int i;
-+	unsigned long size;
-+
-+	/* Copy section for each CPU (we discard the original) */
-+	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
-+#ifdef CONFIG_MODULES
-+	if (size < PERCPU_ENOUGH_ROOM)
-+		size = PERCPU_ENOUGH_ROOM;
-+#endif
-+
-+	for (i = 0; i < NR_CPUS; i++) { 
-+		unsigned char *ptr;
-+
-+		if (!NODE_DATA(cpu_to_node(i))) {
-+			printk("cpu with no node %d, num_online_nodes %d\n",
-+			       i, num_online_nodes());
-+			ptr = alloc_bootmem(size);
-+		} else { 
-+			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
-+		}
-+		if (!ptr)
-+			panic("Cannot allocate cpu data for CPU %d\n", i);
-+		cpu_pda[i].data_offset = ptr - __per_cpu_start;
-+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-+	}
-+} 
-+
-+#ifdef CONFIG_XEN
-+static void switch_pt(void)
-+{
-+	xen_pt_switch(__pa(init_level4_pgt));
-+	xen_new_user_pt(__pa(init_level4_user_pgt));
-+}
-+
-+void __cpuinit cpu_gdt_init(struct desc_ptr *gdt_descr)
-+{
-+	unsigned long frames[16];
-+	unsigned long va;
-+	int f;
-+
-+	for (va = gdt_descr->address, f = 0;
-+	     va < gdt_descr->address + gdt_descr->size;
-+	     va += PAGE_SIZE, f++) {
-+		frames[f] = virt_to_mfn(va);
-+		make_page_readonly(
-+			(void *)va, XENFEAT_writable_descriptor_tables);
-+	}
-+	if (HYPERVISOR_set_gdt(frames, gdt_descr->size /
-+                               sizeof (struct desc_struct)))
-+		BUG();
-+}
-+#else
-+static void switch_pt(void)
-+{
-+	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
-+}
-+
-+void __init cpu_gdt_init(struct desc_ptr *gdt_descr)
-+{
-+#ifdef CONFIG_SMP
-+	int cpu = stack_smp_processor_id();
-+#else
-+	int cpu = smp_processor_id();
-+#endif
-+
-+	asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
-+	asm volatile("lidt %0" :: "m" (idt_descr));
-+}
-+#endif
-+
-+void pda_init(int cpu)
-+{ 
-+	struct x8664_pda *pda = &cpu_pda[cpu];
-+
-+	/* Set up data that may be needed in __get_free_pages early */
-+	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0)); 
-+#ifndef CONFIG_XEN
-+	wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
-+#else
-+	HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, 
-+				    (unsigned long)(cpu_pda + cpu));
-+#endif
-+
-+	pda->me = pda;
-+	pda->cpunumber = cpu; 
-+	pda->irqcount = -1;
-+	pda->kernelstack = 
-+		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE; 
-+	pda->active_mm = &init_mm;
-+	pda->mmu_state = 0;
-+
-+	if (cpu == 0) {
-+#ifdef CONFIG_XEN
-+		xen_init_pt();
-+#endif
-+		/* others are initialized in smpboot.c */
-+		pda->pcurrent = &init_task;
-+		pda->irqstackptr = boot_cpu_stack; 
-+	} else {
-+		pda->irqstackptr = (char *)
-+			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-+		if (!pda->irqstackptr)
-+			panic("cannot allocate irqstack for cpu %d", cpu); 
-+	}
-+
-+	switch_pt();
-+
-+	pda->irqstackptr += IRQSTACKSIZE-64;
-+} 
-+
-+char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ] 
-+__attribute__((section(".bss.page_aligned")));
-+
-+/* May not be marked __init: used by software suspend */
-+void syscall_init(void)
-+{
-+#ifndef CONFIG_XEN
-+	/* 
-+	 * LSTAR and STAR live in a bit strange symbiosis.
-+	 * They both write to the same internal register. STAR allows setting CS/DS
-+	 * but only a 32bit target. LSTAR sets the 64bit rip. 	 
-+	 */ 
-+	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32); 
-+	wrmsrl(MSR_LSTAR, system_call); 
-+
-+	/* Flags to clear on syscall */
-+	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 
-+#endif
-+#ifdef CONFIG_IA32_EMULATION   		
-+	syscall32_cpu_init ();
-+#endif
-+}
-+
-+void __cpuinit check_efer(void)
-+{
-+	unsigned long efer;
-+
-+	rdmsrl(MSR_EFER, efer); 
-+        if (!(efer & EFER_NX) || do_not_nx) { 
-+                __supported_pte_mask &= ~_PAGE_NX; 
-+        }       
-+}
-+
-+/*
-+ * cpu_init() initializes state that is per-CPU. Some data is already
-+ * initialized (naturally) in the bootstrap process, such as the GDT
-+ * and IDT. We reload them nevertheless, this function acts as a
-+ * 'CPU state barrier', nothing should get across.
-+ * A lot of state is already set up in PDA init.
-+ */
-+void __cpuinit cpu_init (void)
-+{
-+#ifdef CONFIG_SMP
-+	int cpu = stack_smp_processor_id();
-+#else
-+	int cpu = smp_processor_id();
-+#endif
-+	struct tss_struct *t = &per_cpu(init_tss, cpu);
-+	unsigned long v; 
-+	char *estacks = NULL; 
-+	struct task_struct *me;
-+	int i;
-+
-+	/* CPU 0 is initialised in head64.c */
-+	if (cpu != 0) {
-+		pda_init(cpu);
-+	} else 
-+		estacks = boot_exception_stacks; 
-+
-+	me = current;
-+
-+	if (cpu_test_and_set(cpu, cpu_initialized))
-+		panic("CPU#%d already initialized!\n", cpu);
-+
-+	printk("Initializing CPU#%d\n", cpu);
-+
-+	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-+
-+	/*
-+	 * Initialize the per-CPU GDT with the boot GDT,
-+	 * and set up the GDT descriptor:
-+	 */
-+#ifndef CONFIG_XEN 
-+	if (cpu) {
-+		memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
-+	}	
-+
-+	cpu_gdt_descr[cpu].size = GDT_SIZE;
-+	cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
-+
-+	memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
-+#else
-+	memcpy(me->thread.tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
-+	    GDT_ENTRY_TLS_ENTRIES * 8);
-+
-+	cpu_gdt_init(&cpu_gdt_descr[cpu]);
-+#endif
-+       
-+	/*
-+	 * Delete NT
-+	 */
-+
-+	asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
-+
-+	syscall_init();
-+
-+	wrmsrl(MSR_FS_BASE, 0);
-+	wrmsrl(MSR_KERNEL_GS_BASE, 0);
-+	barrier(); 
-+
-+	check_efer();
-+
-+	/*
-+	 * set up and load the per-CPU TSS
-+	 */
-+	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-+		if (cpu) {
-+			estacks = (char *)__get_free_pages(GFP_ATOMIC, 
-+						   EXCEPTION_STACK_ORDER);
-+			if (!estacks)
-+				panic("Cannot allocate exception stack %ld %d\n",
-+				      v, cpu); 
-+		}
-+		estacks += EXCEPTION_STKSZ;
-+		t->ist[v] = (unsigned long)estacks;
-+	}
-+
-+	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
-+	/*
-+	 * <= is required because the CPU will access up to
-+	 * 8 bits beyond the end of the IO permission bitmap.
-+	 */
-+	for (i = 0; i <= IO_BITMAP_LONGS; i++)
-+		t->io_bitmap[i] = ~0UL;
-+
-+	atomic_inc(&init_mm.mm_count);
-+	me->active_mm = &init_mm;
-+	if (me->mm)
-+		BUG();
-+	enter_lazy_tlb(&init_mm, me);
-+
-+#ifndef CONFIG_XEN
-+	set_tss_desc(cpu, t);
-+	load_TR_desc();
-+#endif
-+	load_LDT(&init_mm.context);
-+
-+	/*
-+	 * Clear all 6 debug registers:
-+	 */
-+
-+	set_debug(0UL, 0);
-+	set_debug(0UL, 1);
-+	set_debug(0UL, 2);
-+	set_debug(0UL, 3);
-+	set_debug(0UL, 6);
-+	set_debug(0UL, 7);
-+
-+	fpu_init(); 
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/setup.c linux-2.6.12-xen/arch/xen/x86_64/kernel/setup.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/setup.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/setup.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1472 @@
-+/*
-+ *  linux/arch/x86-64/kernel/setup.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *
-+ *  Nov 2001 Dave Jones <davej at suse.de>
-+ *  Forked from i386 setup code.
-+ *
-+ *  $Id$
-+ */
-+
-+/*
-+ * This file handles the architecture-dependent parts of initialization
-+ */
-+
-+#include <linux/errno.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/stddef.h>
-+#include <linux/unistd.h>
-+#include <linux/ptrace.h>
-+#include <linux/slab.h>
-+#include <linux/user.h>
-+#include <linux/a.out.h>
-+#include <linux/tty.h>
-+#include <linux/ioport.h>
-+#include <linux/delay.h>
-+#include <linux/config.h>
-+#include <linux/init.h>
-+#include <linux/initrd.h>
-+#include <linux/highmem.h>
-+#include <linux/bootmem.h>
-+#include <linux/module.h>
-+#include <asm/processor.h>
-+#include <linux/console.h>
-+#include <linux/seq_file.h>
-+#include <linux/root_dev.h>
-+#include <linux/pci.h>
-+#include <linux/acpi.h>
-+#include <linux/kallsyms.h>
-+#include <linux/edd.h>
-+#include <asm/mtrr.h>
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <asm/io.h>
-+#include <asm/smp.h>
-+#include <asm/msr.h>
-+#include <asm/desc.h>
-+#include <video/edid.h>
-+#include <asm/e820.h>
-+#include <asm/dma.h>
-+#include <asm/mpspec.h>
-+#include <asm/mmu_context.h>
-+#include <asm/bootsetup.h>
-+#include <asm/proto.h>
-+#include <asm/setup.h>
-+#include <asm/mach_apic.h>
-+#include <asm/numa.h>
-+#ifdef CONFIG_XEN
-+#include <linux/percpu.h>
-+#include <asm-xen/xen-public/physdev.h>
-+#include "setup_arch_pre.h"
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xen-public/nmi.h>
-+#include <asm-xen/features.h>
-+#define PFN_UP(x)       (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-+#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)
-+#define end_pfn_map end_pfn
-+#include <asm/mach-xen/setup_arch_post.h>
-+
-+extern unsigned long start_pfn;
-+extern struct edid_info edid_info;
-+
-+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
-+EXPORT_SYMBOL(HYPERVISOR_shared_info);
-+
-+extern char hypercall_page[PAGE_SIZE];
-+EXPORT_SYMBOL(hypercall_page);
-+
-+/* Allows setting of maximum possible memory size  */
-+unsigned long xen_override_max_pfn;
-+
-+unsigned long *phys_to_machine_mapping;
-+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
-+
-+EXPORT_SYMBOL(phys_to_machine_mapping);
-+
-+DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
-+DEFINE_PER_CPU(int, nr_multicall_ents);
-+
-+/* Raw start-of-day parameters from the hypervisor. */
-+start_info_t *xen_start_info;
-+EXPORT_SYMBOL(xen_start_info);
-+#endif
-+
-+/*
-+ * Machine setup..
-+ */
-+
-+struct cpuinfo_x86 boot_cpu_data;
-+
-+unsigned long mmu_cr4_features;
-+
-+int acpi_disabled;
-+EXPORT_SYMBOL(acpi_disabled);
-+#ifdef	CONFIG_ACPI_BOOT
-+extern int __initdata acpi_ht;
-+extern acpi_interrupt_flags	acpi_sci_flags;
-+int __initdata acpi_force = 0;
-+#endif
-+
-+int acpi_numa __initdata;
-+
-+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
-+int bootloader_type;
-+
-+unsigned long saved_video_mode;
-+
-+#ifdef CONFIG_SWIOTLB
-+int swiotlb;
-+EXPORT_SYMBOL(swiotlb);
-+#endif
-+
-+/*
-+ * Setup options
-+ */
-+struct drive_info_struct { char dummy[32]; } drive_info;
-+struct screen_info screen_info;
-+struct sys_desc_table_struct {
-+	unsigned short length;
-+	unsigned char table[0];
-+};
-+
-+struct edid_info edid_info;
-+struct e820map e820;
-+
-+extern int root_mountflags;
-+extern char _text, _etext, _edata, _end;
-+
-+char command_line[COMMAND_LINE_SIZE];
-+
-+struct resource standard_io_resources[] = {
-+	{ .name = "dma1", .start = 0x00, .end = 0x1f,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "pic1", .start = 0x20, .end = 0x21,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "timer0", .start = 0x40, .end = 0x43,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "timer1", .start = 0x50, .end = 0x53,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
-+	{ .name = "fpu", .start = 0xf0, .end = 0xff,
-+		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
-+};
-+
-+#define STANDARD_IO_RESOURCES \
-+	(sizeof standard_io_resources / sizeof standard_io_resources[0])
-+
-+#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
-+
-+struct resource data_resource = {
-+	.name = "Kernel data",
-+	.start = 0,
-+	.end = 0,
-+	.flags = IORESOURCE_RAM,
-+};
-+struct resource code_resource = {
-+	.name = "Kernel code",
-+	.start = 0,
-+	.end = 0,
-+	.flags = IORESOURCE_RAM,
-+};
-+
-+#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
-+static struct resource system_rom_resource = {
-+	.name = "System ROM",
-+	.start = 0xf0000,
-+	.end = 0xfffff,
-+	.flags = IORESOURCE_ROM,
-+};
-+
-+static struct resource extension_rom_resource = {
-+	.name = "Extension ROM",
-+	.start = 0xe0000,
-+	.end = 0xeffff,
-+	.flags = IORESOURCE_ROM,
-+};
-+
-+static struct resource adapter_rom_resources[] = {
-+	{ .name = "Adapter ROM", .start = 0xc8000, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM },
-+	{ .name = "Adapter ROM", .start = 0, .end = 0,
-+		.flags = IORESOURCE_ROM }
-+};
-+#endif
-+
-+#define ADAPTER_ROM_RESOURCES \
-+	(sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
-+static struct resource video_rom_resource = {
-+	.name = "Video ROM",
-+	.start = 0xc0000,
-+	.end = 0xc7fff,
-+	.flags = IORESOURCE_ROM,
-+};
-+#endif
-+
-+static struct resource video_ram_resource = {
-+	.name = "Video RAM area",
-+	.start = 0xa0000,
-+	.end = 0xbffff,
-+	.flags = IORESOURCE_RAM,
-+};
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
-+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-+
-+static int __init romchecksum(unsigned char *rom, unsigned long length)
-+{
-+	unsigned char *p, sum = 0;
-+
-+	for (p = rom; p < rom + length; p++)
-+		sum += *p;
-+	return sum == 0;
-+}
-+
-+static void __init probe_roms(void)
-+{
-+	unsigned long start, length, upper;
-+	unsigned char *rom;
-+	int	      i;
-+
-+	/* video rom */
-+	upper = adapter_rom_resources[0].start;
-+	for (start = video_rom_resource.start; start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
-+
-+		video_rom_resource.start = start;
-+
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
-+
-+		/* if checksum okay, trust length byte */
-+		if (length && romchecksum(rom, length))
-+			video_rom_resource.end = start + length - 1;
-+
-+		request_resource(&iomem_resource, &video_rom_resource);
-+		break;
-+	}
-+
-+	start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-+	if (start < upper)
-+		start = upper;
-+
-+	/* system rom */
-+	request_resource(&iomem_resource, &system_rom_resource);
-+	upper = system_rom_resource.start;
-+
-+	/* check for extension rom (ignore length byte!) */
-+	rom = isa_bus_to_virt(extension_rom_resource.start);
-+	if (romsignature(rom)) {
-+		length = extension_rom_resource.end - extension_rom_resource.start + 1;
-+		if (romchecksum(rom, length)) {
-+			request_resource(&iomem_resource, &extension_rom_resource);
-+			upper = extension_rom_resource.start;
-+		}
-+	}
-+
-+	/* check for adapter roms on 2k boundaries */
-+	for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
-+		rom = isa_bus_to_virt(start);
-+		if (!romsignature(rom))
-+			continue;
-+
-+		/* 0 < length <= 0x7f * 512, historically */
-+		length = rom[2] * 512;
-+
-+		/* but accept any length that fits if checksum okay */
-+		if (!length || start + length > upper || !romchecksum(rom, length))
-+			continue;
-+
-+		adapter_rom_resources[i].start = start;
-+		adapter_rom_resources[i].end = start + length - 1;
-+		request_resource(&iomem_resource, &adapter_rom_resources[i]);
-+
-+		start = adapter_rom_resources[i++].end & ~2047UL;
-+	}
-+}
-+#endif
-+
-+static __init void parse_cmdline_early (char ** cmdline_p)
-+{
-+	char c = ' ', *to = command_line, *from = COMMAND_LINE;
-+	int len = 0;
-+
-+	/* Save unparsed command line copy for /proc/cmdline */
-+#ifdef CONFIG_XEN
-+	int max_cmdline;
-+	
-+	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
-+		max_cmdline = COMMAND_LINE_SIZE;
-+	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
-+	saved_command_line[max_cmdline-1] = '\0';
-+#else
-+	memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
-+	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
-+#endif
-+
-+	for (;;) {
-+		if (c != ' ') 
-+			goto next_char; 
-+
-+#ifdef  CONFIG_SMP
-+		/*
-+		 * If the BIOS enumerates physical processors before logical,
-+		 * maxcpus=N at enumeration-time can be used to disable HT.
-+		 */
-+		else if (!memcmp(from, "maxcpus=", 8)) {
-+			extern unsigned int maxcpus;
-+
-+			maxcpus = simple_strtoul(from + 8, NULL, 0);
-+		}
-+#endif
-+#ifdef CONFIG_ACPI_BOOT
-+		/* "acpi=off" disables both ACPI table parsing and interpreter init */
-+		if (!memcmp(from, "acpi=off", 8))
-+			disable_acpi();
-+
-+		if (!memcmp(from, "acpi=force", 10)) { 
-+			/* add later when we do DMI horrors: */
-+			acpi_force = 1;
-+			acpi_disabled = 0;
-+		}
-+
-+		/* acpi=ht just means: do ACPI MADT parsing 
-+		   at bootup, but don't enable the full ACPI interpreter */
-+		if (!memcmp(from, "acpi=ht", 7)) { 
-+			if (!acpi_force)
-+				disable_acpi();
-+			acpi_ht = 1; 
-+		}
-+                else if (!memcmp(from, "pci=noacpi", 10)) 
-+			acpi_disable_pci();
-+		else if (!memcmp(from, "acpi=noirq", 10))
-+			acpi_noirq_set();
-+
-+		else if (!memcmp(from, "acpi_sci=edge", 13))
-+			acpi_sci_flags.trigger =  1;
-+		else if (!memcmp(from, "acpi_sci=level", 14))
-+			acpi_sci_flags.trigger = 3;
-+		else if (!memcmp(from, "acpi_sci=high", 13))
-+			acpi_sci_flags.polarity = 1;
-+		else if (!memcmp(from, "acpi_sci=low", 12))
-+			acpi_sci_flags.polarity = 3;
-+
-+		/* acpi=strict disables out-of-spec workarounds */
-+		else if (!memcmp(from, "acpi=strict", 11)) {
-+			acpi_strict = 1;
-+		}
-+#ifdef CONFIG_X86_IO_APIC
-+		else if (!memcmp(from, "acpi_skip_timer_override", 24))
-+			acpi_skip_timer_override = 1;
-+#endif
-+#endif
-+
-+#ifndef CONFIG_XEN
-+		if (!memcmp(from, "nolapic", 7) ||
-+		    !memcmp(from, "disableapic", 11))
-+			disable_apic = 1;
-+
-+		if (!memcmp(from, "noapic", 6)) 
-+			skip_ioapic_setup = 1;
-+
-+		if (!memcmp(from, "apic", 4)) { 
-+			skip_ioapic_setup = 0;
-+			ioapic_force = 1;
-+		}
-+#endif
-+			
-+		if (!memcmp(from, "mem=", 4))
-+			parse_memopt(from+4, &from); 
-+
-+#ifdef CONFIG_DISCONTIGMEM
-+		if (!memcmp(from, "numa=", 5))
-+			numa_setup(from+5); 
-+#endif
-+
-+#ifdef CONFIG_GART_IOMMU 
-+		if (!memcmp(from,"iommu=",6)) { 
-+			iommu_setup(from+6); 
-+		}
-+#endif
-+
-+		if (!memcmp(from,"oops=panic", 10))
-+			panic_on_oops = 1;
-+
-+		if (!memcmp(from, "noexec=", 7))
-+			nonx_setup(from + 7);
-+
-+	next_char:
-+		c = *(from++);
-+		if (!c)
-+			break;
-+		if (COMMAND_LINE_SIZE <= ++len)
-+			break;
-+		*(to++) = c;
-+	}
-+	*to = '\0';
-+	*cmdline_p = command_line;
-+}
-+
-+#ifndef CONFIG_DISCONTIGMEM
-+#ifdef CONFIG_XEN
-+static void __init contig_initmem_init(void)
-+{
-+        unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
-+        free_bootmem(0, xen_start_info->nr_pages << PAGE_SHIFT);
-+        reserve_bootmem(HIGH_MEMORY,
-+                        (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1)
-+                        - HIGH_MEMORY);
-+}
-+#else
-+static void __init contig_initmem_init(void)
-+{
-+        unsigned long bootmap_size, bootmap; 
-+        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-+        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
-+        if (bootmap == -1L) 
-+                panic("Cannot find bootmem map of size %ld\n",bootmap_size);
-+        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
-+        e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT); 
-+        reserve_bootmem(bootmap, bootmap_size);
-+} 
-+#endif	/* !CONFIG_XEN */
-+#endif
-+
-+/* Use inline assembly to define this because the nops are defined 
-+   as inline assembly strings in the include files and we cannot 
-+   get them easily into strings. */
-+asm("\t.data\nk8nops: " 
-+    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
-+    K8_NOP7 K8_NOP8); 
-+    
-+extern unsigned char k8nops[];
-+static unsigned char *k8_nops[ASM_NOP_MAX+1] = { 
-+     NULL,
-+     k8nops,
-+     k8nops + 1,
-+     k8nops + 1 + 2,
-+     k8nops + 1 + 2 + 3,
-+     k8nops + 1 + 2 + 3 + 4,
-+     k8nops + 1 + 2 + 3 + 4 + 5,
-+     k8nops + 1 + 2 + 3 + 4 + 5 + 6,
-+     k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
-+}; 
-+
-+/* Replace instructions with better alternatives for this CPU type.
-+
-+   This runs before SMP is initialized to avoid SMP problems with
-+   self-modifying code. This implies that asymmetric systems where
-+   APs have fewer capabilities than the boot processor are not handled.
-+   In this case boot with "noreplacement". */ 
-+void apply_alternatives(void *start, void *end) 
-+{ 
-+	struct alt_instr *a; 
-+	int diff, i, k;
-+	for (a = start; (void *)a < end; a++) { 
-+		if (!boot_cpu_has(a->cpuid))
-+			continue;
-+
-+		BUG_ON(a->replacementlen > a->instrlen); 
-+		__inline_memcpy(a->instr, a->replacement, a->replacementlen); 
-+		diff = a->instrlen - a->replacementlen; 
-+
-+		/* Pad the rest with nops */
-+		for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-+			k = diff;
-+			if (k > ASM_NOP_MAX)
-+				k = ASM_NOP_MAX;
-+			__inline_memcpy(a->instr + i, k8_nops[k], k); 
-+		} 
-+	}
-+} 
-+
-+static int no_replacement __initdata = 0; 
-+ 
-+void __init alternative_instructions(void)
-+{
-+	extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
-+	if (no_replacement) 
-+		return;
-+	apply_alternatives(__alt_instructions, __alt_instructions_end);
-+}
-+
-+static int __init noreplacement_setup(char *s)
-+{ 
-+     no_replacement = 1; 
-+     return 0; 
-+} 
-+
-+__setup("noreplacement", noreplacement_setup); 
-+
-+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
-+struct edd edd;
-+#ifdef CONFIG_EDD_MODULE
-+EXPORT_SYMBOL(edd);
-+#endif
-+/**
-+ * copy_edd() - Copy the BIOS EDD information
-+ *              from boot_params into a safe place.
-+ *
-+ */
-+static inline void copy_edd(void)
-+{
-+     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
-+     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
-+     edd.mbr_signature_nr = EDD_MBR_SIG_NR;
-+     edd.edd_info_nr = EDD_NR;
-+}
-+#else
-+static inline void copy_edd(void)
-+{
-+}
-+#endif
-+
-+#ifndef CONFIG_XEN
-+#define EBDA_ADDR_POINTER 0x40E
-+static void __init reserve_ebda_region(void)
-+{
-+	unsigned int addr;
-+	/** 
-+	 * there is a real-mode segmented pointer pointing to the 
-+	 * 4K EBDA area at 0x40E
-+	 */
-+	addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
-+	addr <<= 4;
-+	if (addr)
-+		reserve_bootmem_generic(addr, PAGE_SIZE);
-+}
-+#endif
-+
-+void __init setup_arch(char **cmdline_p)
-+{
-+	unsigned long kernel_end;
-+
-+#ifdef CONFIG_XEN
-+ 	ROOT_DEV = MKDEV(RAMDISK_MAJOR,0); 
-+ 	drive_info = DRIVE_INFO;
-+	kernel_end = 0;		/* dummy */
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+ 	screen_info = SCREEN_INFO;
-+
-+	/* This is drawn from a dump from vgacon:startup in standard Linux. */
-+	screen_info.orig_video_mode = 3; 
-+	screen_info.orig_video_isVGA = 1;
-+	screen_info.orig_video_lines = 25;
-+	screen_info.orig_video_cols = 80;
-+	screen_info.orig_video_ega_bx = 3;
-+	screen_info.orig_video_points = 16;
-+#endif
-+	edid_info = EDID_INFO;
-+	saved_video_mode = SAVED_VIDEO_MODE;
-+	bootloader_type = LOADER_TYPE;
-+
-+#ifdef CONFIG_BLK_DEV_RAM
-+	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+
-+
-+#endif
-+
-+	setup_xen_features();
-+
-+	HYPERVISOR_vm_assist(VMASST_CMD_enable,
-+			     VMASST_TYPE_writable_pagetables);
-+
-+	ARCH_SETUP
-+#else
-+ 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
-+ 	drive_info = DRIVE_INFO;
-+ 	screen_info = SCREEN_INFO;
-+	edid_info = EDID_INFO;
-+	saved_video_mode = SAVED_VIDEO_MODE;
-+	bootloader_type = LOADER_TYPE;
-+
-+#ifdef CONFIG_BLK_DEV_RAM
-+	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-+	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-+	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-+#endif
-+	setup_memory_region();
-+	copy_edd();
-+#endif	/* !CONFIG_XEN */
-+
-+	if (!MOUNT_ROOT_RDONLY)
-+		root_mountflags &= ~MS_RDONLY;
-+	init_mm.start_code = (unsigned long) &_text;
-+	init_mm.end_code = (unsigned long) &_etext;
-+	init_mm.end_data = (unsigned long) &_edata;
-+#ifdef CONFIG_XEN
-+	init_mm.brk = start_pfn << PAGE_SHIFT;
-+#else
-+	init_mm.brk = (unsigned long) &_end;	
-+
-+	code_resource.start = virt_to_phys(&_text);
-+	code_resource.end = virt_to_phys(&_etext)-1;
-+	data_resource.start = virt_to_phys(&_etext);
-+	data_resource.end = virt_to_phys(&_edata)-1;
-+#endif
-+
-+	parse_cmdline_early(cmdline_p);
-+
-+	early_identify_cpu(&boot_cpu_data);
-+
-+	/*
-+	 * partially used pages are not usable - thus
-+	 * we are rounding upwards:
-+	 */
-+	end_pfn = e820_end_of_ram();
-+
-+	check_efer();
-+
-+	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
-+
-+#ifdef CONFIG_ACPI_NUMA
-+	/*
-+	 * Parse SRAT to discover nodes.
-+	 */
-+	acpi_numa_init();
-+#endif
-+
-+#ifdef CONFIG_DISCONTIGMEM
-+	numa_initmem_init(0, end_pfn); 
-+#else
-+	contig_initmem_init(); 
-+#endif
-+
-+#ifndef CONFIG_XEN
-+	/* Reserve direct mapping */
-+	reserve_bootmem_generic(table_start << PAGE_SHIFT, 
-+				(table_end - table_start) << PAGE_SHIFT);
-+
-+	/* reserve kernel */
-+	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
-+	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
-+
-+	/*
-+	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-+	 * enabling clean reboots, SMP operation, laptop functions.
-+	 */
-+	reserve_bootmem_generic(0, PAGE_SIZE);
-+
-+	/* reserve ebda region */
-+	reserve_ebda_region();
-+#endif
-+
-+#ifdef CONFIG_SMP
-+	/*
-+	 * But first pinch a few for the stack/trampoline stuff
-+	 * FIXME: Don't need the extra page at 4K, but need to fix
-+	 * trampoline before removing it. (see the GDT stuff)
-+	 */
-+	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
-+
-+	/* Reserve SMP trampoline */
-+	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
-+#endif
-+
-+#ifdef CONFIG_ACPI_SLEEP
-+       /*
-+        * Reserve low memory region for sleep support.
-+        */
-+       acpi_reserve_bootmem();
-+#endif
-+#ifdef CONFIG_XEN
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (xen_start_info->mod_start) {
-+		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
-+			/*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
-+			initrd_start = INITRD_START + PAGE_OFFSET;
-+			initrd_end = initrd_start+INITRD_SIZE;
-+			initrd_below_start_ok = 1;
-+		} else {
-+			printk(KERN_ERR "initrd extends beyond end of memory "
-+				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+				(unsigned long)(INITRD_START + INITRD_SIZE),
-+				(unsigned long)(end_pfn << PAGE_SHIFT));
-+			initrd_start = 0;
-+		}
-+	}
-+#endif
-+#else	/* CONFIG_XEN */
-+#ifdef CONFIG_BLK_DEV_INITRD
-+	if (LOADER_TYPE && INITRD_START) {
-+		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
-+			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
-+			initrd_start =
-+				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
-+			initrd_end = initrd_start+INITRD_SIZE;
-+		}
-+		else {
-+			printk(KERN_ERR "initrd extends beyond end of memory "
-+			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-+			    (unsigned long)(INITRD_START + INITRD_SIZE),
-+			    (unsigned long)(end_pfn << PAGE_SHIFT));
-+			initrd_start = 0;
-+		}
-+	}
-+#endif
-+#endif	/* !CONFIG_XEN */
-+	paging_init();
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	/*
-+	 * Find and reserve possible boot-time SMP configuration:
-+	 */
-+	find_smp_config();
-+#endif
-+#ifdef CONFIG_XEN
-+	{
-+		int i, j, k, fpp;
-+		unsigned long va;
-+
-+		/* Make sure we have a large enough P->M table. */
-+		phys_to_machine_mapping = alloc_bootmem(
-+			end_pfn * sizeof(unsigned long));
-+		memset(phys_to_machine_mapping, ~0,
-+		       end_pfn * sizeof(unsigned long));
-+		memcpy(phys_to_machine_mapping,
-+		       (unsigned long *)xen_start_info->mfn_list,
-+		       xen_start_info->nr_pages * sizeof(unsigned long));
-+		free_bootmem(
-+			__pa(xen_start_info->mfn_list), 
-+			PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
-+					sizeof(unsigned long))));
-+
-+		/* 'Initial mapping' of old p2m table must be destroyed. */
-+		for (va = xen_start_info->mfn_list;
-+		     va < (xen_start_info->mfn_list +
-+			   (xen_start_info->nr_pages*sizeof(unsigned long)));
-+		     va += PAGE_SIZE) {
-+			HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+		}
-+
-+		/* 'Initial mapping' of initrd must be destroyed. */
-+		for (va = xen_start_info->mod_start;
-+		     va < (xen_start_info->mod_start+xen_start_info->mod_len);
-+		     va += PAGE_SIZE) {
-+			HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+		}
-+
-+		/* 
-+		 * Initialise the list of the frames that specify the list of 
-+		 * frames that make up the p2m table. Used by save/restore
-+		 */
-+		pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
-+		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-+		  virt_to_mfn(pfn_to_mfn_frame_list_list);
-+
-+		fpp = PAGE_SIZE/sizeof(unsigned long);
-+		for ( i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++ )
-+		{
-+			if ( (j % fpp) == 0 )
-+			{
-+				k++;
-+				BUG_ON(k>=fpp);
-+				pfn_to_mfn_frame_list[k] = alloc_bootmem(PAGE_SIZE);
-+				pfn_to_mfn_frame_list_list[k] = 
-+					virt_to_mfn(pfn_to_mfn_frame_list[k]);
-+				j=0;
-+			}
-+			pfn_to_mfn_frame_list[k][j] = 
-+				virt_to_mfn(&phys_to_machine_mapping[i]);
-+		}
-+		HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
-+
-+	}
-+
-+	if ( ! (xen_start_info->flags & SIF_INITDOMAIN))
-+	{
-+		acpi_disabled = 1;
-+#ifdef  CONFIG_ACPI_BOOT
-+		acpi_ht = 0;
-+#endif
-+	}
-+#endif
-+
-+#ifndef CONFIG_XEN
-+	check_ioapic();
-+#endif
-+
-+#ifdef CONFIG_ACPI_BOOT
-+	/*
-+	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
-+	 * Call this early for SRAT node setup.
-+	 */
-+	acpi_boot_table_init();
-+
-+	/*
-+	 * Read APIC and some other early information from ACPI tables.
-+	 */
-+	acpi_boot_init();
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	/*
-+	 * get boot-time SMP configuration:
-+	 */
-+	if (smp_found_config)
-+		get_smp_config();
-+#ifndef CONFIG_XEN
-+	init_apic_mappings();
-+#endif
-+#endif
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST) || !defined(CONFIG_XEN)
-+	/*
-+	 * Request address space for all standard RAM and ROM resources
-+	 * and also for regions reported as reserved by the e820.
-+	 */
-+	probe_roms();
-+	e820_reserve_resources(); 
-+#endif
-+
-+	request_resource(&iomem_resource, &video_ram_resource);
-+
-+	{
-+	unsigned i;
-+	/* request I/O space for devices used on all i[345]86 PCs */
-+	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
-+		request_resource(&ioport_resource, &standard_io_resources[i]);
-+	}
-+
-+	e820_setup_gap();
-+
-+#ifdef CONFIG_GART_IOMMU
-+       iommu_hole_init();
-+#endif
-+
-+#ifdef CONFIG_XEN
-+       {
-+	       physdev_op_t op;
-+
-+	       op.cmd             = PHYSDEVOP_SET_IOPL;
-+	       op.u.set_iopl.iopl = 1;
-+	       HYPERVISOR_physdev_op(&op);
-+
-+	       if (xen_start_info->flags & SIF_INITDOMAIN) {
-+		       if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+			       panic("Xen granted us console access "
-+				     "but not privileged status");
-+		       
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+	       conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+	       conswitchp = &dummy_con;
-+#endif
-+#endif
-+	       } else {
-+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
-+		       extern const struct consw xennull_con;
-+		       extern int console_use_vt;
-+#if defined(CONFIG_VGA_CONSOLE)
-+		/* disable VGA driver */
-+		       ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
-+#endif
-+		       conswitchp = &xennull_con;
-+		       console_use_vt = 0;
-+#endif
-+	       }
-+       }
-+#else	/* CONFIG_XEN */
-+
-+#ifdef CONFIG_VT
-+#if defined(CONFIG_VGA_CONSOLE)
-+	conswitchp = &vga_con;
-+#elif defined(CONFIG_DUMMY_CONSOLE)
-+	conswitchp = &dummy_con;
-+#endif
-+#endif
-+
-+#endif /* !CONFIG_XEN */
-+}
-+
-+static int __init get_model_name(struct cpuinfo_x86 *c)
-+{
-+	unsigned int *v;
-+
-+	if (c->extended_cpuid_level < 0x80000004)
-+		return 0;
-+
-+	v = (unsigned int *) c->x86_model_id;
-+	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
-+	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
-+	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
-+	c->x86_model_id[48] = 0;
-+	return 1;
-+}
-+
-+
-+static void __init display_cacheinfo(struct cpuinfo_x86 *c)
-+{
-+	unsigned int n, dummy, eax, ebx, ecx, edx;
-+
-+	n = c->extended_cpuid_level;
-+
-+	if (n >= 0x80000005) {
-+		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-+			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-+		c->x86_cache_size=(ecx>>24)+(edx>>24);
-+		/* On K8 L1 TLB is inclusive, so don't count it */
-+		c->x86_tlbsize = 0;
-+	}
-+
-+	if (n >= 0x80000006) {
-+		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-+		ecx = cpuid_ecx(0x80000006);
-+		c->x86_cache_size = ecx >> 16;
-+		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
-+
-+		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-+		c->x86_cache_size, ecx & 0xFF);
-+	}
-+
-+	if (n >= 0x80000007)
-+		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 
-+	if (n >= 0x80000008) {
-+		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 
-+		c->x86_virt_bits = (eax >> 8) & 0xff;
-+		c->x86_phys_bits = eax & 0xff;
-+	}
-+}
-+
-+/*
-+ * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
-+ * Assumes number of cores is a power of two.
-+ */
-+static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
-+{
-+#ifdef CONFIG_SMP
-+	int cpu = smp_processor_id();
-+	int node = 0;
-+	unsigned bits;
-+	if (c->x86_num_cores == 1)
-+		return;
-+
-+	bits = 0;
-+	while ((1 << bits) < c->x86_num_cores)
-+		bits++;
-+
-+	/* Low order bits define the core id (index of core in socket) */
-+	cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
-+	/* Convert the APIC ID into the socket ID */
-+	phys_proc_id[cpu] >>= bits;
-+
-+#ifdef CONFIG_NUMA
-+	/* When an ACPI SRAT table is available use the mappings from SRAT
-+ 	   instead. */
-+	if (acpi_numa <= 0) {
-+		node = phys_proc_id[cpu];
-+		if (!node_online(node))
-+			node = first_node(node_online_map);
-+		cpu_to_node[cpu] = node;
-+	} else {
-+		node = cpu_to_node[cpu];
-+	}
-+#endif
-+
-+	printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
-+			cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
-+#endif
-+}
-+
-+static int __init init_amd(struct cpuinfo_x86 *c)
-+{
-+	int r;
-+	int level;
-+
-+	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-+	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-+	clear_bit(0*32+31, &c->x86_capability);
-+	
-+	/* C-stepping K8? */
-+	level = cpuid_eax(1);
-+	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
-+		set_bit(X86_FEATURE_K8_C, &c->x86_capability);
-+
-+	r = get_model_name(c);
-+	if (!r) { 
-+		switch (c->x86) { 
-+		case 15:
-+			/* Should distinguish Models here, but this is only
-+			   a fallback anyways. */
-+			strcpy(c->x86_model_id, "Hammer");
-+			break; 
-+		} 
-+	} 
-+	display_cacheinfo(c);
-+
-+	if (c->extended_cpuid_level >= 0x80000008) {
-+		c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-+		if (c->x86_num_cores & (c->x86_num_cores - 1))
-+			c->x86_num_cores = 1;
-+
-+		amd_detect_cmp(c);
-+	}
-+
-+	return r;
-+}
-+
-+static void __init detect_ht(struct cpuinfo_x86 *c)
-+{
-+#ifdef CONFIG_SMP
-+	u32 	eax, ebx, ecx, edx;
-+	int 	index_msb, tmp;
-+	int 	cpu = smp_processor_id();
-+	
-+	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-+		return;
-+
-+	cpuid(1, &eax, &ebx, &ecx, &edx);
-+	smp_num_siblings = (ebx & 0xff0000) >> 16;
-+	
-+	if (smp_num_siblings == 1) {
-+		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-+	} else if (smp_num_siblings > 1) {
-+		index_msb = 31;
-+		/*
-+		 * At this point we only support two siblings per
-+		 * processor package.
-+		 */
-+		if (smp_num_siblings > NR_CPUS) {
-+			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
-+			smp_num_siblings = 1;
-+			return;
-+		}
-+		tmp = smp_num_siblings;
-+		while ((tmp & 0x80000000 ) == 0) {
-+			tmp <<=1 ;
-+			index_msb--;
-+		}
-+		if (smp_num_siblings & (smp_num_siblings - 1))
-+			index_msb++;
-+		phys_proc_id[cpu] = phys_pkg_id(index_msb);
-+		
-+		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-+		       phys_proc_id[cpu]);
-+
-+		smp_num_siblings = smp_num_siblings / c->x86_num_cores;
-+
-+		tmp = smp_num_siblings;
-+		index_msb = 31;
-+		while ((tmp & 0x80000000) == 0) {
-+			tmp <<=1 ;
-+			index_msb--;
-+		}
-+		if (smp_num_siblings & (smp_num_siblings - 1))
-+			index_msb++;
-+
-+		cpu_core_id[cpu] = phys_pkg_id(index_msb);
-+
-+		if (c->x86_num_cores > 1)
-+			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-+			       cpu_core_id[cpu]);
-+	}
-+#endif
-+}
-+
-+/*
-+ * find out the number of processor cores on the die
-+ */
-+static int __init intel_num_cpu_cores(struct cpuinfo_x86 *c)
-+{
-+	unsigned int eax;
-+
-+	if (c->cpuid_level < 4)
-+		return 1;
-+
-+	__asm__("cpuid"
-+		: "=a" (eax)
-+		: "0" (4), "c" (0)
-+		: "bx", "dx");
-+
-+	if (eax & 0x1f)
-+		return ((eax >> 26) + 1);
-+	else
-+		return 1;
-+}
-+
-+static void __init init_intel(struct cpuinfo_x86 *c)
-+{
-+	/* Cache sizes */
-+	unsigned n;
-+
-+	init_intel_cacheinfo(c);
-+	n = c->extended_cpuid_level;
-+	if (n >= 0x80000008) {
-+		unsigned eax = cpuid_eax(0x80000008);
-+		c->x86_virt_bits = (eax >> 8) & 0xff;
-+		c->x86_phys_bits = eax & 0xff;
-+	}
-+
-+	if (c->x86 == 15)
-+		c->x86_cache_alignment = c->x86_clflush_size * 2;
-+	if (c->x86 >= 15)
-+		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
-+ 	c->x86_num_cores = intel_num_cpu_cores(c);
-+}
-+
-+void __init get_cpu_vendor(struct cpuinfo_x86 *c)
-+{
-+	char *v = c->x86_vendor_id;
-+
-+	if (!strcmp(v, "AuthenticAMD"))
-+		c->x86_vendor = X86_VENDOR_AMD;
-+	else if (!strcmp(v, "GenuineIntel"))
-+		c->x86_vendor = X86_VENDOR_INTEL;
-+	else
-+		c->x86_vendor = X86_VENDOR_UNKNOWN;
-+}
-+
-+struct cpu_model_info {
-+	int vendor;
-+	int family;
-+	char *model_names[16];
-+};
-+
-+/* Do some early cpuid on the boot CPU to get some parameters that are
-+   needed before check_bugs. Everything advanced is in identify_cpu
-+   below. */
-+void __init early_identify_cpu(struct cpuinfo_x86 *c)
-+{
-+	u32 tfms;
-+
-+	c->loops_per_jiffy = loops_per_jiffy;
-+	c->x86_cache_size = -1;
-+	c->x86_vendor = X86_VENDOR_UNKNOWN;
-+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-+	c->x86_vendor_id[0] = '\0'; /* Unset */
-+	c->x86_model_id[0] = '\0';  /* Unset */
-+	c->x86_clflush_size = 64;
-+	c->x86_cache_alignment = c->x86_clflush_size;
-+	c->x86_num_cores = 1;
-+	c->extended_cpuid_level = 0;
-+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-+
-+	/* Get vendor name */
-+	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-+	      (unsigned int *)&c->x86_vendor_id[0],
-+	      (unsigned int *)&c->x86_vendor_id[8],
-+	      (unsigned int *)&c->x86_vendor_id[4]);
-+		
-+	get_cpu_vendor(c);
-+
-+	/* Initialize the standard set of capabilities */
-+	/* Note that the vendor-specific code below might override */
-+
-+	/* Intel-defined flags: level 0x00000001 */
-+	if (c->cpuid_level >= 0x00000001) {
-+		__u32 misc;
-+		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-+		      &c->x86_capability[0]);
-+		c->x86 = (tfms >> 8) & 0xf;
-+		c->x86_model = (tfms >> 4) & 0xf;
-+		c->x86_mask = tfms & 0xf;
-+		if (c->x86 == 0xf) {
-+			c->x86 += (tfms >> 20) & 0xff;
-+			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-+		} 
-+		if (c->x86_capability[0] & (1<<19)) 
-+			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-+	} else {
-+		/* Have CPUID level 0 only - unheard of */
-+		c->x86 = 4;
-+	}
-+
-+#ifdef CONFIG_SMP
-+	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
-+#endif
-+}
-+
-+/*
-+ * This does the hard work of actually picking apart the CPU stuff...
-+ */
-+void __init identify_cpu(struct cpuinfo_x86 *c)
-+{
-+	int i;
-+	u32 xlvl;
-+
-+	early_identify_cpu(c);
-+
-+	/* AMD-defined flags: level 0x80000001 */
-+	xlvl = cpuid_eax(0x80000000);
-+	c->extended_cpuid_level = xlvl;
-+	if ((xlvl & 0xffff0000) == 0x80000000) {
-+		if (xlvl >= 0x80000001) {
-+			c->x86_capability[1] = cpuid_edx(0x80000001);
-+			c->x86_capability[6] = cpuid_ecx(0x80000001);
-+		}
-+		if (xlvl >= 0x80000004)
-+			get_model_name(c); /* Default name */
-+	}
-+
-+	/* Transmeta-defined flags: level 0x80860001 */
-+	xlvl = cpuid_eax(0x80860000);
-+	if ((xlvl & 0xffff0000) == 0x80860000) {
-+		/* Don't set x86_cpuid_level here for now to not confuse. */
-+		if (xlvl >= 0x80860001)
-+			c->x86_capability[2] = cpuid_edx(0x80860001);
-+	}
-+
-+	/*
-+	 * Vendor-specific initialization.  In this section we
-+	 * canonicalize the feature flags, meaning if there are
-+	 * features a certain CPU supports which CPUID doesn't
-+	 * tell us, CPUID claiming incorrect flags, or other bugs,
-+	 * we handle them here.
-+	 *
-+	 * At the end of this section, c->x86_capability better
-+	 * indicate the features this CPU genuinely supports!
-+	 */
-+	switch (c->x86_vendor) {
-+	case X86_VENDOR_AMD:
-+		init_amd(c);
-+		break;
-+
-+	case X86_VENDOR_INTEL:
-+		init_intel(c);
-+		break;
-+
-+	case X86_VENDOR_UNKNOWN:
-+	default:
-+		display_cacheinfo(c);
-+		break;
-+	}
-+
-+	select_idle_routine(c);
-+	detect_ht(c); 
-+
-+	/*
-+	 * On SMP, boot_cpu_data holds the common feature set between
-+	 * all CPUs; so make sure that we indicate which features are
-+	 * common between the CPUs.  The first time this routine gets
-+	 * executed, c == &boot_cpu_data.
-+	 */
-+	if (c != &boot_cpu_data) {
-+		/* AND the already accumulated flags with these */
-+		for (i = 0 ; i < NCAPINTS ; i++)
-+			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
-+	}
-+
-+#ifdef CONFIG_X86_MCE
-+	mcheck_init(c);
-+#endif
-+#ifdef CONFIG_NUMA
-+	if (c != &boot_cpu_data)
-+		numa_add_cpu(c - cpu_data);
-+#endif
-+}
-+ 
-+
-+void __init print_cpu_info(struct cpuinfo_x86 *c)
-+{
-+	if (c->x86_model_id[0])
-+		printk("%s", c->x86_model_id);
-+
-+	if (c->x86_mask || c->cpuid_level >= 0) 
-+		printk(" stepping %02x\n", c->x86_mask);
-+	else
-+		printk("\n");
-+}
-+
-+/*
-+ *	Get CPU information for use by the procfs.
-+ */
-+
-+static int show_cpuinfo(struct seq_file *m, void *v)
-+{
-+	struct cpuinfo_x86 *c = v;
-+
-+	/* 
-+	 * These flag bits must match the definitions in <asm/cpufeature.h>.
-+	 * NULL means this bit is undefined or reserved; either way it doesn't
-+	 * have meaning as far as Linux is concerned.  Note that it's important
-+	 * to realize there is a difference between this table and CPUID -- if
-+	 * applications want to get the raw CPUID data, they should access
-+	 * /dev/cpu/<cpu_nr>/cpuid instead.
-+	 */
-+	static char *x86_cap_flags[] = {
-+		/* Intel-defined */
-+	        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
-+	        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
-+	        "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
-+	        "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
-+
-+		/* AMD-defined */
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
-+		NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
-+
-+		/* Transmeta-defined */
-+		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+		/* Other (Linux-defined) */
-+		"cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
-+		"constant_tsc", NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+		/* Intel-defined (#2) */
-+		"pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
-+		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+		/* VIA/Cyrix/Centaur-defined */
-+		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+
-+		/* AMD-defined (#2) */
-+		"lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-+	};
-+	static char *x86_power_flags[] = { 
-+		"ts",	/* temperature sensor */
-+		"fid",  /* frequency id control */
-+		"vid",  /* voltage id control */
-+		"ttp",  /* thermal trip */
-+		"tm",
-+		"stc"
-+	};
-+
-+
-+#ifdef CONFIG_SMP
-+	if (!cpu_online(c-cpu_data))
-+		return 0;
-+#endif
-+
-+	seq_printf(m,"processor\t: %u\n"
-+		     "vendor_id\t: %s\n"
-+		     "cpu family\t: %d\n"
-+		     "model\t\t: %d\n"
-+		     "model name\t: %s\n",
-+		     (unsigned)(c-cpu_data),
-+		     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
-+		     c->x86,
-+		     (int)c->x86_model,
-+		     c->x86_model_id[0] ? c->x86_model_id : "unknown");
-+	
-+	if (c->x86_mask || c->cpuid_level >= 0)
-+		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
-+	else
-+		seq_printf(m, "stepping\t: unknown\n");
-+	
-+	if (cpu_has(c,X86_FEATURE_TSC)) {
-+		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
-+			     cpu_khz / 1000, (cpu_khz % 1000));
-+	}
-+
-+	/* Cache size */
-+	if (c->x86_cache_size >= 0) 
-+		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
-+	
-+#ifdef CONFIG_SMP
-+	if (smp_num_siblings * c->x86_num_cores > 1) {
-+		int cpu = c - cpu_data;
-+		seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
-+		seq_printf(m, "siblings\t: %d\n",
-+				c->x86_num_cores * smp_num_siblings);
-+		seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
-+		seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
-+	}
-+#endif	
-+
-+	seq_printf(m,
-+	        "fpu\t\t: yes\n"
-+	        "fpu_exception\t: yes\n"
-+	        "cpuid level\t: %d\n"
-+	        "wp\t\t: yes\n"
-+	        "flags\t\t:",
-+		   c->cpuid_level);
-+
-+	{ 
-+		int i; 
-+		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-+			if ( test_bit(i, &c->x86_capability) &&
-+			     x86_cap_flags[i] != NULL )
-+				seq_printf(m, " %s", x86_cap_flags[i]);
-+	}
-+		
-+	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
-+		   c->loops_per_jiffy/(500000/HZ),
-+		   (c->loops_per_jiffy/(5000/HZ)) % 100);
-+
-+	if (c->x86_tlbsize > 0) 
-+		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
-+	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
-+	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
-+
-+	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 
-+		   c->x86_phys_bits, c->x86_virt_bits);
-+
-+	seq_printf(m, "power management:");
-+	{
-+		unsigned i;
-+		for (i = 0; i < 32; i++) 
-+			if (c->x86_power & (1 << i)) {
-+				if (i < ARRAY_SIZE(x86_power_flags))
-+					seq_printf(m, " %s", x86_power_flags[i]);
-+				else
-+					seq_printf(m, " [%d]", i);
-+			}
-+	}
-+
-+	seq_printf(m, "\n\n");
-+
-+	return 0;
-+}
-+
-+static void *c_start(struct seq_file *m, loff_t *pos)
-+{
-+	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
-+}
-+
-+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-+{
-+	++*pos;
-+	return c_start(m, pos);
-+}
-+
-+static void c_stop(struct seq_file *m, void *v)
-+{
-+}
-+
-+struct seq_operations cpuinfo_op = {
-+	.start =c_start,
-+	.next =	c_next,
-+	.stop =	c_stop,
-+	.show =	show_cpuinfo,
-+};
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/signal.c linux-2.6.12-xen/arch/xen/x86_64/kernel/signal.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/signal.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/signal.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,493 @@
-+/*
-+ *  linux/arch/x86_64/kernel/signal.c
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
-+ *
-+ *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
-+ *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
-+ *  2000-2002   x86-64 support by Andi Kleen
-+ * 
-+ *  $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $
-+ */
-+
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/kernel.h>
-+#include <linux/signal.h>
-+#include <linux/errno.h>
-+#include <linux/wait.h>
-+#include <linux/ptrace.h>
-+#include <linux/unistd.h>
-+#include <linux/stddef.h>
-+#include <linux/personality.h>
-+#include <linux/compiler.h>
-+#include <asm/ucontext.h>
-+#include <asm/uaccess.h>
-+#include <asm/i387.h>
-+#include <asm/proto.h>
-+#include <asm/ia32_unistd.h>
-+
-+/* #define DEBUG_SIG 1 */
-+
-+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-+
-+void ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-+               sigset_t *set, struct pt_regs * regs); 
-+void ia32_setup_frame(int sig, struct k_sigaction *ka,
-+            sigset_t *set, struct pt_regs * regs); 
-+
-+asmlinkage long
-+sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
-+{
-+	sigset_t saveset, newset;
-+
-+	/* XXX: Don't preclude handling different sized sigset_t's.  */
-+	if (sigsetsize != sizeof(sigset_t))
-+		return -EINVAL;
-+
-+	if (copy_from_user(&newset, unewset, sizeof(newset)))
-+		return -EFAULT;
-+	sigdelsetmask(&newset, ~_BLOCKABLE);
-+
-+	spin_lock_irq(&current->sighand->siglock);
-+	saveset = current->blocked;
-+	current->blocked = newset;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
-+#ifdef DEBUG_SIG
-+	printk("rt_sigsuspend savset(%lx) newset(%lx) regs(%p) rip(%lx)\n",
-+		saveset, newset, regs, regs->rip);
-+#endif 
-+	regs->rax = -EINTR;
-+	while (1) {
-+		current->state = TASK_INTERRUPTIBLE;
-+		schedule();
-+		if (do_signal(regs, &saveset))
-+			return -EINTR;
-+	}
-+}
-+
-+asmlinkage long
-+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
-+		struct pt_regs *regs)
-+{
-+	return do_sigaltstack(uss, uoss, regs->rsp);
-+}
-+
-+
-+/*
-+ * Do a signal return; undo the signal stack.
-+ */
-+
-+struct rt_sigframe
-+{
-+	char __user *pretcode;
-+	struct ucontext uc;
-+	struct siginfo info;
-+};
-+
-+static int
-+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned long *prax)
-+{
-+	unsigned int err = 0;
-+
-+	/* Always make any pending restarted system calls return -EINTR */
-+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
-+
-+#define COPY(x)		err |= __get_user(regs->x, &sc->x)
-+
-+	COPY(rdi); COPY(rsi); COPY(rbp); COPY(rsp); COPY(rbx);
-+	COPY(rdx); COPY(rcx); COPY(rip);
-+	COPY(r8);
-+	COPY(r9);
-+	COPY(r10);
-+	COPY(r11);
-+	COPY(r12);
-+	COPY(r13);
-+	COPY(r14);
-+	COPY(r15);
-+
-+	{
-+		unsigned int tmpflags;
-+		err |= __get_user(tmpflags, &sc->eflags);
-+		regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
-+		regs->orig_rax = -1;		/* disable syscall checks */
-+	}
-+
-+	{
-+		struct _fpstate __user * buf;
-+		err |= __get_user(buf, &sc->fpstate);
-+
-+		if (buf) {
-+			if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
-+				goto badframe;
-+			err |= restore_i387(buf);
-+		} else {
-+			struct task_struct *me = current;
-+			if (used_math()) {
-+				clear_fpu(me);
-+				clear_used_math();
-+			}
-+		}
-+	}
-+
-+	err |= __get_user(*prax, &sc->rax);
-+	return err;
-+
-+badframe:
-+	return 1;
-+}
-+
-+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
-+{
-+	struct rt_sigframe __user *frame;
-+	sigset_t set;
-+	unsigned long eax;
-+
-+	frame = (struct rt_sigframe __user *)(regs->rsp - 8);
-+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) {
-+		goto badframe;
-+	} 
-+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) { 
-+		goto badframe;
-+	} 
-+
-+	sigdelsetmask(&set, ~_BLOCKABLE);
-+	spin_lock_irq(&current->sighand->siglock);
-+	current->blocked = set;
-+	recalc_sigpending();
-+	spin_unlock_irq(&current->sighand->siglock);
-+	
-+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &eax))
-+		goto badframe;
-+
-+#ifdef DEBUG_SIG
-+	printk("%d sigreturn rip:%lx rsp:%lx frame:%p rax:%lx\n",current->pid,regs.rip,regs.rsp,frame,eax);
-+#endif
-+
-+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->rsp) == -EFAULT)
-+		goto badframe;
-+
-+	return eax;
-+
-+badframe:
-+	signal_fault(regs,frame,"sigreturn");
-+	return 0;
-+}	
-+
-+/*
-+ * Set up a signal frame.
-+ */
-+
-+static inline int
-+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me)
-+{
-+	int err = 0;
-+
-+	err |= __put_user(0, &sc->gs);
-+	err |= __put_user(0, &sc->fs);
-+
-+	err |= __put_user(regs->rdi, &sc->rdi);
-+	err |= __put_user(regs->rsi, &sc->rsi);
-+	err |= __put_user(regs->rbp, &sc->rbp);
-+	err |= __put_user(regs->rsp, &sc->rsp);
-+	err |= __put_user(regs->rbx, &sc->rbx);
-+	err |= __put_user(regs->rdx, &sc->rdx);
-+	err |= __put_user(regs->rcx, &sc->rcx);
-+	err |= __put_user(regs->rax, &sc->rax);
-+	err |= __put_user(regs->r8, &sc->r8);
-+	err |= __put_user(regs->r9, &sc->r9);
-+	err |= __put_user(regs->r10, &sc->r10);
-+	err |= __put_user(regs->r11, &sc->r11);
-+	err |= __put_user(regs->r12, &sc->r12);
-+	err |= __put_user(regs->r13, &sc->r13);
-+	err |= __put_user(regs->r14, &sc->r14);
-+	err |= __put_user(regs->r15, &sc->r15);
-+	err |= __put_user(me->thread.trap_no, &sc->trapno);
-+	err |= __put_user(me->thread.error_code, &sc->err);
-+	err |= __put_user(regs->rip, &sc->rip);
-+	err |= __put_user(regs->eflags, &sc->eflags);
-+	err |= __put_user(mask, &sc->oldmask);
-+	err |= __put_user(me->thread.cr2, &sc->cr2);
-+
-+	return err;
-+}
-+
-+/*
-+ * Determine which stack to use..
-+ */
-+
-+static void __user *
-+get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
-+{
-+	unsigned long rsp;
-+
-+	/* Default to using normal stack - redzone*/
-+	rsp = regs->rsp - 128;
-+
-+	/* This is the X/Open sanctioned signal stack switching.  */
-+	/* RED-PEN: redzone on that stack? */
-+	if (ka->sa.sa_flags & SA_ONSTACK) {
-+		if (sas_ss_flags(rsp) == 0)
-+			rsp = current->sas_ss_sp + current->sas_ss_size;
-+	}
-+
-+	return (void __user *)round_down(rsp - size, 16); 
-+}
-+
-+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-+			   sigset_t *set, struct pt_regs * regs)
-+{
-+	struct rt_sigframe __user *frame;
-+	struct _fpstate __user *fp = NULL; 
-+	int err = 0;
-+	struct task_struct *me = current;
-+
-+	if (used_math()) {
-+		fp = get_stack(ka, regs, sizeof(struct _fpstate)); 
-+		frame = (void __user *)round_down(
-+			(unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
-+
-+		if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
-+			goto give_sigsegv;
-+
-+		if (save_i387(fp) < 0) 
-+			err |= -1; 
-+	} else
-+		frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
-+
-+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-+		goto give_sigsegv;
-+
-+	if (ka->sa.sa_flags & SA_SIGINFO) { 
-+		err |= copy_siginfo_to_user(&frame->info, info);
-+		if (err)
-+			goto give_sigsegv;
-+	}
-+		
-+	/* Create the ucontext.  */
-+	err |= __put_user(0, &frame->uc.uc_flags);
-+	err |= __put_user(0, &frame->uc.uc_link);
-+	err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-+	err |= __put_user(sas_ss_flags(regs->rsp),
-+			  &frame->uc.uc_stack.ss_flags);
-+	err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-+	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
-+	err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
-+	if (sizeof(*set) == 16) { 
-+		__put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
-+		__put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 
-+	} else
-+		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-+
-+	/* Set up to return from userspace.  If provided, use a stub
-+	   already in userspace.  */
-+	/* x86-64 should always use SA_RESTORER. */
-+	if (ka->sa.sa_flags & SA_RESTORER) {
-+		err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
-+	} else {
-+		/* could use a vstub here */
-+		goto give_sigsegv; 
-+	}
-+
-+	if (err)
-+		goto give_sigsegv;
-+
-+#ifdef DEBUG_SIG
-+	printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax);
-+#endif
-+
-+	/* Set up registers for signal handler */
-+	{ 
-+		struct exec_domain *ed = current_thread_info()->exec_domain;
-+		if (unlikely(ed && ed->signal_invmap && sig < 32))
-+			sig = ed->signal_invmap[sig];
-+	} 
-+	regs->rdi = sig;
-+	/* In case the signal handler was declared without prototypes */ 
-+	regs->rax = 0;	
-+
-+	/* This also works for non SA_SIGINFO handlers because they expect the
-+	   next argument after the signal number on the stack. */
-+	regs->rsi = (unsigned long)&frame->info; 
-+	regs->rdx = (unsigned long)&frame->uc; 
-+	regs->rip = (unsigned long) ka->sa.sa_handler;
-+
-+	regs->rsp = (unsigned long)frame;
-+
-+	set_fs(USER_DS);
-+	regs->eflags &= ~TF_MASK;
-+	if (test_thread_flag(TIF_SINGLESTEP))
-+		ptrace_notify(SIGTRAP);
-+#ifdef DEBUG_SIG
-+	printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
-+		current->comm, current->pid, frame, regs->rip, frame->pretcode);
-+#endif
-+
-+	return;
-+
-+give_sigsegv:
-+	force_sigsegv(sig, current);
-+}
-+
-+/*
-+ * OK, we're invoking a handler
-+ */	
-+
-+static void
-+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-+		sigset_t *oldset, struct pt_regs *regs)
-+{
-+#ifdef DEBUG_SIG
-+	printk("handle_signal pid:%d sig:%lu rip:%lx rsp:%lx regs=%p\n",
-+		current->pid, sig,
-+		regs->rip, regs->rsp, regs);
-+#endif
-+
-+	/* Are we from a system call? */
-+	if ((long)regs->orig_rax >= 0) {
-+		/* If so, check system call restarting.. */
-+		switch (regs->rax) {
-+		        case -ERESTART_RESTARTBLOCK:
-+			case -ERESTARTNOHAND:
-+				regs->rax = -EINTR;
-+				break;
-+
-+			case -ERESTARTSYS:
-+				if (!(ka->sa.sa_flags & SA_RESTART)) {
-+					regs->rax = -EINTR;
-+					break;
-+				}
-+				/* fallthrough */
-+			case -ERESTARTNOINTR:
-+				regs->rax = regs->orig_rax;
-+				regs->rip -= 2;
-+				break;
-+		}
-+	}
-+
-+	/*
-+	 * If TF is set due to a debugger (PT_DTRACE), clear the TF
-+	 * flag so that register information in the sigcontext is
-+	 * correct.
-+	 */
-+	if (unlikely(regs->eflags & TF_MASK)) {
-+		if (likely(current->ptrace & PT_DTRACE)) {
-+			current->ptrace &= ~PT_DTRACE;
-+			regs->eflags &= ~TF_MASK;
-+		}
-+	}
-+
-+#ifdef CONFIG_IA32_EMULATION
-+	if (test_thread_flag(TIF_IA32)) {
-+		if (ka->sa.sa_flags & SA_SIGINFO)
-+			ia32_setup_rt_frame(sig, ka, info, oldset, regs);
-+		else
-+			ia32_setup_frame(sig, ka, oldset, regs);
-+	} else 
-+#endif
-+	setup_rt_frame(sig, ka, info, oldset, regs);
-+
-+	if (!(ka->sa.sa_flags & SA_NODEFER)) {
-+		spin_lock_irq(&current->sighand->siglock);
-+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
-+		sigaddset(&current->blocked,sig);
-+		recalc_sigpending();
-+		spin_unlock_irq(&current->sighand->siglock);
-+	}
-+}
-+
-+/*
-+ * Note that 'init' is a special process: it doesn't get signals it doesn't
-+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
-+ * mistake.
-+ */
-+int do_signal(struct pt_regs *regs, sigset_t *oldset)
-+{
-+	struct k_sigaction ka;
-+	siginfo_t info;
-+	int signr;
-+
-+	/*
-+	 * We want the common case to go fast, which
-+	 * is why we may in certain cases get here from
-+	 * kernel mode. Just return without doing anything
-+	 * if so.
-+	 */
-+	if ((regs->cs & 2) != 2)
-+		return 1;
-+
-+	if (try_to_freeze(0))
-+		goto no_signal;
-+
-+	if (!oldset)
-+		oldset = &current->blocked;
-+
-+	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-+	if (signr > 0) {
-+		/* Reenable any watchpoints before delivering the
-+		 * signal to user space. The processor register will
-+		 * have been cleared if the watchpoint triggered
-+		 * inside the kernel.
-+		 */
-+                if (current->thread.debugreg7) {
-+                        HYPERVISOR_set_debugreg(7,
-+						current->thread.debugreg7);
-+		}
-+
-+		/* Whee!  Actually deliver the signal.  */
-+		handle_signal(signr, &info, &ka, oldset, regs);
-+		return 1;
-+	}
-+
-+ no_signal:
-+	/* Did we come from a system call? */
-+	if ((long)regs->orig_rax >= 0) {
-+		/* Restart the system call - no handlers present */
-+		long res = regs->rax;
-+		if (res == -ERESTARTNOHAND ||
-+		    res == -ERESTARTSYS ||
-+		    res == -ERESTARTNOINTR) {
-+			regs->rax = regs->orig_rax;
-+			regs->rip -= 2;
-+		}
-+		if (regs->rax == (unsigned long)-ERESTART_RESTARTBLOCK) {
-+			regs->rax = test_thread_flag(TIF_IA32) ?
-+					__NR_ia32_restart_syscall :
-+					__NR_restart_syscall;
-+			regs->rip -= 2;
-+		}
-+	}
-+	return 0;
-+}
-+
-+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, __u32 thread_info_flags)
-+{
-+#ifdef DEBUG_SIG
-+	printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%lx pending:%lx\n",
-+	       thread_info_flags, regs->rip, regs->rsp, __builtin_return_address(0),signal_pending(current)); 
-+#endif
-+	       
-+	/* Pending single-step? */
-+	if (thread_info_flags & _TIF_SINGLESTEP) {
-+		regs->eflags |= TF_MASK;
-+		clear_thread_flag(TIF_SINGLESTEP);
-+	}
-+
-+	/* deal with pending signal delivery */
-+	if (thread_info_flags & _TIF_SIGPENDING)
-+		do_signal(regs,oldset);
-+}
-+
-+void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
-+{ 
-+	struct task_struct *me = current; 
-+	if (exception_trace)
-+		printk("%s[%d] bad frame in %s frame:%p rip:%lx rsp:%lx orax:%lx\n",
-+	       me->comm,me->pid,where,frame,regs->rip,regs->rsp,regs->orig_rax); 
-+
-+	force_sig(SIGSEGV, me); 
-+} 
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/smp.c linux-2.6.12-xen/arch/xen/x86_64/kernel/smp.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/smp.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/smp.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,493 @@
-+/*
-+ *	Intel SMP support routines.
-+ *
-+ *	(c) 1995 Alan Cox, Building #3 <alan at redhat.com>
-+ *	(c) 1998-99, 2000 Ingo Molnar <mingo at redhat.com>
-+ *      (c) 2002,2003 Andi Kleen, SuSE Labs.
-+ *
-+ *	This code is released under the GNU General Public License version 2 or
-+ *	later.
-+ */
-+
-+#include <linux/init.h>
-+
-+#include <linux/mm.h>
-+#include <linux/irq.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/smp_lock.h>
-+#include <linux/smp.h>
-+#include <linux/kernel_stat.h>
-+#include <linux/mc146818rtc.h>
-+#include <linux/interrupt.h>
-+
-+#include <asm/mtrr.h>
-+#include <asm/pgalloc.h>
-+#include <asm/tlbflush.h>
-+#include <asm/mach_apic.h>
-+#include <asm/mmu_context.h>
-+#include <asm/proto.h>
-+#include <asm/apicdef.h>
-+#ifdef CONFIG_XEN
-+#include <asm-xen/evtchn.h>
-+#endif
-+
-+#ifndef CONFIG_XEN
-+/*
-+ *	Smarter SMP flushing macros. 
-+ *		c/o Linus Torvalds.
-+ *
-+ *	These mean you can really definitely utterly forget about
-+ *	writing to user space from interrupts. (It's not allowed anyway).
-+ *
-+ *	Optimizations Manfred Spraul <manfred at colorfullife.com>
-+ */
-+
-+static cpumask_t flush_cpumask;
-+static struct mm_struct * flush_mm;
-+static unsigned long flush_va;
-+static DEFINE_SPINLOCK(tlbstate_lock);
-+#define FLUSH_ALL	-1ULL
-+#endif
-+
-+/*
-+ * We cannot call mmdrop() because we are in interrupt context, 
-+ * instead update mm->cpu_vm_mask.
-+ */
-+static inline void leave_mm (unsigned long cpu)
-+{
-+	if (read_pda(mmu_state) == TLBSTATE_OK)
-+		BUG();
-+	clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
-+	load_cr3(swapper_pg_dir);
-+}
-+
-+#ifndef CONFIG_XEN
-+/*
-+ *
-+ * The flush IPI assumes that a thread switch happens in this order:
-+ * [cpu0: the cpu that switches]
-+ * 1) switch_mm() either 1a) or 1b)
-+ * 1a) thread switch to a different mm
-+ * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
-+ * 	Stop ipi delivery for the old mm. This is not synchronized with
-+ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
-+ * 	for the wrong mm, and in the worst case we perform a superfluous
-+ * 	tlb flush.
-+ * 1a2) set cpu mmu_state to TLBSTATE_OK
-+ * 	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
-+ *	was in lazy tlb mode.
-+ * 1a3) update cpu active_mm
-+ * 	Now cpu0 accepts tlb flushes for the new mm.
-+ * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
-+ * 	Now the other cpus will send tlb flush ipis.
-+ * 1a5) change cr3.
-+ * 1b) thread switch without mm change
-+ *	cpu active_mm is correct, cpu0 already handles
-+ *	flush ipis.
-+ * 1b1) set cpu mmu_state to TLBSTATE_OK
-+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
-+ * 	Atomically set the bit [other cpus will start sending flush ipis],
-+ * 	and test the bit.
-+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
-+ * 2) switch %%esp, ie current
-+ *
-+ * The interrupt must handle 2 special cases:
-+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
-+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
-+ *   runs in kernel space, the cpu could load tlb entries for user space
-+ *   pages.
-+ *
-+ * The good news is that cpu mmu_state is local to each cpu, no
-+ * write/read ordering problems.
-+ */
-+
-+/*
-+ * TLB flush IPI:
-+ *
-+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
-+ * 2) Leave the mm if we are in the lazy tlb mode.
-+ */
-+
-+asmlinkage void smp_invalidate_interrupt (void)
-+{
-+	unsigned long cpu;
-+
-+	cpu = get_cpu();
-+
-+	if (!cpu_isset(cpu, flush_cpumask))
-+		goto out;
-+		/* 
-+		 * This was a BUG() but until someone can quote me the
-+		 * line from the intel manual that guarantees an IPI to
-+		 * multiple CPUs is retried _only_ on the erroring CPUs
-+		 * it's staying as a return
-+		 *
-+		 * BUG();
-+		 */
-+		 
-+	if (flush_mm == read_pda(active_mm)) {
-+		if (read_pda(mmu_state) == TLBSTATE_OK) {
-+			if (flush_va == FLUSH_ALL)
-+				local_flush_tlb();
-+			else
-+				__flush_tlb_one(flush_va);
-+		} else
-+			leave_mm(cpu);
-+	}
-+	ack_APIC_irq();
-+	cpu_clear(cpu, flush_cpumask);
-+
-+out:
-+	put_cpu_no_resched();
-+}
-+
-+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
-+						unsigned long va)
-+{
-+	cpumask_t tmp;
-+	/*
-+	 * A couple of (to be removed) sanity checks:
-+	 *
-+	 * - we do not send IPIs to not-yet booted CPUs.
-+	 * - current CPU must not be in mask
-+	 * - mask must exist :)
-+	 */
-+	BUG_ON(cpus_empty(cpumask));
-+	cpus_and(tmp, cpumask, cpu_online_map);
-+	BUG_ON(!cpus_equal(tmp, cpumask));
-+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-+	if (!mm)
-+		BUG();
-+
-+	/*
-+	 * I'm not happy about this global shared spinlock in the
-+	 * MM hot path, but we'll see how contended it is.
-+	 * Temporarily this turns IRQs off, so that lockups are
-+	 * detected by the NMI watchdog.
-+	 */
-+	spin_lock(&tlbstate_lock);
-+	
-+	flush_mm = mm;
-+	flush_va = va;
-+	cpus_or(flush_cpumask, cpumask, flush_cpumask);
-+
-+	/*
-+	 * We have to send the IPI only to
-+	 * CPUs affected.
-+	 */
-+	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
-+
-+	while (!cpus_empty(flush_cpumask))
-+		mb();	/* nothing. lockup detection does not belong here */;
-+
-+	flush_mm = NULL;
-+	flush_va = 0;
-+	spin_unlock(&tlbstate_lock);
-+}
-+	
-+void flush_tlb_current_task(void)
-+{
-+	struct mm_struct *mm = current->mm;
-+	cpumask_t cpu_mask;
-+
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
-+
-+	local_flush_tlb();
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+	preempt_enable();
-+}
-+
-+void flush_tlb_mm (struct mm_struct * mm)
-+{
-+	cpumask_t cpu_mask;
-+
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
-+
-+	if (current->active_mm == mm) {
-+		if (current->mm)
-+			local_flush_tlb();
-+		else
-+			leave_mm(smp_processor_id());
-+	}
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-+
-+	preempt_enable();
-+}
-+
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{
-+	struct mm_struct *mm = vma->vm_mm;
-+	cpumask_t cpu_mask;
-+
-+	preempt_disable();
-+	cpu_mask = mm->cpu_vm_mask;
-+	cpu_clear(smp_processor_id(), cpu_mask);
-+
-+	if (current->active_mm == mm) {
-+		if(current->mm)
-+			__flush_tlb_one(va);
-+		 else
-+		 	leave_mm(smp_processor_id());
-+	}
-+
-+	if (!cpus_empty(cpu_mask))
-+		flush_tlb_others(cpu_mask, mm, va);
-+
-+	preempt_enable();
-+}
-+
-+static void do_flush_tlb_all(void* info)
-+{
-+	unsigned long cpu = smp_processor_id();
-+
-+	__flush_tlb_all();
-+	if (read_pda(mmu_state) == TLBSTATE_LAZY)
-+		leave_mm(cpu);
-+}
-+
-+void flush_tlb_all(void)
-+{
-+	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
-+}
-+#else
-+asmlinkage void smp_invalidate_interrupt (void)
-+{ return; }
-+void flush_tlb_current_task(void)
-+{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
-+void flush_tlb_mm (struct mm_struct * mm)
-+{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
-+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
-+{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
-+void flush_tlb_all(void)
-+{ xen_tlb_flush_all(); }
-+#endif /* Xen */
-+
-+void smp_kdb_stop(void)
-+{
-+	send_IPI_allbutself(KDB_VECTOR);
-+}
-+
-+/*
-+ * this function sends a 'reschedule' IPI to another CPU.
-+ * it goes straight through and wastes no time serializing
-+ * anything. Worst case is that we lose a reschedule ...
-+ */
-+
-+void smp_send_reschedule(int cpu)
-+{
-+	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
-+}
-+
-+/*
-+ * Structure and data for smp_call_function(). This is designed to minimise
-+ * static memory requirements. It also looks cleaner.
-+ */
-+static DEFINE_SPINLOCK(call_lock);
-+
-+struct call_data_struct {
-+	void (*func) (void *info);
-+	void *info;
-+	atomic_t started;
-+	atomic_t finished;
-+	int wait;
-+};
-+
-+static struct call_data_struct * call_data;
-+
-+/*
-+ * this function sends a 'generic call function' IPI to all other CPUs
-+ * in the system.
-+ */
-+static void __smp_call_function (void (*func) (void *info), void *info,
-+				int nonatomic, int wait)
-+{
-+	struct call_data_struct data;
-+	int cpus = num_online_cpus()-1;
-+
-+	if (!cpus)
-+		return;
-+
-+	data.func = func;
-+	data.info = info;
-+	atomic_set(&data.started, 0);
-+	data.wait = wait;
-+	if (wait)
-+		atomic_set(&data.finished, 0);
-+
-+	call_data = &data;
-+	wmb();
-+	/* Send a message to all other CPUs and wait for them to respond */
-+	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-+
-+	/* Wait for response */
-+	while (atomic_read(&data.started) != cpus)
-+#ifndef CONFIG_XEN
-+		cpu_relax();
-+#else
-+		barrier();
-+#endif
-+
-+	if (!wait)
-+		return;
-+
-+	while (atomic_read(&data.finished) != cpus)
-+#ifndef CONFIG_XEN
-+		cpu_relax();
-+#else
-+		barrier();
-+#endif
-+}
-+
-+/*
-+ * smp_call_function - run a function on all other CPUs.
-+ * @func: The function to run. This must be fast and non-blocking.
-+ * @info: An arbitrary pointer to pass to the function.
-+ * @nonatomic: currently unused.
-+ * @wait: If true, wait (atomically) until function has completed on other
-+ *        CPUs.
-+ *
-+ * Returns 0 on success, else a negative status code. Does not return until
-+ * remote CPUs are nearly ready to execute func or have already executed it.
-+ *
-+ * You must not call this function with disabled interrupts or from a
-+ * hardware interrupt handler or from a bottom half handler.
-+ * Actually there are a few legal cases, like panic.
-+ */
-+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-+			int wait)
-+{
-+	spin_lock(&call_lock);
-+	__smp_call_function(func,info,nonatomic,wait);
-+	spin_unlock(&call_lock);
-+	return 0;
-+}
-+
-+void smp_stop_cpu(void)
-+{
-+	/*
-+	 * Remove this CPU:
-+	 */
-+	cpu_clear(smp_processor_id(), cpu_online_map);
-+	local_irq_disable();
-+#ifndef CONFIG_XEN
-+	disable_local_APIC();
-+#endif
-+	local_irq_enable(); 
-+}
-+
-+static void smp_really_stop_cpu(void *dummy)
-+{
-+	smp_stop_cpu(); 
-+	for (;;) 
-+		asm("hlt"); 
-+} 
-+
-+void smp_send_stop(void)
-+{
-+	int nolock = 0;
-+#ifndef CONFIG_XEN
-+	if (reboot_force)
-+		return;
-+#endif
-+	/* Don't deadlock on the call lock in panic */
-+	if (!spin_trylock(&call_lock)) {
-+		/* ignore locking because we have panicked anyway */
-+		nolock = 1;
-+	}
-+	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
-+	if (!nolock)
-+		spin_unlock(&call_lock);
-+
-+	local_irq_disable();
-+#ifndef CONFIG_XEN
-+	disable_local_APIC();
-+#endif
-+	local_irq_enable();
-+}
-+
-+/*
-+ * Reschedule call back. Nothing to do,
-+ * all the work is done automatically when
-+ * we return from the interrupt.
-+ */
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_reschedule_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_reschedule_interrupt(void)
-+#endif
-+{
-+#ifndef CONFIG_XEN
-+	ack_APIC_irq();
-+#else
-+	return IRQ_HANDLED;
-+#endif
-+}
-+
-+#ifndef CONFIG_XEN
-+asmlinkage void smp_call_function_interrupt(void)
-+#else
-+asmlinkage irqreturn_t smp_call_function_interrupt(void)
-+#endif
-+{
-+	void (*func) (void *info) = call_data->func;
-+	void *info = call_data->info;
-+	int wait = call_data->wait;
-+
-+#ifndef CONFIG_XEN
-+	ack_APIC_irq();
-+#endif
-+	/*
-+	 * Notify initiating CPU that I've grabbed the data and am
-+	 * about to execute the function
-+	 */
-+	mb();
-+	atomic_inc(&call_data->started);
-+	/*
-+	 * At this point the info structure may be out of scope unless wait==1
-+	 */
-+	irq_enter();
-+	(*func)(info);
-+	irq_exit();
-+	if (wait) {
-+		mb();
-+		atomic_inc(&call_data->finished);
-+	}
-+#ifdef CONFIG_XEN
-+	return IRQ_HANDLED;
-+#endif
-+}
-+
-+int safe_smp_processor_id(void)
-+{
-+#ifdef CONFIG_XEN
-+	return smp_processor_id();
-+#else
-+	int apicid, i;
-+
-+	if (disable_apic)
-+		return 0;
-+
-+	apicid = hard_smp_processor_id();
-+	if (x86_cpu_to_apicid[apicid] == apicid)
-+		return apicid;
-+
-+	for (i = 0; i < NR_CPUS; ++i) {
-+		if (x86_cpu_to_apicid[i] == apicid)
-+			return i;
-+	}
-+
-+	/* No entries in x86_cpu_to_apicid?  Either no MPS|ACPI,
-+	 * or called too early.  Either way, we must be CPU 0. */
-+      	if (x86_cpu_to_apicid[0] == BAD_APICID)
-+		return 0;
-+
-+	return 0; /* Should not happen */
-+#endif
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/traps.c linux-2.6.12-xen/arch/xen/x86_64/kernel/traps.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/traps.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/traps.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,994 @@
-+/*
-+ *  linux/arch/x86-64/traps.c
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
-+ *
-+ *  Pentium III FXSR, SSE support
-+ *	Gareth Hughes <gareth at valinux.com>, May 2000
-+ *
-+ *  $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
-+ */
-+
-+/*
-+ * 'Traps.c' handles hardware traps and faults after we have saved some
-+ * state in 'entry.S'.
-+ */
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/ptrace.h>
-+#include <linux/timer.h>
-+#include <linux/mm.h>
-+#include <linux/init.h>
-+#include <linux/delay.h>
-+#include <linux/spinlock.h>
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/nmi.h>
-+
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/debugreg.h>
-+#include <asm/desc.h>
-+#include <asm/i387.h>
-+#include <asm/kdebug.h>
-+#include <asm/processor.h>
-+
-+#include <asm/smp.h>
-+#include <asm/pgalloc.h>
-+#include <asm/pda.h>
-+#include <asm/proto.h>
-+#include <asm/nmi.h>
-+
-+#include <linux/irq.h>
-+
-+
-+extern struct gate_struct idt_table[256]; 
-+
-+asmlinkage void divide_error(void);
-+asmlinkage void debug(void);
-+asmlinkage void nmi(void);
-+asmlinkage void int3(void);
-+asmlinkage void overflow(void);
-+asmlinkage void bounds(void);
-+asmlinkage void invalid_op(void);
-+asmlinkage void device_not_available(void);
-+asmlinkage void double_fault(void);
-+asmlinkage void coprocessor_segment_overrun(void);
-+asmlinkage void invalid_TSS(void);
-+asmlinkage void segment_not_present(void);
-+asmlinkage void stack_segment(void);
-+asmlinkage void general_protection(void);
-+asmlinkage void page_fault(void);
-+asmlinkage void coprocessor_error(void);
-+asmlinkage void simd_coprocessor_error(void);
-+asmlinkage void reserved(void);
-+asmlinkage void alignment_check(void);
-+asmlinkage void machine_check(void);
-+asmlinkage void spurious_interrupt_bug(void);
-+asmlinkage void call_debug(void);
-+
-+struct notifier_block *die_chain;
-+static DEFINE_SPINLOCK(die_notifier_lock);
-+
-+int register_die_notifier(struct notifier_block *nb)
-+{
-+	int err = 0;
-+	unsigned long flags;
-+	spin_lock_irqsave(&die_notifier_lock, flags);
-+	err = notifier_chain_register(&die_chain, nb);
-+	spin_unlock_irqrestore(&die_notifier_lock, flags);
-+	return err;
-+}
-+
-+static inline void conditional_sti(struct pt_regs *regs)
-+{
-+	if (regs->eflags & X86_EFLAGS_IF)
-+		local_irq_enable();
-+}
-+
-+static int kstack_depth_to_print = 10;
-+
-+#ifdef CONFIG_KALLSYMS
-+#include <linux/kallsyms.h> 
-+int printk_address(unsigned long address)
-+{ 
-+	unsigned long offset = 0, symsize;
-+	const char *symname;
-+	char *modname;
-+	char *delim = ":"; 
-+	char namebuf[128];
-+
-+	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); 
-+	if (!symname) 
-+		return printk("[<%016lx>]", address);
-+	if (!modname) 
-+		modname = delim = ""; 		
-+        return printk("<%016lx>{%s%s%s%s%+ld}",
-+		      address,delim,modname,delim,symname,offset); 
-+} 
-+#else
-+int printk_address(unsigned long address)
-+{ 
-+	return printk("[<%016lx>]", address);
-+} 
-+#endif
-+
-+static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-+					unsigned *usedp, const char **idp)
-+{
-+	static const char ids[N_EXCEPTION_STACKS][8] = {
-+		[DEBUG_STACK - 1] = "#DB",
-+		[NMI_STACK - 1] = "NMI",
-+		[DOUBLEFAULT_STACK - 1] = "#DF",
-+		[STACKFAULT_STACK - 1] = "#SS",
-+		[MCE_STACK - 1] = "#MC",
-+	};
-+	unsigned k;
-+
-+	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
-+		unsigned long end;
-+
-+		end = per_cpu(init_tss, cpu).ist[k];
-+		if (stack >= end)
-+			continue;
-+		if (stack >= end - EXCEPTION_STKSZ) {
-+			if (*usedp & (1U << k))
-+				break;
-+			*usedp |= 1U << k;
-+			*idp = ids[k];
-+			return (unsigned long *)end;
-+		}
-+	}
-+	return NULL;
-+}
-+
-+/*
-+ * x86-64 can have up to three kernel stacks:
-+ * process stack
-+ * interrupt stack
-+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
-+ */
-+
-+void show_trace(unsigned long *stack)
-+{
-+	unsigned long addr;
-+	const unsigned cpu = safe_smp_processor_id();
-+	unsigned long *irqstack_end = (unsigned long *)cpu_pda[cpu].irqstackptr;
-+	int i;
-+	unsigned used = 0;
-+
-+	printk("\nCall Trace:");
-+
-+#define HANDLE_STACK(cond) \
-+	do while (cond) { \
-+		addr = *stack++; \
-+		if (kernel_text_address(addr)) { \
-+			/* \
-+			 * If the address is either in the text segment of the \
-+			 * kernel, or in the region which contains vmalloc'ed \
-+			 * memory, it *may* be the address of a calling \
-+			 * routine; if so, print it so that someone tracing \
-+			 * down the cause of the crash will be able to figure \
-+			 * out the call path that was taken. \
-+			 */ \
-+			i += printk_address(addr); \
-+			if (i > 50) { \
-+				printk("\n       "); \
-+				i = 0; \
-+			} \
-+			else \
-+				i += printk(" "); \
-+		} \
-+	} while (0)
-+
-+	for(i = 0; ; ) {
-+		const char *id;
-+		unsigned long *estack_end;
-+		estack_end = in_exception_stack(cpu, (unsigned long)stack,
-+						&used, &id);
-+
-+		if (estack_end) {
-+			i += printk(" <%s> ", id);
-+			HANDLE_STACK (stack < estack_end);
-+			i += printk(" <EOE> ");
-+			stack = (unsigned long *) estack_end[-2];
-+			continue;
-+		}
-+		if (irqstack_end) {
-+			unsigned long *irqstack;
-+			irqstack = irqstack_end -
-+				(IRQSTACKSIZE - 64) / sizeof(*irqstack);
-+
-+			if (stack >= irqstack && stack < irqstack_end) {
-+				i += printk(" <IRQ> ");
-+				HANDLE_STACK (stack < irqstack_end);
-+				stack = (unsigned long *) (irqstack_end[-1]);
-+				irqstack_end = NULL;
-+				i += printk(" <EOI> ");
-+				continue;
-+			}
-+		}
-+		break;
-+	}
-+
-+	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
-+#undef HANDLE_STACK
-+	printk("\n");
-+}
-+
-+void show_stack(struct task_struct *tsk, unsigned long * rsp)
-+{
-+	unsigned long *stack;
-+	int i;
-+	const int cpu = safe_smp_processor_id();
-+	unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
-+	unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);    
-+
-+	// debugging aid: "show_stack(NULL, NULL);" prints the
-+	// back trace for this cpu.
-+
-+	if (rsp == NULL) {
-+		if (tsk)
-+			rsp = (unsigned long *)tsk->thread.rsp;
-+		else
-+			rsp = (unsigned long *)&rsp;
-+	}
-+
-+	stack = rsp;
-+	for(i=0; i < kstack_depth_to_print; i++) {
-+		if (stack >= irqstack && stack <= irqstack_end) {
-+			if (stack == irqstack_end) {
-+				stack = (unsigned long *) (irqstack_end[-1]);
-+				printk(" <EOI> ");
-+			}
-+		} else {
-+			if (((long) stack & (THREAD_SIZE-1)) == 0)
-+				break;
-+		}
-+		if (i && ((i % 4) == 0))
-+			printk("\n       ");
-+		printk("%016lx ", *stack++);
-+		touch_nmi_watchdog();
-+	}
-+	show_trace((unsigned long *)rsp);
-+}
-+
-+/*
-+ * The architecture-independent dump_stack generator
-+ */
-+void dump_stack(void)
-+{
-+	unsigned long dummy;
-+	show_trace(&dummy);
-+}
-+
-+EXPORT_SYMBOL(dump_stack);
-+
-+void show_registers(struct pt_regs *regs)
-+{
-+	int i;
-+	int in_kernel = (regs->cs & 3) == 0;
-+	unsigned long rsp;
-+	const int cpu = safe_smp_processor_id(); 
-+	struct task_struct *cur = cpu_pda[cpu].pcurrent; 
-+
-+	rsp = regs->rsp;
-+
-+	printk("CPU %d ", cpu);
-+	__show_regs(regs);
-+	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
-+		cur->comm, cur->pid, cur->thread_info, cur);
-+
-+	/*
-+	 * When in-kernel, we also print out the stack and code at the
-+	 * time of the fault..
-+	 */
-+	if (in_kernel) {
-+
-+		printk("Stack: ");
-+		show_stack(NULL, (unsigned long*)rsp);
-+
-+		printk("\nCode: ");
-+		if(regs->rip < PAGE_OFFSET)
-+			goto bad;
-+
-+		for(i=0;i<20;i++)
-+		{
-+			unsigned char c;
-+			if(__get_user(c, &((unsigned char*)regs->rip)[i])) {
-+bad:
-+				printk(" Bad RIP value.");
-+				break;
-+			}
-+			printk("%02x ", c);
-+		}
-+	}
-+	printk("\n");
-+}	
-+
-+void handle_BUG(struct pt_regs *regs)
-+{ 
-+	struct bug_frame f;
-+	char tmp;
-+
-+	if (regs->cs & 3)
-+		return; 
-+	if (__copy_from_user(&f, (struct bug_frame *) regs->rip, 
-+			     sizeof(struct bug_frame)))
-+		return; 
-+	if ((unsigned long)f.filename < __PAGE_OFFSET || 
-+	    f.ud2[0] != 0x0f || f.ud2[1] != 0x0b) 
-+		return;
-+	if (__get_user(tmp, f.filename))
-+		f.filename = "unmapped filename"; 
-+	printk("----------- [cut here ] --------- [please bite here ] ---------\n");
-+	printk(KERN_ALERT "Kernel BUG at %.50s:%d\n", f.filename, f.line);
-+} 
-+
-+#ifdef CONFIG_BUG
-+void out_of_line_bug(void)
-+{ 
-+	BUG(); 
-+} 
-+#endif
-+
-+static DEFINE_SPINLOCK(die_lock);
-+static int die_owner = -1;
-+
-+void oops_begin(void)
-+{
-+	int cpu = safe_smp_processor_id(); 
-+	/* racy, but better than risking deadlock. */ 
-+	local_irq_disable();
-+	if (!spin_trylock(&die_lock)) { 
-+		if (cpu == die_owner) 
-+			/* nested oops. should stop eventually */;
-+		else
-+			spin_lock(&die_lock); 
-+	}
-+	die_owner = cpu; 
-+	console_verbose();
-+	bust_spinlocks(1); 
-+}
-+
-+void oops_end(void)
-+{ 
-+	die_owner = -1;
-+	bust_spinlocks(0); 
-+	spin_unlock(&die_lock); 
-+	if (panic_on_oops)
-+		panic("Oops"); 
-+} 
-+
-+void __die(const char * str, struct pt_regs * regs, long err)
-+{
-+	static int die_counter;
-+	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
-+#ifdef CONFIG_PREEMPT
-+	printk("PREEMPT ");
-+#endif
-+#ifdef CONFIG_SMP
-+	printk("SMP ");
-+#endif
-+#ifdef CONFIG_DEBUG_PAGEALLOC
-+	printk("DEBUG_PAGEALLOC");
-+#endif
-+	printk("\n");
-+	notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
-+	show_registers(regs);
-+	/* Executive summary in case the oops scrolled away */
-+	printk(KERN_ALERT "RIP ");
-+	printk_address(regs->rip); 
-+	printk(" RSP <%016lx>\n", regs->rsp); 
-+}
-+
-+void die(const char * str, struct pt_regs * regs, long err)
-+{
-+	oops_begin();
-+	handle_BUG(regs);
-+	__die(str, regs, err);
-+	oops_end();
-+	do_exit(SIGSEGV); 
-+}
-+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-+{
-+	if (!(regs->eflags & VM_MASK) && (regs->cs == __KERNEL_CS))
-+		die(str, regs, err);
-+}
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+void die_nmi(char *str, struct pt_regs *regs)
-+{
-+	oops_begin();
-+	/*
-+	 * We are in trouble anyway, lets at least try
-+	 * to get a message out.
-+	 */
-+	printk(str, safe_smp_processor_id());
-+	show_registers(regs);
-+	if (panic_on_timeout || panic_on_oops)
-+		panic("nmi watchdog");
-+	printk("console shuts up ...\n");
-+	oops_end();
-+	do_exit(SIGSEGV);
-+}
-+#endif
-+
-+static void do_trap(int trapnr, int signr, char *str, 
-+			   struct pt_regs * regs, long error_code, siginfo_t *info)
-+{
-+	conditional_sti(regs);
-+
-+#ifdef CONFIG_CHECKING
-+       { 
-+               unsigned long gs; 
-+               struct x8664_pda *pda = cpu_pda + safe_smp_processor_id(); 
-+               rdmsrl(MSR_GS_BASE, gs); 
-+               if (gs != (unsigned long)pda) { 
-+                       wrmsrl(MSR_GS_BASE, pda); 
-+                       printk("%s: wrong gs %lx expected %p rip %lx\n", str, gs, pda,
-+			      regs->rip);
-+               }
-+       }
-+#endif
-+
-+	if ((regs->cs & 3)  != 0) { 
-+		struct task_struct *tsk = current;
-+
-+		if (exception_trace && unhandled_signal(tsk, signr))
-+			printk(KERN_INFO
-+			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
-+			       tsk->comm, tsk->pid, str,
-+			       regs->rip,regs->rsp,error_code); 
-+
-+		tsk->thread.error_code = error_code;
-+		tsk->thread.trap_no = trapnr;
-+		if (info)
-+			force_sig_info(signr, info, tsk);
-+		else
-+			force_sig(signr, tsk);
-+		return;
-+	}
-+
-+
-+	/* kernel trap */ 
-+	{	     
-+		const struct exception_table_entry *fixup;
-+		fixup = search_exception_tables(regs->rip);
-+		if (fixup) {
-+			regs->rip = fixup->fixup;
-+		} else	
-+			die(str, regs, error_code);
-+		return;
-+	}
-+}
-+
-+#define DO_ERROR(trapnr, signr, str, name) \
-+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+							== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, regs, error_code, NULL); \
-+}
-+
-+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
-+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
-+{ \
-+	siginfo_t info; \
-+	info.si_signo = signr; \
-+	info.si_errno = 0; \
-+	info.si_code = sicode; \
-+	info.si_addr = (void __user *)siaddr; \
-+	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
-+							== NOTIFY_STOP) \
-+		return; \
-+	do_trap(trapnr, signr, str, regs, error_code, &info); \
-+}
-+
-+DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
-+DO_ERROR( 4, SIGSEGV, "overflow", overflow)
-+DO_ERROR( 5, SIGSEGV, "bounds", bounds)
-+DO_ERROR_INFO( 6, SIGILL,  "invalid operand", invalid_op, ILL_ILLOPN, regs->rip)
-+DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
-+DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
-+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-+DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
-+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-+DO_ERROR(18, SIGSEGV, "reserved", reserved)
-+DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
-+DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
-+
-+asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
-+{
-+	conditional_sti(regs);
-+
-+#ifdef CONFIG_CHECKING
-+       { 
-+               unsigned long gs; 
-+               struct x8664_pda *pda = cpu_pda + safe_smp_processor_id(); 
-+               rdmsrl(MSR_GS_BASE, gs); 
-+               if (gs != (unsigned long)pda) { 
-+                       wrmsrl(MSR_GS_BASE, pda); 
-+		       oops_in_progress++;
-+                       printk("general protection handler: wrong gs %lx expected %p\n", gs, pda);
-+		       oops_in_progress--;
-+               }
-+       }
-+#endif
-+
-+	if ((regs->cs & 3)!=0) { 
-+		struct task_struct *tsk = current;
-+
-+		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
-+			printk(KERN_INFO
-+		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
-+			       tsk->comm, tsk->pid,
-+			       regs->rip,regs->rsp,error_code); 
-+
-+		tsk->thread.error_code = error_code;
-+		tsk->thread.trap_no = 13;
-+		force_sig(SIGSEGV, tsk);
-+		return;
-+	} 
-+
-+	/* kernel gp */
-+	{
-+		const struct exception_table_entry *fixup;
-+		fixup = search_exception_tables(regs->rip);
-+		if (fixup) {
-+			regs->rip = fixup->fixup;
-+			return;
-+		}
-+		if (notify_die(DIE_GPF, "general protection fault", regs,
-+					error_code, 13, SIGSEGV) == NOTIFY_STOP)
-+			return;
-+		die("general protection fault", regs, error_code);
-+	}
-+}
-+
-+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
-+{
-+	printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
-+	printk("You probably have a hardware problem with your RAM chips\n");
-+
-+#if 0 /* XEN */
-+	/* Clear and disable the memory parity error line. */
-+	reason = (reason & 0xf) | 4;
-+	outb(reason, 0x61);
-+#endif /* XEN */
-+}
-+
-+static void io_check_error(unsigned char reason, struct pt_regs * regs)
-+{
-+	printk("NMI: IOCK error (debug interrupt?)\n");
-+	show_registers(regs);
-+
-+#if 0 /* XEN */
-+	/* Re-enable the IOCK line, wait for a few seconds */
-+	reason = (reason & 0xf) | 8;
-+	outb(reason, 0x61);
-+	mdelay(2000);
-+	reason &= ~8;
-+	outb(reason, 0x61);
-+#endif /* XEN */
-+}
-+
-+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
-+{
-+	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
-+	printk("Dazed and confused, but trying to continue\n");
-+	printk("Do you have a strange power saving mode enabled?\n");
-+}
-+
-+/* Runs on IST stack. This code must keep interrupts off all the time.
-+   Nested NMIs are prevented by the CPU. */
-+asmlinkage void default_do_nmi(struct pt_regs *regs)
-+{
-+	unsigned char reason = 0;
-+
-+	/* Only the BSP gets external NMIs from the system.  */
-+	if (!smp_processor_id())
-+		reason = get_nmi_reason();
-+
-+	if (!(reason & 0xc0)) {
-+		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
-+								== NOTIFY_STOP)
-+			return;
-+#ifdef CONFIG_X86_LOCAL_APIC
-+		/*
-+		 * Ok, so this is none of the documented NMI sources,
-+		 * so it must be the NMI watchdog.
-+		 */
-+		if (nmi_watchdog > 0) {
-+			nmi_watchdog_tick(regs,reason);
-+			return;
-+		}
-+#endif
-+		unknown_nmi_error(reason, regs);
-+		return;
-+	}
-+	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
-+		return; 
-+
-+	/* AK: following checks seem to be broken on modern chipsets. FIXME */
-+
-+	if (reason & 0x80)
-+		mem_parity_error(reason, regs);
-+	if (reason & 0x40)
-+		io_check_error(reason, regs);
-+}
-+
-+asmlinkage void do_int3(struct pt_regs * regs, long error_code)
-+{
-+	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
-+		return;
-+	}
-+	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
-+	return;
-+}
-+
-+/* Help handler running on IST stack to switch back to user stack
-+   for scheduling or signal handling. The actual stack switch is done in
-+   entry.S */
-+asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
-+{
-+	struct pt_regs *regs = eregs;
-+	/* Did already sync */
-+	if (eregs == (struct pt_regs *)eregs->rsp)
-+		;
-+	/* Exception from user space */
-+	else if (eregs->cs & 3)
-+		regs = ((struct pt_regs *)current->thread.rsp0) - 1;
-+	/* Exception from kernel and interrupts are enabled. Move to
-+ 	   kernel process stack. */
-+	else if (eregs->eflags & X86_EFLAGS_IF)
-+		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
-+	if (eregs != regs)
-+		*regs = *eregs;
-+	return regs;
-+}
-+
-+/* runs on IST stack. */
-+asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code)
-+{
-+	unsigned long condition;
-+	struct task_struct *tsk = current;
-+	siginfo_t info;
-+
-+#ifdef CONFIG_CHECKING
-+       { 
-+	       /* RED-PEN interaction with debugger - could destroy gs */
-+               unsigned long gs; 
-+               struct x8664_pda *pda = cpu_pda + safe_smp_processor_id(); 
-+               rdmsrl(MSR_GS_BASE, gs); 
-+               if (gs != (unsigned long)pda) { 
-+                       wrmsrl(MSR_GS_BASE, pda); 
-+                       printk("debug handler: wrong gs %lx expected %p\n", gs, pda);
-+               }
-+       }
-+#endif
-+
-+	asm("movq %%db6,%0" : "=r" (condition));
-+
-+	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
-+						SIGTRAP) == NOTIFY_STOP)
-+		return;
-+
-+	conditional_sti(regs);
-+
-+	/* Mask out spurious debug traps due to lazy DR7 setting */
-+	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
-+		if (!tsk->thread.debugreg7) { 
-+			goto clear_dr7;
-+		}
-+	}
-+
-+	tsk->thread.debugreg6 = condition;
-+
-+	/* Mask out spurious TF errors due to lazy TF clearing */
-+	if (condition & DR_STEP) {
-+		/*
-+		 * The TF error should be masked out only if the current
-+		 * process is not traced and if the TRAP flag has been set
-+		 * previously by a tracing process (condition detected by
-+		 * the PT_DTRACE flag); remember that the i386 TRAP flag
-+		 * can be modified by the process itself in user mode,
-+		 * allowing programs to debug themselves without the ptrace()
-+		 * interface.
-+		 */
-+                if ((regs->cs & 3) == 0)
-+                       goto clear_TF_reenable;
-+		/*
-+		 * Was the TF flag set by a debugger? If so, clear it now,
-+		 * so that register information is correct.
-+		 */
-+		if (tsk->ptrace & PT_DTRACE) {
-+			regs->eflags &= ~TF_MASK;
-+			tsk->ptrace &= ~PT_DTRACE;
-+		}
-+	}
-+
-+	/* Ok, finally something we can handle */
-+	tsk->thread.trap_no = 1;
-+	tsk->thread.error_code = error_code;
-+	info.si_signo = SIGTRAP;
-+	info.si_errno = 0;
-+	info.si_code = TRAP_BRKPT;
-+	if ((regs->cs & 3) == 0) 
-+		goto clear_dr7; 
-+
-+	info.si_addr = (void __user *)regs->rip;
-+	force_sig_info(SIGTRAP, &info, tsk);	
-+clear_dr7:
-+	asm volatile("movq %0,%%db7"::"r"(0UL));
-+	return;
-+
-+clear_TF_reenable:
-+	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
-+	regs->eflags &= ~TF_MASK;
-+}
-+
-+static int kernel_math_error(struct pt_regs *regs, char *str)
-+{
-+	const struct exception_table_entry *fixup;
-+	fixup = search_exception_tables(regs->rip);
-+	if (fixup) {
-+		regs->rip = fixup->fixup;
-+		return 1;
-+	}
-+	notify_die(DIE_GPF, str, regs, 0, 16, SIGFPE);
-+	/* Illegal floating point operation in the kernel */
-+	die(str, regs, 0);
-+	return 0;
-+}
-+
-+/*
-+ * Note that we play around with the 'TS' bit in an attempt to get
-+ * the correct behaviour even in the presence of the asynchronous
-+ * IRQ13 behaviour
-+ */
-+asmlinkage void do_coprocessor_error(struct pt_regs *regs)
-+{
-+	void __user *rip = (void __user *)(regs->rip);
-+	struct task_struct * task;
-+	siginfo_t info;
-+	unsigned short cwd, swd;
-+
-+	conditional_sti(regs);
-+	if ((regs->cs & 3) == 0 &&
-+	    kernel_math_error(regs, "kernel x87 math error"))
-+		return;
-+
-+	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 16;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
-+	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = rip;
-+	/*
-+	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
-+	 * status.  0x3f is the exception bits in these regs, 0x200 is the
-+	 * C1 reg you need in case of a stack fault, 0x040 is the stack
-+	 * fault bit.  We should only be taking one exception at a time,
-+	 * so if this combination doesn't produce any single exception,
-+	 * then we have a bad program that isn't synchronizing its FPU usage
-+	 * and it will suffer the consequences since we won't be able to
-+	 * fully reproduce the context of the exception
-+	 */
-+	cwd = get_fpu_cwd(task);
-+	swd = get_fpu_swd(task);
-+	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
-+		case 0x000:
-+		default:
-+			break;
-+		case 0x001: /* Invalid Op */
-+		case 0x041: /* Stack Fault */
-+		case 0x241: /* Stack Fault | Direction */
-+			info.si_code = FPE_FLTINV;
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
-+			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
-+			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
-+			break;
-+	}
-+	force_sig_info(SIGFPE, &info, task);
-+}
-+
-+asmlinkage void bad_intr(void)
-+{
-+	printk("bad interrupt"); 
-+}
-+
-+asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
-+{
-+	void __user *rip = (void __user *)(regs->rip);
-+	struct task_struct * task;
-+	siginfo_t info;
-+	unsigned short mxcsr;
-+
-+	conditional_sti(regs);
-+	if ((regs->cs & 3) == 0 &&
-+        	kernel_math_error(regs, "kernel simd math error"))
-+		return;
-+
-+	/*
-+	 * Save the info for the exception handler and clear the error.
-+	 */
-+	task = current;
-+	save_init_fpu(task);
-+	task->thread.trap_no = 19;
-+	task->thread.error_code = 0;
-+	info.si_signo = SIGFPE;
-+	info.si_errno = 0;
-+	info.si_code = __SI_FAULT;
-+	info.si_addr = rip;
-+	/*
-+	 * The SIMD FPU exceptions are handled a little differently, as there
-+	 * is only a single status/control register.  Thus, to determine which
-+	 * unmasked exception was caught we must mask the exception mask bits
-+	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-+	 */
-+	mxcsr = get_fpu_mxcsr(task);
-+	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
-+		case 0x000:
-+		default:
-+			break;
-+		case 0x001: /* Invalid Op */
-+			info.si_code = FPE_FLTINV;
-+			break;
-+		case 0x002: /* Denormalize */
-+		case 0x010: /* Underflow */
-+			info.si_code = FPE_FLTUND;
-+			break;
-+		case 0x004: /* Zero Divide */
-+			info.si_code = FPE_FLTDIV;
-+			break;
-+		case 0x008: /* Overflow */
-+			info.si_code = FPE_FLTOVF;
-+			break;
-+		case 0x020: /* Precision */
-+			info.si_code = FPE_FLTRES;
-+			break;
-+	}
-+	force_sig_info(SIGFPE, &info, task);
-+}
-+
-+asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
-+{
-+}
-+
-+#if 0
-+asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
-+{
-+}
-+#endif
-+
-+/*
-+ *  'math_state_restore()' saves the current math information in the
-+ * old math state array, and gets the new ones from the current task
-+ *
-+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
-+ * Don't touch unless you *really* know how it works.
-+ */
-+asmlinkage void math_state_restore(void)
-+{
-+	struct task_struct *me = current;
-+        /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
-+
-+	if (!used_math())
-+		init_fpu(me);
-+	restore_fpu_checking(&me->thread.i387.fxsave);
-+	me->thread_info->status |= TS_USEDFPU;
-+}
-+
-+void do_call_debug(struct pt_regs *regs) 
-+{ 
-+	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT); 
-+}
-+
-+
-+/*
-+ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
-+ * specify <dpl>|4 in the second field.
-+ */
-+static trap_info_t trap_table[] = {
-+        {  0, 0|4, (__KERNEL_CS|0x3), (unsigned long)divide_error               },
-+        {  1, 0|4, (__KERNEL_CS|0x3), (unsigned long)debug                      },
-+        {  3, 3|4, (__KERNEL_CS|0x3), (unsigned long)int3                       },
-+        {  4, 3|4, (__KERNEL_CS|0x3), (unsigned long)overflow                   },
-+        {  5, 3|4, (__KERNEL_CS|0x3), (unsigned long)bounds                     },
-+        {  6, 0|4, (__KERNEL_CS|0x3), (unsigned long)invalid_op                 },
-+        {  7, 0|4, (__KERNEL_CS|0x3), (unsigned long)device_not_available       },
-+        {  9, 0|4, (__KERNEL_CS|0x3), (unsigned long)coprocessor_segment_overrun},
-+        { 10, 0|4, (__KERNEL_CS|0x3), (unsigned long)invalid_TSS                },
-+        { 11, 0|4, (__KERNEL_CS|0x3), (unsigned long)segment_not_present        },
-+        { 12, 0|4, (__KERNEL_CS|0x3), (unsigned long)stack_segment              },
-+        { 13, 0|4, (__KERNEL_CS|0x3), (unsigned long)general_protection         },
-+        { 14, 0|4, (__KERNEL_CS|0x3), (unsigned long)page_fault                 },
-+        { 15, 0|4, (__KERNEL_CS|0x3), (unsigned long)spurious_interrupt_bug     },
-+        { 16, 0|4, (__KERNEL_CS|0x3), (unsigned long)coprocessor_error          },
-+        { 17, 0|4, (__KERNEL_CS|0x3), (unsigned long)alignment_check            },
-+#ifdef CONFIG_X86_MCE
-+        { 18, 0|4, (__KERNEL_CS|0x3), (unsigned long)machine_check              },
-+#endif
-+        { 19, 0|4, (__KERNEL_CS|0x3), (unsigned long)simd_coprocessor_error     },
-+#ifdef CONFIG_IA32_EMULATION
-+	{ IA32_SYSCALL_VECTOR, 3|4, (__KERNEL_CS|0x3), (unsigned long)ia32_syscall},
-+#endif
-+        {  0, 0,           0, 0                                              }
-+};
-+
-+void __init trap_init(void)
-+{
-+        int ret;
-+
-+        ret = HYPERVISOR_set_trap_table(trap_table);
-+        
-+        if (ret) 
-+                printk("HYPERVISOR_set_trap_table failed: error %d\n",
-+                       ret);
-+
-+#ifdef CONFIG_IA32_EMULATION
-+	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
-+#endif
-+       
-+	/*
-+	 * Should be a barrier for any external CPU state.
-+	 */
-+	cpu_init();
-+}
-+
-+void smp_trap_init(trap_info_t *trap_ctxt)
-+{
-+	trap_info_t *t = trap_table;
-+	int i;
-+
-+	for (i = 0; i < 256; i++) {
-+		trap_ctxt[i].vector = i;
-+		trap_ctxt[i].cs     = FLAT_KERNEL_CS;
-+	}
-+
-+	for (t = trap_table; t->address; t++) {
-+		trap_ctxt[t->vector].flags = t->flags;
-+		trap_ctxt[t->vector].cs = t->cs;
-+		trap_ctxt[t->vector].address = t->address;
-+	}
-+}
-+
-+
-+/* Actual parsing is done early in setup.c. */
-+static int __init oops_dummy(char *s)
-+{ 
-+	panic_on_oops = 1;
-+	return -1; 
-+} 
-+__setup("oops=", oops_dummy); 
-+
-+static int __init kstack_setup(char *s)
-+{
-+	kstack_depth_to_print = simple_strtoul(s,NULL,0);
-+	return 0;
-+}
-+__setup("kstack=", kstack_setup);
-+
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/vsyscall.c linux-2.6.12-xen/arch/xen/x86_64/kernel/vsyscall.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/vsyscall.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/vsyscall.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,241 @@
-+/*
-+ *  linux/arch/x86_64/kernel/vsyscall.c
-+ *
-+ *  Copyright (C) 2001 Andrea Arcangeli <andrea at suse.de> SuSE
-+ *  Copyright 2003 Andi Kleen, SuSE Labs.
-+ *
-+ *  Thanks to hpa at transmeta.com for some useful hint.
-+ *  Special thanks to Ingo Molnar for his early experience with
-+ *  a different vsyscall implementation for Linux/IA32 and for the name.
-+ *
-+ *  vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
-+ *  at virtual address -10Mbyte+1024bytes etc... There are at max 4
-+ *  vsyscalls. One vsyscall can reserve more than 1 slot to avoid
-+ *  jumping out of line if necessary. We cannot add more with this
-+ *  mechanism because older kernels won't return -ENOSYS.
-+ *  If we want more than four we need a vDSO.
-+ *
-+ *  Note: the concept clashes with user mode linux. If you use UML and
-+ *  want per guest time just set the kernel.vsyscall64 sysctl to 0.
-+ */
-+
-+#include <linux/time.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/timer.h>
-+#include <linux/seqlock.h>
-+#include <linux/jiffies.h>
-+#include <linux/sysctl.h>
-+
-+#include <asm/vsyscall.h>
-+#include <asm/pgtable.h>
-+#include <asm/page.h>
-+#include <asm/fixmap.h>
-+#include <asm/errno.h>
-+#include <asm/io.h>
-+
-+#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
-+#define force_inline __attribute__((always_inline)) inline
-+
-+int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
-+seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
-+
-+#include <asm/unistd.h>
-+
-+static force_inline void timeval_normalize(struct timeval * tv)
-+{
-+	time_t __sec;
-+
-+	__sec = tv->tv_usec / 1000000;
-+	if (__sec) {
-+		tv->tv_usec %= 1000000;
-+		tv->tv_sec += __sec;
-+	}
-+}
-+
-+static force_inline void do_vgettimeofday(struct timeval * tv)
-+{
-+	long sequence, t;
-+	unsigned long sec, usec;
-+
-+	do {
-+		sequence = read_seqbegin(&__xtime_lock);
-+		
-+		sec = __xtime.tv_sec;
-+		usec = (__xtime.tv_nsec / 1000) +
-+			(__jiffies - __wall_jiffies) * (1000000 / HZ);
-+
-+		if (__vxtime.mode != VXTIME_HPET) {
-+			sync_core();
-+			rdtscll(t);
-+			if (t < __vxtime.last_tsc)
-+				t = __vxtime.last_tsc;
-+			usec += ((t - __vxtime.last_tsc) *
-+				 __vxtime.tsc_quot) >> 32;
-+			/* See comment in x86_64 do_gettimeofday. */
-+		} else {
-+			usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
-+				  __vxtime.last) * __vxtime.quot) >> 32;
-+		}
-+	} while (read_seqretry(&__xtime_lock, sequence));
-+
-+	tv->tv_sec = sec + usec / 1000000;
-+	tv->tv_usec = usec % 1000000;
-+}
-+
-+/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
-+static force_inline void do_get_tz(struct timezone * tz)
-+{
-+	*tz = __sys_tz;
-+}
-+
-+static force_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
-+{
-+	int ret;
-+	asm volatile("vsysc2: syscall"
-+		: "=a" (ret)
-+		: "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
-+	return ret;
-+}
-+
-+static force_inline long time_syscall(long *t)
-+{
-+	long secs;
-+	asm volatile("vsysc1: syscall"
-+		: "=a" (secs)
-+		: "0" (__NR_time),"D" (t) : __syscall_clobber);
-+	return secs;
-+}
-+
-+static int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
-+{
-+	if (unlikely(!__sysctl_vsyscall))
-+		return gettimeofday(tv,tz);
-+	if (tv)
-+		do_vgettimeofday(tv);
-+	if (tz)
-+		do_get_tz(tz);
-+	return 0;
-+}
-+
-+/* This will break when the xtime seconds get inaccurate, but that is
-+ * unlikely */
-+static time_t __vsyscall(1) vtime(time_t *t)
-+{
-+	if (unlikely(!__sysctl_vsyscall))
-+		return time_syscall(t);
-+	else if (t)
-+		*t = __xtime.tv_sec;		
-+	return __xtime.tv_sec;
-+}
-+
-+static long __vsyscall(2) venosys_0(void)
-+{
-+	return -ENOSYS;
-+}
-+
-+static long __vsyscall(3) venosys_1(void)
-+{
-+	return -ENOSYS;
-+}
-+
-+#ifdef CONFIG_SYSCTL
-+
-+#define SYSCALL 0x050f
-+#define NOP2    0x9090
-+
-+/*
-+ * NOP out syscall in vsyscall page when not needed.
-+ */
-+static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
-+                        void __user *buffer, size_t *lenp, loff_t *ppos)
-+{
-+	extern u16 vsysc1, vsysc2;
-+	u16 *map1, *map2;
-+	int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-+	if (!write)
-+		return ret;
-+	/* gcc has some trouble with __va(__pa()), so just do it this
-+	   way. */
-+	map1 = ioremap(__pa_symbol(&vsysc1), 2);
-+	if (!map1)
-+		return -ENOMEM;
-+	map2 = ioremap(__pa_symbol(&vsysc2), 2);
-+	if (!map2) {
-+		ret = -ENOMEM;
-+		goto out;
-+	}
-+	if (!sysctl_vsyscall) {
-+		*map1 = SYSCALL;
-+		*map2 = SYSCALL;
-+	} else {
-+		*map1 = NOP2;
-+		*map2 = NOP2;
-+	}
-+	iounmap(map2);
-+out:
-+	iounmap(map1);
-+	return ret;
-+}
-+
-+static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
-+				void __user *oldval, size_t __user *oldlenp,
-+				void __user *newval, size_t newlen,
-+				void **context)
-+{
-+	return -ENOSYS;
-+}
-+
-+static ctl_table kernel_table2[] = {
-+	{ .ctl_name = 99, .procname = "vsyscall64",
-+	  .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
-+	  .strategy = vsyscall_sysctl_nostrat,
-+	  .proc_handler = vsyscall_sysctl_change },
-+	{ 0, }
-+};
-+
-+static ctl_table kernel_root_table2[] = {
-+	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
-+	  .child = kernel_table2 },
-+	{ 0 },
-+};
-+
-+#endif
-+
-+static void __init map_vsyscall(void)
-+{
-+	extern char __vsyscall_0;
-+	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
-+
-+	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
-+}
-+
-+#ifdef CONFIG_XEN
-+static void __init map_vsyscall_user(void)
-+{
-+	extern void __set_fixmap_user(enum fixed_addresses, unsigned long, pgprot_t);
-+	extern char __vsyscall_0;
-+	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
-+
-+	__set_fixmap_user(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
-+}
-+#endif
-+
-+static int __init vsyscall_init(void)
-+{
-+	BUG_ON(((unsigned long) &vgettimeofday !=
-+			VSYSCALL_ADDR(__NR_vgettimeofday)));
-+	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
-+	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
-+	map_vsyscall();
-+#ifdef CONFIG_XEN
-+	map_vsyscall_user();
-+	sysctl_vsyscall = 0; /* disable vgettimeofday() */
-+#endif
-+#ifdef CONFIG_SYSCTL
-+	register_sysctl_table(kernel_root_table2, 0);
-+#endif
-+	return 0;
-+}
-+
-+__initcall(vsyscall_init);
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/x8664_ksyms.c linux-2.6.12-xen/arch/xen/x86_64/kernel/x8664_ksyms.c
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/x8664_ksyms.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/x8664_ksyms.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,208 @@
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/smp.h>
-+#include <linux/user.h>
-+#include <linux/sched.h>
-+#include <linux/in6.h>
-+#include <linux/interrupt.h>
-+#include <linux/smp_lock.h>
-+#include <linux/pm.h>
-+#include <linux/pci.h>
-+#include <linux/apm_bios.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/syscalls.h>
-+#include <linux/tty.h>
-+#include <linux/ioctl32.h>
-+
-+#include <asm/semaphore.h>
-+#include <asm/processor.h>
-+#include <asm/i387.h>
-+#include <asm/uaccess.h>
-+#include <asm/checksum.h>
-+#include <asm/io.h>
-+#include <asm/delay.h>
-+#include <asm/irq.h>
-+#include <asm/mmx.h>
-+#include <asm/desc.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/nmi.h>
-+#include <asm/kdebug.h>
-+#include <asm/unistd.h>
-+#include <asm/tlbflush.h>
-+#include <asm/kdebug.h>
-+
-+extern spinlock_t rtc_lock;
-+
-+#ifdef CONFIG_SMP
-+extern void __write_lock_failed(rwlock_t *rw);
-+extern void __read_lock_failed(rwlock_t *rw);
-+#endif
-+
-+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
-+extern struct drive_info_struct drive_info;
-+EXPORT_SYMBOL(drive_info);
-+#endif
-+
-+extern unsigned long get_cmos_time(void);
-+
-+/* platform dependent support */
-+EXPORT_SYMBOL(boot_cpu_data);
-+//EXPORT_SYMBOL(dump_fpu);
-+EXPORT_SYMBOL(__ioremap);
-+EXPORT_SYMBOL(ioremap_nocache);
-+EXPORT_SYMBOL(iounmap);
-+EXPORT_SYMBOL(enable_irq);
-+EXPORT_SYMBOL(disable_irq);
-+EXPORT_SYMBOL(disable_irq_nosync);
-+EXPORT_SYMBOL(probe_irq_mask);
-+EXPORT_SYMBOL(kernel_thread);
-+EXPORT_SYMBOL(pm_idle);
-+EXPORT_SYMBOL(pm_power_off);
-+EXPORT_SYMBOL(get_cmos_time);
-+
-+EXPORT_SYMBOL(__down_failed);
-+EXPORT_SYMBOL(__down_failed_interruptible);
-+EXPORT_SYMBOL(__down_failed_trylock);
-+EXPORT_SYMBOL(__up_wakeup);
-+/* Networking helper routines. */
-+EXPORT_SYMBOL(csum_partial_copy_nocheck);
-+EXPORT_SYMBOL(ip_compute_csum);
-+/* Delay loops */
-+EXPORT_SYMBOL(__udelay);
-+EXPORT_SYMBOL(__ndelay);
-+EXPORT_SYMBOL(__delay);
-+EXPORT_SYMBOL(__const_udelay);
-+
-+EXPORT_SYMBOL(__get_user_1);
-+EXPORT_SYMBOL(__get_user_2);
-+EXPORT_SYMBOL(__get_user_4);
-+EXPORT_SYMBOL(__get_user_8);
-+EXPORT_SYMBOL(__put_user_1);
-+EXPORT_SYMBOL(__put_user_2);
-+EXPORT_SYMBOL(__put_user_4);
-+EXPORT_SYMBOL(__put_user_8);
-+
-+EXPORT_SYMBOL(strpbrk);
-+EXPORT_SYMBOL(strstr);
-+
-+EXPORT_SYMBOL(strncpy_from_user);
-+EXPORT_SYMBOL(__strncpy_from_user);
-+EXPORT_SYMBOL(clear_user);
-+EXPORT_SYMBOL(__clear_user);
-+EXPORT_SYMBOL(copy_user_generic);
-+EXPORT_SYMBOL(copy_from_user);
-+EXPORT_SYMBOL(copy_to_user);
-+EXPORT_SYMBOL(copy_in_user);
-+EXPORT_SYMBOL(strnlen_user);
-+
-+#ifdef CONFIG_PCI
-+EXPORT_SYMBOL(pci_alloc_consistent);
-+EXPORT_SYMBOL(pci_free_consistent);
-+#endif
-+
-+#ifdef CONFIG_PCI
-+EXPORT_SYMBOL(pci_mem_start);
-+#endif
-+
-+EXPORT_SYMBOL(copy_page);
-+EXPORT_SYMBOL(clear_page);
-+
-+EXPORT_SYMBOL(cpu_pda);
-+#ifdef CONFIG_SMP
-+EXPORT_SYMBOL(__write_lock_failed);
-+EXPORT_SYMBOL(__read_lock_failed);
-+
-+EXPORT_SYMBOL(synchronize_irq);
-+EXPORT_SYMBOL(smp_call_function);
-+#endif
-+
-+#ifdef CONFIG_VT
-+EXPORT_SYMBOL(screen_info);
-+#endif
-+
-+EXPORT_SYMBOL(get_wchan);
-+
-+EXPORT_SYMBOL(rtc_lock);
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+EXPORT_SYMBOL_GPL(set_nmi_callback);
-+EXPORT_SYMBOL_GPL(unset_nmi_callback);
-+#endif
-+
-+/* Export string functions. We normally rely on gcc builtins for most of these,
-+   but gcc sometimes decides not to inline them. */
-+#undef memcpy
-+#undef memset
-+#undef memmove
-+#undef memchr
-+#undef strlen
-+#undef strncmp
-+#undef strncpy
-+#undef strchr	
-+
-+extern void * memset(void *,int,__kernel_size_t);
-+extern size_t strlen(const char *);
-+extern void * memmove(void * dest,const void *src,size_t count);
-+extern void *memchr(const void *s, int c, size_t n);
-+extern void * memcpy(void *,const void *,__kernel_size_t);
-+extern void * __memcpy(void *,const void *,__kernel_size_t);
-+
-+EXPORT_SYMBOL(memset);
-+EXPORT_SYMBOL(strlen);
-+EXPORT_SYMBOL(memmove);
-+EXPORT_SYMBOL(strncmp);
-+EXPORT_SYMBOL(strncpy);
-+EXPORT_SYMBOL(strchr);
-+EXPORT_SYMBOL(strncat);
-+EXPORT_SYMBOL(memchr);
-+EXPORT_SYMBOL(strrchr);
-+EXPORT_SYMBOL(strnlen);
-+EXPORT_SYMBOL(memscan);
-+EXPORT_SYMBOL(memcpy);
-+EXPORT_SYMBOL(__memcpy);
-+
-+#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
-+/* prototypes are wrong, these are assembly routines with custom calling conventions */
-+extern void rwsem_down_read_failed_thunk(void);
-+extern void rwsem_wake_thunk(void);
-+extern void rwsem_downgrade_thunk(void);
-+extern void rwsem_down_write_failed_thunk(void);
-+EXPORT_SYMBOL(rwsem_down_read_failed_thunk);
-+EXPORT_SYMBOL(rwsem_wake_thunk);
-+EXPORT_SYMBOL(rwsem_downgrade_thunk);
-+EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
-+#endif
-+
-+EXPORT_SYMBOL(empty_zero_page);
-+
-+#ifdef CONFIG_HAVE_DEC_LOCK
-+EXPORT_SYMBOL(_atomic_dec_and_lock);
-+#endif
-+
-+EXPORT_SYMBOL(die_chain);
-+EXPORT_SYMBOL(register_die_notifier);
-+
-+#ifdef CONFIG_SMP
-+EXPORT_SYMBOL(cpu_sibling_map);
-+EXPORT_SYMBOL(smp_num_siblings);
-+#endif
-+
-+extern void do_softirq_thunk(void);
-+EXPORT_SYMBOL(do_softirq_thunk);
-+
-+#ifdef CONFIG_BUG
-+EXPORT_SYMBOL(out_of_line_bug);
-+#endif
-+
-+EXPORT_SYMBOL(init_level4_pgt);
-+
-+extern unsigned long __supported_pte_mask;
-+EXPORT_SYMBOL(__supported_pte_mask);
-+
-+#ifdef CONFIG_SMP
-+EXPORT_SYMBOL(flush_tlb_page);
-+#endif
-+
-+EXPORT_SYMBOL(cpu_khz);
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/kernel/xen_entry.S linux-2.6.12-xen/arch/xen/x86_64/kernel/xen_entry.S
---- pristine-linux-2.6.12/arch/xen/x86_64/kernel/xen_entry.S	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/kernel/xen_entry.S	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,41 @@
-+/*
-+ * Copied from arch/xen/i386/kernel/entry.S
-+ */                        
-+/* Offsets into shared_info_t. */                
-+#define evtchn_upcall_pending		0
-+#define evtchn_upcall_mask		1
-+
-+#define sizeof_vcpu_shift		6
-+
-+#ifdef CONFIG_SMP
-+//#define preempt_disable(reg)	incl threadinfo_preempt_count(reg)
-+//#define preempt_enable(reg)	decl threadinfo_preempt_count(reg)
-+#define preempt_disable(reg)
-+#define preempt_enable(reg)
-+#define XEN_GET_VCPU_INFO(reg)	preempt_disable(%rbp)			; \
-+				movq %gs:pda_cpunumber,reg		; \
-+				shl  $32, reg				; \
-+				shr  $32-sizeof_vcpu_shift,reg		; \
-+				addq HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg)	preempt_enable(%rbp)			; \
-+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
-+#else
-+#define XEN_GET_VCPU_INFO(reg)	movq HYPERVISOR_shared_info,reg
-+#define XEN_PUT_VCPU_INFO(reg)
-+#define XEN_PUT_VCPU_INFO_fixup
-+#endif
-+
-+#define XEN_LOCKED_BLOCK_EVENTS(reg)	movb $1,evtchn_upcall_mask(reg)
-+#define XEN_LOCKED_UNBLOCK_EVENTS(reg)	movb $0,evtchn_upcall_mask(reg)
-+#define XEN_BLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
-+				XEN_LOCKED_BLOCK_EVENTS(reg)		; \
-+    				XEN_PUT_VCPU_INFO(reg)
-+#define XEN_UNBLOCK_EVENTS(reg)	XEN_GET_VCPU_INFO(reg)			; \
-+				XEN_LOCKED_UNBLOCK_EVENTS(reg)		; \
-+    				XEN_PUT_VCPU_INFO(reg)
-+#define XEN_TEST_PENDING(reg)	testb $0xFF,evtchn_upcall_pending(reg)
-+
-+EVENT_MASK      = (CS+4)
-+VGCF_IN_SYSCALL = (1<<8)
-+        
-+	
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/Makefile linux-2.6.12-xen/arch/xen/x86_64/Makefile
---- pristine-linux-2.6.12/arch/xen/x86_64/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,93 @@
-+#
-+# x86_64/Makefile
-+#
-+# This file is included by the global makefile so that you can add your own
-+# architecture-specific flags and dependencies. Remember to have actions
-+# for "archclean" and "archdep" for cleaning up and making dependencies for
-+# this architecture
-+#
-+# This file is subject to the terms and conditions of the GNU General Public
-+# License.  See the file "COPYING" in the main directory of this archive
-+# for more details.
-+#
-+# Copyright (C) 1994 by Linus Torvalds
-+#
-+# 19990713  Artur Skawina <skawina at geocities.com>
-+#           Added '-march' and '-mpreferred-stack-boundary' support
-+# 20000913  Pavel Machek <pavel at suse.cz>
-+#	    Converted for x86_64 architecture
-+# 20010105  Andi Kleen, add IA32 compiler.
-+#           ....and later removed it again....
-+# 20050205  Jun Nakajima <jun.nakajima at intel.com> 
-+#           Modified for Xen
-+#
-+# $Id: Makefile,v 1.31 2002/03/22 15:56:07 ak Exp $
-+
-+#
-+# early bootup linking needs 32bit. You can either use real 32bit tools
-+# here or 64bit tools in 32bit mode.
-+#
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+IA32_CC := $(CC) $(CPPFLAGS) -m32 -O2 -fomit-frame-pointer
-+IA32_LD := $(LD) -m elf_i386
-+IA32_AS := $(CC) $(AFLAGS) -m32 -Wa,--32 -traditional -c
-+IA32_OBJCOPY := $(CROSS_COMPILE)objcopy
-+IA32_CPP := $(CROSS_COMPILE)gcc -m32 -E
-+export IA32_CC IA32_LD IA32_AS IA32_OBJCOPY IA32_CPP
-+
-+
-+LDFLAGS		:= -m elf_x86_64
-+#LDFLAGS_vmlinux := -e stext
-+
-+CHECKFLAGS      += -D__x86_64__ -m64
-+
-+cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
-+cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
-+CFLAGS += $(cflags-y)
-+
-+CFLAGS += -mno-red-zone
-+CFLAGS += -mcmodel=kernel
-+CFLAGS += -pipe
-+# this makes reading assembly source easier, but produces worse code
-+# actually it makes the kernel smaller too.
-+CFLAGS += -fno-reorder-blocks	
-+CFLAGS += -Wno-sign-compare
-+ifneq ($(CONFIG_DEBUG_INFO),y)
-+CFLAGS += -fno-asynchronous-unwind-tables
-+# -fweb shrinks the kernel a bit, but the difference is very small
-+# it also messes up debugging, so don't use it for now.
-+#CFLAGS += $(call cc-option,-fweb)
-+endif
-+# -funit-at-a-time shrinks the kernel .text considerably
-+# unfortunately it makes reading oopses harder.
-+CFLAGS += $(call cc-option,-funit-at-a-time,)
-+
-+head-y := arch/xen/x86_64/kernel/head.o arch/xen/x86_64/kernel/head64.o arch/xen/x86_64/kernel/init_task.o
-+
-+libs-y 					+= arch/x86_64/lib/
-+core-y					+= arch/xen/x86_64/kernel/ arch/xen/x86_64/mm/
-+core-$(CONFIG_IA32_EMULATION)		+= arch/xen/x86_64/ia32/
-+drivers-$(CONFIG_PCI)			+= arch/xen/x86_64/pci/
-+drivers-$(CONFIG_OPROFILE)		+= arch/x86_64/oprofile/
-+
-+# for clean
-+obj-	+= kernel/ mm/ pci/
-+
-+xenflags-y += -Iinclude/asm-xen/asm-x86_64/mach-xen
-+
-+CFLAGS += $(xenflags-y)
-+AFLAGS += $(xenflags-y)
-+
-+prepare: include/asm-$(XENARCH)/asm_offsets.h
-+CLEAN_FILES += include/asm-$(XENARCH)/asm_offsets.h
-+
-+arch/$(XENARCH)/kernel/asm-offsets.s: include/asm include/.asm-ignore \
-+	include/linux/version.h include/config/MARKER
-+
-+include/asm-$(XENARCH)/offset.h: arch/$(XENARCH)/kernel/asm-offsets.s
-+	$(call filechk,gen-asm-offsets)
-+
-+include/asm-$(XENARCH)/asm_offsets.h: include/asm-$(XENARCH)/offset.h
-+	ln -fsn offset.h include/asm-$(XENARCH)/asm_offsets.h
-+	ln -fsn offset.h include/asm-$(XENARCH)/asm_offset.h
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/mm/fault.c linux-2.6.12-xen/arch/xen/x86_64/mm/fault.c
---- pristine-linux-2.6.12/arch/xen/x86_64/mm/fault.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/mm/fault.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,596 @@
-+/*
-+ *  linux/arch/x86-64/mm/fault.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/smp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <linux/tty.h>
-+#include <linux/vt_kern.h>		/* For unblank_screen() */
-+#include <linux/compiler.h>
-+#include <linux/module.h>
-+#include <linux/percpu.h>
-+#include <linux/kprobes.h>
-+
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgalloc.h>
-+#include <asm/smp.h>
-+#include <asm/tlbflush.h>
-+#include <asm/proto.h>
-+#include <asm/kdebug.h>
-+#include <asm-generic/sections.h>
-+#include <asm/kdebug.h>
-+
-+DEFINE_PER_CPU(pgd_t *, cur_pgd);
-+
-+void bust_spinlocks(int yes)
-+{
-+	int loglevel_save = console_loglevel;
-+	if (yes) {
-+		oops_in_progress = 1;
-+	} else {
-+#ifdef CONFIG_VT
-+		unblank_screen();
-+#endif
-+		oops_in_progress = 0;
-+		/*
-+		 * OK, the message is on the console.  Now we call printk()
-+		 * without oops_in_progress set so that printk will give klogd
-+		 * a poke.  Hold onto your hats...
-+		 */
-+		console_loglevel = 15;		/* NMI oopser may have shut the console up */
-+		printk(" ");
-+		console_loglevel = loglevel_save;
-+	}
-+}
-+
-+/* Sometimes the CPU reports invalid exceptions on prefetch.
-+   Check that here and ignore.
-+   Opcode checker based on code by Richard Brunner */
-+static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
-+				unsigned long error_code)
-+{ 
-+	unsigned char *instr;
-+	int scan_more = 1;
-+	int prefetch = 0; 
-+	unsigned char *max_instr;
-+
-+	/* If it was an exec fault, ignore it */
-+	if (error_code & (1<<4))
-+		return 0;
-+	
-+	instr = (unsigned char *)convert_rip_to_linear(current, regs);
-+	max_instr = instr + 15;
-+
-+	if ((regs->cs & 3) != 0 && instr >= (unsigned char *)TASK_SIZE)
-+		return 0;
-+
-+	while (scan_more && instr < max_instr) { 
-+		unsigned char opcode;
-+		unsigned char instr_hi;
-+		unsigned char instr_lo;
-+
-+		if (__get_user(opcode, instr))
-+			break; 
-+
-+		instr_hi = opcode & 0xf0; 
-+		instr_lo = opcode & 0x0f; 
-+		instr++;
-+
-+		switch (instr_hi) { 
-+		case 0x20:
-+		case 0x30:
-+			/* Values 0x26,0x2E,0x36,0x3E are valid x86
-+			   prefixes.  In long mode, the CPU will signal
-+			   invalid opcode if some of these prefixes are
-+			   present so we will never get here anyway */
-+			scan_more = ((instr_lo & 7) == 0x6);
-+			break;
-+			
-+		case 0x40:
-+			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
-+			   Need to figure out under what instruction mode the
-+			   instruction was issued ... */
-+			/* Could check the LDT for lm, but for now it's good
-+			   enough to assume that long mode only uses well known
-+			   segments or kernel. */
-+			scan_more = ((regs->cs & 3) == 0) || (regs->cs == __USER_CS);
-+			break;
-+			
-+		case 0x60:
-+			/* 0x64 thru 0x67 are valid prefixes in all modes. */
-+			scan_more = (instr_lo & 0xC) == 0x4;
-+			break;		
-+		case 0xF0:
-+			/* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
-+			scan_more = !instr_lo || (instr_lo>>1) == 1;
-+			break;			
-+		case 0x00:
-+			/* Prefetch instruction is 0x0F0D or 0x0F18 */
-+			scan_more = 0;
-+			if (__get_user(opcode, instr)) 
-+				break;
-+			prefetch = (instr_lo == 0xF) &&
-+				(opcode == 0x0D || opcode == 0x18);
-+			break;			
-+		default:
-+			scan_more = 0;
-+			break;
-+		} 
-+	}
-+	return prefetch;
-+}
-+
-+static int bad_address(void *p) 
-+{ 
-+	unsigned long dummy;
-+	return __get_user(dummy, (unsigned long *)p);
-+} 
-+
-+void dump_pagetable(unsigned long address)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+
-+	preempt_disable();
-+	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
-+	preempt_enable();
-+	pgd += pgd_index(address);
-+	printk("PGD %lx ", pgd_val(*pgd));
-+	if (bad_address(pgd)) goto bad;
-+	if (!pgd_present(*pgd)) goto ret; 
-+
-+	pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
-+	if (bad_address(pud)) goto bad;
-+	printk("PUD %lx ", pud_val(*pud));
-+	if (!pud_present(*pud))	goto ret;
-+
-+	pmd = pmd_offset(pud, address);
-+	if (bad_address(pmd)) goto bad;
-+	printk("PMD %lx ", pmd_val(*pmd));
-+	if (!pmd_present(*pmd))	goto ret;	 
-+
-+	pte = pte_offset_kernel(pmd, address);
-+	if (bad_address(pte)) goto bad;
-+	printk("PTE %lx", pte_val(*pte)); 
-+ret:
-+	printk("\n");
-+	return;
-+bad:
-+	printk("BAD\n");
-+}
-+
-+static const char errata93_warning[] = 
-+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
-+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
-+KERN_ERR "******* Please consider a BIOS update.\n"
-+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
-+
-+/* Workaround for K8 erratum #93 & buggy BIOS.
-+   BIOS SMM functions are required to use a specific workaround
-+   to avoid corruption of the 64bit RIP register on C stepping K8. 
-+   A lot of BIOSes that didn't get tested properly miss this.
-+   The OS sees this as a page fault with the upper 32bits of RIP cleared.
-+   Try to work around it here.
-+   Note we only handle faults in kernel here. */
-+
-+static int is_errata93(struct pt_regs *regs, unsigned long address) 
-+{
-+	static int warned;
-+	if (address != regs->rip)
-+		return 0;
-+	if ((address >> 32) != 0) 
-+		return 0;
-+	address |= 0xffffffffUL << 32;
-+	if ((address >= (u64)_stext && address <= (u64)_etext) || 
-+	    (address >= MODULES_VADDR && address <= MODULES_END)) { 
-+		if (!warned) {
-+			printk(errata93_warning); 		
-+			warned = 1;
-+		}
-+		regs->rip = address;
-+		return 1;
-+	}
-+	return 0;
-+} 
-+
-+int unhandled_signal(struct task_struct *tsk, int sig)
-+{
-+	if (tsk->pid == 1)
-+		return 1;
-+	/* Warn for strace, but not for gdb */
-+	if (!test_ti_thread_flag(tsk->thread_info, TIF_SYSCALL_TRACE) &&
-+	    (tsk->ptrace & PT_PTRACED))
-+		return 0;
-+	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
-+		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
-+}
-+
-+static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
-+				 unsigned long error_code)
-+{
-+	oops_begin();
-+	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
-+	       current->comm, address);
-+	dump_pagetable(address);
-+	__die("Bad pagetable", regs, error_code);
-+	oops_end();
-+	do_exit(SIGKILL);
-+}
-+
-+/*
-+ * Handle a fault on the vmalloc or module mapping area
-+ *
-+ * This assumes no large pages in there.
-+ */
-+static int vmalloc_fault(unsigned long address)
-+{
-+	pgd_t *pgd, *pgd_ref;
-+	pud_t *pud, *pud_ref;
-+	pmd_t *pmd, *pmd_ref;
-+	pte_t *pte, *pte_ref;
-+
-+	/* Copy kernel mappings over when needed. This can also
-+	   happen within a race in page table update. In the latter
-+	   case just flush. */
-+
-+	/* On Xen the line below does not always work. Needs investigating! */
-+	/*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
-+	preempt_disable();
-+	pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
-+	preempt_enable();
-+	pgd += pgd_index(address);
-+	pgd_ref = pgd_offset_k(address);
-+	if (pgd_none(*pgd_ref))
-+		return -1;
-+	if (pgd_none(*pgd))
-+		set_pgd(pgd, *pgd_ref);
-+
-+	/* Below here mismatches are bugs because these lower tables
-+	   are shared */
-+
-+	pud = pud_offset(pgd, address);
-+	pud_ref = pud_offset(pgd_ref, address);
-+	if (pud_none(*pud_ref))
-+		return -1;
-+	if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
-+		BUG();
-+	pmd = pmd_offset(pud, address);
-+	pmd_ref = pmd_offset(pud_ref, address);
-+	if (pmd_none(*pmd_ref))
-+		return -1;
-+	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
-+		BUG();
-+	pte_ref = pte_offset_kernel(pmd_ref, address);
-+	if (!pte_present(*pte_ref))
-+		return -1;
-+	pte = pte_offset_kernel(pmd, address);
-+	/* Don't use pte_page here, because the mappings can point
-+	   outside mem_map, and the NUMA hash lookup cannot handle
-+	   that. */
-+	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
-+		BUG();
-+	__flush_tlb_all();
-+	return 0;
-+}
-+
-+int page_fault_trace = 0;
-+int exception_trace = 1;
-+
-+
-+#define MEM_VERBOSE 1
-+
-+#ifdef MEM_VERBOSE
-+#define MEM_LOG(_f, _a...)			\
-+	printk("fault.c:[%d]-> " _f "\n",	\
-+	__LINE__ , ## _a )
-+#else
-+#define MEM_LOG(_f, _a...) ((void)0)
-+#endif
-+
-+/*
-+ * This routine handles page faults.  It determines the address,
-+ * and the problem, and then passes it off to one of the appropriate
-+ * routines.
-+ *
-+ * error_code:
-+ *	bit 0 == 0 means no page found, 1 means protection fault
-+ *	bit 1 == 0 means read, 1 means write
-+ *	bit 2 == 0 means kernel, 1 means user-mode
-+ *      bit 3 == 1 means fault was an instruction fetch
-+ */
-+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
-+{
-+	struct task_struct *tsk;
-+	struct mm_struct *mm;
-+	struct vm_area_struct * vma;
-+	unsigned long address;
-+	const struct exception_table_entry *fixup;
-+	int write;
-+	siginfo_t info;
-+
-+	if (!user_mode(regs))
-+		error_code &= ~4; /* means kernel */
-+
-+#ifdef CONFIG_CHECKING
-+	{ 
-+		unsigned long gs; 
-+		struct x8664_pda *pda = cpu_pda + stack_smp_processor_id(); 
-+		rdmsrl(MSR_GS_BASE, gs); 
-+		if (gs != (unsigned long)pda) { 
-+			wrmsrl(MSR_GS_BASE, pda); 
-+			printk("page_fault: wrong gs %lx expected %p\n", gs, pda);
-+		}
-+	}
-+#endif
-+
-+	/* get the address */
-+	address = HYPERVISOR_shared_info->vcpu_info[
-+		smp_processor_id()].arch.cr2;
-+
-+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
-+					SIGSEGV) == NOTIFY_STOP)
-+		return;
-+
-+	if (likely(regs->eflags & X86_EFLAGS_IF))
-+		local_irq_enable();
-+
-+	if (unlikely(page_fault_trace))
-+		printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
-+		       regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code); 
-+
-+	tsk = current;
-+	mm = tsk->mm;
-+	info.si_code = SEGV_MAPERR;
-+
-+
-+	/*
-+	 * We fault-in kernel-space virtual memory on-demand. The
-+	 * 'reference' page table is init_mm.pgd.
-+	 *
-+	 * NOTE! We MUST NOT take any locks for this case. We may
-+	 * be in an interrupt or a critical region, and should
-+	 * only copy the information from the master page table,
-+	 * nothing more.
-+	 *
-+	 * This verifies that the fault happens in kernel space
-+	 * (error_code & 4) == 0, and that the fault was not a
-+	 * protection error (error_code & 1) == 0.
-+	 */
-+	if (unlikely(address >= TASK_SIZE)) {
-+		if (!(error_code & 5) &&
-+		      ((address >= VMALLOC_START && address < VMALLOC_END) ||
-+		       (address >= MODULES_VADDR && address < MODULES_END))) {
-+			if (vmalloc_fault(address) < 0)
-+				goto bad_area_nosemaphore;
-+			return;
-+		}
-+		/*
-+		 * Don't take the mm semaphore here. If we fixup a prefetch
-+		 * fault we could otherwise deadlock.
-+		 */
-+		goto bad_area_nosemaphore;
-+	}
-+
-+	if (unlikely(error_code & (1 << 3)))
-+		pgtable_bad(address, regs, error_code);
-+
-+	/*
-+	 * If we're in an interrupt or have no user
-+	 * context, we must not take the fault..
-+	 */
-+	if (unlikely(in_atomic() || !mm))
-+		goto bad_area_nosemaphore;
-+
-+ again:
-+	/* When running in the kernel we expect faults to occur only to
-+	 * addresses in user space.  All other faults represent errors in the
-+	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
-+	 * erroneous fault occurring in a code path which already holds mmap_sem
-+	 * we will deadlock attempting to validate the fault against the
-+	 * address space.  Luckily the kernel only validly references user
-+	 * space from well defined areas of code, which are listed in the
-+	 * exceptions table.
-+	 *
-+	 * As the vast majority of faults will be valid we will only perform
-+	 * the source reference check when there is a possibility of a deadlock.
-+	 * Attempt to lock the address space, if we cannot we then validate the
-+	 * source.  If this is invalid we can skip the address space check,
-+	 * thus avoiding the deadlock.
-+	 */
-+	if (!down_read_trylock(&mm->mmap_sem)) {
-+		if ((error_code & 4) == 0 &&
-+		    !search_exception_tables(regs->rip))
-+			goto bad_area_nosemaphore;
-+		down_read(&mm->mmap_sem);
-+	}
-+
-+	vma = find_vma(mm, address);
-+	if (!vma)
-+		goto bad_area;
-+	if (likely(vma->vm_start <= address))
-+		goto good_area;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		goto bad_area;
-+	if (error_code & 4) {
-+		// XXX: align red zone size with ABI 
-+		if (address + 128 < regs->rsp)
-+			goto bad_area;
-+	}
-+	if (expand_stack(vma, address))
-+		goto bad_area;
-+/*
-+ * Ok, we have a good vm_area for this memory access, so
-+ * we can handle it..
-+ */
-+good_area:
-+	info.si_code = SEGV_ACCERR;
-+	write = 0;
-+	switch (error_code & 3) {
-+		default:	/* 3: write, present */
-+			/* fall through */
-+		case 2:		/* write, not present */
-+			if (!(vma->vm_flags & VM_WRITE))
-+				goto bad_area;
-+			write++;
-+			break;
-+		case 1:		/* read, present */
-+			goto bad_area;
-+		case 0:		/* read, not present */
-+			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-+				goto bad_area;
-+	}
-+
-+	/*
-+	 * If for any reason at all we couldn't handle the fault,
-+	 * make sure we exit gracefully rather than endlessly redo
-+	 * the fault.
-+	 */
-+	switch (handle_mm_fault(mm, vma, address, write)) {
-+	case 1:
-+		tsk->min_flt++;
-+		break;
-+	case 2:
-+		tsk->maj_flt++;
-+		break;
-+	case 0:
-+		goto do_sigbus;
-+	default:
-+		goto out_of_memory;
-+	}
-+
-+	up_read(&mm->mmap_sem);
-+	return;
-+
-+/*
-+ * Something tried to access memory that isn't in our memory map..
-+ * Fix it, but check if it's kernel or user first..
-+ */
-+bad_area:
-+	up_read(&mm->mmap_sem);
-+
-+bad_area_nosemaphore:
-+	/* User mode accesses just cause a SIGSEGV */
-+	if (error_code & 4) {
-+		if (is_prefetch(regs, address, error_code))
-+			return;
-+
-+		/* Work around K8 erratum #100: K8 in compat mode
-+		   occasionally jumps to illegal addresses >4GB.  We
-+		   catch this here in the page fault handler because
-+		   these addresses are not reachable. Just detect this
-+		   case and return.  Any code segment in LDT is
-+		   compatibility mode. */
-+		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
-+		    (address >> 32))
-+			return;
-+
-+		if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
-+			printk(
-+		       "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
-+					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
-+					tsk->comm, tsk->pid, address, regs->rip,
-+					regs->rsp, error_code);
-+		}
-+       
-+		tsk->thread.cr2 = address;
-+		/* Kernel addresses are always protection faults */
-+		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-+		tsk->thread.trap_no = 14;
-+		info.si_signo = SIGSEGV;
-+		info.si_errno = 0;
-+		/* info.si_code has been set above */
-+		info.si_addr = (void __user *)address;
-+		force_sig_info(SIGSEGV, &info, tsk);
-+		return;
-+	}
-+
-+no_context:
-+	
-+	/* Are we prepared to handle this kernel fault?  */
-+	fixup = search_exception_tables(regs->rip);
-+	if (fixup) {
-+		regs->rip = fixup->fixup;
-+		return;
-+	}
-+
-+	/* 
-+	 * Hall of shame of CPU/BIOS bugs.
-+	 */
-+
-+ 	if (is_prefetch(regs, address, error_code))
-+ 		return;
-+
-+	if (is_errata93(regs, address))
-+		return; 
-+
-+/*
-+ * Oops. The kernel tried to access some bad page. We'll have to
-+ * terminate things with extreme prejudice.
-+ */
-+
-+	oops_begin(); 
-+
-+	if (address < PAGE_SIZE)
-+		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-+	else
-+		printk(KERN_ALERT "Unable to handle kernel paging request");
-+	printk(" at %016lx RIP: \n" KERN_ALERT,address);
-+	printk_address(regs->rip);
-+	printk("\n");
-+	dump_pagetable(address);
-+	__die("Oops", regs, error_code);
-+	/* Executive summary in case the body of the oops scrolled away */
-+	printk(KERN_EMERG "CR2: %016lx\n", address);
-+	oops_end(); 
-+	do_exit(SIGKILL);
-+
-+/*
-+ * We ran out of memory, or some other thing happened to us that made
-+ * us unable to handle the page fault gracefully.
-+ */
-+out_of_memory:
-+	up_read(&mm->mmap_sem);
-+	if (current->pid == 1) { 
-+		yield();
-+		goto again;
-+	}
-+	printk("VM: killing process %s\n", tsk->comm);
-+	if (error_code & 4)
-+		do_exit(SIGKILL);
-+	goto no_context;
-+
-+do_sigbus:
-+	up_read(&mm->mmap_sem);
-+
-+	/* Kernel mode? Handle exceptions or die */
-+	if (!(error_code & 4))
-+		goto no_context;
-+
-+	tsk->thread.cr2 = address;
-+	tsk->thread.error_code = error_code;
-+	tsk->thread.trap_no = 14;
-+	info.si_signo = SIGBUS;
-+	info.si_errno = 0;
-+	info.si_code = BUS_ADRERR;
-+	info.si_addr = (void __user *)address;
-+	force_sig_info(SIGBUS, &info, tsk);
-+	return;
-+}
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/mm/init.c linux-2.6.12-xen/arch/xen/x86_64/mm/init.c
---- pristine-linux-2.6.12/arch/xen/x86_64/mm/init.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/mm/init.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,964 @@
-+/*
-+ *  linux/arch/x86_64/mm/init.c
-+ *
-+ *  Copyright (C) 1995  Linus Torvalds
-+ *  Copyright (C) 2000  Pavel Machek <pavel at suse.cz>
-+ *  Copyright (C) 2002,2003 Andi Kleen <ak at suse.de>
-+ *
-+ *  Jun Nakajima <jun.nakajima at intel.com>
-+ *	Modified for Xen.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/ptrace.h>
-+#include <linux/mman.h>
-+#include <linux/mm.h>
-+#include <linux/swap.h>
-+#include <linux/smp.h>
-+#include <linux/init.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/proc_fs.h>
-+
-+#include <asm/processor.h>
-+#include <asm/system.h>
-+#include <asm/uaccess.h>
-+#include <asm/pgtable.h>
-+#include <asm/pgalloc.h>
-+#include <asm/dma.h>
-+#include <asm/fixmap.h>
-+#include <asm/e820.h>
-+#include <asm/apic.h>
-+#include <asm/tlb.h>
-+#include <asm/mmu_context.h>
-+#include <asm/proto.h>
-+#include <asm/smp.h>
-+
-+#include <asm-xen/features.h>
-+
-+#ifndef Dprintk
-+#define Dprintk(x...)
-+#endif
-+
-+extern unsigned long *contiguous_bitmap;
-+
-+#if defined(CONFIG_SWIOTLB)
-+extern void swiotlb_init(void);
-+#endif
-+
-+extern char _stext[];
-+
-+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-+extern unsigned long start_pfn;
-+
-+/*
-+ * Use this until direct mapping is established, i.e. before __va() is 
-+ * available in init_memory_mapping().
-+ */
-+
-+#define addr_to_page(addr, page)				\
-+	(addr) &= PHYSICAL_PAGE_MASK;				\
-+	(page) = ((unsigned long *) ((unsigned long)		\
-+	(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) +	\
-+	__START_KERNEL_map)))
-+
-+static void early_make_page_readonly(void *va, unsigned int feature)
-+{
-+	unsigned long addr, _va = (unsigned long)va;
-+	pte_t pte, *ptep;
-+	unsigned long *page = (unsigned long *) init_level4_pgt;
-+
-+	if (xen_feature(feature))
-+		return;
-+
-+	addr = (unsigned long) page[pgd_index(_va)];
-+	addr_to_page(addr, page);
-+
-+	addr = page[pud_index(_va)];
-+	addr_to_page(addr, page);
-+
-+	addr = page[pmd_index(_va)];
-+	addr_to_page(addr, page);
-+
-+	ptep = (pte_t *) &page[pte_index(_va)];
-+
-+	pte.pte = ptep->pte & ~_PAGE_RW;
-+	if (HYPERVISOR_update_va_mapping(_va, pte, 0))
-+		BUG();
-+}
-+
-+void make_page_readonly(void *va, unsigned int feature)
-+{
-+	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
-+	unsigned long addr = (unsigned long) va;
-+
-+	if (xen_feature(feature))
-+		return;
-+
-+	pgd = pgd_offset_k(addr);
-+	pud = pud_offset(pgd, addr);
-+	pmd = pmd_offset(pud, addr);
-+	ptep = pte_offset_kernel(pmd, addr);
-+
-+	pte.pte = ptep->pte & ~_PAGE_RW;
-+	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
-+		xen_l1_entry_update(ptep, pte); /* fallback */
-+
-+	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
-+		make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT), feature);
-+}
-+
-+void make_page_writable(void *va, unsigned int feature)
-+{
-+	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
-+	unsigned long addr = (unsigned long) va;
-+
-+	if (xen_feature(feature))
-+		return;
-+
-+	pgd = pgd_offset_k(addr);
-+	pud = pud_offset(pgd, addr);
-+	pmd = pmd_offset(pud, addr);
-+	ptep = pte_offset_kernel(pmd, addr);
-+
-+	pte.pte = ptep->pte | _PAGE_RW;
-+	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
-+		xen_l1_entry_update(ptep, pte); /* fallback */
-+
-+	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
-+		make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT), feature);
-+}
-+
-+void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
-+{
-+	if (xen_feature(feature))
-+		return;
-+
-+	while (nr-- != 0) {
-+		make_page_readonly(va, feature);
-+		va = (void*)((unsigned long)va + PAGE_SIZE);
-+	}
-+}
-+
-+void make_pages_writable(void *va, unsigned nr, unsigned int feature)
-+{
-+	if (xen_feature(feature))
-+		return;
-+
-+	while (nr-- != 0) {
-+		make_page_writable(va, feature);
-+		va = (void*)((unsigned long)va + PAGE_SIZE);
-+	}
-+}
-+
-+/*
-+ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
-+ * physical space so we can cache the place of the first one and move
-+ * around without checking the pgd every time.
-+ */
-+
-+void show_mem(void)
-+{
-+	int i, total = 0, reserved = 0;
-+	int shared = 0, cached = 0;
-+	pg_data_t *pgdat;
-+	struct page *page;
-+
-+	printk("Mem-info:\n");
-+	show_free_areas();
-+	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-+
-+	for_each_pgdat(pgdat) {
-+		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-+			page = pfn_to_page(pgdat->node_start_pfn + i);
-+			total++;
-+			if (PageReserved(page))
-+				reserved++;
-+			else if (PageSwapCache(page))
-+				cached++;
-+			else if (page_count(page))
-+				shared += page_count(page) - 1;
-+		}
-+	}
-+	printk("%d pages of RAM\n", total);
-+	printk("%d reserved pages\n",reserved);
-+	printk("%d pages shared\n",shared);
-+	printk("%d pages swap cached\n",cached);
-+}
-+
-+/* References to section boundaries */
-+
-+extern char _text, _etext, _edata, __bss_start, _end[];
-+extern char __init_begin, __init_end;
-+
-+int after_bootmem;
-+
-+static void *spp_getpage(void)
-+{ 
-+	void *ptr;
-+	if (after_bootmem)
-+		ptr = (void *) get_zeroed_page(GFP_ATOMIC); 
-+	else
-+		ptr = alloc_bootmem_pages(PAGE_SIZE);
-+	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
-+		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
-+
-+	Dprintk("spp_getpage %p\n", ptr);
-+	return ptr;
-+} 
-+
-+#define pgd_offset_u(address) (pgd_t *)(init_level4_user_pgt + pgd_index(address))
-+
-+static inline pud_t *pud_offset_u(unsigned long address)
-+{
-+	pud_t *pud = level3_user_pgt;
-+
-+	return pud + pud_index(address);
-+}
-+
-+static void set_pte_phys(unsigned long vaddr,
-+			 unsigned long phys, pgprot_t prot, int user_mode)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte, new_pte;
-+
-+	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
-+
-+	pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
-+	if (pgd_none(*pgd)) {
-+		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
-+		return;
-+	}
-+	pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
-+	if (pud_none(*pud)) {
-+		pmd = (pmd_t *) spp_getpage(); 
-+		make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+		xen_pmd_pin(__pa(pmd));
-+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-+		if (pmd != pmd_offset(pud, 0)) {
-+			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
-+			return;
-+		}
-+	}
-+	pmd = pmd_offset(pud, vaddr);
-+	if (pmd_none(*pmd)) {
-+		pte = (pte_t *) spp_getpage();
-+		make_page_readonly(pte, XENFEAT_writable_page_tables);
-+		xen_pte_pin(__pa(pte));
-+		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-+		if (pte != pte_offset_kernel(pmd, 0)) {
-+			printk("PAGETABLE BUG #02!\n");
-+			return;
-+		}
-+	}
-+	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
-+
-+	pte = pte_offset_kernel(pmd, vaddr);
-+	if (!pte_none(*pte) &&
-+	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
-+		pte_ERROR(*pte);
-+	set_pte(pte, new_pte);
-+
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
-+}
-+
-+static void set_pte_phys_ma(unsigned long vaddr,
-+			 unsigned long phys, pgprot_t prot)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte, new_pte;
-+
-+	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
-+
-+	pgd = pgd_offset_k(vaddr);
-+	if (pgd_none(*pgd)) {
-+		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
-+		return;
-+	}
-+	pud = pud_offset(pgd, vaddr);
-+	if (pud_none(*pud)) {
-+
-+		pmd = (pmd_t *) spp_getpage(); 
-+		make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+		xen_pmd_pin(__pa(pmd));
-+
-+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-+
-+		if (pmd != pmd_offset(pud, 0)) {
-+			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
-+			return;
-+		}
-+	}
-+	pmd = pmd_offset(pud, vaddr);
-+
-+	if (pmd_none(*pmd)) {
-+		pte = (pte_t *) spp_getpage();
-+		make_page_readonly(pte, XENFEAT_writable_page_tables);
-+		xen_pte_pin(__pa(pte));
-+
-+		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-+		if (pte != pte_offset_kernel(pmd, 0)) {
-+			printk("PAGETABLE BUG #02!\n");
-+			return;
-+		}
-+	}
-+
-+	new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
-+	pte = pte_offset_kernel(pmd, vaddr);
-+
-+	/* 
-+	 * Note that the pte page is already RO, thus we want to use
-+	 * xen_l1_entry_update(), not set_pte().
-+	 */
-+	xen_l1_entry_update(pte, 
-+			    pfn_pte_ma(phys >> PAGE_SHIFT, prot));
-+
-+	/*
-+	 * It's enough to flush this one mapping.
-+	 * (PGE mappings get flushed as well)
-+	 */
-+	__flush_tlb_one(vaddr);
-+}
-+
-+#define SET_FIXMAP_KERNEL 0
-+#define SET_FIXMAP_USER   1
-+
-+/* NOTE: this is meant to be run only at boot */
-+void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-+{
-+	unsigned long address = __fix_to_virt(idx);
-+
-+	if (idx >= __end_of_fixed_addresses) {
-+		printk("Invalid __set_fixmap\n");
-+		return;
-+	}
-+	switch (idx) {
-+	case VSYSCALL_FIRST_PAGE:
-+		set_pte_phys(address, phys, prot, SET_FIXMAP_KERNEL);
-+		break;
-+	default:
-+		set_pte_phys_ma(address, phys, prot);
-+		break;
-+	}
-+}
-+
-+/*
-+ * At this point it only supports the vsyscall area.
-+ */
-+void __set_fixmap_user (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-+{
-+	unsigned long address = __fix_to_virt(idx);
-+
-+	if (idx >= __end_of_fixed_addresses) {
-+		printk("Invalid __set_fixmap\n");
-+		return;
-+	}
-+
-+	set_pte_phys(address, phys, prot, SET_FIXMAP_USER); 
-+}
-+
-+unsigned long __initdata table_start, tables_space; 
-+
-+unsigned long get_machine_pfn(unsigned long addr)
-+{
-+	pud_t* pud = pud_offset_k(addr);
-+	pmd_t* pmd = pmd_offset(pud, addr);
-+	pte_t *pte = pte_offset_kernel(pmd, addr);
-+
-+	return pte_mfn(*pte);
-+} 
-+
-+static __init void *alloc_static_page(unsigned long *phys)
-+{
-+	unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
-+	*phys = start_pfn << PAGE_SHIFT;
-+	start_pfn++;
-+	memset((void *)va, 0, PAGE_SIZE);
-+	return (void *)va;
-+} 
-+
-+#define PTE_SIZE PAGE_SIZE
-+
-+static inline void __set_pte(pte_t *dst, pte_t val)
-+{
-+	*dst = val;
-+}
-+
-+static inline int make_readonly(unsigned long paddr)
-+{
-+	int readonly = 0;
-+
-+	/* Make old and new page tables read-only. */
-+	if (!xen_feature(XENFEAT_writable_page_tables)
-+	    && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
-+	    && (paddr < ((table_start << PAGE_SHIFT) + tables_space)))
-+		readonly = 1;
-+	/*
-+	 * No need for writable mapping of kernel image. This also ensures that
-+	 * page and descriptor tables embedded inside don't have writable
-+	 * mappings. 
-+	 */
-+	if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end)))
-+		readonly = 1;
-+
-+	return readonly;
-+}
-+
-+static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
-+{ 
-+	long i, j, k; 
-+	unsigned long paddr;
-+
-+	i = pud_index(address);
-+	pud = pud + i;
-+	for (; i < PTRS_PER_PUD; pud++, i++) {
-+		unsigned long pmd_phys;
-+		pmd_t *pmd;
-+
-+		paddr = address + i*PUD_SIZE;
-+		if (paddr >= end) { 
-+			for (; i < PTRS_PER_PUD; i++, pud++) 
-+				set_pud(pud, __pud(0)); 
-+			break;
-+		} 
-+
-+		pmd = alloc_static_page(&pmd_phys);
-+		early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
-+		xen_pmd_pin(pmd_phys);
-+		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-+      		for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
-+			unsigned long pte_phys;
-+			pte_t *pte, *pte_save;
-+
-+			if (paddr >= end) { 
-+				for (; j < PTRS_PER_PMD; j++, pmd++)
-+					set_pmd(pmd,  __pmd(0)); 
-+				break;
-+			}
-+			pte = alloc_static_page(&pte_phys);
-+			pte_save = pte;
-+			for (k = 0; k < PTRS_PER_PTE; pte++, k++, paddr += PTE_SIZE) {
-+				if ((paddr >= end) ||
-+				    ((paddr >> PAGE_SHIFT) >=
-+				     xen_start_info->nr_pages)) { 
-+					__set_pte(pte, __pte(0)); 
-+					continue;
-+				}
-+				if (make_readonly(paddr)) {
-+					__set_pte(pte, 
-+						__pte(paddr | (_KERNPG_TABLE & ~_PAGE_RW)));
-+					continue;
-+				}
-+				__set_pte(pte, __pte(paddr | _KERNPG_TABLE));
-+			}
-+			pte = pte_save;
-+			early_make_page_readonly(
-+				pte, XENFEAT_writable_page_tables);
-+			xen_pte_pin(pte_phys);
-+			set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
-+		}
-+	}
-+	__flush_tlb();
-+} 
-+
-+static void __init find_early_table_space(unsigned long end)
-+{
-+	unsigned long puds, pmds, ptes; 
-+
-+	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-+	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-+	ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
-+
-+	tables_space =
-+		round_up(puds * 8, PAGE_SIZE) + 
-+		round_up(pmds * 8, PAGE_SIZE) + 
-+		round_up(ptes * 8, PAGE_SIZE); 
-+}
-+
-+void __init xen_init_pt(void)
-+{
-+	unsigned long addr, *page;
-+	int i;
-+
-+	for (i = 0; i < NR_CPUS; i++)
-+		per_cpu(cur_pgd, i) = init_mm.pgd;
-+
-+	memset((void *)init_level4_pgt,   0, PAGE_SIZE);
-+	memset((void *)level3_kernel_pgt, 0, PAGE_SIZE);
-+	memset((void *)level2_kernel_pgt, 0, PAGE_SIZE);
-+
-+	/* Find the initial pte page that was built for us. */
-+	page = (unsigned long *)xen_start_info->pt_base;
-+	addr = page[pgd_index(__START_KERNEL_map)];
-+	addr_to_page(addr, page);
-+	addr = page[pud_index(__START_KERNEL_map)];
-+	addr_to_page(addr, page);
-+
-+	/* Construct mapping of initial pte page in our own directories. */
-+	init_level4_pgt[pgd_index(__START_KERNEL_map)] = 
-+		mk_kernel_pgd(__pa_symbol(level3_kernel_pgt));
-+	level3_kernel_pgt[pud_index(__START_KERNEL_map)] = 
-+		__pud(__pa_symbol(level2_kernel_pgt) |
-+		      _KERNPG_TABLE | _PAGE_USER);
-+	memcpy((void *)level2_kernel_pgt, page, PAGE_SIZE);
-+
-+	early_make_page_readonly(init_level4_pgt,
-+				 XENFEAT_writable_page_tables);
-+	early_make_page_readonly(init_level4_user_pgt,
-+				 XENFEAT_writable_page_tables);
-+	early_make_page_readonly(level3_kernel_pgt,
-+				 XENFEAT_writable_page_tables);
-+	early_make_page_readonly(level3_user_pgt,
-+				 XENFEAT_writable_page_tables);
-+	early_make_page_readonly(level2_kernel_pgt,
-+				 XENFEAT_writable_page_tables);
-+
-+	xen_pgd_pin(__pa_symbol(init_level4_pgt));
-+	xen_pgd_pin(__pa_symbol(init_level4_user_pgt));
-+	xen_pud_pin(__pa_symbol(level3_kernel_pgt));
-+	xen_pud_pin(__pa_symbol(level3_user_pgt));
-+	xen_pmd_pin(__pa_symbol(level2_kernel_pgt));
-+
-+	set_pgd((pgd_t *)(init_level4_user_pgt + 511), 
-+		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
-+}
-+
-+void __init extend_init_mapping(void) 
-+{
-+	unsigned long va = __START_KERNEL_map;
-+	unsigned long phys, addr, *pte_page;
-+	pmd_t *pmd;
-+	pte_t *pte, new_pte;
-+	unsigned long *page = (unsigned long *)init_level4_pgt;
-+
-+	addr = page[pgd_index(va)];
-+	addr_to_page(addr, page);
-+	addr = page[pud_index(va)];
-+	addr_to_page(addr, page);
-+
-+	/* Kill mapping of low 1MB. */
-+	while (va < (unsigned long)&_text) {
-+		HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+		va += PAGE_SIZE;
-+	}
-+
-+	/* Ensure init mappings cover kernel text/data and initial tables. */
-+	while (va < (__START_KERNEL_map
-+		     + (start_pfn << PAGE_SHIFT)
-+		     + tables_space)) {
-+		pmd = (pmd_t *)&page[pmd_index(va)];
-+		if (pmd_none(*pmd)) {
-+			pte_page = alloc_static_page(&phys);
-+			early_make_page_readonly(
-+				pte_page, XENFEAT_writable_page_tables);
-+			xen_pte_pin(phys);
-+			set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
-+		} else {
-+			addr = page[pmd_index(va)];
-+			addr_to_page(addr, pte_page);
-+		}
-+		pte = (pte_t *)&pte_page[pte_index(va)];
-+		if (pte_none(*pte)) {
-+			new_pte = pfn_pte(
-+				(va - __START_KERNEL_map) >> PAGE_SHIFT, 
-+				__pgprot(_KERNPG_TABLE | _PAGE_USER));
-+			xen_l1_entry_update(pte, new_pte);
-+		}
-+		va += PAGE_SIZE;
-+	}
-+
-+	/* Finally, blow away any spurious initial mappings. */
-+	while (1) {
-+		pmd = (pmd_t *)&page[pmd_index(va)];
-+		if (pmd_none(*pmd))
-+			break;
-+		HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
-+		va += PAGE_SIZE;
-+	}
-+}
-+
-+/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
-+   This runs before bootmem is initialized and gets pages directly from the
-+   physical memory. To access them, they are temporarily mapped. */
-+void __init init_memory_mapping(unsigned long start, unsigned long end)
-+{ 
-+	unsigned long next; 
-+
-+	Dprintk("init_memory_mapping\n");
-+
-+	find_early_table_space(end);
-+	extend_init_mapping();
-+
-+	table_start = start_pfn;
-+
-+	start = (unsigned long)__va(start);
-+	end = (unsigned long)__va(end);
-+
-+	for (; start < end; start = next) {
-+		unsigned long pud_phys; 
-+		pud_t *pud = alloc_static_page(&pud_phys);
-+		early_make_page_readonly(pud, XENFEAT_writable_page_tables);
-+		xen_pud_pin(pud_phys);
-+		next = start + PGDIR_SIZE;
-+		if (next > end) 
-+			next = end; 
-+		phys_pud_init(pud, __pa(start), __pa(next));
-+		set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
-+	}
-+
-+	printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
-+	       __pa(end), table_start<<PAGE_SHIFT, start_pfn<<PAGE_SHIFT);
-+
-+	BUG_ON(start_pfn != (table_start + (tables_space >> PAGE_SHIFT)));
-+
-+	__flush_tlb_all();
-+}
-+
-+extern struct x8664_pda cpu_pda[NR_CPUS];
-+
-+void zap_low_mappings(void)
-+{
-+	/* this is not required for Xen */
-+#if 0
-+	swap_low_mappings();
-+#endif
-+}
-+
-+#ifndef CONFIG_DISCONTIGMEM
-+void __init paging_init(void)
-+{
-+	{
-+		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
-+		/*	unsigned int max_dma; */
-+		/* max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; */
-+		/* if (end_pfn < max_dma) */
-+			zones_size[ZONE_DMA] = end_pfn;
-+#if 0
-+		else {
-+			zones_size[ZONE_DMA] = max_dma;
-+			zones_size[ZONE_NORMAL] = end_pfn - max_dma;
-+		}
-+#endif
-+		free_area_init(zones_size);
-+	}
-+
-+	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
-+	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
-+
-+	memset(empty_zero_page, 0, sizeof(empty_zero_page));
-+	init_mm.context.pinned = 1;
-+
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+	{
-+		int i;
-+		/* Setup mapping of lower 1st MB */
-+		for (i = 0; i < NR_FIX_ISAMAPS; i++)
-+			if (xen_start_info->flags & SIF_PRIVILEGED)
-+				set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
-+			else
-+				__set_fixmap(FIX_ISAMAP_BEGIN - i,
-+					     virt_to_mfn(empty_zero_page) << PAGE_SHIFT,
-+					     PAGE_KERNEL_RO);
-+	}
-+#endif
-+
-+}
-+#endif
-+
-+/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
-+   from the CPU leading to inconsistent cache lines. address and size
-+   must be aligned to 2MB boundaries. 
-+   Does nothing when the mapping doesn't exist. */
-+void __init clear_kernel_mapping(unsigned long address, unsigned long size) 
-+{
-+	unsigned long end = address + size;
-+
-+	BUG_ON(address & ~LARGE_PAGE_MASK);
-+	BUG_ON(size & ~LARGE_PAGE_MASK); 
-+	
-+	for (; address < end; address += LARGE_PAGE_SIZE) { 
-+		pgd_t *pgd = pgd_offset_k(address);
-+		pud_t *pud;
-+		pmd_t *pmd;
-+		if (pgd_none(*pgd))
-+			continue;
-+		pud = pud_offset(pgd, address);
-+		if (pud_none(*pud))
-+			continue; 
-+		pmd = pmd_offset(pud, address);
-+		if (!pmd || pmd_none(*pmd))
-+			continue; 
-+		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) { 
-+			/* Could handle this, but it should not happen currently. */
-+			printk(KERN_ERR 
-+	       "clear_kernel_mapping: mapping has been split. will leak memory\n"); 
-+			pmd_ERROR(*pmd); 
-+		}
-+		set_pmd(pmd, __pmd(0)); 		
-+	}
-+	__flush_tlb_all();
-+} 
-+
-+static inline int page_is_ram (unsigned long pagenr)
-+{
-+	return 1;
-+}
-+
-+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
-+			 kcore_vsyscall;
-+
-+void __init mem_init(void)
-+{
-+	int codesize, reservedpages, datasize, initsize;
-+	int tmp;
-+
-+	contiguous_bitmap = alloc_bootmem_low_pages(
-+		(end_pfn + 2*BITS_PER_LONG) >> 3);
-+	BUG_ON(!contiguous_bitmap);
-+	memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
-+
-+#if defined(CONFIG_SWIOTLB)
-+	swiotlb_init();	
-+#endif
-+
-+	/* How many end-of-memory variables you have, grandma! */
-+	max_low_pfn = end_pfn;
-+	max_pfn = end_pfn;
-+	num_physpages = end_pfn;
-+	high_memory = (void *) __va(end_pfn * PAGE_SIZE);
-+
-+	/* clear the zero-page */
-+	memset(empty_zero_page, 0, PAGE_SIZE);
-+
-+	reservedpages = 0;
-+
-+	/* this will put all low memory onto the freelists */
-+#ifdef CONFIG_DISCONTIGMEM
-+	totalram_pages += numa_free_all_bootmem();
-+	tmp = 0;
-+	/* should count reserved pages here for all nodes */ 
-+#else
-+	max_mapnr = end_pfn;
-+	if (!mem_map) BUG();
-+
-+	totalram_pages += free_all_bootmem();
-+
-+	for (tmp = 0; tmp < end_pfn; tmp++)
-+		/*
-+		 * Only count reserved RAM pages
-+		 */
-+		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
-+			reservedpages++;
-+#endif
-+
-+	after_bootmem = 1;
-+
-+	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
-+	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-+	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
-+
-+	/* Register memory areas for /proc/kcore */
-+	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
-+	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, 
-+		   VMALLOC_END-VMALLOC_START);
-+	kclist_add(&kcore_kernel, &_stext, _end - _stext);
-+	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
-+	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, 
-+				 VSYSCALL_END - VSYSCALL_START);
-+
-+	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
-+		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-+		end_pfn << (PAGE_SHIFT-10),
-+		codesize >> 10,
-+		reservedpages << (PAGE_SHIFT-10),
-+		datasize >> 10,
-+		initsize >> 10);
-+
-+	/*
-+	 * Subtle. SMP is doing its boot stuff late (because it has to
-+	 * fork idle threads) - but it also needs low mappings for the
-+	 * protected-mode entry to work. We zap these entries only after
-+	 * the WP-bit has been tested.
-+	 */
-+#ifndef CONFIG_SMP
-+	zap_low_mappings();
-+#endif
-+}
-+
-+extern char __initdata_begin[], __initdata_end[];
-+
-+void free_initmem(void)
-+{
-+#ifdef __DO_LATER__
-+	/*
-+	 * Some pages can be pinned, but some are not. Unpinning such pages 
-+	 * triggers BUG(). 
-+	 */
-+	unsigned long addr;
-+
-+	addr = (unsigned long)(&__init_begin);
-+	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-+		ClearPageReserved(virt_to_page(addr));
-+		set_page_count(virt_to_page(addr), 1);
-+		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE); 
-+		xen_pte_unpin(__pa(addr));
-+		make_page_writable(
-+			__va(__pa(addr)), XENFEAT_writable_page_tables);
-+		/*
-+		 * Make pages from __PAGE_OFFSET address as well
-+		 */
-+		make_page_writable(
-+			(void *)addr, XENFEAT_writable_page_tables);
-+		free_page(addr);
-+		totalram_pages++;
-+	}
-+	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
-+	printk ("Freeing unused kernel memory: %luk freed\n", (&__init_end - &__init_begin) >> 10);
-+#endif
-+}
-+
-+#ifdef CONFIG_BLK_DEV_INITRD
-+void free_initrd_mem(unsigned long start, unsigned long end)
-+{
-+	if (start < (unsigned long)&_end)
-+		return;
-+	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-+	for (; start < end; start += PAGE_SIZE) {
-+		ClearPageReserved(virt_to_page(start));
-+		set_page_count(virt_to_page(start), 1);
-+		free_page(start);
-+		totalram_pages++;
-+	}
-+}
-+#endif
-+
-+void __init reserve_bootmem_generic(unsigned long phys, unsigned len) 
-+{ 
-+	/* Should check here against the e820 map to avoid double free */ 
-+#ifdef CONFIG_DISCONTIGMEM
-+	int nid = phys_to_nid(phys);
-+  	reserve_bootmem_node(NODE_DATA(nid), phys, len);
-+#else       		
-+	reserve_bootmem(phys, len);    
-+#endif
-+}
-+
-+int kern_addr_valid(unsigned long addr) 
-+{ 
-+	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+
-+	if (above != 0 && above != -1UL)
-+		return 0; 
-+	
-+	pgd = pgd_offset_k(addr);
-+	if (pgd_none(*pgd))
-+		return 0;
-+
-+	pud = pud_offset_k(addr);
-+	if (pud_none(*pud))
-+		return 0; 
-+
-+	pmd = pmd_offset(pud, addr);
-+	if (pmd_none(*pmd))
-+		return 0;
-+	if (pmd_large(*pmd))
-+		return pfn_valid(pmd_pfn(*pmd));
-+
-+	pte = pte_offset_kernel(pmd, addr);
-+	if (pte_none(*pte))
-+		return 0;
-+	return pfn_valid(pte_pfn(*pte));
-+}
-+
-+#ifdef CONFIG_SYSCTL
-+#include <linux/sysctl.h>
-+
-+extern int exception_trace, page_fault_trace;
-+
-+static ctl_table debug_table2[] = {
-+	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
-+	  proc_dointvec },
-+#ifdef CONFIG_CHECKING
-+	{ 100, "page-fault-trace", &page_fault_trace, sizeof(int), 0644, NULL,
-+	  proc_dointvec },
-+#endif
-+	{ 0, }
-+}; 
-+
-+static ctl_table debug_root_table2[] = { 
-+	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555, 
-+	   .child = debug_table2 }, 
-+	{ 0 }, 
-+}; 
-+
-+static __init int x8664_sysctl_init(void)
-+{ 
-+	register_sysctl_table(debug_root_table2, 1);
-+	return 0;
-+}
-+__initcall(x8664_sysctl_init);
-+#endif
-+
-+/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
-+   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
-+   not need special handling anymore. */
-+
-+static struct vm_area_struct gate_vma = {
-+	.vm_start = VSYSCALL_START,
-+	.vm_end = VSYSCALL_END,
-+	.vm_page_prot = PAGE_READONLY
-+};
-+
-+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
-+{
-+#ifdef CONFIG_IA32_EMULATION
-+	if (test_tsk_thread_flag(tsk, TIF_IA32))
-+		return NULL;
-+#endif
-+	return &gate_vma;
-+}
-+
-+int in_gate_area(struct task_struct *task, unsigned long addr)
-+{
-+	struct vm_area_struct *vma = get_gate_vma(task);
-+	if (!vma)
-+		return 0;
-+	return (addr >= vma->vm_start) && (addr < vma->vm_end);
-+}
-+
-+/* Use this when you have no reliable task/vma, typically from interrupt
-+ * context.  It is less reliable than using the task's vma and may give
-+ * false positives.
-+ */
-+int in_gate_area_no_task(unsigned long addr)
-+{
-+	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/mm/Makefile linux-2.6.12-xen/arch/xen/x86_64/mm/Makefile
---- pristine-linux-2.6.12/arch/xen/x86_64/mm/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/mm/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,31 @@
-+#
-+# Makefile for the linux x86_64-specific parts of the memory manager.
-+#
-+
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+
-+CFLAGS	+= -Iarch/$(XENARCH)/mm
-+
-+obj-y	:= init.o fault.o pageattr.o
-+c-obj-y	:= extable.o
-+
-+i386-obj-y := hypervisor.o ioremap.o
-+
-+#obj-y	 := init.o fault.o ioremap.o extable.o pageattr.o
-+#c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-+c-obj-$(CONFIG_DISCONTIGMEM) += numa.o
-+c-obj-$(CONFIG_K8_NUMA) += k8topology.o
-+
-+hugetlbpage-y = ../../../i386/mm/hugetlbpage.o
-+
-+c-link	:=
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-+	@ln -fsn $(srctree)/arch/x86_64/mm/$(notdir $@) $@
-+
-+$(patsubst %.o,$(obj)/%.c,$(i386-obj-y)):
-+	ln -fsn $(srctree)/arch/xen/i386/mm/$(notdir $@) $@
-+
-+obj-y	+= $(c-obj-y) $(i386-obj-y)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link) $(i386-obj-y))
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/mm/pageattr.c linux-2.6.12-xen/arch/xen/x86_64/mm/pageattr.c
---- pristine-linux-2.6.12/arch/xen/x86_64/mm/pageattr.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/mm/pageattr.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,382 @@
-+/* 
-+ * Copyright 2002 Andi Kleen, SuSE Labs. 
-+ * Thanks to Ben LaHaise for precious feedback.
-+ */ 
-+
-+#include <linux/config.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/highmem.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <asm/uaccess.h>
-+#include <asm/processor.h>
-+#include <asm/tlbflush.h>
-+#include <asm/io.h>
-+
-+#ifdef CONFIG_XEN
-+#include <asm/pgalloc.h>
-+#include <asm/mmu_context.h>
-+
-+LIST_HEAD(mm_unpinned);
-+DEFINE_SPINLOCK(mm_unpinned_lock);
-+
-+static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
-+{
-+	struct page *page = virt_to_page(pt);
-+	unsigned long pfn = page_to_pfn(page);
-+
-+	BUG_ON(HYPERVISOR_update_va_mapping(
-+		       (unsigned long)__va(pfn << PAGE_SHIFT),
-+		       pfn_pte(pfn, flags), 0));
-+}
-+
-+static void mm_walk(struct mm_struct *mm, pgprot_t flags)
-+{
-+	pgd_t       *pgd;
-+	pud_t       *pud;
-+	pmd_t       *pmd;
-+	pte_t       *pte;
-+	int          g,u,m;
-+
-+	pgd = mm->pgd;
-+	for (g = 0; g <= USER_PTRS_PER_PGD; g++, pgd++) {
-+		if (pgd_none(*pgd))
-+			continue;
-+		pud = pud_offset(pgd, 0);
-+		if (PTRS_PER_PUD > 1) /* not folded */ 
-+			mm_walk_set_prot(pud,flags);
-+		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
-+			if (pud_none(*pud))
-+				continue;
-+			pmd = pmd_offset(pud, 0);
-+			if (PTRS_PER_PMD > 1) /* not folded */ 
-+				mm_walk_set_prot(pmd,flags);
-+			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
-+				if (pmd_none(*pmd))
-+					continue;
-+				pte = pte_offset_kernel(pmd,0);
-+				mm_walk_set_prot(pte,flags);
-+			}
-+		}
-+	}
-+}
-+
-+void mm_pin(struct mm_struct *mm)
-+{
-+	spin_lock(&mm->page_table_lock);
-+
-+	mm_walk(mm, PAGE_KERNEL_RO);
-+	BUG_ON(HYPERVISOR_update_va_mapping(
-+		       (unsigned long)mm->pgd,
-+		       pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
-+		       UVMF_TLB_FLUSH));
-+	BUG_ON(HYPERVISOR_update_va_mapping(
-+		       (unsigned long)__user_pgd(mm->pgd),
-+		       pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL_RO),
-+		       UVMF_TLB_FLUSH));
-+	xen_pgd_pin(__pa(mm->pgd)); /* kernel */
-+	xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
-+	mm->context.pinned = 1;
-+	spin_lock(&mm_unpinned_lock);
-+	list_del(&mm->context.unpinned);
-+	spin_unlock(&mm_unpinned_lock);
-+
-+	spin_unlock(&mm->page_table_lock);
-+}
-+
-+void mm_unpin(struct mm_struct *mm)
-+{
-+	spin_lock(&mm->page_table_lock);
-+
-+	xen_pgd_unpin(__pa(mm->pgd));
-+	xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
-+	BUG_ON(HYPERVISOR_update_va_mapping(
-+		       (unsigned long)mm->pgd,
-+		       pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
-+	BUG_ON(HYPERVISOR_update_va_mapping(
-+		       (unsigned long)__user_pgd(mm->pgd),
-+		       pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL), 0));
-+	mm_walk(mm, PAGE_KERNEL);
-+	xen_tlb_flush();
-+	mm->context.pinned = 0;
-+	spin_lock(&mm_unpinned_lock);
-+	list_add(&mm->context.unpinned, &mm_unpinned);
-+	spin_unlock(&mm_unpinned_lock);
-+
-+	spin_unlock(&mm->page_table_lock);
-+}
-+
-+void mm_pin_all(void)
-+{
-+	while (!list_empty(&mm_unpinned))	
-+		mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
-+				  context.unpinned));
-+}
-+
-+void _arch_exit_mmap(struct mm_struct *mm)
-+{
-+    struct task_struct *tsk = current;
-+
-+    task_lock(tsk);
-+
-+    /*
-+     * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
-+     * *much* faster this way, as no tlb flushes means bigger wrpt batches.
-+     */
-+    if ( tsk->active_mm == mm )
-+    {
-+        tsk->active_mm = &init_mm;
-+        atomic_inc(&init_mm.mm_count);
-+
-+        switch_mm(mm, &init_mm, tsk);
-+
-+        atomic_dec(&mm->mm_count);
-+        BUG_ON(atomic_read(&mm->mm_count) == 0);
-+    }
-+
-+    task_unlock(tsk);
-+
-+    if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) )
-+        mm_unpin(mm);
-+}
-+
-+void pte_free(struct page *pte)
-+{
-+	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
-+
-+	if (!pte_write(*virt_to_ptep(va)))
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
-+	__free_page(pte);
-+}
-+#endif	/* CONFIG_XEN */
-+
-+static inline pte_t *lookup_address(unsigned long address) 
-+{ 
-+	pgd_t *pgd = pgd_offset_k(address);
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t *pte;
-+	if (pgd_none(*pgd))
-+		return NULL;
-+	pud = pud_offset(pgd, address);
-+	if (!pud_present(*pud))
-+		return NULL; 
-+	pmd = pmd_offset(pud, address);
-+	if (!pmd_present(*pmd))
-+		return NULL; 
-+	if (pmd_large(*pmd))
-+		return (pte_t *)pmd;
-+	pte = pte_offset_kernel(pmd, address);
-+	if (pte && !pte_present(*pte))
-+		pte = NULL; 
-+	return pte;
-+} 
-+
-+static struct page *split_large_page(unsigned long address, pgprot_t prot,
-+				     pgprot_t ref_prot)
-+{ 
-+	int i; 
-+	unsigned long addr;
-+	struct page *base = alloc_pages(GFP_KERNEL, 0);
-+	pte_t *pbase;
-+	if (!base) 
-+		return NULL;
-+	address = __pa(address);
-+	addr = address & LARGE_PAGE_MASK; 
-+	pbase = (pte_t *)page_address(base);
-+	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
-+		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
-+				   addr == address ? prot : ref_prot);
-+	}
-+	return base;
-+} 
-+
-+
-+static void flush_kernel_map(void *address) 
-+{
-+	if (0 && address && cpu_has_clflush) {
-+		/* is this worth it? */ 
-+		int i;
-+		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 
-+			asm volatile("clflush (%0)" :: "r" (address + i)); 
-+	} else
-+		asm volatile("wbinvd":::"memory"); 
-+	if (address)
-+		__flush_tlb_one(address);
-+	else
-+		__flush_tlb_all();
-+}
-+
-+
-+static inline void flush_map(unsigned long address)
-+{	
-+	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
-+}
-+
-+struct deferred_page { 
-+	struct deferred_page *next; 
-+	struct page *fpage;
-+	unsigned long address;
-+}; 
-+static struct deferred_page *df_list; /* protected by init_mm.mmap_sem */
-+
-+static inline void save_page(unsigned long address, struct page *fpage)
-+{
-+	struct deferred_page *df;
-+	df = kmalloc(sizeof(struct deferred_page), GFP_KERNEL); 
-+	if (!df) {
-+		flush_map(address);
-+		__free_page(fpage);
-+	} else { 
-+		df->next = df_list;
-+		df->fpage = fpage;
-+		df->address = address;
-+		df_list = df;
-+	} 			
-+}
-+
-+/* 
-+ * No more special protections in this 2/4MB area - revert to a
-+ * large page again. 
-+ */
-+static void revert_page(unsigned long address, pgprot_t ref_prot)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t large_pte;
-+
-+	pgd = pgd_offset_k(address);
-+	BUG_ON(pgd_none(*pgd));
-+	pud = pud_offset(pgd,address);
-+	BUG_ON(pud_none(*pud));
-+	pmd = pmd_offset(pud, address);
-+	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-+	pgprot_val(ref_prot) |= _PAGE_PSE;
-+	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
-+	set_pte((pte_t *)pmd, large_pte);
-+}      
-+
-+static int
-+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
-+				   pgprot_t ref_prot)
-+{ 
-+	pte_t *kpte; 
-+	struct page *kpte_page;
-+	unsigned kpte_flags;
-+	kpte = lookup_address(address);
-+	if (!kpte) return 0;
-+	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
-+	kpte_flags = pte_val(*kpte); 
-+	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
-+		if ((kpte_flags & _PAGE_PSE) == 0) { 
-+			set_pte(kpte, pfn_pte(pfn, prot));
-+		} else {
-+ 			/*
-+ 			 * split_large_page will take the reference for this change_page_attr
-+ 			 * on the split page.
-+ 			 */
-+			struct page *split = split_large_page(address, prot, ref_prot); 
-+			if (!split)
-+				return -ENOMEM;
-+			set_pte(kpte,mk_pte(split, ref_prot));
-+			kpte_page = split;
-+		}	
-+		get_page(kpte_page);
-+	} else if ((kpte_flags & _PAGE_PSE) == 0) { 
-+		set_pte(kpte, pfn_pte(pfn, ref_prot));
-+		__put_page(kpte_page);
-+	} else
-+		BUG();
-+
-+	/* on x86-64 the direct mapping set at boot is not using 4k pages */
-+	/*
-+	 * ..., but the XEN guest kernels (currently) do:
-+	 * If the pte was reserved, it means it was created at boot
-+	 * time (not via split_large_page) and in turn we must not
-+	 * replace it with a large page.
-+	 */
-+#ifndef CONFIG_XEN
-+ 	BUG_ON(PageReserved(kpte_page));
-+#else
-+	if (!PageReserved(kpte_page))
-+#endif
-+		switch (page_count(kpte_page)) {
-+		case 1:
-+			save_page(address, kpte_page); 		     
-+			revert_page(address, ref_prot);
-+			break;
-+		case 0:
-+			BUG(); /* memleak and failed 2M page regeneration */
-+	 	}
-+	return 0;
-+} 
-+
-+/*
-+ * Change the page attributes of a page in the linear mapping.
-+ *
-+ * This should be used when a page is mapped with a different caching policy
-+ * than write-back somewhere - some CPUs do not like it when mappings with
-+ * different caching policies exist. This changes the page attributes of the
-+ * in kernel linear mapping too.
-+ * 
-+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
-+ * This function only deals with the kernel linear map.
-+ * 
-+ * Caller must call global_flush_tlb() after this.
-+ */
-+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
-+{
-+	int err = 0; 
-+	int i; 
-+
-+	down_write(&init_mm.mmap_sem);
-+	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-+		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
-+
-+		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
-+		if (err) 
-+			break; 
-+		/* Handle kernel mapping too which aliases part of the
-+		 * lowmem */
-+		if (__pa(address) < KERNEL_TEXT_SIZE) {
-+			unsigned long addr2;
-+			pgprot_t prot2 = prot;
-+			addr2 = __START_KERNEL_map + __pa(address);
-+ 			pgprot_val(prot2) &= ~_PAGE_NX;
-+			err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
-+		} 
-+	} 	
-+	up_write(&init_mm.mmap_sem); 
-+	return err;
-+}
-+
-+/* Don't call this for MMIO areas that may not have a mem_map entry */
-+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-+{
-+	unsigned long addr = (unsigned long)page_address(page);
-+	return change_page_attr_addr(addr, numpages, prot);
-+}
-+
-+void global_flush_tlb(void)
-+{ 
-+	struct deferred_page *df, *next_df;
-+
-+	down_read(&init_mm.mmap_sem);
-+	df = xchg(&df_list, NULL);
-+	up_read(&init_mm.mmap_sem);
-+	if (!df)
-+		return;
-+	flush_map((df && !df->next) ? df->address : 0);
-+	for (; df; df = next_df) { 
-+		next_df = df->next;
-+		if (df->fpage) 
-+			__free_page(df->fpage);
-+		kfree(df);
-+	} 
-+} 
-+
-+EXPORT_SYMBOL(change_page_attr);
-+EXPORT_SYMBOL(global_flush_tlb);
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/pci/Makefile linux-2.6.12-xen/arch/xen/x86_64/pci/Makefile
---- pristine-linux-2.6.12/arch/xen/x86_64/pci/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/pci/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,39 @@
-+#
-+# Makefile for X86_64 specific PCI routines
-+#
-+# Reuse the i386 PCI subsystem
-+#
-+XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
-+CFLAGS	+= -Iarch/$(XENARCH)/pci
-+
-+CFLAGS += -Iarch/i386/pci
-+
-+c-xen-obj-y		:= i386.o
-+c-i386-obj-y		+= fixup.o
-+c-i386-obj-$(CONFIG_ACPI_PCI)	+= acpi.o
-+c-i386-obj-y			+= legacy.o common.o
-+c-i386-obj-$(CONFIG_PCI_DIRECT)+= direct.o
-+c-xen-obj-y		+= irq.o
-+# mmconfig has a 64bit special
-+c-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
-+
-+c-obj-$(CONFIG_NUMA)	+= k8-bus.o
-+
-+c-link	:=
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-xen-obj-y)):
-+	@ln -fsn $(srctree)/arch/xen/i386/pci/$(notdir $@) $@
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)):
-+	@ln -fsn $(srctree)/arch/x86_64/pci/$(notdir $@) $@
-+
-+$(patsubst %.o,$(obj)/%.c,$(c-i386-obj-y)):
-+	@ln -fsn $(srctree)/arch/i386/pci/$(notdir $@) $@
-+
-+# Make sure irq.o gets linked in before common.o
-+obj-y	+= $(patsubst common.o,$(c-xen-obj-y) common.o,$(c-i386-obj-y))
-+obj-y	+= $(c-obj-y)
-+
-+clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
-+clean-files += $(patsubst %.o,%.c,$(c-i386-obj-y) $(c-i386-obj-))
-+clean-files += $(patsubst %.o,%.c,$(c-xen-obj-y) $(c-xen-obj-))
-diff -Nurp pristine-linux-2.6.12/arch/xen/x86_64/pci/Makefile-BUS linux-2.6.12-xen/arch/xen/x86_64/pci/Makefile-BUS
---- pristine-linux-2.6.12/arch/xen/x86_64/pci/Makefile-BUS	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/arch/xen/x86_64/pci/Makefile-BUS	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,22 @@
-+#
-+# Makefile for X86_64 specific PCI routines
-+#
-+# Reuse the i386 PCI subsystem
-+#
-+CFLAGS += -I arch/i386/pci
-+
-+obj-y		:= i386.o
-+obj-$(CONFIG_PCI_DIRECT)+= direct.o
-+obj-y		+= fixup.o
-+obj-$(CONFIG_ACPI_PCI)	+= acpi.o
-+obj-y			+= legacy.o irq.o common.o
-+# mmconfig has a 64bit special
-+obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
-+
-+direct-y += ../../i386/pci/direct.o
-+acpi-y   += ../../i386/pci/acpi.o
-+legacy-y += ../../i386/pci/legacy.o
-+irq-y    += ../../i386/pci/irq.o
-+common-y += ../../i386/pci/common.o
-+fixup-y  += ../../i386/pci/fixup.o
-+i386-y  += ../../i386/pci/i386.o
-diff -Nurp pristine-linux-2.6.12/.config linux-2.6.12-xen/.config
---- pristine-linux-2.6.12/.config	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/.config	2006-02-25 00:12:57.401432383 +0100
-@@ -0,0 +1,2966 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12.6-xen
-+# Sat Feb 25 00:12:55 2006
-+#
-+CONFIG_XEN=y
-+CONFIG_ARCH_XEN=y
-+CONFIG_NO_IDLE_HZ=y
-+
-+#
-+# XEN
-+#
-+CONFIG_XEN_PRIVILEGED_GUEST=y
-+CONFIG_XEN_PHYSDEV_ACCESS=y
-+CONFIG_XEN_BLKDEV_BACKEND=y
-+# CONFIG_XEN_BLKDEV_TAP_BE is not set
-+CONFIG_XEN_NETDEV_BACKEND=y
-+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
-+# CONFIG_XEN_TPMDEV_FRONTEND is not set
-+# CONFIG_XEN_TPMDEV_BACKEND is not set
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_XEN_NETDEV_FRONTEND=y
-+# CONFIG_XEN_BLKDEV_TAP is not set
-+# CONFIG_XEN_SHADOW_MODE is not set
-+CONFIG_XEN_SCRUB_PAGES=y
-+CONFIG_XEN_X86=y
-+# CONFIG_XEN_X86_64 is not set
-+CONFIG_HAVE_ARCH_ALLOC_SKB=y
-+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+# CONFIG_CLEAN_COMPILE is not set
-+CONFIG_BROKEN=y
-+CONFIG_BROKEN_ON_SMP=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+CONFIG_POSIX_MQUEUE=y
-+CONFIG_BSD_PROCESS_ACCT=y
-+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-+CONFIG_SYSCTL=y
-+# CONFIG_AUDIT is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+CONFIG_IKCONFIG=y
-+CONFIG_IKCONFIG_PROC=y
-+# CONFIG_CPUSETS is not set
-+CONFIG_EMBEDDED=y
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+# CONFIG_KALLSYMS_EXTRA_PASS is not set
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+CONFIG_MODULE_FORCE_UNLOAD=y
-+CONFIG_OBSOLETE_MODPARM=y
-+CONFIG_MODVERSIONS=y
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+CONFIG_KMOD=y
-+CONFIG_STOP_MACHINE=y
-+
-+#
-+# X86 Processor Configuration
-+#
-+CONFIG_XENARCH="i386"
-+CONFIG_X86=y
-+CONFIG_MMU=y
-+CONFIG_UID16=y
-+CONFIG_GENERIC_ISA_DMA=y
-+CONFIG_GENERIC_IOMAP=y
-+# CONFIG_M386 is not set
-+# CONFIG_M486 is not set
-+# CONFIG_M586 is not set
-+# CONFIG_M586TSC is not set
-+# CONFIG_M586MMX is not set
-+CONFIG_M686=y
-+# CONFIG_MPENTIUMII is not set
-+# CONFIG_MPENTIUMIII is not set
-+# CONFIG_MPENTIUMM is not set
-+# CONFIG_MPENTIUM4 is not set
-+# CONFIG_MK6 is not set
-+# CONFIG_MK7 is not set
-+# CONFIG_MK8 is not set
-+# CONFIG_MCRUSOE is not set
-+# CONFIG_MEFFICEON is not set
-+# CONFIG_MWINCHIPC6 is not set
-+# CONFIG_MWINCHIP2 is not set
-+# CONFIG_MWINCHIP3D is not set
-+# CONFIG_MGEODEGX1 is not set
-+# CONFIG_MCYRIXIII is not set
-+# CONFIG_MVIAC3_2 is not set
-+# CONFIG_X86_GENERIC is not set
-+CONFIG_X86_CMPXCHG=y
-+CONFIG_X86_XADD=y
-+CONFIG_X86_L1_CACHE_SHIFT=5
-+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_X86_PPRO_FENCE=y
-+CONFIG_X86_WP_WORKS_OK=y
-+CONFIG_X86_INVLPG=y
-+CONFIG_X86_BSWAP=y
-+CONFIG_X86_POPAD_OK=y
-+CONFIG_X86_GOOD_APIC=y
-+CONFIG_X86_USE_PPRO_CHECKSUM=y
-+# CONFIG_HPET_TIMER is not set
-+# CONFIG_HPET_EMULATE_RTC is not set
-+CONFIG_SMP=y
-+CONFIG_SMP_ALTERNATIVES=y
-+CONFIG_NR_CPUS=8
-+# CONFIG_SCHED_SMT is not set
-+# CONFIG_X86_REBOOTFIXUPS is not set
-+CONFIG_MICROCODE=y
-+CONFIG_X86_CPUID=m
-+CONFIG_SWIOTLB=y
-+
-+#
-+# Firmware Drivers
-+#
-+CONFIG_EDD=m
-+# CONFIG_NOHIGHMEM is not set
-+CONFIG_HIGHMEM4G=y
-+# CONFIG_HIGHMEM64G is not set
-+CONFIG_HIGHMEM=y
-+CONFIG_MTRR=y
-+CONFIG_HAVE_DEC_LOCK=y
-+# CONFIG_REGPARM is not set
-+CONFIG_X86_LOCAL_APIC=y
-+CONFIG_X86_IO_APIC=y
-+CONFIG_HOTPLUG_CPU=y
-+
-+#
-+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
-+#
-+CONFIG_PCI=y
-+# CONFIG_PCI_GOMMCONFIG is not set
-+# CONFIG_PCI_GODIRECT is not set
-+CONFIG_PCI_GOANY=y
-+CONFIG_PCI_DIRECT=y
-+CONFIG_PCI_MMCONFIG=y
-+# CONFIG_PCIEPORTBUS is not set
-+# CONFIG_PCI_MSI is not set
-+# CONFIG_PCI_LEGACY_PROC is not set
-+CONFIG_PCI_NAMES=y
-+# CONFIG_PCI_DEBUG is not set
-+CONFIG_ISA_DMA_API=y
-+CONFIG_ISA=y
-+# CONFIG_EISA is not set
-+# CONFIG_MCA is not set
-+CONFIG_SCx200=m
-+
-+#
-+# PCCARD (PCMCIA/CardBus) support
-+#
-+CONFIG_PCCARD=m
-+# CONFIG_PCMCIA_DEBUG is not set
-+CONFIG_PCMCIA=m
-+CONFIG_CARDBUS=y
-+
-+#
-+# PC-card bridges
-+#
-+CONFIG_YENTA=m
-+CONFIG_PD6729=m
-+CONFIG_I82092=m
-+CONFIG_I82365=m
-+CONFIG_TCIC=m
-+CONFIG_PCMCIA_PROBE=y
-+CONFIG_PCCARD_NONSTATIC=m
-+
-+#
-+# PCI Hotplug Support
-+#
-+CONFIG_HOTPLUG_PCI=m
-+CONFIG_HOTPLUG_PCI_FAKE=m
-+# CONFIG_HOTPLUG_PCI_ACPI is not set
-+CONFIG_HOTPLUG_PCI_CPCI=y
-+CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
-+CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
-+CONFIG_HOTPLUG_PCI_SHPC=m
-+# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_X86_SMP=y
-+CONFIG_X86_BIOS_REBOOT=y
-+CONFIG_X86_TRAMPOLINE=y
-+CONFIG_SECCOMP=y
-+# CONFIG_EARLY_PRINTK is not set
-+
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+CONFIG_BINFMT_AOUT=m
-+CONFIG_BINFMT_MISC=m
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+CONFIG_FW_LOADER=m
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Memory Technology Devices (MTD)
-+#
-+CONFIG_MTD=m
-+# CONFIG_MTD_DEBUG is not set
-+CONFIG_MTD_CONCAT=m
-+CONFIG_MTD_PARTITIONS=y
-+CONFIG_MTD_REDBOOT_PARTS=m
-+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-+# CONFIG_MTD_CMDLINE_PARTS is not set
-+
-+#
-+# User Modules And Translation Layers
-+#
-+CONFIG_MTD_CHAR=m
-+CONFIG_MTD_BLOCK=m
-+CONFIG_MTD_BLOCK_RO=m
-+CONFIG_FTL=m
-+CONFIG_NFTL=m
-+CONFIG_NFTL_RW=y
-+CONFIG_INFTL=m
-+
-+#
-+# RAM/ROM/Flash chip drivers
-+#
-+CONFIG_MTD_CFI=m
-+CONFIG_MTD_JEDECPROBE=m
-+CONFIG_MTD_GEN_PROBE=m
-+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-+CONFIG_MTD_MAP_BANK_WIDTH_1=y
-+CONFIG_MTD_MAP_BANK_WIDTH_2=y
-+CONFIG_MTD_MAP_BANK_WIDTH_4=y
-+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-+CONFIG_MTD_CFI_I1=y
-+CONFIG_MTD_CFI_I2=y
-+# CONFIG_MTD_CFI_I4 is not set
-+# CONFIG_MTD_CFI_I8 is not set
-+CONFIG_MTD_CFI_INTELEXT=m
-+CONFIG_MTD_CFI_AMDSTD=m
-+CONFIG_MTD_CFI_AMDSTD_RETRY=0
-+CONFIG_MTD_CFI_STAA=m
-+CONFIG_MTD_CFI_UTIL=m
-+CONFIG_MTD_RAM=m
-+CONFIG_MTD_ROM=m
-+CONFIG_MTD_ABSENT=m
-+# CONFIG_MTD_OBSOLETE_CHIPS is not set
-+
-+#
-+# Mapping drivers for chip access
-+#
-+CONFIG_MTD_COMPLEX_MAPPINGS=y
-+CONFIG_MTD_PHYSMAP=m
-+CONFIG_MTD_PHYSMAP_START=0x8000000
-+CONFIG_MTD_PHYSMAP_LEN=0x4000000
-+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
-+CONFIG_MTD_PNC2000=m
-+CONFIG_MTD_SC520CDP=m
-+CONFIG_MTD_NETSC520=m
-+CONFIG_MTD_TS5500=m
-+CONFIG_MTD_SBC_GXX=m
-+CONFIG_MTD_ELAN_104NC=m
-+CONFIG_MTD_SCx200_DOCFLASH=m
-+# CONFIG_MTD_AMD76XROM is not set
-+# CONFIG_MTD_ICHXROM is not set
-+# CONFIG_MTD_SCB2_FLASH is not set
-+CONFIG_MTD_NETtel=m
-+CONFIG_MTD_DILNETPC=m
-+CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
-+# CONFIG_MTD_L440GX is not set
-+CONFIG_MTD_PCI=m
-+CONFIG_MTD_PCMCIA=m
-+
-+#
-+# Self-contained MTD device drivers
-+#
-+CONFIG_MTD_PMC551=m
-+# CONFIG_MTD_PMC551_BUGFIX is not set
-+# CONFIG_MTD_PMC551_DEBUG is not set
-+CONFIG_MTD_SLRAM=m
-+CONFIG_MTD_PHRAM=m
-+CONFIG_MTD_MTDRAM=m
-+CONFIG_MTDRAM_TOTAL_SIZE=4096
-+CONFIG_MTDRAM_ERASE_SIZE=128
-+CONFIG_MTD_BLKMTD=m
-+# CONFIG_MTD_BLOCK2MTD is not set
-+
-+#
-+# Disk-On-Chip Device Drivers
-+#
-+CONFIG_MTD_DOC2000=m
-+CONFIG_MTD_DOC2001=m
-+CONFIG_MTD_DOC2001PLUS=m
-+CONFIG_MTD_DOCPROBE=m
-+CONFIG_MTD_DOCECC=m
-+# CONFIG_MTD_DOCPROBE_ADVANCED is not set
-+CONFIG_MTD_DOCPROBE_ADDRESS=0
-+
-+#
-+# NAND Flash Device Drivers
-+#
-+CONFIG_MTD_NAND=m
-+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-+CONFIG_MTD_NAND_IDS=m
-+CONFIG_MTD_NAND_DISKONCHIP=m
-+# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
-+CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
-+# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
-+# CONFIG_MTD_NAND_NANDSIM is not set
-+
-+#
-+# Parallel port support
-+#
-+CONFIG_PARPORT=m
-+CONFIG_PARPORT_PC=m
-+CONFIG_PARPORT_SERIAL=m
-+CONFIG_PARPORT_PC_FIFO=y
-+# CONFIG_PARPORT_PC_SUPERIO is not set
-+CONFIG_PARPORT_PC_PCMCIA=m
-+CONFIG_PARPORT_NOT_PC=y
-+# CONFIG_PARPORT_GSC is not set
-+CONFIG_PARPORT_1284=y
-+
-+#
-+# Plug and Play support
-+#
-+CONFIG_PNP=y
-+# CONFIG_PNP_DEBUG is not set
-+
-+#
-+# Protocols
-+#
-+CONFIG_ISAPNP=y
-+# CONFIG_PNPBIOS is not set
-+# CONFIG_PNPACPI is not set
-+
-+#
-+# Block devices
-+#
-+CONFIG_BLK_DEV_FD=m
-+CONFIG_BLK_DEV_XD=m
-+CONFIG_PARIDE=m
-+CONFIG_PARIDE_PARPORT=m
-+
-+#
-+# Parallel IDE high-level drivers
-+#
-+CONFIG_PARIDE_PD=m
-+CONFIG_PARIDE_PCD=m
-+CONFIG_PARIDE_PF=m
-+CONFIG_PARIDE_PT=m
-+CONFIG_PARIDE_PG=m
-+
-+#
-+# Parallel IDE protocol modules
-+#
-+CONFIG_PARIDE_ATEN=m
-+CONFIG_PARIDE_BPCK=m
-+CONFIG_PARIDE_BPCK6=m
-+CONFIG_PARIDE_COMM=m
-+CONFIG_PARIDE_DSTR=m
-+CONFIG_PARIDE_FIT2=m
-+CONFIG_PARIDE_FIT3=m
-+CONFIG_PARIDE_EPAT=m
-+# CONFIG_PARIDE_EPATC8 is not set
-+CONFIG_PARIDE_EPIA=m
-+CONFIG_PARIDE_FRIQ=m
-+CONFIG_PARIDE_FRPW=m
-+CONFIG_PARIDE_KBIC=m
-+CONFIG_PARIDE_KTTI=m
-+CONFIG_PARIDE_ON20=m
-+CONFIG_PARIDE_ON26=m
-+CONFIG_BLK_CPQ_DA=m
-+CONFIG_BLK_CPQ_CISS_DA=m
-+CONFIG_CISS_SCSI_TAPE=y
-+CONFIG_BLK_DEV_DAC960=m
-+CONFIG_BLK_DEV_UMEM=m
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=m
-+CONFIG_BLK_DEV_CRYPTOLOOP=m
-+CONFIG_BLK_DEV_NBD=m
-+CONFIG_BLK_DEV_SX8=m
-+# CONFIG_BLK_DEV_UB is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=16384
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+CONFIG_LBD=y
-+CONFIG_CDROM_PKTCDVD=m
-+CONFIG_CDROM_PKTCDVD_BUFFERS=8
-+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+CONFIG_ATA_OVER_ETH=m
-+
-+#
-+# ATA/ATAPI/MFM/RLL support
-+#
-+CONFIG_IDE=y
-+CONFIG_BLK_DEV_IDE=y
-+
-+#
-+# Please see Documentation/ide.txt for help/info on IDE drives
-+#
-+# CONFIG_BLK_DEV_IDE_SATA is not set
-+# CONFIG_BLK_DEV_HD_IDE is not set
-+CONFIG_BLK_DEV_IDEDISK=y
-+CONFIG_IDEDISK_MULTI_MODE=y
-+CONFIG_BLK_DEV_IDECS=m
-+CONFIG_BLK_DEV_IDECD=y
-+CONFIG_BLK_DEV_IDETAPE=m
-+CONFIG_BLK_DEV_IDEFLOPPY=y
-+CONFIG_BLK_DEV_IDESCSI=m
-+# CONFIG_IDE_TASK_IOCTL is not set
-+
-+#
-+# IDE chipset support/bugfixes
-+#
-+CONFIG_IDE_GENERIC=y
-+CONFIG_BLK_DEV_CMD640=y
-+CONFIG_BLK_DEV_CMD640_ENHANCED=y
-+CONFIG_BLK_DEV_IDEPNP=y
-+CONFIG_BLK_DEV_IDEPCI=y
-+CONFIG_IDEPCI_SHARE_IRQ=y
-+# CONFIG_BLK_DEV_OFFBOARD is not set
-+CONFIG_BLK_DEV_GENERIC=y
-+CONFIG_BLK_DEV_OPTI621=m
-+CONFIG_BLK_DEV_RZ1000=y
-+CONFIG_BLK_DEV_IDEDMA_PCI=y
-+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-+CONFIG_IDEDMA_PCI_AUTO=y
-+# CONFIG_IDEDMA_ONLYDISK is not set
-+CONFIG_BLK_DEV_AEC62XX=y
-+CONFIG_BLK_DEV_ALI15X3=y
-+# CONFIG_WDC_ALI15X3 is not set
-+CONFIG_BLK_DEV_AMD74XX=y
-+CONFIG_BLK_DEV_ATIIXP=y
-+CONFIG_BLK_DEV_CMD64X=y
-+CONFIG_BLK_DEV_TRIFLEX=y
-+CONFIG_BLK_DEV_CY82C693=y
-+CONFIG_BLK_DEV_CS5520=y
-+CONFIG_BLK_DEV_CS5530=y
-+CONFIG_BLK_DEV_HPT34X=y
-+# CONFIG_HPT34X_AUTODMA is not set
-+CONFIG_BLK_DEV_HPT366=y
-+CONFIG_BLK_DEV_SC1200=m
-+CONFIG_BLK_DEV_PIIX=y
-+CONFIG_BLK_DEV_NS87415=m
-+CONFIG_BLK_DEV_PDC202XX_OLD=y
-+CONFIG_PDC202XX_BURST=y
-+CONFIG_BLK_DEV_PDC202XX_NEW=y
-+CONFIG_PDC202XX_FORCE=y
-+CONFIG_BLK_DEV_SVWKS=y
-+CONFIG_BLK_DEV_SIIMAGE=y
-+CONFIG_BLK_DEV_SIS5513=y
-+CONFIG_BLK_DEV_SLC90E66=y
-+CONFIG_BLK_DEV_TRM290=m
-+CONFIG_BLK_DEV_VIA82CXXX=y
-+# CONFIG_IDE_ARM is not set
-+# CONFIG_IDE_CHIPSETS is not set
-+CONFIG_BLK_DEV_IDEDMA=y
-+# CONFIG_IDEDMA_IVB is not set
-+CONFIG_IDEDMA_AUTO=y
-+# CONFIG_BLK_DEV_HD is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=m
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=m
-+CONFIG_CHR_DEV_ST=m
-+CONFIG_CHR_DEV_OSST=m
-+CONFIG_BLK_DEV_SR=m
-+# CONFIG_BLK_DEV_SR_VENDOR is not set
-+CONFIG_CHR_DEV_SG=m
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+CONFIG_SCSI_MULTI_LUN=y
-+CONFIG_SCSI_CONSTANTS=y
-+CONFIG_SCSI_LOGGING=y
-+
-+#
-+# SCSI Transport Attributes
-+#
-+CONFIG_SCSI_SPI_ATTRS=m
-+CONFIG_SCSI_FC_ATTRS=m
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+
-+#
-+# SCSI low-level drivers
-+#
-+CONFIG_BLK_DEV_3W_XXXX_RAID=m
-+CONFIG_SCSI_3W_9XXX=m
-+# CONFIG_SCSI_7000FASST is not set
-+CONFIG_SCSI_ACARD=m
-+CONFIG_SCSI_AHA152X=m
-+# CONFIG_SCSI_AHA1542 is not set
-+CONFIG_SCSI_AACRAID=m
-+CONFIG_SCSI_AIC7XXX=m
-+CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
-+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-+CONFIG_AIC7XXX_DEBUG_ENABLE=y
-+CONFIG_AIC7XXX_DEBUG_MASK=0
-+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-+CONFIG_SCSI_AIC7XXX_OLD=m
-+CONFIG_SCSI_AIC79XX=m
-+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-+CONFIG_AIC79XX_RESET_DELAY_MS=15000
-+CONFIG_AIC79XX_ENABLE_RD_STRM=y
-+CONFIG_AIC79XX_DEBUG_ENABLE=y
-+CONFIG_AIC79XX_DEBUG_MASK=0
-+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-+CONFIG_SCSI_DPT_I2O=m
-+CONFIG_SCSI_ADVANSYS=m
-+CONFIG_SCSI_IN2000=m
-+CONFIG_MEGARAID_NEWGEN=y
-+CONFIG_MEGARAID_MM=m
-+CONFIG_MEGARAID_MAILBOX=m
-+CONFIG_SCSI_SATA=y
-+CONFIG_SCSI_SATA_AHCI=m
-+CONFIG_SCSI_SATA_SVW=m
-+CONFIG_SCSI_ATA_PIIX=m
-+CONFIG_SCSI_SATA_NV=m
-+CONFIG_SCSI_SATA_PROMISE=m
-+# CONFIG_SCSI_SATA_QSTOR is not set
-+CONFIG_SCSI_SATA_SX4=m
-+CONFIG_SCSI_SATA_SIL=m
-+CONFIG_SCSI_SATA_SIS=m
-+CONFIG_SCSI_SATA_ULI=m
-+CONFIG_SCSI_SATA_VIA=m
-+CONFIG_SCSI_SATA_VITESSE=m
-+CONFIG_SCSI_BUSLOGIC=m
-+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-+# CONFIG_SCSI_CPQFCTS is not set
-+CONFIG_SCSI_DMX3191D=m
-+CONFIG_SCSI_DTC3280=m
-+CONFIG_SCSI_EATA=m
-+CONFIG_SCSI_EATA_TAGGED_QUEUE=y
-+CONFIG_SCSI_EATA_LINKED_COMMANDS=y
-+CONFIG_SCSI_EATA_MAX_TAGS=16
-+CONFIG_SCSI_EATA_PIO=m
-+CONFIG_SCSI_FUTURE_DOMAIN=m
-+CONFIG_SCSI_GDTH=m
-+CONFIG_SCSI_GENERIC_NCR5380=m
-+CONFIG_SCSI_GENERIC_NCR5380_MMIO=m
-+CONFIG_SCSI_GENERIC_NCR53C400=y
-+CONFIG_SCSI_IPS=m
-+# CONFIG_SCSI_INITIO is not set
-+# CONFIG_SCSI_INIA100 is not set
-+CONFIG_SCSI_PPA=m
-+CONFIG_SCSI_IMM=m
-+# CONFIG_SCSI_IZIP_EPP16 is not set
-+# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-+CONFIG_SCSI_NCR53C406A=m
-+CONFIG_SCSI_SYM53C8XX_2=m
-+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-+CONFIG_SCSI_IPR=m
-+# CONFIG_SCSI_IPR_TRACE is not set
-+# CONFIG_SCSI_IPR_DUMP is not set
-+CONFIG_SCSI_PAS16=m
-+# CONFIG_SCSI_PCI2000 is not set
-+# CONFIG_SCSI_PCI2220I is not set
-+CONFIG_SCSI_PSI240I=m
-+CONFIG_SCSI_QLOGIC_FAS=m
-+CONFIG_SCSI_QLOGIC_ISP=m
-+CONFIG_SCSI_QLOGIC_FC=m
-+CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
-+CONFIG_SCSI_QLOGIC_1280=m
-+CONFIG_SCSI_QLOGIC_1280_1040=y
-+CONFIG_SCSI_QLA2XXX=m
-+CONFIG_SCSI_QLA21XX=m
-+CONFIG_SCSI_QLA22XX=m
-+CONFIG_SCSI_QLA2300=m
-+CONFIG_SCSI_QLA2322=m
-+CONFIG_SCSI_QLA6312=m
-+CONFIG_SCSI_LPFC=m
-+# CONFIG_SCSI_SEAGATE is not set
-+CONFIG_SCSI_SYM53C416=m
-+CONFIG_SCSI_DC395x=m
-+CONFIG_SCSI_DC390T=m
-+CONFIG_SCSI_T128=m
-+CONFIG_SCSI_U14_34F=m
-+CONFIG_SCSI_U14_34F_TAGGED_QUEUE=y
-+CONFIG_SCSI_U14_34F_LINKED_COMMANDS=y
-+CONFIG_SCSI_U14_34F_MAX_TAGS=8
-+# CONFIG_SCSI_ULTRASTOR is not set
-+CONFIG_SCSI_NSP32=m
-+CONFIG_SCSI_DEBUG=m
-+
-+#
-+# PCMCIA SCSI adapter support
-+#
-+CONFIG_PCMCIA_AHA152X=m
-+CONFIG_PCMCIA_FDOMAIN=m
-+CONFIG_PCMCIA_NINJA_SCSI=m
-+CONFIG_PCMCIA_QLOGIC=m
-+CONFIG_PCMCIA_SYM53C500=m
-+
-+#
-+# Old CD-ROM drivers (not SCSI, not IDE)
-+#
-+CONFIG_CD_NO_IDESCSI=y
-+CONFIG_AZTCD=m
-+CONFIG_GSCD=m
-+# CONFIG_SBPCD is not set
-+CONFIG_MCDX=m
-+CONFIG_OPTCD=m
-+# CONFIG_CM206 is not set
-+CONFIG_SJCD=m
-+CONFIG_ISP16_CDI=m
-+CONFIG_CDU31A=m
-+CONFIG_CDU535=m
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+CONFIG_MD=y
-+CONFIG_BLK_DEV_MD=m
-+CONFIG_MD_LINEAR=m
-+CONFIG_MD_RAID0=m
-+CONFIG_MD_RAID1=m
-+CONFIG_MD_RAID10=m
-+CONFIG_MD_RAID5=m
-+CONFIG_MD_RAID6=m
-+CONFIG_MD_MULTIPATH=m
-+CONFIG_MD_FAULTY=m
-+CONFIG_BLK_DEV_DM=m
-+CONFIG_DM_CRYPT=m
-+CONFIG_DM_SNAPSHOT=m
-+CONFIG_DM_MIRROR=m
-+CONFIG_DM_ZERO=m
-+CONFIG_DM_MULTIPATH=m
-+CONFIG_DM_MULTIPATH_EMC=m
-+
-+#
-+# Fusion MPT device support
-+#
-+CONFIG_FUSION=m
-+CONFIG_FUSION_MAX_SGE=40
-+CONFIG_FUSION_CTL=m
-+CONFIG_FUSION_LAN=m
-+
-+#
-+# IEEE 1394 (FireWire) support
-+#
-+CONFIG_IEEE1394=m
-+
-+#
-+# Subsystem Options
-+#
-+# CONFIG_IEEE1394_VERBOSEDEBUG is not set
-+# CONFIG_IEEE1394_OUI_DB is not set
-+CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
-+CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
-+
-+#
-+# Device Drivers
-+#
-+CONFIG_IEEE1394_PCILYNX=m
-+CONFIG_IEEE1394_OHCI1394=m
-+
-+#
-+# Protocol Drivers
-+#
-+CONFIG_IEEE1394_VIDEO1394=m
-+CONFIG_IEEE1394_SBP2=m
-+# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-+CONFIG_IEEE1394_ETH1394=m
-+CONFIG_IEEE1394_DV1394=m
-+CONFIG_IEEE1394_RAWIO=m
-+CONFIG_IEEE1394_CMP=m
-+CONFIG_IEEE1394_AMDTP=m
-+
-+#
-+# I2O device support
-+#
-+CONFIG_I2O=m
-+CONFIG_I2O_CONFIG=m
-+CONFIG_I2O_BLOCK=m
-+CONFIG_I2O_SCSI=m
-+CONFIG_I2O_PROC=m
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=m
-+CONFIG_PACKET_MMAP=y
-+CONFIG_UNIX=m
-+CONFIG_NET_KEY=m
-+CONFIG_INET=y
-+CONFIG_IP_MULTICAST=y
-+CONFIG_IP_ADVANCED_ROUTER=y
-+CONFIG_IP_MULTIPLE_TABLES=y
-+CONFIG_IP_ROUTE_FWMARK=y
-+CONFIG_IP_ROUTE_MULTIPATH=y
-+# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
-+CONFIG_IP_ROUTE_VERBOSE=y
-+# CONFIG_IP_PNP is not set
-+CONFIG_NET_IPIP=m
-+CONFIG_NET_IPGRE=m
-+CONFIG_NET_IPGRE_BROADCAST=y
-+CONFIG_IP_MROUTE=y
-+CONFIG_IP_PIMSM_V1=y
-+CONFIG_IP_PIMSM_V2=y
-+# CONFIG_ARPD is not set
-+CONFIG_SYN_COOKIES=y
-+CONFIG_INET_AH=m
-+CONFIG_INET_ESP=m
-+CONFIG_INET_IPCOMP=m
-+CONFIG_INET_TUNNEL=m
-+CONFIG_IP_TCPDIAG=m
-+CONFIG_IP_TCPDIAG_IPV6=y
-+
-+#
-+# IP: Virtual Server Configuration
-+#
-+CONFIG_IP_VS=m
-+# CONFIG_IP_VS_DEBUG is not set
-+CONFIG_IP_VS_TAB_BITS=12
-+
-+#
-+# IPVS transport protocol load balancing support
-+#
-+CONFIG_IP_VS_PROTO_TCP=y
-+CONFIG_IP_VS_PROTO_UDP=y
-+CONFIG_IP_VS_PROTO_ESP=y
-+CONFIG_IP_VS_PROTO_AH=y
-+
-+#
-+# IPVS scheduler
-+#
-+CONFIG_IP_VS_RR=m
-+CONFIG_IP_VS_WRR=m
-+CONFIG_IP_VS_LC=m
-+CONFIG_IP_VS_WLC=m
-+CONFIG_IP_VS_LBLC=m
-+CONFIG_IP_VS_LBLCR=m
-+CONFIG_IP_VS_DH=m
-+CONFIG_IP_VS_SH=m
-+CONFIG_IP_VS_SED=m
-+CONFIG_IP_VS_NQ=m
-+
-+#
-+# IPVS application helper
-+#
-+CONFIG_IP_VS_FTP=m
-+CONFIG_IPV6=m
-+CONFIG_IPV6_PRIVACY=y
-+CONFIG_INET6_AH=m
-+CONFIG_INET6_ESP=m
-+CONFIG_INET6_IPCOMP=m
-+CONFIG_INET6_TUNNEL=m
-+CONFIG_IPV6_TUNNEL=m
-+CONFIG_NETFILTER=y
-+# CONFIG_NETFILTER_DEBUG is not set
-+CONFIG_BRIDGE_NETFILTER=y
-+
-+#
-+# IP: Netfilter Configuration
-+#
-+CONFIG_IP_NF_CONNTRACK=m
-+CONFIG_IP_NF_CT_ACCT=y
-+CONFIG_IP_NF_CONNTRACK_MARK=y
-+CONFIG_IP_NF_CT_PROTO_SCTP=m
-+CONFIG_IP_NF_FTP=m
-+CONFIG_IP_NF_IRC=m
-+CONFIG_IP_NF_TFTP=m
-+CONFIG_IP_NF_AMANDA=m
-+CONFIG_IP_NF_QUEUE=m
-+CONFIG_IP_NF_IPTABLES=m
-+CONFIG_IP_NF_MATCH_LIMIT=m
-+CONFIG_IP_NF_MATCH_IPRANGE=m
-+CONFIG_IP_NF_MATCH_MAC=m
-+CONFIG_IP_NF_MATCH_PKTTYPE=m
-+CONFIG_IP_NF_MATCH_MARK=m
-+CONFIG_IP_NF_MATCH_MULTIPORT=m
-+CONFIG_IP_NF_MATCH_TOS=m
-+CONFIG_IP_NF_MATCH_RECENT=m
-+CONFIG_IP_NF_MATCH_ECN=m
-+CONFIG_IP_NF_MATCH_DSCP=m
-+CONFIG_IP_NF_MATCH_AH_ESP=m
-+CONFIG_IP_NF_MATCH_LENGTH=m
-+CONFIG_IP_NF_MATCH_TTL=m
-+CONFIG_IP_NF_MATCH_TCPMSS=m
-+CONFIG_IP_NF_MATCH_HELPER=m
-+CONFIG_IP_NF_MATCH_STATE=m
-+CONFIG_IP_NF_MATCH_CONNTRACK=m
-+CONFIG_IP_NF_MATCH_OWNER=m
-+CONFIG_IP_NF_MATCH_PHYSDEV=m
-+CONFIG_IP_NF_MATCH_ADDRTYPE=m
-+CONFIG_IP_NF_MATCH_REALM=m
-+CONFIG_IP_NF_MATCH_SCTP=m
-+CONFIG_IP_NF_MATCH_COMMENT=m
-+CONFIG_IP_NF_MATCH_CONNMARK=m
-+CONFIG_IP_NF_MATCH_HASHLIMIT=m
-+CONFIG_IP_NF_FILTER=m
-+CONFIG_IP_NF_TARGET_REJECT=m
-+CONFIG_IP_NF_TARGET_LOG=m
-+CONFIG_IP_NF_TARGET_ULOG=m
-+CONFIG_IP_NF_TARGET_TCPMSS=m
-+CONFIG_IP_NF_NAT=m
-+CONFIG_IP_NF_NAT_NEEDED=y
-+CONFIG_IP_NF_TARGET_MASQUERADE=m
-+CONFIG_IP_NF_TARGET_REDIRECT=m
-+CONFIG_IP_NF_TARGET_NETMAP=m
-+CONFIG_IP_NF_TARGET_SAME=m
-+CONFIG_IP_NF_NAT_SNMP_BASIC=m
-+CONFIG_IP_NF_NAT_IRC=m
-+CONFIG_IP_NF_NAT_FTP=m
-+CONFIG_IP_NF_NAT_TFTP=m
-+CONFIG_IP_NF_NAT_AMANDA=m
-+CONFIG_IP_NF_MANGLE=m
-+CONFIG_IP_NF_TARGET_TOS=m
-+CONFIG_IP_NF_TARGET_ECN=m
-+CONFIG_IP_NF_TARGET_DSCP=m
-+CONFIG_IP_NF_TARGET_MARK=m
-+CONFIG_IP_NF_TARGET_CLASSIFY=m
-+CONFIG_IP_NF_TARGET_CONNMARK=m
-+CONFIG_IP_NF_TARGET_CLUSTERIP=m
-+CONFIG_IP_NF_RAW=m
-+CONFIG_IP_NF_TARGET_NOTRACK=m
-+CONFIG_IP_NF_ARPTABLES=m
-+CONFIG_IP_NF_ARPFILTER=m
-+CONFIG_IP_NF_ARP_MANGLE=m
-+
-+#
-+# IPv6: Netfilter Configuration (EXPERIMENTAL)
-+#
-+CONFIG_IP6_NF_QUEUE=m
-+CONFIG_IP6_NF_IPTABLES=m
-+CONFIG_IP6_NF_MATCH_LIMIT=m
-+CONFIG_IP6_NF_MATCH_MAC=m
-+CONFIG_IP6_NF_MATCH_RT=m
-+CONFIG_IP6_NF_MATCH_OPTS=m
-+CONFIG_IP6_NF_MATCH_FRAG=m
-+CONFIG_IP6_NF_MATCH_HL=m
-+CONFIG_IP6_NF_MATCH_MULTIPORT=m
-+CONFIG_IP6_NF_MATCH_OWNER=m
-+CONFIG_IP6_NF_MATCH_MARK=m
-+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-+CONFIG_IP6_NF_MATCH_AHESP=m
-+CONFIG_IP6_NF_MATCH_LENGTH=m
-+CONFIG_IP6_NF_MATCH_EUI64=m
-+CONFIG_IP6_NF_MATCH_PHYSDEV=m
-+CONFIG_IP6_NF_FILTER=m
-+CONFIG_IP6_NF_TARGET_LOG=m
-+CONFIG_IP6_NF_MANGLE=m
-+CONFIG_IP6_NF_TARGET_MARK=m
-+CONFIG_IP6_NF_RAW=m
-+
-+#
-+# DECnet: Netfilter Configuration
-+#
-+CONFIG_DECNET_NF_GRABULATOR=m
-+
-+#
-+# Bridge: Netfilter Configuration
-+#
-+CONFIG_BRIDGE_NF_EBTABLES=m
-+CONFIG_BRIDGE_EBT_BROUTE=m
-+CONFIG_BRIDGE_EBT_T_FILTER=m
-+CONFIG_BRIDGE_EBT_T_NAT=m
-+CONFIG_BRIDGE_EBT_802_3=m
-+CONFIG_BRIDGE_EBT_AMONG=m
-+CONFIG_BRIDGE_EBT_ARP=m
-+CONFIG_BRIDGE_EBT_IP=m
-+CONFIG_BRIDGE_EBT_LIMIT=m
-+CONFIG_BRIDGE_EBT_MARK=m
-+CONFIG_BRIDGE_EBT_PKTTYPE=m
-+CONFIG_BRIDGE_EBT_STP=m
-+CONFIG_BRIDGE_EBT_VLAN=m
-+CONFIG_BRIDGE_EBT_ARPREPLY=m
-+CONFIG_BRIDGE_EBT_DNAT=m
-+CONFIG_BRIDGE_EBT_MARK_T=m
-+CONFIG_BRIDGE_EBT_REDIRECT=m
-+CONFIG_BRIDGE_EBT_SNAT=m
-+CONFIG_BRIDGE_EBT_LOG=m
-+# CONFIG_BRIDGE_EBT_ULOG is not set
-+CONFIG_XFRM=y
-+CONFIG_XFRM_USER=m
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+CONFIG_IP_SCTP=m
-+# CONFIG_SCTP_DBG_MSG is not set
-+# CONFIG_SCTP_DBG_OBJCNT is not set
-+# CONFIG_SCTP_HMAC_NONE is not set
-+# CONFIG_SCTP_HMAC_SHA1 is not set
-+CONFIG_SCTP_HMAC_MD5=y
-+CONFIG_ATM=y
-+CONFIG_ATM_CLIP=y
-+# CONFIG_ATM_CLIP_NO_ICMP is not set
-+CONFIG_ATM_LANE=m
-+CONFIG_ATM_MPOA=m
-+CONFIG_ATM_BR2684=m
-+# CONFIG_ATM_BR2684_IPFILTER is not set
-+CONFIG_BRIDGE=m
-+CONFIG_VLAN_8021Q=m
-+CONFIG_DECNET=m
-+# CONFIG_DECNET_ROUTER is not set
-+CONFIG_LLC=y
-+CONFIG_LLC2=m
-+CONFIG_IPX=m
-+# CONFIG_IPX_INTERN is not set
-+CONFIG_ATALK=m
-+CONFIG_DEV_APPLETALK=y
-+CONFIG_LTPC=m
-+CONFIG_COPS=m
-+CONFIG_COPS_DAYNA=y
-+CONFIG_COPS_TANGENT=y
-+CONFIG_IPDDP=m
-+CONFIG_IPDDP_ENCAP=y
-+CONFIG_IPDDP_DECAP=y
-+CONFIG_X25=m
-+CONFIG_LAPB=m
-+# CONFIG_NET_DIVERT is not set
-+CONFIG_ECONET=m
-+CONFIG_ECONET_AUNUDP=y
-+CONFIG_ECONET_NATIVE=y
-+CONFIG_WAN_ROUTER=m
-+
-+#
-+# QoS and/or fair queueing
-+#
-+CONFIG_NET_SCHED=y
-+CONFIG_NET_SCH_CLK_JIFFIES=y
-+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-+# CONFIG_NET_SCH_CLK_CPU is not set
-+CONFIG_NET_SCH_CBQ=m
-+CONFIG_NET_SCH_HTB=m
-+CONFIG_NET_SCH_HFSC=m
-+CONFIG_NET_SCH_ATM=m
-+CONFIG_NET_SCH_PRIO=m
-+CONFIG_NET_SCH_RED=m
-+CONFIG_NET_SCH_SFQ=m
-+CONFIG_NET_SCH_TEQL=m
-+CONFIG_NET_SCH_TBF=m
-+CONFIG_NET_SCH_GRED=m
-+CONFIG_NET_SCH_DSMARK=m
-+CONFIG_NET_SCH_NETEM=m
-+CONFIG_NET_SCH_INGRESS=m
-+CONFIG_NET_QOS=y
-+CONFIG_NET_ESTIMATOR=y
-+CONFIG_NET_CLS=y
-+CONFIG_NET_CLS_BASIC=m
-+CONFIG_NET_CLS_TCINDEX=m
-+CONFIG_NET_CLS_ROUTE4=m
-+CONFIG_NET_CLS_ROUTE=y
-+CONFIG_NET_CLS_FW=m
-+CONFIG_NET_CLS_U32=m
-+# CONFIG_CLS_U32_PERF is not set
-+# CONFIG_NET_CLS_IND is not set
-+# CONFIG_CLS_U32_MARK is not set
-+CONFIG_NET_CLS_RSVP=m
-+CONFIG_NET_CLS_RSVP6=m
-+CONFIG_NET_EMATCH=y
-+CONFIG_NET_EMATCH_STACK=32
-+CONFIG_NET_EMATCH_CMP=m
-+CONFIG_NET_EMATCH_NBYTE=m
-+CONFIG_NET_EMATCH_U32=m
-+CONFIG_NET_EMATCH_META=m
-+# CONFIG_NET_CLS_ACT is not set
-+CONFIG_NET_CLS_POLICE=y
-+
-+#
-+# Network testing
-+#
-+CONFIG_NET_PKTGEN=m
-+CONFIG_NETPOLL=y
-+# CONFIG_NETPOLL_RX is not set
-+# CONFIG_NETPOLL_TRAP is not set
-+CONFIG_NET_POLL_CONTROLLER=y
-+CONFIG_HAMRADIO=y
-+
-+#
-+# Packet Radio protocols
-+#
-+CONFIG_AX25=m
-+# CONFIG_AX25_DAMA_SLAVE is not set
-+CONFIG_NETROM=m
-+CONFIG_ROSE=m
-+
-+#
-+# AX.25 network device drivers
-+#
-+CONFIG_MKISS=m
-+CONFIG_6PACK=m
-+CONFIG_BPQETHER=m
-+# CONFIG_DMASCC is not set
-+CONFIG_SCC=m
-+# CONFIG_SCC_DELAY is not set
-+# CONFIG_SCC_TRXECHO is not set
-+CONFIG_BAYCOM_SER_FDX=m
-+CONFIG_BAYCOM_SER_HDX=m
-+CONFIG_BAYCOM_PAR=m
-+CONFIG_BAYCOM_EPP=m
-+CONFIG_YAM=m
-+CONFIG_IRDA=m
-+
-+#
-+# IrDA protocols
-+#
-+CONFIG_IRLAN=m
-+CONFIG_IRNET=m
-+CONFIG_IRCOMM=m
-+# CONFIG_IRDA_ULTRA is not set
-+
-+#
-+# IrDA options
-+#
-+CONFIG_IRDA_CACHE_LAST_LSAP=y
-+CONFIG_IRDA_FAST_RR=y
-+CONFIG_IRDA_DEBUG=y
-+
-+#
-+# Infrared-port device drivers
-+#
-+
-+#
-+# SIR device drivers
-+#
-+CONFIG_IRTTY_SIR=m
-+
-+#
-+# Dongle support
-+#
-+CONFIG_DONGLE=y
-+CONFIG_ESI_DONGLE=m
-+CONFIG_ACTISYS_DONGLE=m
-+CONFIG_TEKRAM_DONGLE=m
-+CONFIG_LITELINK_DONGLE=m
-+CONFIG_MA600_DONGLE=m
-+CONFIG_GIRBIL_DONGLE=m
-+CONFIG_MCP2120_DONGLE=m
-+CONFIG_OLD_BELKIN_DONGLE=m
-+CONFIG_ACT200L_DONGLE=m
-+
-+#
-+# Old SIR device drivers
-+#
-+CONFIG_IRPORT_SIR=m
-+
-+#
-+# Old Serial dongle support
-+#
-+# CONFIG_DONGLE_OLD is not set
-+
-+#
-+# FIR device drivers
-+#
-+CONFIG_USB_IRDA=m
-+CONFIG_SIGMATEL_FIR=m
-+CONFIG_NSC_FIR=m
-+CONFIG_WINBOND_FIR=m
-+# CONFIG_TOSHIBA_FIR is not set
-+CONFIG_SMC_IRCC_FIR=m
-+CONFIG_ALI_FIR=m
-+CONFIG_VLSI_FIR=m
-+CONFIG_VIA_FIR=m
-+CONFIG_BT=m
-+CONFIG_BT_L2CAP=m
-+CONFIG_BT_SCO=m
-+CONFIG_BT_RFCOMM=m
-+CONFIG_BT_RFCOMM_TTY=y
-+CONFIG_BT_BNEP=m
-+CONFIG_BT_BNEP_MC_FILTER=y
-+CONFIG_BT_BNEP_PROTO_FILTER=y
-+CONFIG_BT_CMTP=m
-+CONFIG_BT_HIDP=m
-+
-+#
-+# Bluetooth device drivers
-+#
-+CONFIG_BT_HCIUSB=m
-+CONFIG_BT_HCIUSB_SCO=y
-+CONFIG_BT_HCIUART=m
-+CONFIG_BT_HCIUART_H4=y
-+CONFIG_BT_HCIUART_BCSP=y
-+# CONFIG_BT_HCIUART_BCSP_TXCRC is not set
-+CONFIG_BT_HCIBCM203X=m
-+# CONFIG_BT_HCIBPA10X is not set
-+CONFIG_BT_HCIBFUSB=m
-+CONFIG_BT_HCIDTL1=m
-+CONFIG_BT_HCIBT3C=m
-+CONFIG_BT_HCIBLUECARD=m
-+CONFIG_BT_HCIBTUART=m
-+CONFIG_BT_HCIVHCI=m
-+CONFIG_NETDEVICES=y
-+CONFIG_DUMMY=m
-+CONFIG_BONDING=m
-+CONFIG_EQUALIZER=m
-+CONFIG_TUN=m
-+CONFIG_NET_SB1000=m
-+
-+#
-+# ARCnet devices
-+#
-+CONFIG_ARCNET=m
-+CONFIG_ARCNET_1201=m
-+CONFIG_ARCNET_1051=m
-+CONFIG_ARCNET_RAW=m
-+# CONFIG_ARCNET_CAP is not set
-+CONFIG_ARCNET_COM90xx=m
-+CONFIG_ARCNET_COM90xxIO=m
-+CONFIG_ARCNET_RIM_I=m
-+CONFIG_ARCNET_COM20020=m
-+CONFIG_ARCNET_COM20020_ISA=m
-+CONFIG_ARCNET_COM20020_PCI=m
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=m
-+CONFIG_HAPPYMEAL=m
-+CONFIG_SUNGEM=m
-+CONFIG_NET_VENDOR_3COM=y
-+CONFIG_EL1=m
-+CONFIG_EL2=m
-+# CONFIG_ELPLUS is not set
-+CONFIG_EL16=m
-+CONFIG_EL3=m
-+# CONFIG_3C515 is not set
-+CONFIG_VORTEX=m
-+CONFIG_TYPHOON=m
-+# CONFIG_LANCE is not set
-+CONFIG_NET_VENDOR_SMC=y
-+CONFIG_WD80x3=m
-+CONFIG_ULTRA=m
-+CONFIG_SMC9194=m
-+CONFIG_NET_VENDOR_RACAL=y
-+CONFIG_NI5010=m
-+CONFIG_NI52=m
-+# CONFIG_NI65 is not set
-+
-+#
-+# Tulip family network device support
-+#
-+CONFIG_NET_TULIP=y
-+CONFIG_DE2104X=m
-+CONFIG_TULIP=m
-+# CONFIG_TULIP_MWI is not set
-+# CONFIG_TULIP_MMIO is not set
-+# CONFIG_TULIP_NAPI is not set
-+CONFIG_DE4X5=m
-+CONFIG_WINBOND_840=m
-+CONFIG_DM9102=m
-+CONFIG_PCMCIA_XIRCOM=m
-+# CONFIG_PCMCIA_XIRTULIP is not set
-+CONFIG_AT1700=m
-+CONFIG_DEPCA=m
-+CONFIG_HP100=m
-+CONFIG_NET_ISA=y
-+CONFIG_E2100=m
-+CONFIG_EWRK3=m
-+CONFIG_EEXPRESS=m
-+CONFIG_EEXPRESS_PRO=m
-+CONFIG_HPLAN_PLUS=m
-+CONFIG_HPLAN=m
-+CONFIG_LP486E=m
-+CONFIG_ETH16I=m
-+CONFIG_NE2000=m
-+CONFIG_ZNET=m
-+CONFIG_SEEQ8005=m
-+CONFIG_NET_PCI=y
-+CONFIG_PCNET32=m
-+CONFIG_AMD8111_ETH=m
-+# CONFIG_AMD8111E_NAPI is not set
-+CONFIG_ADAPTEC_STARFIRE=m
-+# CONFIG_ADAPTEC_STARFIRE_NAPI is not set
-+CONFIG_AC3200=m
-+CONFIG_APRICOT=m
-+CONFIG_B44=m
-+CONFIG_FORCEDETH=m
-+CONFIG_CS89x0=m
-+# CONFIG_DGRS is not set
-+CONFIG_EEPRO100=m
-+CONFIG_E100=m
-+CONFIG_FEALNX=m
-+CONFIG_NATSEMI=m
-+CONFIG_NE2K_PCI=m
-+CONFIG_8139CP=m
-+CONFIG_8139TOO=m
-+CONFIG_8139TOO_PIO=y
-+CONFIG_8139TOO_TUNE_TWISTER=y
-+CONFIG_8139TOO_8129=y
-+# CONFIG_8139_OLD_RX_RESET is not set
-+CONFIG_SIS900=m
-+CONFIG_EPIC100=m
-+CONFIG_SUNDANCE=m
-+# CONFIG_SUNDANCE_MMIO is not set
-+CONFIG_TLAN=m
-+CONFIG_VIA_RHINE=m
-+# CONFIG_VIA_RHINE_MMIO is not set
-+CONFIG_NET_POCKET=y
-+CONFIG_ATP=m
-+CONFIG_DE600=m
-+CONFIG_DE620=m
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+# CONFIG_ACENIC is not set
-+CONFIG_DL2K=m
-+CONFIG_E1000=m
-+# CONFIG_E1000_NAPI is not set
-+CONFIG_NS83820=m
-+CONFIG_HAMACHI=m
-+CONFIG_YELLOWFIN=m
-+CONFIG_R8169=m
-+# CONFIG_R8169_NAPI is not set
-+# CONFIG_R8169_VLAN is not set
-+CONFIG_SK98LIN=m
-+CONFIG_VIA_VELOCITY=m
-+CONFIG_TIGON3=m
-+CONFIG_BNX2=m
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+CONFIG_IXGB=m
-+# CONFIG_IXGB_NAPI is not set
-+CONFIG_S2IO=m
-+# CONFIG_S2IO_NAPI is not set
-+# CONFIG_2BUFF_MODE is not set
-+
-+#
-+# Token Ring devices
-+#
-+CONFIG_TR=y
-+CONFIG_IBMTR=m
-+CONFIG_IBMOL=m
-+CONFIG_IBMLS=m
-+CONFIG_3C359=m
-+CONFIG_TMS380TR=m
-+CONFIG_TMSPCI=m
-+CONFIG_SKISA=m
-+CONFIG_PROTEON=m
-+CONFIG_ABYSS=m
-+# CONFIG_SMCTR is not set
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+CONFIG_NET_RADIO=y
-+
-+#
-+# Obsolete Wireless cards support (pre-802.11)
-+#
-+CONFIG_STRIP=m
-+CONFIG_ARLAN=m
-+CONFIG_WAVELAN=m
-+CONFIG_PCMCIA_WAVELAN=m
-+CONFIG_PCMCIA_NETWAVE=m
-+
-+#
-+# Wireless 802.11 Frequency Hopping cards support
-+#
-+CONFIG_PCMCIA_RAYCS=m
-+
-+#
-+# Wireless 802.11b ISA/PCI cards support
-+#
-+CONFIG_AIRO=m
-+CONFIG_HERMES=m
-+CONFIG_PLX_HERMES=m
-+CONFIG_TMD_HERMES=m
-+CONFIG_PCI_HERMES=m
-+CONFIG_ATMEL=m
-+CONFIG_PCI_ATMEL=m
-+
-+#
-+# Wireless 802.11b Pcmcia/Cardbus cards support
-+#
-+CONFIG_PCMCIA_HERMES=m
-+CONFIG_AIRO_CS=m
-+CONFIG_PCMCIA_ATMEL=m
-+CONFIG_PCMCIA_WL3501=m
-+
-+#
-+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
-+#
-+CONFIG_PRISM54=m
-+CONFIG_NET_WIRELESS=y
-+
-+#
-+# PCMCIA network device support
-+#
-+CONFIG_NET_PCMCIA=y
-+CONFIG_PCMCIA_3C589=m
-+CONFIG_PCMCIA_3C574=m
-+CONFIG_PCMCIA_FMVJ18X=m
-+CONFIG_PCMCIA_PCNET=m
-+CONFIG_PCMCIA_NMCLAN=m
-+CONFIG_PCMCIA_SMC91C92=m
-+CONFIG_PCMCIA_XIRC2PS=m
-+CONFIG_PCMCIA_AXNET=m
-+CONFIG_ARCNET_COM20020_CS=m
-+CONFIG_PCMCIA_IBMTR=m
-+
-+#
-+# Wan interfaces
-+#
-+CONFIG_WAN=y
-+CONFIG_HOSTESS_SV11=m
-+CONFIG_COSA=m
-+CONFIG_DSCC4=m
-+CONFIG_DSCC4_PCISYNC=y
-+CONFIG_DSCC4_PCI_RST=y
-+CONFIG_LANMEDIA=m
-+CONFIG_SEALEVEL_4021=m
-+CONFIG_SYNCLINK_SYNCPPP=m
-+CONFIG_HDLC=m
-+CONFIG_HDLC_RAW=y
-+CONFIG_HDLC_RAW_ETH=y
-+CONFIG_HDLC_CISCO=y
-+CONFIG_HDLC_FR=y
-+CONFIG_HDLC_PPP=y
-+CONFIG_HDLC_X25=y
-+CONFIG_PCI200SYN=m
-+CONFIG_WANXL=m
-+CONFIG_PC300=m
-+CONFIG_PC300_MLPPP=y
-+CONFIG_N2=m
-+CONFIG_C101=m
-+CONFIG_FARSYNC=m
-+CONFIG_DLCI=m
-+CONFIG_DLCI_COUNT=24
-+CONFIG_DLCI_MAX=8
-+CONFIG_SDLA=m
-+CONFIG_WAN_ROUTER_DRIVERS=y
-+# CONFIG_VENDOR_SANGOMA is not set
-+CONFIG_CYCLADES_SYNC=m
-+CONFIG_CYCLOMX_X25=y
-+CONFIG_LAPBETHER=m
-+CONFIG_X25_ASY=m
-+CONFIG_SBNI=m
-+# CONFIG_SBNI_MULTILINE is not set
-+
-+#
-+# ATM drivers
-+#
-+CONFIG_ATM_TCP=m
-+CONFIG_ATM_LANAI=m
-+CONFIG_ATM_ENI=m
-+# CONFIG_ATM_ENI_DEBUG is not set
-+# CONFIG_ATM_ENI_TUNE_BURST is not set
-+CONFIG_ATM_FIRESTREAM=m
-+CONFIG_ATM_ZATM=m
-+# CONFIG_ATM_ZATM_DEBUG is not set
-+CONFIG_ATM_NICSTAR=m
-+# CONFIG_ATM_NICSTAR_USE_SUNI is not set
-+# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
-+CONFIG_ATM_IDT77252=m
-+# CONFIG_ATM_IDT77252_DEBUG is not set
-+# CONFIG_ATM_IDT77252_RCV_ALL is not set
-+CONFIG_ATM_IDT77252_USE_SUNI=y
-+CONFIG_ATM_AMBASSADOR=m
-+# CONFIG_ATM_AMBASSADOR_DEBUG is not set
-+CONFIG_ATM_HORIZON=m
-+# CONFIG_ATM_HORIZON_DEBUG is not set
-+CONFIG_ATM_IA=m
-+# CONFIG_ATM_IA_DEBUG is not set
-+CONFIG_ATM_FORE200E_MAYBE=m
-+CONFIG_ATM_FORE200E_PCA=y
-+CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
-+# CONFIG_ATM_FORE200E_USE_TASKLET is not set
-+CONFIG_ATM_FORE200E_TX_RETRY=16
-+CONFIG_ATM_FORE200E_DEBUG=0
-+CONFIG_ATM_FORE200E=m
-+CONFIG_ATM_HE=m
-+CONFIG_ATM_HE_USE_SUNI=y
-+CONFIG_FDDI=y
-+CONFIG_DEFXX=m
-+CONFIG_SKFP=m
-+CONFIG_HIPPI=y
-+CONFIG_ROADRUNNER=m
-+# CONFIG_ROADRUNNER_LARGE_RINGS is not set
-+CONFIG_PLIP=m
-+CONFIG_PPP=m
-+CONFIG_PPP_MULTILINK=y
-+CONFIG_PPP_FILTER=y
-+CONFIG_PPP_ASYNC=m
-+CONFIG_PPP_SYNC_TTY=m
-+CONFIG_PPP_DEFLATE=m
-+CONFIG_PPP_BSDCOMP=m
-+CONFIG_PPPOE=m
-+CONFIG_PPPOATM=m
-+CONFIG_SLIP=m
-+CONFIG_SLIP_COMPRESSED=y
-+CONFIG_SLIP_SMART=y
-+CONFIG_SLIP_MODE_SLIP6=y
-+CONFIG_NET_FC=y
-+CONFIG_SHAPER=m
-+CONFIG_NETCONSOLE=m
-+
-+#
-+# ISDN subsystem
-+#
-+CONFIG_ISDN=m
-+
-+#
-+# Old ISDN4Linux
-+#
-+CONFIG_ISDN_I4L=m
-+CONFIG_ISDN_PPP=y
-+CONFIG_ISDN_PPP_VJ=y
-+CONFIG_ISDN_MPP=y
-+CONFIG_IPPP_FILTER=y
-+CONFIG_ISDN_PPP_BSDCOMP=m
-+CONFIG_ISDN_AUDIO=y
-+CONFIG_ISDN_TTY_FAX=y
-+CONFIG_ISDN_X25=y
-+
-+#
-+# ISDN feature submodules
-+#
-+# CONFIG_ISDN_DRV_LOOP is not set
-+# CONFIG_ISDN_DIVERSION is not set
-+
-+#
-+# ISDN4Linux hardware drivers
-+#
-+
-+#
-+# Passive cards
-+#
-+CONFIG_ISDN_DRV_HISAX=m
-+
-+#
-+# D-channel protocol features
-+#
-+CONFIG_HISAX_EURO=y
-+CONFIG_DE_AOC=y
-+# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-+# CONFIG_HISAX_NO_LLC is not set
-+# CONFIG_HISAX_NO_KEYPAD is not set
-+CONFIG_HISAX_1TR6=y
-+CONFIG_HISAX_NI1=y
-+CONFIG_HISAX_MAX_CARDS=8
-+
-+#
-+# HiSax supported cards
-+#
-+CONFIG_HISAX_16_0=y
-+CONFIG_HISAX_16_3=y
-+CONFIG_HISAX_TELESPCI=y
-+CONFIG_HISAX_S0BOX=y
-+CONFIG_HISAX_AVM_A1=y
-+CONFIG_HISAX_FRITZPCI=y
-+CONFIG_HISAX_AVM_A1_PCMCIA=y
-+CONFIG_HISAX_ELSA=y
-+CONFIG_HISAX_IX1MICROR2=y
-+CONFIG_HISAX_DIEHLDIVA=y
-+CONFIG_HISAX_ASUSCOM=y
-+CONFIG_HISAX_TELEINT=y
-+CONFIG_HISAX_HFCS=y
-+CONFIG_HISAX_SEDLBAUER=y
-+CONFIG_HISAX_SPORTSTER=y
-+CONFIG_HISAX_MIC=y
-+CONFIG_HISAX_NETJET=y
-+CONFIG_HISAX_NETJET_U=y
-+CONFIG_HISAX_NICCY=y
-+CONFIG_HISAX_ISURF=y
-+CONFIG_HISAX_HSTSAPHIR=y
-+CONFIG_HISAX_BKM_A4T=y
-+CONFIG_HISAX_SCT_QUADRO=y
-+CONFIG_HISAX_GAZEL=y
-+CONFIG_HISAX_HFC_PCI=y
-+CONFIG_HISAX_W6692=y
-+CONFIG_HISAX_HFC_SX=y
-+CONFIG_HISAX_ENTERNOW_PCI=y
-+# CONFIG_HISAX_DEBUG is not set
-+
-+#
-+# HiSax PCMCIA card service modules
-+#
-+CONFIG_HISAX_SEDLBAUER_CS=m
-+CONFIG_HISAX_ELSA_CS=m
-+CONFIG_HISAX_AVM_A1_CS=m
-+CONFIG_HISAX_TELES_CS=m
-+
-+#
-+# HiSax sub driver modules
-+#
-+CONFIG_HISAX_ST5481=m
-+CONFIG_HISAX_HFCUSB=m
-+CONFIG_HISAX_HFC4S8S=m
-+CONFIG_HISAX_FRITZ_PCIPNP=m
-+CONFIG_HISAX_HDLC=y
-+
-+#
-+# Active cards
-+#
-+CONFIG_ISDN_DRV_ICN=m
-+CONFIG_ISDN_DRV_PCBIT=m
-+CONFIG_ISDN_DRV_SC=m
-+CONFIG_ISDN_DRV_ACT2000=m
-+# CONFIG_HYSDN is not set
-+
-+#
-+# CAPI subsystem
-+#
-+CONFIG_ISDN_CAPI=m
-+CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
-+CONFIG_ISDN_CAPI_MIDDLEWARE=y
-+CONFIG_ISDN_CAPI_CAPI20=m
-+CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
-+CONFIG_ISDN_CAPI_CAPIFS=m
-+CONFIG_ISDN_CAPI_CAPIDRV=m
-+
-+#
-+# CAPI hardware drivers
-+#
-+
-+#
-+# Active AVM cards
-+#
-+CONFIG_CAPI_AVM=y
-+CONFIG_ISDN_DRV_AVMB1_B1ISA=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-+CONFIG_ISDN_DRV_AVMB1_T1ISA=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-+CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
-+CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-+CONFIG_ISDN_DRV_AVMB1_C4=m
-+
-+#
-+# Active Eicon DIVA Server cards
-+#
-+CONFIG_CAPI_EICON=y
-+CONFIG_ISDN_DIVAS=m
-+CONFIG_ISDN_DIVAS_BRIPCI=y
-+CONFIG_ISDN_DIVAS_PRIPCI=y
-+CONFIG_ISDN_DIVAS_DIVACAPI=m
-+CONFIG_ISDN_DIVAS_USERIDI=m
-+CONFIG_ISDN_DIVAS_MAINT=m
-+
-+#
-+# Telephony Support
-+#
-+CONFIG_PHONE=m
-+CONFIG_PHONE_IXJ=m
-+CONFIG_PHONE_IXJ_PCMCIA=m
-+
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+CONFIG_INPUT_MOUSEDEV_PSAUX=y
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+CONFIG_INPUT_JOYDEV=m
-+CONFIG_INPUT_TSDEV=m
-+CONFIG_INPUT_TSDEV_SCREEN_X=240
-+CONFIG_INPUT_TSDEV_SCREEN_Y=320
-+CONFIG_INPUT_EVDEV=m
-+CONFIG_INPUT_EVBUG=m
-+
-+#
-+# Input Device Drivers
-+#
-+CONFIG_INPUT_KEYBOARD=y
-+CONFIG_KEYBOARD_ATKBD=y
-+CONFIG_KEYBOARD_SUNKBD=m
-+CONFIG_KEYBOARD_LKKBD=m
-+CONFIG_KEYBOARD_XTKBD=m
-+CONFIG_KEYBOARD_NEWTON=m
-+CONFIG_INPUT_MOUSE=y
-+CONFIG_MOUSE_PS2=y
-+CONFIG_MOUSE_SERIAL=m
-+CONFIG_MOUSE_INPORT=m
-+# CONFIG_MOUSE_ATIXL is not set
-+CONFIG_MOUSE_LOGIBM=m
-+CONFIG_MOUSE_PC110PAD=m
-+CONFIG_MOUSE_VSXXXAA=m
-+CONFIG_INPUT_JOYSTICK=y
-+CONFIG_JOYSTICK_ANALOG=m
-+CONFIG_JOYSTICK_A3D=m
-+CONFIG_JOYSTICK_ADI=m
-+CONFIG_JOYSTICK_COBRA=m
-+CONFIG_JOYSTICK_GF2K=m
-+CONFIG_JOYSTICK_GRIP=m
-+CONFIG_JOYSTICK_GRIP_MP=m
-+CONFIG_JOYSTICK_GUILLEMOT=m
-+CONFIG_JOYSTICK_INTERACT=m
-+CONFIG_JOYSTICK_SIDEWINDER=m
-+CONFIG_JOYSTICK_TMDC=m
-+CONFIG_JOYSTICK_IFORCE=m
-+CONFIG_JOYSTICK_IFORCE_USB=y
-+CONFIG_JOYSTICK_IFORCE_232=y
-+CONFIG_JOYSTICK_WARRIOR=m
-+CONFIG_JOYSTICK_MAGELLAN=m
-+CONFIG_JOYSTICK_SPACEORB=m
-+CONFIG_JOYSTICK_SPACEBALL=m
-+CONFIG_JOYSTICK_STINGER=m
-+CONFIG_JOYSTICK_TWIDJOY=m
-+CONFIG_JOYSTICK_DB9=m
-+CONFIG_JOYSTICK_GAMECON=m
-+CONFIG_JOYSTICK_TURBOGRAFX=m
-+CONFIG_JOYSTICK_JOYDUMP=m
-+CONFIG_INPUT_TOUCHSCREEN=y
-+CONFIG_TOUCHSCREEN_GUNZE=m
-+CONFIG_TOUCHSCREEN_ELO=m
-+CONFIG_TOUCHSCREEN_MTOUCH=m
-+CONFIG_TOUCHSCREEN_MK712=m
-+CONFIG_INPUT_MISC=y
-+CONFIG_INPUT_PCSPKR=m
-+CONFIG_INPUT_UINPUT=m
-+
-+#
-+# Hardware I/O ports
-+#
-+CONFIG_SERIO=y
-+CONFIG_SERIO_I8042=y
-+CONFIG_SERIO_SERPORT=m
-+CONFIG_SERIO_CT82C710=m
-+CONFIG_SERIO_PARKBD=m
-+CONFIG_SERIO_PCIPS2=m
-+CONFIG_SERIO_LIBPS2=y
-+CONFIG_SERIO_RAW=m
-+CONFIG_GAMEPORT=m
-+CONFIG_GAMEPORT_NS558=m
-+CONFIG_GAMEPORT_L4=m
-+CONFIG_GAMEPORT_EMU10K1=m
-+CONFIG_GAMEPORT_VORTEX=m
-+CONFIG_GAMEPORT_FM801=m
-+# CONFIG_GAMEPORT_CS461X is not set
-+
-+#
-+# Character devices
-+#
-+CONFIG_VT=y
-+CONFIG_VT_CONSOLE=y
-+CONFIG_HW_CONSOLE=y
-+# CONFIG_SERIAL_NONSTANDARD is not set
-+
-+#
-+# Serial drivers
-+#
-+CONFIG_SERIAL_8250=m
-+# CONFIG_SERIAL_8250_CS is not set
-+# CONFIG_SERIAL_8250_ACPI is not set
-+CONFIG_SERIAL_8250_NR_UARTS=4
-+# CONFIG_SERIAL_8250_EXTENDED is not set
-+
-+#
-+# Non-8250 serial port support
-+#
-+CONFIG_SERIAL_CORE=m
-+CONFIG_SERIAL_JSM=m
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+CONFIG_PRINTER=m
-+# CONFIG_LP_CONSOLE is not set
-+CONFIG_PPDEV=m
-+CONFIG_TIPAR=m
-+
-+#
-+# IPMI
-+#
-+CONFIG_IPMI_HANDLER=m
-+# CONFIG_IPMI_PANIC_EVENT is not set
-+CONFIG_IPMI_DEVICE_INTERFACE=m
-+CONFIG_IPMI_SI=m
-+CONFIG_IPMI_WATCHDOG=m
-+CONFIG_IPMI_POWEROFF=m
-+
-+#
-+# Watchdog Cards
-+#
-+CONFIG_WATCHDOG=y
-+# CONFIG_WATCHDOG_NOWAYOUT is not set
-+
-+#
-+# Watchdog Device Drivers
-+#
-+CONFIG_SOFT_WATCHDOG=m
-+CONFIG_ACQUIRE_WDT=m
-+CONFIG_ADVANTECH_WDT=m
-+CONFIG_ALIM1535_WDT=m
-+CONFIG_ALIM7101_WDT=m
-+CONFIG_SC520_WDT=m
-+CONFIG_EUROTECH_WDT=m
-+CONFIG_IB700_WDT=m
-+CONFIG_WAFER_WDT=m
-+CONFIG_I8XX_TCO=m
-+CONFIG_SC1200_WDT=m
-+CONFIG_SCx200_WDT=m
-+CONFIG_60XX_WDT=m
-+CONFIG_CPU5_WDT=m
-+CONFIG_W83627HF_WDT=m
-+CONFIG_W83877F_WDT=m
-+CONFIG_MACHZ_WDT=m
-+
-+#
-+# ISA-based Watchdog Cards
-+#
-+CONFIG_PCWATCHDOG=m
-+CONFIG_MIXCOMWD=m
-+CONFIG_WDT=m
-+CONFIG_WDT_501=y
-+
-+#
-+# PCI-based Watchdog Cards
-+#
-+CONFIG_PCIPCWATCHDOG=m
-+CONFIG_WDTPCI=m
-+CONFIG_WDT_501_PCI=y
-+
-+#
-+# USB-based Watchdog Cards
-+#
-+CONFIG_USBPCWATCHDOG=m
-+CONFIG_HW_RANDOM=m
-+CONFIG_NVRAM=m
-+CONFIG_RTC=m
-+CONFIG_GEN_RTC=m
-+CONFIG_GEN_RTC_X=y
-+CONFIG_DTLK=m
-+CONFIG_R3964=m
-+CONFIG_APPLICOM=m
-+CONFIG_SONYPI=m
-+
-+#
-+# Ftape, the floppy tape device driver
-+#
-+# CONFIG_FTAPE is not set
-+CONFIG_AGP=m
-+CONFIG_AGP_ALI=m
-+CONFIG_AGP_ATI=m
-+CONFIG_AGP_AMD=m
-+CONFIG_AGP_AMD64=m
-+CONFIG_AGP_INTEL=m
-+CONFIG_AGP_NVIDIA=m
-+CONFIG_AGP_SIS=m
-+CONFIG_AGP_SWORKS=m
-+CONFIG_AGP_VIA=m
-+CONFIG_AGP_EFFICEON=m
-+CONFIG_DRM=m
-+CONFIG_DRM_TDFX=m
-+# CONFIG_DRM_GAMMA is not set
-+CONFIG_DRM_R128=m
-+CONFIG_DRM_RADEON=m
-+CONFIG_DRM_I810=m
-+CONFIG_DRM_I830=m
-+CONFIG_DRM_I915=m
-+CONFIG_DRM_MGA=m
-+CONFIG_DRM_SIS=m
-+
-+#
-+# PCMCIA character devices
-+#
-+CONFIG_SYNCLINK_CS=m
-+CONFIG_MWAVE=m
-+CONFIG_SCx200_GPIO=m
-+CONFIG_RAW_DRIVER=m
-+# CONFIG_HPET is not set
-+CONFIG_MAX_RAW_DEVS=256
-+CONFIG_HANGCHECK_TIMER=m
-+
-+#
-+# TPM devices
-+#
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# I2C support
-+#
-+CONFIG_I2C=m
-+CONFIG_I2C_CHARDEV=m
-+
-+#
-+# I2C Algorithms
-+#
-+CONFIG_I2C_ALGOBIT=m
-+CONFIG_I2C_ALGOPCF=m
-+CONFIG_I2C_ALGOPCA=m
-+
-+#
-+# I2C Hardware Bus support
-+#
-+CONFIG_I2C_ALI1535=m
-+CONFIG_I2C_ALI1563=m
-+CONFIG_I2C_ALI15X3=m
-+CONFIG_I2C_AMD756=m
-+CONFIG_I2C_AMD756_S4882=m
-+CONFIG_I2C_AMD8111=m
-+CONFIG_I2C_ELEKTOR=m
-+CONFIG_I2C_I801=m
-+CONFIG_I2C_I810=m
-+CONFIG_I2C_PIIX4=m
-+CONFIG_I2C_ISA=m
-+CONFIG_I2C_NFORCE2=m
-+CONFIG_I2C_PARPORT=m
-+CONFIG_I2C_PARPORT_LIGHT=m
-+CONFIG_I2C_PROSAVAGE=m
-+CONFIG_I2C_SAVAGE4=m
-+CONFIG_SCx200_I2C=m
-+CONFIG_SCx200_I2C_SCL=12
-+CONFIG_SCx200_I2C_SDA=13
-+CONFIG_SCx200_ACB=m
-+CONFIG_I2C_SIS5595=m
-+CONFIG_I2C_SIS630=m
-+CONFIG_I2C_SIS96X=m
-+CONFIG_I2C_STUB=m
-+CONFIG_I2C_VIA=m
-+CONFIG_I2C_VIAPRO=m
-+CONFIG_I2C_VOODOO3=m
-+CONFIG_I2C_PCA_ISA=m
-+
-+#
-+# Hardware Sensors Chip support
-+#
-+CONFIG_I2C_SENSOR=m
-+CONFIG_SENSORS_ADM1021=m
-+CONFIG_SENSORS_ADM1025=m
-+CONFIG_SENSORS_ADM1026=m
-+CONFIG_SENSORS_ADM1031=m
-+CONFIG_SENSORS_ASB100=m
-+CONFIG_SENSORS_DS1621=m
-+CONFIG_SENSORS_FSCHER=m
-+CONFIG_SENSORS_FSCPOS=m
-+CONFIG_SENSORS_GL518SM=m
-+CONFIG_SENSORS_GL520SM=m
-+CONFIG_SENSORS_IT87=m
-+CONFIG_SENSORS_LM63=m
-+CONFIG_SENSORS_LM75=m
-+CONFIG_SENSORS_LM77=m
-+CONFIG_SENSORS_LM78=m
-+CONFIG_SENSORS_LM80=m
-+CONFIG_SENSORS_LM83=m
-+CONFIG_SENSORS_LM85=m
-+CONFIG_SENSORS_LM87=m
-+CONFIG_SENSORS_LM90=m
-+CONFIG_SENSORS_LM92=m
-+CONFIG_SENSORS_MAX1619=m
-+CONFIG_SENSORS_PC87360=m
-+# CONFIG_SENSORS_SMSC47B397 is not set
-+CONFIG_SENSORS_SIS5595=m
-+CONFIG_SENSORS_SMSC47M1=m
-+CONFIG_SENSORS_VIA686A=m
-+CONFIG_SENSORS_W83781D=m
-+CONFIG_SENSORS_W83L785TS=m
-+CONFIG_SENSORS_W83627HF=m
-+
-+#
-+# Other I2C Chip support
-+#
-+CONFIG_SENSORS_DS1337=m
-+CONFIG_SENSORS_EEPROM=m
-+CONFIG_SENSORS_PCF8574=m
-+CONFIG_SENSORS_PCF8591=m
-+CONFIG_SENSORS_RTC8564=m
-+# CONFIG_I2C_DEBUG_CORE is not set
-+# CONFIG_I2C_DEBUG_ALGO is not set
-+# CONFIG_I2C_DEBUG_BUS is not set
-+# CONFIG_I2C_DEBUG_CHIP is not set
-+
-+#
-+# Dallas's 1-wire bus
-+#
-+CONFIG_W1=m
-+CONFIG_W1_MATROX=m
-+CONFIG_W1_DS9490=m
-+CONFIG_W1_DS9490_BRIDGE=m
-+CONFIG_W1_THERM=m
-+CONFIG_W1_SMEM=m
-+
-+#
-+# Misc devices
-+#
-+CONFIG_IBM_ASM=m
-+
-+#
-+# Multimedia devices
-+#
-+CONFIG_VIDEO_DEV=m
-+
-+#
-+# Video For Linux
-+#
-+
-+#
-+# Video Adapters
-+#
-+CONFIG_VIDEO_BT848=m
-+CONFIG_VIDEO_PMS=m
-+CONFIG_VIDEO_BWQCAM=m
-+CONFIG_VIDEO_CQCAM=m
-+CONFIG_VIDEO_W9966=m
-+CONFIG_VIDEO_CPIA=m
-+CONFIG_VIDEO_CPIA_PP=m
-+CONFIG_VIDEO_CPIA_USB=m
-+CONFIG_VIDEO_SAA5246A=m
-+CONFIG_VIDEO_SAA5249=m
-+CONFIG_TUNER_3036=m
-+CONFIG_VIDEO_STRADIS=m
-+CONFIG_VIDEO_ZORAN=m
-+CONFIG_VIDEO_ZORAN_BUZ=m
-+CONFIG_VIDEO_ZORAN_DC10=m
-+CONFIG_VIDEO_ZORAN_DC30=m
-+CONFIG_VIDEO_ZORAN_LML33=m
-+CONFIG_VIDEO_ZORAN_LML33R10=m
-+# CONFIG_VIDEO_ZR36120 is not set
-+CONFIG_VIDEO_MEYE=m
-+# CONFIG_VIDEO_SAA7134 is not set
-+CONFIG_VIDEO_MXB=m
-+CONFIG_VIDEO_DPC=m
-+CONFIG_VIDEO_HEXIUM_ORION=m
-+CONFIG_VIDEO_HEXIUM_GEMINI=m
-+CONFIG_VIDEO_CX88=m
-+# CONFIG_VIDEO_CX88_DVB is not set
-+CONFIG_VIDEO_OVCAMCHIP=m
-+
-+#
-+# Radio Adapters
-+#
-+CONFIG_RADIO_CADET=m
-+CONFIG_RADIO_RTRACK=m
-+CONFIG_RADIO_RTRACK2=m
-+CONFIG_RADIO_AZTECH=m
-+CONFIG_RADIO_GEMTEK=m
-+CONFIG_RADIO_GEMTEK_PCI=m
-+CONFIG_RADIO_MAXIRADIO=m
-+CONFIG_RADIO_MAESTRO=m
-+CONFIG_RADIO_MIROPCM20=m
-+CONFIG_RADIO_MIROPCM20_RDS=m
-+CONFIG_RADIO_SF16FMI=m
-+CONFIG_RADIO_SF16FMR2=m
-+CONFIG_RADIO_TERRATEC=m
-+CONFIG_RADIO_TRUST=m
-+CONFIG_RADIO_TYPHOON=m
-+CONFIG_RADIO_TYPHOON_PROC_FS=y
-+CONFIG_RADIO_ZOLTRIX=m
-+
-+#
-+# Digital Video Broadcasting Devices
-+#
-+CONFIG_DVB=y
-+CONFIG_DVB_CORE=m
-+
-+#
-+# Supported SAA7146 based PCI Adapters
-+#
-+CONFIG_DVB_AV7110=m
-+# CONFIG_DVB_AV7110_OSD is not set
-+CONFIG_DVB_BUDGET=m
-+CONFIG_DVB_BUDGET_CI=m
-+CONFIG_DVB_BUDGET_AV=m
-+CONFIG_DVB_BUDGET_PATCH=m
-+
-+#
-+# Supported USB Adapters
-+#
-+CONFIG_DVB_TTUSB_BUDGET=m
-+CONFIG_DVB_TTUSB_DEC=m
-+CONFIG_DVB_DIBUSB=m
-+CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES=y
-+# CONFIG_DVB_DIBCOM_DEBUG is not set
-+CONFIG_DVB_CINERGYT2=m
-+# CONFIG_DVB_CINERGYT2_TUNING is not set
-+
-+#
-+# Supported FlexCopII (B2C2) Adapters
-+#
-+CONFIG_DVB_B2C2_FLEXCOP=m
-+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
-+CONFIG_DVB_B2C2_FLEXCOP_USB=m
-+# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
-+CONFIG_DVB_B2C2_SKYSTAR=m
-+
-+#
-+# Supported BT878 Adapters
-+#
-+CONFIG_DVB_BT8XX=m
-+
-+#
-+# Supported DVB Frontends
-+#
-+
-+#
-+# Customise DVB Frontends
-+#
-+
-+#
-+# DVB-S (satellite) frontends
-+#
-+CONFIG_DVB_STV0299=m
-+CONFIG_DVB_CX24110=m
-+CONFIG_DVB_TDA8083=m
-+CONFIG_DVB_TDA80XX=m
-+CONFIG_DVB_MT312=m
-+CONFIG_DVB_VES1X93=m
-+
-+#
-+# DVB-T (terrestrial) frontends
-+#
-+CONFIG_DVB_SP8870=m
-+CONFIG_DVB_SP887X=m
-+CONFIG_DVB_CX22700=m
-+CONFIG_DVB_CX22702=m
-+CONFIG_DVB_L64781=m
-+CONFIG_DVB_TDA1004X=m
-+CONFIG_DVB_NXT6000=m
-+CONFIG_DVB_MT352=m
-+CONFIG_DVB_DIB3000MB=m
-+CONFIG_DVB_DIB3000MC=m
-+
-+#
-+# DVB-C (cable) frontends
-+#
-+CONFIG_DVB_ATMEL_AT76C651=m
-+CONFIG_DVB_VES1820=m
-+CONFIG_DVB_TDA10021=m
-+CONFIG_DVB_STV0297=m
-+
-+#
-+# ATSC (North American/Korean Terresterial DTV) frontends
-+#
-+CONFIG_DVB_NXT2002=m
-+CONFIG_DVB_OR51211=m
-+CONFIG_DVB_OR51132=m
-+CONFIG_VIDEO_SAA7146=m
-+CONFIG_VIDEO_SAA7146_VV=m
-+CONFIG_VIDEO_VIDEOBUF=m
-+CONFIG_VIDEO_TUNER=m
-+CONFIG_VIDEO_BUF=m
-+CONFIG_VIDEO_BTCX=m
-+CONFIG_VIDEO_IR=m
-+CONFIG_VIDEO_TVEEPROM=m
-+
-+#
-+# Graphics support
-+#
-+CONFIG_FB=y
-+CONFIG_FB_CFB_FILLRECT=m
-+CONFIG_FB_CFB_COPYAREA=m
-+CONFIG_FB_CFB_IMAGEBLIT=m
-+CONFIG_FB_SOFT_CURSOR=m
-+# CONFIG_FB_MACMODES is not set
-+CONFIG_FB_MODE_HELPERS=y
-+CONFIG_FB_TILEBLITTING=y
-+CONFIG_FB_CIRRUS=m
-+CONFIG_FB_PM2=m
-+CONFIG_FB_PM2_FIFO_DISCONNECT=y
-+CONFIG_FB_CYBER2000=m
-+# CONFIG_FB_ASILIANT is not set
-+# CONFIG_FB_IMSTT is not set
-+CONFIG_FB_VGA16=m
-+# CONFIG_FB_VESA is not set
-+CONFIG_VIDEO_SELECT=y
-+CONFIG_FB_HGA=m
-+# CONFIG_FB_HGA_ACCEL is not set
-+CONFIG_FB_NVIDIA=m
-+CONFIG_FB_NVIDIA_I2C=y
-+CONFIG_FB_RIVA=m
-+CONFIG_FB_RIVA_I2C=y
-+CONFIG_FB_RIVA_DEBUG=y
-+CONFIG_FB_I810=m
-+# CONFIG_FB_I810_GTF is not set
-+CONFIG_FB_INTEL=m
-+# CONFIG_FB_INTEL_DEBUG is not set
-+CONFIG_FB_MATROX=m
-+CONFIG_FB_MATROX_MILLENIUM=y
-+CONFIG_FB_MATROX_MYSTIQUE=y
-+# CONFIG_FB_MATROX_G is not set
-+CONFIG_FB_MATROX_I2C=m
-+CONFIG_FB_MATROX_MULTIHEAD=y
-+CONFIG_FB_RADEON_OLD=m
-+CONFIG_FB_RADEON=m
-+CONFIG_FB_RADEON_I2C=y
-+# CONFIG_FB_RADEON_DEBUG is not set
-+CONFIG_FB_ATY128=m
-+CONFIG_FB_ATY=m
-+CONFIG_FB_ATY_CT=y
-+CONFIG_FB_ATY_GENERIC_LCD=y
-+CONFIG_FB_ATY_XL_INIT=y
-+CONFIG_FB_ATY_GX=y
-+CONFIG_FB_SAVAGE=m
-+CONFIG_FB_SAVAGE_I2C=y
-+CONFIG_FB_SAVAGE_ACCEL=y
-+CONFIG_FB_SIS=m
-+CONFIG_FB_SIS_300=y
-+CONFIG_FB_SIS_315=y
-+CONFIG_FB_NEOMAGIC=m
-+CONFIG_FB_KYRO=m
-+CONFIG_FB_3DFX=m
-+# CONFIG_FB_3DFX_ACCEL is not set
-+CONFIG_FB_VOODOO1=m
-+CONFIG_FB_TRIDENT=m
-+# CONFIG_FB_TRIDENT_ACCEL is not set
-+# CONFIG_FB_PM3 is not set
-+CONFIG_FB_GEODE=y
-+CONFIG_FB_GEODE_GX1=m
-+CONFIG_FB_S1D13XXX=m
-+CONFIG_FB_VIRTUAL=m
-+
-+#
-+# Console display driver support
-+#
-+CONFIG_VGA_CONSOLE=y
-+CONFIG_MDA_CONSOLE=m
-+CONFIG_DUMMY_CONSOLE=y
-+CONFIG_FRAMEBUFFER_CONSOLE=m
-+# CONFIG_FONTS is not set
-+CONFIG_FONT_8x8=y
-+CONFIG_FONT_8x16=y
-+
-+#
-+# Logo configuration
-+#
-+# CONFIG_LOGO is not set
-+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-+
-+#
-+# Sound
-+#
-+CONFIG_SOUND=m
-+
-+#
-+# Advanced Linux Sound Architecture
-+#
-+CONFIG_SND=m
-+CONFIG_SND_TIMER=m
-+CONFIG_SND_PCM=m
-+CONFIG_SND_HWDEP=m
-+CONFIG_SND_RAWMIDI=m
-+CONFIG_SND_SEQUENCER=m
-+CONFIG_SND_SEQ_DUMMY=m
-+CONFIG_SND_OSSEMUL=y
-+CONFIG_SND_MIXER_OSS=m
-+CONFIG_SND_PCM_OSS=m
-+CONFIG_SND_SEQUENCER_OSS=y
-+CONFIG_SND_RTCTIMER=m
-+# CONFIG_SND_VERBOSE_PRINTK is not set
-+# CONFIG_SND_DEBUG is not set
-+CONFIG_SND_GENERIC_PM=y
-+
-+#
-+# Generic devices
-+#
-+CONFIG_SND_MPU401_UART=m
-+CONFIG_SND_OPL3_LIB=m
-+CONFIG_SND_OPL4_LIB=m
-+CONFIG_SND_VX_LIB=m
-+CONFIG_SND_DUMMY=m
-+CONFIG_SND_VIRMIDI=m
-+CONFIG_SND_MTPAV=m
-+CONFIG_SND_SERIAL_U16550=m
-+CONFIG_SND_MPU401=m
-+
-+#
-+# ISA devices
-+#
-+CONFIG_SND_AD1848_LIB=m
-+CONFIG_SND_CS4231_LIB=m
-+CONFIG_SND_AD1816A=m
-+CONFIG_SND_AD1848=m
-+CONFIG_SND_CS4231=m
-+CONFIG_SND_CS4232=m
-+CONFIG_SND_CS4236=m
-+CONFIG_SND_ES968=m
-+CONFIG_SND_ES1688=m
-+CONFIG_SND_ES18XX=m
-+CONFIG_SND_GUS_SYNTH=m
-+CONFIG_SND_GUSCLASSIC=m
-+CONFIG_SND_GUSEXTREME=m
-+CONFIG_SND_GUSMAX=m
-+CONFIG_SND_INTERWAVE=m
-+CONFIG_SND_INTERWAVE_STB=m
-+CONFIG_SND_OPTI92X_AD1848=m
-+CONFIG_SND_OPTI92X_CS4231=m
-+CONFIG_SND_OPTI93X=m
-+CONFIG_SND_SB8=m
-+CONFIG_SND_SB16=m
-+CONFIG_SND_SBAWE=m
-+CONFIG_SND_SB16_CSP=y
-+CONFIG_SND_WAVEFRONT=m
-+CONFIG_SND_ALS100=m
-+CONFIG_SND_AZT2320=m
-+CONFIG_SND_CMI8330=m
-+CONFIG_SND_DT019X=m
-+CONFIG_SND_OPL3SA2=m
-+CONFIG_SND_SGALAXY=m
-+CONFIG_SND_SSCAPE=m
-+
-+#
-+# PCI devices
-+#
-+CONFIG_SND_AC97_CODEC=m
-+CONFIG_SND_ALI5451=m
-+CONFIG_SND_ATIIXP=m
-+CONFIG_SND_ATIIXP_MODEM=m
-+CONFIG_SND_AU8810=m
-+CONFIG_SND_AU8820=m
-+CONFIG_SND_AU8830=m
-+CONFIG_SND_AZT3328=m
-+CONFIG_SND_BT87X=m
-+# CONFIG_SND_BT87X_OVERCLOCK is not set
-+CONFIG_SND_CS46XX=m
-+CONFIG_SND_CS46XX_NEW_DSP=y
-+CONFIG_SND_CS4281=m
-+CONFIG_SND_EMU10K1=m
-+# CONFIG_SND_EMU10K1X is not set
-+# CONFIG_SND_CA0106 is not set
-+CONFIG_SND_KORG1212=m
-+CONFIG_SND_MIXART=m
-+CONFIG_SND_NM256=m
-+CONFIG_SND_RME32=m
-+CONFIG_SND_RME96=m
-+CONFIG_SND_RME9652=m
-+CONFIG_SND_HDSP=m
-+CONFIG_SND_TRIDENT=m
-+CONFIG_SND_YMFPCI=m
-+CONFIG_SND_ALS4000=m
-+CONFIG_SND_CMIPCI=m
-+CONFIG_SND_ENS1370=m
-+CONFIG_SND_ENS1371=m
-+CONFIG_SND_ES1938=m
-+CONFIG_SND_ES1968=m
-+CONFIG_SND_MAESTRO3=m
-+CONFIG_SND_FM801=m
-+CONFIG_SND_FM801_TEA575X=m
-+CONFIG_SND_ICE1712=m
-+CONFIG_SND_ICE1724=m
-+CONFIG_SND_INTEL8X0=m
-+CONFIG_SND_INTEL8X0M=m
-+CONFIG_SND_SONICVIBES=m
-+CONFIG_SND_VIA82XX=m
-+# CONFIG_SND_VIA82XX_MODEM is not set
-+CONFIG_SND_VX222=m
-+CONFIG_SND_HDA_INTEL=m
-+
-+#
-+# USB devices
-+#
-+CONFIG_SND_USB_AUDIO=m
-+CONFIG_SND_USB_USX2Y=m
-+
-+#
-+# PCMCIA devices
-+#
-+CONFIG_SND_VXPOCKET=m
-+CONFIG_SND_VXP440=m
-+CONFIG_SND_PDAUDIOCF=m
-+
-+#
-+# Open Sound System
-+#
-+CONFIG_SOUND_PRIME=m
-+CONFIG_SOUND_BT878=m
-+CONFIG_SOUND_CMPCI=m
-+# CONFIG_SOUND_CMPCI_FM is not set
-+# CONFIG_SOUND_CMPCI_MIDI is not set
-+CONFIG_SOUND_CMPCI_JOYSTICK=y
-+CONFIG_SOUND_EMU10K1=m
-+CONFIG_MIDI_EMU10K1=y
-+CONFIG_SOUND_FUSION=m
-+CONFIG_SOUND_CS4281=m
-+CONFIG_SOUND_ES1370=m
-+CONFIG_SOUND_ES1371=m
-+CONFIG_SOUND_ESSSOLO1=m
-+CONFIG_SOUND_MAESTRO=m
-+CONFIG_SOUND_MAESTRO3=m
-+CONFIG_SOUND_ICH=m
-+CONFIG_SOUND_SONICVIBES=m
-+CONFIG_SOUND_TRIDENT=m
-+# CONFIG_SOUND_MSNDCLAS is not set
-+# CONFIG_SOUND_MSNDPIN is not set
-+CONFIG_SOUND_VIA82CXXX=m
-+CONFIG_MIDI_VIA82CXXX=y
-+CONFIG_SOUND_OSS=m
-+# CONFIG_SOUND_TRACEINIT is not set
-+# CONFIG_SOUND_DMAP is not set
-+# CONFIG_SOUND_AD1816 is not set
-+CONFIG_SOUND_AD1889=m
-+CONFIG_SOUND_SGALAXY=m
-+CONFIG_SOUND_ADLIB=m
-+CONFIG_SOUND_ACI_MIXER=m
-+CONFIG_SOUND_CS4232=m
-+CONFIG_SOUND_SSCAPE=m
-+CONFIG_SOUND_GUS=m
-+CONFIG_SOUND_GUS16=y
-+CONFIG_SOUND_GUSMAX=y
-+CONFIG_SOUND_VMIDI=m
-+CONFIG_SOUND_TRIX=m
-+CONFIG_SOUND_MSS=m
-+CONFIG_SOUND_MPU401=m
-+CONFIG_SOUND_NM256=m
-+CONFIG_SOUND_MAD16=m
-+CONFIG_MAD16_OLDCARD=y
-+CONFIG_SOUND_PAS=m
-+CONFIG_SOUND_PSS=m
-+CONFIG_PSS_MIXER=y
-+CONFIG_SOUND_SB=m
-+# CONFIG_SOUND_AWE32_SYNTH is not set
-+CONFIG_SOUND_WAVEFRONT=m
-+CONFIG_SOUND_MAUI=m
-+CONFIG_SOUND_YM3812=m
-+CONFIG_SOUND_OPL3SA1=m
-+CONFIG_SOUND_OPL3SA2=m
-+CONFIG_SOUND_YMFPCI=m
-+# CONFIG_SOUND_YMFPCI_LEGACY is not set
-+CONFIG_SOUND_UART6850=m
-+CONFIG_SOUND_AEDSP16=m
-+CONFIG_SC6600=y
-+CONFIG_SC6600_JOY=y
-+CONFIG_SC6600_CDROM=4
-+CONFIG_SC6600_CDROMBASE=0x0
-+# CONFIG_AEDSP16_MSS is not set
-+# CONFIG_AEDSP16_SBPRO is not set
-+# CONFIG_AEDSP16_MPU401 is not set
-+CONFIG_SOUND_TVMIXER=m
-+CONFIG_SOUND_KAHLUA=m
-+CONFIG_SOUND_ALI5455=m
-+CONFIG_SOUND_FORTE=m
-+CONFIG_SOUND_RME96XX=m
-+CONFIG_SOUND_AD1980=m
-+
-+#
-+# USB support
-+#
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB=y
-+# CONFIG_USB_DEBUG is not set
-+
-+#
-+# Miscellaneous USB options
-+#
-+CONFIG_USB_DEVICEFS=y
-+CONFIG_USB_BANDWIDTH=y
-+# CONFIG_USB_DYNAMIC_MINORS is not set
-+# CONFIG_USB_OTG is not set
-+
-+#
-+# USB Host Controller Drivers
-+#
-+CONFIG_USB_EHCI_HCD=y
-+CONFIG_USB_EHCI_SPLIT_ISO=y
-+CONFIG_USB_EHCI_ROOT_HUB_TT=y
-+CONFIG_USB_OHCI_HCD=m
-+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
-+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-+CONFIG_USB_UHCI_HCD=m
-+CONFIG_USB_SL811_HCD=m
-+CONFIG_USB_SL811_CS=m
-+
-+#
-+# USB Device Class drivers
-+#
-+CONFIG_USB_AUDIO=m
-+
-+#
-+# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
-+#
-+CONFIG_USB_MIDI=m
-+CONFIG_USB_ACM=m
-+CONFIG_USB_PRINTER=m
-+
-+#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-+#
-+CONFIG_USB_STORAGE=m
-+# CONFIG_USB_STORAGE_DEBUG is not set
-+CONFIG_USB_STORAGE_DATAFAB=y
-+CONFIG_USB_STORAGE_FREECOM=y
-+CONFIG_USB_STORAGE_ISD200=y
-+CONFIG_USB_STORAGE_DPCM=y
-+CONFIG_USB_STORAGE_USBAT=y
-+CONFIG_USB_STORAGE_SDDR09=y
-+CONFIG_USB_STORAGE_SDDR55=y
-+CONFIG_USB_STORAGE_JUMPSHOT=y
-+
-+#
-+# USB Input Devices
-+#
-+CONFIG_USB_HID=m
-+CONFIG_USB_HIDINPUT=y
-+# CONFIG_HID_FF is not set
-+CONFIG_USB_HIDDEV=y
-+
-+#
-+# USB HID Boot Protocol drivers
-+#
-+CONFIG_USB_KBD=m
-+CONFIG_USB_MOUSE=m
-+CONFIG_USB_AIPTEK=m
-+CONFIG_USB_WACOM=m
-+CONFIG_USB_KBTAB=m
-+CONFIG_USB_POWERMATE=m
-+CONFIG_USB_MTOUCH=m
-+CONFIG_USB_EGALAX=m
-+CONFIG_USB_XPAD=m
-+CONFIG_USB_ATI_REMOTE=m
-+
-+#
-+# USB Imaging devices
-+#
-+CONFIG_USB_MDC800=m
-+CONFIG_USB_MICROTEK=m
-+
-+#
-+# USB Multimedia devices
-+#
-+# CONFIG_USB_DABUSB is not set
-+CONFIG_USB_VICAM=m
-+CONFIG_USB_DSBR=m
-+CONFIG_USB_IBMCAM=m
-+CONFIG_USB_KONICAWC=m
-+CONFIG_USB_OV511=m
-+CONFIG_USB_SE401=m
-+CONFIG_USB_SN9C102=m
-+CONFIG_USB_STV680=m
-+CONFIG_USB_W9968CF=m
-+CONFIG_USB_PWC=m
-+
-+#
-+# USB Network Adapters
-+#
-+CONFIG_USB_CATC=m
-+CONFIG_USB_KAWETH=m
-+CONFIG_USB_PEGASUS=m
-+CONFIG_USB_RTL8150=m
-+CONFIG_USB_USBNET=m
-+
-+#
-+# USB Host-to-Host Cables
-+#
-+CONFIG_USB_ALI_M5632=y
-+CONFIG_USB_AN2720=y
-+CONFIG_USB_BELKIN=y
-+CONFIG_USB_GENESYS=y
-+CONFIG_USB_NET1080=y
-+CONFIG_USB_PL2301=y
-+CONFIG_USB_KC2190=y
-+
-+#
-+# Intelligent USB Devices/Gadgets
-+#
-+CONFIG_USB_ARMLINUX=y
-+CONFIG_USB_EPSON2888=y
-+CONFIG_USB_ZAURUS=y
-+CONFIG_USB_CDCETHER=y
-+
-+#
-+# USB Network Adapters
-+#
-+CONFIG_USB_AX8817X=y
-+CONFIG_USB_ZD1201=m
-+CONFIG_USB_MON=m
-+
-+#
-+# USB port drivers
-+#
-+CONFIG_USB_USS720=m
-+
-+#
-+# USB Serial Converter support
-+#
-+CONFIG_USB_SERIAL=m
-+CONFIG_USB_SERIAL_GENERIC=y
-+CONFIG_USB_SERIAL_AIRPRIME=m
-+CONFIG_USB_SERIAL_BELKIN=m
-+CONFIG_USB_SERIAL_WHITEHEAT=m
-+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-+CONFIG_USB_SERIAL_CP2101=m
-+CONFIG_USB_SERIAL_CYPRESS_M8=m
-+CONFIG_USB_SERIAL_EMPEG=m
-+CONFIG_USB_SERIAL_FTDI_SIO=m
-+CONFIG_USB_SERIAL_VISOR=m
-+CONFIG_USB_SERIAL_IPAQ=m
-+CONFIG_USB_SERIAL_IR=m
-+CONFIG_USB_SERIAL_EDGEPORT=m
-+CONFIG_USB_SERIAL_EDGEPORT_TI=m
-+# CONFIG_USB_SERIAL_GARMIN is not set
-+CONFIG_USB_SERIAL_IPW=m
-+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
-+CONFIG_USB_SERIAL_KEYSPAN=m
-+# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set
-+CONFIG_USB_SERIAL_KLSI=m
-+CONFIG_USB_SERIAL_KOBIL_SCT=m
-+CONFIG_USB_SERIAL_MCT_U232=m
-+CONFIG_USB_SERIAL_PL2303=m
-+CONFIG_USB_SERIAL_HP4X=m
-+CONFIG_USB_SERIAL_SAFE=m
-+# CONFIG_USB_SERIAL_SAFE_PADDED is not set
-+# CONFIG_USB_SERIAL_TI is not set
-+CONFIG_USB_SERIAL_CYBERJACK=m
-+CONFIG_USB_SERIAL_XIRCOM=m
-+CONFIG_USB_SERIAL_OPTION=m
-+CONFIG_USB_SERIAL_OMNINET=m
-+CONFIG_USB_EZUSB=y
-+
-+#
-+# USB Miscellaneous drivers
-+#
-+# CONFIG_USB_EMI62 is not set
-+# CONFIG_USB_EMI26 is not set
-+CONFIG_USB_AUERSWALD=m
-+CONFIG_USB_RIO500=m
-+CONFIG_USB_LEGOTOWER=m
-+CONFIG_USB_LCD=m
-+CONFIG_USB_LED=m
-+CONFIG_USB_CYTHERM=m
-+CONFIG_USB_PHIDGETKIT=m
-+CONFIG_USB_PHIDGETSERVO=m
-+# CONFIG_USB_IDMOUSE is not set
-+CONFIG_USB_SISUSBVGA=m
-+CONFIG_USB_TEST=m
-+
-+#
-+# USB ATM/DSL drivers
-+#
-+CONFIG_USB_ATM=m
-+CONFIG_USB_SPEEDTOUCH=m
-+
-+#
-+# USB Gadget Support
-+#
-+CONFIG_USB_GADGET=m
-+# CONFIG_USB_GADGET_DEBUG_FILES is not set
-+CONFIG_USB_GADGET_NET2280=y
-+CONFIG_USB_NET2280=m
-+# CONFIG_USB_GADGET_PXA2XX is not set
-+# CONFIG_USB_GADGET_GOKU is not set
-+# CONFIG_USB_GADGET_LH7A40X is not set
-+# CONFIG_USB_GADGET_OMAP is not set
-+# CONFIG_USB_GADGET_DUMMY_HCD is not set
-+CONFIG_USB_GADGET_DUALSPEED=y
-+CONFIG_USB_ZERO=m
-+CONFIG_USB_ETH=m
-+CONFIG_USB_ETH_RNDIS=y
-+CONFIG_USB_GADGETFS=m
-+CONFIG_USB_FILE_STORAGE=m
-+# CONFIG_USB_FILE_STORAGE_TEST is not set
-+CONFIG_USB_G_SERIAL=m
-+
-+#
-+# MMC/SD Card support
-+#
-+# CONFIG_MMC is not set
-+
-+#
-+# InfiniBand support
-+#
-+# CONFIG_INFINIBAND is not set
-+
-+#
-+# Power management options
-+#
-+
-+#
-+# ACPI (Advanced Configuration and Power Interface) Support
-+#
-+CONFIG_ACPI=y
-+CONFIG_ACPI_BOOT=y
-+CONFIG_ACPI_INTERPRETER=y
-+CONFIG_ACPI_AC=m
-+CONFIG_ACPI_BATTERY=m
-+CONFIG_ACPI_BUTTON=m
-+CONFIG_ACPI_VIDEO=m
-+CONFIG_ACPI_FAN=m
-+CONFIG_ACPI_PROCESSOR=m
-+# CONFIG_ACPI_HOTPLUG_CPU is not set
-+CONFIG_ACPI_THERMAL=m
-+CONFIG_ACPI_ASUS=m
-+CONFIG_ACPI_IBM=m
-+CONFIG_ACPI_TOSHIBA=m
-+CONFIG_ACPI_BLACKLIST_YEAR=0
-+# CONFIG_ACPI_DEBUG is not set
-+CONFIG_ACPI_BUS=y
-+CONFIG_ACPI_EC=y
-+CONFIG_ACPI_POWER=y
-+CONFIG_ACPI_PCI=y
-+CONFIG_ACPI_SYSTEM=y
-+# CONFIG_X86_PM_TIMER is not set
-+# CONFIG_ACPI_CONTAINER is not set
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+CONFIG_EXT2_FS_XATTR=y
-+CONFIG_EXT2_FS_POSIX_ACL=y
-+CONFIG_EXT2_FS_SECURITY=y
-+CONFIG_EXT3_FS=m
-+CONFIG_EXT3_FS_XATTR=y
-+CONFIG_EXT3_FS_POSIX_ACL=y
-+CONFIG_EXT3_FS_SECURITY=y
-+CONFIG_JBD=m
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+CONFIG_REISERFS_FS=m
-+# CONFIG_REISERFS_CHECK is not set
-+# CONFIG_REISERFS_PROC_INFO is not set
-+# CONFIG_REISERFS_FS_XATTR is not set
-+CONFIG_JFS_FS=m
-+CONFIG_JFS_POSIX_ACL=y
-+# CONFIG_JFS_SECURITY is not set
-+# CONFIG_JFS_DEBUG is not set
-+CONFIG_JFS_STATISTICS=y
-+CONFIG_FS_POSIX_ACL=y
-+
-+#
-+# XFS support
-+#
-+CONFIG_XFS_FS=m
-+CONFIG_XFS_EXPORT=y
-+CONFIG_XFS_RT=y
-+CONFIG_XFS_QUOTA=y
-+CONFIG_XFS_SECURITY=y
-+CONFIG_XFS_POSIX_ACL=y
-+CONFIG_MINIX_FS=m
-+CONFIG_ROMFS_FS=m
-+CONFIG_QUOTA=y
-+CONFIG_QFMT_V1=m
-+CONFIG_QFMT_V2=m
-+CONFIG_QUOTACTL=y
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=m
-+CONFIG_AUTOFS4_FS=m
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=m
-+CONFIG_JOLIET=y
-+CONFIG_ZISOFS=y
-+CONFIG_ZISOFS_FS=m
-+CONFIG_UDF_FS=m
-+CONFIG_UDF_NLS=y
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=m
-+CONFIG_MSDOS_FS=m
-+CONFIG_VFAT_FS=m
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-+CONFIG_NTFS_FS=m
-+# CONFIG_NTFS_DEBUG is not set
-+# CONFIG_NTFS_RW is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+CONFIG_DEVPTS_FS_XATTR=y
-+CONFIG_DEVPTS_FS_SECURITY=y
-+CONFIG_TMPFS=y
-+CONFIG_TMPFS_XATTR=y
-+CONFIG_TMPFS_SECURITY=y
-+# CONFIG_HUGETLBFS is not set
-+# CONFIG_HUGETLB_PAGE is not set
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+CONFIG_ADFS_FS=m
-+# CONFIG_ADFS_FS_RW is not set
-+CONFIG_AFFS_FS=m
-+CONFIG_HFS_FS=m
-+CONFIG_HFSPLUS_FS=m
-+CONFIG_BEFS_FS=m
-+# CONFIG_BEFS_DEBUG is not set
-+CONFIG_BFS_FS=m
-+CONFIG_EFS_FS=m
-+CONFIG_JFFS_FS=m
-+CONFIG_JFFS_FS_VERBOSE=0
-+CONFIG_JFFS_PROC_FS=y
-+CONFIG_JFFS2_FS=m
-+CONFIG_JFFS2_FS_DEBUG=0
-+# CONFIG_JFFS2_FS_NAND is not set
-+# CONFIG_JFFS2_FS_NOR_ECC is not set
-+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-+CONFIG_JFFS2_ZLIB=y
-+CONFIG_JFFS2_RTIME=y
-+# CONFIG_JFFS2_RUBIN is not set
-+CONFIG_CRAMFS=y
-+CONFIG_VXFS_FS=m
-+CONFIG_HPFS_FS=m
-+CONFIG_QNX4FS_FS=m
-+# CONFIG_QNX4FS_RW is not set
-+CONFIG_SYSV_FS=m
-+CONFIG_UFS_FS=m
-+# CONFIG_UFS_FS_WRITE is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=m
-+CONFIG_NFS_V3=y
-+CONFIG_NFS_V4=y
-+CONFIG_NFS_DIRECTIO=y
-+CONFIG_NFSD=m
-+CONFIG_NFSD_V3=y
-+CONFIG_NFSD_V4=y
-+CONFIG_NFSD_TCP=y
-+CONFIG_LOCKD=m
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=m
-+CONFIG_SUNRPC=m
-+CONFIG_SUNRPC_GSS=m
-+CONFIG_RPCSEC_GSS_KRB5=m
-+CONFIG_RPCSEC_GSS_SPKM3=m
-+CONFIG_SMB_FS=m
-+# CONFIG_SMB_NLS_DEFAULT is not set
-+CONFIG_CIFS=m
-+# CONFIG_CIFS_STATS is not set
-+# CONFIG_CIFS_XATTR is not set
-+# CONFIG_CIFS_EXPERIMENTAL is not set
-+CONFIG_NCP_FS=m
-+CONFIG_NCPFS_PACKET_SIGNING=y
-+CONFIG_NCPFS_IOCTL_LOCKING=y
-+CONFIG_NCPFS_STRONG=y
-+CONFIG_NCPFS_NFS_NS=y
-+CONFIG_NCPFS_OS2_NS=y
-+# CONFIG_NCPFS_SMALLDOS is not set
-+CONFIG_NCPFS_NLS=y
-+CONFIG_NCPFS_EXTRAS=y
-+CONFIG_CODA_FS=m
-+# CONFIG_CODA_FS_OLD_API is not set
-+CONFIG_AFS_FS=m
-+CONFIG_RXRPC=m
-+
-+#
-+# Partition Types
-+#
-+CONFIG_PARTITION_ADVANCED=y
-+CONFIG_ACORN_PARTITION=y
-+CONFIG_ACORN_PARTITION_CUMANA=y
-+# CONFIG_ACORN_PARTITION_EESOX is not set
-+CONFIG_ACORN_PARTITION_ICS=y
-+# CONFIG_ACORN_PARTITION_ADFS is not set
-+# CONFIG_ACORN_PARTITION_POWERTEC is not set
-+CONFIG_ACORN_PARTITION_RISCIX=y
-+CONFIG_OSF_PARTITION=y
-+CONFIG_AMIGA_PARTITION=y
-+CONFIG_ATARI_PARTITION=y
-+CONFIG_MAC_PARTITION=y
-+CONFIG_MSDOS_PARTITION=y
-+CONFIG_BSD_DISKLABEL=y
-+CONFIG_MINIX_SUBPARTITION=y
-+CONFIG_SOLARIS_X86_PARTITION=y
-+CONFIG_UNIXWARE_DISKLABEL=y
-+CONFIG_LDM_PARTITION=y
-+# CONFIG_LDM_DEBUG is not set
-+CONFIG_SGI_PARTITION=y
-+CONFIG_ULTRIX_PARTITION=y
-+CONFIG_SUN_PARTITION=y
-+CONFIG_EFI_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="cp437"
-+CONFIG_NLS_CODEPAGE_437=m
-+CONFIG_NLS_CODEPAGE_737=m
-+CONFIG_NLS_CODEPAGE_775=m
-+CONFIG_NLS_CODEPAGE_850=m
-+CONFIG_NLS_CODEPAGE_852=m
-+CONFIG_NLS_CODEPAGE_855=m
-+CONFIG_NLS_CODEPAGE_857=m
-+CONFIG_NLS_CODEPAGE_860=m
-+CONFIG_NLS_CODEPAGE_861=m
-+CONFIG_NLS_CODEPAGE_862=m
-+CONFIG_NLS_CODEPAGE_863=m
-+CONFIG_NLS_CODEPAGE_864=m
-+CONFIG_NLS_CODEPAGE_865=m
-+CONFIG_NLS_CODEPAGE_866=m
-+CONFIG_NLS_CODEPAGE_869=m
-+CONFIG_NLS_CODEPAGE_936=m
-+CONFIG_NLS_CODEPAGE_950=m
-+CONFIG_NLS_CODEPAGE_932=m
-+CONFIG_NLS_CODEPAGE_949=m
-+CONFIG_NLS_CODEPAGE_874=m
-+CONFIG_NLS_ISO8859_8=m
-+CONFIG_NLS_CODEPAGE_1250=m
-+CONFIG_NLS_CODEPAGE_1251=m
-+CONFIG_NLS_ASCII=m
-+CONFIG_NLS_ISO8859_1=m
-+CONFIG_NLS_ISO8859_2=m
-+CONFIG_NLS_ISO8859_3=m
-+CONFIG_NLS_ISO8859_4=m
-+CONFIG_NLS_ISO8859_5=m
-+CONFIG_NLS_ISO8859_6=m
-+CONFIG_NLS_ISO8859_7=m
-+CONFIG_NLS_ISO8859_9=m
-+CONFIG_NLS_ISO8859_13=m
-+CONFIG_NLS_ISO8859_14=m
-+CONFIG_NLS_ISO8859_15=m
-+CONFIG_NLS_KOI8_R=m
-+CONFIG_NLS_KOI8_U=m
-+CONFIG_NLS_UTF8=m
-+
-+#
-+# Security options
-+#
-+CONFIG_KEYS=y
-+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
-+CONFIG_SECURITY=y
-+# CONFIG_SECURITY_NETWORK is not set
-+CONFIG_SECURITY_CAPABILITIES=y
-+CONFIG_SECURITY_ROOTPLUG=m
-+CONFIG_SECURITY_SECLVL=m
-+CONFIG_SECURITY_SELINUX=y
-+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-+CONFIG_SECURITY_SELINUX_DISABLE=y
-+CONFIG_SECURITY_SELINUX_DEVELOP=y
-+CONFIG_SECURITY_SELINUX_AVC_STATS=y
-+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+CONFIG_CRYPTO_HMAC=y
-+CONFIG_CRYPTO_NULL=m
-+CONFIG_CRYPTO_MD4=m
-+CONFIG_CRYPTO_MD5=y
-+CONFIG_CRYPTO_SHA1=m
-+CONFIG_CRYPTO_SHA256=m
-+CONFIG_CRYPTO_SHA512=m
-+CONFIG_CRYPTO_WP512=m
-+CONFIG_CRYPTO_TGR192=m
-+CONFIG_CRYPTO_DES=m
-+CONFIG_CRYPTO_BLOWFISH=m
-+CONFIG_CRYPTO_TWOFISH=m
-+CONFIG_CRYPTO_SERPENT=m
-+CONFIG_CRYPTO_AES_586=m
-+CONFIG_CRYPTO_CAST5=m
-+CONFIG_CRYPTO_CAST6=m
-+CONFIG_CRYPTO_TEA=m
-+CONFIG_CRYPTO_ARC4=m
-+CONFIG_CRYPTO_KHAZAD=m
-+CONFIG_CRYPTO_ANUBIS=m
-+CONFIG_CRYPTO_DEFLATE=m
-+CONFIG_CRYPTO_MICHAEL_MIC=m
-+CONFIG_CRYPTO_CRC32C=m
-+CONFIG_CRYPTO_TEST=m
-+
-+#
-+# Hardware crypto devices
-+#
-+# CONFIG_CRYPTO_DEV_PADLOCK is not set
-+
-+#
-+# Library routines
-+#
-+CONFIG_CRC_CCITT=m
-+CONFIG_CRC32=y
-+CONFIG_LIBCRC32C=m
-+CONFIG_ZLIB_INFLATE=y
-+CONFIG_ZLIB_DEFLATE=m
-+CONFIG_REED_SOLOMON=m
-+CONFIG_REED_SOLOMON_DEC16=y
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=14
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_HIGHMEM is not set
-+# CONFIG_DEBUG_BUGVERBOSE is not set
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+# CONFIG_FRAME_POINTER is not set
-+# CONFIG_DEBUG_STACKOVERFLOW is not set
-+# CONFIG_KPROBES is not set
-+# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_DEBUG_PAGEALLOC is not set
-+# CONFIG_4KSTACKS is not set
-+CONFIG_X86_FIND_SMP_CONFIG=y
-+CONFIG_X86_MPPARSE=y
-diff -Nurp pristine-linux-2.6.12/.config.cmd linux-2.6.12-xen/.config.cmd
---- pristine-linux-2.6.12/.config.cmd	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/.config.cmd	2006-02-25 00:12:57.492418667 +0100
-@@ -0,0 +1,191 @@
-+deps_config := \
-+	lib/Kconfig.debug \
-+	arch/xen/Kconfig.debug \
-+	lib/Kconfig \
-+	drivers/crypto/Kconfig \
-+	crypto/Kconfig \
-+	security/selinux/Kconfig \
-+	security/Kconfig \
-+	fs/nls/Kconfig \
-+	fs/partitions/Kconfig \
-+	fs/ncpfs/Kconfig \
-+	fs/xfs/Kconfig \
-+	fs/Kconfig \
-+	drivers/acpi/Kconfig \
-+	drivers/char/tpm/Kconfig.domU \
-+	drivers/infiniband/ulp/ipoib/Kconfig \
-+	drivers/infiniband/hw/mthca/Kconfig \
-+	drivers/infiniband/Kconfig \
-+	drivers/mmc/Kconfig \
-+	drivers/usb/gadget/Kconfig \
-+	drivers/usb/atm/Kconfig \
-+	drivers/usb/misc/sisusbvga/Kconfig \
-+	drivers/usb/misc/Kconfig \
-+	drivers/usb/serial/Kconfig \
-+	drivers/usb/mon/Kconfig \
-+	drivers/usb/net/Kconfig \
-+	drivers/usb/media/Kconfig \
-+	drivers/usb/image/Kconfig \
-+	drivers/usb/input/Kconfig \
-+	drivers/usb/storage/Kconfig \
-+	drivers/usb/class/Kconfig \
-+	drivers/usb/host/Kconfig \
-+	drivers/usb/core/Kconfig \
-+	drivers/usb/Kconfig \
-+	sound/oss/Kconfig \
-+	sound/parisc/Kconfig \
-+	sound/sparc/Kconfig \
-+	sound/pcmcia/Kconfig \
-+	sound/usb/Kconfig \
-+	sound/mips/Kconfig \
-+	sound/arm/Kconfig \
-+	sound/ppc/Kconfig \
-+	sound/pci/Kconfig \
-+	sound/isa/Kconfig \
-+	sound/drivers/Kconfig \
-+	sound/core/Kconfig \
-+	sound/oss/dmasound/Kconfig \
-+	sound/Kconfig \
-+	drivers/video/backlight/Kconfig \
-+	drivers/video/logo/Kconfig \
-+	drivers/video/console/Kconfig \
-+	drivers/video/geode/Kconfig \
-+	drivers/video/Kconfig \
-+	drivers/media/common/Kconfig \
-+	drivers/media/dvb/frontends/Kconfig \
-+	drivers/media/dvb/bt8xx/Kconfig \
-+	drivers/media/dvb/b2c2/Kconfig \
-+	drivers/media/dvb/cinergyT2/Kconfig \
-+	drivers/media/dvb/dibusb/Kconfig \
-+	drivers/media/dvb/ttusb-dec/Kconfig \
-+	drivers/media/dvb/ttusb-budget/Kconfig \
-+	drivers/media/dvb/ttpci/Kconfig \
-+	drivers/media/dvb/dvb-core/Kconfig \
-+	drivers/media/dvb/Kconfig \
-+	drivers/media/radio/Kconfig \
-+	drivers/media/video/Kconfig \
-+	drivers/media/Kconfig \
-+	drivers/misc/Kconfig \
-+	drivers/w1/Kconfig \
-+	drivers/i2c/chips/Kconfig \
-+	drivers/i2c/busses/Kconfig \
-+	drivers/i2c/algos/Kconfig \
-+	drivers/i2c/Kconfig \
-+	drivers/char/tpm/Kconfig \
-+	drivers/char/pcmcia/Kconfig \
-+	drivers/char/drm/Kconfig \
-+	drivers/char/agp/Kconfig \
-+	drivers/char/ftape/Kconfig \
-+	drivers/char/watchdog/Kconfig \
-+	drivers/char/ipmi/Kconfig \
-+	drivers/serial/Kconfig \
-+	drivers/char/Kconfig \
-+	drivers/input/gameport/Kconfig \
-+	drivers/input/serio/Kconfig \
-+	drivers/input/misc/Kconfig \
-+	drivers/input/touchscreen/Kconfig \
-+	drivers/input/joystick/iforce/Kconfig \
-+	drivers/input/joystick/Kconfig \
-+	drivers/input/mouse/Kconfig \
-+	drivers/input/keyboard/Kconfig \
-+	drivers/input/Kconfig \
-+	drivers/telephony/Kconfig \
-+	drivers/isdn/hardware/eicon/Kconfig \
-+	drivers/isdn/hardware/avm/Kconfig \
-+	drivers/isdn/hardware/Kconfig \
-+	drivers/isdn/capi/Kconfig \
-+	drivers/isdn/hysdn/Kconfig \
-+	drivers/isdn/act2000/Kconfig \
-+	drivers/isdn/sc/Kconfig \
-+	drivers/isdn/pcbit/Kconfig \
-+	drivers/isdn/icn/Kconfig \
-+	drivers/isdn/hisax/Kconfig \
-+	drivers/isdn/i4l/Kconfig \
-+	drivers/isdn/Kconfig \
-+	drivers/s390/net/Kconfig \
-+	drivers/atm/Kconfig \
-+	drivers/net/wan/Kconfig \
-+	drivers/net/pcmcia/Kconfig \
-+	drivers/net/wireless/Kconfig \
-+	drivers/net/tokenring/Kconfig \
-+	drivers/net/fec_8xx/Kconfig \
-+	drivers/net/tulip/Kconfig \
-+	drivers/net/arm/Kconfig \
-+	drivers/net/arcnet/Kconfig \
-+	drivers/net/Kconfig \
-+	drivers/bluetooth/Kconfig \
-+	net/bluetooth/hidp/Kconfig \
-+	net/bluetooth/cmtp/Kconfig \
-+	net/bluetooth/bnep/Kconfig \
-+	net/bluetooth/rfcomm/Kconfig \
-+	net/bluetooth/Kconfig \
-+	drivers/net/irda/Kconfig \
-+	net/irda/ircomm/Kconfig \
-+	net/irda/irnet/Kconfig \
-+	net/irda/irlan/Kconfig \
-+	net/irda/Kconfig \
-+	drivers/net/hamradio/Kconfig \
-+	net/ax25/Kconfig \
-+	net/sched/Kconfig \
-+	drivers/net/appletalk/Kconfig \
-+	net/ipx/Kconfig \
-+	net/llc/Kconfig \
-+	net/decnet/Kconfig \
-+	net/sctp/Kconfig \
-+	net/xfrm/Kconfig \
-+	net/bridge/netfilter/Kconfig \
-+	net/decnet/netfilter/Kconfig \
-+	net/ipv6/netfilter/Kconfig \
-+	net/ipv4/netfilter/Kconfig \
-+	net/ipv6/Kconfig \
-+	net/ipv4/ipvs/Kconfig \
-+	net/ipv4/Kconfig \
-+	net/Kconfig \
-+	drivers/message/i2o/Kconfig \
-+	drivers/ieee1394/Kconfig \
-+	drivers/message/fusion/Kconfig \
-+	drivers/md/Kconfig \
-+	drivers/cdrom/Kconfig \
-+	drivers/scsi/pcmcia/Kconfig \
-+	drivers/scsi/arm/Kconfig \
-+	drivers/scsi/qla2xxx/Kconfig \
-+	drivers/scsi/megaraid/Kconfig.megaraid \
-+	drivers/scsi/aic7xxx/Kconfig.aic79xx \
-+	drivers/scsi/aic7xxx/Kconfig.aic7xxx \
-+	drivers/scsi/Kconfig \
-+	drivers/ide/Kconfig \
-+	drivers/block/Kconfig.iosched \
-+	drivers/s390/block/Kconfig \
-+	drivers/block/paride/Kconfig \
-+	drivers/block/Kconfig \
-+	drivers/pnp/pnpacpi/Kconfig \
-+	drivers/pnp/pnpbios/Kconfig \
-+	drivers/pnp/isapnp/Kconfig \
-+	drivers/pnp/Kconfig \
-+	drivers/parport/Kconfig \
-+	drivers/mtd/nand/Kconfig \
-+	drivers/mtd/devices/Kconfig \
-+	drivers/mtd/maps/Kconfig \
-+	drivers/mtd/chips/Kconfig \
-+	drivers/mtd/Kconfig \
-+	drivers/base/Kconfig \
-+	arch/xen/Kconfig.drivers \
-+	fs/Kconfig.binfmt \
-+	drivers/cpufreq/Kconfig \
-+	arch/x86_64/kernel/cpufreq/Kconfig \
-+	kernel/power/Kconfig \
-+	arch/xen/x86_64/Kconfig \
-+	drivers/pci/hotplug/Kconfig \
-+	drivers/pcmcia/Kconfig \
-+	drivers/mca/Kconfig \
-+	drivers/eisa/Kconfig \
-+	drivers/pci/Kconfig \
-+	drivers/pci/pcie/Kconfig \
-+	drivers/firmware/Kconfig \
-+	arch/xen/i386/Kconfig \
-+	init/Kconfig \
-+	arch/xen/Kconfig
-+
-+.config include/linux/autoconf.h: $(deps_config)
-+
-+$(deps_config):
-diff -Nurp pristine-linux-2.6.12/.config.old linux-2.6.12-xen/.config.old
---- pristine-linux-2.6.12/.config.old	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/.config.old	2006-02-25 00:12:55.054786131 +0100
-@@ -0,0 +1,2966 @@
-+#
-+# Automatically generated make config: don't edit
-+# Linux kernel version: 2.6.12.6-xen
-+# Sat Feb 25 00:12:55 2006
-+#
-+CONFIG_XEN=y
-+CONFIG_ARCH_XEN=y
-+CONFIG_NO_IDLE_HZ=y
-+
-+#
-+# XEN
-+#
-+CONFIG_XEN_PRIVILEGED_GUEST=y
-+CONFIG_XEN_PHYSDEV_ACCESS=y
-+CONFIG_XEN_BLKDEV_BACKEND=y
-+# CONFIG_XEN_BLKDEV_TAP_BE is not set
-+CONFIG_XEN_NETDEV_BACKEND=y
-+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
-+# CONFIG_XEN_TPMDEV_FRONTEND is not set
-+# CONFIG_XEN_TPMDEV_BACKEND is not set
-+CONFIG_XEN_BLKDEV_FRONTEND=y
-+CONFIG_XEN_NETDEV_FRONTEND=y
-+# CONFIG_XEN_BLKDEV_TAP is not set
-+# CONFIG_XEN_SHADOW_MODE is not set
-+CONFIG_XEN_SCRUB_PAGES=y
-+CONFIG_XEN_X86=y
-+# CONFIG_XEN_X86_64 is not set
-+CONFIG_HAVE_ARCH_ALLOC_SKB=y
-+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
-+
-+#
-+# Code maturity level options
-+#
-+CONFIG_EXPERIMENTAL=y
-+# CONFIG_CLEAN_COMPILE is not set
-+CONFIG_BROKEN=y
-+CONFIG_BROKEN_ON_SMP=y
-+CONFIG_LOCK_KERNEL=y
-+CONFIG_INIT_ENV_ARG_LIMIT=32
-+
-+#
-+# General setup
-+#
-+CONFIG_LOCALVERSION=""
-+CONFIG_SWAP=y
-+CONFIG_SYSVIPC=y
-+CONFIG_POSIX_MQUEUE=y
-+CONFIG_BSD_PROCESS_ACCT=y
-+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
-+CONFIG_SYSCTL=y
-+# CONFIG_AUDIT is not set
-+CONFIG_HOTPLUG=y
-+CONFIG_KOBJECT_UEVENT=y
-+CONFIG_IKCONFIG=y
-+CONFIG_IKCONFIG_PROC=y
-+# CONFIG_CPUSETS is not set
-+CONFIG_EMBEDDED=y
-+CONFIG_KALLSYMS=y
-+# CONFIG_KALLSYMS_ALL is not set
-+# CONFIG_KALLSYMS_EXTRA_PASS is not set
-+CONFIG_PRINTK=y
-+CONFIG_BUG=y
-+CONFIG_BASE_FULL=y
-+CONFIG_FUTEX=y
-+CONFIG_EPOLL=y
-+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-+CONFIG_SHMEM=y
-+CONFIG_CC_ALIGN_FUNCTIONS=0
-+CONFIG_CC_ALIGN_LABELS=0
-+CONFIG_CC_ALIGN_LOOPS=0
-+CONFIG_CC_ALIGN_JUMPS=0
-+# CONFIG_TINY_SHMEM is not set
-+CONFIG_BASE_SMALL=0
-+
-+#
-+# Loadable module support
-+#
-+CONFIG_MODULES=y
-+CONFIG_MODULE_UNLOAD=y
-+CONFIG_MODULE_FORCE_UNLOAD=y
-+CONFIG_OBSOLETE_MODPARM=y
-+CONFIG_MODVERSIONS=y
-+# CONFIG_MODULE_SRCVERSION_ALL is not set
-+CONFIG_KMOD=y
-+CONFIG_STOP_MACHINE=y
-+
-+#
-+# X86 Processor Configuration
-+#
-+CONFIG_XENARCH="i386"
-+CONFIG_X86=y
-+CONFIG_MMU=y
-+CONFIG_UID16=y
-+CONFIG_GENERIC_ISA_DMA=y
-+CONFIG_GENERIC_IOMAP=y
-+# CONFIG_M386 is not set
-+# CONFIG_M486 is not set
-+# CONFIG_M586 is not set
-+# CONFIG_M586TSC is not set
-+# CONFIG_M586MMX is not set
-+CONFIG_M686=y
-+# CONFIG_MPENTIUMII is not set
-+# CONFIG_MPENTIUMIII is not set
-+# CONFIG_MPENTIUMM is not set
-+# CONFIG_MPENTIUM4 is not set
-+# CONFIG_MK6 is not set
-+# CONFIG_MK7 is not set
-+# CONFIG_MK8 is not set
-+# CONFIG_MCRUSOE is not set
-+# CONFIG_MEFFICEON is not set
-+# CONFIG_MWINCHIPC6 is not set
-+# CONFIG_MWINCHIP2 is not set
-+# CONFIG_MWINCHIP3D is not set
-+# CONFIG_MGEODEGX1 is not set
-+# CONFIG_MCYRIXIII is not set
-+# CONFIG_MVIAC3_2 is not set
-+# CONFIG_X86_GENERIC is not set
-+CONFIG_X86_CMPXCHG=y
-+CONFIG_X86_XADD=y
-+CONFIG_X86_L1_CACHE_SHIFT=5
-+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-+CONFIG_GENERIC_CALIBRATE_DELAY=y
-+CONFIG_X86_PPRO_FENCE=y
-+CONFIG_X86_WP_WORKS_OK=y
-+CONFIG_X86_INVLPG=y
-+CONFIG_X86_BSWAP=y
-+CONFIG_X86_POPAD_OK=y
-+CONFIG_X86_GOOD_APIC=y
-+CONFIG_X86_USE_PPRO_CHECKSUM=y
-+# CONFIG_HPET_TIMER is not set
-+# CONFIG_HPET_EMULATE_RTC is not set
-+CONFIG_SMP=y
-+CONFIG_SMP_ALTERNATIVES=y
-+CONFIG_NR_CPUS=8
-+# CONFIG_SCHED_SMT is not set
-+# CONFIG_X86_REBOOTFIXUPS is not set
-+CONFIG_MICROCODE=y
-+CONFIG_X86_CPUID=m
-+CONFIG_SWIOTLB=y
-+
-+#
-+# Firmware Drivers
-+#
-+CONFIG_EDD=m
-+# CONFIG_NOHIGHMEM is not set
-+CONFIG_HIGHMEM4G=y
-+# CONFIG_HIGHMEM64G is not set
-+CONFIG_HIGHMEM=y
-+CONFIG_MTRR=y
-+CONFIG_HAVE_DEC_LOCK=y
-+# CONFIG_REGPARM is not set
-+CONFIG_X86_LOCAL_APIC=y
-+CONFIG_X86_IO_APIC=y
-+CONFIG_HOTPLUG_CPU=y
-+
-+#
-+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
-+#
-+CONFIG_PCI=y
-+# CONFIG_PCI_GOMMCONFIG is not set
-+# CONFIG_PCI_GODIRECT is not set
-+CONFIG_PCI_GOANY=y
-+CONFIG_PCI_DIRECT=y
-+CONFIG_PCI_MMCONFIG=y
-+# CONFIG_PCIEPORTBUS is not set
-+# CONFIG_PCI_MSI is not set
-+# CONFIG_PCI_LEGACY_PROC is not set
-+CONFIG_PCI_NAMES=y
-+# CONFIG_PCI_DEBUG is not set
-+CONFIG_ISA_DMA_API=y
-+CONFIG_ISA=y
-+# CONFIG_EISA is not set
-+# CONFIG_MCA is not set
-+CONFIG_SCx200=m
-+
-+#
-+# PCCARD (PCMCIA/CardBus) support
-+#
-+CONFIG_PCCARD=m
-+# CONFIG_PCMCIA_DEBUG is not set
-+CONFIG_PCMCIA=m
-+CONFIG_CARDBUS=y
-+
-+#
-+# PC-card bridges
-+#
-+CONFIG_YENTA=m
-+CONFIG_PD6729=m
-+CONFIG_I82092=m
-+CONFIG_I82365=m
-+CONFIG_TCIC=m
-+CONFIG_PCMCIA_PROBE=y
-+CONFIG_PCCARD_NONSTATIC=m
-+
-+#
-+# PCI Hotplug Support
-+#
-+CONFIG_HOTPLUG_PCI=m
-+CONFIG_HOTPLUG_PCI_FAKE=m
-+# CONFIG_HOTPLUG_PCI_ACPI is not set
-+CONFIG_HOTPLUG_PCI_CPCI=y
-+CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
-+CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
-+CONFIG_HOTPLUG_PCI_SHPC=m
-+# CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE is not set
-+CONFIG_GENERIC_HARDIRQS=y
-+CONFIG_GENERIC_IRQ_PROBE=y
-+CONFIG_X86_SMP=y
-+CONFIG_X86_BIOS_REBOOT=y
-+CONFIG_X86_TRAMPOLINE=y
-+CONFIG_SECCOMP=y
-+# CONFIG_EARLY_PRINTK is not set
-+
-+#
-+# Executable file formats
-+#
-+CONFIG_BINFMT_ELF=y
-+CONFIG_BINFMT_AOUT=m
-+CONFIG_BINFMT_MISC=m
-+
-+#
-+# Device Drivers
-+#
-+
-+#
-+# Generic Driver Options
-+#
-+CONFIG_STANDALONE=y
-+CONFIG_PREVENT_FIRMWARE_BUILD=y
-+CONFIG_FW_LOADER=m
-+# CONFIG_DEBUG_DRIVER is not set
-+
-+#
-+# Memory Technology Devices (MTD)
-+#
-+CONFIG_MTD=m
-+# CONFIG_MTD_DEBUG is not set
-+CONFIG_MTD_CONCAT=m
-+CONFIG_MTD_PARTITIONS=y
-+CONFIG_MTD_REDBOOT_PARTS=m
-+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
-+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
-+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
-+# CONFIG_MTD_CMDLINE_PARTS is not set
-+
-+#
-+# User Modules And Translation Layers
-+#
-+CONFIG_MTD_CHAR=m
-+CONFIG_MTD_BLOCK=m
-+CONFIG_MTD_BLOCK_RO=m
-+CONFIG_FTL=m
-+CONFIG_NFTL=m
-+CONFIG_NFTL_RW=y
-+CONFIG_INFTL=m
-+
-+#
-+# RAM/ROM/Flash chip drivers
-+#
-+CONFIG_MTD_CFI=m
-+CONFIG_MTD_JEDECPROBE=m
-+CONFIG_MTD_GEN_PROBE=m
-+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
-+CONFIG_MTD_MAP_BANK_WIDTH_1=y
-+CONFIG_MTD_MAP_BANK_WIDTH_2=y
-+CONFIG_MTD_MAP_BANK_WIDTH_4=y
-+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-+CONFIG_MTD_CFI_I1=y
-+CONFIG_MTD_CFI_I2=y
-+# CONFIG_MTD_CFI_I4 is not set
-+# CONFIG_MTD_CFI_I8 is not set
-+CONFIG_MTD_CFI_INTELEXT=m
-+CONFIG_MTD_CFI_AMDSTD=m
-+CONFIG_MTD_CFI_AMDSTD_RETRY=0
-+CONFIG_MTD_CFI_STAA=m
-+CONFIG_MTD_CFI_UTIL=m
-+CONFIG_MTD_RAM=m
-+CONFIG_MTD_ROM=m
-+CONFIG_MTD_ABSENT=m
-+# CONFIG_MTD_OBSOLETE_CHIPS is not set
-+
-+#
-+# Mapping drivers for chip access
-+#
-+CONFIG_MTD_COMPLEX_MAPPINGS=y
-+CONFIG_MTD_PHYSMAP=m
-+CONFIG_MTD_PHYSMAP_START=0x8000000
-+CONFIG_MTD_PHYSMAP_LEN=0x4000000
-+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
-+CONFIG_MTD_PNC2000=m
-+CONFIG_MTD_SC520CDP=m
-+CONFIG_MTD_NETSC520=m
-+CONFIG_MTD_TS5500=m
-+CONFIG_MTD_SBC_GXX=m
-+CONFIG_MTD_ELAN_104NC=m
-+CONFIG_MTD_SCx200_DOCFLASH=m
-+# CONFIG_MTD_AMD76XROM is not set
-+# CONFIG_MTD_ICHXROM is not set
-+# CONFIG_MTD_SCB2_FLASH is not set
-+CONFIG_MTD_NETtel=m
-+CONFIG_MTD_DILNETPC=m
-+CONFIG_MTD_DILNETPC_BOOTSIZE=0x80000
-+# CONFIG_MTD_L440GX is not set
-+CONFIG_MTD_PCI=m
-+CONFIG_MTD_PCMCIA=m
-+
-+#
-+# Self-contained MTD device drivers
-+#
-+CONFIG_MTD_PMC551=m
-+# CONFIG_MTD_PMC551_BUGFIX is not set
-+# CONFIG_MTD_PMC551_DEBUG is not set
-+CONFIG_MTD_SLRAM=m
-+CONFIG_MTD_PHRAM=m
-+CONFIG_MTD_MTDRAM=m
-+CONFIG_MTDRAM_TOTAL_SIZE=4096
-+CONFIG_MTDRAM_ERASE_SIZE=128
-+CONFIG_MTD_BLKMTD=m
-+# CONFIG_MTD_BLOCK2MTD is not set
-+
-+#
-+# Disk-On-Chip Device Drivers
-+#
-+CONFIG_MTD_DOC2000=m
-+CONFIG_MTD_DOC2001=m
-+CONFIG_MTD_DOC2001PLUS=m
-+CONFIG_MTD_DOCPROBE=m
-+CONFIG_MTD_DOCECC=m
-+# CONFIG_MTD_DOCPROBE_ADVANCED is not set
-+CONFIG_MTD_DOCPROBE_ADDRESS=0
-+
-+#
-+# NAND Flash Device Drivers
-+#
-+CONFIG_MTD_NAND=m
-+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
-+CONFIG_MTD_NAND_IDS=m
-+CONFIG_MTD_NAND_DISKONCHIP=m
-+# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
-+CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
-+# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
-+# CONFIG_MTD_NAND_NANDSIM is not set
-+
-+#
-+# Parallel port support
-+#
-+CONFIG_PARPORT=m
-+CONFIG_PARPORT_PC=m
-+CONFIG_PARPORT_SERIAL=m
-+CONFIG_PARPORT_PC_FIFO=y
-+# CONFIG_PARPORT_PC_SUPERIO is not set
-+CONFIG_PARPORT_PC_PCMCIA=m
-+CONFIG_PARPORT_NOT_PC=y
-+# CONFIG_PARPORT_GSC is not set
-+CONFIG_PARPORT_1284=y
-+
-+#
-+# Plug and Play support
-+#
-+CONFIG_PNP=y
-+# CONFIG_PNP_DEBUG is not set
-+
-+#
-+# Protocols
-+#
-+CONFIG_ISAPNP=y
-+# CONFIG_PNPBIOS is not set
-+# CONFIG_PNPACPI is not set
-+
-+#
-+# Block devices
-+#
-+CONFIG_BLK_DEV_FD=m
-+CONFIG_BLK_DEV_XD=m
-+CONFIG_PARIDE=m
-+CONFIG_PARIDE_PARPORT=m
-+
-+#
-+# Parallel IDE high-level drivers
-+#
-+CONFIG_PARIDE_PD=m
-+CONFIG_PARIDE_PCD=m
-+CONFIG_PARIDE_PF=m
-+CONFIG_PARIDE_PT=m
-+CONFIG_PARIDE_PG=m
-+
-+#
-+# Parallel IDE protocol modules
-+#
-+CONFIG_PARIDE_ATEN=m
-+CONFIG_PARIDE_BPCK=m
-+CONFIG_PARIDE_BPCK6=m
-+CONFIG_PARIDE_COMM=m
-+CONFIG_PARIDE_DSTR=m
-+CONFIG_PARIDE_FIT2=m
-+CONFIG_PARIDE_FIT3=m
-+CONFIG_PARIDE_EPAT=m
-+# CONFIG_PARIDE_EPATC8 is not set
-+CONFIG_PARIDE_EPIA=m
-+CONFIG_PARIDE_FRIQ=m
-+CONFIG_PARIDE_FRPW=m
-+CONFIG_PARIDE_KBIC=m
-+CONFIG_PARIDE_KTTI=m
-+CONFIG_PARIDE_ON20=m
-+CONFIG_PARIDE_ON26=m
-+CONFIG_BLK_CPQ_DA=m
-+CONFIG_BLK_CPQ_CISS_DA=m
-+CONFIG_CISS_SCSI_TAPE=y
-+CONFIG_BLK_DEV_DAC960=m
-+CONFIG_BLK_DEV_UMEM=m
-+# CONFIG_BLK_DEV_COW_COMMON is not set
-+CONFIG_BLK_DEV_LOOP=m
-+CONFIG_BLK_DEV_CRYPTOLOOP=m
-+CONFIG_BLK_DEV_NBD=m
-+CONFIG_BLK_DEV_SX8=m
-+# CONFIG_BLK_DEV_UB is not set
-+CONFIG_BLK_DEV_RAM=y
-+CONFIG_BLK_DEV_RAM_COUNT=16
-+CONFIG_BLK_DEV_RAM_SIZE=16384
-+CONFIG_BLK_DEV_INITRD=y
-+CONFIG_INITRAMFS_SOURCE=""
-+CONFIG_LBD=y
-+CONFIG_CDROM_PKTCDVD=m
-+CONFIG_CDROM_PKTCDVD_BUFFERS=8
-+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
-+
-+#
-+# IO Schedulers
-+#
-+CONFIG_IOSCHED_NOOP=y
-+CONFIG_IOSCHED_AS=y
-+CONFIG_IOSCHED_DEADLINE=y
-+CONFIG_IOSCHED_CFQ=y
-+CONFIG_ATA_OVER_ETH=m
-+
-+#
-+# ATA/ATAPI/MFM/RLL support
-+#
-+CONFIG_IDE=y
-+CONFIG_BLK_DEV_IDE=y
-+
-+#
-+# Please see Documentation/ide.txt for help/info on IDE drives
-+#
-+# CONFIG_BLK_DEV_IDE_SATA is not set
-+# CONFIG_BLK_DEV_HD_IDE is not set
-+CONFIG_BLK_DEV_IDEDISK=y
-+CONFIG_IDEDISK_MULTI_MODE=y
-+CONFIG_BLK_DEV_IDECS=m
-+CONFIG_BLK_DEV_IDECD=y
-+CONFIG_BLK_DEV_IDETAPE=m
-+CONFIG_BLK_DEV_IDEFLOPPY=y
-+CONFIG_BLK_DEV_IDESCSI=m
-+# CONFIG_IDE_TASK_IOCTL is not set
-+
-+#
-+# IDE chipset support/bugfixes
-+#
-+CONFIG_IDE_GENERIC=y
-+CONFIG_BLK_DEV_CMD640=y
-+CONFIG_BLK_DEV_CMD640_ENHANCED=y
-+CONFIG_BLK_DEV_IDEPNP=y
-+CONFIG_BLK_DEV_IDEPCI=y
-+CONFIG_IDEPCI_SHARE_IRQ=y
-+# CONFIG_BLK_DEV_OFFBOARD is not set
-+CONFIG_BLK_DEV_GENERIC=y
-+CONFIG_BLK_DEV_OPTI621=m
-+CONFIG_BLK_DEV_RZ1000=y
-+CONFIG_BLK_DEV_IDEDMA_PCI=y
-+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-+CONFIG_IDEDMA_PCI_AUTO=y
-+# CONFIG_IDEDMA_ONLYDISK is not set
-+CONFIG_BLK_DEV_AEC62XX=y
-+CONFIG_BLK_DEV_ALI15X3=y
-+# CONFIG_WDC_ALI15X3 is not set
-+CONFIG_BLK_DEV_AMD74XX=y
-+CONFIG_BLK_DEV_ATIIXP=y
-+CONFIG_BLK_DEV_CMD64X=y
-+CONFIG_BLK_DEV_TRIFLEX=y
-+CONFIG_BLK_DEV_CY82C693=y
-+CONFIG_BLK_DEV_CS5520=y
-+CONFIG_BLK_DEV_CS5530=y
-+CONFIG_BLK_DEV_HPT34X=y
-+# CONFIG_HPT34X_AUTODMA is not set
-+CONFIG_BLK_DEV_HPT366=y
-+CONFIG_BLK_DEV_SC1200=m
-+CONFIG_BLK_DEV_PIIX=y
-+CONFIG_BLK_DEV_NS87415=m
-+CONFIG_BLK_DEV_PDC202XX_OLD=y
-+CONFIG_PDC202XX_BURST=y
-+CONFIG_BLK_DEV_PDC202XX_NEW=y
-+CONFIG_PDC202XX_FORCE=y
-+CONFIG_BLK_DEV_SVWKS=y
-+CONFIG_BLK_DEV_SIIMAGE=y
-+CONFIG_BLK_DEV_SIS5513=y
-+CONFIG_BLK_DEV_SLC90E66=y
-+CONFIG_BLK_DEV_TRM290=m
-+CONFIG_BLK_DEV_VIA82CXXX=y
-+# CONFIG_IDE_ARM is not set
-+# CONFIG_IDE_CHIPSETS is not set
-+CONFIG_BLK_DEV_IDEDMA=y
-+# CONFIG_IDEDMA_IVB is not set
-+CONFIG_IDEDMA_AUTO=y
-+# CONFIG_BLK_DEV_HD is not set
-+
-+#
-+# SCSI device support
-+#
-+CONFIG_SCSI=m
-+CONFIG_SCSI_PROC_FS=y
-+
-+#
-+# SCSI support type (disk, tape, CD-ROM)
-+#
-+CONFIG_BLK_DEV_SD=m
-+CONFIG_CHR_DEV_ST=m
-+CONFIG_CHR_DEV_OSST=m
-+CONFIG_BLK_DEV_SR=m
-+# CONFIG_BLK_DEV_SR_VENDOR is not set
-+CONFIG_CHR_DEV_SG=m
-+
-+#
-+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+#
-+CONFIG_SCSI_MULTI_LUN=y
-+CONFIG_SCSI_CONSTANTS=y
-+CONFIG_SCSI_LOGGING=y
-+
-+#
-+# SCSI Transport Attributes
-+#
-+CONFIG_SCSI_SPI_ATTRS=m
-+CONFIG_SCSI_FC_ATTRS=m
-+# CONFIG_SCSI_ISCSI_ATTRS is not set
-+
-+#
-+# SCSI low-level drivers
-+#
-+CONFIG_BLK_DEV_3W_XXXX_RAID=m
-+CONFIG_SCSI_3W_9XXX=m
-+# CONFIG_SCSI_7000FASST is not set
-+CONFIG_SCSI_ACARD=m
-+CONFIG_SCSI_AHA152X=m
-+# CONFIG_SCSI_AHA1542 is not set
-+CONFIG_SCSI_AACRAID=m
-+CONFIG_SCSI_AIC7XXX=m
-+CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
-+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-+CONFIG_AIC7XXX_DEBUG_ENABLE=y
-+CONFIG_AIC7XXX_DEBUG_MASK=0
-+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
-+CONFIG_SCSI_AIC7XXX_OLD=m
-+CONFIG_SCSI_AIC79XX=m
-+CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-+CONFIG_AIC79XX_RESET_DELAY_MS=15000
-+CONFIG_AIC79XX_ENABLE_RD_STRM=y
-+CONFIG_AIC79XX_DEBUG_ENABLE=y
-+CONFIG_AIC79XX_DEBUG_MASK=0
-+CONFIG_AIC79XX_REG_PRETTY_PRINT=y
-+CONFIG_SCSI_DPT_I2O=m
-+CONFIG_SCSI_ADVANSYS=m
-+CONFIG_SCSI_IN2000=m
-+CONFIG_MEGARAID_NEWGEN=y
-+CONFIG_MEGARAID_MM=m
-+CONFIG_MEGARAID_MAILBOX=m
-+CONFIG_SCSI_SATA=y
-+CONFIG_SCSI_SATA_AHCI=m
-+CONFIG_SCSI_SATA_SVW=m
-+CONFIG_SCSI_ATA_PIIX=m
-+CONFIG_SCSI_SATA_NV=m
-+CONFIG_SCSI_SATA_PROMISE=m
-+# CONFIG_SCSI_SATA_QSTOR is not set
-+CONFIG_SCSI_SATA_SX4=m
-+CONFIG_SCSI_SATA_SIL=m
-+CONFIG_SCSI_SATA_SIS=m
-+CONFIG_SCSI_SATA_ULI=m
-+CONFIG_SCSI_SATA_VIA=m
-+CONFIG_SCSI_SATA_VITESSE=m
-+CONFIG_SCSI_BUSLOGIC=m
-+# CONFIG_SCSI_OMIT_FLASHPOINT is not set
-+# CONFIG_SCSI_CPQFCTS is not set
-+CONFIG_SCSI_DMX3191D=m
-+CONFIG_SCSI_DTC3280=m
-+CONFIG_SCSI_EATA=m
-+CONFIG_SCSI_EATA_TAGGED_QUEUE=y
-+CONFIG_SCSI_EATA_LINKED_COMMANDS=y
-+CONFIG_SCSI_EATA_MAX_TAGS=16
-+CONFIG_SCSI_EATA_PIO=m
-+CONFIG_SCSI_FUTURE_DOMAIN=m
-+CONFIG_SCSI_GDTH=m
-+CONFIG_SCSI_GENERIC_NCR5380=m
-+CONFIG_SCSI_GENERIC_NCR5380_MMIO=m
-+CONFIG_SCSI_GENERIC_NCR53C400=y
-+CONFIG_SCSI_IPS=m
-+# CONFIG_SCSI_INITIO is not set
-+# CONFIG_SCSI_INIA100 is not set
-+CONFIG_SCSI_PPA=m
-+CONFIG_SCSI_IMM=m
-+# CONFIG_SCSI_IZIP_EPP16 is not set
-+# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-+CONFIG_SCSI_NCR53C406A=m
-+CONFIG_SCSI_SYM53C8XX_2=m
-+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
-+CONFIG_SCSI_IPR=m
-+# CONFIG_SCSI_IPR_TRACE is not set
-+# CONFIG_SCSI_IPR_DUMP is not set
-+CONFIG_SCSI_PAS16=m
-+# CONFIG_SCSI_PCI2000 is not set
-+# CONFIG_SCSI_PCI2220I is not set
-+CONFIG_SCSI_PSI240I=m
-+CONFIG_SCSI_QLOGIC_FAS=m
-+CONFIG_SCSI_QLOGIC_ISP=m
-+CONFIG_SCSI_QLOGIC_FC=m
-+CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
-+CONFIG_SCSI_QLOGIC_1280=m
-+CONFIG_SCSI_QLOGIC_1280_1040=y
-+CONFIG_SCSI_QLA2XXX=m
-+CONFIG_SCSI_QLA21XX=m
-+CONFIG_SCSI_QLA22XX=m
-+CONFIG_SCSI_QLA2300=m
-+CONFIG_SCSI_QLA2322=m
-+CONFIG_SCSI_QLA6312=m
-+CONFIG_SCSI_LPFC=m
-+# CONFIG_SCSI_SEAGATE is not set
-+CONFIG_SCSI_SYM53C416=m
-+CONFIG_SCSI_DC395x=m
-+CONFIG_SCSI_DC390T=m
-+CONFIG_SCSI_T128=m
-+CONFIG_SCSI_U14_34F=m
-+CONFIG_SCSI_U14_34F_TAGGED_QUEUE=y
-+CONFIG_SCSI_U14_34F_LINKED_COMMANDS=y
-+CONFIG_SCSI_U14_34F_MAX_TAGS=8
-+# CONFIG_SCSI_ULTRASTOR is not set
-+CONFIG_SCSI_NSP32=m
-+CONFIG_SCSI_DEBUG=m
-+
-+#
-+# PCMCIA SCSI adapter support
-+#
-+CONFIG_PCMCIA_AHA152X=m
-+CONFIG_PCMCIA_FDOMAIN=m
-+CONFIG_PCMCIA_NINJA_SCSI=m
-+CONFIG_PCMCIA_QLOGIC=m
-+CONFIG_PCMCIA_SYM53C500=m
-+
-+#
-+# Old CD-ROM drivers (not SCSI, not IDE)
-+#
-+CONFIG_CD_NO_IDESCSI=y
-+CONFIG_AZTCD=m
-+CONFIG_GSCD=m
-+# CONFIG_SBPCD is not set
-+CONFIG_MCDX=m
-+CONFIG_OPTCD=m
-+# CONFIG_CM206 is not set
-+CONFIG_SJCD=m
-+CONFIG_ISP16_CDI=m
-+CONFIG_CDU31A=m
-+CONFIG_CDU535=m
-+
-+#
-+# Multi-device support (RAID and LVM)
-+#
-+CONFIG_MD=y
-+CONFIG_BLK_DEV_MD=m
-+CONFIG_MD_LINEAR=m
-+CONFIG_MD_RAID0=m
-+CONFIG_MD_RAID1=m
-+CONFIG_MD_RAID10=m
-+CONFIG_MD_RAID5=m
-+CONFIG_MD_RAID6=m
-+CONFIG_MD_MULTIPATH=m
-+CONFIG_MD_FAULTY=m
-+CONFIG_BLK_DEV_DM=m
-+CONFIG_DM_CRYPT=m
-+CONFIG_DM_SNAPSHOT=m
-+CONFIG_DM_MIRROR=m
-+CONFIG_DM_ZERO=m
-+CONFIG_DM_MULTIPATH=m
-+CONFIG_DM_MULTIPATH_EMC=m
-+
-+#
-+# Fusion MPT device support
-+#
-+CONFIG_FUSION=m
-+CONFIG_FUSION_MAX_SGE=40
-+CONFIG_FUSION_CTL=m
-+CONFIG_FUSION_LAN=m
-+
-+#
-+# IEEE 1394 (FireWire) support
-+#
-+CONFIG_IEEE1394=m
-+
-+#
-+# Subsystem Options
-+#
-+# CONFIG_IEEE1394_VERBOSEDEBUG is not set
-+# CONFIG_IEEE1394_OUI_DB is not set
-+CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
-+CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
-+
-+#
-+# Device Drivers
-+#
-+CONFIG_IEEE1394_PCILYNX=m
-+CONFIG_IEEE1394_OHCI1394=m
-+
-+#
-+# Protocol Drivers
-+#
-+CONFIG_IEEE1394_VIDEO1394=m
-+CONFIG_IEEE1394_SBP2=m
-+# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-+CONFIG_IEEE1394_ETH1394=m
-+CONFIG_IEEE1394_DV1394=m
-+CONFIG_IEEE1394_RAWIO=m
-+CONFIG_IEEE1394_CMP=m
-+CONFIG_IEEE1394_AMDTP=m
-+
-+#
-+# I2O device support
-+#
-+CONFIG_I2O=m
-+CONFIG_I2O_CONFIG=m
-+CONFIG_I2O_BLOCK=m
-+CONFIG_I2O_SCSI=m
-+CONFIG_I2O_PROC=m
-+
-+#
-+# Networking support
-+#
-+CONFIG_NET=y
-+
-+#
-+# Networking options
-+#
-+CONFIG_PACKET=m
-+CONFIG_PACKET_MMAP=y
-+CONFIG_UNIX=m
-+CONFIG_NET_KEY=m
-+CONFIG_INET=y
-+CONFIG_IP_MULTICAST=y
-+CONFIG_IP_ADVANCED_ROUTER=y
-+CONFIG_IP_MULTIPLE_TABLES=y
-+CONFIG_IP_ROUTE_FWMARK=y
-+CONFIG_IP_ROUTE_MULTIPATH=y
-+# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
-+CONFIG_IP_ROUTE_VERBOSE=y
-+# CONFIG_IP_PNP is not set
-+CONFIG_NET_IPIP=m
-+CONFIG_NET_IPGRE=m
-+CONFIG_NET_IPGRE_BROADCAST=y
-+CONFIG_IP_MROUTE=y
-+CONFIG_IP_PIMSM_V1=y
-+CONFIG_IP_PIMSM_V2=y
-+# CONFIG_ARPD is not set
-+CONFIG_SYN_COOKIES=y
-+CONFIG_INET_AH=m
-+CONFIG_INET_ESP=m
-+CONFIG_INET_IPCOMP=m
-+CONFIG_INET_TUNNEL=m
-+CONFIG_IP_TCPDIAG=m
-+CONFIG_IP_TCPDIAG_IPV6=y
-+
-+#
-+# IP: Virtual Server Configuration
-+#
-+CONFIG_IP_VS=m
-+# CONFIG_IP_VS_DEBUG is not set
-+CONFIG_IP_VS_TAB_BITS=12
-+
-+#
-+# IPVS transport protocol load balancing support
-+#
-+CONFIG_IP_VS_PROTO_TCP=y
-+CONFIG_IP_VS_PROTO_UDP=y
-+CONFIG_IP_VS_PROTO_ESP=y
-+CONFIG_IP_VS_PROTO_AH=y
-+
-+#
-+# IPVS scheduler
-+#
-+CONFIG_IP_VS_RR=m
-+CONFIG_IP_VS_WRR=m
-+CONFIG_IP_VS_LC=m
-+CONFIG_IP_VS_WLC=m
-+CONFIG_IP_VS_LBLC=m
-+CONFIG_IP_VS_LBLCR=m
-+CONFIG_IP_VS_DH=m
-+CONFIG_IP_VS_SH=m
-+CONFIG_IP_VS_SED=m
-+CONFIG_IP_VS_NQ=m
-+
-+#
-+# IPVS application helper
-+#
-+CONFIG_IP_VS_FTP=m
-+CONFIG_IPV6=m
-+CONFIG_IPV6_PRIVACY=y
-+CONFIG_INET6_AH=m
-+CONFIG_INET6_ESP=m
-+CONFIG_INET6_IPCOMP=m
-+CONFIG_INET6_TUNNEL=m
-+CONFIG_IPV6_TUNNEL=m
-+CONFIG_NETFILTER=y
-+# CONFIG_NETFILTER_DEBUG is not set
-+CONFIG_BRIDGE_NETFILTER=y
-+
-+#
-+# IP: Netfilter Configuration
-+#
-+CONFIG_IP_NF_CONNTRACK=m
-+CONFIG_IP_NF_CT_ACCT=y
-+CONFIG_IP_NF_CONNTRACK_MARK=y
-+CONFIG_IP_NF_CT_PROTO_SCTP=m
-+CONFIG_IP_NF_FTP=m
-+CONFIG_IP_NF_IRC=m
-+CONFIG_IP_NF_TFTP=m
-+CONFIG_IP_NF_AMANDA=m
-+CONFIG_IP_NF_QUEUE=m
-+CONFIG_IP_NF_IPTABLES=m
-+CONFIG_IP_NF_MATCH_LIMIT=m
-+CONFIG_IP_NF_MATCH_IPRANGE=m
-+CONFIG_IP_NF_MATCH_MAC=m
-+CONFIG_IP_NF_MATCH_PKTTYPE=m
-+CONFIG_IP_NF_MATCH_MARK=m
-+CONFIG_IP_NF_MATCH_MULTIPORT=m
-+CONFIG_IP_NF_MATCH_TOS=m
-+CONFIG_IP_NF_MATCH_RECENT=m
-+CONFIG_IP_NF_MATCH_ECN=m
-+CONFIG_IP_NF_MATCH_DSCP=m
-+CONFIG_IP_NF_MATCH_AH_ESP=m
-+CONFIG_IP_NF_MATCH_LENGTH=m
-+CONFIG_IP_NF_MATCH_TTL=m
-+CONFIG_IP_NF_MATCH_TCPMSS=m
-+CONFIG_IP_NF_MATCH_HELPER=m
-+CONFIG_IP_NF_MATCH_STATE=m
-+CONFIG_IP_NF_MATCH_CONNTRACK=m
-+CONFIG_IP_NF_MATCH_OWNER=m
-+CONFIG_IP_NF_MATCH_PHYSDEV=m
-+CONFIG_IP_NF_MATCH_ADDRTYPE=m
-+CONFIG_IP_NF_MATCH_REALM=m
-+CONFIG_IP_NF_MATCH_SCTP=m
-+CONFIG_IP_NF_MATCH_COMMENT=m
-+CONFIG_IP_NF_MATCH_CONNMARK=m
-+CONFIG_IP_NF_MATCH_HASHLIMIT=m
-+CONFIG_IP_NF_FILTER=m
-+CONFIG_IP_NF_TARGET_REJECT=m
-+CONFIG_IP_NF_TARGET_LOG=m
-+CONFIG_IP_NF_TARGET_ULOG=m
-+CONFIG_IP_NF_TARGET_TCPMSS=m
-+CONFIG_IP_NF_NAT=m
-+CONFIG_IP_NF_NAT_NEEDED=y
-+CONFIG_IP_NF_TARGET_MASQUERADE=m
-+CONFIG_IP_NF_TARGET_REDIRECT=m
-+CONFIG_IP_NF_TARGET_NETMAP=m
-+CONFIG_IP_NF_TARGET_SAME=m
-+CONFIG_IP_NF_NAT_SNMP_BASIC=m
-+CONFIG_IP_NF_NAT_IRC=m
-+CONFIG_IP_NF_NAT_FTP=m
-+CONFIG_IP_NF_NAT_TFTP=m
-+CONFIG_IP_NF_NAT_AMANDA=m
-+CONFIG_IP_NF_MANGLE=m
-+CONFIG_IP_NF_TARGET_TOS=m
-+CONFIG_IP_NF_TARGET_ECN=m
-+CONFIG_IP_NF_TARGET_DSCP=m
-+CONFIG_IP_NF_TARGET_MARK=m
-+CONFIG_IP_NF_TARGET_CLASSIFY=m
-+CONFIG_IP_NF_TARGET_CONNMARK=m
-+CONFIG_IP_NF_TARGET_CLUSTERIP=m
-+CONFIG_IP_NF_RAW=m
-+CONFIG_IP_NF_TARGET_NOTRACK=m
-+CONFIG_IP_NF_ARPTABLES=m
-+CONFIG_IP_NF_ARPFILTER=m
-+CONFIG_IP_NF_ARP_MANGLE=m
-+
-+#
-+# IPv6: Netfilter Configuration (EXPERIMENTAL)
-+#
-+CONFIG_IP6_NF_QUEUE=m
-+CONFIG_IP6_NF_IPTABLES=m
-+CONFIG_IP6_NF_MATCH_LIMIT=m
-+CONFIG_IP6_NF_MATCH_MAC=m
-+CONFIG_IP6_NF_MATCH_RT=m
-+CONFIG_IP6_NF_MATCH_OPTS=m
-+CONFIG_IP6_NF_MATCH_FRAG=m
-+CONFIG_IP6_NF_MATCH_HL=m
-+CONFIG_IP6_NF_MATCH_MULTIPORT=m
-+CONFIG_IP6_NF_MATCH_OWNER=m
-+CONFIG_IP6_NF_MATCH_MARK=m
-+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-+CONFIG_IP6_NF_MATCH_AHESP=m
-+CONFIG_IP6_NF_MATCH_LENGTH=m
-+CONFIG_IP6_NF_MATCH_EUI64=m
-+CONFIG_IP6_NF_MATCH_PHYSDEV=m
-+CONFIG_IP6_NF_FILTER=m
-+CONFIG_IP6_NF_TARGET_LOG=m
-+CONFIG_IP6_NF_MANGLE=m
-+CONFIG_IP6_NF_TARGET_MARK=m
-+CONFIG_IP6_NF_RAW=m
-+
-+#
-+# DECnet: Netfilter Configuration
-+#
-+CONFIG_DECNET_NF_GRABULATOR=m
-+
-+#
-+# Bridge: Netfilter Configuration
-+#
-+CONFIG_BRIDGE_NF_EBTABLES=m
-+CONFIG_BRIDGE_EBT_BROUTE=m
-+CONFIG_BRIDGE_EBT_T_FILTER=m
-+CONFIG_BRIDGE_EBT_T_NAT=m
-+CONFIG_BRIDGE_EBT_802_3=m
-+CONFIG_BRIDGE_EBT_AMONG=m
-+CONFIG_BRIDGE_EBT_ARP=m
-+CONFIG_BRIDGE_EBT_IP=m
-+CONFIG_BRIDGE_EBT_LIMIT=m
-+CONFIG_BRIDGE_EBT_MARK=m
-+CONFIG_BRIDGE_EBT_PKTTYPE=m
-+CONFIG_BRIDGE_EBT_STP=m
-+CONFIG_BRIDGE_EBT_VLAN=m
-+CONFIG_BRIDGE_EBT_ARPREPLY=m
-+CONFIG_BRIDGE_EBT_DNAT=m
-+CONFIG_BRIDGE_EBT_MARK_T=m
-+CONFIG_BRIDGE_EBT_REDIRECT=m
-+CONFIG_BRIDGE_EBT_SNAT=m
-+CONFIG_BRIDGE_EBT_LOG=m
-+# CONFIG_BRIDGE_EBT_ULOG is not set
-+CONFIG_XFRM=y
-+CONFIG_XFRM_USER=m
-+
-+#
-+# SCTP Configuration (EXPERIMENTAL)
-+#
-+CONFIG_IP_SCTP=m
-+# CONFIG_SCTP_DBG_MSG is not set
-+# CONFIG_SCTP_DBG_OBJCNT is not set
-+# CONFIG_SCTP_HMAC_NONE is not set
-+# CONFIG_SCTP_HMAC_SHA1 is not set
-+CONFIG_SCTP_HMAC_MD5=y
-+CONFIG_ATM=y
-+CONFIG_ATM_CLIP=y
-+# CONFIG_ATM_CLIP_NO_ICMP is not set
-+CONFIG_ATM_LANE=m
-+CONFIG_ATM_MPOA=m
-+CONFIG_ATM_BR2684=m
-+# CONFIG_ATM_BR2684_IPFILTER is not set
-+CONFIG_BRIDGE=m
-+CONFIG_VLAN_8021Q=m
-+CONFIG_DECNET=m
-+# CONFIG_DECNET_ROUTER is not set
-+CONFIG_LLC=y
-+CONFIG_LLC2=m
-+CONFIG_IPX=m
-+# CONFIG_IPX_INTERN is not set
-+CONFIG_ATALK=m
-+CONFIG_DEV_APPLETALK=y
-+CONFIG_LTPC=m
-+CONFIG_COPS=m
-+CONFIG_COPS_DAYNA=y
-+CONFIG_COPS_TANGENT=y
-+CONFIG_IPDDP=m
-+CONFIG_IPDDP_ENCAP=y
-+CONFIG_IPDDP_DECAP=y
-+CONFIG_X25=m
-+CONFIG_LAPB=m
-+# CONFIG_NET_DIVERT is not set
-+CONFIG_ECONET=m
-+CONFIG_ECONET_AUNUDP=y
-+CONFIG_ECONET_NATIVE=y
-+CONFIG_WAN_ROUTER=m
-+
-+#
-+# QoS and/or fair queueing
-+#
-+CONFIG_NET_SCHED=y
-+CONFIG_NET_SCH_CLK_JIFFIES=y
-+# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-+# CONFIG_NET_SCH_CLK_CPU is not set
-+CONFIG_NET_SCH_CBQ=m
-+CONFIG_NET_SCH_HTB=m
-+CONFIG_NET_SCH_HFSC=m
-+CONFIG_NET_SCH_ATM=m
-+CONFIG_NET_SCH_PRIO=m
-+CONFIG_NET_SCH_RED=m
-+CONFIG_NET_SCH_SFQ=m
-+CONFIG_NET_SCH_TEQL=m
-+CONFIG_NET_SCH_TBF=m
-+CONFIG_NET_SCH_GRED=m
-+CONFIG_NET_SCH_DSMARK=m
-+CONFIG_NET_SCH_NETEM=m
-+CONFIG_NET_SCH_INGRESS=m
-+CONFIG_NET_QOS=y
-+CONFIG_NET_ESTIMATOR=y
-+CONFIG_NET_CLS=y
-+CONFIG_NET_CLS_BASIC=m
-+CONFIG_NET_CLS_TCINDEX=m
-+CONFIG_NET_CLS_ROUTE4=m
-+CONFIG_NET_CLS_ROUTE=y
-+CONFIG_NET_CLS_FW=m
-+CONFIG_NET_CLS_U32=m
-+# CONFIG_CLS_U32_PERF is not set
-+# CONFIG_NET_CLS_IND is not set
-+# CONFIG_CLS_U32_MARK is not set
-+CONFIG_NET_CLS_RSVP=m
-+CONFIG_NET_CLS_RSVP6=m
-+CONFIG_NET_EMATCH=y
-+CONFIG_NET_EMATCH_STACK=32
-+CONFIG_NET_EMATCH_CMP=m
-+CONFIG_NET_EMATCH_NBYTE=m
-+CONFIG_NET_EMATCH_U32=m
-+CONFIG_NET_EMATCH_META=m
-+# CONFIG_NET_CLS_ACT is not set
-+CONFIG_NET_CLS_POLICE=y
-+
-+#
-+# Network testing
-+#
-+CONFIG_NET_PKTGEN=m
-+CONFIG_NETPOLL=y
-+# CONFIG_NETPOLL_RX is not set
-+# CONFIG_NETPOLL_TRAP is not set
-+CONFIG_NET_POLL_CONTROLLER=y
-+CONFIG_HAMRADIO=y
-+
-+#
-+# Packet Radio protocols
-+#
-+CONFIG_AX25=m
-+# CONFIG_AX25_DAMA_SLAVE is not set
-+CONFIG_NETROM=m
-+CONFIG_ROSE=m
-+
-+#
-+# AX.25 network device drivers
-+#
-+CONFIG_MKISS=m
-+CONFIG_6PACK=m
-+CONFIG_BPQETHER=m
-+# CONFIG_DMASCC is not set
-+CONFIG_SCC=m
-+# CONFIG_SCC_DELAY is not set
-+# CONFIG_SCC_TRXECHO is not set
-+CONFIG_BAYCOM_SER_FDX=m
-+CONFIG_BAYCOM_SER_HDX=m
-+CONFIG_BAYCOM_PAR=m
-+CONFIG_BAYCOM_EPP=m
-+CONFIG_YAM=m
-+CONFIG_IRDA=m
-+
-+#
-+# IrDA protocols
-+#
-+CONFIG_IRLAN=m
-+CONFIG_IRNET=m
-+CONFIG_IRCOMM=m
-+# CONFIG_IRDA_ULTRA is not set
-+
-+#
-+# IrDA options
-+#
-+CONFIG_IRDA_CACHE_LAST_LSAP=y
-+CONFIG_IRDA_FAST_RR=y
-+CONFIG_IRDA_DEBUG=y
-+
-+#
-+# Infrared-port device drivers
-+#
-+
-+#
-+# SIR device drivers
-+#
-+CONFIG_IRTTY_SIR=m
-+
-+#
-+# Dongle support
-+#
-+CONFIG_DONGLE=y
-+CONFIG_ESI_DONGLE=m
-+CONFIG_ACTISYS_DONGLE=m
-+CONFIG_TEKRAM_DONGLE=m
-+CONFIG_LITELINK_DONGLE=m
-+CONFIG_MA600_DONGLE=m
-+CONFIG_GIRBIL_DONGLE=m
-+CONFIG_MCP2120_DONGLE=m
-+CONFIG_OLD_BELKIN_DONGLE=m
-+CONFIG_ACT200L_DONGLE=m
-+
-+#
-+# Old SIR device drivers
-+#
-+CONFIG_IRPORT_SIR=m
-+
-+#
-+# Old Serial dongle support
-+#
-+# CONFIG_DONGLE_OLD is not set
-+
-+#
-+# FIR device drivers
-+#
-+CONFIG_USB_IRDA=m
-+CONFIG_SIGMATEL_FIR=m
-+CONFIG_NSC_FIR=m
-+CONFIG_WINBOND_FIR=m
-+# CONFIG_TOSHIBA_FIR is not set
-+CONFIG_SMC_IRCC_FIR=m
-+CONFIG_ALI_FIR=m
-+CONFIG_VLSI_FIR=m
-+CONFIG_VIA_FIR=m
-+CONFIG_BT=m
-+CONFIG_BT_L2CAP=m
-+CONFIG_BT_SCO=m
-+CONFIG_BT_RFCOMM=m
-+CONFIG_BT_RFCOMM_TTY=y
-+CONFIG_BT_BNEP=m
-+CONFIG_BT_BNEP_MC_FILTER=y
-+CONFIG_BT_BNEP_PROTO_FILTER=y
-+CONFIG_BT_CMTP=m
-+CONFIG_BT_HIDP=m
-+
-+#
-+# Bluetooth device drivers
-+#
-+CONFIG_BT_HCIUSB=m
-+CONFIG_BT_HCIUSB_SCO=y
-+CONFIG_BT_HCIUART=m
-+CONFIG_BT_HCIUART_H4=y
-+CONFIG_BT_HCIUART_BCSP=y
-+# CONFIG_BT_HCIUART_BCSP_TXCRC is not set
-+CONFIG_BT_HCIBCM203X=m
-+# CONFIG_BT_HCIBPA10X is not set
-+CONFIG_BT_HCIBFUSB=m
-+CONFIG_BT_HCIDTL1=m
-+CONFIG_BT_HCIBT3C=m
-+CONFIG_BT_HCIBLUECARD=m
-+CONFIG_BT_HCIBTUART=m
-+CONFIG_BT_HCIVHCI=m
-+CONFIG_NETDEVICES=y
-+CONFIG_DUMMY=m
-+CONFIG_BONDING=m
-+CONFIG_EQUALIZER=m
-+CONFIG_TUN=m
-+CONFIG_NET_SB1000=m
-+
-+#
-+# ARCnet devices
-+#
-+CONFIG_ARCNET=m
-+CONFIG_ARCNET_1201=m
-+CONFIG_ARCNET_1051=m
-+CONFIG_ARCNET_RAW=m
-+# CONFIG_ARCNET_CAP is not set
-+CONFIG_ARCNET_COM90xx=m
-+CONFIG_ARCNET_COM90xxIO=m
-+CONFIG_ARCNET_RIM_I=m
-+CONFIG_ARCNET_COM20020=m
-+CONFIG_ARCNET_COM20020_ISA=m
-+CONFIG_ARCNET_COM20020_PCI=m
-+
-+#
-+# Ethernet (10 or 100Mbit)
-+#
-+CONFIG_NET_ETHERNET=y
-+CONFIG_MII=m
-+CONFIG_HAPPYMEAL=m
-+CONFIG_SUNGEM=m
-+CONFIG_NET_VENDOR_3COM=y
-+CONFIG_EL1=m
-+CONFIG_EL2=m
-+# CONFIG_ELPLUS is not set
-+CONFIG_EL16=m
-+CONFIG_EL3=m
-+# CONFIG_3C515 is not set
-+CONFIG_VORTEX=m
-+CONFIG_TYPHOON=m
-+# CONFIG_LANCE is not set
-+CONFIG_NET_VENDOR_SMC=y
-+CONFIG_WD80x3=m
-+CONFIG_ULTRA=m
-+CONFIG_SMC9194=m
-+CONFIG_NET_VENDOR_RACAL=y
-+CONFIG_NI5010=m
-+CONFIG_NI52=m
-+# CONFIG_NI65 is not set
-+
-+#
-+# Tulip family network device support
-+#
-+CONFIG_NET_TULIP=y
-+CONFIG_DE2104X=m
-+CONFIG_TULIP=m
-+# CONFIG_TULIP_MWI is not set
-+# CONFIG_TULIP_MMIO is not set
-+# CONFIG_TULIP_NAPI is not set
-+CONFIG_DE4X5=m
-+CONFIG_WINBOND_840=m
-+CONFIG_DM9102=m
-+CONFIG_PCMCIA_XIRCOM=m
-+# CONFIG_PCMCIA_XIRTULIP is not set
-+CONFIG_AT1700=m
-+CONFIG_DEPCA=m
-+CONFIG_HP100=m
-+CONFIG_NET_ISA=y
-+CONFIG_E2100=m
-+CONFIG_EWRK3=m
-+CONFIG_EEXPRESS=m
-+CONFIG_EEXPRESS_PRO=m
-+CONFIG_HPLAN_PLUS=m
-+CONFIG_HPLAN=m
-+CONFIG_LP486E=m
-+CONFIG_ETH16I=m
-+CONFIG_NE2000=m
-+CONFIG_ZNET=m
-+CONFIG_SEEQ8005=m
-+CONFIG_NET_PCI=y
-+CONFIG_PCNET32=m
-+CONFIG_AMD8111_ETH=m
-+# CONFIG_AMD8111E_NAPI is not set
-+CONFIG_ADAPTEC_STARFIRE=m
-+# CONFIG_ADAPTEC_STARFIRE_NAPI is not set
-+CONFIG_AC3200=m
-+CONFIG_APRICOT=m
-+CONFIG_B44=m
-+CONFIG_FORCEDETH=m
-+CONFIG_CS89x0=m
-+# CONFIG_DGRS is not set
-+CONFIG_EEPRO100=m
-+CONFIG_E100=m
-+CONFIG_FEALNX=m
-+CONFIG_NATSEMI=m
-+CONFIG_NE2K_PCI=m
-+CONFIG_8139CP=m
-+CONFIG_8139TOO=m
-+CONFIG_8139TOO_PIO=y
-+CONFIG_8139TOO_TUNE_TWISTER=y
-+CONFIG_8139TOO_8129=y
-+# CONFIG_8139_OLD_RX_RESET is not set
-+CONFIG_SIS900=m
-+CONFIG_EPIC100=m
-+CONFIG_SUNDANCE=m
-+# CONFIG_SUNDANCE_MMIO is not set
-+CONFIG_TLAN=m
-+CONFIG_VIA_RHINE=m
-+# CONFIG_VIA_RHINE_MMIO is not set
-+CONFIG_NET_POCKET=y
-+CONFIG_ATP=m
-+CONFIG_DE600=m
-+CONFIG_DE620=m
-+
-+#
-+# Ethernet (1000 Mbit)
-+#
-+# CONFIG_ACENIC is not set
-+CONFIG_DL2K=m
-+CONFIG_E1000=m
-+# CONFIG_E1000_NAPI is not set
-+CONFIG_NS83820=m
-+CONFIG_HAMACHI=m
-+CONFIG_YELLOWFIN=m
-+CONFIG_R8169=m
-+# CONFIG_R8169_NAPI is not set
-+# CONFIG_R8169_VLAN is not set
-+CONFIG_SK98LIN=m
-+CONFIG_VIA_VELOCITY=m
-+CONFIG_TIGON3=m
-+CONFIG_BNX2=m
-+
-+#
-+# Ethernet (10000 Mbit)
-+#
-+CONFIG_IXGB=m
-+# CONFIG_IXGB_NAPI is not set
-+CONFIG_S2IO=m
-+# CONFIG_S2IO_NAPI is not set
-+# CONFIG_2BUFF_MODE is not set
-+
-+#
-+# Token Ring devices
-+#
-+CONFIG_TR=y
-+CONFIG_IBMTR=m
-+CONFIG_IBMOL=m
-+CONFIG_IBMLS=m
-+CONFIG_3C359=m
-+CONFIG_TMS380TR=m
-+CONFIG_TMSPCI=m
-+CONFIG_SKISA=m
-+CONFIG_PROTEON=m
-+CONFIG_ABYSS=m
-+# CONFIG_SMCTR is not set
-+
-+#
-+# Wireless LAN (non-hamradio)
-+#
-+CONFIG_NET_RADIO=y
-+
-+#
-+# Obsolete Wireless cards support (pre-802.11)
-+#
-+CONFIG_STRIP=m
-+CONFIG_ARLAN=m
-+CONFIG_WAVELAN=m
-+CONFIG_PCMCIA_WAVELAN=m
-+CONFIG_PCMCIA_NETWAVE=m
-+
-+#
-+# Wireless 802.11 Frequency Hopping cards support
-+#
-+CONFIG_PCMCIA_RAYCS=m
-+
-+#
-+# Wireless 802.11b ISA/PCI cards support
-+#
-+CONFIG_AIRO=m
-+CONFIG_HERMES=m
-+CONFIG_PLX_HERMES=m
-+CONFIG_TMD_HERMES=m
-+CONFIG_PCI_HERMES=m
-+CONFIG_ATMEL=m
-+CONFIG_PCI_ATMEL=m
-+
-+#
-+# Wireless 802.11b Pcmcia/Cardbus cards support
-+#
-+CONFIG_PCMCIA_HERMES=m
-+CONFIG_AIRO_CS=m
-+CONFIG_PCMCIA_ATMEL=m
-+CONFIG_PCMCIA_WL3501=m
-+
-+#
-+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
-+#
-+CONFIG_PRISM54=m
-+CONFIG_NET_WIRELESS=y
-+
-+#
-+# PCMCIA network device support
-+#
-+CONFIG_NET_PCMCIA=y
-+CONFIG_PCMCIA_3C589=m
-+CONFIG_PCMCIA_3C574=m
-+CONFIG_PCMCIA_FMVJ18X=m
-+CONFIG_PCMCIA_PCNET=m
-+CONFIG_PCMCIA_NMCLAN=m
-+CONFIG_PCMCIA_SMC91C92=m
-+CONFIG_PCMCIA_XIRC2PS=m
-+CONFIG_PCMCIA_AXNET=m
-+CONFIG_ARCNET_COM20020_CS=m
-+CONFIG_PCMCIA_IBMTR=m
-+
-+#
-+# Wan interfaces
-+#
-+CONFIG_WAN=y
-+CONFIG_HOSTESS_SV11=m
-+CONFIG_COSA=m
-+CONFIG_DSCC4=m
-+CONFIG_DSCC4_PCISYNC=y
-+CONFIG_DSCC4_PCI_RST=y
-+CONFIG_LANMEDIA=m
-+CONFIG_SEALEVEL_4021=m
-+CONFIG_SYNCLINK_SYNCPPP=m
-+CONFIG_HDLC=m
-+CONFIG_HDLC_RAW=y
-+CONFIG_HDLC_RAW_ETH=y
-+CONFIG_HDLC_CISCO=y
-+CONFIG_HDLC_FR=y
-+CONFIG_HDLC_PPP=y
-+CONFIG_HDLC_X25=y
-+CONFIG_PCI200SYN=m
-+CONFIG_WANXL=m
-+CONFIG_PC300=m
-+CONFIG_PC300_MLPPP=y
-+CONFIG_N2=m
-+CONFIG_C101=m
-+CONFIG_FARSYNC=m
-+CONFIG_DLCI=m
-+CONFIG_DLCI_COUNT=24
-+CONFIG_DLCI_MAX=8
-+CONFIG_SDLA=m
-+CONFIG_WAN_ROUTER_DRIVERS=y
-+# CONFIG_VENDOR_SANGOMA is not set
-+CONFIG_CYCLADES_SYNC=m
-+CONFIG_CYCLOMX_X25=y
-+CONFIG_LAPBETHER=m
-+CONFIG_X25_ASY=m
-+CONFIG_SBNI=m
-+# CONFIG_SBNI_MULTILINE is not set
-+
-+#
-+# ATM drivers
-+#
-+CONFIG_ATM_TCP=m
-+CONFIG_ATM_LANAI=m
-+CONFIG_ATM_ENI=m
-+# CONFIG_ATM_ENI_DEBUG is not set
-+# CONFIG_ATM_ENI_TUNE_BURST is not set
-+CONFIG_ATM_FIRESTREAM=m
-+CONFIG_ATM_ZATM=m
-+# CONFIG_ATM_ZATM_DEBUG is not set
-+CONFIG_ATM_NICSTAR=m
-+# CONFIG_ATM_NICSTAR_USE_SUNI is not set
-+# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
-+CONFIG_ATM_IDT77252=m
-+# CONFIG_ATM_IDT77252_DEBUG is not set
-+# CONFIG_ATM_IDT77252_RCV_ALL is not set
-+CONFIG_ATM_IDT77252_USE_SUNI=y
-+CONFIG_ATM_AMBASSADOR=m
-+# CONFIG_ATM_AMBASSADOR_DEBUG is not set
-+CONFIG_ATM_HORIZON=m
-+# CONFIG_ATM_HORIZON_DEBUG is not set
-+CONFIG_ATM_IA=m
-+# CONFIG_ATM_IA_DEBUG is not set
-+CONFIG_ATM_FORE200E_MAYBE=m
-+CONFIG_ATM_FORE200E_PCA=y
-+CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
-+# CONFIG_ATM_FORE200E_USE_TASKLET is not set
-+CONFIG_ATM_FORE200E_TX_RETRY=16
-+CONFIG_ATM_FORE200E_DEBUG=0
-+CONFIG_ATM_FORE200E=m
-+CONFIG_ATM_HE=m
-+CONFIG_ATM_HE_USE_SUNI=y
-+CONFIG_FDDI=y
-+CONFIG_DEFXX=m
-+CONFIG_SKFP=m
-+CONFIG_HIPPI=y
-+CONFIG_ROADRUNNER=m
-+# CONFIG_ROADRUNNER_LARGE_RINGS is not set
-+CONFIG_PLIP=m
-+CONFIG_PPP=m
-+CONFIG_PPP_MULTILINK=y
-+CONFIG_PPP_FILTER=y
-+CONFIG_PPP_ASYNC=m
-+CONFIG_PPP_SYNC_TTY=m
-+CONFIG_PPP_DEFLATE=m
-+CONFIG_PPP_BSDCOMP=m
-+CONFIG_PPPOE=m
-+CONFIG_PPPOATM=m
-+CONFIG_SLIP=m
-+CONFIG_SLIP_COMPRESSED=y
-+CONFIG_SLIP_SMART=y
-+CONFIG_SLIP_MODE_SLIP6=y
-+CONFIG_NET_FC=y
-+CONFIG_SHAPER=m
-+CONFIG_NETCONSOLE=m
-+
-+#
-+# ISDN subsystem
-+#
-+CONFIG_ISDN=m
-+
-+#
-+# Old ISDN4Linux
-+#
-+CONFIG_ISDN_I4L=m
-+CONFIG_ISDN_PPP=y
-+CONFIG_ISDN_PPP_VJ=y
-+CONFIG_ISDN_MPP=y
-+CONFIG_IPPP_FILTER=y
-+CONFIG_ISDN_PPP_BSDCOMP=m
-+CONFIG_ISDN_AUDIO=y
-+CONFIG_ISDN_TTY_FAX=y
-+CONFIG_ISDN_X25=y
-+
-+#
-+# ISDN feature submodules
-+#
-+# CONFIG_ISDN_DRV_LOOP is not set
-+# CONFIG_ISDN_DIVERSION is not set
-+
-+#
-+# ISDN4Linux hardware drivers
-+#
-+
-+#
-+# Passive cards
-+#
-+CONFIG_ISDN_DRV_HISAX=m
-+
-+#
-+# D-channel protocol features
-+#
-+CONFIG_HISAX_EURO=y
-+CONFIG_DE_AOC=y
-+# CONFIG_HISAX_NO_SENDCOMPLETE is not set
-+# CONFIG_HISAX_NO_LLC is not set
-+# CONFIG_HISAX_NO_KEYPAD is not set
-+CONFIG_HISAX_1TR6=y
-+CONFIG_HISAX_NI1=y
-+CONFIG_HISAX_MAX_CARDS=8
-+
-+#
-+# HiSax supported cards
-+#
-+CONFIG_HISAX_16_0=y
-+CONFIG_HISAX_16_3=y
-+CONFIG_HISAX_TELESPCI=y
-+CONFIG_HISAX_S0BOX=y
-+CONFIG_HISAX_AVM_A1=y
-+CONFIG_HISAX_FRITZPCI=y
-+CONFIG_HISAX_AVM_A1_PCMCIA=y
-+CONFIG_HISAX_ELSA=y
-+CONFIG_HISAX_IX1MICROR2=y
-+CONFIG_HISAX_DIEHLDIVA=y
-+CONFIG_HISAX_ASUSCOM=y
-+CONFIG_HISAX_TELEINT=y
-+CONFIG_HISAX_HFCS=y
-+CONFIG_HISAX_SEDLBAUER=y
-+CONFIG_HISAX_SPORTSTER=y
-+CONFIG_HISAX_MIC=y
-+CONFIG_HISAX_NETJET=y
-+CONFIG_HISAX_NETJET_U=y
-+CONFIG_HISAX_NICCY=y
-+CONFIG_HISAX_ISURF=y
-+CONFIG_HISAX_HSTSAPHIR=y
-+CONFIG_HISAX_BKM_A4T=y
-+CONFIG_HISAX_SCT_QUADRO=y
-+CONFIG_HISAX_GAZEL=y
-+CONFIG_HISAX_HFC_PCI=y
-+CONFIG_HISAX_W6692=y
-+CONFIG_HISAX_HFC_SX=y
-+CONFIG_HISAX_ENTERNOW_PCI=y
-+# CONFIG_HISAX_DEBUG is not set
-+
-+#
-+# HiSax PCMCIA card service modules
-+#
-+CONFIG_HISAX_SEDLBAUER_CS=m
-+CONFIG_HISAX_ELSA_CS=m
-+CONFIG_HISAX_AVM_A1_CS=m
-+CONFIG_HISAX_TELES_CS=m
-+
-+#
-+# HiSax sub driver modules
-+#
-+CONFIG_HISAX_ST5481=m
-+CONFIG_HISAX_HFCUSB=m
-+CONFIG_HISAX_HFC4S8S=m
-+CONFIG_HISAX_FRITZ_PCIPNP=m
-+CONFIG_HISAX_HDLC=y
-+
-+#
-+# Active cards
-+#
-+CONFIG_ISDN_DRV_ICN=m
-+CONFIG_ISDN_DRV_PCBIT=m
-+CONFIG_ISDN_DRV_SC=m
-+CONFIG_ISDN_DRV_ACT2000=m
-+# CONFIG_HYSDN is not set
-+
-+#
-+# CAPI subsystem
-+#
-+CONFIG_ISDN_CAPI=m
-+CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y
-+CONFIG_ISDN_CAPI_MIDDLEWARE=y
-+CONFIG_ISDN_CAPI_CAPI20=m
-+CONFIG_ISDN_CAPI_CAPIFS_BOOL=y
-+CONFIG_ISDN_CAPI_CAPIFS=m
-+CONFIG_ISDN_CAPI_CAPIDRV=m
-+
-+#
-+# CAPI hardware drivers
-+#
-+
-+#
-+# Active AVM cards
-+#
-+CONFIG_CAPI_AVM=y
-+CONFIG_ISDN_DRV_AVMB1_B1ISA=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-+CONFIG_ISDN_DRV_AVMB1_T1ISA=m
-+CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-+CONFIG_ISDN_DRV_AVMB1_AVM_CS=m
-+CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-+CONFIG_ISDN_DRV_AVMB1_C4=m
-+
-+#
-+# Active Eicon DIVA Server cards
-+#
-+CONFIG_CAPI_EICON=y
-+CONFIG_ISDN_DIVAS=m
-+CONFIG_ISDN_DIVAS_BRIPCI=y
-+CONFIG_ISDN_DIVAS_PRIPCI=y
-+CONFIG_ISDN_DIVAS_DIVACAPI=m
-+CONFIG_ISDN_DIVAS_USERIDI=m
-+CONFIG_ISDN_DIVAS_MAINT=m
-+
-+#
-+# Telephony Support
-+#
-+CONFIG_PHONE=m
-+CONFIG_PHONE_IXJ=m
-+CONFIG_PHONE_IXJ_PCMCIA=m
-+
-+#
-+# Input device support
-+#
-+CONFIG_INPUT=y
-+
-+#
-+# Userland interfaces
-+#
-+CONFIG_INPUT_MOUSEDEV=y
-+CONFIG_INPUT_MOUSEDEV_PSAUX=y
-+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-+CONFIG_INPUT_JOYDEV=m
-+CONFIG_INPUT_TSDEV=m
-+CONFIG_INPUT_TSDEV_SCREEN_X=240
-+CONFIG_INPUT_TSDEV_SCREEN_Y=320
-+CONFIG_INPUT_EVDEV=m
-+CONFIG_INPUT_EVBUG=m
-+
-+#
-+# Input Device Drivers
-+#
-+CONFIG_INPUT_KEYBOARD=y
-+CONFIG_KEYBOARD_ATKBD=y
-+CONFIG_KEYBOARD_SUNKBD=m
-+CONFIG_KEYBOARD_LKKBD=m
-+CONFIG_KEYBOARD_XTKBD=m
-+CONFIG_KEYBOARD_NEWTON=m
-+CONFIG_INPUT_MOUSE=y
-+CONFIG_MOUSE_PS2=y
-+CONFIG_MOUSE_SERIAL=m
-+CONFIG_MOUSE_INPORT=m
-+# CONFIG_MOUSE_ATIXL is not set
-+CONFIG_MOUSE_LOGIBM=m
-+CONFIG_MOUSE_PC110PAD=m
-+CONFIG_MOUSE_VSXXXAA=m
-+CONFIG_INPUT_JOYSTICK=y
-+CONFIG_JOYSTICK_ANALOG=m
-+CONFIG_JOYSTICK_A3D=m
-+CONFIG_JOYSTICK_ADI=m
-+CONFIG_JOYSTICK_COBRA=m
-+CONFIG_JOYSTICK_GF2K=m
-+CONFIG_JOYSTICK_GRIP=m
-+CONFIG_JOYSTICK_GRIP_MP=m
-+CONFIG_JOYSTICK_GUILLEMOT=m
-+CONFIG_JOYSTICK_INTERACT=m
-+CONFIG_JOYSTICK_SIDEWINDER=m
-+CONFIG_JOYSTICK_TMDC=m
-+CONFIG_JOYSTICK_IFORCE=m
-+CONFIG_JOYSTICK_IFORCE_USB=y
-+CONFIG_JOYSTICK_IFORCE_232=y
-+CONFIG_JOYSTICK_WARRIOR=m
-+CONFIG_JOYSTICK_MAGELLAN=m
-+CONFIG_JOYSTICK_SPACEORB=m
-+CONFIG_JOYSTICK_SPACEBALL=m
-+CONFIG_JOYSTICK_STINGER=m
-+CONFIG_JOYSTICK_TWIDJOY=m
-+CONFIG_JOYSTICK_DB9=m
-+CONFIG_JOYSTICK_GAMECON=m
-+CONFIG_JOYSTICK_TURBOGRAFX=m
-+CONFIG_JOYSTICK_JOYDUMP=m
-+CONFIG_INPUT_TOUCHSCREEN=y
-+CONFIG_TOUCHSCREEN_GUNZE=m
-+CONFIG_TOUCHSCREEN_ELO=m
-+CONFIG_TOUCHSCREEN_MTOUCH=m
-+CONFIG_TOUCHSCREEN_MK712=m
-+CONFIG_INPUT_MISC=y
-+CONFIG_INPUT_PCSPKR=m
-+CONFIG_INPUT_UINPUT=m
-+
-+#
-+# Hardware I/O ports
-+#
-+CONFIG_SERIO=y
-+CONFIG_SERIO_I8042=y
-+CONFIG_SERIO_SERPORT=m
-+CONFIG_SERIO_CT82C710=m
-+CONFIG_SERIO_PARKBD=m
-+CONFIG_SERIO_PCIPS2=m
-+CONFIG_SERIO_LIBPS2=y
-+CONFIG_SERIO_RAW=m
-+CONFIG_GAMEPORT=m
-+CONFIG_GAMEPORT_NS558=m
-+CONFIG_GAMEPORT_L4=m
-+CONFIG_GAMEPORT_EMU10K1=m
-+CONFIG_GAMEPORT_VORTEX=m
-+CONFIG_GAMEPORT_FM801=m
-+# CONFIG_GAMEPORT_CS461X is not set
-+
-+#
-+# Character devices
-+#
-+CONFIG_VT=y
-+CONFIG_VT_CONSOLE=y
-+CONFIG_HW_CONSOLE=y
-+# CONFIG_SERIAL_NONSTANDARD is not set
-+
-+#
-+# Serial drivers
-+#
-+CONFIG_SERIAL_8250=m
-+# CONFIG_SERIAL_8250_CS is not set
-+# CONFIG_SERIAL_8250_ACPI is not set
-+CONFIG_SERIAL_8250_NR_UARTS=4
-+# CONFIG_SERIAL_8250_EXTENDED is not set
-+
-+#
-+# Non-8250 serial port support
-+#
-+CONFIG_SERIAL_CORE=m
-+CONFIG_SERIAL_JSM=m
-+CONFIG_UNIX98_PTYS=y
-+CONFIG_LEGACY_PTYS=y
-+CONFIG_LEGACY_PTY_COUNT=256
-+CONFIG_PRINTER=m
-+# CONFIG_LP_CONSOLE is not set
-+CONFIG_PPDEV=m
-+CONFIG_TIPAR=m
-+
-+#
-+# IPMI
-+#
-+CONFIG_IPMI_HANDLER=m
-+# CONFIG_IPMI_PANIC_EVENT is not set
-+CONFIG_IPMI_DEVICE_INTERFACE=m
-+CONFIG_IPMI_SI=m
-+CONFIG_IPMI_WATCHDOG=m
-+CONFIG_IPMI_POWEROFF=m
-+
-+#
-+# Watchdog Cards
-+#
-+CONFIG_WATCHDOG=y
-+# CONFIG_WATCHDOG_NOWAYOUT is not set
-+
-+#
-+# Watchdog Device Drivers
-+#
-+CONFIG_SOFT_WATCHDOG=m
-+CONFIG_ACQUIRE_WDT=m
-+CONFIG_ADVANTECH_WDT=m
-+CONFIG_ALIM1535_WDT=m
-+CONFIG_ALIM7101_WDT=m
-+CONFIG_SC520_WDT=m
-+CONFIG_EUROTECH_WDT=m
-+CONFIG_IB700_WDT=m
-+CONFIG_WAFER_WDT=m
-+CONFIG_I8XX_TCO=m
-+CONFIG_SC1200_WDT=m
-+CONFIG_SCx200_WDT=m
-+CONFIG_60XX_WDT=m
-+CONFIG_CPU5_WDT=m
-+CONFIG_W83627HF_WDT=m
-+CONFIG_W83877F_WDT=m
-+CONFIG_MACHZ_WDT=m
-+
-+#
-+# ISA-based Watchdog Cards
-+#
-+CONFIG_PCWATCHDOG=m
-+CONFIG_MIXCOMWD=m
-+CONFIG_WDT=m
-+CONFIG_WDT_501=y
-+
-+#
-+# PCI-based Watchdog Cards
-+#
-+CONFIG_PCIPCWATCHDOG=m
-+CONFIG_WDTPCI=m
-+CONFIG_WDT_501_PCI=y
-+
-+#
-+# USB-based Watchdog Cards
-+#
-+CONFIG_USBPCWATCHDOG=m
-+CONFIG_HW_RANDOM=m
-+CONFIG_NVRAM=m
-+CONFIG_RTC=m
-+CONFIG_GEN_RTC=m
-+CONFIG_GEN_RTC_X=y
-+CONFIG_DTLK=m
-+CONFIG_R3964=m
-+CONFIG_APPLICOM=m
-+CONFIG_SONYPI=m
-+
-+#
-+# Ftape, the floppy tape device driver
-+#
-+# CONFIG_FTAPE is not set
-+CONFIG_AGP=m
-+CONFIG_AGP_ALI=m
-+CONFIG_AGP_ATI=m
-+CONFIG_AGP_AMD=m
-+CONFIG_AGP_AMD64=m
-+CONFIG_AGP_INTEL=m
-+CONFIG_AGP_NVIDIA=m
-+CONFIG_AGP_SIS=m
-+CONFIG_AGP_SWORKS=m
-+CONFIG_AGP_VIA=m
-+CONFIG_AGP_EFFICEON=m
-+CONFIG_DRM=m
-+CONFIG_DRM_TDFX=m
-+# CONFIG_DRM_GAMMA is not set
-+CONFIG_DRM_R128=m
-+CONFIG_DRM_RADEON=m
-+CONFIG_DRM_I810=m
-+CONFIG_DRM_I830=m
-+CONFIG_DRM_I915=m
-+CONFIG_DRM_MGA=m
-+CONFIG_DRM_SIS=m
-+
-+#
-+# PCMCIA character devices
-+#
-+CONFIG_SYNCLINK_CS=m
-+CONFIG_MWAVE=m
-+CONFIG_SCx200_GPIO=m
-+CONFIG_RAW_DRIVER=m
-+# CONFIG_HPET is not set
-+CONFIG_MAX_RAW_DEVS=256
-+CONFIG_HANGCHECK_TIMER=m
-+
-+#
-+# TPM devices
-+#
-+# CONFIG_TCG_TPM is not set
-+
-+#
-+# I2C support
-+#
-+CONFIG_I2C=m
-+CONFIG_I2C_CHARDEV=m
-+
-+#
-+# I2C Algorithms
-+#
-+CONFIG_I2C_ALGOBIT=m
-+CONFIG_I2C_ALGOPCF=m
-+CONFIG_I2C_ALGOPCA=m
-+
-+#
-+# I2C Hardware Bus support
-+#
-+CONFIG_I2C_ALI1535=m
-+CONFIG_I2C_ALI1563=m
-+CONFIG_I2C_ALI15X3=m
-+CONFIG_I2C_AMD756=m
-+CONFIG_I2C_AMD756_S4882=m
-+CONFIG_I2C_AMD8111=m
-+CONFIG_I2C_ELEKTOR=m
-+CONFIG_I2C_I801=m
-+CONFIG_I2C_I810=m
-+CONFIG_I2C_PIIX4=m
-+CONFIG_I2C_ISA=m
-+CONFIG_I2C_NFORCE2=m
-+CONFIG_I2C_PARPORT=m
-+CONFIG_I2C_PARPORT_LIGHT=m
-+CONFIG_I2C_PROSAVAGE=m
-+CONFIG_I2C_SAVAGE4=m
-+CONFIG_SCx200_I2C=m
-+CONFIG_SCx200_I2C_SCL=12
-+CONFIG_SCx200_I2C_SDA=13
-+CONFIG_SCx200_ACB=m
-+CONFIG_I2C_SIS5595=m
-+CONFIG_I2C_SIS630=m
-+CONFIG_I2C_SIS96X=m
-+CONFIG_I2C_STUB=m
-+CONFIG_I2C_VIA=m
-+CONFIG_I2C_VIAPRO=m
-+CONFIG_I2C_VOODOO3=m
-+CONFIG_I2C_PCA_ISA=m
-+
-+#
-+# Hardware Sensors Chip support
-+#
-+CONFIG_I2C_SENSOR=m
-+CONFIG_SENSORS_ADM1021=m
-+CONFIG_SENSORS_ADM1025=m
-+CONFIG_SENSORS_ADM1026=m
-+CONFIG_SENSORS_ADM1031=m
-+CONFIG_SENSORS_ASB100=m
-+CONFIG_SENSORS_DS1621=m
-+CONFIG_SENSORS_FSCHER=m
-+CONFIG_SENSORS_FSCPOS=m
-+CONFIG_SENSORS_GL518SM=m
-+CONFIG_SENSORS_GL520SM=m
-+CONFIG_SENSORS_IT87=m
-+CONFIG_SENSORS_LM63=m
-+CONFIG_SENSORS_LM75=m
-+CONFIG_SENSORS_LM77=m
-+CONFIG_SENSORS_LM78=m
-+CONFIG_SENSORS_LM80=m
-+CONFIG_SENSORS_LM83=m
-+CONFIG_SENSORS_LM85=m
-+CONFIG_SENSORS_LM87=m
-+CONFIG_SENSORS_LM90=m
-+CONFIG_SENSORS_LM92=m
-+CONFIG_SENSORS_MAX1619=m
-+CONFIG_SENSORS_PC87360=m
-+# CONFIG_SENSORS_SMSC47B397 is not set
-+CONFIG_SENSORS_SIS5595=m
-+CONFIG_SENSORS_SMSC47M1=m
-+CONFIG_SENSORS_VIA686A=m
-+CONFIG_SENSORS_W83781D=m
-+CONFIG_SENSORS_W83L785TS=m
-+CONFIG_SENSORS_W83627HF=m
-+
-+#
-+# Other I2C Chip support
-+#
-+CONFIG_SENSORS_DS1337=m
-+CONFIG_SENSORS_EEPROM=m
-+CONFIG_SENSORS_PCF8574=m
-+CONFIG_SENSORS_PCF8591=m
-+CONFIG_SENSORS_RTC8564=m
-+# CONFIG_I2C_DEBUG_CORE is not set
-+# CONFIG_I2C_DEBUG_ALGO is not set
-+# CONFIG_I2C_DEBUG_BUS is not set
-+# CONFIG_I2C_DEBUG_CHIP is not set
-+
-+#
-+# Dallas's 1-wire bus
-+#
-+CONFIG_W1=m
-+CONFIG_W1_MATROX=m
-+CONFIG_W1_DS9490=m
-+CONFIG_W1_DS9490_BRIDGE=m
-+CONFIG_W1_THERM=m
-+CONFIG_W1_SMEM=m
-+
-+#
-+# Misc devices
-+#
-+CONFIG_IBM_ASM=m
-+
-+#
-+# Multimedia devices
-+#
-+CONFIG_VIDEO_DEV=m
-+
-+#
-+# Video For Linux
-+#
-+
-+#
-+# Video Adapters
-+#
-+CONFIG_VIDEO_BT848=m
-+CONFIG_VIDEO_PMS=m
-+CONFIG_VIDEO_BWQCAM=m
-+CONFIG_VIDEO_CQCAM=m
-+CONFIG_VIDEO_W9966=m
-+CONFIG_VIDEO_CPIA=m
-+CONFIG_VIDEO_CPIA_PP=m
-+CONFIG_VIDEO_CPIA_USB=m
-+CONFIG_VIDEO_SAA5246A=m
-+CONFIG_VIDEO_SAA5249=m
-+CONFIG_TUNER_3036=m
-+CONFIG_VIDEO_STRADIS=m
-+CONFIG_VIDEO_ZORAN=m
-+CONFIG_VIDEO_ZORAN_BUZ=m
-+CONFIG_VIDEO_ZORAN_DC10=m
-+CONFIG_VIDEO_ZORAN_DC30=m
-+CONFIG_VIDEO_ZORAN_LML33=m
-+CONFIG_VIDEO_ZORAN_LML33R10=m
-+# CONFIG_VIDEO_ZR36120 is not set
-+CONFIG_VIDEO_MEYE=m
-+# CONFIG_VIDEO_SAA7134 is not set
-+CONFIG_VIDEO_MXB=m
-+CONFIG_VIDEO_DPC=m
-+CONFIG_VIDEO_HEXIUM_ORION=m
-+CONFIG_VIDEO_HEXIUM_GEMINI=m
-+CONFIG_VIDEO_CX88=m
-+# CONFIG_VIDEO_CX88_DVB is not set
-+CONFIG_VIDEO_OVCAMCHIP=m
-+
-+#
-+# Radio Adapters
-+#
-+CONFIG_RADIO_CADET=m
-+CONFIG_RADIO_RTRACK=m
-+CONFIG_RADIO_RTRACK2=m
-+CONFIG_RADIO_AZTECH=m
-+CONFIG_RADIO_GEMTEK=m
-+CONFIG_RADIO_GEMTEK_PCI=m
-+CONFIG_RADIO_MAXIRADIO=m
-+CONFIG_RADIO_MAESTRO=m
-+CONFIG_RADIO_MIROPCM20=m
-+CONFIG_RADIO_MIROPCM20_RDS=m
-+CONFIG_RADIO_SF16FMI=m
-+CONFIG_RADIO_SF16FMR2=m
-+CONFIG_RADIO_TERRATEC=m
-+CONFIG_RADIO_TRUST=m
-+CONFIG_RADIO_TYPHOON=m
-+CONFIG_RADIO_TYPHOON_PROC_FS=y
-+CONFIG_RADIO_ZOLTRIX=m
-+
-+#
-+# Digital Video Broadcasting Devices
-+#
-+CONFIG_DVB=y
-+CONFIG_DVB_CORE=m
-+
-+#
-+# Supported SAA7146 based PCI Adapters
-+#
-+CONFIG_DVB_AV7110=m
-+# CONFIG_DVB_AV7110_OSD is not set
-+CONFIG_DVB_BUDGET=m
-+CONFIG_DVB_BUDGET_CI=m
-+CONFIG_DVB_BUDGET_AV=m
-+CONFIG_DVB_BUDGET_PATCH=m
-+
-+#
-+# Supported USB Adapters
-+#
-+CONFIG_DVB_TTUSB_BUDGET=m
-+CONFIG_DVB_TTUSB_DEC=m
-+CONFIG_DVB_DIBUSB=m
-+CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES=y
-+# CONFIG_DVB_DIBCOM_DEBUG is not set
-+CONFIG_DVB_CINERGYT2=m
-+# CONFIG_DVB_CINERGYT2_TUNING is not set
-+
-+#
-+# Supported FlexCopII (B2C2) Adapters
-+#
-+CONFIG_DVB_B2C2_FLEXCOP=m
-+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
-+CONFIG_DVB_B2C2_FLEXCOP_USB=m
-+# CONFIG_DVB_B2C2_FLEXCOP_DEBUG is not set
-+CONFIG_DVB_B2C2_SKYSTAR=m
-+
-+#
-+# Supported BT878 Adapters
-+#
-+CONFIG_DVB_BT8XX=m
-+
-+#
-+# Supported DVB Frontends
-+#
-+
-+#
-+# Customise DVB Frontends
-+#
-+
-+#
-+# DVB-S (satellite) frontends
-+#
-+CONFIG_DVB_STV0299=m
-+CONFIG_DVB_CX24110=m
-+CONFIG_DVB_TDA8083=m
-+CONFIG_DVB_TDA80XX=m
-+CONFIG_DVB_MT312=m
-+CONFIG_DVB_VES1X93=m
-+
-+#
-+# DVB-T (terrestrial) frontends
-+#
-+CONFIG_DVB_SP8870=m
-+CONFIG_DVB_SP887X=m
-+CONFIG_DVB_CX22700=m
-+CONFIG_DVB_CX22702=m
-+CONFIG_DVB_L64781=m
-+CONFIG_DVB_TDA1004X=m
-+CONFIG_DVB_NXT6000=m
-+CONFIG_DVB_MT352=m
-+CONFIG_DVB_DIB3000MB=m
-+CONFIG_DVB_DIB3000MC=m
-+
-+#
-+# DVB-C (cable) frontends
-+#
-+CONFIG_DVB_ATMEL_AT76C651=m
-+CONFIG_DVB_VES1820=m
-+CONFIG_DVB_TDA10021=m
-+CONFIG_DVB_STV0297=m
-+
-+#
-+# ATSC (North American/Korean Terresterial DTV) frontends
-+#
-+CONFIG_DVB_NXT2002=m
-+CONFIG_DVB_OR51211=m
-+CONFIG_DVB_OR51132=m
-+CONFIG_VIDEO_SAA7146=m
-+CONFIG_VIDEO_SAA7146_VV=m
-+CONFIG_VIDEO_VIDEOBUF=m
-+CONFIG_VIDEO_TUNER=m
-+CONFIG_VIDEO_BUF=m
-+CONFIG_VIDEO_BTCX=m
-+CONFIG_VIDEO_IR=m
-+CONFIG_VIDEO_TVEEPROM=m
-+
-+#
-+# Graphics support
-+#
-+CONFIG_FB=y
-+CONFIG_FB_CFB_FILLRECT=m
-+CONFIG_FB_CFB_COPYAREA=m
-+CONFIG_FB_CFB_IMAGEBLIT=m
-+CONFIG_FB_SOFT_CURSOR=m
-+# CONFIG_FB_MACMODES is not set
-+CONFIG_FB_MODE_HELPERS=y
-+CONFIG_FB_TILEBLITTING=y
-+CONFIG_FB_CIRRUS=m
-+CONFIG_FB_PM2=m
-+CONFIG_FB_PM2_FIFO_DISCONNECT=y
-+CONFIG_FB_CYBER2000=m
-+# CONFIG_FB_ASILIANT is not set
-+# CONFIG_FB_IMSTT is not set
-+CONFIG_FB_VGA16=m
-+# CONFIG_FB_VESA is not set
-+CONFIG_VIDEO_SELECT=y
-+CONFIG_FB_HGA=m
-+# CONFIG_FB_HGA_ACCEL is not set
-+CONFIG_FB_NVIDIA=m
-+CONFIG_FB_NVIDIA_I2C=y
-+CONFIG_FB_RIVA=m
-+CONFIG_FB_RIVA_I2C=y
-+CONFIG_FB_RIVA_DEBUG=y
-+CONFIG_FB_I810=m
-+# CONFIG_FB_I810_GTF is not set
-+CONFIG_FB_INTEL=m
-+# CONFIG_FB_INTEL_DEBUG is not set
-+CONFIG_FB_MATROX=m
-+CONFIG_FB_MATROX_MILLENIUM=y
-+CONFIG_FB_MATROX_MYSTIQUE=y
-+# CONFIG_FB_MATROX_G is not set
-+CONFIG_FB_MATROX_I2C=m
-+CONFIG_FB_MATROX_MULTIHEAD=y
-+CONFIG_FB_RADEON_OLD=m
-+CONFIG_FB_RADEON=m
-+CONFIG_FB_RADEON_I2C=y
-+# CONFIG_FB_RADEON_DEBUG is not set
-+CONFIG_FB_ATY128=m
-+CONFIG_FB_ATY=m
-+CONFIG_FB_ATY_CT=y
-+CONFIG_FB_ATY_GENERIC_LCD=y
-+CONFIG_FB_ATY_XL_INIT=y
-+CONFIG_FB_ATY_GX=y
-+CONFIG_FB_SAVAGE=m
-+CONFIG_FB_SAVAGE_I2C=y
-+CONFIG_FB_SAVAGE_ACCEL=y
-+CONFIG_FB_SIS=m
-+CONFIG_FB_SIS_300=y
-+CONFIG_FB_SIS_315=y
-+CONFIG_FB_NEOMAGIC=m
-+CONFIG_FB_KYRO=m
-+CONFIG_FB_3DFX=m
-+# CONFIG_FB_3DFX_ACCEL is not set
-+CONFIG_FB_VOODOO1=m
-+CONFIG_FB_TRIDENT=m
-+# CONFIG_FB_TRIDENT_ACCEL is not set
-+# CONFIG_FB_PM3 is not set
-+CONFIG_FB_GEODE=y
-+CONFIG_FB_GEODE_GX1=m
-+CONFIG_FB_S1D13XXX=m
-+CONFIG_FB_VIRTUAL=m
-+
-+#
-+# Console display driver support
-+#
-+CONFIG_VGA_CONSOLE=y
-+CONFIG_MDA_CONSOLE=m
-+CONFIG_DUMMY_CONSOLE=y
-+CONFIG_FRAMEBUFFER_CONSOLE=m
-+# CONFIG_FONTS is not set
-+CONFIG_FONT_8x8=y
-+CONFIG_FONT_8x16=y
-+
-+#
-+# Logo configuration
-+#
-+# CONFIG_LOGO is not set
-+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-+
-+#
-+# Sound
-+#
-+CONFIG_SOUND=m
-+
-+#
-+# Advanced Linux Sound Architecture
-+#
-+CONFIG_SND=m
-+CONFIG_SND_TIMER=m
-+CONFIG_SND_PCM=m
-+CONFIG_SND_HWDEP=m
-+CONFIG_SND_RAWMIDI=m
-+CONFIG_SND_SEQUENCER=m
-+CONFIG_SND_SEQ_DUMMY=m
-+CONFIG_SND_OSSEMUL=y
-+CONFIG_SND_MIXER_OSS=m
-+CONFIG_SND_PCM_OSS=m
-+CONFIG_SND_SEQUENCER_OSS=y
-+CONFIG_SND_RTCTIMER=m
-+# CONFIG_SND_VERBOSE_PRINTK is not set
-+# CONFIG_SND_DEBUG is not set
-+CONFIG_SND_GENERIC_PM=y
-+
-+#
-+# Generic devices
-+#
-+CONFIG_SND_MPU401_UART=m
-+CONFIG_SND_OPL3_LIB=m
-+CONFIG_SND_OPL4_LIB=m
-+CONFIG_SND_VX_LIB=m
-+CONFIG_SND_DUMMY=m
-+CONFIG_SND_VIRMIDI=m
-+CONFIG_SND_MTPAV=m
-+CONFIG_SND_SERIAL_U16550=m
-+CONFIG_SND_MPU401=m
-+
-+#
-+# ISA devices
-+#
-+CONFIG_SND_AD1848_LIB=m
-+CONFIG_SND_CS4231_LIB=m
-+CONFIG_SND_AD1816A=m
-+CONFIG_SND_AD1848=m
-+CONFIG_SND_CS4231=m
-+CONFIG_SND_CS4232=m
-+CONFIG_SND_CS4236=m
-+CONFIG_SND_ES968=m
-+CONFIG_SND_ES1688=m
-+CONFIG_SND_ES18XX=m
-+CONFIG_SND_GUS_SYNTH=m
-+CONFIG_SND_GUSCLASSIC=m
-+CONFIG_SND_GUSEXTREME=m
-+CONFIG_SND_GUSMAX=m
-+CONFIG_SND_INTERWAVE=m
-+CONFIG_SND_INTERWAVE_STB=m
-+CONFIG_SND_OPTI92X_AD1848=m
-+CONFIG_SND_OPTI92X_CS4231=m
-+CONFIG_SND_OPTI93X=m
-+CONFIG_SND_SB8=m
-+CONFIG_SND_SB16=m
-+CONFIG_SND_SBAWE=m
-+CONFIG_SND_SB16_CSP=y
-+CONFIG_SND_WAVEFRONT=m
-+CONFIG_SND_ALS100=m
-+CONFIG_SND_AZT2320=m
-+CONFIG_SND_CMI8330=m
-+CONFIG_SND_DT019X=m
-+CONFIG_SND_OPL3SA2=m
-+CONFIG_SND_SGALAXY=m
-+CONFIG_SND_SSCAPE=m
-+
-+#
-+# PCI devices
-+#
-+CONFIG_SND_AC97_CODEC=m
-+CONFIG_SND_ALI5451=m
-+CONFIG_SND_ATIIXP=m
-+CONFIG_SND_ATIIXP_MODEM=m
-+CONFIG_SND_AU8810=m
-+CONFIG_SND_AU8820=m
-+CONFIG_SND_AU8830=m
-+CONFIG_SND_AZT3328=m
-+CONFIG_SND_BT87X=m
-+# CONFIG_SND_BT87X_OVERCLOCK is not set
-+CONFIG_SND_CS46XX=m
-+CONFIG_SND_CS46XX_NEW_DSP=y
-+CONFIG_SND_CS4281=m
-+CONFIG_SND_EMU10K1=m
-+# CONFIG_SND_EMU10K1X is not set
-+# CONFIG_SND_CA0106 is not set
-+CONFIG_SND_KORG1212=m
-+CONFIG_SND_MIXART=m
-+CONFIG_SND_NM256=m
-+CONFIG_SND_RME32=m
-+CONFIG_SND_RME96=m
-+CONFIG_SND_RME9652=m
-+CONFIG_SND_HDSP=m
-+CONFIG_SND_TRIDENT=m
-+CONFIG_SND_YMFPCI=m
-+CONFIG_SND_ALS4000=m
-+CONFIG_SND_CMIPCI=m
-+CONFIG_SND_ENS1370=m
-+CONFIG_SND_ENS1371=m
-+CONFIG_SND_ES1938=m
-+CONFIG_SND_ES1968=m
-+CONFIG_SND_MAESTRO3=m
-+CONFIG_SND_FM801=m
-+CONFIG_SND_FM801_TEA575X=m
-+CONFIG_SND_ICE1712=m
-+CONFIG_SND_ICE1724=m
-+CONFIG_SND_INTEL8X0=m
-+CONFIG_SND_INTEL8X0M=m
-+CONFIG_SND_SONICVIBES=m
-+CONFIG_SND_VIA82XX=m
-+# CONFIG_SND_VIA82XX_MODEM is not set
-+CONFIG_SND_VX222=m
-+CONFIG_SND_HDA_INTEL=m
-+
-+#
-+# USB devices
-+#
-+CONFIG_SND_USB_AUDIO=m
-+CONFIG_SND_USB_USX2Y=m
-+
-+#
-+# PCMCIA devices
-+#
-+CONFIG_SND_VXPOCKET=m
-+CONFIG_SND_VXP440=m
-+CONFIG_SND_PDAUDIOCF=m
-+
-+#
-+# Open Sound System
-+#
-+CONFIG_SOUND_PRIME=m
-+CONFIG_SOUND_BT878=m
-+CONFIG_SOUND_CMPCI=m
-+# CONFIG_SOUND_CMPCI_FM is not set
-+# CONFIG_SOUND_CMPCI_MIDI is not set
-+CONFIG_SOUND_CMPCI_JOYSTICK=y
-+CONFIG_SOUND_EMU10K1=m
-+CONFIG_MIDI_EMU10K1=y
-+CONFIG_SOUND_FUSION=m
-+CONFIG_SOUND_CS4281=m
-+CONFIG_SOUND_ES1370=m
-+CONFIG_SOUND_ES1371=m
-+CONFIG_SOUND_ESSSOLO1=m
-+CONFIG_SOUND_MAESTRO=m
-+CONFIG_SOUND_MAESTRO3=m
-+CONFIG_SOUND_ICH=m
-+CONFIG_SOUND_SONICVIBES=m
-+CONFIG_SOUND_TRIDENT=m
-+# CONFIG_SOUND_MSNDCLAS is not set
-+# CONFIG_SOUND_MSNDPIN is not set
-+CONFIG_SOUND_VIA82CXXX=m
-+CONFIG_MIDI_VIA82CXXX=y
-+CONFIG_SOUND_OSS=m
-+# CONFIG_SOUND_TRACEINIT is not set
-+# CONFIG_SOUND_DMAP is not set
-+# CONFIG_SOUND_AD1816 is not set
-+CONFIG_SOUND_AD1889=m
-+CONFIG_SOUND_SGALAXY=m
-+CONFIG_SOUND_ADLIB=m
-+CONFIG_SOUND_ACI_MIXER=m
-+CONFIG_SOUND_CS4232=m
-+CONFIG_SOUND_SSCAPE=m
-+CONFIG_SOUND_GUS=m
-+CONFIG_SOUND_GUS16=y
-+CONFIG_SOUND_GUSMAX=y
-+CONFIG_SOUND_VMIDI=m
-+CONFIG_SOUND_TRIX=m
-+CONFIG_SOUND_MSS=m
-+CONFIG_SOUND_MPU401=m
-+CONFIG_SOUND_NM256=m
-+CONFIG_SOUND_MAD16=m
-+CONFIG_MAD16_OLDCARD=y
-+CONFIG_SOUND_PAS=m
-+CONFIG_SOUND_PSS=m
-+CONFIG_PSS_MIXER=y
-+CONFIG_SOUND_SB=m
-+# CONFIG_SOUND_AWE32_SYNTH is not set
-+CONFIG_SOUND_WAVEFRONT=m
-+CONFIG_SOUND_MAUI=m
-+CONFIG_SOUND_YM3812=m
-+CONFIG_SOUND_OPL3SA1=m
-+CONFIG_SOUND_OPL3SA2=m
-+CONFIG_SOUND_YMFPCI=m
-+# CONFIG_SOUND_YMFPCI_LEGACY is not set
-+CONFIG_SOUND_UART6850=m
-+CONFIG_SOUND_AEDSP16=m
-+CONFIG_SC6600=y
-+CONFIG_SC6600_JOY=y
-+CONFIG_SC6600_CDROM=4
-+CONFIG_SC6600_CDROMBASE=0x0
-+# CONFIG_AEDSP16_MSS is not set
-+# CONFIG_AEDSP16_SBPRO is not set
-+# CONFIG_AEDSP16_MPU401 is not set
-+CONFIG_SOUND_TVMIXER=m
-+CONFIG_SOUND_KAHLUA=m
-+CONFIG_SOUND_ALI5455=m
-+CONFIG_SOUND_FORTE=m
-+CONFIG_SOUND_RME96XX=m
-+CONFIG_SOUND_AD1980=m
-+
-+#
-+# USB support
-+#
-+CONFIG_USB_ARCH_HAS_HCD=y
-+CONFIG_USB_ARCH_HAS_OHCI=y
-+CONFIG_USB=y
-+# CONFIG_USB_DEBUG is not set
-+
-+#
-+# Miscellaneous USB options
-+#
-+CONFIG_USB_DEVICEFS=y
-+CONFIG_USB_BANDWIDTH=y
-+# CONFIG_USB_DYNAMIC_MINORS is not set
-+# CONFIG_USB_OTG is not set
-+
-+#
-+# USB Host Controller Drivers
-+#
-+CONFIG_USB_EHCI_HCD=y
-+CONFIG_USB_EHCI_SPLIT_ISO=y
-+CONFIG_USB_EHCI_ROOT_HUB_TT=y
-+CONFIG_USB_OHCI_HCD=m
-+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
-+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-+CONFIG_USB_UHCI_HCD=m
-+CONFIG_USB_SL811_HCD=m
-+CONFIG_USB_SL811_CS=m
-+
-+#
-+# USB Device Class drivers
-+#
-+CONFIG_USB_AUDIO=m
-+
-+#
-+# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
-+#
-+CONFIG_USB_MIDI=m
-+CONFIG_USB_ACM=m
-+CONFIG_USB_PRINTER=m
-+
-+#
-+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-+#
-+CONFIG_USB_STORAGE=m
-+# CONFIG_USB_STORAGE_DEBUG is not set
-+CONFIG_USB_STORAGE_DATAFAB=y
-+CONFIG_USB_STORAGE_FREECOM=y
-+CONFIG_USB_STORAGE_ISD200=y
-+CONFIG_USB_STORAGE_DPCM=y
-+CONFIG_USB_STORAGE_USBAT=y
-+CONFIG_USB_STORAGE_SDDR09=y
-+CONFIG_USB_STORAGE_SDDR55=y
-+CONFIG_USB_STORAGE_JUMPSHOT=y
-+
-+#
-+# USB Input Devices
-+#
-+CONFIG_USB_HID=m
-+CONFIG_USB_HIDINPUT=y
-+# CONFIG_HID_FF is not set
-+CONFIG_USB_HIDDEV=y
-+
-+#
-+# USB HID Boot Protocol drivers
-+#
-+CONFIG_USB_KBD=m
-+CONFIG_USB_MOUSE=m
-+CONFIG_USB_AIPTEK=m
-+CONFIG_USB_WACOM=m
-+CONFIG_USB_KBTAB=m
-+CONFIG_USB_POWERMATE=m
-+CONFIG_USB_MTOUCH=m
-+CONFIG_USB_EGALAX=m
-+CONFIG_USB_XPAD=m
-+CONFIG_USB_ATI_REMOTE=m
-+
-+#
-+# USB Imaging devices
-+#
-+CONFIG_USB_MDC800=m
-+CONFIG_USB_MICROTEK=m
-+
-+#
-+# USB Multimedia devices
-+#
-+# CONFIG_USB_DABUSB is not set
-+CONFIG_USB_VICAM=m
-+CONFIG_USB_DSBR=m
-+CONFIG_USB_IBMCAM=m
-+CONFIG_USB_KONICAWC=m
-+CONFIG_USB_OV511=m
-+CONFIG_USB_SE401=m
-+CONFIG_USB_SN9C102=m
-+CONFIG_USB_STV680=m
-+CONFIG_USB_W9968CF=m
-+CONFIG_USB_PWC=m
-+
-+#
-+# USB Network Adapters
-+#
-+CONFIG_USB_CATC=m
-+CONFIG_USB_KAWETH=m
-+CONFIG_USB_PEGASUS=m
-+CONFIG_USB_RTL8150=m
-+CONFIG_USB_USBNET=m
-+
-+#
-+# USB Host-to-Host Cables
-+#
-+CONFIG_USB_ALI_M5632=y
-+CONFIG_USB_AN2720=y
-+CONFIG_USB_BELKIN=y
-+CONFIG_USB_GENESYS=y
-+CONFIG_USB_NET1080=y
-+CONFIG_USB_PL2301=y
-+CONFIG_USB_KC2190=y
-+
-+#
-+# Intelligent USB Devices/Gadgets
-+#
-+CONFIG_USB_ARMLINUX=y
-+CONFIG_USB_EPSON2888=y
-+CONFIG_USB_ZAURUS=y
-+CONFIG_USB_CDCETHER=y
-+
-+#
-+# USB Network Adapters
-+#
-+CONFIG_USB_AX8817X=y
-+CONFIG_USB_ZD1201=m
-+CONFIG_USB_MON=m
-+
-+#
-+# USB port drivers
-+#
-+CONFIG_USB_USS720=m
-+
-+#
-+# USB Serial Converter support
-+#
-+CONFIG_USB_SERIAL=m
-+CONFIG_USB_SERIAL_GENERIC=y
-+CONFIG_USB_SERIAL_AIRPRIME=m
-+CONFIG_USB_SERIAL_BELKIN=m
-+CONFIG_USB_SERIAL_WHITEHEAT=m
-+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-+CONFIG_USB_SERIAL_CP2101=m
-+CONFIG_USB_SERIAL_CYPRESS_M8=m
-+CONFIG_USB_SERIAL_EMPEG=m
-+CONFIG_USB_SERIAL_FTDI_SIO=m
-+CONFIG_USB_SERIAL_VISOR=m
-+CONFIG_USB_SERIAL_IPAQ=m
-+CONFIG_USB_SERIAL_IR=m
-+CONFIG_USB_SERIAL_EDGEPORT=m
-+CONFIG_USB_SERIAL_EDGEPORT_TI=m
-+# CONFIG_USB_SERIAL_GARMIN is not set
-+CONFIG_USB_SERIAL_IPW=m
-+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
-+CONFIG_USB_SERIAL_KEYSPAN=m
-+# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
-+# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set
-+CONFIG_USB_SERIAL_KLSI=m
-+CONFIG_USB_SERIAL_KOBIL_SCT=m
-+CONFIG_USB_SERIAL_MCT_U232=m
-+CONFIG_USB_SERIAL_PL2303=m
-+CONFIG_USB_SERIAL_HP4X=m
-+CONFIG_USB_SERIAL_SAFE=m
-+# CONFIG_USB_SERIAL_SAFE_PADDED is not set
-+# CONFIG_USB_SERIAL_TI is not set
-+CONFIG_USB_SERIAL_CYBERJACK=m
-+CONFIG_USB_SERIAL_XIRCOM=m
-+CONFIG_USB_SERIAL_OPTION=m
-+CONFIG_USB_SERIAL_OMNINET=m
-+CONFIG_USB_EZUSB=y
-+
-+#
-+# USB Miscellaneous drivers
-+#
-+# CONFIG_USB_EMI62 is not set
-+# CONFIG_USB_EMI26 is not set
-+CONFIG_USB_AUERSWALD=m
-+CONFIG_USB_RIO500=m
-+CONFIG_USB_LEGOTOWER=m
-+CONFIG_USB_LCD=m
-+CONFIG_USB_LED=m
-+CONFIG_USB_CYTHERM=m
-+CONFIG_USB_PHIDGETKIT=m
-+CONFIG_USB_PHIDGETSERVO=m
-+# CONFIG_USB_IDMOUSE is not set
-+CONFIG_USB_SISUSBVGA=m
-+CONFIG_USB_TEST=m
-+
-+#
-+# USB ATM/DSL drivers
-+#
-+CONFIG_USB_ATM=m
-+CONFIG_USB_SPEEDTOUCH=m
-+
-+#
-+# USB Gadget Support
-+#
-+CONFIG_USB_GADGET=m
-+# CONFIG_USB_GADGET_DEBUG_FILES is not set
-+CONFIG_USB_GADGET_NET2280=y
-+CONFIG_USB_NET2280=m
-+# CONFIG_USB_GADGET_PXA2XX is not set
-+# CONFIG_USB_GADGET_GOKU is not set
-+# CONFIG_USB_GADGET_LH7A40X is not set
-+# CONFIG_USB_GADGET_OMAP is not set
-+# CONFIG_USB_GADGET_DUMMY_HCD is not set
-+CONFIG_USB_GADGET_DUALSPEED=y
-+CONFIG_USB_ZERO=m
-+CONFIG_USB_ETH=m
-+CONFIG_USB_ETH_RNDIS=y
-+CONFIG_USB_GADGETFS=m
-+CONFIG_USB_FILE_STORAGE=m
-+# CONFIG_USB_FILE_STORAGE_TEST is not set
-+CONFIG_USB_G_SERIAL=m
-+
-+#
-+# MMC/SD Card support
-+#
-+# CONFIG_MMC is not set
-+
-+#
-+# InfiniBand support
-+#
-+# CONFIG_INFINIBAND is not set
-+
-+#
-+# Power management options
-+#
-+
-+#
-+# ACPI (Advanced Configuration and Power Interface) Support
-+#
-+CONFIG_ACPI=y
-+CONFIG_ACPI_BOOT=y
-+CONFIG_ACPI_INTERPRETER=y
-+CONFIG_ACPI_AC=m
-+CONFIG_ACPI_BATTERY=m
-+CONFIG_ACPI_BUTTON=m
-+CONFIG_ACPI_VIDEO=m
-+CONFIG_ACPI_FAN=m
-+CONFIG_ACPI_PROCESSOR=m
-+# CONFIG_ACPI_HOTPLUG_CPU is not set
-+CONFIG_ACPI_THERMAL=m
-+CONFIG_ACPI_ASUS=m
-+CONFIG_ACPI_IBM=m
-+CONFIG_ACPI_TOSHIBA=m
-+CONFIG_ACPI_BLACKLIST_YEAR=0
-+# CONFIG_ACPI_DEBUG is not set
-+CONFIG_ACPI_BUS=y
-+CONFIG_ACPI_EC=y
-+CONFIG_ACPI_POWER=y
-+CONFIG_ACPI_PCI=y
-+CONFIG_ACPI_SYSTEM=y
-+# CONFIG_X86_PM_TIMER is not set
-+# CONFIG_ACPI_CONTAINER is not set
-+
-+#
-+# File systems
-+#
-+CONFIG_EXT2_FS=y
-+CONFIG_EXT2_FS_XATTR=y
-+CONFIG_EXT2_FS_POSIX_ACL=y
-+CONFIG_EXT2_FS_SECURITY=y
-+CONFIG_EXT3_FS=m
-+CONFIG_EXT3_FS_XATTR=y
-+CONFIG_EXT3_FS_POSIX_ACL=y
-+CONFIG_EXT3_FS_SECURITY=y
-+CONFIG_JBD=m
-+# CONFIG_JBD_DEBUG is not set
-+CONFIG_FS_MBCACHE=y
-+CONFIG_REISERFS_FS=m
-+# CONFIG_REISERFS_CHECK is not set
-+# CONFIG_REISERFS_PROC_INFO is not set
-+# CONFIG_REISERFS_FS_XATTR is not set
-+CONFIG_JFS_FS=m
-+CONFIG_JFS_POSIX_ACL=y
-+# CONFIG_JFS_SECURITY is not set
-+# CONFIG_JFS_DEBUG is not set
-+CONFIG_JFS_STATISTICS=y
-+CONFIG_FS_POSIX_ACL=y
-+
-+#
-+# XFS support
-+#
-+CONFIG_XFS_FS=m
-+CONFIG_XFS_EXPORT=y
-+CONFIG_XFS_RT=y
-+CONFIG_XFS_QUOTA=y
-+CONFIG_XFS_SECURITY=y
-+CONFIG_XFS_POSIX_ACL=y
-+CONFIG_MINIX_FS=m
-+CONFIG_ROMFS_FS=m
-+CONFIG_QUOTA=y
-+CONFIG_QFMT_V1=m
-+CONFIG_QFMT_V2=m
-+CONFIG_QUOTACTL=y
-+CONFIG_DNOTIFY=y
-+CONFIG_AUTOFS_FS=m
-+CONFIG_AUTOFS4_FS=m
-+
-+#
-+# CD-ROM/DVD Filesystems
-+#
-+CONFIG_ISO9660_FS=m
-+CONFIG_JOLIET=y
-+CONFIG_ZISOFS=y
-+CONFIG_ZISOFS_FS=m
-+CONFIG_UDF_FS=m
-+CONFIG_UDF_NLS=y
-+
-+#
-+# DOS/FAT/NT Filesystems
-+#
-+CONFIG_FAT_FS=m
-+CONFIG_MSDOS_FS=m
-+CONFIG_VFAT_FS=m
-+CONFIG_FAT_DEFAULT_CODEPAGE=437
-+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-+CONFIG_NTFS_FS=m
-+# CONFIG_NTFS_DEBUG is not set
-+# CONFIG_NTFS_RW is not set
-+
-+#
-+# Pseudo filesystems
-+#
-+CONFIG_PROC_FS=y
-+CONFIG_PROC_KCORE=y
-+CONFIG_SYSFS=y
-+# CONFIG_DEVFS_FS is not set
-+CONFIG_DEVPTS_FS_XATTR=y
-+CONFIG_DEVPTS_FS_SECURITY=y
-+CONFIG_TMPFS=y
-+CONFIG_TMPFS_XATTR=y
-+CONFIG_TMPFS_SECURITY=y
-+# CONFIG_HUGETLBFS is not set
-+# CONFIG_HUGETLB_PAGE is not set
-+CONFIG_RAMFS=y
-+
-+#
-+# Miscellaneous filesystems
-+#
-+CONFIG_ADFS_FS=m
-+# CONFIG_ADFS_FS_RW is not set
-+CONFIG_AFFS_FS=m
-+CONFIG_HFS_FS=m
-+CONFIG_HFSPLUS_FS=m
-+CONFIG_BEFS_FS=m
-+# CONFIG_BEFS_DEBUG is not set
-+CONFIG_BFS_FS=m
-+CONFIG_EFS_FS=m
-+CONFIG_JFFS_FS=m
-+CONFIG_JFFS_FS_VERBOSE=0
-+CONFIG_JFFS_PROC_FS=y
-+CONFIG_JFFS2_FS=m
-+CONFIG_JFFS2_FS_DEBUG=0
-+# CONFIG_JFFS2_FS_NAND is not set
-+# CONFIG_JFFS2_FS_NOR_ECC is not set
-+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-+CONFIG_JFFS2_ZLIB=y
-+CONFIG_JFFS2_RTIME=y
-+# CONFIG_JFFS2_RUBIN is not set
-+CONFIG_CRAMFS=y
-+CONFIG_VXFS_FS=m
-+CONFIG_HPFS_FS=m
-+CONFIG_QNX4FS_FS=m
-+# CONFIG_QNX4FS_RW is not set
-+CONFIG_SYSV_FS=m
-+CONFIG_UFS_FS=m
-+# CONFIG_UFS_FS_WRITE is not set
-+
-+#
-+# Network File Systems
-+#
-+CONFIG_NFS_FS=m
-+CONFIG_NFS_V3=y
-+CONFIG_NFS_V4=y
-+CONFIG_NFS_DIRECTIO=y
-+CONFIG_NFSD=m
-+CONFIG_NFSD_V3=y
-+CONFIG_NFSD_V4=y
-+CONFIG_NFSD_TCP=y
-+CONFIG_LOCKD=m
-+CONFIG_LOCKD_V4=y
-+CONFIG_EXPORTFS=m
-+CONFIG_SUNRPC=m
-+CONFIG_SUNRPC_GSS=m
-+CONFIG_RPCSEC_GSS_KRB5=m
-+CONFIG_RPCSEC_GSS_SPKM3=m
-+CONFIG_SMB_FS=m
-+# CONFIG_SMB_NLS_DEFAULT is not set
-+CONFIG_CIFS=m
-+# CONFIG_CIFS_STATS is not set
-+# CONFIG_CIFS_XATTR is not set
-+# CONFIG_CIFS_EXPERIMENTAL is not set
-+CONFIG_NCP_FS=m
-+CONFIG_NCPFS_PACKET_SIGNING=y
-+CONFIG_NCPFS_IOCTL_LOCKING=y
-+CONFIG_NCPFS_STRONG=y
-+CONFIG_NCPFS_NFS_NS=y
-+CONFIG_NCPFS_OS2_NS=y
-+# CONFIG_NCPFS_SMALLDOS is not set
-+CONFIG_NCPFS_NLS=y
-+CONFIG_NCPFS_EXTRAS=y
-+CONFIG_CODA_FS=m
-+# CONFIG_CODA_FS_OLD_API is not set
-+CONFIG_AFS_FS=m
-+CONFIG_RXRPC=m
-+
-+#
-+# Partition Types
-+#
-+CONFIG_PARTITION_ADVANCED=y
-+CONFIG_ACORN_PARTITION=y
-+CONFIG_ACORN_PARTITION_CUMANA=y
-+# CONFIG_ACORN_PARTITION_EESOX is not set
-+CONFIG_ACORN_PARTITION_ICS=y
-+# CONFIG_ACORN_PARTITION_ADFS is not set
-+# CONFIG_ACORN_PARTITION_POWERTEC is not set
-+CONFIG_ACORN_PARTITION_RISCIX=y
-+CONFIG_OSF_PARTITION=y
-+CONFIG_AMIGA_PARTITION=y
-+CONFIG_ATARI_PARTITION=y
-+CONFIG_MAC_PARTITION=y
-+CONFIG_MSDOS_PARTITION=y
-+CONFIG_BSD_DISKLABEL=y
-+CONFIG_MINIX_SUBPARTITION=y
-+CONFIG_SOLARIS_X86_PARTITION=y
-+CONFIG_UNIXWARE_DISKLABEL=y
-+CONFIG_LDM_PARTITION=y
-+# CONFIG_LDM_DEBUG is not set
-+CONFIG_SGI_PARTITION=y
-+CONFIG_ULTRIX_PARTITION=y
-+CONFIG_SUN_PARTITION=y
-+CONFIG_EFI_PARTITION=y
-+
-+#
-+# Native Language Support
-+#
-+CONFIG_NLS=y
-+CONFIG_NLS_DEFAULT="cp437"
-+CONFIG_NLS_CODEPAGE_437=m
-+CONFIG_NLS_CODEPAGE_737=m
-+CONFIG_NLS_CODEPAGE_775=m
-+CONFIG_NLS_CODEPAGE_850=m
-+CONFIG_NLS_CODEPAGE_852=m
-+CONFIG_NLS_CODEPAGE_855=m
-+CONFIG_NLS_CODEPAGE_857=m
-+CONFIG_NLS_CODEPAGE_860=m
-+CONFIG_NLS_CODEPAGE_861=m
-+CONFIG_NLS_CODEPAGE_862=m
-+CONFIG_NLS_CODEPAGE_863=m
-+CONFIG_NLS_CODEPAGE_864=m
-+CONFIG_NLS_CODEPAGE_865=m
-+CONFIG_NLS_CODEPAGE_866=m
-+CONFIG_NLS_CODEPAGE_869=m
-+CONFIG_NLS_CODEPAGE_936=m
-+CONFIG_NLS_CODEPAGE_950=m
-+CONFIG_NLS_CODEPAGE_932=m
-+CONFIG_NLS_CODEPAGE_949=m
-+CONFIG_NLS_CODEPAGE_874=m
-+CONFIG_NLS_ISO8859_8=m
-+CONFIG_NLS_CODEPAGE_1250=m
-+CONFIG_NLS_CODEPAGE_1251=m
-+CONFIG_NLS_ASCII=m
-+CONFIG_NLS_ISO8859_1=m
-+CONFIG_NLS_ISO8859_2=m
-+CONFIG_NLS_ISO8859_3=m
-+CONFIG_NLS_ISO8859_4=m
-+CONFIG_NLS_ISO8859_5=m
-+CONFIG_NLS_ISO8859_6=m
-+CONFIG_NLS_ISO8859_7=m
-+CONFIG_NLS_ISO8859_9=m
-+CONFIG_NLS_ISO8859_13=m
-+CONFIG_NLS_ISO8859_14=m
-+CONFIG_NLS_ISO8859_15=m
-+CONFIG_NLS_KOI8_R=m
-+CONFIG_NLS_KOI8_U=m
-+CONFIG_NLS_UTF8=m
-+
-+#
-+# Security options
-+#
-+CONFIG_KEYS=y
-+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
-+CONFIG_SECURITY=y
-+# CONFIG_SECURITY_NETWORK is not set
-+CONFIG_SECURITY_CAPABILITIES=y
-+CONFIG_SECURITY_ROOTPLUG=m
-+CONFIG_SECURITY_SECLVL=m
-+CONFIG_SECURITY_SELINUX=y
-+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-+CONFIG_SECURITY_SELINUX_DISABLE=y
-+CONFIG_SECURITY_SELINUX_DEVELOP=y
-+CONFIG_SECURITY_SELINUX_AVC_STATS=y
-+CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-+
-+#
-+# Cryptographic options
-+#
-+CONFIG_CRYPTO=y
-+CONFIG_CRYPTO_HMAC=y
-+CONFIG_CRYPTO_NULL=m
-+CONFIG_CRYPTO_MD4=m
-+CONFIG_CRYPTO_MD5=y
-+CONFIG_CRYPTO_SHA1=m
-+CONFIG_CRYPTO_SHA256=m
-+CONFIG_CRYPTO_SHA512=m
-+CONFIG_CRYPTO_WP512=m
-+CONFIG_CRYPTO_TGR192=m
-+CONFIG_CRYPTO_DES=m
-+CONFIG_CRYPTO_BLOWFISH=m
-+CONFIG_CRYPTO_TWOFISH=m
-+CONFIG_CRYPTO_SERPENT=m
-+CONFIG_CRYPTO_AES_586=m
-+CONFIG_CRYPTO_CAST5=m
-+CONFIG_CRYPTO_CAST6=m
-+CONFIG_CRYPTO_TEA=m
-+CONFIG_CRYPTO_ARC4=m
-+CONFIG_CRYPTO_KHAZAD=m
-+CONFIG_CRYPTO_ANUBIS=m
-+CONFIG_CRYPTO_DEFLATE=m
-+CONFIG_CRYPTO_MICHAEL_MIC=m
-+CONFIG_CRYPTO_CRC32C=m
-+CONFIG_CRYPTO_TEST=m
-+
-+#
-+# Hardware crypto devices
-+#
-+# CONFIG_CRYPTO_DEV_PADLOCK is not set
-+
-+#
-+# Library routines
-+#
-+CONFIG_CRC_CCITT=m
-+CONFIG_CRC32=y
-+CONFIG_LIBCRC32C=m
-+CONFIG_ZLIB_INFLATE=y
-+CONFIG_ZLIB_DEFLATE=m
-+CONFIG_REED_SOLOMON=m
-+CONFIG_REED_SOLOMON_DEC16=y
-+
-+#
-+# Kernel hacking
-+#
-+# CONFIG_PRINTK_TIME is not set
-+CONFIG_DEBUG_KERNEL=y
-+CONFIG_MAGIC_SYSRQ=y
-+CONFIG_LOG_BUF_SHIFT=14
-+# CONFIG_SCHEDSTATS is not set
-+# CONFIG_DEBUG_SLAB is not set
-+# CONFIG_DEBUG_SPINLOCK is not set
-+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-+# CONFIG_DEBUG_KOBJECT is not set
-+# CONFIG_DEBUG_HIGHMEM is not set
-+# CONFIG_DEBUG_BUGVERBOSE is not set
-+# CONFIG_DEBUG_INFO is not set
-+# CONFIG_DEBUG_FS is not set
-+# CONFIG_FRAME_POINTER is not set
-+# CONFIG_DEBUG_STACKOVERFLOW is not set
-+# CONFIG_KPROBES is not set
-+# CONFIG_DEBUG_STACK_USAGE is not set
-+# CONFIG_DEBUG_PAGEALLOC is not set
-+# CONFIG_4KSTACKS is not set
-+CONFIG_X86_FIND_SMP_CONFIG=y
-+CONFIG_X86_MPPARSE=y
-diff -Nurp pristine-linux-2.6.12/drivers/acpi/pci_irq.c linux-2.6.12-xen/drivers/acpi/pci_irq.c
---- pristine-linux-2.6.12/drivers/acpi/pci_irq.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/acpi/pci_irq.c	2006-02-25 00:12:30.028558490 +0100
-@@ -433,8 +433,9 @@ acpi_pci_irq_enable (
- 		printk(KERN_WARNING PREFIX "PCI Interrupt %s[%c]: no GSI",
- 			pci_name(dev), ('A' + pin));
- 		/* Interrupt Line values above 0xF are forbidden */
--		if (dev->irq >= 0 && (dev->irq <= 0xF)) {
-+		if (dev->irq > 0 && (dev->irq <= 0xF)) {
- 			printk(" - using IRQ %d\n", dev->irq);
-+			acpi_register_gsi(dev->irq, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
- 			return_VALUE(0);
- 		}
- 		else {
-diff -Nurp pristine-linux-2.6.12/drivers/acpi/tables.c linux-2.6.12-xen/drivers/acpi/tables.c
---- pristine-linux-2.6.12/drivers/acpi/tables.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/acpi/tables.c	2006-02-16 23:44:08.000000000 +0100
-@@ -565,6 +565,11 @@ acpi_table_get_sdt (
-  * 
-  * result: sdt_entry[] is initialized
-  */
-+#if defined(CONFIG_XEN_X86) || defined(CONFIG_XEN_X86_64)
-+#define acpi_rsdp_phys_to_va(rsdp_phys) isa_bus_to_virt(rsdp_phys)
-+#else
-+#define acpi_rsdp_phys_to_va(rsdp_phys) __va(rsdp_phys)
-+#endif
- 
- int __init
- acpi_table_init (void)
-@@ -581,7 +586,7 @@ acpi_table_init (void)
- 		return -ENODEV;
- 	}
- 
--	rsdp = (struct acpi_table_rsdp *) __va(rsdp_phys);
-+	rsdp = (struct acpi_table_rsdp *) acpi_rsdp_phys_to_va(rsdp_phys);
- 	if (!rsdp) {
- 		printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
- 		return -ENODEV;
-diff -Nurp pristine-linux-2.6.12/drivers/char/mem.c linux-2.6.12-xen/drivers/char/mem.c
---- pristine-linux-2.6.12/drivers/char/mem.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/mem.c	2006-02-16 23:44:08.000000000 +0100
-@@ -104,6 +104,7 @@ static inline int valid_phys_addr_range(
- }
- #endif
- 
-+#ifndef ARCH_HAS_DEV_MEM
- /*
-  * This funcion reads the *physical* memory. The f_pos points directly to the 
-  * memory location. 
-@@ -228,6 +229,7 @@ static ssize_t write_mem(struct file * f
- 	*ppos += written;
- 	return written;
- }
-+#endif
- 
- static int mmap_mem(struct file * file, struct vm_area_struct * vma)
- {
-@@ -722,6 +724,7 @@ static int open_port(struct inode * inod
- #define open_mem	open_port
- #define open_kmem	open_mem
- 
-+#ifndef ARCH_HAS_DEV_MEM
- static struct file_operations mem_fops = {
- 	.llseek		= memory_lseek,
- 	.read		= read_mem,
-@@ -729,6 +732,9 @@ static struct file_operations mem_fops =
- 	.mmap		= mmap_mem,
- 	.open		= open_mem,
- };
-+#else
-+extern struct file_operations mem_fops;
-+#endif
- 
- static struct file_operations kmem_fops = {
- 	.llseek		= memory_lseek,
-diff -Nurp pristine-linux-2.6.12/drivers/char/rocket.c linux-2.6.12-xen/drivers/char/rocket.c
---- pristine-linux-2.6.12/drivers/char/rocket.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/rocket.c	2006-02-25 00:12:30.030558189 +0100
-@@ -277,7 +277,7 @@ static void rp_do_receive(struct r_port 
- 		ToRecv = space;
- 
- 	if (ToRecv <= 0)
--		return;
-+		goto done;
- 
- 	/*
- 	 * if status indicates there are errored characters in the
-@@ -359,6 +359,7 @@ static void rp_do_receive(struct r_port 
- 	}
- 	/*  Push the data up to the tty layer */
- 	ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
-+done:
- 	tty_ldisc_deref(ld);
- }
- 
-diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/Kconfig linux-2.6.12-xen/drivers/char/tpm/Kconfig
---- pristine-linux-2.6.12/drivers/char/tpm/Kconfig	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/tpm/Kconfig	2006-02-16 23:44:08.000000000 +0100
-@@ -35,5 +35,15 @@ config TCG_ATMEL
- 	  will be accessible from within Linux.  To compile this driver 
- 	  as a module, choose M here; the module will be called tpm_atmel.
- 
-+config TCG_XEN
-+	tristate "XEN TPM Interface"
-+	depends on TCG_TPM && ARCH_XEN && XEN_TPMDEV_FRONTEND
-+	---help---
-+	  If you want to make TPM support available to a Xen
-+	  user domain, say Yes and it will
-+          be accessible from within Linux. To compile this driver
-+          as a module, choose M here; the module will be called
-+          tpm_xen.
-+
- endmenu
- 
-diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/Kconfig.domU linux-2.6.12-xen/drivers/char/tpm/Kconfig.domU
---- pristine-linux-2.6.12/drivers/char/tpm/Kconfig.domU	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/char/tpm/Kconfig.domU	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,30 @@
-+#
-+# TPM device configuration
-+#
-+
-+menu "TPM devices"
-+
-+config TCG_TPM
-+	tristate "TPM Support for XEN"
-+	depends on ARCH_XEN && !XEN_PHYSDEV_ACCESS
-+	---help---
-+	  If you want to make TPM security available in your system,
-+	  say Yes and it will be accessible from within a user domain.  For
-+	  more information see <http://www.trustedcomputinggroup.org>.
-+	  An implementation of the Trusted Software Stack (TSS), the
-+	  userspace enablement piece of the specification, can be
-+	  obtained at: <http://sourceforge.net/projects/trousers>.  To
-+	  compile this driver as a module, choose M here; the module
-+	  will be called tpm. If unsure, say N.
-+
-+config TCG_XEN
-+	tristate "XEN TPM Interface"
-+	depends on TCG_TPM && ARCH_XEN && XEN_TPMDEV_FRONTEND
-+	---help---
-+	  If you want to make TPM support available to a Xen
-+	  user domain, say Yes and it will
-+          be accessible from within Linux. To compile this driver
-+          as a module, choose M here; the module will be called
-+          tpm_xen.
-+
-+endmenu
-diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/Makefile linux-2.6.12-xen/drivers/char/tpm/Makefile
---- pristine-linux-2.6.12/drivers/char/tpm/Makefile	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/tpm/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -1,7 +1,13 @@
- #
- # Makefile for the kernel tpm device drivers.
- #
-+ifeq ($(CONFIG_XEN_PHYSDEV_ACCESS),y)
- obj-$(CONFIG_TCG_TPM) += tpm.o
- obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
- obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
--
-+obj-$(CONFIG_TCG_TIS) += tpm_tis.o
-+obj-$(CONFIG_TCG_XEN) += tpm_xen.o
-+else
-+obj-$(CONFIG_TCG_TPM) += tpm.o
-+obj-$(CONFIG_TCG_XEN) += tpm_xen.o
-+endif
-diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm_atmel.c linux-2.6.12-xen/drivers/char/tpm/tpm_atmel.c
---- pristine-linux-2.6.12/drivers/char/tpm/tpm_atmel.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/tpm/tpm_atmel.c	2006-02-16 23:44:08.000000000 +0100
-@@ -22,17 +22,23 @@
- #include "tpm.h"
- 
- /* Atmel definitions */
--#define	TPM_ATML_BASE			0x400
-+enum tpm_atmel_addr {
-+	TPM_ATMEL_BASE_ADDR_LO = 0x08,
-+	TPM_ATMEL_BASE_ADDR_HI = 0x09
-+};
- 
- /* write status bits */
--#define	ATML_STATUS_ABORT		0x01
--#define	ATML_STATUS_LASTBYTE		0x04
--
-+enum tpm_atmel_write_status {
-+	ATML_STATUS_ABORT = 0x01,
-+	ATML_STATUS_LASTBYTE = 0x04
-+};
- /* read status bits */
--#define	ATML_STATUS_BUSY		0x01
--#define	ATML_STATUS_DATA_AVAIL		0x02
--#define	ATML_STATUS_REWRITE		0x04
--
-+enum tpm_atmel_read_status {
-+	ATML_STATUS_BUSY = 0x01,
-+	ATML_STATUS_DATA_AVAIL = 0x02,
-+	ATML_STATUS_REWRITE = 0x04,
-+	ATML_STATUS_READY = 0x08
-+};
- 
- static int tpm_atml_recv(struct tpm_chip *chip, u8 * buf, size_t count)
- {
-@@ -48,7 +54,7 @@ static int tpm_atml_recv(struct tpm_chip
- 	for (i = 0; i < 6; i++) {
- 		status = inb(chip->vendor->base + 1);
- 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
--			dev_err(&chip->pci_dev->dev,
-+			dev_err(chip->dev,
- 				"error reading header\n");
- 			return -EIO;
- 		}
-@@ -60,12 +66,12 @@ static int tpm_atml_recv(struct tpm_chip
- 	size = be32_to_cpu(*native_size);
- 
- 	if (count < size) {
--		dev_err(&chip->pci_dev->dev,
-+		dev_err(chip->dev,
- 			"Recv size(%d) less than available space\n", size);
- 		for (; i < size; i++) {	/* clear the waiting data anyway */
- 			status = inb(chip->vendor->base + 1);
- 			if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
--				dev_err(&chip->pci_dev->dev,
-+				dev_err(chip->dev,
- 					"error reading data\n");
- 				return -EIO;
- 			}
-@@ -77,7 +83,7 @@ static int tpm_atml_recv(struct tpm_chip
- 	for (; i < size; i++) {
- 		status = inb(chip->vendor->base + 1);
- 		if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
--			dev_err(&chip->pci_dev->dev,
-+			dev_err(chip->dev,
- 				"error reading data\n");
- 			return -EIO;
- 		}
-@@ -87,7 +93,7 @@ static int tpm_atml_recv(struct tpm_chip
- 	/* make sure data available is gone */
- 	status = inb(chip->vendor->base + 1);
- 	if (status & ATML_STATUS_DATA_AVAIL) {
--		dev_err(&chip->pci_dev->dev, "data available is stuck\n");
-+		dev_err(chip->dev, "data available is stuck\n");
- 		return -EIO;
- 	}
- 
-@@ -98,9 +104,9 @@ static int tpm_atml_send(struct tpm_chip
- {
- 	int i;
- 
--	dev_dbg(&chip->pci_dev->dev, "tpm_atml_send: ");
-+	dev_dbg(chip->dev, "tpm_atml_send:\n");
- 	for (i = 0; i < count; i++) {
--		dev_dbg(&chip->pci_dev->dev, "0x%x(%d) ", buf[i], buf[i]);
-+		dev_dbg(chip->dev, "%d 0x%x(%d)\n",  i, buf[i], buf[i]);
- 		outb(buf[i], chip->vendor->base);
- 	}
- 
-@@ -112,6 +118,11 @@ static void tpm_atml_cancel(struct tpm_c
- 	outb(ATML_STATUS_ABORT, chip->vendor->base + 1);
- }
- 
-+static u8 tpm_atml_status(struct tpm_chip *chip)
-+{
-+	return inb(chip->vendor->base + 1);
-+}
-+
- static struct file_operations atmel_ops = {
- 	.owner = THIS_MODULE,
- 	.llseek = no_llseek,
-@@ -121,13 +132,30 @@ static struct file_operations atmel_ops 
- 	.release = tpm_release,
- };
- 
-+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-+static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
-+
-+static struct attribute* atmel_attrs[] = {
-+	&dev_attr_pubek.attr,
-+	&dev_attr_pcrs.attr,
-+	&dev_attr_caps.attr,
-+	&dev_attr_cancel.attr,
-+	0,
-+};
-+
-+static struct attribute_group atmel_attr_grp = { .attrs = atmel_attrs };
-+
- static struct tpm_vendor_specific tpm_atmel = {
- 	.recv = tpm_atml_recv,
- 	.send = tpm_atml_send,
- 	.cancel = tpm_atml_cancel,
-+	.status = tpm_atml_status,
- 	.req_complete_mask = ATML_STATUS_BUSY | ATML_STATUS_DATA_AVAIL,
- 	.req_complete_val = ATML_STATUS_DATA_AVAIL,
--	.base = TPM_ATML_BASE,
-+	.req_canceled = ATML_STATUS_READY,
-+	.attr_group = &atmel_attr_grp,
- 	.miscdev = { .fops = &atmel_ops, },
- };
- 
-@@ -136,34 +164,36 @@ static int __devinit tpm_atml_init(struc
- {
- 	u8 version[4];
- 	int rc = 0;
-+	int lo, hi;
- 
- 	if (pci_enable_device(pci_dev))
- 		return -EIO;
- 
--	if (tpm_lpc_bus_init(pci_dev, TPM_ATML_BASE)) {
--		rc = -ENODEV;
--		goto out_err;
--	}
-+	lo = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_LO);
-+	hi = tpm_read_index(TPM_ADDR, TPM_ATMEL_BASE_ADDR_HI);
-+
-+	tpm_atmel.base = (hi<<8)|lo;
-+	dev_dbg( &pci_dev->dev, "Operating with base: 0x%x\n", tpm_atmel.base);
- 
- 	/* verify that it is an Atmel part */
--	if (tpm_read_index(4) != 'A' || tpm_read_index(5) != 'T'
--	    || tpm_read_index(6) != 'M' || tpm_read_index(7) != 'L') {
-+	if (tpm_read_index(TPM_ADDR, 4) != 'A' || tpm_read_index(TPM_ADDR, 5) != 'T'
-+	    || tpm_read_index(TPM_ADDR, 6) != 'M' || tpm_read_index(TPM_ADDR, 7) != 'L') {
- 		rc = -ENODEV;
- 		goto out_err;
- 	}
- 
- 	/* query chip for its version number */
--	if ((version[0] = tpm_read_index(0x00)) != 0xFF) {
--		version[1] = tpm_read_index(0x01);
--		version[2] = tpm_read_index(0x02);
--		version[3] = tpm_read_index(0x03);
-+	if ((version[0] = tpm_read_index(TPM_ADDR, 0x00)) != 0xFF) {
-+		version[1] = tpm_read_index(TPM_ADDR, 0x01);
-+		version[2] = tpm_read_index(TPM_ADDR, 0x02);
-+		version[3] = tpm_read_index(TPM_ADDR, 0x03);
- 	} else {
- 		dev_info(&pci_dev->dev, "version query failed\n");
- 		rc = -ENODEV;
- 		goto out_err;
- 	}
- 
--	if ((rc = tpm_register_hardware(pci_dev, &tpm_atmel)) < 0)
-+	if ((rc = tpm_register_hardware(&pci_dev->dev, &tpm_atmel)) < 0)
- 		goto out_err;
- 
- 	dev_info(&pci_dev->dev,
-@@ -176,13 +206,30 @@ out_err:
- 	return rc;
- }
- 
-+static void __devexit tpm_atml_remove(struct pci_dev *pci_dev) 
-+{
-+	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
-+
-+	if ( chip )
-+		tpm_remove_hardware(chip->dev);
-+}
-+
- static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
-+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
-+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
-+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
-+#ifndef PCI_DEVICE_ID_SERVERWORKS_CSB6LPC
-+#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
-+#else
-+#warning Remove the define of PCI_DEVICE_ID_SERVERWORKS_CSB6LPC
-+#endif
-+	{PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6LPC)},
- 	{0,}
- };
- 
-@@ -192,7 +239,7 @@ static struct pci_driver atmel_pci_drive
- 	.name = "tpm_atmel",
- 	.id_table = tpm_pci_tbl,
- 	.probe = tpm_atml_init,
--	.remove = __devexit_p(tpm_remove),
-+	.remove = __devexit_p(tpm_atml_remove),
- 	.suspend = tpm_pm_suspend,
- 	.resume = tpm_pm_resume,
- };
-@@ -207,7 +254,7 @@ static void __exit cleanup_atmel(void)
- 	pci_unregister_driver(&atmel_pci_driver);
- }
- 
--module_init(init_atmel);
-+fs_initcall(init_atmel);
- module_exit(cleanup_atmel);
- 
- MODULE_AUTHOR("Leendert van Doorn (leendert at watson.ibm.com)");
-diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm.c linux-2.6.12-xen/drivers/char/tpm/tpm.c
---- pristine-linux-2.6.12/drivers/char/tpm/tpm.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/tpm/tpm.c	2006-02-16 23:44:08.000000000 +0100
-@@ -19,7 +19,7 @@
-  * 
-  * Note, the TPM chip is not interrupt driven (only polling)
-  * and can have very long timeouts (minutes!). Hence the unusual
-- * calls to schedule_timeout.
-+ * calls to msleep.
-  *
-  */
- 
-@@ -28,19 +28,20 @@
- #include <linux/spinlock.h>
- #include "tpm.h"
- 
--#define	TPM_MINOR			224	/* officially assigned */
-+#define TPM_CHIP_NUM_MASK	0x0000ffff
-+#define TPM_CHIP_TYPE_SHIFT	16	
- 
--#define	TPM_BUFSIZE			2048
--
--/* PCI configuration addresses */
--#define	PCI_GEN_PMCON_1			0xA0
--#define	PCI_GEN1_DEC			0xE4
--#define	PCI_LPC_EN			0xE6
--#define	PCI_GEN2_DEC			0xEC
-+enum tpm_const {
-+	TPM_MINOR = 224,	/* officially assigned */
-+	TPM_MIN_BUFSIZE = 2048,
-+	TPM_MAX_BUFSIZE = 64 * 1024,
-+	TPM_NUM_DEVICES = 256,
-+	TPM_NUM_MASK_ENTRIES = TPM_NUM_DEVICES / (8 * sizeof(int))
-+};
- 
- static LIST_HEAD(tpm_chip_list);
- static DEFINE_SPINLOCK(driver_lock);
--static int dev_mask[32];
-+static int dev_mask[TPM_NUM_MASK_ENTRIES];
- 
- static void user_reader_timeout(unsigned long ptr)
- {
-@@ -48,154 +49,81 @@ static void user_reader_timeout(unsigned
- 
- 	down(&chip->buffer_mutex);
- 	atomic_set(&chip->data_pending, 0);
--	memset(chip->data_buffer, 0, TPM_BUFSIZE);
-+	memset(chip->data_buffer, 0, chip->vendor->buffersize);
- 	up(&chip->buffer_mutex);
- }
- 
--void tpm_time_expired(unsigned long ptr)
--{
--	int *exp = (int *) ptr;
--	*exp = 1;
--}
--
--EXPORT_SYMBOL_GPL(tpm_time_expired);
--
--/*
-- * Initialize the LPC bus and enable the TPM ports
-- */
--int tpm_lpc_bus_init(struct pci_dev *pci_dev, u16 base)
--{
--	u32 lpcenable, tmp;
--	int is_lpcm = 0;
--
--	switch (pci_dev->vendor) {
--	case PCI_VENDOR_ID_INTEL:
--		switch (pci_dev->device) {
--		case PCI_DEVICE_ID_INTEL_82801CA_12:
--		case PCI_DEVICE_ID_INTEL_82801DB_12:
--			is_lpcm = 1;
--			break;
--		}
--		/* init ICH (enable LPC) */
--		pci_read_config_dword(pci_dev, PCI_GEN1_DEC, &lpcenable);
--		lpcenable |= 0x20000000;
--		pci_write_config_dword(pci_dev, PCI_GEN1_DEC, lpcenable);
--
--		if (is_lpcm) {
--			pci_read_config_dword(pci_dev, PCI_GEN1_DEC,
--					      &lpcenable);
--			if ((lpcenable & 0x20000000) == 0) {
--				dev_err(&pci_dev->dev,
--					"cannot enable LPC\n");
--				return -ENODEV;
--			}
--		}
--
--		/* initialize TPM registers */
--		pci_read_config_dword(pci_dev, PCI_GEN2_DEC, &tmp);
--
--		if (!is_lpcm)
--			tmp = (tmp & 0xFFFF0000) | (base & 0xFFF0);
--		else
--			tmp =
--			    (tmp & 0xFFFF0000) | (base & 0xFFF0) |
--			    0x00000001;
--
--		pci_write_config_dword(pci_dev, PCI_GEN2_DEC, tmp);
--
--		if (is_lpcm) {
--			pci_read_config_dword(pci_dev, PCI_GEN_PMCON_1,
--					      &tmp);
--			tmp |= 0x00000004;	/* enable CLKRUN */
--			pci_write_config_dword(pci_dev, PCI_GEN_PMCON_1,
--					       tmp);
--		}
--		tpm_write_index(0x0D, 0x55);	/* unlock 4F */
--		tpm_write_index(0x0A, 0x00);	/* int disable */
--		tpm_write_index(0x08, base);	/* base addr lo */
--		tpm_write_index(0x09, (base & 0xFF00) >> 8);	/* base addr hi */
--		tpm_write_index(0x0D, 0xAA);	/* lock 4F */
--		break;
--	case PCI_VENDOR_ID_AMD:
--		/* nothing yet */
--		break;
--	}
--
--	return 0;
--}
--
--EXPORT_SYMBOL_GPL(tpm_lpc_bus_init);
--
- /*
-  * Internal kernel interface to transmit TPM commands
-  */
--static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
-+static ssize_t tpm_transmit(struct tpm_chip * chip, const char *buf,
- 			    size_t bufsiz)
- {
--	ssize_t len;
-+	ssize_t rc;
- 	u32 count;
--	__be32 *native_size;
-+	unsigned long stop;
-+
-+	if (!chip)
-+		return -ENODEV;
-+
-+	if ( !chip )
-+		return -ENODEV;
- 
--	native_size = (__force __be32 *) (buf + 2);
--	count = be32_to_cpu(*native_size);
-+	count = be32_to_cpu(*((__be32 *) (buf + 2)));
- 
- 	if (count == 0)
- 		return -ENODATA;
- 	if (count > bufsiz) {
--		dev_err(&chip->pci_dev->dev,
-+		dev_err(chip->dev,
- 			"invalid count value %x %zx \n", count, bufsiz);
- 		return -E2BIG;
- 	}
- 
- 	down(&chip->tpm_mutex);
- 
--	if ((len = chip->vendor->send(chip, (u8 *) buf, count)) < 0) {
--		dev_err(&chip->pci_dev->dev,
--			"tpm_transmit: tpm_send: error %zd\n", len);
--		return len;
-+	if ((rc = chip->vendor->send(chip, (u8 *) buf, count)) < 0) {
-+		dev_err(chip->dev,
-+			"tpm_transmit: tpm_send: error %zd\n", rc);
-+		goto out;
- 	}
- 
--	down(&chip->timer_manipulation_mutex);
--	chip->time_expired = 0;
--	init_timer(&chip->device_timer);
--	chip->device_timer.function = tpm_time_expired;
--	chip->device_timer.expires = jiffies + 2 * 60 * HZ;
--	chip->device_timer.data = (unsigned long) &chip->time_expired;
--	add_timer(&chip->device_timer);
--	up(&chip->timer_manipulation_mutex);
--
-+	stop = jiffies + 2 * 60 * HZ;
- 	do {
--		u8 status = inb(chip->vendor->base + 1);
-+		u8 status = chip->vendor->status(chip);
- 		if ((status & chip->vendor->req_complete_mask) ==
- 		    chip->vendor->req_complete_val) {
--			down(&chip->timer_manipulation_mutex);
--			del_singleshot_timer_sync(&chip->device_timer);
--			up(&chip->timer_manipulation_mutex);
- 			goto out_recv;
- 		}
--		set_current_state(TASK_UNINTERRUPTIBLE);
--		schedule_timeout(TPM_TIMEOUT);
-+
-+		if ((status == chip->vendor->req_canceled)) {
-+			dev_err(chip->dev, "Operation Canceled\n");
-+			rc = -ECANCELED;
-+			goto out;
-+		}
-+
-+		msleep(TPM_TIMEOUT);	/* CHECK */
- 		rmb();
--	} while (!chip->time_expired);
-+	} while (time_before(jiffies, stop));
- 
- 
- 	chip->vendor->cancel(chip);
--	dev_err(&chip->pci_dev->dev, "Time expired\n");
--	up(&chip->tpm_mutex);
--	return -EIO;
-+	dev_err(chip->dev, "Operation Timed out\n");
-+	rc = -ETIME;
-+	goto out;
- 
- out_recv:
--	len = chip->vendor->recv(chip, (u8 *) buf, bufsiz);
--	if (len < 0)
--		dev_err(&chip->pci_dev->dev,
--			"tpm_transmit: tpm_recv: error %zd\n", len);
-+	rc = chip->vendor->recv(chip, (u8 *) buf, bufsiz);
-+	if (rc < 0)
-+		dev_err(chip->dev,
-+			"tpm_transmit: tpm_recv: error %zd\n", rc);
-+out:
- 	up(&chip->tpm_mutex);
--	return len;
-+	return rc;
- }
- 
- #define TPM_DIGEST_SIZE 20
- #define CAP_PCR_RESULT_SIZE 18
--static u8 cap_pcr[] = {
-+static const u8 cap_pcr[] = {
- 	0, 193,			/* TPM_TAG_RQU_COMMAND */
- 	0, 0, 0, 22,		/* length */
- 	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
-@@ -205,75 +133,102 @@ static u8 cap_pcr[] = {
- };
- 
- #define READ_PCR_RESULT_SIZE 30
--static u8 pcrread[] = {
-+static const u8 pcrread[] = {
- 	0, 193,			/* TPM_TAG_RQU_COMMAND */
- 	0, 0, 0, 14,		/* length */
- 	0, 0, 0, 21,		/* TPM_ORD_PcrRead */
- 	0, 0, 0, 0		/* PCR index */
- };
- 
--static ssize_t show_pcrs(struct device *dev, char *buf)
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
-+ssize_t tpm_show_pcrs(struct device *dev, struct device_attribute *attr,
-+		      char *buf)
-+#else
-+ssize_t tpm_show_pcrs(struct device *dev,
-+		      char *buf)
-+#endif
- {
- 	u8 data[READ_PCR_RESULT_SIZE];
- 	ssize_t len;
--	int i, j, index, num_pcrs;
-+	int i, j, num_pcrs;
-+	__be32 index;
- 	char *str = buf;
- 
--	struct tpm_chip *chip =
--	    pci_get_drvdata(container_of(dev, struct pci_dev, dev));
-+	struct tpm_chip *chip = dev_get_drvdata(dev);
- 	if (chip == NULL)
- 		return -ENODEV;
- 
- 	memcpy(data, cap_pcr, sizeof(cap_pcr));
- 	if ((len = tpm_transmit(chip, data, sizeof(data)))
--	    < CAP_PCR_RESULT_SIZE)
--		return len;
-+	    < CAP_PCR_RESULT_SIZE) {
-+		dev_dbg(chip->dev, "A TPM error (%d) occurred "
-+				"attempting to determine the number of PCRS\n",
-+			be32_to_cpu(*((__be32 *) (data + 6))));
-+		return 0;
-+	}
- 
--	num_pcrs = be32_to_cpu(*((__force __be32 *) (data + 14)));
-+	num_pcrs = be32_to_cpu(*((__be32 *) (data + 14)));
- 
- 	for (i = 0; i < num_pcrs; i++) {
- 		memcpy(data, pcrread, sizeof(pcrread));
- 		index = cpu_to_be32(i);
- 		memcpy(data + 10, &index, 4);
- 		if ((len = tpm_transmit(chip, data, sizeof(data)))
--		    < READ_PCR_RESULT_SIZE)
--			return len;
-+		    < READ_PCR_RESULT_SIZE){
-+			dev_dbg(chip->dev, "A TPM error (%d) occurred"
-+				" attempting to read PCR %d of %d\n",
-+				be32_to_cpu(*((__be32 *) (data + 6))), i, num_pcrs);
-+			goto out;
-+		}
- 		str += sprintf(str, "PCR-%02d: ", i);
- 		for (j = 0; j < TPM_DIGEST_SIZE; j++)
- 			str += sprintf(str, "%02X ", *(data + 10 + j));
- 		str += sprintf(str, "\n");
- 	}
-+out:
- 	return str - buf;
- }
--
--static DEVICE_ATTR(pcrs, S_IRUGO, show_pcrs, NULL);
-+EXPORT_SYMBOL_GPL(tpm_show_pcrs);
- 
- #define  READ_PUBEK_RESULT_SIZE 314
--static u8 readpubek[] = {
-+static const u8 readpubek[] = {
- 	0, 193,			/* TPM_TAG_RQU_COMMAND */
- 	0, 0, 0, 30,		/* length */
- 	0, 0, 0, 124,		/* TPM_ORD_ReadPubek */
- };
- 
--static ssize_t show_pubek(struct device *dev, char *buf)
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
-+ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
-+		       char *buf)
-+#else
-+ssize_t tpm_show_pubek(struct device *dev,
-+		       char *buf)
-+#endif
- {
--	u8 data[READ_PUBEK_RESULT_SIZE];
-+	u8 *data;
- 	ssize_t len;
--	__be32 *native_val;
--	int i;
-+	int i, rc;
- 	char *str = buf;
- 
--	struct tpm_chip *chip =
--	    pci_get_drvdata(container_of(dev, struct pci_dev, dev));
-+	struct tpm_chip *chip = dev_get_drvdata(dev);
- 	if (chip == NULL)
- 		return -ENODEV;
- 
-+	data = kmalloc(READ_PUBEK_RESULT_SIZE, GFP_KERNEL);
-+	if (!data)
-+		return -ENOMEM;
-+
- 	memcpy(data, readpubek, sizeof(readpubek));
- 	memset(data + sizeof(readpubek), 0, 20);	/* zero nonce */
- 
--	if ((len = tpm_transmit(chip, data, sizeof(data))) <
--	    READ_PUBEK_RESULT_SIZE)
--		return len;
-+	if ((len = tpm_transmit(chip, data, READ_PUBEK_RESULT_SIZE)) <
-+	    READ_PUBEK_RESULT_SIZE) {
-+		dev_dbg(chip->dev, "A TPM error (%d) occurred "
-+				"attempting to read the PUBEK\n",
-+			    be32_to_cpu(*((__be32 *) (data + 6))));
-+		rc = 0;
-+		goto out;
-+	}
- 
- 	/* 
- 	   ignore header 10 bytes
-@@ -286,8 +241,6 @@ static ssize_t show_pubek(struct device 
- 	   ignore checksum 20 bytes
- 	 */
- 
--	native_val = (__force __be32 *) (data + 34);
--
- 	str +=
- 	    sprintf(str,
- 		    "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
-@@ -298,21 +251,23 @@ static ssize_t show_pubek(struct device 
- 		    data[15], data[16], data[17], data[22], data[23],
- 		    data[24], data[25], data[26], data[27], data[28],
- 		    data[29], data[30], data[31], data[32], data[33],
--		    be32_to_cpu(*native_val)
--	    );
-+		    be32_to_cpu(*((__be32 *) (data + 34))));
- 
- 	for (i = 0; i < 256; i++) {
--		str += sprintf(str, "%02X ", data[i + 39]);
-+		str += sprintf(str, "%02X ", data[i + 38]);
- 		if ((i + 1) % 16 == 0)
- 			str += sprintf(str, "\n");
- 	}
--	return str - buf;
-+	rc = str - buf;
-+out:
-+	kfree(data);
-+	return rc;
- }
- 
--static DEVICE_ATTR(pubek, S_IRUGO, show_pubek, NULL);
-+EXPORT_SYMBOL_GPL(tpm_show_pubek);
- 
- #define CAP_VER_RESULT_SIZE 18
--static u8 cap_version[] = {
-+static const u8 cap_version[] = {
- 	0, 193,			/* TPM_TAG_RQU_COMMAND */
- 	0, 0, 0, 18,		/* length */
- 	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
-@@ -321,7 +276,7 @@ static u8 cap_version[] = {
- };
- 
- #define CAP_MANUFACTURER_RESULT_SIZE 18
--static u8 cap_manufacturer[] = {
-+static const u8 cap_manufacturer[] = {
- 	0, 193,			/* TPM_TAG_RQU_COMMAND */
- 	0, 0, 0, 22,		/* length */
- 	0, 0, 0, 101,		/* TPM_ORD_GetCapability */
-@@ -330,14 +285,19 @@ static u8 cap_manufacturer[] = {
- 	0, 0, 1, 3
- };
- 
--static ssize_t show_caps(struct device *dev, char *buf)
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
-+ssize_t tpm_show_caps(struct device *dev, struct device_attribute *attr,
-+		      char *buf)
-+#else
-+ssize_t tpm_show_caps(struct device *dev,
-+		      char *buf)
-+#endif
- {
--	u8 data[READ_PUBEK_RESULT_SIZE];
-+	u8 data[sizeof(cap_manufacturer)];
- 	ssize_t len;
- 	char *str = buf;
- 
--	struct tpm_chip *chip =
--	    pci_get_drvdata(container_of(dev, struct pci_dev, dev));
-+	struct tpm_chip *chip = dev_get_drvdata(dev);
- 	if (chip == NULL)
- 		return -ENODEV;
- 
-@@ -348,7 +308,7 @@ static ssize_t show_caps(struct device *
- 		return len;
- 
- 	str += sprintf(str, "Manufacturer: 0x%x\n",
--		       be32_to_cpu(*(data + 14)));
-+		       be32_to_cpu(*((__be32 *) (data + 14))));
- 
- 	memcpy(data, cap_version, sizeof(cap_version));
- 
-@@ -363,8 +323,25 @@ static ssize_t show_caps(struct device *
- 
- 	return str - buf;
- }
-+EXPORT_SYMBOL_GPL(tpm_show_caps);
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
-+ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
-+			const char *buf, size_t count)
-+#else
-+ssize_t tpm_store_cancel(struct device *dev,
-+			const char *buf, size_t count)
-+#endif
-+{
-+	struct tpm_chip *chip = dev_get_drvdata(dev);
-+	if (chip == NULL)
-+		return 0;
-+
-+	chip->vendor->cancel(chip);
-+	return count;
-+}
-+EXPORT_SYMBOL_GPL(tpm_store_cancel);
- 
--static DEVICE_ATTR(caps, S_IRUGO, show_caps, NULL);
- 
- /*
-  * Device file system interface to the TPM
-@@ -389,21 +366,21 @@ int tpm_open(struct inode *inode, struct
- 	}
- 
- 	if (chip->num_opens) {
--		dev_dbg(&chip->pci_dev->dev,
-+		dev_dbg(chip->dev,
- 			"Another process owns this TPM\n");
- 		rc = -EBUSY;
- 		goto err_out;
- 	}
- 
- 	chip->num_opens++;
--	pci_dev_get(chip->pci_dev);
-+	get_device(chip->dev);
- 
- 	spin_unlock(&driver_lock);
- 
--	chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
-+	chip->data_buffer = kmalloc(chip->vendor->buffersize * sizeof(u8), GFP_KERNEL);
- 	if (chip->data_buffer == NULL) {
- 		chip->num_opens--;
--		pci_dev_put(chip->pci_dev);
-+		put_device(chip->dev);
- 		return -ENOMEM;
- 	}
- 
-@@ -422,24 +399,15 @@ EXPORT_SYMBOL_GPL(tpm_open);
- int tpm_release(struct inode *inode, struct file *file)
- {
- 	struct tpm_chip *chip = file->private_data;
--	
--	file->private_data = NULL;
- 
- 	spin_lock(&driver_lock);
-+	file->private_data = NULL;
- 	chip->num_opens--;
--	spin_unlock(&driver_lock);
--
--	down(&chip->timer_manipulation_mutex);
--	if (timer_pending(&chip->user_read_timer))
--		del_singleshot_timer_sync(&chip->user_read_timer);
--	else if (timer_pending(&chip->device_timer))
--		del_singleshot_timer_sync(&chip->device_timer);
--	up(&chip->timer_manipulation_mutex);
--
--	kfree(chip->data_buffer);
-+	del_singleshot_timer_sync(&chip->user_read_timer);
- 	atomic_set(&chip->data_pending, 0);
--
--	pci_dev_put(chip->pci_dev);
-+	put_device(chip->dev);
-+	kfree(chip->data_buffer);
-+	spin_unlock(&driver_lock);
- 	return 0;
- }
- 
-@@ -453,15 +421,13 @@ ssize_t tpm_write(struct file * file, co
- 
- 	/* cannot perform a write until the read has cleared
- 	   either via tpm_read or a user_read_timer timeout */
--	while (atomic_read(&chip->data_pending) != 0) {
--		set_current_state(TASK_UNINTERRUPTIBLE);
--		schedule_timeout(TPM_TIMEOUT);
--	}
-+	while (atomic_read(&chip->data_pending) != 0)
-+		msleep(TPM_TIMEOUT);
- 
- 	down(&chip->buffer_mutex);
- 
--	if (in_size > TPM_BUFSIZE)
--		in_size = TPM_BUFSIZE;
-+	if (in_size > chip->vendor->buffersize)
-+		in_size = chip->vendor->buffersize;
- 
- 	if (copy_from_user
- 	    (chip->data_buffer, (void __user *) buf, in_size)) {
-@@ -470,19 +436,15 @@ ssize_t tpm_write(struct file * file, co
- 	}
- 
- 	/* atomic tpm command send and result receive */
--	out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
-+	out_size = tpm_transmit(chip, chip->data_buffer, 
-+	                        chip->vendor->buffersize);
- 
- 	atomic_set(&chip->data_pending, out_size);
-+	atomic_set(&chip->data_position, 0);
- 	up(&chip->buffer_mutex);
- 
- 	/* Set a timeout by which the reader must come claim the result */
--	down(&chip->timer_manipulation_mutex);
--	init_timer(&chip->user_read_timer);
--	chip->user_read_timer.function = user_reader_timeout;
--	chip->user_read_timer.data = (unsigned long) chip;
--	chip->user_read_timer.expires = jiffies + (60 * HZ);
--	add_timer(&chip->user_read_timer);
--	up(&chip->timer_manipulation_mutex);
-+	mod_timer(&chip->user_read_timer, jiffies + (60 * HZ));
- 
- 	return in_size;
- }
-@@ -493,43 +455,46 @@ ssize_t tpm_read(struct file * file, cha
- 		 size_t size, loff_t * off)
- {
- 	struct tpm_chip *chip = file->private_data;
--	int ret_size = -ENODATA;
--
--	if (atomic_read(&chip->data_pending) != 0) {	/* Result available */
--		down(&chip->timer_manipulation_mutex);
--		del_singleshot_timer_sync(&chip->user_read_timer);
--		up(&chip->timer_manipulation_mutex);
-+	int ret_size;
-+	int pos, pending = 0;
- 
--		down(&chip->buffer_mutex);
-+	ret_size = atomic_read(&chip->data_pending);
-+	if (ret_size > 0) {	/* relay data */
-+		if (size < ret_size)
-+			ret_size = size;
- 
--		ret_size = atomic_read(&chip->data_pending);
--		atomic_set(&chip->data_pending, 0);
-+		pos = atomic_read(&chip->data_position);
- 
--		if (ret_size == 0)	/* timeout just occurred */
--			ret_size = -ETIME;
--		else if (ret_size > 0) {	/* relay data */
--			if (size < ret_size)
--				ret_size = size;
--
--			if (copy_to_user((void __user *) buf,
--					 chip->data_buffer, ret_size)) {
--				ret_size = -EFAULT;
-+		down(&chip->buffer_mutex);
-+		if (copy_to_user
-+		    ((void __user *) buf, &chip->data_buffer[pos], ret_size)) {
-+			ret_size = -EFAULT;
-+		} else {
-+			pending = atomic_read(&chip->data_pending) - ret_size;
-+			if ( pending ) {
-+				atomic_set( &chip->data_pending, pending );
-+				atomic_set( &chip->data_position, pos+ret_size );
- 			}
- 		}
- 		up(&chip->buffer_mutex);
- 	}
-+	
-+	if ( ret_size <= 0 || pending == 0 ) {
-+		atomic_set( &chip->data_pending, 0 );
-+		del_singleshot_timer_sync(&chip->user_read_timer);
-+	}
- 
- 	return ret_size;
- }
- 
- EXPORT_SYMBOL_GPL(tpm_read);
- 
--void __devexit tpm_remove(struct pci_dev *pci_dev)
-+void tpm_remove_hardware(struct device *dev)
- {
--	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
-+	struct tpm_chip *chip = dev_get_drvdata(dev);
- 
- 	if (chip == NULL) {
--		dev_err(&pci_dev->dev, "No device data found\n");
-+		dev_err(dev, "No device data found\n");
- 		return;
- 	}
- 
-@@ -539,23 +504,20 @@ void __devexit tpm_remove(struct pci_dev
- 
- 	spin_unlock(&driver_lock);
- 
--	pci_set_drvdata(pci_dev, NULL);
-+	dev_set_drvdata(dev, NULL);
- 	misc_deregister(&chip->vendor->miscdev);
-+	kfree(chip->vendor->miscdev.name);
- 
--	device_remove_file(&pci_dev->dev, &dev_attr_pubek);
--	device_remove_file(&pci_dev->dev, &dev_attr_pcrs);
--	device_remove_file(&pci_dev->dev, &dev_attr_caps);
--
--	pci_disable_device(pci_dev);
-+	sysfs_remove_group(&dev->kobj, chip->vendor->attr_group);
- 
--	dev_mask[chip->dev_num / 32] &= !(1 << (chip->dev_num % 32));
-+	dev_mask[chip->dev_num / TPM_NUM_MASK_ENTRIES ] &= !(1 << (chip->dev_num % TPM_NUM_MASK_ENTRIES));
- 
- 	kfree(chip);
- 
--	pci_dev_put(pci_dev);
-+	put_device(dev);
- }
- 
--EXPORT_SYMBOL_GPL(tpm_remove);
-+EXPORT_SYMBOL_GPL(tpm_remove_hardware);
- 
- static u8 savestate[] = {
- 	0, 193,			/* TPM_TAG_RQU_COMMAND */
-@@ -590,10 +552,6 @@ int tpm_pm_resume(struct pci_dev *pci_de
- 	if (chip == NULL)
- 		return -ENODEV;
- 
--	spin_lock(&driver_lock);
--	tpm_lpc_bus_init(pci_dev, chip->vendor->base);
--	spin_unlock(&driver_lock);
--
- 	return 0;
- }
- 
-@@ -606,10 +564,12 @@ EXPORT_SYMBOL_GPL(tpm_pm_resume);
-  * upon errant exit from this function specific probe function should call
-  * pci_disable_device
-  */
--int tpm_register_hardware(struct pci_dev *pci_dev,
-+int tpm_register_hardware(struct device *dev,
- 			  struct tpm_vendor_specific *entry)
- {
--	char devname[7];
-+#define DEVNAME_SIZE 7
-+
-+	char *devname;
- 	struct tpm_chip *chip;
- 	int i, j;
- 
-@@ -622,24 +582,34 @@ int tpm_register_hardware(struct pci_dev
- 
- 	init_MUTEX(&chip->buffer_mutex);
- 	init_MUTEX(&chip->tpm_mutex);
--	init_MUTEX(&chip->timer_manipulation_mutex);
- 	INIT_LIST_HEAD(&chip->list);
- 
-+	init_timer(&chip->user_read_timer);
-+	chip->user_read_timer.function = user_reader_timeout;
-+	chip->user_read_timer.data = (unsigned long) chip;
-+
- 	chip->vendor = entry;
-+	
-+	if (entry->buffersize < TPM_MIN_BUFSIZE) {
-+		entry->buffersize = TPM_MIN_BUFSIZE;
-+	} else if (entry->buffersize > TPM_MAX_BUFSIZE) {
-+		entry->buffersize = TPM_MAX_BUFSIZE;
-+	}
- 
- 	chip->dev_num = -1;
- 
--	for (i = 0; i < 32; i++)
--		for (j = 0; j < 8; j++)
-+	for (i = 0; i < TPM_NUM_MASK_ENTRIES; i++)
-+		for (j = 0; j < 8 * sizeof(int); j++)
- 			if ((dev_mask[i] & (1 << j)) == 0) {
--				chip->dev_num = i * 32 + j;
-+				chip->dev_num =
-+				    i * TPM_NUM_MASK_ENTRIES + j;
- 				dev_mask[i] |= 1 << j;
- 				goto dev_num_search_complete;
- 			}
- 
- dev_num_search_complete:
- 	if (chip->dev_num < 0) {
--		dev_err(&pci_dev->dev,
-+		dev_err(dev,
- 			"No available tpm device numbers\n");
- 		kfree(chip);
- 		return -ENODEV;
-@@ -648,48 +618,38 @@ dev_num_search_complete:
- 	else
- 		chip->vendor->miscdev.minor = MISC_DYNAMIC_MINOR;
- 
--	snprintf(devname, sizeof(devname), "%s%d", "tpm", chip->dev_num);
-+	devname = kmalloc(DEVNAME_SIZE, GFP_KERNEL);
-+	scnprintf(devname, DEVNAME_SIZE, "%s%d", "tpm", chip->dev_num);
- 	chip->vendor->miscdev.name = devname;
- 
--	chip->vendor->miscdev.dev = &(pci_dev->dev);
--	chip->pci_dev = pci_dev_get(pci_dev);
-+	chip->vendor->miscdev.dev = dev;
-+	chip->dev = get_device(dev);
- 
- 	if (misc_register(&chip->vendor->miscdev)) {
--		dev_err(&chip->pci_dev->dev,
-+		dev_err(chip->dev,
- 			"unable to misc_register %s, minor %d\n",
- 			chip->vendor->miscdev.name,
- 			chip->vendor->miscdev.minor);
--		pci_dev_put(pci_dev);
-+		put_device(dev);
- 		kfree(chip);
- 		dev_mask[i] &= !(1 << j);
- 		return -ENODEV;
- 	}
- 
--	pci_set_drvdata(pci_dev, chip);
-+	spin_lock(&driver_lock);
- 
--	list_add(&chip->list, &tpm_chip_list);
-+	dev_set_drvdata(dev, chip);
- 
--	device_create_file(&pci_dev->dev, &dev_attr_pubek);
--	device_create_file(&pci_dev->dev, &dev_attr_pcrs);
--	device_create_file(&pci_dev->dev, &dev_attr_caps);
-+	list_add(&chip->list, &tpm_chip_list);
- 
--	return 0;
--}
-+	spin_unlock(&driver_lock);
- 
--EXPORT_SYMBOL_GPL(tpm_register_hardware);
-+	sysfs_create_group(&dev->kobj, chip->vendor->attr_group);
- 
--static int __init init_tpm(void)
--{
- 	return 0;
- }
- 
--static void __exit cleanup_tpm(void)
--{
--
--}
--
--module_init(init_tpm);
--module_exit(cleanup_tpm);
-+EXPORT_SYMBOL_GPL(tpm_register_hardware);
- 
- MODULE_AUTHOR("Leendert van Doorn (leendert at watson.ibm.com)");
- MODULE_DESCRIPTION("TPM Driver");
-diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm.h linux-2.6.12-xen/drivers/char/tpm/tpm.h
---- pristine-linux-2.6.12/drivers/char/tpm/tpm.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/tpm/tpm.h	2006-02-16 23:44:08.000000000 +0100
-@@ -25,27 +25,56 @@
- #include <linux/fs.h>
- #include <linux/miscdevice.h>
- 
--#define TPM_TIMEOUT msecs_to_jiffies(5)
-+enum tpm_timeout {
-+	TPM_TIMEOUT = 5,	/* msecs */
-+};
- 
- /* TPM addresses */
--#define	TPM_ADDR			0x4E
--#define	TPM_DATA			0x4F
-+enum tpm_addr {
-+	TPM_SUPERIO_ADDR = 0x2E,
-+	TPM_ADDR = 0x4E,
-+};
-+
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
-+extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
-+				char *);
-+extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
-+				char *);
-+extern ssize_t tpm_show_caps(struct device *, struct device_attribute *attr,
-+				char *);
-+extern ssize_t tpm_store_cancel(struct device *, struct device_attribute *attr,
-+				const char *, size_t);
-+#else
-+extern ssize_t tpm_show_pubek(struct device *,
-+				char *);
-+extern ssize_t tpm_show_pcrs(struct device *,
-+				char *);
-+extern ssize_t tpm_show_caps(struct device *,
-+				char *);
-+extern ssize_t tpm_store_cancel(struct device *,
-+				const char *, size_t);
-+#endif
- 
- struct tpm_chip;
- 
- struct tpm_vendor_specific {
- 	u8 req_complete_mask;
- 	u8 req_complete_val;
-+	u8 req_canceled;
- 	u16 base;		/* TPM base address */
-+	int drv_type;
-+	u32 buffersize;
- 
- 	int (*recv) (struct tpm_chip *, u8 *, size_t);
- 	int (*send) (struct tpm_chip *, u8 *, size_t);
- 	void (*cancel) (struct tpm_chip *);
-+	u8 (*status) (struct tpm_chip *);
- 	struct miscdevice miscdev;
-+	struct attribute_group *attr_group;
- };
- 
- struct tpm_chip {
--	struct pci_dev *pci_dev;	/* PCI device stuff */
-+	struct device *dev;	/* Device stuff */
- 
- 	int dev_num;		/* /dev/tpm# */
- 	int num_opens;		/* only one allowed */
-@@ -54,40 +83,36 @@ struct tpm_chip {
- 	/* Data passed to and from the tpm via the read/write calls */
- 	u8 *data_buffer;
- 	atomic_t data_pending;
-+	atomic_t data_position;
- 	struct semaphore buffer_mutex;
- 
- 	struct timer_list user_read_timer;	/* user needs to claim result */
- 	struct semaphore tpm_mutex;	/* tpm is processing */
--	struct timer_list device_timer;	/* tpm is processing */
--	struct semaphore timer_manipulation_mutex;
- 
- 	struct tpm_vendor_specific *vendor;
- 
- 	struct list_head list;
- };
- 
--static inline int tpm_read_index(int index)
-+static inline int tpm_read_index(int base, int index)
- {
--	outb(index, TPM_ADDR);
--	return inb(TPM_DATA) & 0xFF;
-+	outb(index, base);
-+	return inb(base+1) & 0xFF;
- }
- 
--static inline void tpm_write_index(int index, int value)
-+static inline void tpm_write_index(int base, int index, int value)
- {
--	outb(index, TPM_ADDR);
--	outb(value & 0xFF, TPM_DATA);
-+	outb(index, base);
-+	outb(value & 0xFF, base+1);
- }
- 
--extern void tpm_time_expired(unsigned long);
--extern int tpm_lpc_bus_init(struct pci_dev *, u16);
--
--extern int tpm_register_hardware(struct pci_dev *,
-+extern int tpm_register_hardware(struct device *,
- 				 struct tpm_vendor_specific *);
- extern int tpm_open(struct inode *, struct file *);
- extern int tpm_release(struct inode *, struct file *);
- extern ssize_t tpm_write(struct file *, const char __user *, size_t,
- 			 loff_t *);
- extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
--extern void __devexit tpm_remove(struct pci_dev *);
-+extern void tpm_remove_hardware(struct device *);
- extern int tpm_pm_suspend(struct pci_dev *, pm_message_t);
- extern int tpm_pm_resume(struct pci_dev *);
-diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm_nsc.c linux-2.6.12-xen/drivers/char/tpm/tpm_nsc.c
---- pristine-linux-2.6.12/drivers/char/tpm/tpm_nsc.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/tpm/tpm_nsc.c	2006-02-16 23:44:08.000000000 +0100
-@@ -22,43 +22,52 @@
- #include "tpm.h"
- 
- /* National definitions */
--#define	TPM_NSC_BASE			0x360
--#define	TPM_NSC_IRQ			0x07
-+enum tpm_nsc_addr{
-+	TPM_NSC_IRQ = 0x07,
-+	TPM_NSC_BASE0_HI = 0x60,
-+	TPM_NSC_BASE0_LO = 0x61,
-+	TPM_NSC_BASE1_HI = 0x62,
-+	TPM_NSC_BASE1_LO = 0x63
-+};
-+
-+enum tpm_nsc_index {
-+	NSC_LDN_INDEX = 0x07,
-+	NSC_SID_INDEX = 0x20,
-+	NSC_LDC_INDEX = 0x30,
-+	NSC_DIO_INDEX = 0x60,
-+	NSC_CIO_INDEX = 0x62,
-+	NSC_IRQ_INDEX = 0x70,
-+	NSC_ITS_INDEX = 0x71
-+};
- 
--#define	NSC_LDN_INDEX			0x07
--#define	NSC_SID_INDEX			0x20
--#define	NSC_LDC_INDEX			0x30
--#define	NSC_DIO_INDEX			0x60
--#define	NSC_CIO_INDEX			0x62
--#define	NSC_IRQ_INDEX			0x70
--#define	NSC_ITS_INDEX			0x71
--
--#define	NSC_STATUS			0x01
--#define	NSC_COMMAND			0x01
--#define	NSC_DATA			0x00
-+enum tpm_nsc_status_loc {
-+	NSC_STATUS = 0x01,
-+	NSC_COMMAND = 0x01,
-+	NSC_DATA = 0x00
-+};
- 
- /* status bits */
--#define	NSC_STATUS_OBF			0x01	/* output buffer full */
--#define	NSC_STATUS_IBF			0x02	/* input buffer full */
--#define	NSC_STATUS_F0			0x04	/* F0 */
--#define	NSC_STATUS_A2			0x08	/* A2 */
--#define	NSC_STATUS_RDY			0x10	/* ready to receive command */
--#define	NSC_STATUS_IBR			0x20	/* ready to receive data */
-+enum tpm_nsc_status {
-+	NSC_STATUS_OBF = 0x01,	/* output buffer full */
-+	NSC_STATUS_IBF = 0x02,	/* input buffer full */
-+	NSC_STATUS_F0 = 0x04,	/* F0 */
-+	NSC_STATUS_A2 = 0x08,	/* A2 */
-+	NSC_STATUS_RDY = 0x10,	/* ready to receive command */
-+	NSC_STATUS_IBR = 0x20	/* ready to receive data */
-+};
- 
- /* command bits */
--#define	NSC_COMMAND_NORMAL		0x01	/* normal mode */
--#define	NSC_COMMAND_EOC			0x03
--#define	NSC_COMMAND_CANCEL		0x22
--
-+enum tpm_nsc_cmd_mode {
-+	NSC_COMMAND_NORMAL = 0x01,	/* normal mode */
-+	NSC_COMMAND_EOC = 0x03,
-+	NSC_COMMAND_CANCEL = 0x22
-+};
- /*
-  * Wait for a certain status to appear
-  */
- static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data)
- {
--	int expired = 0;
--	struct timer_list status_timer =
--	    TIMER_INITIALIZER(tpm_time_expired, jiffies + 10 * HZ,
--			      (unsigned long) &expired);
-+	unsigned long stop;
- 
- 	/* status immediately available check */
- 	*data = inb(chip->vendor->base + NSC_STATUS);
-@@ -66,17 +75,14 @@ static int wait_for_stat(struct tpm_chip
- 		return 0;
- 
- 	/* wait for status */
--	add_timer(&status_timer);
-+	stop = jiffies + 10 * HZ;
- 	do {
--		set_current_state(TASK_UNINTERRUPTIBLE);
--		schedule_timeout(TPM_TIMEOUT);
-+		msleep(TPM_TIMEOUT);
- 		*data = inb(chip->vendor->base + 1);
--		if ((*data & mask) == val) {
--			del_singleshot_timer_sync(&status_timer);
-+		if ((*data & mask) == val)
- 			return 0;
--		}
- 	}
--	while (!expired);
-+	while (time_before(jiffies, stop));
- 
- 	return -EBUSY;
- }
-@@ -84,10 +90,7 @@ static int wait_for_stat(struct tpm_chip
- static int nsc_wait_for_ready(struct tpm_chip *chip)
- {
- 	int status;
--	int expired = 0;
--	struct timer_list status_timer =
--	    TIMER_INITIALIZER(tpm_time_expired, jiffies + 100,
--			      (unsigned long) &expired);
-+	unsigned long stop;
- 
- 	/* status immediately available check */
- 	status = inb(chip->vendor->base + NSC_STATUS);
-@@ -97,21 +100,18 @@ static int nsc_wait_for_ready(struct tpm
- 		return 0;
- 
- 	/* wait for status */
--	add_timer(&status_timer);
-+	stop = jiffies + 100;
- 	do {
--		set_current_state(TASK_UNINTERRUPTIBLE);
--		schedule_timeout(TPM_TIMEOUT);
-+		msleep(TPM_TIMEOUT);
- 		status = inb(chip->vendor->base + NSC_STATUS);
- 		if (status & NSC_STATUS_OBF)
- 			status = inb(chip->vendor->base + NSC_DATA);
--		if (status & NSC_STATUS_RDY) {
--			del_singleshot_timer_sync(&status_timer);
-+		if (status & NSC_STATUS_RDY)
- 			return 0;
--		}
- 	}
--	while (!expired);
-+	while (time_before(jiffies, stop));
- 
--	dev_info(&chip->pci_dev->dev, "wait for ready failed\n");
-+	dev_info(chip->dev, "wait for ready failed\n");
- 	return -EBUSY;
- }
- 
-@@ -127,12 +127,12 @@ static int tpm_nsc_recv(struct tpm_chip 
- 		return -EIO;
- 
- 	if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
--		dev_err(&chip->pci_dev->dev, "F0 timeout\n");
-+		dev_err(chip->dev, "F0 timeout\n");
- 		return -EIO;
- 	}
- 	if ((data =
- 	     inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
--		dev_err(&chip->pci_dev->dev, "not in normal mode (0x%x)\n",
-+		dev_err(chip->dev, "not in normal mode (0x%x)\n",
- 			data);
- 		return -EIO;
- 	}
-@@ -141,7 +141,7 @@ static int tpm_nsc_recv(struct tpm_chip 
- 	for (p = buffer; p < &buffer[count]; p++) {
- 		if (wait_for_stat
- 		    (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
--			dev_err(&chip->pci_dev->dev,
-+			dev_err(chip->dev,
- 				"OBF timeout (while reading data)\n");
- 			return -EIO;
- 		}
-@@ -150,12 +150,13 @@ static int tpm_nsc_recv(struct tpm_chip 
- 		*p = inb(chip->vendor->base + NSC_DATA);
- 	}
- 
--	if ((data & NSC_STATUS_F0) == 0) {
--		dev_err(&chip->pci_dev->dev, "F0 not set\n");
-+	if ((data & NSC_STATUS_F0) == 0 &&
-+	(wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
-+		dev_err(chip->dev, "F0 not set\n");
- 		return -EIO;
- 	}
- 	if ((data = inb(chip->vendor->base + NSC_DATA)) != NSC_COMMAND_EOC) {
--		dev_err(&chip->pci_dev->dev,
-+		dev_err(chip->dev,
- 			"expected end of command(0x%x)\n", data);
- 		return -EIO;
- 	}
-@@ -186,19 +187,19 @@ static int tpm_nsc_send(struct tpm_chip 
- 		return -EIO;
- 
- 	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
--		dev_err(&chip->pci_dev->dev, "IBF timeout\n");
-+		dev_err(chip->dev, "IBF timeout\n");
- 		return -EIO;
- 	}
- 
- 	outb(NSC_COMMAND_NORMAL, chip->vendor->base + NSC_COMMAND);
- 	if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
--		dev_err(&chip->pci_dev->dev, "IBR timeout\n");
-+		dev_err(chip->dev, "IBR timeout\n");
- 		return -EIO;
- 	}
- 
- 	for (i = 0; i < count; i++) {
- 		if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
--			dev_err(&chip->pci_dev->dev,
-+			dev_err(chip->dev,
- 				"IBF timeout (while writing data)\n");
- 			return -EIO;
- 		}
-@@ -206,7 +207,7 @@ static int tpm_nsc_send(struct tpm_chip 
- 	}
- 
- 	if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
--		dev_err(&chip->pci_dev->dev, "IBF timeout\n");
-+		dev_err(chip->dev, "IBF timeout\n");
- 		return -EIO;
- 	}
- 	outb(NSC_COMMAND_EOC, chip->vendor->base + NSC_COMMAND);
-@@ -219,6 +220,11 @@ static void tpm_nsc_cancel(struct tpm_ch
- 	outb(NSC_COMMAND_CANCEL, chip->vendor->base + NSC_COMMAND);
- }
- 
-+static u8 tpm_nsc_status(struct tpm_chip *chip)
-+{
-+	return inb(chip->vendor->base + NSC_STATUS);
-+}
-+
- static struct file_operations nsc_ops = {
- 	.owner = THIS_MODULE,
- 	.llseek = no_llseek,
-@@ -228,102 +234,98 @@ static struct file_operations nsc_ops = 
- 	.release = tpm_release,
- };
- 
-+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-+static DEVICE_ATTR(cancel, S_IWUSR|S_IWGRP, NULL, tpm_store_cancel);
-+
-+static struct attribute * nsc_attrs[] = {
-+	&dev_attr_pubek.attr,
-+	&dev_attr_pcrs.attr,
-+	&dev_attr_caps.attr,
-+	&dev_attr_cancel.attr,
-+	0,
-+};
-+
-+static struct attribute_group nsc_attr_grp = { .attrs = nsc_attrs };
-+
- static struct tpm_vendor_specific tpm_nsc = {
- 	.recv = tpm_nsc_recv,
- 	.send = tpm_nsc_send,
- 	.cancel = tpm_nsc_cancel,
-+	.status = tpm_nsc_status,
- 	.req_complete_mask = NSC_STATUS_OBF,
- 	.req_complete_val = NSC_STATUS_OBF,
--	.base = TPM_NSC_BASE,
-+	.req_canceled = NSC_STATUS_RDY,
-+	.attr_group = &nsc_attr_grp,
- 	.miscdev = { .fops = &nsc_ops, },
--	
- };
- 
- static int __devinit tpm_nsc_init(struct pci_dev *pci_dev,
- 				  const struct pci_device_id *pci_id)
- {
- 	int rc = 0;
-+	int lo, hi;
-+	int nscAddrBase = TPM_ADDR;
-+
- 
- 	if (pci_enable_device(pci_dev))
- 		return -EIO;
- 
--	if (tpm_lpc_bus_init(pci_dev, TPM_NSC_BASE)) {
--		rc = -ENODEV;
--		goto out_err;
--	}
-+	/* select PM channel 1 */
-+	tpm_write_index(nscAddrBase,NSC_LDN_INDEX, 0x12);
- 
- 	/* verify that it is a National part (SID) */
--	if (tpm_read_index(NSC_SID_INDEX) != 0xEF) {
--		rc = -ENODEV;
--		goto out_err;
-+	if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
-+		nscAddrBase = (tpm_read_index(TPM_SUPERIO_ADDR, 0x2C)<<8)|
-+			(tpm_read_index(TPM_SUPERIO_ADDR, 0x2B)&0xFE);
-+		if (tpm_read_index(nscAddrBase, NSC_SID_INDEX) != 0xF6) {
-+			rc = -ENODEV;
-+			goto out_err;
-+		}
- 	}
- 
-+	hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
-+	lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
-+	tpm_nsc.base = (hi<<8) | lo;
-+
- 	dev_dbg(&pci_dev->dev, "NSC TPM detected\n");
- 	dev_dbg(&pci_dev->dev,
- 		"NSC LDN 0x%x, SID 0x%x, SRID 0x%x\n",
--		tpm_read_index(0x07), tpm_read_index(0x20),
--		tpm_read_index(0x27));
-+		tpm_read_index(nscAddrBase,0x07), tpm_read_index(nscAddrBase,0x20),
-+		tpm_read_index(nscAddrBase,0x27));
- 	dev_dbg(&pci_dev->dev,
- 		"NSC SIOCF1 0x%x SIOCF5 0x%x SIOCF6 0x%x SIOCF8 0x%x\n",
--		tpm_read_index(0x21), tpm_read_index(0x25),
--		tpm_read_index(0x26), tpm_read_index(0x28));
-+		tpm_read_index(nscAddrBase,0x21), tpm_read_index(nscAddrBase,0x25),
-+		tpm_read_index(nscAddrBase,0x26), tpm_read_index(nscAddrBase,0x28));
- 	dev_dbg(&pci_dev->dev, "NSC IO Base0 0x%x\n",
--		(tpm_read_index(0x60) << 8) | tpm_read_index(0x61));
-+		(tpm_read_index(nscAddrBase,0x60) << 8) | tpm_read_index(nscAddrBase,0x61));
- 	dev_dbg(&pci_dev->dev, "NSC IO Base1 0x%x\n",
--		(tpm_read_index(0x62) << 8) | tpm_read_index(0x63));
-+		(tpm_read_index(nscAddrBase,0x62) << 8) | tpm_read_index(nscAddrBase,0x63));
- 	dev_dbg(&pci_dev->dev, "NSC Interrupt number and wakeup 0x%x\n",
--		tpm_read_index(0x70));
-+		tpm_read_index(nscAddrBase,0x70));
- 	dev_dbg(&pci_dev->dev, "NSC IRQ type select 0x%x\n",
--		tpm_read_index(0x71));
-+		tpm_read_index(nscAddrBase,0x71));
- 	dev_dbg(&pci_dev->dev,
- 		"NSC DMA channel select0 0x%x, select1 0x%x\n",
--		tpm_read_index(0x74), tpm_read_index(0x75));
-+		tpm_read_index(nscAddrBase,0x74), tpm_read_index(nscAddrBase,0x75));
- 	dev_dbg(&pci_dev->dev,
- 		"NSC Config "
- 		"0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
--		tpm_read_index(0xF0), tpm_read_index(0xF1),
--		tpm_read_index(0xF2), tpm_read_index(0xF3),
--		tpm_read_index(0xF4), tpm_read_index(0xF5),
--		tpm_read_index(0xF6), tpm_read_index(0xF7),
--		tpm_read_index(0xF8), tpm_read_index(0xF9));
-+		tpm_read_index(nscAddrBase,0xF0), tpm_read_index(nscAddrBase,0xF1),
-+		tpm_read_index(nscAddrBase,0xF2), tpm_read_index(nscAddrBase,0xF3),
-+		tpm_read_index(nscAddrBase,0xF4), tpm_read_index(nscAddrBase,0xF5),
-+		tpm_read_index(nscAddrBase,0xF6), tpm_read_index(nscAddrBase,0xF7),
-+		tpm_read_index(nscAddrBase,0xF8), tpm_read_index(nscAddrBase,0xF9));
- 
- 	dev_info(&pci_dev->dev,
--		 "NSC PC21100 TPM revision %d\n",
--		 tpm_read_index(0x27) & 0x1F);
--
--	if (tpm_read_index(NSC_LDC_INDEX) == 0)
--		dev_info(&pci_dev->dev, ": NSC TPM not active\n");
--
--	/* select PM channel 1 */
--	tpm_write_index(NSC_LDN_INDEX, 0x12);
--	tpm_read_index(NSC_LDN_INDEX);
--
--	/* disable the DPM module */
--	tpm_write_index(NSC_LDC_INDEX, 0);
--	tpm_read_index(NSC_LDC_INDEX);
--
--	/* set the data register base addresses */
--	tpm_write_index(NSC_DIO_INDEX, TPM_NSC_BASE >> 8);
--	tpm_write_index(NSC_DIO_INDEX + 1, TPM_NSC_BASE);
--	tpm_read_index(NSC_DIO_INDEX);
--	tpm_read_index(NSC_DIO_INDEX + 1);
--
--	/* set the command register base addresses */
--	tpm_write_index(NSC_CIO_INDEX, (TPM_NSC_BASE + 1) >> 8);
--	tpm_write_index(NSC_CIO_INDEX + 1, (TPM_NSC_BASE + 1));
--	tpm_read_index(NSC_DIO_INDEX);
--	tpm_read_index(NSC_DIO_INDEX + 1);
--
--	/* set the interrupt number to be used for the host interface */
--	tpm_write_index(NSC_IRQ_INDEX, TPM_NSC_IRQ);
--	tpm_write_index(NSC_ITS_INDEX, 0x00);
--	tpm_read_index(NSC_IRQ_INDEX);
-+		 "NSC TPM revision %d\n",
-+		 tpm_read_index(nscAddrBase, 0x27) & 0x1F);
- 
- 	/* enable the DPM module */
--	tpm_write_index(NSC_LDC_INDEX, 0x01);
--	tpm_read_index(NSC_LDC_INDEX);
-+	tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01);
- 
--	if ((rc = tpm_register_hardware(pci_dev, &tpm_nsc)) < 0)
-+	if ((rc = tpm_register_hardware(&pci_dev->dev, &tpm_nsc)) < 0)
- 		goto out_err;
- 
- 	return 0;
-@@ -333,12 +335,23 @@ out_err:
- 	return rc;
- }
- 
-+static void __devexit tpm_nsc_remove(struct pci_dev *pci_dev) 
-+{
-+	struct tpm_chip *chip = pci_get_drvdata(pci_dev);
-+
-+	if ( chip )
-+		tpm_remove_hardware(chip->dev);
-+}
-+
- static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
-+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
-+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
-+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0)},
- 	{PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
- 	{0,}
- };
-@@ -349,7 +362,7 @@ static struct pci_driver nsc_pci_driver 
- 	.name = "tpm_nsc",
- 	.id_table = tpm_pci_tbl,
- 	.probe = tpm_nsc_init,
--	.remove = __devexit_p(tpm_remove),
-+	.remove = __devexit_p(tpm_nsc_remove),
- 	.suspend = tpm_pm_suspend,
- 	.resume = tpm_pm_resume,
- };
-@@ -364,7 +377,7 @@ static void __exit cleanup_nsc(void)
- 	pci_unregister_driver(&nsc_pci_driver);
- }
- 
--module_init(init_nsc);
-+fs_initcall(init_nsc);
- module_exit(cleanup_nsc);
- 
- MODULE_AUTHOR("Leendert van Doorn (leendert at watson.ibm.com)");
-diff -Nurp pristine-linux-2.6.12/drivers/char/tpm/tpm_xen.c linux-2.6.12-xen/drivers/char/tpm/tpm_xen.c
---- pristine-linux-2.6.12/drivers/char/tpm/tpm_xen.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/char/tpm/tpm_xen.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,525 @@
-+/*
-+ * Copyright (C) 2004 IBM Corporation
-+ *
-+ * Authors:
-+ * Leendert van Doorn <leendert at watson.ibm.com>
-+ * Dave Safford <safford at watson.ibm.com>
-+ * Reiner Sailer <sailer at watson.ibm.com>
-+ * Kylene Hall <kjhall at us.ibm.com>
-+ * Stefan Berger <stefanb at us.ibm.com>
-+ *
-+ * Maintained by: <tpmdd_devel at lists.sourceforge.net>
-+ *
-+ * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
-+ * Specifications at www.trustedcomputinggroup.org
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License as
-+ * published by the Free Software Foundation, version 2 of the
-+ * License.
-+ *
-+ */
-+
-+#include <asm/uaccess.h>
-+#include <linux/list.h>
-+#include <asm-xen/tpmfe.h>
-+#include <linux/device.h>
-+#include <linux/interrupt.h>
-+#include "tpm.h"
-+
-+/* read status bits */
-+enum {
-+	STATUS_BUSY = 0x01,
-+	STATUS_DATA_AVAIL = 0x02,
-+	STATUS_READY = 0x04
-+};
-+
-+#define MIN(x,y)  ((x) < (y)) ? (x) : (y)
-+
-+struct transmission {
-+	struct list_head next;
-+	unsigned char *request;
-+	unsigned int request_len;
-+	unsigned char *rcv_buffer;
-+	unsigned int  buffersize;
-+	struct tpm_chip     *chip;
-+	unsigned int flags;
-+};
-+
-+enum {
-+	TRANSMISSION_FLAG_WAS_QUEUED = 0x1
-+};
-+
-+struct data_exchange {
-+	struct transmission *current_request;
-+	spinlock_t           req_list_lock;
-+	wait_queue_head_t    req_wait_queue;
-+
-+	struct list_head     queued_requests;
-+
-+	struct transmission *current_response;
-+	spinlock_t           resp_list_lock;
-+	wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
-+
-+	struct transmission *req_cancelled;       // if a cancellation was encountered
-+
-+	unsigned int         fe_status;
-+	unsigned int         flags;
-+};
-+
-+enum {
-+	DATAEX_FLAG_QUEUED_ONLY = 0x1
-+};
-+
-+static struct data_exchange dataex;
-+
-+static unsigned long disconnect_time;
-+
-+/* local function prototypes */
-+static void __exit cleanup_xen(void);
-+
-+
-+/* =============================================================
-+ * Some utility functions
-+ * =============================================================
-+ */
-+static inline struct transmission *
-+transmission_alloc(void)
-+{
-+	struct transmission *t = kmalloc(sizeof(*t), GFP_KERNEL);
-+	if (t) {
-+		memset(t, 0x0, sizeof(*t));
-+	}
-+	return t;
-+}
-+
-+static inline unsigned char *
-+transmission_set_buffer(struct transmission *t,
-+                        unsigned char *buffer, unsigned int len)
-+{
-+	kfree(t->request);
-+	t->request = kmalloc(len, GFP_KERNEL);
-+	if (t->request) {
-+		memcpy(t->request,
-+		       buffer,
-+		       len);
-+		t->request_len = len;
-+	}
-+	return t->request;
-+}
-+
-+static inline void
-+transmission_free(struct transmission *t)
-+{
-+	kfree(t->request);
-+	kfree(t->rcv_buffer);
-+	kfree(t);
-+}
-+
-+/* =============================================================
-+ * Interface with the TPM shared memory driver for XEN
-+ * =============================================================
-+ */
-+static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
-+{
-+	int ret_size = 0;
-+	struct transmission *t, *temp;
-+
-+	/*
-+	 * The list with requests must contain one request
-+	 * only and the element there must be the one that
-+	 * was passed to me from the front-end.
-+	 */
-+	if (dataex.current_request != ptr) {
-+		printk("WARNING: The request pointer is different than the pointer "
-+		       "the shared memory driver returned to me. %p != %p\n",
-+		       dataex.current_request, ptr);
-+	}
-+
-+	/*
-+	 * If the request has been cancelled, just quit here
-+	 */
-+	if (dataex.req_cancelled == (struct transmission *)ptr) {
-+		if (dataex.current_request == dataex.req_cancelled) {
-+			dataex.current_request = NULL;
-+		}
-+		transmission_free(dataex.req_cancelled);
-+		dataex.req_cancelled = NULL;
-+		return 0;
-+	}
-+
-+	if (NULL != (temp = dataex.current_request)) {
-+		transmission_free(temp);
-+		dataex.current_request = NULL;
-+	}
-+
-+	t = transmission_alloc();
-+	if (NULL != t) {
-+		unsigned long flags;
-+		t->rcv_buffer = kmalloc(count, GFP_KERNEL);
-+		if (NULL == t->rcv_buffer) {
-+			transmission_free(t);
-+			return -ENOMEM;
-+		}
-+		t->buffersize = count;
-+		memcpy(t->rcv_buffer, buffer, count);
-+		ret_size = count;
-+
-+		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
-+		dataex.current_response = t;
-+		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+		wake_up_interruptible(&dataex.resp_wait_queue);
-+	}
-+	return ret_size;
-+}
-+
-+
-+static void tpm_fe_status(unsigned int flags)
-+{
-+	dataex.fe_status = flags;
-+	if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
-+		disconnect_time = jiffies;
-+	}
-+}
-+
-+/* =============================================================
-+ * Interface with the generic TPM driver
-+ * =============================================================
-+ */
-+static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count)
-+{
-+	unsigned long flags;
-+	int rc = 0;
-+
-+	spin_lock_irqsave(&dataex.resp_list_lock, flags);
-+	/*
-+	 * Check if the previous operation only queued the command
-+	 * In this case there won't be a response, so I just
-+	 * return from here and reset that flag. In any other
-+	 * case I should receive a response from the back-end.
-+	 */
-+	if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
-+		dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
-+		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+		/*
-+		 * A little hack here: the first few measurements
-+		 * are queued since there is no way to talk to the
-+		 * TPM yet (due to slowness of the control channel).
-+		 * So we just make IMA happy by giving it 30 NULL
-+		 * bytes back, where the most important part is
-+		 * that the result code is '0'.
-+		 */
-+
-+		count = MIN(count, 30);
-+		memset(buf, 0x0, count);
-+		return count;
-+	}
-+	/*
-+	 * Check whether something is in the responselist and if
-+	 * there's nothing in the list wait for something to appear.
-+	 */
-+
-+	if (NULL == dataex.current_response) {
-+		spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+		interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
-+		                               1000);
-+		spin_lock_irqsave(&dataex.resp_list_lock ,flags);
-+	}
-+
-+	if (NULL != dataex.current_response) {
-+		struct transmission *t = dataex.current_response;
-+		dataex.current_response = NULL;
-+		rc = MIN(count, t->buffersize);
-+		memcpy(buf, t->rcv_buffer, rc);
-+		transmission_free(t);
-+	}
-+
-+	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+	return rc;
-+}
-+
-+static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count)
-+{
-+	/*
-+	 * We simply pass the packet onto the XEN shared
-+	 * memory driver.
-+	 */
-+	unsigned long flags;
-+	int rc;
-+	struct transmission *t = transmission_alloc();
-+
-+	spin_lock_irqsave(&dataex.req_list_lock, flags);
-+	/*
-+	 * If there's a current request, it must be the
-+	 * previous request that has timed out.
-+	 */
-+	if (dataex.current_request != NULL) {
-+		printk("WARNING: Sending although there is a request outstanding.\n"
-+		       "         Previous request must have timed out.\n");
-+		transmission_free(dataex.current_request);
-+		dataex.current_request = NULL;
-+	}
-+
-+	if (t != NULL) {
-+		unsigned int error = 0;
-+		t->rcv_buffer = NULL;
-+		t->buffersize = 0;
-+		t->chip = chip;
-+
-+		/*
-+		 * Queue the packet if the driver below is not
-+		 * ready, yet, or there is any packet already
-+		 * in the queue.
-+		 * If the driver below is ready, unqueue all
-+		 * packets first before sending our current
-+		 * packet.
-+		 * For each unqueued packet, except for the
-+		 * last (=current) packet, call the function
-+		 * tpm_xen_recv to wait for the response to come
-+		 * back.
-+		 */
-+		if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
-+			if (time_after(jiffies, disconnect_time + HZ * 10)) {
-+				rc = -ENOENT;
-+			} else {
-+				/*
-+				 * copy the request into the buffer
-+				 */
-+				if (transmission_set_buffer(t, buf, count)
-+				    == NULL) {
-+					transmission_free(t);
-+					rc = -ENOMEM;
-+					goto exit;
-+				}
-+				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
-+				list_add_tail(&t->next, &dataex.queued_requests);
-+				rc = 0;
-+			}
-+		} else {
-+			/*
-+			 * Check whether there are any packets in the queue
-+			 */
-+			while (!list_empty(&dataex.queued_requests)) {
-+				/*
-+				 * Need to dequeue them.
-+				 * Read the result into a dummy buffer.
-+				 */
-+				unsigned char buffer[1];
-+				struct transmission *qt = (struct transmission *) dataex.queued_requests.next;
-+				list_del(&qt->next);
-+				dataex.current_request = qt;
-+				spin_unlock_irqrestore(&dataex.req_list_lock, flags);
-+
-+				rc = tpm_fe_send(qt->request,
-+				                 qt->request_len,
-+				                 qt);
-+
-+				if (rc < 0) {
-+					spin_lock_irqsave(&dataex.req_list_lock, flags);
-+					if ((qt = dataex.current_request) != NULL) {
-+						/*
-+						 * requeue it at the beginning
-+						 * of the list
-+						 */
-+						list_add(&qt->next,
-+						         &dataex.queued_requests);
-+					}
-+					dataex.current_request = NULL;
-+					error = 1;
-+					break;
-+				}
-+				/*
-+				 * After this point qt is not valid anymore!
-+				 * It is freed when the front-end is delivering the data
-+				 * by calling tpm_recv
-+				 */
-+
-+				/*
-+				 * Try to receive the response now into the provided dummy
-+				 * buffer (I don't really care about this response since
-+				 * there is no receiver anymore for this response)
-+				 */
-+				rc = tpm_xen_recv(chip, buffer, sizeof(buffer));
-+
-+				spin_lock_irqsave(&dataex.req_list_lock, flags);
-+			}
-+
-+			if (error == 0) {
-+				/*
-+				 * Finally, send the current request.
-+				 */
-+				dataex.current_request = t;
-+				/*
-+				 * Call the shared memory driver
-+				 * Pass to it the buffer with the request, the
-+				 * amount of bytes in the request and
-+				 * a void * pointer (here: transmission structure)
-+				 */
-+				rc = tpm_fe_send(buf, count, t);
-+				/*
-+				 * The generic TPM driver will call
-+				 * the function to receive the response.
-+				 */
-+				if (rc < 0) {
-+					dataex.current_request = NULL;
-+					goto queue_it;
-+				}
-+			} else {
-+queue_it:
-+				if (transmission_set_buffer(t, buf, count) == NULL) {
-+					transmission_free(t);
-+					rc = -ENOMEM;
-+					goto exit;
-+				}
-+				/*
-+				 * An error occurred. Don't even try
-+				 * to send the current request. Just
-+				 * queue it.
-+				 */
-+				dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
-+				list_add_tail(&t->next, &dataex.queued_requests);
-+				rc = 0;
-+			}
-+		}
-+	} else {
-+		rc = -ENOMEM;
-+	}
-+
-+exit:
-+	spin_unlock_irqrestore(&dataex.req_list_lock, flags);
-+	return rc;
-+}
-+
-+static void tpm_xen_cancel(struct tpm_chip *chip)
-+{
-+	unsigned long flags;
-+	spin_lock_irqsave(&dataex.resp_list_lock,flags);
-+
-+	dataex.req_cancelled = dataex.current_request;
-+
-+	spin_unlock_irqrestore(&dataex.resp_list_lock,flags);
-+}
-+
-+static u8 tpm_xen_status(struct tpm_chip *chip)
-+{
-+	unsigned long flags;
-+	u8 rc = 0;
-+	spin_lock_irqsave(&dataex.resp_list_lock, flags);
-+	/*
-+	 * Data are available if:
-+	 *  - there's a current response
-+	 *  - the last packet was queued only (this is fake, but necessary to
-+	 *      get the generic TPM layer to call the receive function.)
-+	 */
-+	if (NULL != dataex.current_response ||
-+	    0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
-+		rc = STATUS_DATA_AVAIL;
-+	}
-+	spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-+	return rc;
-+}
-+
-+static struct file_operations tpm_xen_ops = {
-+	.owner = THIS_MODULE,
-+	.llseek = no_llseek,
-+	.open = tpm_open,
-+	.read = tpm_read,
-+	.write = tpm_write,
-+	.release = tpm_release,
-+};
-+
-+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-+static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
-+
-+static struct attribute* xen_attrs[] = {
-+	&dev_attr_pubek.attr,
-+	&dev_attr_pcrs.attr,
-+	&dev_attr_caps.attr,
-+	&dev_attr_cancel.attr,
-+	NULL,
-+};
-+
-+static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };
-+
-+static struct tpm_vendor_specific tpm_xen = {
-+	.recv = tpm_xen_recv,
-+	.send = tpm_xen_send,
-+	.cancel = tpm_xen_cancel,
-+	.status = tpm_xen_status,
-+	.req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
-+	.req_complete_val  = STATUS_DATA_AVAIL,
-+	.req_canceled = STATUS_READY,
-+	.base = 0,
-+	.attr_group = &xen_attr_grp,
-+	.miscdev.fops = &tpm_xen_ops,
-+	.buffersize = 64 * 1024,
-+};
-+
-+static struct device tpm_device = {
-+	.bus_id = "vtpm",
-+};
-+
-+static struct tpmfe_device tpmfe = {
-+	.receive = tpm_recv,
-+	.status  = tpm_fe_status,
-+};
-+
-+
-+static int __init init_xen(void)
-+{
-+	int rc;
-+
-+	/*
-+	 * Register device with the low lever front-end
-+	 * driver
-+	 */
-+	if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
-+		return rc;
-+	}
-+
-+	/*
-+	 * Register our device with the system.
-+	 */
-+	if ((rc = device_register(&tpm_device)) < 0) {
-+		tpm_fe_unregister_receiver();
-+		return rc;
-+	}
-+
-+	tpm_xen.buffersize = tpmfe.max_tx_size;
-+
-+	if ((rc = tpm_register_hardware(&tpm_device, &tpm_xen)) < 0) {
-+		device_unregister(&tpm_device);
-+		tpm_fe_unregister_receiver();
-+		return rc;
-+	}
-+
-+	dataex.current_request = NULL;
-+	spin_lock_init(&dataex.req_list_lock);
-+	init_waitqueue_head(&dataex.req_wait_queue);
-+	INIT_LIST_HEAD(&dataex.queued_requests);
-+
-+	dataex.current_response = NULL;
-+	spin_lock_init(&dataex.resp_list_lock);
-+	init_waitqueue_head(&dataex.resp_wait_queue);
-+
-+	disconnect_time = jiffies;
-+
-+	return 0;
-+}
-+
-+static void __exit cleanup_xen(void)
-+{
-+	tpm_remove_hardware(&tpm_device);
-+	device_unregister(&tpm_device);
-+	tpm_fe_unregister_receiver();
-+}
-+
-+fs_initcall(init_xen);
-+module_exit(cleanup_xen);
-+
-+MODULE_AUTHOR("Stefan Berger (stefanb at us.ibm.com)");
-+MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
-+MODULE_VERSION("1.0");
-+MODULE_LICENSE("GPL");
-diff -Nurp pristine-linux-2.6.12/drivers/char/tty_io.c linux-2.6.12-xen/drivers/char/tty_io.c
---- pristine-linux-2.6.12/drivers/char/tty_io.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/tty_io.c	2006-02-16 23:44:08.000000000 +0100
-@@ -131,6 +131,8 @@ LIST_HEAD(tty_drivers);			/* linked list
-    vt.c for deeply disgusting hack reasons */
- DECLARE_MUTEX(tty_sem);
- 
-+int console_use_vt = 1;
-+
- #ifdef CONFIG_UNIX98_PTYS
- extern struct tty_driver *ptm_driver;	/* Unix98 pty masters; for /dev/ptmx */
- extern int pty_limit;		/* Config limit on Unix98 ptys */
-@@ -1788,7 +1790,7 @@ retry_open:
- 		goto got_driver;
- 	}
- #ifdef CONFIG_VT
--	if (device == MKDEV(TTY_MAJOR,0)) {
-+	if (console_use_vt && (device == MKDEV(TTY_MAJOR,0))) {
- 		extern struct tty_driver *console_driver;
- 		driver = console_driver;
- 		index = fg_console;
-@@ -2966,14 +2968,19 @@ static int __init tty_init(void)
- #endif
- 
- #ifdef CONFIG_VT
--	cdev_init(&vc0_cdev, &console_fops);
--	if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
--	    register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0)
--		panic("Couldn't register /dev/tty0 driver\n");
--	devfs_mk_cdev(MKDEV(TTY_MAJOR, 0), S_IFCHR|S_IRUSR|S_IWUSR, "vc/0");
--	class_simple_device_add(tty_class, MKDEV(TTY_MAJOR, 0), NULL, "tty0");
-+	if (console_use_vt) {
-+		cdev_init(&vc0_cdev, &console_fops);
-+		if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) ||
-+		    register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1,
-+					   "/dev/vc/0") < 0)
-+			panic("Couldn't register /dev/tty0 driver\n");
-+		devfs_mk_cdev(MKDEV(TTY_MAJOR, 0), S_IFCHR|S_IRUSR|S_IWUSR,
-+			      "vc/0");
-+		class_simple_device_add(tty_class, MKDEV(TTY_MAJOR, 0), NULL,
-+					"tty0");
- 
--	vty_init();
-+		vty_init();
-+	}
- #endif
- 	return 0;
- }
-diff -Nurp pristine-linux-2.6.12/drivers/char/tty_ioctl.c linux-2.6.12-xen/drivers/char/tty_ioctl.c
---- pristine-linux-2.6.12/drivers/char/tty_ioctl.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/char/tty_ioctl.c	2006-02-25 00:12:30.032557887 +0100
-@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty,
- 			ld = tty_ldisc_ref(tty);
- 			switch (arg) {
- 			case TCIFLUSH:
--				if (ld->flush_buffer)
-+				if (ld && ld->flush_buffer)
- 					ld->flush_buffer(tty);
- 				break;
- 			case TCIOFLUSH:
--				if (ld->flush_buffer)
-+				if (ld && ld->flush_buffer)
- 					ld->flush_buffer(tty);
- 				/* fall through */
- 			case TCOFLUSH:
-diff -Nurp pristine-linux-2.6.12/drivers/Makefile linux-2.6.12-xen/drivers/Makefile
---- pristine-linux-2.6.12/drivers/Makefile	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -30,6 +30,7 @@ obj-y				+= base/ block/ misc/ net/ medi
- obj-$(CONFIG_NUBUS)		+= nubus/
- obj-$(CONFIG_ATM)		+= atm/
- obj-$(CONFIG_PPC_PMAC)		+= macintosh/
-+obj-$(CONFIG_ARCH_XEN)		+= xen/
- obj-$(CONFIG_IDE)		+= ide/
- obj-$(CONFIG_FC4)		+= fc4/
- obj-$(CONFIG_SCSI)		+= scsi/
-diff -Nurp pristine-linux-2.6.12/drivers/media/video/cx88/cx88-video.c linux-2.6.12-xen/drivers/media/video/cx88/cx88-video.c
---- pristine-linux-2.6.12/drivers/media/video/cx88/cx88-video.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/media/video/cx88/cx88-video.c	2006-02-25 00:12:30.034557586 +0100
-@@ -261,7 +261,7 @@ static struct cx88_ctrl cx8800_ctls[] = 
- 			.default_value = 0,
- 			.type          = V4L2_CTRL_TYPE_INTEGER,
- 		},
--		.off                   = 0,
-+		.off                   = 128,
- 		.reg                   = MO_HUE,
- 		.mask                  = 0x00ff,
- 		.shift                 = 0,
-diff -Nurp pristine-linux-2.6.12/drivers/net/e1000/e1000_main.c linux-2.6.12-xen/drivers/net/e1000/e1000_main.c
---- pristine-linux-2.6.12/drivers/net/e1000/e1000_main.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/net/e1000/e1000_main.c	2006-02-25 00:12:30.035557435 +0100
-@@ -2307,6 +2307,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
- 	tso = e1000_tso(adapter, skb);
- 	if (tso < 0) {
- 		dev_kfree_skb_any(skb);
-+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
- 		return NETDEV_TX_OK;
- 	}
- 
-diff -Nurp pristine-linux-2.6.12/drivers/net/hamradio/Kconfig linux-2.6.12-xen/drivers/net/hamradio/Kconfig
---- pristine-linux-2.6.12/drivers/net/hamradio/Kconfig	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/net/hamradio/Kconfig	2006-02-25 00:12:30.255524280 +0100
-@@ -17,7 +17,7 @@ config MKISS
- 
- config 6PACK
- 	tristate "Serial port 6PACK driver"
--	depends on AX25 && BROKEN_ON_SMP
-+	depends on AX25
- 	---help---
- 	  6pack is a transmission protocol for the data exchange between your
- 	  PC and your TNC (the Terminal Node Controller acts as a kind of
-diff -Nurp pristine-linux-2.6.12/drivers/net/shaper.c linux-2.6.12-xen/drivers/net/shaper.c
---- pristine-linux-2.6.12/drivers/net/shaper.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/net/shaper.c	2006-02-25 00:12:31.528332432 +0100
-@@ -135,10 +135,8 @@ static int shaper_start_xmit(struct sk_b
- {
- 	struct shaper *shaper = dev->priv;
-  	struct sk_buff *ptr;
--   
--	if (down_trylock(&shaper->sem))
--		return -1;
- 
-+	spin_lock(&shaper->lock);
-  	ptr=shaper->sendq.prev;
-  	
-  	/*
-@@ -232,7 +230,7 @@ static int shaper_start_xmit(struct sk_b
-                 shaper->stats.collisions++;
-  	}
- 	shaper_kick(shaper);
--	up(&shaper->sem);
-+	spin_unlock(&shaper->lock);
-  	return 0;
- }
- 
-@@ -271,11 +269,9 @@ static void shaper_timer(unsigned long d
- {
- 	struct shaper *shaper = (struct shaper *)data;
- 
--	if (!down_trylock(&shaper->sem)) {
--		shaper_kick(shaper);
--		up(&shaper->sem);
--	} else
--		mod_timer(&shaper->timer, jiffies);
-+	spin_lock(&shaper->lock);
-+	shaper_kick(shaper);
-+	spin_unlock(&shaper->lock);
- }
- 
- /*
-@@ -332,21 +328,6 @@ static void shaper_kick(struct shaper *s
- 
- 
- /*
-- *	Flush the shaper queues on a closedown
-- */
-- 
--static void shaper_flush(struct shaper *shaper)
--{
--	struct sk_buff *skb;
--
--	down(&shaper->sem);
--	while((skb=skb_dequeue(&shaper->sendq))!=NULL)
--		dev_kfree_skb(skb);
--	shaper_kick(shaper);
--	up(&shaper->sem);
--}
--
--/*
-  *	Bring the interface up. We just disallow this until a 
-  *	bind.
-  */
-@@ -375,7 +356,15 @@ static int shaper_open(struct net_device
- static int shaper_close(struct net_device *dev)
- {
- 	struct shaper *shaper=dev->priv;
--	shaper_flush(shaper);
-+	struct sk_buff *skb;
-+
-+	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
-+		dev_kfree_skb(skb);
-+
-+	spin_lock_bh(&shaper->lock);
-+	shaper_kick(shaper);
-+	spin_unlock_bh(&shaper->lock);
-+
- 	del_timer_sync(&shaper->timer);
- 	return 0;
- }
-@@ -576,6 +565,7 @@ static void shaper_init_priv(struct net_
- 	init_timer(&sh->timer);
- 	sh->timer.function=shaper_timer;
- 	sh->timer.data=(unsigned long)sh;
-+	spin_lock_init(&sh->lock);
- }
- 
- /*
-diff -Nurp pristine-linux-2.6.12/drivers/pci/pci-driver.c linux-2.6.12-xen/drivers/pci/pci-driver.c
---- pristine-linux-2.6.12/drivers/pci/pci-driver.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/pci/pci-driver.c	2006-02-25 00:12:32.328211868 +0100
-@@ -396,7 +396,7 @@ int pci_register_driver(struct pci_drive
- 	/* FIXME, once all of the existing PCI drivers have been fixed to set
- 	 * the pci shutdown function, this test can go away. */
- 	if (!drv->driver.shutdown)
--		drv->driver.shutdown = pci_device_shutdown,
-+		drv->driver.shutdown = pci_device_shutdown;
- 	drv->driver.owner = drv->owner;
- 	drv->driver.kobj.ktype = &pci_driver_kobj_type;
- 	pci_init_dynids(&drv->dynids);
-diff -Nurp pristine-linux-2.6.12/drivers/scsi/qla2xxx/qla_init.c linux-2.6.12-xen/drivers/scsi/qla2xxx/qla_init.c
---- pristine-linux-2.6.12/drivers/scsi/qla2xxx/qla_init.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/scsi/qla2xxx/qla_init.c	2006-02-25 00:12:33.372054530 +0100
-@@ -1914,9 +1914,11 @@ qla2x00_reg_remote_port(scsi_qla_host_t 
- 		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
- 
- 	fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
--	if (!rport)
-+	if (!rport) {
- 		qla_printk(KERN_WARNING, ha,
- 		    "Unable to allocate fc remote port!\n");
-+		return;
-+	}
- 
- 	if (rport->scsi_target_id != -1 && rport->scsi_target_id < MAX_TARGETS)
- 		fcport->os_target_id = rport->scsi_target_id;
-diff -Nurp pristine-linux-2.6.12/drivers/scsi/qla2xxx/qla_os.c linux-2.6.12-xen/drivers/scsi/qla2xxx/qla_os.c
---- pristine-linux-2.6.12/drivers/scsi/qla2xxx/qla_os.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/scsi/qla2xxx/qla_os.c	2006-02-25 00:12:33.374054228 +0100
-@@ -1150,7 +1150,7 @@ iospace_error_exit:
-  */
- int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
- {
--	int	ret;
-+	int	ret = -ENODEV;
- 	device_reg_t __iomem *reg;
- 	struct Scsi_Host *host;
- 	scsi_qla_host_t *ha;
-@@ -1161,7 +1161,7 @@ int qla2x00_probe_one(struct pci_dev *pd
- 	fc_port_t *fcport;
- 
- 	if (pci_enable_device(pdev))
--		return -1;
-+		goto probe_out;
- 
- 	host = scsi_host_alloc(&qla2x00_driver_template,
- 	    sizeof(scsi_qla_host_t));
-@@ -1183,9 +1183,8 @@ int qla2x00_probe_one(struct pci_dev *pd
- 
- 	/* Configure PCI I/O space */
- 	ret = qla2x00_iospace_config(ha);
--	if (ret != 0) {
--		goto probe_alloc_failed;
--	}
-+	if (ret)
-+		goto probe_failed;
- 
- 	/* Sanitize the information from PCI BIOS. */
- 	host->irq = pdev->irq;
-@@ -1258,23 +1257,10 @@ int qla2x00_probe_one(struct pci_dev *pd
- 		qla_printk(KERN_WARNING, ha,
- 		    "[ERROR] Failed to allocate memory for adapter\n");
- 
--		goto probe_alloc_failed;
-+		ret = -ENOMEM;
-+		goto probe_failed;
- 	}
- 
--	pci_set_drvdata(pdev, ha);
--	host->this_id = 255;
--	host->cmd_per_lun = 3;
--	host->unique_id = ha->instance;
--	host->max_cmd_len = MAX_CMDSZ;
--	host->max_channel = ha->ports - 1;
--	host->max_id = ha->max_targets;
--	host->max_lun = ha->max_luns;
--	host->transportt = qla2xxx_transport_template;
--	if (scsi_add_host(host, &pdev->dev))
--		goto probe_alloc_failed;
--
--	qla2x00_alloc_sysfs_attr(ha);
--
- 	if (qla2x00_initialize_adapter(ha) &&
- 	    !(ha->device_flags & DFLG_NO_CABLE)) {
- 
-@@ -1285,11 +1271,10 @@ int qla2x00_probe_one(struct pci_dev *pd
- 		    "Adapter flags %x.\n",
- 		    ha->host_no, ha->device_flags));
- 
-+		ret = -ENODEV;
- 		goto probe_failed;
- 	}
- 
--	qla2x00_init_host_attr(ha);
--
- 	/*
- 	 * Startup the kernel thread for this host adapter
- 	 */
-@@ -1299,17 +1284,26 @@ int qla2x00_probe_one(struct pci_dev *pd
- 		qla_printk(KERN_WARNING, ha,
- 		    "Unable to start DPC thread!\n");
- 
-+		ret = -ENODEV;
- 		goto probe_failed;
- 	}
- 	wait_for_completion(&ha->dpc_inited);
- 
-+	host->this_id = 255;
-+	host->cmd_per_lun = 3;
-+	host->unique_id = ha->instance;
-+	host->max_cmd_len = MAX_CMDSZ;
-+	host->max_channel = ha->ports - 1;
-+	host->max_lun = MAX_LUNS;
-+	host->transportt = qla2xxx_transport_template;
-+
- 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
- 		ret = request_irq(host->irq, qla2100_intr_handler,
- 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
- 	else
- 		ret = request_irq(host->irq, qla2300_intr_handler,
- 		    SA_INTERRUPT|SA_SHIRQ, ha->brd_info->drv_name, ha);
--	if (ret != 0) {
-+	if (ret) {
- 		qla_printk(KERN_WARNING, ha,
- 		    "Failed to reserve interrupt %d already in use.\n",
- 		    host->irq);
-@@ -1363,9 +1357,18 @@ int qla2x00_probe_one(struct pci_dev *pd
- 		msleep(10);
- 	}
- 
-+	pci_set_drvdata(pdev, ha);
- 	ha->flags.init_done = 1;
- 	num_hosts++;
- 
-+	ret = scsi_add_host(host, &pdev->dev);
-+	if (ret)
-+		goto probe_failed;
-+
-+	qla2x00_alloc_sysfs_attr(ha);
-+
-+	qla2x00_init_host_attr(ha);
-+
- 	qla_printk(KERN_INFO, ha, "\n"
- 	    " QLogic Fibre Channel HBA Driver: %s\n"
- 	    "  QLogic %s - %s\n"
-@@ -1384,9 +1387,6 @@ int qla2x00_probe_one(struct pci_dev *pd
- probe_failed:
- 	fc_remove_host(ha->host);
- 
--	scsi_remove_host(host);
--
--probe_alloc_failed:
- 	qla2x00_free_device(ha);
- 
- 	scsi_host_put(host);
-@@ -1394,7 +1394,8 @@ probe_alloc_failed:
- probe_disable_device:
- 	pci_disable_device(pdev);
- 
--	return -1;
-+probe_out:
-+	return ret;
- }
- EXPORT_SYMBOL_GPL(qla2x00_probe_one);
- 
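
The qla2x00_probe_one() rework above replaces the bare `return -1` exits with a goto chain that propagates a meaningful errno and defers host registration until the adapter is fully initialised. A standalone sketch of that error-unwinding idiom (toy_ names hypothetical, illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int  toy_enable_device(void)   { return 0; }
static void *toy_alloc_host(void)     { return malloc(16); }
static int  toy_init_adapter(void *h) { return h ? 0 : -ENODEV; }

static int toy_probe_one(void)
{
	int ret = -ENODEV;		/* default error, like the hunk above */
	void *host;

	if (toy_enable_device())
		goto probe_out;

	host = toy_alloc_host();
	if (!host) {
		ret = -ENOMEM;
		goto probe_disable_device;
	}

	ret = toy_init_adapter(host);
	if (ret)
		goto probe_failed;

	/* only now would the host be registered with the midlayer */
	free(host);
	return 0;

probe_failed:
	free(host);
probe_disable_device:
	/* undo toy_enable_device() here */
probe_out:
	return ret;
}

int main(void)
{
	printf("toy_probe_one() = %d\n", toy_probe_one());
	return 0;
}
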
-diff -Nurp pristine-linux-2.6.12/drivers/scsi/sg.c linux-2.6.12-xen/drivers/scsi/sg.c
---- pristine-linux-2.6.12/drivers/scsi/sg.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/scsi/sg.c	2006-02-25 00:12:33.375054078 +0100
-@@ -2969,23 +2969,22 @@ static void * dev_seq_start(struct seq_f
- {
- 	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
- 
-+	s->private = it;
- 	if (! it)
- 		return NULL;
-+
- 	if (NULL == sg_dev_arr)
--		goto err1;
-+		return NULL;
- 	it->index = *pos;
- 	it->max = sg_last_dev();
- 	if (it->index >= it->max)
--		goto err1;
-+		return NULL;
- 	return it;
--err1:
--	kfree(it);
--	return NULL;
- }
- 
- static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
- {
--	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
-+	struct sg_proc_deviter * it = s->private;
- 
- 	*pos = ++it->index;
- 	return (it->index < it->max) ? it : NULL;
-@@ -2993,7 +2992,9 @@ static void * dev_seq_next(struct seq_fi
- 
- static void dev_seq_stop(struct seq_file *s, void *v)
- {
--	kfree (v);
-+	struct sg_proc_deviter * it = s->private;
-+
-+	kfree (it);
- }
- 
- static int sg_proc_open_dev(struct inode *inode, struct file *file)
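
The dev_seq_* hunk above stores the iterator in seq_file->private as soon as it is allocated, so ->stop() can free it exactly once even when ->start() bails out early. A kernel-style sketch of that ownership idiom (toy_ names hypothetical; compiles only against a kernel tree):

#include <linux/seq_file.h>
#include <linux/slab.h>

struct toy_iter { int index, max; };

static void *toy_seq_start(struct seq_file *s, loff_t *pos)
{
	struct toy_iter *it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;		/* stop() frees it, even if we return NULL */
	if (!it)
		return NULL;
	it->index = *pos;
	it->max   = 8;
	return (it->index < it->max) ? it : NULL;
}

static void toy_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);		/* kfree(NULL) is a no-op, so always safe */
}
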
-diff -Nurp pristine-linux-2.6.12/drivers/usb/net/usbnet.c linux-2.6.12-xen/drivers/usb/net/usbnet.c
---- pristine-linux-2.6.12/drivers/usb/net/usbnet.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/drivers/usb/net/usbnet.c	2006-02-25 00:12:33.377053776 +0100
-@@ -1922,7 +1922,7 @@ static int genelink_rx_fixup (struct usb
- 
- 			// copy the packet data to the new skb
- 			memcpy(skb_put(gl_skb, size), packet->packet_data, size);
--			skb_return (dev, skb);
-+			skb_return (dev, gl_skb);
- 		}
- 
- 		// advance to the next packet
-diff -Nurp pristine-linux-2.6.12/drivers/xen/balloon/balloon.c linux-2.6.12-xen/drivers/xen/balloon/balloon.c
---- pristine-linux-2.6.12/drivers/xen/balloon/balloon.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/balloon/balloon.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,587 @@
-+/******************************************************************************
-+ * balloon.c
-+ *
-+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
-+ *
-+ * Copyright (c) 2003, B Dragovic
-+ * Copyright (c) 2003-2004, M Williamson, K Fraser
-+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/smp_lock.h>
-+#include <linux/pagemap.h>
-+#include <linux/bootmem.h>
-+#include <linux/highmem.h>
-+#include <linux/vmalloc.h>
-+#include <asm-xen/xen_proc.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/balloon.h>
-+#include <asm-xen/xen-public/memory.h>
-+#include <asm/pgalloc.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/tlb.h>
-+#include <linux/list.h>
-+
-+#include<asm-xen/xenbus.h>
-+
-+#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
-+
-+static struct proc_dir_entry *balloon_pde;
-+
-+static DECLARE_MUTEX(balloon_mutex);
-+
-+/*
-+ * Protects atomic reservation decrease/increase against concurrent increases.
-+ * Also protects non-atomic updates of current_pages and driver_pages, and
-+ * balloon lists.
-+ */
-+spinlock_t balloon_lock = SPIN_LOCK_UNLOCKED;
-+
-+/* We aim for 'current allocation' == 'target allocation'. */
-+static unsigned long current_pages;
-+static unsigned long target_pages;
-+
-+/* VM /proc information for memory */
-+extern unsigned long totalram_pages;
-+
-+/* We may hit the hard limit in Xen. If we do then we remember it. */
-+static unsigned long hard_limit;
-+
-+/*
-+ * Drivers may alter the memory reservation independently, but they must
-+ * inform the balloon driver so that we can avoid hitting the hard limit.
-+ */
-+static unsigned long driver_pages;
-+
-+/* List of ballooned pages, threaded through the mem_map array. */
-+static LIST_HEAD(ballooned_pages);
-+static unsigned long balloon_low, balloon_high;
-+
-+/* Main work function, always executed in process context. */
-+static void balloon_process(void *unused);
-+static DECLARE_WORK(balloon_worker, balloon_process, NULL);
-+static struct timer_list balloon_timer;
-+
-+/* Use the private and mapping fields of struct page as a list. */
-+#define PAGE_TO_LIST(p) ((struct list_head *)&p->private)
-+#define LIST_TO_PAGE(l)				\
-+	(list_entry(((unsigned long *)l), struct page, private))
-+#define UNLIST_PAGE(p)				\
-+	do {					\
-+		list_del(PAGE_TO_LIST(p));	\
-+		p->mapping = NULL;		\
-+		p->private = 0;			\
-+	} while(0)
-+
-+#define IPRINTK(fmt, args...) \
-+	printk(KERN_INFO "xen_mem: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+	printk(KERN_WARNING "xen_mem: " fmt, ##args)
-+
-+/* balloon_append: add the given page to the balloon. */
-+static void balloon_append(struct page *page)
-+{
-+	/* Lowmem is re-populated first, so highmem pages go at list tail. */
-+	if (PageHighMem(page)) {
-+		list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
-+		balloon_high++;
-+	} else {
-+		list_add(PAGE_TO_LIST(page), &ballooned_pages);
-+		balloon_low++;
-+	}
-+}
-+
-+/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-+static struct page *balloon_retrieve(void)
-+{
-+	struct page *page;
-+
-+	if (list_empty(&ballooned_pages))
-+		return NULL;
-+
-+	page = LIST_TO_PAGE(ballooned_pages.next);
-+	UNLIST_PAGE(page);
-+
-+	if (PageHighMem(page))
-+		balloon_high--;
-+	else
-+		balloon_low--;
-+
-+	return page;
-+}
-+
-+static struct page *balloon_first_page(void)
-+{
-+	if (list_empty(&ballooned_pages))
-+		return NULL;
-+	return LIST_TO_PAGE(ballooned_pages.next);
-+}
-+
-+static struct page *balloon_next_page(struct page *page)
-+{
-+	struct list_head *next = PAGE_TO_LIST(page)->next;
-+	if (next == &ballooned_pages)
-+		return NULL;
-+	return LIST_TO_PAGE(next);
-+}
-+
-+static void balloon_alarm(unsigned long unused)
-+{
-+	schedule_work(&balloon_worker);
-+}
-+
-+static unsigned long current_target(void)
-+{
-+	unsigned long target = min(target_pages, hard_limit);
-+	if (target > (current_pages + balloon_low + balloon_high))
-+		target = current_pages + balloon_low + balloon_high;
-+	return target;
-+}
-+
-+static int increase_reservation(unsigned long nr_pages)
-+{
-+	unsigned long *frame_list, pfn, i, flags;
-+	struct page   *page;
-+	long           rc;
-+	struct xen_memory_reservation reservation = {
-+		.address_bits = 0,
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
-+
-+	if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
-+		nr_pages = PAGE_SIZE / sizeof(unsigned long);
-+
-+	frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
-+	if (frame_list == NULL)
-+		return -ENOMEM;
-+
-+	balloon_lock(flags);
-+
-+	page = balloon_first_page();
-+	for (i = 0; i < nr_pages; i++) {
-+		BUG_ON(page == NULL);
-+		frame_list[i] = page_to_pfn(page);;
-+		page = balloon_next_page(page);
-+	}
-+
-+	reservation.extent_start = frame_list;
-+	reservation.nr_extents   = nr_pages;
-+	rc = HYPERVISOR_memory_op(
-+		XENMEM_populate_physmap, &reservation);
-+	if (rc < nr_pages) {
-+		int ret;
-+		/* We hit the Xen hard limit: reprobe. */
-+		reservation.extent_start = frame_list;
-+		reservation.nr_extents   = rc;
-+		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-+				&reservation);
-+		BUG_ON(ret != rc);
-+		hard_limit = current_pages + rc - driver_pages;
-+		goto out;
-+	}
-+
-+	for (i = 0; i < nr_pages; i++) {
-+		page = balloon_retrieve();
-+		BUG_ON(page == NULL);
-+
-+		pfn = page_to_pfn(page);
-+		BUG_ON(phys_to_machine_mapping_valid(pfn));
-+
-+		/* Update P->M and M->P tables. */
-+		set_phys_to_machine(pfn, frame_list[i]);
-+		xen_machphys_update(frame_list[i], pfn);
-+            
-+		/* Link back into the page tables if not highmem. */
-+		if (pfn < max_low_pfn) {
-+			int ret;
-+			ret = HYPERVISOR_update_va_mapping(
-+				(unsigned long)__va(pfn << PAGE_SHIFT),
-+				pfn_pte_ma(frame_list[i], PAGE_KERNEL),
-+				0);
-+			BUG_ON(ret);
-+		}
-+
-+		/* Relinquish the page back to the allocator. */
-+		ClearPageReserved(page);
-+		set_page_count(page, 1);
-+		__free_page(page);
-+	}
-+
-+	current_pages += nr_pages;
-+	totalram_pages = current_pages;
-+
-+ out:
-+	balloon_unlock(flags);
-+
-+	free_page((unsigned long)frame_list);
-+
-+	return 0;
-+}
-+
-+static int decrease_reservation(unsigned long nr_pages)
-+{
-+	unsigned long *frame_list, pfn, i, flags;
-+	struct page   *page;
-+	void          *v;
-+	int            need_sleep = 0;
-+	int ret;
-+	struct xen_memory_reservation reservation = {
-+		.address_bits = 0,
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
-+
-+	if (nr_pages > (PAGE_SIZE / sizeof(unsigned long)))
-+		nr_pages = PAGE_SIZE / sizeof(unsigned long);
-+
-+	frame_list = (unsigned long *)__get_free_page(GFP_KERNEL);
-+	if (frame_list == NULL)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < nr_pages; i++) {
-+		if ((page = alloc_page(GFP_HIGHUSER)) == NULL) {
-+			nr_pages = i;
-+			need_sleep = 1;
-+			break;
-+		}
-+
-+		pfn = page_to_pfn(page);
-+		frame_list[i] = pfn_to_mfn(pfn);
-+
-+		if (!PageHighMem(page)) {
-+			v = phys_to_virt(pfn << PAGE_SHIFT);
-+			scrub_pages(v, 1);
-+			ret = HYPERVISOR_update_va_mapping(
-+				(unsigned long)v, __pte_ma(0), 0);
-+			BUG_ON(ret);
-+		}
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+		else {
-+			v = kmap(page);
-+			scrub_pages(v, 1);
-+			kunmap(page);
-+		}
-+#endif
-+	}
-+
-+	/* Ensure that ballooned highmem pages don't have kmaps. */
-+	kmap_flush_unused();
-+	flush_tlb_all();
-+
-+	balloon_lock(flags);
-+
-+	/* No more mappings: invalidate P2M and add to balloon. */
-+	for (i = 0; i < nr_pages; i++) {
-+		pfn = mfn_to_pfn(frame_list[i]);
-+		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-+		balloon_append(pfn_to_page(pfn));
-+	}
-+
-+	reservation.extent_start = frame_list;
-+	reservation.nr_extents   = nr_pages;
-+	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-+	BUG_ON(ret != nr_pages);
-+
-+	current_pages -= nr_pages;
-+	totalram_pages = current_pages;
-+
-+	balloon_unlock(flags);
-+
-+	free_page((unsigned long)frame_list);
-+
-+	return need_sleep;
-+}
-+
-+/*
-+ * We avoid multiple worker processes conflicting via the balloon mutex.
-+ * We may of course race updates of the target counts (which are protected
-+ * by the balloon lock), or with changes to the Xen hard limit, but we will
-+ * recover from these in time.
-+ */
-+static void balloon_process(void *unused)
-+{
-+	int need_sleep = 0;
-+	long credit;
-+
-+	down(&balloon_mutex);
-+
-+	do {
-+		credit = current_target() - current_pages;
-+		if (credit > 0)
-+			need_sleep = (increase_reservation(credit) != 0);
-+		if (credit < 0)
-+			need_sleep = (decrease_reservation(-credit) != 0);
-+
-+#ifndef CONFIG_PREEMPT
-+		if (need_resched())
-+			schedule();
-+#endif
-+	} while ((credit != 0) && !need_sleep);
-+
-+	/* Schedule more work if there is some still to be done. */
-+	if (current_target() != current_pages)
-+		mod_timer(&balloon_timer, jiffies + HZ);
-+
-+	up(&balloon_mutex);
-+}
-+
-+/* Resets the Xen limit, sets new target, and kicks off processing. */
-+static void set_new_target(unsigned long target)
-+{
-+	/* No need for lock. Not read-modify-write updates. */
-+	hard_limit   = ~0UL;
-+	target_pages = target;
-+	schedule_work(&balloon_worker);
-+}
-+
-+static struct xenbus_watch target_watch =
-+{
-+	.node = "memory/target"
-+};
-+
-+/* React to a change in the target key */
-+static void watch_target(struct xenbus_watch *watch,
-+			 const char **vec, unsigned int len)
-+{
-+	unsigned long long new_target;
-+	int err;
-+
-+	err = xenbus_scanf(XBT_NULL, "memory", "target", "%llu", &new_target);
-+	if (err != 1) {
-+		/* This is ok (for domain0 at least) - so just return */
-+		return;
-+	} 
-+        
-+	/* The given memory/target value is in KiB, so it needs converting to
-+	   pages.  PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
-+	*/
-+	set_new_target(new_target >> (PAGE_SHIFT - 10));
-+    
-+}
-+
-+static int balloon_init_watcher(struct notifier_block *notifier,
-+                                unsigned long event,
-+                                void *data)
-+{
-+	int err;
-+
-+	err = register_xenbus_watch(&target_watch);
-+	if (err)
-+		printk(KERN_ERR "Failed to set balloon watcher\n");
-+
-+	return NOTIFY_DONE;
-+    
-+}
-+
-+static int balloon_write(struct file *file, const char __user *buffer,
-+                         unsigned long count, void *data)
-+{
-+	char memstring[64], *endchar;
-+	unsigned long long target_bytes;
-+
-+	if (!capable(CAP_SYS_ADMIN))
-+		return -EPERM;
-+
-+	if (count <= 1)
-+		return -EBADMSG; /* runt */
-+	if (count > sizeof(memstring))
-+		return -EFBIG;   /* too long */
-+
-+	if (copy_from_user(memstring, buffer, count))
-+		return -EFAULT;
-+	memstring[sizeof(memstring)-1] = '\0';
-+
-+	target_bytes = memparse(memstring, &endchar);
-+	set_new_target(target_bytes >> PAGE_SHIFT);
-+
-+	return count;
-+}
-+
-+static int balloon_read(char *page, char **start, off_t off,
-+                        int count, int *eof, void *data)
-+{
-+	int len;
-+
-+	len = sprintf(
-+		page,
-+		"Current allocation: %8lu kB\n"
-+		"Requested target:   %8lu kB\n"
-+		"Low-mem balloon:    %8lu kB\n"
-+		"High-mem balloon:   %8lu kB\n"
-+		"Xen hard limit:     ",
-+		PAGES2KB(current_pages), PAGES2KB(target_pages), 
-+		PAGES2KB(balloon_low), PAGES2KB(balloon_high));
-+
-+	if (hard_limit != ~0UL) {
-+		len += sprintf(
-+			page + len, 
-+			"%8lu kB (inc. %8lu kB driver headroom)\n",
-+			PAGES2KB(hard_limit), PAGES2KB(driver_pages));
-+	} else {
-+		len += sprintf(
-+			page + len,
-+			"     ??? kB\n");
-+	}
-+
-+	*eof = 1;
-+	return len;
-+}
-+
-+static struct notifier_block xenstore_notifier;
-+
-+static int __init balloon_init(void)
-+{
-+	unsigned long pfn;
-+	struct page *page;
-+
-+	IPRINTK("Initialising balloon driver.\n");
-+
-+	if (xen_init() < 0)
-+		return -1;
-+
-+	current_pages = min(xen_start_info->nr_pages, max_pfn);
-+	target_pages  = current_pages;
-+	balloon_low   = 0;
-+	balloon_high  = 0;
-+	driver_pages  = 0UL;
-+	hard_limit    = ~0UL;
-+
-+	init_timer(&balloon_timer);
-+	balloon_timer.data = 0;
-+	balloon_timer.function = balloon_alarm;
-+    
-+	if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
-+		WPRINTK("Unable to create /proc/xen/balloon.\n");
-+		return -1;
-+	}
-+
-+	balloon_pde->read_proc  = balloon_read;
-+	balloon_pde->write_proc = balloon_write;
-+    
-+	/* Initialise the balloon with excess memory space. */
-+	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
-+		page = pfn_to_page(pfn);
-+		if (!PageReserved(page))
-+			balloon_append(page);
-+	}
-+
-+	target_watch.callback = watch_target;
-+	xenstore_notifier.notifier_call = balloon_init_watcher;
-+
-+	register_xenstore_notifier(&xenstore_notifier);
-+    
-+	return 0;
-+}
-+
-+subsys_initcall(balloon_init);
-+
-+void balloon_update_driver_allowance(long delta)
-+{
-+	unsigned long flags;
-+
-+	balloon_lock(flags);
-+	driver_pages += delta;
-+	balloon_unlock(flags);
-+}
-+
-+static int dealloc_pte_fn(
-+	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-+{
-+	unsigned long mfn = pte_mfn(*pte);
-+	int ret;
-+	struct xen_memory_reservation reservation = {
-+		.extent_start = &mfn,
-+		.nr_extents   = 1,
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
-+	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
-+	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
-+	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
-+	BUG_ON(ret != 1);
-+	return 0;
-+}
-+
-+struct page *balloon_alloc_empty_page_range(unsigned long nr_pages)
-+{
-+	unsigned long vstart, flags;
-+	unsigned int  order = get_order(nr_pages * PAGE_SIZE);
-+	int ret;
-+
-+	vstart = __get_free_pages(GFP_KERNEL, order);
-+	if (vstart == 0)
-+		return NULL;
-+
-+	scrub_pages(vstart, 1 << order);
-+
-+	balloon_lock(flags);
-+	ret = generic_page_range(
-+		&init_mm, vstart, PAGE_SIZE << order, dealloc_pte_fn, NULL);
-+	BUG_ON(ret);
-+	current_pages -= 1UL << order;
-+	balloon_unlock(flags);
-+
-+	schedule_work(&balloon_worker);
-+
-+	flush_tlb_all();
-+
-+	return virt_to_page(vstart);
-+}
-+
-+void balloon_dealloc_empty_page_range(
-+	struct page *page, unsigned long nr_pages)
-+{
-+	unsigned long i, flags;
-+	unsigned int  order = get_order(nr_pages * PAGE_SIZE);
-+
-+	balloon_lock(flags);
-+	for (i = 0; i < (1UL << order); i++)
-+		balloon_append(page + i);
-+	balloon_unlock(flags);
-+
-+	schedule_work(&balloon_worker);
-+}
-+
-+EXPORT_SYMBOL(balloon_update_driver_allowance);
-+EXPORT_SYMBOL(balloon_alloc_empty_page_range);
-+EXPORT_SYMBOL(balloon_dealloc_empty_page_range);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
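
In the balloon driver above, balloon_process() repeatedly computes a credit as the difference between current_target() and current_pages and grows or shrinks the reservation accordingly, where current_target() clamps the requested target both by the Xen hard limit and by the pages actually reclaimable. A small userspace sketch of that arithmetic (values hypothetical, illustrative only):

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* mirrors current_target(): clamp by hard limit and by reclaimable pages */
static unsigned long toy_current_target(unsigned long target_pages,
					unsigned long hard_limit,
					unsigned long current_pages,
					unsigned long balloon_low,
					unsigned long balloon_high)
{
	unsigned long target = min_ul(target_pages, hard_limit);

	return min_ul(target, current_pages + balloon_low + balloon_high);
}

int main(void)
{
	unsigned long current_pages = 65536;	/* 256 MiB of 4 KiB pages */
	unsigned long target = toy_current_target(131072, ~0UL,
						  current_pages, 2048, 0);
	long credit = (long)target - (long)current_pages;

	/* positive credit -> increase_reservation(), negative -> decrease */
	printf("target=%lu credit=%ld\n", target, credit);
	return 0;
}
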
-diff -Nurp pristine-linux-2.6.12/drivers/xen/balloon/Makefile linux-2.6.12-xen/drivers/xen/balloon/Makefile
---- pristine-linux-2.6.12/drivers/xen/balloon/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/balloon/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+obj-y += balloon.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/blkback.c linux-2.6.12-xen/drivers/xen/blkback/blkback.c
---- pristine-linux-2.6.12/drivers/xen/blkback/blkback.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkback/blkback.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,590 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/main.c
-+ * 
-+ * Back-end of the driver for virtual block devices. This portion of the
-+ * driver exports a 'unified' block-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. A 
-+ * reference front-end implementation can be found in:
-+ *  arch/xen/drivers/blkif/frontend
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Copyright (c) 2005, Christopher Clark
-+ */
-+
-+#include <linux/spinlock.h>
-+#include <linux/kthread.h>
-+#include <linux/list.h>
-+#include <asm-xen/balloon.h>
-+#include <asm/hypervisor.h>
-+#include "common.h"
-+
-+/*
-+ * These are rather arbitrary. They are fairly large because adjacent requests
-+ * pulled from a communication ring are quite likely to end up being part of
-+ * the same scatter/gather request at the disc.
-+ * 
-+ * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
-+ * 
-+ * This will increase the chances of being able to write whole tracks.
-+ * 64 should be enough to keep us competitive with Linux.
-+ */
-+static int blkif_reqs = 64;
-+static int mmap_pages;
-+
-+static int __init set_blkif_reqs(char *str)
-+{
-+	get_option(&str, &blkif_reqs);
-+	return 1;
-+}
-+__setup("blkif_reqs=", set_blkif_reqs);
-+
-+/* Run-time switchable: /sys/module/blkback/parameters/ */
-+static unsigned int log_stats = 0;
-+static unsigned int debug_lvl = 0;
-+module_param(log_stats, int, 0644);
-+module_param(debug_lvl, int, 0644);
-+
-+/*
-+ * Each outstanding request that we've passed to the lower device layers has a 
-+ * 'pending_req' allocated to it. Each buffer_head that completes decrements 
-+ * the pendcnt towards zero. When it hits zero, the specified domain has a 
-+ * response queued for it, with the saved 'id' passed back.
-+ */
-+typedef struct {
-+	blkif_t       *blkif;
-+	unsigned long  id;
-+	int            nr_pages;
-+	atomic_t       pendcnt;
-+	unsigned short operation;
-+	int            status;
-+	struct list_head free_list;
-+} pending_req_t;
-+
-+static pending_req_t *pending_reqs;
-+static struct list_head pending_free;
-+static spinlock_t pending_free_lock = SPIN_LOCK_UNLOCKED;
-+static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
-+
-+#define BLKBACK_INVALID_HANDLE (~0)
-+
-+static unsigned long mmap_vstart;
-+static unsigned long *pending_vaddrs;
-+static grant_handle_t *pending_grant_handles;
-+
-+static inline int vaddr_pagenr(pending_req_t *req, int seg)
-+{
-+	return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
-+}
-+
-+static inline unsigned long vaddr(pending_req_t *req, int seg)
-+{
-+	return pending_vaddrs[vaddr_pagenr(req, seg)];
-+}
-+
-+#define pending_handle(_req, _seg) \
-+	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
-+
-+
-+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
-+/*
-+ * If the tap driver is used, we may get pages belonging to either the tap
-+ * or (more likely) the real frontend.  The backend must specify which domain
-+ * a given page belongs to in update_va_mapping though.  For the moment, 
-+ * the tap rewrites the ID field of the request to contain the request index
-+ * and the id of the real front end domain.
-+ */
-+#define BLKTAP_COOKIE 0xbeadfeed
-+static inline domid_t ID_TO_DOM(unsigned long id) { return (id >> 16); }
-+#endif
-+
-+static int do_block_io_op(blkif_t *blkif);
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+				 blkif_request_t *req,
-+				 pending_req_t *pending_req);
-+static void make_response(blkif_t *blkif, unsigned long id, 
-+                          unsigned short op, int st);
-+
-+/******************************************************************
-+ * misc small helpers
-+ */
-+static pending_req_t* alloc_req(void)
-+{
-+	pending_req_t *req = NULL;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&pending_free_lock, flags);
-+	if (!list_empty(&pending_free)) {
-+		req = list_entry(pending_free.next, pending_req_t, free_list);
-+		list_del(&req->free_list);
-+	}
-+	spin_unlock_irqrestore(&pending_free_lock, flags);
-+	return req;
-+}
-+
-+static void free_req(pending_req_t *req)
-+{
-+	unsigned long flags;
-+	int was_empty;
-+
-+	spin_lock_irqsave(&pending_free_lock, flags);
-+	was_empty = list_empty(&pending_free);
-+	list_add(&req->free_list, &pending_free);
-+	spin_unlock_irqrestore(&pending_free_lock, flags);
-+	if (was_empty)
-+		wake_up(&pending_free_wq);
-+}
-+
-+static void unplug_queue(blkif_t *blkif)
-+{
-+	if (blkif->plug == NULL)
-+		return;
-+	if (blkif->plug->unplug_fn)
-+		blkif->plug->unplug_fn(blkif->plug);
-+	blk_put_queue(blkif->plug);
-+	blkif->plug = NULL;
-+}
-+
-+static void plug_queue(blkif_t *blkif, struct bio *bio)
-+{
-+	request_queue_t *q = bdev_get_queue(bio->bi_bdev);
-+
-+	if (q == blkif->plug)
-+		return;
-+	unplug_queue(blkif);
-+	blk_get_queue(q);
-+	blkif->plug = q;
-+}
-+
-+static void fast_flush_area(pending_req_t *req)
-+{
-+	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+	unsigned int i, invcount = 0;
-+	grant_handle_t handle;
-+	int ret;
-+
-+	for (i = 0; i < req->nr_pages; i++) {
-+		handle = pending_handle(req, i);
-+		if (handle == BLKBACK_INVALID_HANDLE)
-+			continue;
-+		unmap[invcount].host_addr    = vaddr(req, i);
-+		unmap[invcount].dev_bus_addr = 0;
-+		unmap[invcount].handle       = handle;
-+		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
-+		invcount++;
-+	}
-+
-+	ret = HYPERVISOR_grant_table_op(
-+		GNTTABOP_unmap_grant_ref, unmap, invcount);
-+	BUG_ON(ret);
-+}
-+
-+/******************************************************************
-+ * SCHEDULER FUNCTIONS
-+ */
-+
-+static void print_stats(blkif_t *blkif)
-+{
-+	printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d\n",
-+	       current->comm, blkif->st_oo_req,
-+	       blkif->st_rd_req, blkif->st_wr_req);
-+	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
-+	blkif->st_rd_req = 0;
-+	blkif->st_wr_req = 0;
-+	blkif->st_oo_req = 0;
-+}
-+
-+int blkif_schedule(void *arg)
-+{
-+	blkif_t          *blkif = arg;
-+
-+	blkif_get(blkif);
-+	if (debug_lvl)
-+		printk(KERN_DEBUG "%s: started\n", current->comm);
-+	for (;;) {
-+		if (kthread_should_stop()) {
-+			/* asked to quit? */
-+			if (!atomic_read(&blkif->io_pending))
-+				break;
-+			if (debug_lvl)
-+				printk(KERN_DEBUG "%s: I/O pending, "
-+				       "delaying exit\n", current->comm);
-+		}
-+
-+		if (!atomic_read(&blkif->io_pending)) {
-+			/* Wait for work to do. */
-+			wait_event_interruptible(
-+				blkif->wq,
-+				(atomic_read(&blkif->io_pending) ||
-+				 kthread_should_stop()));
-+		} else if (list_empty(&pending_free)) {
-+			/* Wait for pending_req becoming available. */
-+			wait_event_interruptible(
-+				pending_free_wq,
-+				!list_empty(&pending_free));
-+		}
-+
-+		if (blkif->status != CONNECTED) {
-+			/* make sure we are connected */
-+			if (debug_lvl)
-+				printk(KERN_DEBUG "%s: not connected "
-+				       "(%d pending)\n",
-+				       current->comm,
-+				       atomic_read(&blkif->io_pending));
-+			wait_event_interruptible(
-+				blkif->wq,
-+				(blkif->status == CONNECTED ||
-+				 kthread_should_stop()));
-+			continue;
-+		}
-+
-+		/* Schedule I/O */
-+		atomic_set(&blkif->io_pending, 0);
-+		if (do_block_io_op(blkif))
-+			atomic_inc(&blkif->io_pending);
-+		unplug_queue(blkif);
-+
-+		if (log_stats && time_after(jiffies, blkif->st_print))
-+			print_stats(blkif);
-+	}
-+
-+	if (log_stats)
-+		print_stats(blkif);
-+	if (debug_lvl)
-+		printk(KERN_DEBUG "%s: exiting\n", current->comm);
-+	blkif->xenblkd = NULL;
-+	blkif_put(blkif);
-+	return 0;
-+}
-+
-+/******************************************************************
-+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
-+ */
-+
-+static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
-+{
-+	/* An error fails the entire request. */
-+	if (!uptodate) {
-+		DPRINTK("Buffer not up-to-date at end of operation\n");
-+		pending_req->status = BLKIF_RSP_ERROR;
-+	}
-+
-+	if (atomic_dec_and_test(&pending_req->pendcnt)) {
-+		fast_flush_area(pending_req);
-+		make_response(pending_req->blkif, pending_req->id,
-+			      pending_req->operation, pending_req->status);
-+		blkif_put(pending_req->blkif);
-+		free_req(pending_req);
-+	}
-+}
-+
-+static int end_block_io_op(struct bio *bio, unsigned int done, int error)
-+{
-+	if (bio->bi_size != 0)
-+		return 1;
-+	__end_block_io_op(bio->bi_private, !error);
-+	bio_put(bio);
-+	return error;
-+}
-+
-+
-+/******************************************************************************
-+ * NOTIFICATION FROM GUEST OS.
-+ */
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	blkif_t *blkif = dev_id;
-+
-+	atomic_inc(&blkif->io_pending);
-+	wake_up(&blkif->wq);
-+	return IRQ_HANDLED;
-+}
-+
-+
-+
-+/******************************************************************
-+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
-+ */
-+
-+static int do_block_io_op(blkif_t *blkif)
-+{
-+	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-+	blkif_request_t *req;
-+	pending_req_t *pending_req;
-+	RING_IDX rc, rp;
-+	int more_to_do = 0;
-+
-+	rc = blk_ring->req_cons;
-+	rp = blk_ring->sring->req_prod;
-+	rmb(); /* Ensure we see queued requests up to 'rp'. */
-+
-+	while ((rc != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
-+
-+		pending_req = alloc_req();
-+		if (NULL == pending_req) {
-+			blkif->st_oo_req++;
-+			more_to_do = 1;
-+			break;
-+		}
-+
-+		req = RING_GET_REQUEST(blk_ring, rc);
-+		blk_ring->req_cons = ++rc; /* before make_response() */
-+
-+		switch (req->operation) {
-+		case BLKIF_OP_READ:
-+			blkif->st_rd_req++;
-+			dispatch_rw_block_io(blkif, req, pending_req);
-+			break;
-+		case BLKIF_OP_WRITE:
-+			blkif->st_wr_req++;
-+			dispatch_rw_block_io(blkif, req, pending_req);
-+			break;
-+		default:
-+			DPRINTK("error: unknown block io operation [%d]\n",
-+				req->operation);
-+			make_response(blkif, req->id, req->operation,
-+				      BLKIF_RSP_ERROR);
-+			free_req(pending_req);
-+			break;
-+		}
-+	}
-+	return more_to_do;
-+}
-+
-+static void dispatch_rw_block_io(blkif_t *blkif,
-+				 blkif_request_t *req,
-+				 pending_req_t *pending_req)
-+{
-+	extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
-+	int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
-+	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+	struct phys_req preq;
-+	struct { 
-+		unsigned long buf; unsigned int nsec;
-+	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+	unsigned int nseg;
-+	struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+	int ret, i, nbio = 0;
-+
-+	/* Check that number of segments is sane. */
-+	nseg = req->nr_segments;
-+	if (unlikely(nseg == 0) || 
-+	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
-+		DPRINTK("Bad number of segments in request (%d)\n", nseg);
-+		goto fail_response;
-+	}
-+
-+	preq.dev           = req->handle;
-+	preq.sector_number = req->sector_number;
-+	preq.nr_sects      = 0;
-+
-+	pending_req->blkif     = blkif;
-+	pending_req->id        = req->id;
-+	pending_req->operation = operation;
-+	pending_req->status    = BLKIF_RSP_OKAY;
-+	pending_req->nr_pages  = nseg;
-+
-+	for (i = 0; i < nseg; i++) {
-+		seg[i].nsec = req->seg[i].last_sect -
-+			req->seg[i].first_sect + 1;
-+
-+		if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
-+		    (seg[i].nsec <= 0))
-+			goto fail_response;
-+		preq.nr_sects += seg[i].nsec;
-+
-+		map[i].host_addr = vaddr(pending_req, i);
-+		map[i].dom = blkif->domid;
-+		map[i].ref = req->seg[i].gref;
-+		map[i].flags = GNTMAP_host_map;
-+		if ( operation == WRITE )
-+			map[i].flags |= GNTMAP_readonly;
-+	}
-+
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
-+	BUG_ON(ret);
-+
-+	for (i = 0; i < nseg; i++) {
-+		if (unlikely(map[i].status != 0)) {
-+			DPRINTK("invalid buffer -- could not remap it\n");
-+			goto fail_flush;
-+		}
-+
-+		pending_handle(pending_req, i) = map[i].handle;
-+#ifdef __ia64__
-+		pending_vaddrs[vaddr_pagenr(pending_req, i)] =
-+			(unsigned long)gnttab_map_vaddr(map[i]);
-+#else
-+		set_phys_to_machine(__pa(vaddr(
-+			pending_req, i)) >> PAGE_SHIFT,
-+			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
-+#endif
-+		seg[i].buf  = map[i].dev_bus_addr | 
-+			(req->seg[i].first_sect << 9);
-+	}
-+
-+	if (vbd_translate(&preq, blkif, operation) != 0) {
-+		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n", 
-+			operation == READ ? "read" : "write",
-+			preq.sector_number,
-+			preq.sector_number + preq.nr_sects, preq.dev); 
-+		goto fail_flush;
-+	}
-+
-+	for (i = 0; i < nseg; i++) {
-+		if (((int)preq.sector_number|(int)seg[i].nsec) &
-+		    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
-+			DPRINTK("Misaligned I/O request from domain %d",
-+				blkif->domid);
-+			goto fail_put_bio;
-+		}
-+
-+		while ((bio == NULL) ||
-+		       (bio_add_page(bio,
-+				     virt_to_page(vaddr(pending_req, i)),
-+				     seg[i].nsec << 9,
-+				     seg[i].buf & ~PAGE_MASK) == 0)) {
-+			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
-+			if (unlikely(bio == NULL))
-+				goto fail_put_bio;
-+                
-+			bio->bi_bdev    = preq.bdev;
-+			bio->bi_private = pending_req;
-+			bio->bi_end_io  = end_block_io_op;
-+			bio->bi_sector  = preq.sector_number;
-+		}
-+
-+		preq.sector_number += seg[i].nsec;
-+	}
-+
-+	plug_queue(blkif, bio);
-+	atomic_set(&pending_req->pendcnt, nbio);
-+	blkif_get(blkif);
-+
-+	for (i = 0; i < nbio; i++)
-+		submit_bio(operation, biolist[i]);
-+
-+	return;
-+
-+ fail_put_bio:
-+	for (i = 0; i < (nbio-1); i++)
-+		bio_put(biolist[i]);
-+ fail_flush:
-+	fast_flush_area(pending_req);
-+ fail_response:
-+	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-+	free_req(pending_req);
-+} 
-+
-+
-+
-+/******************************************************************
-+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
-+ */
-+
-+
-+static void make_response(blkif_t *blkif, unsigned long id, 
-+                          unsigned short op, int st)
-+{
-+	blkif_response_t *resp;
-+	unsigned long     flags;
-+	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-+	int more_to_do = 0;
-+	int notify;
-+
-+	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-+
-+	/* Place on the response ring for the relevant domain. */ 
-+	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-+	resp->id        = id;
-+	resp->operation = op;
-+	resp->status    = st;
-+	blk_ring->rsp_prod_pvt++;
-+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
-+
-+	if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
-+		/*
-+		 * Tail check for pending requests. Allows frontend to avoid
-+		 * notifications if requests are already in flight (lower
-+		 * overheads and promotes batching).
-+		 */
-+		RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
-+
-+	} else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
-+		more_to_do = 1;
-+
-+	}
-+	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-+
-+	if (more_to_do) {
-+		atomic_inc(&blkif->io_pending);
-+		wake_up(&blkif->wq);
-+	}
-+	if (notify)
-+		notify_remote_via_irq(blkif->irq);
-+}
-+
-+static int __init blkif_init(void)
-+{
-+	struct page *page;
-+	int i;
-+
-+	if (xen_init() < 0)
-+		return -ENODEV;
-+
-+	mmap_pages            = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
-+	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
-+					blkif_reqs, GFP_KERNEL);
-+	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
-+					mmap_pages, GFP_KERNEL);
-+	pending_vaddrs        = kmalloc(sizeof(pending_vaddrs[0]) *
-+					mmap_pages, GFP_KERNEL);
-+	if (!pending_reqs || !pending_grant_handles || !pending_vaddrs) {
-+		kfree(pending_reqs);
-+		kfree(pending_grant_handles);
-+		kfree(pending_vaddrs);
-+		printk("%s: out of memory\n", __FUNCTION__);
-+		return -ENOMEM;
-+	}
-+
-+	blkif_interface_init();
-+	
-+#ifdef __ia64__
-+	extern unsigned long alloc_empty_foreign_map_page_range(
-+		unsigned long pages);
-+	mmap_vstart = (unsigned long)
-+		alloc_empty_foreign_map_page_range(mmap_pages);
-+#else /* ! ia64 */
-+	page = balloon_alloc_empty_page_range(mmap_pages);
-+	BUG_ON(page == NULL);
-+	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-+#endif
-+	printk("%s: reqs=%d, pages=%d, mmap_vstart=0x%lx\n",
-+	       __FUNCTION__, blkif_reqs, mmap_pages, mmap_vstart);
-+	BUG_ON(mmap_vstart == 0);
-+	for (i = 0; i < mmap_pages; i++) {
-+		pending_vaddrs[i] = mmap_vstart + (i << PAGE_SHIFT);
-+		pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
-+	}
-+
-+	memset(pending_reqs, 0, sizeof(pending_reqs));
-+	INIT_LIST_HEAD(&pending_free);
-+
-+	for (i = 0; i < blkif_reqs; i++)
-+		list_add_tail(&pending_reqs[i].free_list, &pending_free);
-+    
-+	blkif_xenbus_init();
-+	return 0;
-+}
-+
-+__initcall(blkif_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
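
In blkback above, do_block_io_op() consumes ring requests only while a pending_req can be taken from a fixed free list; when the pool runs dry it counts an out-of-descriptors event and retries later. A single-threaded userspace sketch of that fixed pool (toy_ names hypothetical; the real code protects the list with pending_free_lock and sleeps on pending_free_wq when empty):

#include <stdio.h>
#include <stddef.h>

#define NR_REQS 4

struct toy_req {
	int id;
	struct toy_req *next_free;
};

static struct toy_req reqs[NR_REQS];
static struct toy_req *free_list;

static void toy_pool_init(void)
{
	int i;

	for (i = 0; i < NR_REQS; i++) {
		reqs[i].next_free = free_list;
		free_list = &reqs[i];
	}
}

/* like alloc_req(): take from the free list, or NULL if exhausted */
static struct toy_req *toy_alloc_req(void)
{
	struct toy_req *req = free_list;

	if (req)
		free_list = req->next_free;
	return req;
}

/* like free_req(): put back; the real driver also wakes the wait queue */
static void toy_free_req(struct toy_req *req)
{
	req->next_free = free_list;
	free_list = req;
}

int main(void)
{
	struct toy_req *r;
	int got = 0;

	toy_pool_init();
	while ((r = toy_alloc_req()) != NULL)
		got++;
	printf("allocated %d of %d requests before the pool ran dry\n",
	       got, NR_REQS);
	toy_free_req(&reqs[0]);
	return 0;
}
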
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/common.h linux-2.6.12-xen/drivers/xen/blkback/common.h
---- pristine-linux-2.6.12/drivers/xen/blkback/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkback/common.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,123 @@
-+
-+#ifndef __BLKIF__BACKEND__COMMON_H__
-+#define __BLKIF__BACKEND__COMMON_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
-+#include <asm/io.h>
-+#include <asm/setup.h>
-+#include <asm/pgalloc.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xen-public/io/blkif.h>
-+#include <asm-xen/xen-public/io/ring.h>
-+#include <asm-xen/gnttab.h>
-+#include <asm-xen/driver_util.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+                                    __FILE__ , __LINE__ , ## _a )
-+
-+struct vbd {
-+	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
-+	unsigned char  readonly;    /* Non-zero -> read-only */
-+	unsigned char  type;        /* VDISK_xxx */
-+	u32            pdevice;     /* phys device that this vbd maps to */
-+	struct block_device *bdev;
-+}; 
-+
-+struct backend_info; 
-+
-+typedef struct blkif_st {
-+	/* Unique identifier for this interface. */
-+	domid_t           domid;
-+	unsigned int      handle;
-+	/* Physical parameters of the comms window. */
-+	unsigned int      evtchn;
-+	unsigned int      irq;
-+	/* Comms information. */
-+	blkif_back_ring_t blk_ring;
-+	struct vm_struct *blk_ring_area;
-+	/* The VBD attached to this interface. */
-+	struct vbd        vbd;
-+	/* Back pointer to the backend_info. */
-+	struct backend_info *be; 
-+	/* Private fields. */
-+	enum { DISCONNECTED, CONNECTED } status;
-+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
-+	/* Is this a blktap frontend */
-+	unsigned int     is_blktap;
-+#endif
-+	spinlock_t       blk_ring_lock;
-+	atomic_t         refcnt;
-+
-+	wait_queue_head_t   wq;
-+	struct task_struct  *xenblkd;
-+	atomic_t            io_pending;
-+	request_queue_t     *plug;
-+
-+	/* statistics */
-+	unsigned long       st_print;
-+	int                 st_rd_req;
-+	int                 st_wr_req;
-+	int                 st_oo_req;
-+
-+	struct work_struct free_work;
-+
-+	grant_handle_t shmem_handle;
-+	grant_ref_t    shmem_ref;
-+} blkif_t;
-+
-+blkif_t *alloc_blkif(domid_t domid);
-+void free_blkif_callback(blkif_t *blkif);
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
-+
-+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define blkif_put(_b)					\
-+	do {						\
-+		if (atomic_dec_and_test(&(_b)->refcnt))	\
-+			free_blkif_callback(_b);	\
-+	} while (0)
-+
-+/* Create a vbd. */
-+int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, unsigned major,
-+	       unsigned minor, int readonly);
-+void vbd_free(struct vbd *vbd);
-+
-+unsigned long vbd_size(struct vbd *vbd);
-+unsigned int vbd_info(struct vbd *vbd);
-+unsigned long vbd_secsize(struct vbd *vbd);
-+
-+struct phys_req {
-+	unsigned short       dev;
-+	unsigned short       nr_sects;
-+	struct block_device *bdev;
-+	blkif_sector_t       sector_number;
-+};
-+
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
-+
-+void blkif_interface_init(void);
-+
-+void blkif_xenbus_init(void);
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+int blkif_schedule(void *arg);
-+
-+void update_blkif_status(blkif_t *blkif); 
-+
-+#endif /* __BLKIF__BACKEND__COMMON_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
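
The blkif_get()/blkif_put() macros above keep a reference count on the interface, and the final put hands the object to free_blkif_callback(), which defers teardown to a workqueue. A single-threaded sketch of the last-put-frees shape (toy_ names hypothetical; the real code uses atomic_t and a work item):

#include <stdio.h>
#include <stdlib.h>

struct toy_blkif {
	int refcnt;
	/* ... interface state ... */
};

static void toy_free(struct toy_blkif *b)
{
	printf("last reference dropped, freeing interface\n");
	free(b);
}

/* mirrors blkif_get()/blkif_put(): the final put triggers the destructor */
static void toy_get(struct toy_blkif *b) { b->refcnt++; }
static void toy_put(struct toy_blkif *b)
{
	if (--b->refcnt == 0)
		toy_free(b);
}

int main(void)
{
	struct toy_blkif *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->refcnt = 1;		/* creator holds the first reference */
	toy_get(b);		/* e.g. the kthread takes one too */
	toy_put(b);		/* kthread exits */
	toy_put(b);		/* creator drops its ref -> toy_free() runs */
	return 0;
}
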
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/interface.c linux-2.6.12-xen/drivers/xen/blkback/interface.c
---- pristine-linux-2.6.12/drivers/xen/blkback/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkback/interface.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,164 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/interface.c
-+ * 
-+ * Block-device interface management.
-+ * 
-+ * Copyright (c) 2004, Keir Fraser
-+ */
-+
-+#include "common.h"
-+#include <asm-xen/evtchn.h>
-+
-+static kmem_cache_t *blkif_cachep;
-+
-+blkif_t *alloc_blkif(domid_t domid)
-+{
-+	blkif_t *blkif;
-+
-+	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
-+	if (!blkif)
-+		return ERR_PTR(-ENOMEM);
-+
-+	memset(blkif, 0, sizeof(*blkif));
-+	blkif->domid = domid;
-+	blkif->status = DISCONNECTED;
-+	spin_lock_init(&blkif->blk_ring_lock);
-+	atomic_set(&blkif->refcnt, 1);
-+	init_waitqueue_head(&blkif->wq);
-+	blkif->st_print = jiffies;
-+
-+	return blkif;
-+}
-+
-+static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
-+{
-+	struct gnttab_map_grant_ref op;
-+	int ret;
-+
-+	op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
-+	op.flags     = GNTMAP_host_map;
-+	op.ref       = shared_page;
-+	op.dom       = blkif->domid;
-+
-+	lock_vm_area(blkif->blk_ring_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+	unlock_vm_area(blkif->blk_ring_area);
-+	BUG_ON(ret);
-+
-+	if (op.status) {
-+		DPRINTK(" Grant table operation failure !\n");
-+		return op.status;
-+	}
-+
-+	blkif->shmem_ref = shared_page;
-+	blkif->shmem_handle = op.handle;
-+
-+#ifdef __ia64__
-+	/* on some arch's, map_grant_ref behaves like mmap, in that the
-+	 * passed address is a hint and a different address may be returned */
-+	blkif->blk_ring_area->addr = gnttab_map_vaddr(op);
-+#endif
-+
-+	return 0;
-+}
-+
-+static void unmap_frontend_page(blkif_t *blkif)
-+{
-+	struct gnttab_unmap_grant_ref op;
-+	int ret;
-+
-+	op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
-+	op.handle       = blkif->shmem_handle;
-+	op.dev_bus_addr = 0;
-+
-+	lock_vm_area(blkif->blk_ring_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+	unlock_vm_area(blkif->blk_ring_area);
-+	BUG_ON(ret);
-+}
-+
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
-+{
-+	blkif_sring_t *sring;
-+	int err;
-+	evtchn_op_t op = {
-+		.cmd = EVTCHNOP_bind_interdomain,
-+		.u.bind_interdomain.remote_dom = blkif->domid,
-+		.u.bind_interdomain.remote_port = evtchn };
-+
-+	/* Already connected through? */
-+	if (blkif->irq)
-+		return 0;
-+
-+	if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
-+		return -ENOMEM;
-+
-+	err = map_frontend_page(blkif, shared_page);
-+	if (err) {
-+		free_vm_area(blkif->blk_ring_area);
-+		return err;
-+	}
-+
-+	err = HYPERVISOR_event_channel_op(&op);
-+	if (err) {
-+		unmap_frontend_page(blkif);
-+		free_vm_area(blkif->blk_ring_area);
-+		return err;
-+	}
-+
-+	blkif->evtchn = op.u.bind_interdomain.local_port;
-+
-+	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-+	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
-+
-+	blkif->irq = bind_evtchn_to_irqhandler(
-+		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
-+
-+	/* We're potentially connected now */
-+	update_blkif_status(blkif); 
-+
-+	return 0;
-+}
-+
-+static void free_blkif(void *arg)
-+{
-+	blkif_t *blkif = (blkif_t *)arg;
-+
-+	/* Already disconnected? */
-+	if (blkif->irq) {
-+		unbind_from_irqhandler(blkif->irq, blkif);
-+		blkif->irq = 0;
-+	}
-+
-+	vbd_free(&blkif->vbd);
-+
-+	if (blkif->blk_ring.sring) {
-+		unmap_frontend_page(blkif);
-+		free_vm_area(blkif->blk_ring_area);
-+		blkif->blk_ring.sring = NULL;
-+	}
-+
-+	kmem_cache_free(blkif_cachep, blkif);
-+}
-+
-+void free_blkif_callback(blkif_t *blkif)
-+{
-+	INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
-+	schedule_work(&blkif->free_work);
-+}
-+
-+void __init blkif_interface_init(void)
-+{
-+	blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t), 
-+					 0, 0, NULL, NULL);
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/Makefile linux-2.6.12-xen/drivers/xen/blkback/Makefile
---- pristine-linux-2.6.12/drivers/xen/blkback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkback/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+obj-y	:= blkback.o xenbus.o interface.o vbd.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/vbd.c linux-2.6.12-xen/drivers/xen/blkback/vbd.c
---- pristine-linux-2.6.12/drivers/xen/blkback/vbd.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkback/vbd.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,102 @@
-+/******************************************************************************
-+ * blkback/vbd.c
-+ * 
-+ * Routines for managing virtual block devices (VBDs).
-+ * 
-+ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
-+ */
-+
-+#include "common.h"
-+#include <asm-xen/xenbus.h>
-+
-+#define vbd_sz(_v)   ((_v)->bdev->bd_part ?				\
-+	(_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
-+
-+unsigned long vbd_size(struct vbd *vbd)
-+{
-+	return vbd_sz(vbd);
-+}
-+
-+unsigned int vbd_info(struct vbd *vbd)
-+{
-+	return vbd->type | (vbd->readonly?VDISK_READONLY:0);
-+}
-+
-+unsigned long vbd_secsize(struct vbd *vbd)
-+{
-+	return bdev_hardsect_size(vbd->bdev);
-+}
-+
-+int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major,
-+	       unsigned minor, int readonly)
-+{
-+	struct vbd *vbd;
-+
-+	vbd = &blkif->vbd;
-+	vbd->handle   = handle; 
-+	vbd->readonly = readonly;
-+	vbd->type     = 0;
-+
-+	vbd->pdevice  = MKDEV(major, minor);
-+
-+	vbd->bdev = open_by_devnum(
-+		vbd->pdevice,
-+		vbd->readonly ? FMODE_READ : FMODE_WRITE);
-+	if (IS_ERR(vbd->bdev)) {
-+		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
-+			vbd->pdevice);
-+		return -ENOENT;
-+	}
-+
-+	if (vbd->bdev->bd_disk == NULL) {
-+		DPRINTK("vbd_creat: device %08x doesn't exist.\n",
-+			vbd->pdevice);
-+		vbd_free(vbd);
-+		return -ENOENT;
-+	}
-+
-+	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD)
-+		vbd->type |= VDISK_CDROM;
-+	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
-+		vbd->type |= VDISK_REMOVABLE;
-+
-+	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
-+		handle, blkif->domid);
-+	return 0;
-+}
-+
-+void vbd_free(struct vbd *vbd)
-+{
-+	if (vbd->bdev)
-+		blkdev_put(vbd->bdev);
-+	vbd->bdev = NULL;
-+}
-+
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
-+{
-+	struct vbd *vbd = &blkif->vbd;
-+	int rc = -EACCES;
-+
-+	if ((operation == WRITE) && vbd->readonly)
-+		goto out;
-+
-+	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
-+		goto out;
-+
-+	req->dev  = vbd->pdevice;
-+	req->bdev = vbd->bdev;
-+	rc = 0;
-+
-+ out:
-+	return rc;
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
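
vbd_translate() above rejects writes to read-only devices and any request that runs past the end of the backing device before filling in the physical device. A standalone sketch of those two checks (toy_ names and the READ/WRITE defines are hypothetical stand-ins):

#include <stdio.h>
#include <errno.h>

#define TOY_READ  0
#define TOY_WRITE 1

struct toy_vbd {
	unsigned long nr_sects;	/* size of the backing device in sectors */
	int readonly;
};

/* mirrors vbd_translate(): refuse writes to r/o devices and out-of-range I/O */
static int toy_translate(const struct toy_vbd *vbd,
			 unsigned long sector, unsigned long nr_sects,
			 int operation)
{
	if (operation == TOY_WRITE && vbd->readonly)
		return -EACCES;
	if (sector + nr_sects > vbd->nr_sects)
		return -EACCES;
	return 0;
}

int main(void)
{
	struct toy_vbd vbd = { .nr_sects = 2048, .readonly = 1 };

	printf("write to r/o vbd: %d\n", toy_translate(&vbd, 0, 8, TOY_WRITE));
	printf("read past end:    %d\n", toy_translate(&vbd, 2040, 16, TOY_READ));
	printf("valid read:       %d\n", toy_translate(&vbd, 2040, 8, TOY_READ));
	return 0;
}
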
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkback/xenbus.c linux-2.6.12-xen/drivers/xen/blkback/xenbus.c
---- pristine-linux-2.6.12/drivers/xen/blkback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkback/xenbus.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,419 @@
-+/*  Xenbus code for blkif backend
-+    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
-+    Copyright (C) 2005 XenSource Ltd
-+
-+    This program is free software; you can redistribute it and/or modify
-+    it under the terms of the GNU General Public License as published by
-+    the Free Software Foundation; either version 2 of the License, or
-+    (at your option) any later version.
-+
-+    This program is distributed in the hope that it will be useful,
-+    but WITHOUT ANY WARRANTY; without even the implied warranty of
-+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+    GNU General Public License for more details.
-+
-+    You should have received a copy of the GNU General Public License
-+    along with this program; if not, write to the Free Software
-+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+*/
-+
-+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <linux/kthread.h>
-+#include <asm-xen/xenbus.h>
-+#include "common.h"
-+
-+#undef DPRINTK
-+#define DPRINTK(fmt, args...) \
-+    pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+
-+
-+struct backend_info
-+{
-+	struct xenbus_device *dev;
-+	blkif_t *blkif;
-+	struct xenbus_watch backend_watch;
-+
-+	unsigned major;
-+	unsigned minor;
-+	char *mode;
-+};
-+
-+
-+static void maybe_connect(struct backend_info *);
-+static void connect(struct backend_info *);
-+static int connect_ring(struct backend_info *);
-+static void backend_changed(struct xenbus_watch *, const char **,
-+			    unsigned int);
-+
-+
-+void update_blkif_status(blkif_t *blkif)
-+{ 
-+	if(blkif->irq && blkif->vbd.bdev) {
-+		blkif->status = CONNECTED; 
-+		(void)blkif_be_int(0, blkif, NULL); 
-+	}
-+	maybe_connect(blkif->be); 
-+}
-+
-+
-+static ssize_t show_physical_device(struct device *_dev, char *buf)
-+{
-+	struct xenbus_device *dev = to_xenbus_device(_dev);
-+	struct backend_info *be = dev->data;
-+	return sprintf(buf, "%x:%x\n", be->major, be->minor);
-+}
-+DEVICE_ATTR(physical_device, S_IRUSR | S_IRGRP | S_IROTH,
-+	    show_physical_device, NULL);
-+
-+
-+static ssize_t show_mode(struct device *_dev, char *buf)
-+{
-+	struct xenbus_device *dev = to_xenbus_device(_dev);
-+	struct backend_info *be = dev->data;
-+	return sprintf(buf, "%s\n", be->mode);
-+}
-+DEVICE_ATTR(mode, S_IRUSR | S_IRGRP | S_IROTH, show_mode, NULL);
-+
-+
-+static int blkback_remove(struct xenbus_device *dev)
-+{
-+	struct backend_info *be = dev->data;
-+
-+	DPRINTK("");
-+
-+	if (be->backend_watch.node) {
-+		unregister_xenbus_watch(&be->backend_watch);
-+		kfree(be->backend_watch.node);
-+		be->backend_watch.node = NULL;
-+	}
-+	if (be->blkif) {
-+		be->blkif->status = DISCONNECTED; 
-+		if (be->blkif->xenblkd)
-+			kthread_stop(be->blkif->xenblkd);
-+		blkif_put(be->blkif);
-+		be->blkif = NULL;
-+	}
-+
-+	device_remove_file(&dev->dev, &dev_attr_physical_device);
-+	device_remove_file(&dev->dev, &dev_attr_mode);
-+
-+	kfree(be);
-+	dev->data = NULL;
-+	return 0;
-+}
-+
-+
-+/**
-+ * Entry point to this code when a new device is created.  Allocate the basic
-+ * structures, and watch the store waiting for the hotplug scripts to tell us
-+ * the device's physical major and minor numbers.  Switch to InitWait.
-+ */
-+static int blkback_probe(struct xenbus_device *dev,
-+			 const struct xenbus_device_id *id)
-+{
-+	int err;
-+	struct backend_info *be = kmalloc(sizeof(struct backend_info),
-+					  GFP_KERNEL);
-+	if (!be) {
-+		xenbus_dev_fatal(dev, -ENOMEM,
-+				 "allocating backend structure");
-+		return -ENOMEM;
-+	}
-+	memset(be, 0, sizeof(*be));
-+
-+	be->dev = dev;
-+	dev->data = be;
-+
-+	be->blkif = alloc_blkif(dev->otherend_id);
-+	if (IS_ERR(be->blkif)) {
-+		err = PTR_ERR(be->blkif);
-+		be->blkif = NULL;
-+		xenbus_dev_fatal(dev, err, "creating block interface");
-+		goto fail;
-+	}
-+
-+	/* setup back pointer */
-+	be->blkif->be = be; 
-+
-+	err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
-+				 &be->backend_watch, backend_changed);
-+	if (err)
-+		goto fail;
-+
-+	err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
-+	if (err)
-+		goto fail;
-+
-+	return 0;
-+
-+fail:
-+	DPRINTK("failed");
-+	blkback_remove(dev);
-+	return err;
-+}
-+
-+
-+/**
-+ * Callback received when the hotplug scripts have placed the physical-device
-+ * node.  Read it and the mode node, and create a vbd.  If the frontend is
-+ * ready, connect.
-+ */
-+static void backend_changed(struct xenbus_watch *watch,
-+			    const char **vec, unsigned int len)
-+{
-+	int err;
-+	unsigned major;
-+	unsigned minor;
-+	struct backend_info *be
-+		= container_of(watch, struct backend_info, backend_watch);
-+	struct xenbus_device *dev = be->dev;
-+
-+	DPRINTK("");
-+
-+	err = xenbus_scanf(XBT_NULL, dev->nodename, "physical-device", "%x:%x",
-+			   &major, &minor);
-+	if (XENBUS_EXIST_ERR(err)) {
-+		/* Since this watch will fire once immediately after it is
-+		   registered, we expect this.  Ignore it, and wait for the
-+		   hotplug scripts. */
-+		return;
-+	}
-+	if (err != 2) {
-+		xenbus_dev_fatal(dev, err, "reading physical-device");
-+		return;
-+	}
-+
-+	if (be->major && be->minor &&
-+	    (be->major != major || be->minor != minor)) {
-+		printk(KERN_WARNING
-+		       "blkback: changing physical device (from %x:%x to "
-+		       "%x:%x) not supported.\n", be->major, be->minor,
-+		       major, minor);
-+		return;
-+	}
-+
-+	be->mode = xenbus_read(XBT_NULL, dev->nodename, "mode", NULL);
-+	if (IS_ERR(be->mode)) {
-+		err = PTR_ERR(be->mode);
-+		be->mode = NULL;
-+		xenbus_dev_fatal(dev, err, "reading mode");
-+		return;
-+	}
-+
-+	if (be->major == 0 && be->minor == 0) {
-+		/* Front end dir is a number, which is used as the handle. */
-+
-+		char *p = strrchr(dev->otherend, '/') + 1;
-+		long handle = simple_strtoul(p, NULL, 0);
-+
-+		be->major = major;
-+		be->minor = minor;
-+
-+		err = vbd_create(be->blkif, handle, major, minor,
-+				 (NULL == strchr(be->mode, 'w')));
-+		if (err) {
-+			be->major = 0;
-+			be->minor = 0;
-+			xenbus_dev_fatal(dev, err, "creating vbd structure");
-+			return;
-+		}
-+
-+		be->blkif->xenblkd = kthread_run(blkif_schedule, be->blkif,
-+						 "xvd %d %02x:%02x",
-+						 be->blkif->domid,
-+						 be->major, be->minor);
-+		if (IS_ERR(be->blkif->xenblkd)) {
-+			err = PTR_ERR(be->blkif->xenblkd);
-+			be->blkif->xenblkd = NULL;
-+			xenbus_dev_error(dev, err, "start xenblkd");
-+			return;
-+		}
-+
-+		device_create_file(&dev->dev, &dev_attr_physical_device);
-+		device_create_file(&dev->dev, &dev_attr_mode);
-+
-+		/* We're potentially connected now */
-+		update_blkif_status(be->blkif); 
-+	}
-+}
-+
-+
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void frontend_changed(struct xenbus_device *dev,
-+			     XenbusState frontend_state)
-+{
-+	struct backend_info *be = dev->data;
-+	int err;
-+
-+	DPRINTK("");
-+
-+	switch (frontend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateConnected:
-+		break;
-+
-+	case XenbusStateInitialised:
-+		err = connect_ring(be);
-+		if (err) {
-+			return;
-+		}
-+		update_blkif_status(be->blkif); 
-+		break;
-+
-+	case XenbusStateClosing:
-+		xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
-+		break;
-+
-+	case XenbusStateClosed:
-+		device_unregister(&dev->dev);
-+		break;
-+
-+	case XenbusStateUnknown:
-+	case XenbusStateInitWait:
-+	default:
-+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+				 frontend_state);
-+		break;
-+	}
-+}
-+
-+
-+/* ** Connection ** */
-+
-+
-+static void maybe_connect(struct backend_info *be)
-+{
-+	if ((be->major != 0 || be->minor != 0) &&
-+	    be->blkif->status == CONNECTED)
-+		connect(be);
-+}
-+
-+
-+/**
-+ * Write the physical details regarding the block device to the store, and
-+ * switch to Connected state.
-+ */
-+static void connect(struct backend_info *be)
-+{
-+	xenbus_transaction_t xbt;
-+	int err;
-+	struct xenbus_device *dev = be->dev;
-+
-+	DPRINTK("%s", dev->otherend);
-+
-+	/* Supply the information about the device the frontend needs */
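-+	/* All of the keys below are written within a single xenbus
-+	   transaction; on -EAGAIN the whole block is retried, so the
-+	   frontend never observes a partial set. */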
-+again:
-+	err = xenbus_transaction_start(&xbt);
-+
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "starting transaction");
-+		return;
-+	}
-+
-+	err = xenbus_printf(xbt, dev->nodename, "sectors", "%lu",
-+			    vbd_size(&be->blkif->vbd));
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "writing %s/sectors",
-+				 dev->nodename);
-+		goto abort;
-+	}
-+
-+	/* FIXME: use a typename instead */
-+	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
-+			    vbd_info(&be->blkif->vbd));
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "writing %s/info",
-+				 dev->nodename);
-+		goto abort;
-+	}
-+	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
-+			    vbd_secsize(&be->blkif->vbd));
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
-+				 dev->nodename);
-+		goto abort;
-+	}
-+
-+	err = xenbus_switch_state(dev, xbt, XenbusStateConnected);
-+	if (err)
-+		goto abort;
-+
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN)
-+		goto again;
-+	if (err)
-+		xenbus_dev_fatal(dev, err, "ending transaction");
-+	return;
-+ abort:
-+	xenbus_transaction_end(xbt, 1);
-+}
-+
-+
-+static int connect_ring(struct backend_info *be)
-+{
-+	struct xenbus_device *dev = be->dev;
-+	unsigned long ring_ref;
-+	unsigned int evtchn;
-+	int err;
-+
-+	DPRINTK("%s", dev->otherend);
-+
-+	err = xenbus_gather(XBT_NULL, dev->otherend, "ring-ref", "%lu", &ring_ref,
-+			    "event-channel", "%u", &evtchn, NULL);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err,
-+				 "reading %s/ring-ref and event-channel",
-+				 dev->otherend);
-+		return err;
-+	}
-+
-+	/* Map the shared frame, irq etc. */
-+	err = blkif_map(be->blkif, ring_ref, evtchn);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-+				 ring_ref, evtchn);
-+		return err;
-+	}
-+
-+	return 0;
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id blkback_ids[] = {
-+	{ "vbd" },
-+	{ "" }
-+};
-+
-+
-+static struct xenbus_driver blkback = {
-+	.name = "vbd",
-+	.owner = THIS_MODULE,
-+	.ids = blkback_ids,
-+	.probe = blkback_probe,
-+	.remove = blkback_remove,
-+	.otherend_changed = frontend_changed
-+};
-+
-+
-+void blkif_xenbus_init(void)
-+{
-+	xenbus_register_backend(&blkback);
-+}
-+
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkfront/blkfront.c linux-2.6.12-xen/drivers/xen/blkfront/blkfront.c
---- pristine-linux-2.6.12/drivers/xen/blkfront/blkfront.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkfront/blkfront.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,834 @@
-+/******************************************************************************
-+ * blkfront.c
-+ * 
-+ * XenLinux virtual block-device driver.
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004, Christian Limpach
-+ * Copyright (c) 2004, Andrew Warfield
-+ * Copyright (c) 2005, Christopher Clark
-+ * Copyright (c) 2005, XenSource Ltd
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/version.h>
-+#include "block.h"
-+#include <linux/cdrom.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <scsi/scsi.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/xenbus.h>
-+#include <asm-xen/xen-public/grant_table.h>
-+#include <asm-xen/gnttab.h>
-+#include <asm/hypervisor.h>
-+
-+#define BLKIF_STATE_DISCONNECTED 0
-+#define BLKIF_STATE_CONNECTED    1
-+#define BLKIF_STATE_SUSPENDED    2
-+
-+#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
-+    (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
-+#define GRANT_INVALID_REF	0
-+
-+static void connect(struct blkfront_info *);
-+static void blkfront_closing(struct xenbus_device *);
-+static int blkfront_remove(struct xenbus_device *);
-+static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
-+static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
-+
-+static void kick_pending_request_queues(struct blkfront_info *);
-+
-+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
-+static void blkif_restart_queue(void *arg);
-+static void blkif_recover(struct blkfront_info *);
-+static void blkif_completion(struct blk_shadow *);
-+static void blkif_free(struct blkfront_info *, int);
-+
-+
-+/**
-+ * Entry point to this code when a new device is created.  Allocate the basic
-+ * structures and the ring buffer for communication with the backend, and
-+ * inform the backend of the appropriate details for those.  Switch to
-+ * Initialised state.
-+ */
-+static int blkfront_probe(struct xenbus_device *dev,
-+			  const struct xenbus_device_id *id)
-+{
-+	int err, vdevice, i;
-+	struct blkfront_info *info;
-+
-+	/* FIXME: Use dynamic device id if this is not set. */
-+	err = xenbus_scanf(XBT_NULL, dev->nodename,
-+			   "virtual-device", "%i", &vdevice);
-+	if (err != 1) {
-+		xenbus_dev_fatal(dev, err, "reading virtual-device");
-+		return err;
-+	}
-+
-+	info = kmalloc(sizeof(*info), GFP_KERNEL);
-+	if (!info) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
-+		return -ENOMEM;
-+	}
-+	info->xbdev = dev;
-+	info->vdevice = vdevice;
-+	info->connected = BLKIF_STATE_DISCONNECTED;
-+	info->mi = NULL;
-+	info->gd = NULL;
-+	INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
-+
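-+	/* The shadow entries form a free list threaded through their req.id
-+	   fields; the final entry carries a sentinel value. */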
-+	info->shadow_free = 0;
-+	memset(info->shadow, 0, sizeof(info->shadow));
-+	for (i = 0; i < BLK_RING_SIZE; i++)
-+		info->shadow[i].req.id = i+1;
-+	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
-+
-+	info->users = 0;
-+
-+	/* Front end dir is a number, which is used as the id. */
-+	info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
-+	dev->data = info;
-+
-+	err = talk_to_backend(dev, info);
-+	if (err) {
-+		kfree(info);
-+		dev->data = NULL;
-+		return err;
-+	}
-+
-+	return 0;
-+}
-+
-+
-+/**
-+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
-+ * driver restart.  We tear down our blkif structure and recreate it, but
-+ * leave the device-layer structures intact so that this is transparent to the
-+ * rest of the kernel.
-+ */
-+static int blkfront_resume(struct xenbus_device *dev)
-+{
-+	struct blkfront_info *info = dev->data;
-+	int err;
-+
-+	DPRINTK("blkfront_resume: %s\n", dev->nodename);
-+
-+	blkif_free(info, 1);
-+
-+	err = talk_to_backend(dev, info);
-+	if (!err)
-+		blkif_recover(info);
-+
-+	return err;
-+}
-+
-+
-+/* Common code used when first setting up, and when resuming. */
-+static int talk_to_backend(struct xenbus_device *dev,
-+			   struct blkfront_info *info)
-+{
-+	const char *message = NULL;
-+	xenbus_transaction_t xbt;
-+	int err;
-+
-+	/* Create shared ring, alloc event channel. */
-+	err = setup_blkring(dev, info);
-+	if (err)
-+		goto out;
-+
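-+	/* Publish ring-ref and event-channel in one transaction, retrying
-+	   on -EAGAIN, so the backend sees both keys together. */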
-+again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "starting transaction");
-+		goto destroy_blkring;
-+	}
-+
-+	err = xenbus_printf(xbt, dev->nodename,
-+			    "ring-ref","%u", info->ring_ref);
-+	if (err) {
-+		message = "writing ring-ref";
-+		goto abort_transaction;
-+	}
-+	err = xenbus_printf(xbt, dev->nodename,
-+			    "event-channel", "%u", info->evtchn);
-+	if (err) {
-+		message = "writing event-channel";
-+		goto abort_transaction;
-+	}
-+
-+	err = xenbus_switch_state(dev, xbt, XenbusStateInitialised);
-+	if (err)
-+		goto abort_transaction;
-+
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err) {
-+		if (err == -EAGAIN)
-+			goto again;
-+		xenbus_dev_fatal(dev, err, "completing transaction");
-+		goto destroy_blkring;
-+	}
-+
-+	return 0;
-+
-+ abort_transaction:
-+	xenbus_transaction_end(xbt, 1);
-+	if (message)
-+		xenbus_dev_fatal(dev, err, "%s", message);
-+ destroy_blkring:
-+	blkif_free(info, 0);
-+ out:
-+	return err;
-+}
-+
-+
-+static int setup_blkring(struct xenbus_device *dev,
-+			 struct blkfront_info *info)
-+{
-+	blkif_sring_t *sring;
-+	int err;
-+
-+	info->ring_ref = GRANT_INVALID_REF;
-+
-+	sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
-+	if (!sring) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-+		return -ENOMEM;
-+	}
-+	SHARED_RING_INIT(sring);
-+	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
-+
-+	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
-+	if (err < 0) {
-+		free_page((unsigned long)sring);
-+		info->ring.sring = NULL;
-+		goto fail;
-+	}
-+	info->ring_ref = err;
-+
-+	err = xenbus_alloc_evtchn(dev, &info->evtchn);
-+	if (err)
-+		goto fail;
-+
-+	err = bind_evtchn_to_irqhandler(
-+		info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
-+	if (err <= 0) {
-+		xenbus_dev_fatal(dev, err,
-+				 "bind_evtchn_to_irqhandler failed");
-+		goto fail;
-+	}
-+	info->irq = err;
-+
-+	return 0;
-+fail:
-+	blkif_free(info, 0);
-+	return err;
-+}
-+
-+
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+			    XenbusState backend_state)
-+{
-+	struct blkfront_info *info = dev->data;
-+	struct block_device *bd;
-+
-+	DPRINTK("blkfront:backend_changed.\n");
-+
-+	switch (backend_state) {
-+	case XenbusStateUnknown:
-+	case XenbusStateInitialising:
-+	case XenbusStateInitWait:
-+	case XenbusStateInitialised:
-+	case XenbusStateClosed:
-+		break;
-+
-+	case XenbusStateConnected:
-+		connect(info);
-+		break;
-+
-+	case XenbusStateClosing:
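-+		/* Only tear the device down once nobody holds it open;
-+		   blkif_release() completes the close when the last user
-+		   goes away. */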
-+		bd = bdget(info->dev);
-+		if (bd == NULL)
-+			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
-+
-+		down(&bd->bd_sem);
-+		if (info->users > 0)
-+			xenbus_dev_error(dev, -EBUSY,
-+					 "Device in use; refusing to close");
-+		else
-+			blkfront_closing(dev);
-+		up(&bd->bd_sem);
-+		bdput(bd);
-+		break;
-+	}
-+}
-+
-+
-+/* ** Connection ** */
-+
-+
-+/* 
-+** Invoked when the backend is finally 'ready' (and has produced the
-+** details about the physical device - #sectors, size, etc).
-+*/
-+static void connect(struct blkfront_info *info)
-+{
-+	unsigned long sectors, sector_size;
-+	unsigned int binfo;
-+	int err;
-+
-+        if( (info->connected == BLKIF_STATE_CONNECTED) || 
-+	    (info->connected == BLKIF_STATE_SUSPENDED) )
-+		return;
-+
-+	DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);
-+
-+	err = xenbus_gather(XBT_NULL, info->xbdev->otherend,
-+			    "sectors", "%lu", &sectors,
-+			    "info", "%u", &binfo,
-+			    "sector-size", "%lu", &sector_size,
-+			    NULL);
-+	if (err) {
-+		xenbus_dev_fatal(info->xbdev, err,
-+				 "reading backend fields at %s",
-+				 info->xbdev->otherend);
-+		return;
-+	}
-+
-+	err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
-+	if (err) {
-+		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
-+		                 info->xbdev->otherend);
-+		return;
-+	}
-+
-+	(void)xenbus_switch_state(info->xbdev, XBT_NULL, XenbusStateConnected); 
-+
-+	/* Kick pending requests. */
-+	spin_lock_irq(&blkif_io_lock);
-+	info->connected = BLKIF_STATE_CONNECTED;
-+	kick_pending_request_queues(info);
-+	spin_unlock_irq(&blkif_io_lock);
-+
-+	add_disk(info->gd);
-+}
-+
-+/**
-+ * Handle the change of state of the backend to Closing.  We must delete our
-+ * device-layer structures now, to ensure that writes are flushed through to
-+ * the backend.  Once this is done, we can switch to Closed in
-+ * acknowledgement.
-+ */
-+static void blkfront_closing(struct xenbus_device *dev)
-+{
-+	struct blkfront_info *info = dev->data;
-+
-+	DPRINTK("blkfront_closing: %s removed\n", dev->nodename);
-+
-+	if (info->mi) {
-+		DPRINTK("Calling xlvbd_del\n");
-+		xlvbd_del(info);
-+		info->mi = NULL;
-+	}
-+
-+	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
-+}
-+
-+
-+static int blkfront_remove(struct xenbus_device *dev)
-+{
-+	struct blkfront_info *info = dev->data;
-+
-+	DPRINTK("blkfront_remove: %s removed\n", dev->nodename);
-+
-+	blkif_free(info, 0);
-+
-+	kfree(info);
-+
-+	return 0;
-+}
-+
-+
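-+/*
-+ * Free shadow slots are chained through their req.id fields:
-+ * GET_ID_FROM_FREELIST pops the head of the chain and ADD_ID_TO_FREELIST
-+ * pushes a slot back onto it.
-+ */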
-+static inline int GET_ID_FROM_FREELIST(
-+	struct blkfront_info *info)
-+{
-+	unsigned long free = info->shadow_free;
-+	BUG_ON(free > BLK_RING_SIZE);
-+	info->shadow_free = info->shadow[free].req.id;
-+	info->shadow[free].req.id = 0x0fffffee; /* debug */
-+	return free;
-+}
-+
-+static inline void ADD_ID_TO_FREELIST(
-+	struct blkfront_info *info, unsigned long id)
-+{
-+	info->shadow[id].req.id  = info->shadow_free;
-+	info->shadow[id].request = 0;
-+	info->shadow_free = id;
-+}
-+
-+static inline void flush_requests(struct blkfront_info *info)
-+{
-+	int notify;
-+
-+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
-+
-+	if (notify)
-+		notify_remote_via_irq(info->irq);
-+}
-+
-+static void kick_pending_request_queues(struct blkfront_info *info)
-+{
-+	if (!RING_FULL(&info->ring)) {
-+		/* Re-enable calldowns. */
-+		blk_start_queue(info->rq);
-+		/* Kick things off immediately. */
-+		do_blkif_request(info->rq);
-+	}
-+}
-+
-+static void blkif_restart_queue(void *arg)
-+{
-+	struct blkfront_info *info = (struct blkfront_info *)arg;
-+	spin_lock_irq(&blkif_io_lock);
-+	kick_pending_request_queues(info);
-+	spin_unlock_irq(&blkif_io_lock);
-+}
-+
-+static void blkif_restart_queue_callback(void *arg)
-+{
-+	struct blkfront_info *info = (struct blkfront_info *)arg;
-+	schedule_work(&info->work);
-+}
-+
-+int blkif_open(struct inode *inode, struct file *filep)
-+{
-+	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
-+	info->users++;
-+	return 0;
-+}
-+
-+
-+int blkif_release(struct inode *inode, struct file *filep)
-+{
-+	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
-+	info->users--;
-+	if (info->users == 0) {
-+		/* Check whether we have been instructed to close.  We will
-+		   have ignored this request initially, as the device was
-+		   still mounted. */
-+		struct xenbus_device * dev = info->xbdev;
-+		XenbusState state = xenbus_read_driver_state(dev->otherend);
-+
-+		if (state == XenbusStateClosing)
-+			blkfront_closing(dev);
-+	}
-+	return 0;
-+}
-+
-+
-+int blkif_ioctl(struct inode *inode, struct file *filep,
-+                unsigned command, unsigned long argument)
-+{
-+	int i;
-+
-+	DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
-+		      command, (long)argument, inode->i_rdev);
-+
-+	switch ( command )
-+	{
-+	case HDIO_GETGEO:
-+		/* return ENOSYS to use defaults */
-+		return -ENOSYS;
-+
-+	case CDROMMULTISESSION:
-+		DPRINTK("FIXME: support multisession CDs later\n");
-+		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
-+			if (put_user(0, (char __user *)(argument + i)))
-+				return -EFAULT;
-+		return 0;
-+
-+	default:
-+		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
-+		  command);*/
-+		return -EINVAL; /* same return as native Linux */
-+	}
-+
-+	return 0;
-+}
-+
-+
-+/*
-+ * blkif_queue_request
-+ *
-+ * request block io
-+ * 
-+ * id: for guest use only.
-+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
-+ * buffer: buffer to read/write into. this should be a
-+ *   virtual address in the guest os.
-+ */
-+static int blkif_queue_request(struct request *req)
-+{
-+	struct blkfront_info *info = req->rq_disk->private_data;
-+	unsigned long buffer_mfn;
-+	blkif_request_t *ring_req;
-+	struct bio *bio;
-+	struct bio_vec *bvec;
-+	int idx;
-+	unsigned long id;
-+	unsigned int fsect, lsect;
-+	int ref;
-+	grant_ref_t gref_head;
-+
-+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
-+		return 1;
-+
-+	if (gnttab_alloc_grant_references(
-+		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
-+		gnttab_request_free_callback(
-+			&info->callback,
-+			blkif_restart_queue_callback,
-+			info,
-+			BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+		return 1;
-+	}
-+
-+	/* Fill out a communications ring structure. */
-+	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
-+	id = GET_ID_FROM_FREELIST(info);
-+	info->shadow[id].request = (unsigned long)req;
-+
-+	ring_req->id = id;
-+	ring_req->operation = rq_data_dir(req) ?
-+		BLKIF_OP_WRITE : BLKIF_OP_READ;
-+	ring_req->sector_number = (blkif_sector_t)req->sector;
-+	ring_req->handle = info->handle;
-+
-+	ring_req->nr_segments = 0;
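-+	/* Grant the backend access to each data frame in the request, one
-+	   segment per bio_vec, and record the pfn so the grant can be
-+	   re-established after suspend/resume. */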
-+	rq_for_each_bio (bio, req) {
-+		bio_for_each_segment (bvec, bio, idx) {
-+			BUG_ON(ring_req->nr_segments
-+			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+			buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
-+			fsect = bvec->bv_offset >> 9;
-+			lsect = fsect + (bvec->bv_len >> 9) - 1;
-+			/* install a grant reference. */
-+			ref = gnttab_claim_grant_reference(&gref_head);
-+			BUG_ON(ref == -ENOSPC);
-+
-+			gnttab_grant_foreign_access_ref(
-+				ref,
-+				info->xbdev->otherend_id,
-+				buffer_mfn,
-+				rq_data_dir(req) );
-+
-+			info->shadow[id].frame[ring_req->nr_segments] =
-+				mfn_to_pfn(buffer_mfn);
-+
-+			ring_req->seg[ring_req->nr_segments] =
-+				(struct blkif_request_segment) {
-+					.gref       = ref,
-+					.first_sect = fsect, 
-+					.last_sect  = lsect };
-+
-+			ring_req->nr_segments++;
-+		}
-+	}
-+
-+	info->ring.req_prod_pvt++;
-+
-+	/* Keep a private copy so we can reissue requests when recovering. */
-+	info->shadow[id].req = *ring_req;
-+
-+	gnttab_free_grant_references(gref_head);
-+
-+	return 0;
-+}
-+
-+/*
-+ * do_blkif_request
-+ *  read a block; request is in a request queue
-+ */
-+void do_blkif_request(request_queue_t *rq)
-+{
-+	struct blkfront_info *info = NULL;
-+	struct request *req;
-+	int queued;
-+
-+	DPRINTK("Entered do_blkif_request\n");
-+
-+	queued = 0;
-+
-+	while ((req = elv_next_request(rq)) != NULL) {
-+		info = req->rq_disk->private_data;
-+		if (!blk_fs_request(req)) {
-+			end_request(req, 0);
-+			continue;
-+		}
-+
-+		if (RING_FULL(&info->ring))
-+			goto wait;
-+
-+		DPRINTK("do_blk_req %p: cmd %p, sec %lx, "
-+			"(%u/%li) buffer:%p [%s]\n",
-+			req, req->cmd, req->sector, req->current_nr_sectors,
-+			req->nr_sectors, req->buffer,
-+			rq_data_dir(req) ? "write" : "read");
-+
-+
-+		blkdev_dequeue_request(req);
-+		if (blkif_queue_request(req)) {
-+			blk_requeue_request(rq, req);
-+		wait:
-+			/* Avoid pointless unplugs. */
-+			blk_stop_queue(rq);
-+			break;
-+		}
-+
-+		queued++;
-+	}
-+
-+	if (queued != 0)
-+		flush_requests(info);
-+}
-+
-+
-+static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-+{
-+	struct request *req;
-+	blkif_response_t *bret;
-+	RING_IDX i, rp;
-+	unsigned long flags;
-+	struct blkfront_info *info = (struct blkfront_info *)dev_id;
-+
-+	spin_lock_irqsave(&blkif_io_lock, flags);
-+
-+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-+		spin_unlock_irqrestore(&blkif_io_lock, flags);
-+		return IRQ_HANDLED;
-+	}
-+
-+ again:
-+	rp = info->ring.sring->rsp_prod;
-+	rmb(); /* Ensure we see queued responses up to 'rp'. */
-+
-+	for (i = info->ring.rsp_cons; i != rp; i++) {
-+		unsigned long id;
-+		int ret;
-+
-+		bret = RING_GET_RESPONSE(&info->ring, i);
-+		id   = bret->id;
-+		req  = (struct request *)info->shadow[id].request;
-+
-+		blkif_completion(&info->shadow[id]);
-+
-+		ADD_ID_TO_FREELIST(info, id);
-+
-+		switch (bret->operation) {
-+		case BLKIF_OP_READ:
-+		case BLKIF_OP_WRITE:
-+			if (unlikely(bret->status != BLKIF_RSP_OKAY))
-+				DPRINTK("Bad return from blkdev data "
-+					"request: %x\n", bret->status);
-+
-+			ret = end_that_request_first(
-+				req, (bret->status == BLKIF_RSP_OKAY),
-+				req->hard_nr_sectors);
-+			BUG_ON(ret);
-+			end_that_request_last(req);
-+			break;
-+		default:
-+			BUG();
-+		}
-+	}
-+
-+	info->ring.rsp_cons = i;
-+
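-+	/* If requests are still outstanding, check again for responses that
-+	   raced with us; otherwise arm rsp_event so the next response raises
-+	   an interrupt. */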
-+	if (i != info->ring.req_prod_pvt) {
-+		int more_to_do;
-+		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
-+		if (more_to_do)
-+			goto again;
-+	} else {
-+		info->ring.sring->rsp_event = i + 1;
-+	}
-+
-+	kick_pending_request_queues(info);
-+
-+	spin_unlock_irqrestore(&blkif_io_lock, flags);
-+
-+	return IRQ_HANDLED;
-+}
-+
-+static void blkif_free(struct blkfront_info *info, int suspend)
-+{
-+	/* Prevent new requests being issued until we fix things up. */
-+	spin_lock_irq(&blkif_io_lock);
-+	info->connected = suspend ? 
-+		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; 
-+	spin_unlock_irq(&blkif_io_lock);
-+
-+	/* Free resources associated with old device channel. */
-+	if (info->ring_ref != GRANT_INVALID_REF) {
-+		gnttab_end_foreign_access(info->ring_ref, 0,
-+					  (unsigned long)info->ring.sring);
-+		info->ring_ref = GRANT_INVALID_REF;
-+		info->ring.sring = NULL;
-+	}
-+	if (info->irq)
-+		unbind_from_irqhandler(info->irq, info); 
-+	info->evtchn = info->irq = 0;
-+
-+}
-+
-+static void blkif_completion(struct blk_shadow *s)
-+{
-+	int i;
-+	for (i = 0; i < s->req.nr_segments; i++)
-+		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
-+}
-+
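-+/*
-+ * Called after a resume: the old grant references are stale, so rebuild the
-+ * shadow free list, re-grant every in-flight segment and requeue those
-+ * requests on the new ring.
-+ */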
-+static void blkif_recover(struct blkfront_info *info)
-+{
-+	int i;
-+	blkif_request_t *req;
-+	struct blk_shadow *copy;
-+	int j;
-+
-+	/* Stage 1: Make a safe copy of the shadow state. */
-+	copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
-+	BUG_ON(copy == NULL);
-+	memcpy(copy, info->shadow, sizeof(info->shadow));
-+
-+	/* Stage 2: Set up free list. */
-+	memset(&info->shadow, 0, sizeof(info->shadow));
-+	for (i = 0; i < BLK_RING_SIZE; i++)
-+		info->shadow[i].req.id = i+1;
-+	info->shadow_free = info->ring.req_prod_pvt;
-+	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
-+
-+	/* Stage 3: Find pending requests and requeue them. */
-+	for (i = 0; i < BLK_RING_SIZE; i++) {
-+		/* Not in use? */
-+		if (copy[i].request == 0)
-+			continue;
-+
-+		/* Grab a request slot and copy shadow state into it. */
-+		req = RING_GET_REQUEST(
-+			&info->ring, info->ring.req_prod_pvt);
-+		*req = copy[i].req;
-+
-+		/* We get a new request id, and must reset the shadow state. */
-+		req->id = GET_ID_FROM_FREELIST(info);
-+		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
-+
-+		/* Rewrite any grant references invalidated by susp/resume. */
-+		for (j = 0; j < req->nr_segments; j++)
-+			gnttab_grant_foreign_access_ref(
-+				req->seg[j].gref,
-+				info->xbdev->otherend_id,
-+				pfn_to_mfn(info->shadow[req->id].frame[j]),
-+				rq_data_dir(
-+					(struct request *)
-+					info->shadow[req->id].request));
-+		info->shadow[req->id].req = *req;
-+
-+		info->ring.req_prod_pvt++;
-+	}
-+
-+	kfree(copy);
-+
-+	(void)xenbus_switch_state(info->xbdev, XBT_NULL, XenbusStateConnected); 
-+	
-+	/* Now safe for us to use the shared ring */
-+	spin_lock_irq(&blkif_io_lock);
-+        info->connected = BLKIF_STATE_CONNECTED;
-+	spin_unlock_irq(&blkif_io_lock);
-+
-+	/* Send off requeued requests */
-+	flush_requests(info);
-+
-+	/* Kick any other new requests queued since we resumed */
-+	spin_lock_irq(&blkif_io_lock);
-+	kick_pending_request_queues(info);
-+	spin_unlock_irq(&blkif_io_lock);
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id blkfront_ids[] = {
-+	{ "vbd" },
-+	{ "" }
-+};
-+
-+
-+static struct xenbus_driver blkfront = {
-+	.name = "vbd",
-+	.owner = THIS_MODULE,
-+	.ids = blkfront_ids,
-+	.probe = blkfront_probe,
-+	.remove = blkfront_remove,
-+	.resume = blkfront_resume,
-+	.otherend_changed = backend_changed,
-+};
-+
-+
-+static int __init xlblk_init(void)
-+{
-+	if (xen_init() < 0)
-+		return -ENODEV;
-+
-+	return xenbus_register_frontend(&blkfront);
-+}
-+module_init(xlblk_init);
-+
-+
-+static void xlblk_exit(void)
-+{
-+	return xenbus_unregister_driver(&blkfront);
-+}
-+module_exit(xlblk_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkfront/block.h linux-2.6.12-xen/drivers/xen/blkfront/block.h
---- pristine-linux-2.6.12/drivers/xen/blkfront/block.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkfront/block.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,162 @@
-+/******************************************************************************
-+ * block.h
-+ * 
-+ * Shared definitions between all levels of XenLinux Virtual block devices.
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004-2005, Christian Limpach
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __XEN_DRIVERS_BLOCK_H__
-+#define __XEN_DRIVERS_BLOCK_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/fs.h>
-+#include <linux/hdreg.h>
-+#include <linux/blkdev.h>
-+#include <linux/major.h>
-+#include <linux/devfs_fs_kernel.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xenbus.h>
-+#include <asm-xen/gnttab.h>
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/io/blkif.h>
-+#include <asm-xen/xen-public/io/ring.h>
-+#include <asm/io.h>
-+#include <asm/atomic.h>
-+#include <asm/uaccess.h>
-+
-+#if 1 
-+#define IPRINTK(fmt, args...) \
-+    printk(KERN_INFO "xen_blk: " fmt, ##args)
-+#else
-+#define IPRINTK(fmt, args...) ((void)0)
-+#endif
-+
-+#if 1 
-+#define WPRINTK(fmt, args...) \
-+    printk(KERN_WARNING "xen_blk: " fmt, ##args)
-+#else
-+#define WPRINTK(fmt, args...) ((void)0)
-+#endif
-+ 
-+#define DPRINTK(_f, _a...) pr_debug ( _f , ## _a )
-+
-+#if 0
-+#define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
-+#else
-+#define DPRINTK_IOCTL(_f, _a...) ((void)0)
-+#endif
-+
-+struct xlbd_type_info
-+{
-+	int partn_shift;
-+	int disks_per_major;
-+	char *devname;
-+	char *diskname;
-+};
-+
-+struct xlbd_major_info
-+{
-+	int major;
-+	int index;
-+	int usage;
-+	struct xlbd_type_info *type;
-+};
-+
-+struct blk_shadow {
-+	blkif_request_t req;
-+	unsigned long request;
-+	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+};
-+
-+#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
-+
-+/*
-+ * We have one of these per vbd, whether ide, scsi or 'other'.  They
-+ * hang in private_data off the gendisk structure. We may end up
-+ * putting all kinds of interesting stuff here :-)
-+ */
-+struct blkfront_info
-+{
-+	struct xenbus_device *xbdev;
-+	dev_t dev;
-+ 	struct gendisk *gd;
-+	int vdevice;
-+	blkif_vdev_t handle;
-+	int connected;
-+	int ring_ref;
-+	blkif_front_ring_t ring;
-+	unsigned int evtchn, irq;
-+	struct xlbd_major_info *mi;
-+	request_queue_t *rq;
-+	struct work_struct work;
-+	struct gnttab_free_callback callback;
-+	struct blk_shadow shadow[BLK_RING_SIZE];
-+	unsigned long shadow_free;
-+
-+	/**
-+	 * The number of people holding this device open.  We won't allow a
-+	 * hot-unplug unless this is 0.
-+	 */
-+	int users;
-+};
-+
-+extern spinlock_t blkif_io_lock;
-+
-+extern int blkif_open(struct inode *inode, struct file *filep);
-+extern int blkif_release(struct inode *inode, struct file *filep);
-+extern int blkif_ioctl(struct inode *inode, struct file *filep,
-+                       unsigned command, unsigned long argument);
-+extern int blkif_check(dev_t dev);
-+extern int blkif_revalidate(dev_t dev);
-+extern void do_blkif_request (request_queue_t *rq); 
-+
-+/* Virtual block-device subsystem. */
-+/* Note that xlvbd_add doesn't call add_disk for you: you're expected
-+   to call add_disk on info->gd once the disk is properly connected
-+   up. */
-+int xlvbd_add(blkif_sector_t capacity, int device,
-+	      u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
-+void xlvbd_del(struct blkfront_info *info);
-+
-+#endif /* __XEN_DRIVERS_BLOCK_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkfront/Makefile linux-2.6.12-xen/drivers/xen/blkfront/Makefile
---- pristine-linux-2.6.12/drivers/xen/blkfront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkfront/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,5 @@
-+
-+obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	:= xenblk.o
-+
-+xenblk-objs := blkfront.o vbd.o
-+
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blkfront/vbd.c linux-2.6.12-xen/drivers/xen/blkfront/vbd.c
---- pristine-linux-2.6.12/drivers/xen/blkfront/vbd.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blkfront/vbd.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,323 @@
-+/******************************************************************************
-+ * vbd.c
-+ * 
-+ * XenLinux virtual block-device driver (xvd).
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
-+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
-+ * Copyright (c) 2004-2005, Christian Limpach
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include "block.h"
-+#include <linux/blkdev.h>
-+#include <linux/list.h>
-+
-+#define BLKIF_MAJOR(dev) ((dev)>>8)
-+#define BLKIF_MINOR(dev) ((dev) & 0xff)
-+
-+/*
-+ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
-+ * potentially combinations of the two) in the naming scheme and in a few other
-+ * places.
-+ */
-+
-+#define NUM_IDE_MAJORS 10
-+#define NUM_SCSI_MAJORS 9
-+#define NUM_VBD_MAJORS 1
-+
-+static struct xlbd_type_info xlbd_ide_type = {
-+	.partn_shift = 6,
-+	.disks_per_major = 2,
-+	.devname = "ide",
-+	.diskname = "hd",
-+};
-+
-+static struct xlbd_type_info xlbd_scsi_type = {
-+	.partn_shift = 4,
-+	.disks_per_major = 16,
-+	.devname = "sd",
-+	.diskname = "sd",
-+};
-+
-+static struct xlbd_type_info xlbd_vbd_type = {
-+	.partn_shift = 4,
-+	.disks_per_major = 16,
-+	.devname = "xvd",
-+	.diskname = "xvd",
-+};
-+
-+static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
-+					 NUM_VBD_MAJORS];
-+
-+#define XLBD_MAJOR_IDE_START	0
-+#define XLBD_MAJOR_SCSI_START	(NUM_IDE_MAJORS)
-+#define XLBD_MAJOR_VBD_START	(NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
-+
-+#define XLBD_MAJOR_IDE_RANGE	XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
-+#define XLBD_MAJOR_SCSI_RANGE	XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
-+#define XLBD_MAJOR_VBD_RANGE	XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
-+
-+/* Information about our VBDs. */
-+#define MAX_VBDS 64
-+static LIST_HEAD(vbds_list);
-+
-+static struct block_device_operations xlvbd_block_fops =
-+{
-+	.owner = THIS_MODULE,
-+	.open = blkif_open,
-+	.release = blkif_release,
-+	.ioctl  = blkif_ioctl,
-+};
-+
-+spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED;
-+
-+static struct xlbd_major_info *
-+xlbd_alloc_major_info(int major, int minor, int index)
-+{
-+	struct xlbd_major_info *ptr;
-+
-+	ptr = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
-+	if (ptr == NULL)
-+		return NULL;
-+
-+	memset(ptr, 0, sizeof(struct xlbd_major_info));
-+
-+	ptr->major = major;
-+
-+	switch (index) {
-+	case XLBD_MAJOR_IDE_RANGE:
-+		ptr->type = &xlbd_ide_type;
-+		ptr->index = index - XLBD_MAJOR_IDE_START;
-+		break;
-+	case XLBD_MAJOR_SCSI_RANGE:
-+		ptr->type = &xlbd_scsi_type;
-+		ptr->index = index - XLBD_MAJOR_SCSI_START;
-+		break;
-+	case XLBD_MAJOR_VBD_RANGE:
-+		ptr->type = &xlbd_vbd_type;
-+		ptr->index = index - XLBD_MAJOR_VBD_START;
-+		break;
-+	}
-+
-+	printk("Registering block device major %i\n", ptr->major);
-+	if (register_blkdev(ptr->major, ptr->type->devname)) {
-+		WPRINTK("can't get major %d with name %s\n",
-+			ptr->major, ptr->type->devname);
-+		kfree(ptr);
-+		return NULL;
-+	}
-+
-+	devfs_mk_dir(ptr->type->devname);
-+	major_info[index] = ptr;
-+	return ptr;
-+}
-+
-+static struct xlbd_major_info *
-+xlbd_get_major_info(int vdevice)
-+{
-+	struct xlbd_major_info *mi;
-+	int major, minor, index;
-+
-+	major = BLKIF_MAJOR(vdevice);
-+	minor = BLKIF_MINOR(vdevice);
-+
-+	switch (major) {
-+	case IDE0_MAJOR: index = 0; break;
-+	case IDE1_MAJOR: index = 1; break;
-+	case IDE2_MAJOR: index = 2; break;
-+	case IDE3_MAJOR: index = 3; break;
-+	case IDE4_MAJOR: index = 4; break;
-+	case IDE5_MAJOR: index = 5; break;
-+	case IDE6_MAJOR: index = 6; break;
-+	case IDE7_MAJOR: index = 7; break;
-+	case IDE8_MAJOR: index = 8; break;
-+	case IDE9_MAJOR: index = 9; break;
-+	case SCSI_DISK0_MAJOR: index = 10; break;
-+	case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
-+		index = 11 + major - SCSI_DISK1_MAJOR;
-+		break;
-+	case SCSI_CDROM_MAJOR: index = 18; break;
-+	default: index = 19; break;
-+	}
-+
-+	mi = ((major_info[index] != NULL) ? major_info[index] :
-+	      xlbd_alloc_major_info(major, minor, index));
-+	if (mi)
-+		mi->usage++;
-+	return mi;
-+}
-+
-+static void
-+xlbd_put_major_info(struct xlbd_major_info *mi)
-+{
-+	mi->usage--;
-+	/* XXX: release major if 0 */
-+}
-+
-+static int
-+xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
-+{
-+	request_queue_t *rq;
-+
-+	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
-+	if (rq == NULL)
-+		return -1;
-+
-+	elevator_init(rq, "noop");
-+
-+	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-+	blk_queue_hardsect_size(rq, sector_size);
-+	blk_queue_max_sectors(rq, 512);
-+
-+	/* Each segment in a request is up to an aligned page in size. */
-+	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-+	blk_queue_max_segment_size(rq, PAGE_SIZE);
-+
-+	/* Ensure a merged request will fit in a single I/O ring slot. */
-+	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-+
-+	/* Make sure buffer addresses are sector-aligned. */
-+	blk_queue_dma_alignment(rq, 511);
-+
-+	gd->queue = rq;
-+
-+	return 0;
-+}
-+
-+static int
-+xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, int vdevice,
-+		    u16 vdisk_info, u16 sector_size,
-+		    struct blkfront_info *info)
-+{
-+	struct gendisk *gd;
-+	struct xlbd_major_info *mi;
-+	int nr_minors = 1;
-+	int err = -ENODEV;
-+
-+	mi = xlbd_get_major_info(vdevice);
-+	if (mi == NULL)
-+		goto out;
-+	info->mi = mi;
-+
-+	if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
-+		nr_minors = 1 << mi->type->partn_shift;
-+
-+	gd = alloc_disk(nr_minors);
-+	if (gd == NULL)
-+		goto out;
-+
-+	if (nr_minors > 1)
-+		sprintf(gd->disk_name, "%s%c", mi->type->diskname,
-+			'a' + mi->index * mi->type->disks_per_major +
-+			(minor >> mi->type->partn_shift));
-+	else
-+		sprintf(gd->disk_name, "%s%c%d", mi->type->diskname,
-+			'a' + mi->index * mi->type->disks_per_major +
-+			(minor >> mi->type->partn_shift),
-+			minor & ((1 << mi->type->partn_shift) - 1));
-+
-+	gd->major = mi->major;
-+	gd->first_minor = minor;
-+	gd->fops = &xlvbd_block_fops;
-+	gd->private_data = info;
-+	gd->driverfs_dev = &(info->xbdev->dev);
-+	set_capacity(gd, capacity);
-+
-+	if (xlvbd_init_blk_queue(gd, sector_size)) {
-+		del_gendisk(gd);
-+		goto out;
-+	}
-+
-+	info->rq = gd->queue;
-+
-+	if (vdisk_info & VDISK_READONLY)
-+		set_disk_ro(gd, 1);
-+
-+	if (vdisk_info & VDISK_REMOVABLE)
-+		gd->flags |= GENHD_FL_REMOVABLE;
-+
-+	if (vdisk_info & VDISK_CDROM)
-+		gd->flags |= GENHD_FL_CD;
-+
-+	info->gd = gd;
-+
-+	return 0;
-+
-+ out:
-+	if (mi)
-+		xlbd_put_major_info(mi);
-+	return err;
-+}
-+
-+int
-+xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
-+	  u16 sector_size, struct blkfront_info *info)
-+{
-+	struct block_device *bd;
-+	int err = 0;
-+
-+	info->dev = MKDEV(BLKIF_MAJOR(vdevice), BLKIF_MINOR(vdevice));
-+
-+	bd = bdget(info->dev);
-+	if (bd == NULL)
-+		return -ENODEV;
-+
-+	err = xlvbd_alloc_gendisk(BLKIF_MINOR(vdevice), capacity, vdevice,
-+				  vdisk_info, sector_size, info);
-+
-+	bdput(bd);
-+	return err;
-+}
-+
-+void
-+xlvbd_del(struct blkfront_info *info)
-+{
-+	struct block_device *bd;
-+
-+	bd = bdget(info->dev);
-+	if (bd == NULL)
-+		return;
-+
-+	if (info->gd == NULL)
-+		return;
-+
-+	del_gendisk(info->gd);
-+	put_disk(info->gd);
-+	xlbd_put_major_info(info->mi);
-+	info->mi = NULL;
-+	blk_cleanup_queue(info->rq);
-+
-+	bdput(bd);
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/blktap.c linux-2.6.12-xen/drivers/xen/blktap/blktap.c
---- pristine-linux-2.6.12/drivers/xen/blktap/blktap.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blktap/blktap.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,910 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/blktap/blktap.c
-+ * 
-+ * This is a modified version of the block backend driver that remaps requests
-+ * to a user-space memory region.  It is intended to be used to write 
-+ * application-level servers that provide block interfaces to client VMs.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/spinlock.h>
-+#include <asm-xen/balloon.h>
-+#include <linux/kernel.h>
-+#include <linux/fs.h>
-+#include <linux/mm.h>
-+#include <linux/miscdevice.h>
-+#include <linux/errno.h>
-+#include <linux/major.h>
-+#include <linux/gfp.h>
-+#include <linux/poll.h>
-+#include <asm/tlbflush.h>
-+#include "common.h"
-+
-+/* Only one process may open /dev/xen/blktap at any time. */
-+static unsigned long blktap_dev_inuse;
-+unsigned long blktap_ring_ok; /* make this ring->state */
-+
-+/* Rings up to user space. */
-+static blkif_front_ring_t blktap_ufe_ring;
-+
-+/* for poll: */
-+static wait_queue_head_t blktap_wait;
-+
-+/* current switching mode */
-+static unsigned long blktap_mode;
-+
-+/* local prototypes */
-+static int blktap_read_ufe_ring(void);
-+
-+
-+/* /dev/xen/blktap resides at device number major=10, minor=202        */
-+#define BLKTAP_MINOR 202
-+
-+/* blktap IOCTLs:                                                      */
-+#define BLKTAP_IOCTL_KICK_FE         1
-+#define BLKTAP_IOCTL_KICK_BE         2 /* currently unused */
-+#define BLKTAP_IOCTL_SETMODE         3
-+#define BLKTAP_IOCTL_PRINT_IDXS      100  
-+
-+/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE)             */
-+#define BLKTAP_MODE_PASSTHROUGH      0x00000000  /* default            */
-+#define BLKTAP_MODE_INTERCEPT_FE     0x00000001
-+#define BLKTAP_MODE_INTERCEPT_BE     0x00000002  /* unimp. */
-+#define BLKTAP_MODE_COPY_FE          0x00000004  /* unimp. */
-+#define BLKTAP_MODE_COPY_BE          0x00000008  /* unimp. */
-+#define BLKTAP_MODE_COPY_FE_PAGES    0x00000010  /* unimp. */
-+#define BLKTAP_MODE_COPY_BE_PAGES    0x00000020  /* unimp. */
-+
-+#define BLKTAP_MODE_INTERPOSE \
-+           (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
-+
-+#define BLKTAP_MODE_COPY_BOTH \
-+           (BLKTAP_MODE_COPY_FE | BLKTAP_MODE_COPY_BE)
-+
-+#define BLKTAP_MODE_COPY_BOTH_PAGES \
-+           (BLKTAP_MODE_COPY_FE_PAGES | BLKTAP_MODE_COPY_BE_PAGES)
-+
-+static inline int BLKTAP_MODE_VALID(unsigned long arg)
-+{
-+	return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
-+		(arg == BLKTAP_MODE_INTERCEPT_FE) ||
-+		(arg == BLKTAP_MODE_INTERPOSE   ));
-+/*
-+  return (
-+  ( arg == BLKTAP_MODE_PASSTHROUGH  ) ||
-+  ( arg == BLKTAP_MODE_INTERCEPT_FE ) ||
-+  ( arg == BLKTAP_MODE_INTERCEPT_BE ) ||
-+  ( arg == BLKTAP_MODE_INTERPOSE    ) ||
-+  ( (arg & ~BLKTAP_MODE_COPY_FE_PAGES) == BLKTAP_MODE_COPY_FE ) ||
-+  ( (arg & ~BLKTAP_MODE_COPY_BE_PAGES) == BLKTAP_MODE_COPY_BE ) ||
-+  ( (arg & ~BLKTAP_MODE_COPY_BOTH_PAGES) == BLKTAP_MODE_COPY_BOTH )
-+  );
-+*/
-+}
-+
-+
-+/******************************************************************
-+ * MMAP REGION
-+ */
-+
-+/*
-+ * We use a big chunk of address space to map in-flight requests into,
-+ * and export this region up to user-space.  See the comments in blkback
-+ * about this -- the two must be kept in sync if the tap is used as a 
-+ * passthrough.
-+ */
-+
-+#define MAX_PENDING_REQS 64
-+#define BATCH_PER_DOMAIN 16
-+
-+/* immediately before the mmap area, we have a bunch of pages reserved
-+ * for shared memory rings.
-+ */
-+#define RING_PAGES 1 /* Front */ 
-+
-+/* Where things are inside the device mapping. */
-+struct vm_area_struct *blktap_vma = NULL;
-+unsigned long mmap_vstart;  /* Kernel pages for mapping in data. */
-+unsigned long rings_vstart; /* start of mmaped vma               */
-+unsigned long user_vstart;  /* start of user mappings            */
-+
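-+/* The mapping starts with RING_PAGES of shared ring, followed by one page
-+ * per segment of each pending request; MMAP_VADDR() locates a given
-+ * request/segment page. */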
-+#define MMAP_PAGES						\
-+	(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
-+#define MMAP_VADDR(_start, _req,_seg)					\
-+	(_start +							\
-+	 ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +	\
-+	 ((_seg) * PAGE_SIZE))
-+
-+/*
-+ * Each outstanding request that we've passed to the lower device layers has a 
-+ * 'pending_req' allocated to it. Each buffer_head that completes decrements 
-+ * the pendcnt towards zero. When it hits zero, the specified domain has a 
-+ * response queued for it, with the saved 'id' passed back.
-+ */
-+typedef struct {
-+	blkif_t       *blkif;
-+	unsigned long  id;
-+	int            nr_pages;
-+	atomic_t       pendcnt;
-+	unsigned short operation;
-+	int            status;
-+} pending_req_t;
-+
-+/*
-+ * We can't allocate pending_req's in order, since they may complete out of 
-+ * order. We therefore maintain an allocation ring. This ring also indicates 
-+ * when enough work has been passed down -- at that point the allocation ring 
-+ * will be empty.
-+ */
-+static pending_req_t pending_reqs[MAX_PENDING_REQS];
-+static unsigned char pending_ring[MAX_PENDING_REQS];
-+static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
-+/* NB. We use a different index type to differentiate from shared blk rings. */
-+typedef unsigned int PEND_RING_IDX;
-+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-+static PEND_RING_IDX pending_prod, pending_cons;
-+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-+
-+/* Requests passing through the tap to the backend hijack the id field
-+ * in the request message.  In it we put the AR index _AND_ the fe domid.
-+ * The domid is used by the backend to map the pages properly.
-+ */
-+
-+static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
-+{
-+	return ((fe_dom << 16) | MASK_PEND_IDX(idx));
-+}
-+
-+extern inline PEND_RING_IDX ID_TO_IDX(unsigned long id) 
-+{ 
-+	return (PEND_RING_IDX)(id & 0x0000ffff);
-+}
-+
-+extern inline domid_t ID_TO_DOM(unsigned long id) 
-+{ 
-+	return (domid_t)(id >> 16); 
-+}
-+
-+
-+
-+/******************************************************************
-+ * GRANT HANDLES
-+ */
-+
-+/* When grant tables are used to map a frame for device access, the
-+ * handle returned must be used to unmap the frame. This is needed to
-+ * drop the ref count on the frame.
-+ */
-+struct grant_handle_pair
-+{
-+	grant_handle_t kernel;
-+	grant_handle_t user;
-+};
-+static struct grant_handle_pair pending_grant_handles[MMAP_PAGES];
-+#define pending_handle(_idx, _i) \
-+    (pending_grant_handles[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
-+#define BLKTAP_INVALID_HANDLE(_g) \
-+    (((_g->kernel) == 0xFFFF) && ((_g->user) == 0xFFFF))
-+#define BLKTAP_INVALIDATE_HANDLE(_g) do {       \
-+    (_g)->kernel = 0xFFFF; (_g)->user = 0xFFFF; \
-+    } while(0)
-+
-+
-+/******************************************************************
-+ * BLKTAP VM OPS
-+ */
-+
-+static struct page *blktap_nopage(struct vm_area_struct *vma,
-+				  unsigned long address,
-+				  int *type)
-+{
-+	/*
-+	 * if the page has not been mapped in by the driver then generate
-+	 * a SIGBUS to the domain.
-+	 */
-+	force_sig(SIGBUS, current);
-+
-+	return 0;
-+}
-+
-+struct vm_operations_struct blktap_vm_ops = {
-+	.nopage = blktap_nopage,
-+};
-+
-+/******************************************************************
-+ * BLKTAP FILE OPS
-+ */
-+
-+static int blktap_open(struct inode *inode, struct file *filp)
-+{
-+	blkif_sring_t *sring;
-+
-+	if (test_and_set_bit(0, &blktap_dev_inuse))
-+		return -EBUSY;
-+    
-+	/* Allocate the fe ring. */
-+	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
-+	if (sring == NULL)
-+		return -ENOMEM;
-+
-+	SetPageReserved(virt_to_page(sring));
-+    
-+	SHARED_RING_INIT(sring);
-+	FRONT_RING_INIT(&blktap_ufe_ring, sring, PAGE_SIZE);
-+
-+	return 0;
-+}
-+
-+static int blktap_release(struct inode *inode, struct file *filp)
-+{
-+	blktap_dev_inuse = 0;
-+	blktap_ring_ok = 0;
-+
-+	/* Free the ring page. */
-+	ClearPageReserved(virt_to_page(blktap_ufe_ring.sring));
-+	free_page((unsigned long) blktap_ufe_ring.sring);
-+
-+	/* Clear any active mappings and free foreign map table */
-+	if (blktap_vma != NULL) {
-+		zap_page_range(
-+			blktap_vma, blktap_vma->vm_start, 
-+			blktap_vma->vm_end - blktap_vma->vm_start, NULL);
-+		blktap_vma = NULL;
-+	}
-+
-+	return 0;
-+}
-+
-+
-+/* Note on mmap:
-+ * We need to map pages to user space in a way that will allow the block
-+ * subsystem set up direct IO to them.  This couldn't be done before, because
-+ * there isn't really a sane way to translate a user virtual address down to a 
-+ * physical address when the page belongs to another domain.
-+ *
-+ * My first approach was to map the page in to kernel memory, add an entry
-+ * for it in the physical frame list (using alloc_lomem_region as in blkback)
-+ * and then attempt to map that page up to user space.  This is disallowed
-+ * by xen though, which realizes that we don't really own the machine frame
-+ * underlying the physical page.
-+ *
-+ * The new approach is to provide explicit support for this in xen linux.
-+ * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
-+ * mapped from other vms.  vma->vm_private_data is set up as a mapping 
-+ * from pages to actual page structs.  There is a new clause in get_user_pages
-+ * that does the right thing for this sort of mapping.
-+ */
-+static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
-+{
-+	int size;
-+	struct page **map;
-+	int i;
-+
-+	DPRINTK(KERN_ALERT "blktap mmap (%lx, %lx)\n",
-+		vma->vm_start, vma->vm_end);
-+
-+	vma->vm_flags |= VM_RESERVED;
-+	vma->vm_ops = &blktap_vm_ops;
-+
-+	size = vma->vm_end - vma->vm_start;
-+	if (size != ((MMAP_PAGES + RING_PAGES) << PAGE_SHIFT)) {
-+		printk(KERN_INFO 
-+		       "blktap: you _must_ map exactly %d pages!\n",
-+		       MMAP_PAGES + RING_PAGES);
-+		return -EAGAIN;
-+	}
-+
-+	size >>= PAGE_SHIFT;
-+	DPRINTK(KERN_INFO "blktap: 2 rings + %d pages.\n", size-1);
-+    
-+	rings_vstart = vma->vm_start;
-+	user_vstart  = rings_vstart + (RING_PAGES << PAGE_SHIFT);
-+    
-+	/* Map the ring pages to the start of the region and reserve it. */
-+
-+	/* not sure if I really need to do this... */
-+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+
-+	if (remap_pfn_range(vma, vma->vm_start, 
-+			    __pa(blktap_ufe_ring.sring) >> PAGE_SHIFT, 
-+			    PAGE_SIZE, vma->vm_page_prot)) {
-+		WPRINTK("Mapping user ring failed!\n");
-+		goto fail;
-+	}
-+
-+	/* Mark this VM as containing foreign pages, and set up mappings. */
-+	map = kmalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
-+		      * sizeof(struct page_struct*),
-+		      GFP_KERNEL);
-+	if (map == NULL) {
-+		WPRINTK("Couldn't alloc VM_FOREIGH map.\n");
-+		goto fail;
-+	}
-+
-+	for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
-+		map[i] = NULL;
-+    
-+	vma->vm_private_data = map;
-+	vma->vm_flags |= VM_FOREIGN;
-+
-+	blktap_vma = vma;
-+	blktap_ring_ok = 1;
-+
-+	return 0;
-+ fail:
-+	/* Clear any active mappings. */
-+	zap_page_range(vma, vma->vm_start, 
-+		       vma->vm_end - vma->vm_start, NULL);
-+
-+	return -ENOMEM;
-+}
-+
-+static int blktap_ioctl(struct inode *inode, struct file *filp,
-+                        unsigned int cmd, unsigned long arg)
-+{
-+	switch(cmd) {
-+	case BLKTAP_IOCTL_KICK_FE: /* There are fe messages to process. */
-+		return blktap_read_ufe_ring();
-+
-+	case BLKTAP_IOCTL_SETMODE:
-+		if (BLKTAP_MODE_VALID(arg)) {
-+			blktap_mode = arg;
-+			/* XXX: may need to flush rings here. */
-+			printk(KERN_INFO "blktap: set mode to %lx\n", arg);
-+			return 0;
-+		}
-+	case BLKTAP_IOCTL_PRINT_IDXS:
-+        {
-+		//print_fe_ring_idxs();
-+		WPRINTK("User Rings: \n-----------\n");
-+		WPRINTK("UF: rsp_cons: %2d, req_prod_prv: %2d "
-+			"| req_prod: %2d, rsp_prod: %2d\n",
-+			blktap_ufe_ring.rsp_cons,
-+			blktap_ufe_ring.req_prod_pvt,
-+			blktap_ufe_ring.sring->req_prod,
-+			blktap_ufe_ring.sring->rsp_prod);
-+            
-+        }
-+	}
-+	return -ENOIOCTLCMD;
-+}
-+
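-+/* poll() doubles as the kick path to user space: any privately queued
-+ * requests are pushed onto the shared ring before the fd is reported
-+ * readable. */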
-+static unsigned int blktap_poll(struct file *file, poll_table *wait)
-+{
-+	poll_wait(file, &blktap_wait, wait);
-+	if (blktap_ufe_ring.req_prod_pvt != blktap_ufe_ring.sring->req_prod) {
-+		flush_tlb_all();
-+		RING_PUSH_REQUESTS(&blktap_ufe_ring);
-+		return POLLIN | POLLRDNORM;
-+	}
-+
-+	return 0;
-+}
-+
-+void blktap_kick_user(void)
-+{
-+	/* blktap_ring->req_prod = blktap_req_prod; */
-+	wake_up_interruptible(&blktap_wait);
-+}
-+
-+static struct file_operations blktap_fops = {
-+	.owner   = THIS_MODULE,
-+	.poll    = blktap_poll,
-+	.ioctl   = blktap_ioctl,
-+	.open    = blktap_open,
-+	.release = blktap_release,
-+	.mmap    = blktap_mmap,
-+};
-+
-+
-+
-+static int do_block_io_op(blkif_t *blkif, int max_to_do);
-+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
-+static void make_response(blkif_t *blkif, unsigned long id, 
-+                          unsigned short op, int st);
-+
-+
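-+/* Tear down both the kernel and the user-space grant mappings for a
-+ * completed request, then zap the corresponding range of the user VMA. */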
-+static void fast_flush_area(int idx, int nr_pages)
-+{
-+	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-+	unsigned int i, op = 0;
-+	struct grant_handle_pair *handle;
-+	uint64_t ptep;
-+	int ret;
-+
-+	for ( i = 0; i < nr_pages; i++)
-+	{
-+		handle = &pending_handle(idx, i);
-+		if (BLKTAP_INVALID_HANDLE(handle))
-+			continue;
-+
-+		unmap[op].host_addr = MMAP_VADDR(mmap_vstart, idx, i);
-+		unmap[op].dev_bus_addr = 0;
-+		unmap[op].handle = handle->kernel;
-+		op++;
-+
-+		if (create_lookup_pte_addr(
-+			    blktap_vma->vm_mm,
-+			    MMAP_VADDR(user_vstart, idx, i), 
-+			    &ptep) !=0) {
-+			DPRINTK("Couldn't get a pte addr!\n");
-+			return;
-+		}
-+		unmap[op].host_addr    = ptep;
-+		unmap[op].dev_bus_addr = 0;
-+		unmap[op].handle       = handle->user;
-+		op++;
-+            
-+		BLKTAP_INVALIDATE_HANDLE(handle);
-+	}
-+
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, op);
-+	BUG_ON(ret);
-+
-+	if (blktap_vma != NULL)
-+		zap_page_range(blktap_vma, 
-+			       MMAP_VADDR(user_vstart, idx, 0), 
-+			       nr_pages << PAGE_SHIFT, NULL);
-+}
-+
-+/******************************************************************
-+ * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
-+ */
-+
-+static struct list_head blkio_schedule_list;
-+static spinlock_t blkio_schedule_list_lock;
-+
-+static int __on_blkdev_list(blkif_t *blkif)
-+{
-+	return blkif->blkdev_list.next != NULL;
-+}
-+
-+static void remove_from_blkdev_list(blkif_t *blkif)
-+{
-+	unsigned long flags;
-+
-+	if (!__on_blkdev_list(blkif))
-+		return;
-+
-+	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-+	if (__on_blkdev_list(blkif)) {
-+		list_del(&blkif->blkdev_list);
-+		blkif->blkdev_list.next = NULL;
-+		blkif_put(blkif);
-+	}
-+	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
-+}
-+
-+static void add_to_blkdev_list_tail(blkif_t *blkif)
-+{
-+	unsigned long flags;
-+
-+	if (__on_blkdev_list(blkif))
-+		return;
-+
-+	spin_lock_irqsave(&blkio_schedule_list_lock, flags);
-+	if (!__on_blkdev_list(blkif) && (blkif->status == CONNECTED)) {
-+		list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
-+		blkif_get(blkif);
-+	}
-+	spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
-+}
-+
-+
-+/******************************************************************
-+ * SCHEDULER FUNCTIONS
-+ */
-+
-+static DECLARE_WAIT_QUEUE_HEAD(blkio_schedule_wait);
-+
-+static int blkio_schedule(void *arg)
-+{
-+	DECLARE_WAITQUEUE(wq, current);
-+
-+	blkif_t          *blkif;
-+	struct list_head *ent;
-+
-+	daemonize("xenblkd");
-+
-+	for (;;) {
-+		/* Wait for work to do. */
-+		add_wait_queue(&blkio_schedule_wait, &wq);
-+		set_current_state(TASK_INTERRUPTIBLE);
-+		if ((NR_PENDING_REQS == MAX_PENDING_REQS) || 
-+		    list_empty(&blkio_schedule_list))
-+			schedule();
-+		__set_current_state(TASK_RUNNING);
-+		remove_wait_queue(&blkio_schedule_wait, &wq);
-+
-+		/* Queue up a batch of requests. */
-+		while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
-+		       !list_empty(&blkio_schedule_list)) {
-+			ent = blkio_schedule_list.next;
-+			blkif = list_entry(ent, blkif_t, blkdev_list);
-+			blkif_get(blkif);
-+			remove_from_blkdev_list(blkif);
-+			if (do_block_io_op(blkif, BATCH_PER_DOMAIN))
-+				add_to_blkdev_list_tail(blkif);
-+			blkif_put(blkif);
-+		}
-+	}
-+}
-+
-+static void maybe_trigger_blkio_schedule(void)
-+{
-+	/*
-+	 * Needed so that two processes that together make the following
-+	 * predicate true don't both read stale values and evaluate the
-+	 * predicate incorrectly. Incredibly unlikely to stall the scheduler
-+	 * on the x86, but...
-+	 */
-+	smp_mb();
-+
-+	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-+	    !list_empty(&blkio_schedule_list))
-+		wake_up(&blkio_schedule_wait);
-+}
-+
-+
-+
-+/******************************************************************
-+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
-+ */
-+
-+
-+static int blktap_read_ufe_ring(void)
-+{
-+	/* This is called to read responses from the UFE ring. */
-+
-+	RING_IDX i, j, rp;
-+	blkif_response_t *resp;
-+	blkif_t *blkif;
-+	int pending_idx;
-+	pending_req_t *pending_req;
-+	unsigned long     flags;
-+
-+	/* if we are forwarding from the UFE ring to the FE ring */
-+	if (blktap_mode & BLKTAP_MODE_INTERCEPT_FE) {
-+
-+		/* for each outstanding message on the UFE ring */
-+		rp = blktap_ufe_ring.sring->rsp_prod;
-+		rmb();
-+        
-+		for (i = blktap_ufe_ring.rsp_cons; i != rp; i++) {
-+			resp = RING_GET_RESPONSE(&blktap_ufe_ring, i);
-+			pending_idx = MASK_PEND_IDX(ID_TO_IDX(resp->id));
-+			pending_req = &pending_reqs[pending_idx];
-+            
-+			blkif = pending_req->blkif;
-+			for (j = 0; j < pending_req->nr_pages; j++) {
-+				unsigned long vaddr;
-+				struct page **map = blktap_vma->vm_private_data;
-+				int offset; 
-+
-+				vaddr  = MMAP_VADDR(user_vstart, pending_idx, j);
-+				offset = (vaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
-+
-+				//ClearPageReserved(virt_to_page(vaddr));
-+				ClearPageReserved((struct page *)map[offset]);
-+				map[offset] = NULL;
-+			}
-+
-+			fast_flush_area(pending_idx, pending_req->nr_pages);
-+			make_response(blkif, pending_req->id, resp->operation, 
-+				      resp->status);
-+			blkif_put(pending_req->blkif);
-+			spin_lock_irqsave(&pend_prod_lock, flags);
-+			pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+			spin_unlock_irqrestore(&pend_prod_lock, flags);
-+		}
-+		blktap_ufe_ring.rsp_cons = i;
-+		maybe_trigger_blkio_schedule();
-+	}
-+	return 0;
-+}
-+
-+
-+/******************************************************************************
-+ * NOTIFICATION FROM GUEST OS.
-+ */
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	blkif_t *blkif = dev_id;
-+	add_to_blkdev_list_tail(blkif);
-+	maybe_trigger_blkio_schedule();
-+	return IRQ_HANDLED;
-+}
-+
-+
-+
-+/******************************************************************
-+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
-+ */
-+
-+static int do_block_io_op(blkif_t *blkif, int max_to_do)
-+{
-+	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-+	blkif_request_t *req;
-+	RING_IDX i, rp;
-+	int more_to_do = 0;
-+    
-+	rp = blk_ring->sring->req_prod;
-+	rmb(); /* Ensure we see queued requests up to 'rp'. */
-+
-+	for (i = blk_ring->req_cons; 
-+	     (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
-+	     i++ ) {
-+		if ((max_to_do-- == 0) ||
-+		    (NR_PENDING_REQS == MAX_PENDING_REQS)) {
-+			more_to_do = 1;
-+			break;
-+		}
-+        
-+		req = RING_GET_REQUEST(blk_ring, i);
-+		switch (req->operation) {
-+		case BLKIF_OP_READ:
-+		case BLKIF_OP_WRITE:
-+			dispatch_rw_block_io(blkif, req);
-+			break;
-+
-+		default:
-+			DPRINTK("error: unknown block io operation [%d]\n",
-+				req->operation);
-+			make_response(blkif, req->id, req->operation,
-+				      BLKIF_RSP_ERROR);
-+			break;
-+		}
-+	}
-+
-+	blk_ring->req_cons = i;
-+	blktap_kick_user();
-+
-+	return more_to_do;
-+}
-+
-+static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
-+{
-+	blkif_request_t *target;
-+	int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-+	pending_req_t *pending_req;
-+	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
-+	int op, ret;
-+	unsigned int nseg;
-+	int retval;
-+
-+	/* Check that number of segments is sane. */
-+	nseg = req->nr_segments;
-+	if (unlikely(nseg == 0) || 
-+	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
-+		DPRINTK("Bad number of segments in request (%d)\n", nseg);
-+		goto bad_descriptor;
-+	}
-+
-+	/* Make sure userspace is ready. */
-+	if (!blktap_ring_ok) {
-+		DPRINTK("blktap: ring not ready for requests!\n");
-+		goto bad_descriptor;
-+	}
-+    
-+
-+	if (RING_FULL(&blktap_ufe_ring)) {
-+		WPRINTK("blktap: fe_ring is full, can't add "
-+			"(very broken!).\n");
-+		goto bad_descriptor;
-+	}
-+
-+	flush_cache_all(); /* a noop on intel... */
-+
-+	/* Map the foreign pages directly in to the application */    
-+	op = 0;
-+	for (i = 0; i < req->nr_segments; i++) {
-+
-+		unsigned long uvaddr;
-+		unsigned long kvaddr;
-+		uint64_t ptep;
-+
-+		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i);
-+		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
-+
-+		/* Map the remote page to kernel. */
-+		map[op].host_addr = kvaddr;
-+		map[op].dom   = blkif->domid;
-+		map[op].ref   = req->seg[i].gref;
-+		map[op].flags = GNTMAP_host_map;
-+		/* This needs a bit more thought in terms of interposition: 
-+		 * If we want to be able to modify pages during write using 
-+		 * grant table mappings, the guest will either need to allow 
-+		 * it, or we'll need to incur a copy. Bit of an fbufs moment. ;) */
-+		if (req->operation == BLKIF_OP_WRITE)
-+			map[op].flags |= GNTMAP_readonly;
-+		op++;
-+
-+		/* Now map it to user. */
-+		ret = create_lookup_pte_addr(blktap_vma->vm_mm, uvaddr, &ptep);
-+		if (ret) {
-+			DPRINTK("Couldn't get a pte addr!\n");
-+			fast_flush_area(pending_idx, req->nr_segments);
-+			goto bad_descriptor;
-+		}
-+
-+		map[op].host_addr = ptep;
-+		map[op].dom       = blkif->domid;
-+		map[op].ref       = req->seg[i].gref;
-+		map[op].flags     = GNTMAP_host_map | GNTMAP_application_map
-+			| GNTMAP_contains_pte;
-+		/* Above interposition comment applies here as well. */
-+		if (req->operation == BLKIF_OP_WRITE)
-+			map[op].flags |= GNTMAP_readonly;
-+		op++;
-+	}
-+
-+	retval = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
-+	BUG_ON(retval);
-+
-+	op = 0;
-+	for (i = 0; i < (req->nr_segments*2); i += 2) {
-+		unsigned long uvaddr;
-+		unsigned long kvaddr;
-+		unsigned long offset;
-+		int cancel = 0;
-+
-+		uvaddr = MMAP_VADDR(user_vstart, pending_idx, i/2);
-+		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i/2);
-+
-+		if (unlikely(map[i].status)) {
-+			DPRINTK("Error on kernel grant mapping (%d)\n",
-+				map[i].status);
-+			ret = map[i].status;
-+			cancel = 1;
-+		}
-+
-+		if (unlikely(map[i+1].status)) {
-+			DPRINTK("Error on user grant mapping (%d)\n",
-+				map[i+1].status);
-+			ret = map[i+1].status;
-+			cancel = 1;
-+		}
-+
-+		if (cancel) {
-+			fast_flush_area(pending_idx, req->nr_segments);
-+			goto bad_descriptor;
-+		}
-+
-+		/* Set the necessary mappings in p2m and in the VM_FOREIGN 
-+		 * vm_area_struct to allow user vaddr -> struct page lookups
-+		 * to work.  This is needed for direct IO to foreign pages. */
-+		set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
-+				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
-+
-+		offset = (uvaddr - blktap_vma->vm_start) >> PAGE_SHIFT;
-+		((struct page **)blktap_vma->vm_private_data)[offset] =
-+			pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
-+
-+		/* Save handles for unmapping later. */
-+		pending_handle(pending_idx, i/2).kernel = map[i].handle;
-+		pending_handle(pending_idx, i/2).user   = map[i+1].handle;
-+	}
-+
-+	/* Mark mapped pages as reserved: */
-+	for (i = 0; i < req->nr_segments; i++) {
-+		unsigned long kvaddr;
-+		kvaddr = MMAP_VADDR(mmap_vstart, pending_idx, i);
-+		SetPageReserved(pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT));
-+	}
-+
-+	pending_req = &pending_reqs[pending_idx];
-+	pending_req->blkif     = blkif;
-+	pending_req->id        = req->id;
-+	pending_req->operation = req->operation;
-+	pending_req->status    = BLKIF_RSP_OKAY;
-+	pending_req->nr_pages  = nseg;
-+	req->id = MAKE_ID(blkif->domid, pending_idx);
-+	//atomic_set(&pending_req->pendcnt, nbio);
-+	pending_cons++;
-+	blkif_get(blkif);
-+
-+	/* Finally, write the request message to the user ring. */
-+	target = RING_GET_REQUEST(&blktap_ufe_ring,
-+				  blktap_ufe_ring.req_prod_pvt);
-+	memcpy(target, req, sizeof(*req));
-+	blktap_ufe_ring.req_prod_pvt++;
-+	return;
-+
-+ bad_descriptor:
-+	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
-+} 
-+
-+
-+
-+/******************************************************************
-+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
-+ */
-+
-+
-+static void make_response(blkif_t *blkif, unsigned long id, 
-+                          unsigned short op, int st)
-+{
-+	blkif_response_t *resp;
-+	unsigned long     flags;
-+	blkif_back_ring_t *blk_ring = &blkif->blk_ring;
-+
-+	/* Place on the response ring for the relevant domain. */ 
-+	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-+	resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-+	resp->id        = id;
-+	resp->operation = op;
-+	resp->status    = st;
-+	wmb(); /* Ensure other side can see the response fields. */
-+	blk_ring->rsp_prod_pvt++;
-+	RING_PUSH_RESPONSES(blk_ring);
-+	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-+
-+	/* Kick the relevant domain. */
-+	notify_remote_via_irq(blkif->irq);
-+}
-+
-+static struct miscdevice blktap_miscdev = {
-+	.minor        = BLKTAP_MINOR,
-+	.name         = "blktap",
-+	.fops         = &blktap_fops,
-+	.devfs_name   = "misc/blktap",
-+};
-+
-+void blkif_deschedule(blkif_t *blkif)
-+{
-+	remove_from_blkdev_list(blkif);
-+}
-+
-+static int __init blkif_init(void)
-+{
-+	int i, j, err;
-+	struct page *page;
-+
-+	blkif_interface_init();
-+
-+	page = balloon_alloc_empty_page_range(MMAP_PAGES);
-+	BUG_ON(page == NULL);
-+	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-+
-+	pending_cons = 0;
-+	pending_prod = MAX_PENDING_REQS;
-+	memset(pending_reqs, 0, sizeof(pending_reqs));
-+	for ( i = 0; i < MAX_PENDING_REQS; i++ )
-+		pending_ring[i] = i;
-+    
-+	spin_lock_init(&blkio_schedule_list_lock);
-+	INIT_LIST_HEAD(&blkio_schedule_list);
-+
-+	i = kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES);
-+	BUG_ON(i<0);
-+
-+	blkif_xenbus_init();
-+
-+	for (i = 0; i < MAX_PENDING_REQS ; i++)
-+		for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
-+			BLKTAP_INVALIDATE_HANDLE(&pending_handle(i, j));
-+
-+	err = misc_register(&blktap_miscdev);
-+	if (err != 0) {
-+		printk(KERN_ALERT "Couldn't register /dev/misc/blktap (%d)\n",
-+		       err);
-+		return err;
-+	}
-+
-+	init_waitqueue_head(&blktap_wait);
-+
-+	return 0;
-+}
-+
-+__initcall(blkif_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
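As an aside on the slot management used throughout the blktap code above (blkif_init(), dispatch_rw_block_io() and blktap_read_ufe_ring()): pending_ring plus the free-running pending_prod/pending_cons counters form a ring of free request-slot indices, consumed on dispatch and refilled on completion. The sketch below is a minimal user-space rendering of that idea; the definitions of MAX_PENDING_REQS, MASK_PEND_IDX and NR_PENDING_REQS are assumptions (the real ones live in an earlier hunk of this patch), chosen only to be consistent with how the functions above use them.

/* Minimal sketch of blktap's free-slot index ring (assumed macro
 * definitions; not taken verbatim from the patch). */
#include <assert.h>
#include <stdio.h>

#define MAX_PENDING_REQS 64                       /* assumed power of two */
#define MASK_PEND_IDX(i) ((i) & (MAX_PENDING_REQS - 1))
#define NR_PENDING_REQS  (MAX_PENDING_REQS - pending_prod + pending_cons)

static unsigned int pending_ring[MAX_PENDING_REQS];
static unsigned int pending_prod, pending_cons;   /* free-running counters */

static void ring_init(void)                       /* cf. blkif_init() */
{
	unsigned int i;
	pending_cons = 0;
	pending_prod = MAX_PENDING_REQS;          /* all slots start out free */
	for (i = 0; i < MAX_PENDING_REQS; i++)
		pending_ring[i] = i;
}

static unsigned int slot_alloc(void)              /* cf. dispatch_rw_block_io() */
{
	assert(NR_PENDING_REQS < MAX_PENDING_REQS);
	return pending_ring[MASK_PEND_IDX(pending_cons++)];
}

static void slot_free(unsigned int idx)           /* cf. blktap_read_ufe_ring() */
{
	pending_ring[MASK_PEND_IDX(pending_prod++)] = idx;
}

int main(void)
{
	ring_init();
	unsigned int a = slot_alloc(), b = slot_alloc();
	printf("in flight: %u (slots %u and %u)\n", NR_PENDING_REQS, a, b);
	slot_free(a);
	slot_free(b);
	printf("in flight after completion: %u\n", NR_PENDING_REQS);
	return 0;
}

Because the mask is a power of two the counters never need explicit wrapping: pending_prod - pending_cons is always the number of free slots, and NR_PENDING_REQS the number in flight.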
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/common.h linux-2.6.12-xen/drivers/xen/blktap/common.h
---- pristine-linux-2.6.12/drivers/xen/blktap/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blktap/common.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,110 @@
-+
-+#ifndef __BLKIF__BACKEND__COMMON_H__
-+#define __BLKIF__BACKEND__COMMON_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/blkdev.h>
-+#include <linux/vmalloc.h>
-+#include <asm/io.h>
-+#include <asm/setup.h>
-+#include <asm/pgalloc.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xen-public/io/blkif.h>
-+#include <asm-xen/xen-public/io/ring.h>
-+#include <asm-xen/gnttab.h>
-+#include <asm-xen/driver_util.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+                                    __FILE__ , __LINE__ , ## _a )
-+
-+#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
-+
-+struct vbd {
-+	blkif_vdev_t   handle;      /* what the domain refers to this vbd as */
-+	unsigned char  readonly;    /* Non-zero -> read-only */
-+	unsigned char  type;        /* VDISK_xxx */
-+	u32            pdevice;     /* phys device that this vbd maps to */
-+	struct block_device *bdev;
-+}; 
-+
-+typedef struct blkif_st {
-+	/* Unique identifier for this interface. */
-+	domid_t           domid;
-+	unsigned int      handle;
-+	/* Physical parameters of the comms window. */
-+	unsigned int      evtchn;
-+	unsigned int      irq;
-+	/* Comms information. */
-+	blkif_back_ring_t blk_ring;
-+	struct vm_struct *blk_ring_area;
-+	/* VBDs attached to this interface. */
-+	struct vbd        vbd;
-+	/* Private fields. */
-+	enum { DISCONNECTED, CONNECTED } status;
-+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
-+	/* Is this a blktap frontend */
-+	unsigned int     is_blktap;
-+#endif
-+	struct list_head blkdev_list;
-+	spinlock_t       blk_ring_lock;
-+	atomic_t         refcnt;
-+
-+	struct work_struct free_work;
-+
-+	grant_handle_t   shmem_handle;
-+	grant_ref_t      shmem_ref;
-+} blkif_t;
-+
-+blkif_t *alloc_blkif(domid_t domid);
-+void free_blkif_callback(blkif_t *blkif);
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
-+
-+#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define blkif_put(_b)                             \
-+    do {                                          \
-+        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-+            free_blkif_callback(_b);		  \
-+    } while (0)
-+
-+/* Create a vbd. */
-+int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, u32 pdevice,
-+	       int readonly);
-+void vbd_free(struct vbd *vbd);
-+
-+unsigned long vbd_size(struct vbd *vbd);
-+unsigned int vbd_info(struct vbd *vbd);
-+unsigned long vbd_secsize(struct vbd *vbd);
-+
-+struct phys_req {
-+	unsigned short       dev;
-+	unsigned short       nr_sects;
-+	struct block_device *bdev;
-+	blkif_sector_t       sector_number;
-+};
-+
-+int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation); 
-+
-+void blkif_interface_init(void);
-+
-+void blkif_deschedule(blkif_t *blkif);
-+
-+void blkif_xenbus_init(void);
-+
-+irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+
-+#endif /* __BLKIF__BACKEND__COMMON_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
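The blkif_get()/blkif_put() pair defined just above is a conventional last-reference-frees scheme: every code path that stores a blkif_t pointer (the scheduler list, each pending request) takes a reference first and drops it when done, and the final put hands the object to free_blkif_callback(). A user-space analogue of the same convention, with C11 atomics standing in for the kernel's atomic_t and purely illustrative names, might look like this:

/* Sketch of the get/put reference-counting convention (illustrative
 * names; not code from the patch). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
};

#define obj_get(o)  atomic_fetch_add(&(o)->refcnt, 1)
#define obj_put(o)                                                   \
	do {                                                         \
		if (atomic_fetch_sub(&(o)->refcnt, 1) == 1) {        \
			printf("last reference dropped, freeing\n"); \
			free(o);                                     \
		}                                                    \
	} while (0)

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcnt, 1);     /* creator holds the first reference */

	obj_get(o);                     /* e.g. queued on a work list */
	obj_put(o);                     /* that work is done */
	obj_put(o);                     /* creator drops its reference: freed */
	return 0;
}

The do/while(0) wrapper mirrors the kernel macro above and keeps the put usable as a single statement inside if/else bodies.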
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/interface.c linux-2.6.12-xen/drivers/xen/blktap/interface.c
---- pristine-linux-2.6.12/drivers/xen/blktap/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blktap/interface.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,146 @@
-+/******************************************************************************
-+ * arch/xen/drivers/blkif/backend/interface.c
-+ * 
-+ * Block-device interface management.
-+ * 
-+ * Copyright (c) 2004, Keir Fraser
-+ */
-+
-+#include "common.h"
-+#include <asm-xen/evtchn.h>
-+
-+static kmem_cache_t *blkif_cachep;
-+
-+blkif_t *alloc_blkif(domid_t domid)
-+{
-+	blkif_t *blkif;
-+
-+	blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
-+	if (!blkif)
-+		return ERR_PTR(-ENOMEM);
-+
-+	memset(blkif, 0, sizeof(*blkif));
-+	blkif->domid = domid;
-+	blkif->status = DISCONNECTED;
-+	spin_lock_init(&blkif->blk_ring_lock);
-+	atomic_set(&blkif->refcnt, 1);
-+
-+	return blkif;
-+}
-+
-+static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
-+{
-+	struct gnttab_map_grant_ref op;
-+	int ret;
-+
-+	op.host_addr = (unsigned long)blkif->blk_ring_area->addr;
-+	op.flags     = GNTMAP_host_map;
-+	op.ref       = shared_page;
-+	op.dom       = blkif->domid;
-+
-+	lock_vm_area(blkif->blk_ring_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+	unlock_vm_area(blkif->blk_ring_area);
-+	BUG_ON(ret);
-+
-+	if (op.status) {
-+		DPRINTK(" Grant table operation failure !\n");
-+		return op.status;
-+	}
-+
-+	blkif->shmem_ref    = shared_page;
-+	blkif->shmem_handle = op.handle;
-+
-+	return 0;
-+}
-+
-+static void unmap_frontend_page(blkif_t *blkif)
-+{
-+	struct gnttab_unmap_grant_ref op;
-+	int ret;
-+
-+	op.host_addr    = (unsigned long)blkif->blk_ring_area->addr;
-+	op.handle       = blkif->shmem_handle;
-+	op.dev_bus_addr = 0;
-+
-+	lock_vm_area(blkif->blk_ring_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+	unlock_vm_area(blkif->blk_ring_area);
-+	BUG_ON(ret);
-+}
-+
-+int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
-+{
-+	blkif_sring_t *sring;
-+	int err;
-+	evtchn_op_t op = {
-+		.cmd = EVTCHNOP_bind_interdomain,
-+		.u.bind_interdomain.remote_dom  = blkif->domid,
-+		.u.bind_interdomain.remote_port = evtchn };
-+
-+	if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
-+		return -ENOMEM;
-+
-+	err = map_frontend_page(blkif, shared_page);
-+	if (err) {
-+		free_vm_area(blkif->blk_ring_area);
-+		return err;
-+	}
-+
-+	err = HYPERVISOR_event_channel_op(&op);
-+	if (err) {
-+		unmap_frontend_page(blkif);
-+		free_vm_area(blkif->blk_ring_area);
-+		return err;
-+	}
-+
-+	blkif->evtchn = op.u.bind_interdomain.local_port;
-+
-+	sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-+	BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
-+
-+	blkif->irq = bind_evtchn_to_irqhandler(
-+		blkif->evtchn, blkif_be_int, 0, "blkif-backend", blkif);
-+
-+	blkif->status = CONNECTED;
-+
-+	return 0;
-+}
-+
-+static void free_blkif(void *arg)
-+{
-+	blkif_t *blkif = (blkif_t *)arg;
-+
-+	if (blkif->irq)
-+		unbind_from_irqhandler(blkif->irq, blkif);
-+
-+	if (blkif->blk_ring.sring) {
-+		unmap_frontend_page(blkif);
-+		free_vm_area(blkif->blk_ring_area);
-+		blkif->blk_ring.sring = NULL;
-+	}
-+
-+	kmem_cache_free(blkif_cachep, blkif);
-+}
-+
-+void free_blkif_callback(blkif_t *blkif)
-+{
-+	INIT_WORK(&blkif->free_work, free_blkif, (void *)blkif);
-+	schedule_work(&blkif->free_work);
-+}
-+
-+void __init blkif_interface_init(void)
-+{
-+	blkif_cachep = kmem_cache_create(
-+		"blkif_cache", sizeof(blkif_t), 0, 0, NULL, NULL);
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/Makefile linux-2.6.12-xen/drivers/xen/blktap/Makefile
---- pristine-linux-2.6.12/drivers/xen/blktap/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blktap/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,3 @@
-+
-+obj-y	:= xenbus.o interface.o blktap.o 
-+
-diff -Nurp pristine-linux-2.6.12/drivers/xen/blktap/xenbus.c linux-2.6.12-xen/drivers/xen/blktap/xenbus.c
---- pristine-linux-2.6.12/drivers/xen/blktap/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/blktap/xenbus.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,234 @@
-+/*  Xenbus code for blkif tap
-+
-+    A Warfield.
-+
-+    Hastily modified from the original backend code:
-+
-+    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
-+
-+    This program is free software; you can redistribute it and/or modify
-+    it under the terms of the GNU General Public License as published by
-+    the Free Software Foundation; either version 2 of the License, or
-+    (at your option) any later version.
-+
-+    This program is distributed in the hope that it will be useful,
-+    but WITHOUT ANY WARRANTY; without even the implied warranty of
-+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+    GNU General Public License for more details.
-+
-+    You should have received a copy of the GNU General Public License
-+    along with this program; if not, write to the Free Software
-+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+*/
-+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <asm-xen/xenbus.h>
-+#include "common.h"
-+
-+struct backend_info
-+{
-+	struct xenbus_device *dev;
-+
-+	/* our communications channel */
-+	blkif_t *blkif;
-+
-+	long int frontend_id;
-+
-+	/* watch back end for changes */
-+	struct xenbus_watch backend_watch;
-+
-+	/* watch front end for changes */
-+	struct xenbus_watch watch;
-+	char *frontpath;
-+};
-+
-+static int blkback_remove(struct xenbus_device *dev)
-+{
-+	struct backend_info *be = dev->data;
-+
-+	if (be->watch.node)
-+		unregister_xenbus_watch(&be->watch);
-+	unregister_xenbus_watch(&be->backend_watch);
-+	if (be->blkif)
-+		blkif_put(be->blkif);
-+	kfree(be->frontpath);
-+	kfree(be);
-+	return 0;
-+}
-+
-+/* Front end tells us frame. */
-+static void frontend_changed(struct xenbus_watch *watch,
-+			     const char **vec, unsigned int len)
-+{
-+	unsigned long ring_ref;
-+	unsigned int evtchn;
-+	int err;
-+	struct backend_info *be
-+		= container_of(watch, struct backend_info, watch);
-+
-+	/* If other end is gone, delete ourself. */
-+	if (vec && !xenbus_exists(be->frontpath, "")) {
-+		xenbus_rm(be->dev->nodename, "");
-+		device_unregister(&be->dev->dev);
-+		return;
-+	}
-+	if (be->blkif == NULL || be->blkif->status == CONNECTED)
-+		return;
-+
-+	err = xenbus_gather(be->frontpath, "ring-ref", "%lu", &ring_ref,
-+			    "event-channel", "%u", &evtchn, NULL);
-+	if (err) {
-+		xenbus_dev_error(be->dev, err,
-+				 "reading %s/ring-ref and event-channel",
-+				 be->frontpath);
-+		return;
-+	}
-+
-+	/* Map the shared frame, irq etc. */
-+	err = blkif_map(be->blkif, ring_ref, evtchn);
-+	if (err) {
-+		xenbus_dev_error(be->dev, err, "mapping ring-ref %lu port %u",
-+				 ring_ref, evtchn);
-+		goto abort;
-+	}
-+
-+	xenbus_dev_ok(be->dev);
-+
-+	return;
-+
-+abort:
-+	xenbus_transaction_end(1);
-+}
-+
-+/* 
-+   Setup supplies physical device.  
-+   We provide event channel and device details to front end.
-+   Frontend supplies shared frame and event channel.
-+ */
-+static void backend_changed(struct xenbus_watch *watch,
-+			    const char **vec, unsigned int len)
-+{
-+	int err;
-+	char *p;
-+	long int handle;
-+	struct backend_info *be
-+		= container_of(watch, struct backend_info, backend_watch);
-+	struct xenbus_device *dev = be->dev;
-+
-+	if (be->blkif == NULL) {
-+		/* Front end dir is a number, which is used as the handle. */
-+		p = strrchr(be->frontpath, '/') + 1;
-+		handle = simple_strtoul(p, NULL, 0);
-+
-+		be->blkif = alloc_blkif(be->frontend_id);
-+		if (IS_ERR(be->blkif)) {
-+			err = PTR_ERR(be->blkif);
-+			be->blkif = NULL;
-+			xenbus_dev_error(dev, err, "creating block interface");
-+			return;
-+		}
-+
-+		/* Pass in NULL node to skip exist test. */
-+		frontend_changed(&be->watch, NULL, 0);
-+	}
-+}
-+
-+static int blkback_probe(struct xenbus_device *dev,
-+			 const struct xenbus_device_id *id)
-+{
-+	struct backend_info *be;
-+	char *frontend;
-+	int err;
-+
-+	be = kmalloc(sizeof(*be), GFP_KERNEL);
-+	if (!be) {
-+		xenbus_dev_error(dev, -ENOMEM, "allocating backend structure");
-+		return -ENOMEM;
-+	}
-+	memset(be, 0, sizeof(*be));
-+
-+	frontend = NULL;
-+	err = xenbus_gather(dev->nodename,
-+			    "frontend-id", "%li", &be->frontend_id,
-+			    "frontend", NULL, &frontend,
-+			    NULL);
-+	if (XENBUS_EXIST_ERR(err))
-+		goto free_be;
-+	if (err < 0) {
-+		xenbus_dev_error(dev, err,
-+				 "reading %s/frontend or frontend-id",
-+				 dev->nodename);
-+		goto free_be;
-+	}
-+	if (strlen(frontend) == 0 || !xenbus_exists(frontend, "")) {
-+		/* If we can't get a frontend path and a frontend-id,
-+		 * then our bus-id is no longer valid and we need to
-+		 * destroy the backend device.
-+		 */
-+		err = -ENOENT;
-+		goto free_be;
-+	}
-+
-+	be->dev = dev;
-+	be->backend_watch.node = dev->nodename;
-+	be->backend_watch.callback = backend_changed;
-+	/* Registration implicitly fires backend_changed once */
-+	err = register_xenbus_watch(&be->backend_watch);
-+	if (err) {
-+		be->backend_watch.node = NULL;
-+		xenbus_dev_error(dev, err, "adding backend watch on %s",
-+				 dev->nodename);
-+		goto free_be;
-+	}
-+
-+	be->frontpath = frontend;
-+	be->watch.node = be->frontpath;
-+	be->watch.callback = frontend_changed;
-+	err = register_xenbus_watch(&be->watch);
-+	if (err) {
-+		be->watch.node = NULL;
-+		xenbus_dev_error(dev, err,
-+				 "adding frontend watch on %s",
-+				 be->frontpath);
-+		goto free_be;
-+	}
-+
-+	dev->data = be;
-+	return 0;
-+
-+ free_be:
-+	if (be->backend_watch.node)
-+		unregister_xenbus_watch(&be->backend_watch);
-+	kfree(frontend);
-+	kfree(be);
-+	return err;
-+}
-+
-+static struct xenbus_device_id blkback_ids[] = {
-+	{ "vbd" },
-+	{ "" }
-+};
-+
-+static struct xenbus_driver blkback = {
-+	.name = "vbd",
-+	.owner = THIS_MODULE,
-+	.ids = blkback_ids,
-+	.probe = blkback_probe,
-+	.remove = blkback_remove,
-+};
-+
-+void blkif_xenbus_init(void)
-+{
-+	xenbus_register_backend(&blkback);
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/char/Makefile linux-2.6.12-xen/drivers/xen/char/Makefile
---- pristine-linux-2.6.12/drivers/xen/char/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/char/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+obj-y	:= mem.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/char/mem.c linux-2.6.12-xen/drivers/xen/char/mem.c
---- pristine-linux-2.6.12/drivers/xen/char/mem.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/char/mem.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,157 @@
-+/*
-+ *  Originally from linux/drivers/char/mem.c
-+ *
-+ *  Copyright (C) 1991, 1992  Linus Torvalds
-+ *
-+ *  Added devfs support. 
-+ *    Jan-11-1998, C. Scott Ananian <cananian at alumni.princeton.edu>
-+ *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj at sgi.com>
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/mm.h>
-+#include <linux/miscdevice.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/mman.h>
-+#include <linux/random.h>
-+#include <linux/init.h>
-+#include <linux/raw.h>
-+#include <linux/tty.h>
-+#include <linux/capability.h>
-+#include <linux/smp_lock.h>
-+#include <linux/devfs_fs_kernel.h>
-+#include <linux/ptrace.h>
-+#include <linux/device.h>
-+#include <asm/pgalloc.h>
-+#include <asm/uaccess.h>
-+#include <asm/io.h>
-+#include <asm/hypervisor.h>
-+
-+static inline int uncached_access(struct file *file)
-+{
-+        if (file->f_flags & O_SYNC)
-+                return 1;
-+        /* Xen sets correct MTRR type on non-RAM for us. */
-+        return 0;
-+}
-+
-+/*
-+ * This function reads the *physical* memory. The f_pos points directly to the
-+ * memory location. 
-+ */
-+static ssize_t read_mem(struct file * file, char __user * buf,
-+			size_t count, loff_t *ppos)
-+{
-+	unsigned long i, p = *ppos;
-+	ssize_t read = -EFAULT;
-+	void __iomem *v;
-+
-+	if ((v = ioremap(p, count)) == NULL) {
-+		/*
-+		 * Some programs (e.g., dmidecode) groove off into weird RAM
-+		 * areas where no tables can possibly exist (because Xen will
-+		 * have stomped on them!). These programs get rather upset if
-+		 * we let them know that Xen failed their access, so we fake
-+		 * out a read of all zeroes. :-)
-+		 */
-+		for (i = 0; i < count; i++)
-+			if (put_user(0, buf+i))
-+				return -EFAULT;
-+		return count;
-+	}
-+	if (copy_to_user(buf, v, count))
-+		goto out;
-+
-+	read = count;
-+	*ppos += read;
-+out:
-+	iounmap(v);
-+	return read;
-+}
-+
-+static ssize_t write_mem(struct file * file, const char __user * buf, 
-+			 size_t count, loff_t *ppos)
-+{
-+	unsigned long p = *ppos;
-+	ssize_t written = -EFAULT;
-+	void __iomem *v;
-+
-+	if ((v = ioremap(p, count)) == NULL)
-+		return -EFAULT;
-+	if (copy_from_user(v, buf, count))
-+		goto out;
-+
-+	written = count;
-+	*ppos += written;
-+out:
-+	iounmap(v);
-+	return written;
-+}
-+
-+static int mmap_mem(struct file * file, struct vm_area_struct * vma)
-+{
-+	if (uncached_access(file))
-+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-+
-+	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-+				   vma->vm_end - vma->vm_start,
-+				   vma->vm_page_prot, DOMID_IO))
-+		return -EAGAIN;
-+
-+	return 0;
-+}
-+
-+/*
-+ * The memory devices use the full 32/64 bits of the offset, and so we cannot
-+ * check against negative addresses: they are ok. The return value is weird,
-+ * though, in that case (0).
-+ *
-+ * also note that seeking relative to the "end of file" isn't supported:
-+ * it has no meaning, so it returns -EINVAL.
-+ */
-+static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
-+{
-+	loff_t ret;
-+
-+	down(&file->f_dentry->d_inode->i_sem);
-+	switch (orig) {
-+		case 0:
-+			file->f_pos = offset;
-+			ret = file->f_pos;
-+			force_successful_syscall_return();
-+			break;
-+		case 1:
-+			file->f_pos += offset;
-+			ret = file->f_pos;
-+			force_successful_syscall_return();
-+			break;
-+		default:
-+			ret = -EINVAL;
-+	}
-+	up(&file->f_dentry->d_inode->i_sem);
-+	return ret;
-+}
-+
-+static int open_mem(struct inode * inode, struct file * filp)
-+{
-+	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-+}
-+
-+struct file_operations mem_fops = {
-+	.llseek		= memory_lseek,
-+	.read		= read_mem,
-+	.write		= write_mem,
-+	.mmap		= mmap_mem,
-+	.open		= open_mem,
-+};
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
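From user space the driver above keeps the usual /dev/mem convention: the file offset is the physical address, memory_lseek() positions it, and read_mem()/write_mem() do the transfer (with the Xen twist that regions ioremap() refuses are faked as zeroes rather than failing). A minimal sketch of a caller, where the choice of 0xF0000 (the legacy BIOS area) is purely illustrative:

/* Sketch of a /dev/mem reader; the physical address used is only an
 * example. Needs CAP_SYS_RAWIO, as enforced by open_mem() above. */
#define _XOPEN_SOURCE 700
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/* pread() bundles the lseek()+read() pair handled by the driver. */
	if (pread(fd, buf, sizeof(buf), 0xF0000) != (ssize_t)sizeof(buf)) {
		perror("pread");
		close(fd);
		return 1;
	}

	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");

	close(fd);
	return 0;
}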
-diff -Nurp pristine-linux-2.6.12/drivers/xen/console/console.c linux-2.6.12-xen/drivers/xen/console/console.c
---- pristine-linux-2.6.12/drivers/xen/console/console.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/console/console.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,688 @@
-+/******************************************************************************
-+ * console.c
-+ * 
-+ * Virtual console driver.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser.
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <linux/tty.h>
-+#include <linux/tty_flip.h>
-+#include <linux/serial.h>
-+#include <linux/major.h>
-+#include <linux/ptrace.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+#include <linux/init.h>
-+#include <linux/console.h>
-+#include <linux/bootmem.h>
-+#include <linux/sysrq.h>
-+#include <asm/io.h>
-+#include <asm/irq.h>
-+#include <asm/uaccess.h>
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/event_channel.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/xencons.h>
-+
-+/*
-+ * Modes:
-+ *  'xencons=off'  [XC_OFF]:     Console is disabled.
-+ *  'xencons=tty'  [XC_TTY]:     Console attached to '/dev/tty[0-9]+'.
-+ *  'xencons=ttyS' [XC_SERIAL]:  Console attached to '/dev/ttyS[0-9]+'.
-+ *                 [XC_DEFAULT]: DOM0 -> XC_SERIAL ; all others -> XC_TTY.
-+ * 
-+ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
-+ * warnings from standard distro startup scripts.
-+ */
-+static enum { XC_OFF, XC_DEFAULT, XC_TTY, XC_SERIAL } xc_mode = XC_DEFAULT;
-+static int xc_num = -1;
-+
-+#ifdef CONFIG_MAGIC_SYSRQ
-+static unsigned long sysrq_requested;
-+extern int sysrq_enabled;
-+#endif
-+
-+static int __init xencons_setup(char *str)
-+{
-+	char *q;
-+	int n;
-+
-+	if (!strncmp(str, "ttyS", 4))
-+		xc_mode = XC_SERIAL;
-+	else if (!strncmp(str, "tty", 3))
-+		xc_mode = XC_TTY;
-+	else if (!strncmp(str, "off", 3))
-+		xc_mode = XC_OFF;
-+
-+	switch ( xc_mode )
-+	{
-+	case XC_SERIAL:
-+		n = simple_strtol(str+4, &q, 10);
-+		if (q > (str + 4))
-+			xc_num = n;
-+		break;
-+	case XC_TTY:
-+		n = simple_strtol(str+3, &q, 10);
-+		if (q > (str + 3))
-+			xc_num = n;
-+		break;
-+	default:
-+		break;
-+	}
-+
-+	return 1;
-+}
-+__setup("xencons=", xencons_setup);
-+
-+/* The kernel and user-land drivers share a common transmit buffer. */
-+static unsigned int wbuf_size = 4096;
-+#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
-+static char *wbuf;
-+static unsigned int wc, wp; /* write_cons, write_prod */
-+
-+static int __init xencons_bufsz_setup(char *str)
-+{
-+	unsigned int goal;
-+	goal = simple_strtoul(str, NULL, 0);
-+	while (wbuf_size < goal)
-+		wbuf_size <<= 1;
-+	return 1;
-+}
-+__setup("xencons_bufsz=", xencons_bufsz_setup);
-+
-+/* This lock protects accesses to the common transmit buffer. */
-+static spinlock_t xencons_lock = SPIN_LOCK_UNLOCKED;
-+
-+/* Common transmit-kick routine. */
-+static void __xencons_tx_flush(void);
-+
-+static struct tty_driver *xencons_driver;
-+
-+/******************** Kernel console driver ********************************/
-+
-+static void kcons_write(
-+	struct console *c, const char *s, unsigned int count)
-+{
-+	int           i = 0;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&xencons_lock, flags);
-+
-+	while (i < count) {
-+		for (; i < count; i++) {
-+			if ((wp - wc) >= (wbuf_size - 1))
-+				break;
-+			if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
-+				wbuf[WBUF_MASK(wp++)] = '\r';
-+		}
-+
-+		__xencons_tx_flush();
-+	}
-+
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void kcons_write_dom0(
-+	struct console *c, const char *s, unsigned int count)
-+{
-+	int rc;
-+
-+	while ((count > 0) &&
-+	       ((rc = HYPERVISOR_console_io(
-+			CONSOLEIO_write, count, (char *)s)) > 0)) {
-+		count -= rc;
-+		s += rc;
-+	}
-+}
-+
-+static struct tty_driver *kcons_device(struct console *c, int *index)
-+{
-+	*index = 0;
-+	return xencons_driver;
-+}
-+
-+static struct console kcons_info = {
-+	.device	= kcons_device,
-+	.flags	= CON_PRINTBUFFER,
-+	.index	= -1,
-+};
-+
-+#define __RETCODE 0
-+static int __init xen_console_init(void)
-+{
-+	if (xen_init() < 0)
-+		return __RETCODE;
-+
-+	if (xen_start_info->flags & SIF_INITDOMAIN) {
-+		if (xc_mode == XC_DEFAULT)
-+			xc_mode = XC_SERIAL;
-+		kcons_info.write = kcons_write_dom0;
-+		if (xc_mode == XC_SERIAL)
-+			kcons_info.flags |= CON_ENABLED;
-+	} else {
-+		if (xc_mode == XC_DEFAULT)
-+			xc_mode = XC_TTY;
-+		kcons_info.write = kcons_write;
-+	}
-+
-+	switch (xc_mode) {
-+	case XC_SERIAL:
-+		strcpy(kcons_info.name, "ttyS");
-+		if (xc_num == -1)
-+			xc_num = 0;
-+		break;
-+
-+	case XC_TTY:
-+		strcpy(kcons_info.name, "tty");
-+		if (xc_num == -1)
-+			xc_num = 1;
-+		break;
-+
-+	default:
-+		return __RETCODE;
-+	}
-+
-+	wbuf = alloc_bootmem(wbuf_size);
-+
-+	register_console(&kcons_info);
-+
-+	return __RETCODE;
-+}
-+console_initcall(xen_console_init);
-+
-+/*** Useful function for console debugging -- goes straight to Xen. ***/
-+asmlinkage int xprintk(const char *fmt, ...)
-+{
-+	va_list args;
-+	int printk_len;
-+	static char printk_buf[1024];
-+    
-+	/* Emit the output into the temporary buffer */
-+	va_start(args, fmt);
-+	printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
-+	va_end(args);
-+
-+	/* Send the processed output directly to Xen. */
-+	kcons_write_dom0(NULL, printk_buf, printk_len);
-+
-+	return 0;
-+}
-+
-+/*** Forcibly flush console data before dying. ***/
-+void xencons_force_flush(void)
-+{
-+	int sz;
-+
-+	/* Emergency console is synchronous, so there's nothing to flush. */
-+	if (xen_start_info->flags & SIF_INITDOMAIN)
-+		return;
-+
-+	/* Spin until console data is flushed through to the daemon. */
-+	while (wc != wp) {
-+		int sent = 0;
-+		if ((sz = wp - wc) == 0)
-+			continue;
-+		sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-+		if (sent > 0)
-+			wc += sent;
-+	}
-+}
-+
-+
-+/******************** User-space console driver (/dev/console) ************/
-+
-+#define DRV(_d)         (_d)
-+#define TTY_INDEX(_tty) ((_tty)->index)
-+
-+static struct termios *xencons_termios[MAX_NR_CONSOLES];
-+static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
-+static struct tty_struct *xencons_tty;
-+static int xencons_priv_irq;
-+static char x_char;
-+
-+void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
-+{
-+	int           i;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	if (xencons_tty == NULL)
-+		goto out;
-+
-+	for (i = 0; i < len; i++) {
-+#ifdef CONFIG_MAGIC_SYSRQ
-+		if (sysrq_enabled) {
-+			if (buf[i] == '\x0f') { /* ^O */
-+				sysrq_requested = jiffies;
-+				continue; /* don't print the sysrq key */
-+			} else if (sysrq_requested) {
-+				unsigned long sysrq_timeout =
-+					sysrq_requested + HZ*2;
-+				sysrq_requested = 0;
-+				if (time_before(jiffies, sysrq_timeout)) {
-+					spin_unlock_irqrestore(
-+						&xencons_lock, flags);
-+					handle_sysrq(
-+						buf[i], regs, xencons_tty);
-+					spin_lock_irqsave(
-+						&xencons_lock, flags);
-+					continue;
-+				}
-+			}
-+		}
-+#endif
-+		tty_insert_flip_char(xencons_tty, buf[i], 0);
-+	}
-+	tty_flip_buffer_push(xencons_tty);
-+
-+ out:
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void __xencons_tx_flush(void)
-+{
-+	int sent, sz, work_done = 0;
-+
-+	if (x_char) {
-+		if (xen_start_info->flags & SIF_INITDOMAIN)
-+			kcons_write_dom0(NULL, &x_char, 1);
-+		else
-+			while (x_char)
-+				if (xencons_ring_send(&x_char, 1) == 1)
-+					break;
-+		x_char = 0;
-+		work_done = 1;
-+	}
-+
-+	while (wc != wp) {
-+		sz = wp - wc;
-+		if (sz > (wbuf_size - WBUF_MASK(wc)))
-+			sz = wbuf_size - WBUF_MASK(wc);
-+		if (xen_start_info->flags & SIF_INITDOMAIN) {
-+			kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
-+			wc += sz;
-+		} else {
-+			sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
-+			if (sent == 0)
-+				break;
-+			wc += sent;
-+		}
-+		work_done = 1;
-+	}
-+
-+	if (work_done && (xencons_tty != NULL)) {
-+		wake_up_interruptible(&xencons_tty->write_wait);
-+		if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-+		    (xencons_tty->ldisc.write_wakeup != NULL))
-+			(xencons_tty->ldisc.write_wakeup)(xencons_tty);
-+	}
-+}
-+
-+void xencons_tx(void)
-+{
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	__xencons_tx_flush();
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+/* Privileged receive callback and transmit kicker. */
-+static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
-+                                          struct pt_regs *regs)
-+{
-+	static char rbuf[16];
-+	int         l;
-+
-+	while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
-+		xencons_rx(rbuf, l, regs);
-+
-+	xencons_tx();
-+
-+	return IRQ_HANDLED;
-+}
-+
-+static int xencons_write_room(struct tty_struct *tty)
-+{
-+	return wbuf_size - (wp - wc);
-+}
-+
-+static int xencons_chars_in_buffer(struct tty_struct *tty)
-+{
-+	return wp - wc;
-+}
-+
-+static void xencons_send_xchar(struct tty_struct *tty, char ch)
-+{
-+	unsigned long flags;
-+
-+	if (TTY_INDEX(tty) != 0)
-+		return;
-+
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	x_char = ch;
-+	__xencons_tx_flush();
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void xencons_throttle(struct tty_struct *tty)
-+{
-+	if (TTY_INDEX(tty) != 0)
-+		return;
-+
-+	if (I_IXOFF(tty))
-+		xencons_send_xchar(tty, STOP_CHAR(tty));
-+}
-+
-+static void xencons_unthrottle(struct tty_struct *tty)
-+{
-+	if (TTY_INDEX(tty) != 0)
-+		return;
-+
-+	if (I_IXOFF(tty)) {
-+		if (x_char != 0)
-+			x_char = 0;
-+		else
-+			xencons_send_xchar(tty, START_CHAR(tty));
-+	}
-+}
-+
-+static void xencons_flush_buffer(struct tty_struct *tty)
-+{
-+	unsigned long flags;
-+
-+	if (TTY_INDEX(tty) != 0)
-+		return;
-+
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	wc = wp = 0;
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static inline int __xencons_put_char(int ch)
-+{
-+	char _ch = (char)ch;
-+	if ((wp - wc) == wbuf_size)
-+		return 0;
-+	wbuf[WBUF_MASK(wp++)] = _ch;
-+	return 1;
-+}
-+
-+static int xencons_write(
-+	struct tty_struct *tty,
-+	const unsigned char *buf,
-+	int count)
-+{
-+	int i;
-+	unsigned long flags;
-+
-+	if (TTY_INDEX(tty) != 0)
-+		return count;
-+
-+	spin_lock_irqsave(&xencons_lock, flags);
-+
-+	for (i = 0; i < count; i++)
-+		if (!__xencons_put_char(buf[i]))
-+			break;
-+
-+	if (i != 0)
-+		__xencons_tx_flush();
-+
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+
-+	return i;
-+}
-+
-+static void xencons_put_char(struct tty_struct *tty, u_char ch)
-+{
-+	unsigned long flags;
-+
-+	if (TTY_INDEX(tty) != 0)
-+		return;
-+
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	(void)__xencons_put_char(ch);
-+	spin_unlock_irqrestore(&xencons_lock, flags);
-+}
-+
-+static void xencons_flush_chars(struct tty_struct *tty)
-+{
-+	unsigned long flags;
-+
-+	if (TTY_INDEX(tty) != 0)
-+		return;
-+
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	__xencons_tx_flush();
-+	spin_unlock_irqrestore(&xencons_lock, flags);    
-+}
-+
-+static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
-+{
-+	unsigned long orig_jiffies = jiffies;
-+
-+	if (TTY_INDEX(tty) != 0)
-+		return;
-+
-+	while (DRV(tty->driver)->chars_in_buffer(tty))
-+	{
-+		set_current_state(TASK_INTERRUPTIBLE);
-+		schedule_timeout(1);
-+		if (signal_pending(current))
-+			break;
-+		if ( (timeout != 0) &&
-+		     time_after(jiffies, orig_jiffies + timeout) )
-+			break;
-+	}
-+    
-+	set_current_state(TASK_RUNNING);
-+}
-+
-+static int xencons_open(struct tty_struct *tty, struct file *filp)
-+{
-+	unsigned long flags;
-+
-+	if (TTY_INDEX(tty) != 0)
-+		return 0;
-+
-+	spin_lock_irqsave(&xencons_lock, flags);
-+	tty->driver_data = NULL;
-+	if (xencons_tty == NULL)
-+		xencons_tty = tty;
-+	__xencons_tx_flush();
-+	spin_unlock_irqrestore(&xencons_lock, flags);    
-+
-+	return 0;
-+}
-+
-+static void xencons_close(struct tty_struct *tty, struct file *filp)
-+{
-+	unsigned long flags;
-+
-+	if (TTY_INDEX(tty) != 0)
-+		return;
-+
-+	if (tty->count == 1) {
-+		tty->closing = 1;
-+		tty_wait_until_sent(tty, 0);
-+		if (DRV(tty->driver)->flush_buffer != NULL)
-+			DRV(tty->driver)->flush_buffer(tty);
-+		if (tty->ldisc.flush_buffer != NULL)
-+			tty->ldisc.flush_buffer(tty);
-+		tty->closing = 0;
-+		spin_lock_irqsave(&xencons_lock, flags);
-+		xencons_tty = NULL;
-+		spin_unlock_irqrestore(&xencons_lock, flags);    
-+	}
-+}
-+
-+static struct tty_operations xencons_ops = {
-+	.open = xencons_open,
-+	.close = xencons_close,
-+	.write = xencons_write,
-+	.write_room = xencons_write_room,
-+	.put_char = xencons_put_char,
-+	.flush_chars = xencons_flush_chars,
-+	.chars_in_buffer = xencons_chars_in_buffer,
-+	.send_xchar = xencons_send_xchar,
-+	.flush_buffer = xencons_flush_buffer,
-+	.throttle = xencons_throttle,
-+	.unthrottle = xencons_unthrottle,
-+	.wait_until_sent = xencons_wait_until_sent,
-+};
-+
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+static const char *xennullcon_startup(void)
-+{
-+	return NULL;
-+}
-+
-+static int xennullcon_dummy(void)
-+{
-+	return 0;
-+}
-+
-+#define DUMMY (void *)xennullcon_dummy
-+
-+/*
-+ *  The console `switch' structure for the dummy console
-+ *
-+ *  Most of the operations are dummies.
-+ */
-+
-+const struct consw xennull_con = {
-+	.owner =		THIS_MODULE,
-+	.con_startup =	xennullcon_startup,
-+	.con_init =		DUMMY,
-+	.con_deinit =	DUMMY,
-+	.con_clear =	DUMMY,
-+	.con_putc =		DUMMY,
-+	.con_putcs =	DUMMY,
-+	.con_cursor =	DUMMY,
-+	.con_scroll =	DUMMY,
-+	.con_bmove =	DUMMY,
-+	.con_switch =	DUMMY,
-+	.con_blank =	DUMMY,
-+	.con_font_set =	DUMMY,
-+	.con_font_get =	DUMMY,
-+	.con_font_default =	DUMMY,
-+	.con_font_copy =	DUMMY,
-+	.con_set_palette =	DUMMY,
-+	.con_scrolldelta =	DUMMY,
-+};
-+#endif
-+
-+static int __init xencons_init(void)
-+{
-+	int rc;
-+
-+	if (xen_init() < 0)
-+		return -ENODEV;
-+
-+	if (xc_mode == XC_OFF)
-+		return 0;
-+
-+	xencons_ring_init();
-+
-+	xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ? 
-+					  1 : MAX_NR_CONSOLES);
-+	if (xencons_driver == NULL)
-+		return -ENOMEM;
-+
-+	DRV(xencons_driver)->name            = "xencons";
-+	DRV(xencons_driver)->major           = TTY_MAJOR;
-+	DRV(xencons_driver)->type            = TTY_DRIVER_TYPE_SERIAL;
-+	DRV(xencons_driver)->subtype         = SERIAL_TYPE_NORMAL;
-+	DRV(xencons_driver)->init_termios    = tty_std_termios;
-+	DRV(xencons_driver)->flags           = 
-+		TTY_DRIVER_REAL_RAW |
-+		TTY_DRIVER_RESET_TERMIOS |
-+		TTY_DRIVER_NO_DEVFS;
-+	DRV(xencons_driver)->termios         = xencons_termios;
-+	DRV(xencons_driver)->termios_locked  = xencons_termios_locked;
-+
-+	if (xc_mode == XC_SERIAL)
-+	{
-+		DRV(xencons_driver)->name        = "ttyS";
-+		DRV(xencons_driver)->minor_start = 64 + xc_num;
-+		DRV(xencons_driver)->name_base   = 0 + xc_num;
-+	} else {
-+		DRV(xencons_driver)->name        = "tty";
-+		DRV(xencons_driver)->minor_start = xc_num;
-+		DRV(xencons_driver)->name_base   = xc_num;
-+	}
-+
-+	tty_set_operations(xencons_driver, &xencons_ops);
-+
-+	if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
-+		printk("WARNING: Failed to register Xen virtual "
-+		       "console driver as '%s%d'\n",
-+		       DRV(xencons_driver)->name,
-+		       DRV(xencons_driver)->name_base);
-+		put_tty_driver(xencons_driver);
-+		xencons_driver = NULL;
-+		return rc;
-+	}
-+
-+	tty_register_device(xencons_driver, 0, NULL);
-+
-+	if (xen_start_info->flags & SIF_INITDOMAIN) {
-+		xencons_priv_irq = bind_virq_to_irqhandler(
-+			VIRQ_CONSOLE,
-+			0,
-+			xencons_priv_interrupt,
-+			0,
-+			"console",
-+			NULL);
-+		BUG_ON(xencons_priv_irq < 0);
-+	}
-+
-+	printk("Xen virtual console successfully installed as %s%d\n",
-+	       DRV(xencons_driver)->name,
-+	       DRV(xencons_driver)->name_base );
-+    
-+	return 0;
-+}
-+
-+module_init(xencons_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
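One detail of the console driver worth spelling out is the transmit-buffer indexing: wc and wp are free-running counters that are only reduced modulo the power-of-two buffer size (WBUF_MASK) at the point of indexing, so wp - wc is the fill level even after the counters wrap, which is exactly what xencons_write_room(), xencons_chars_in_buffer() and __xencons_put_char() rely on. A self-contained sketch of the same scheme, with an illustrative buffer size and names:

/* Sketch of the free-running-index ring used for the console transmit
 * buffer (sizes and names are illustrative). */
#include <stdio.h>

#define BUF_SIZE 8                          /* stands in for wbuf_size */
#define BUF_MASK(i) ((i) & (BUF_SIZE - 1))

static char buf[BUF_SIZE];
static unsigned int wc, wp;                 /* consumer and producer indices */

static int put_char(char ch)                /* cf. __xencons_put_char() */
{
	if (wp - wc == BUF_SIZE)
		return 0;                   /* full */
	buf[BUF_MASK(wp++)] = ch;
	return 1;
}

static int get_char(char *ch)               /* cf. the __xencons_tx_flush() loop */
{
	if (wc == wp)
		return 0;                   /* empty */
	*ch = buf[BUF_MASK(wc++)];
	return 1;
}

int main(void)
{
	const char *msg = "hello, ring\n";
	char ch;

	for (const char *p = msg; *p; p++) {
		if (!put_char(*p)) {        /* drain when full, then retry */
			while (get_char(&ch))
				putchar(ch);
			put_char(*p);
		}
	}
	while (get_char(&ch))
		putchar(ch);

	printf("chars still buffered: %u\n", wp - wc);
	return 0;
}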
-diff -Nurp pristine-linux-2.6.12/drivers/xen/console/Makefile linux-2.6.12-xen/drivers/xen/console/Makefile
---- pristine-linux-2.6.12/drivers/xen/console/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/console/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+obj-y	:= console.o xencons_ring.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/console/xencons_ring.c linux-2.6.12-xen/drivers/xen/console/xencons_ring.c
---- pristine-linux-2.6.12/drivers/xen/console/xencons_ring.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/console/xencons_ring.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,125 @@
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/errno.h>
-+#include <linux/signal.h>
-+#include <linux/sched.h>
-+#include <linux/interrupt.h>
-+#include <linux/tty.h>
-+#include <linux/tty_flip.h>
-+#include <linux/serial.h>
-+#include <linux/major.h>
-+#include <linux/ptrace.h>
-+#include <linux/ioport.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+
-+#include <asm/hypervisor.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/xencons.h>
-+#include <linux/wait.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/err.h>
-+#include <asm-xen/xen-public/io/console.h>
-+
-+static int xencons_irq;
-+
-+static inline struct xencons_interface *xencons_interface(void)
-+{
-+	return mfn_to_virt(xen_start_info->console_mfn);
-+}
-+
-+static inline void notify_daemon(void)
-+{
-+	/* Use evtchn: this is called early, before irq is set up. */
-+	notify_remote_via_evtchn(xen_start_info->console_evtchn);
-+}
-+
-+int xencons_ring_send(const char *data, unsigned len)
-+{
-+	int sent = 0;
-+	struct xencons_interface *intf = xencons_interface();
-+	XENCONS_RING_IDX cons, prod;
-+
-+	cons = intf->out_cons;
-+	prod = intf->out_prod;
-+	mb();
-+	BUG_ON((prod - cons) > sizeof(intf->out));
-+
-+	while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
-+		intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
-+
-+	wmb();
-+	intf->out_prod = prod;
-+
-+	notify_daemon();
-+
-+	return sent;
-+}	
-+
-+static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
-+{
-+	struct xencons_interface *intf = xencons_interface();
-+	XENCONS_RING_IDX cons, prod;
-+
-+	cons = intf->in_cons;
-+	prod = intf->in_prod;
-+	mb();
-+	BUG_ON((prod - cons) > sizeof(intf->in));
-+
-+	while (cons != prod) {
-+		xencons_rx(intf->in+MASK_XENCONS_IDX(cons,intf->in), 1, regs);
-+		cons++;
-+	}
-+
-+	mb();
-+	intf->in_cons = cons;
-+
-+	notify_daemon();
-+
-+	xencons_tx();
-+
-+	return IRQ_HANDLED;
-+}
-+
-+int xencons_ring_init(void)
-+{
-+	int err;
-+
-+	if (xencons_irq)
-+		unbind_from_irqhandler(xencons_irq, NULL);
-+	xencons_irq = 0;
-+
-+	if (!xen_start_info->console_evtchn)
-+		return 0;
-+
-+	err = bind_evtchn_to_irqhandler(
-+		xen_start_info->console_evtchn,
-+		handle_input, 0, "xencons", NULL);
-+	if (err <= 0) {
-+		printk(KERN_ERR "XEN console request irq failed %i\n", err);
-+		return err;
-+	}
-+
-+	xencons_irq = err;
-+
-+	/* In case we have in-flight data after save/restore... */
-+	notify_daemon();
-+
-+	return 0;
-+}
-+
-+void xencons_resume(void)
-+{
-+	(void)xencons_ring_init();
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
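The ring accessors above rely on barrier ordering rather than locks: xencons_ring_send() fills the payload before publishing out_prod (the wmb()), and handle_input() reads both indices before touching the payload (the mb()), so the peer can never observe an index that points at bytes it has not yet been handed. The user-space analogue below uses C11 release/acquire atomics to play the role of those barriers; it sketches the ordering discipline only, not the actual shared-page layout.

/* Sketch of the producer/consumer ordering used by the console ring;
 * C11 atomics stand in for the kernel barriers (illustrative only). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 16
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

static char ring[RING_SIZE];
static atomic_uint prod, cons;

static void *producer(void *arg)
{
	const char *msg = "ping\n";
	(void)arg;
	for (const char *p = msg; *p; p++) {
		unsigned int pr = atomic_load_explicit(&prod, memory_order_relaxed);
		/* wait for space (cannot happen here: msg is shorter than the ring) */
		while (pr - atomic_load_explicit(&cons, memory_order_acquire) == RING_SIZE)
			;
		ring[RING_MASK(pr)] = *p;
		/* "wmb": publish the byte before the new producer index */
		atomic_store_explicit(&prod, pr + 1, memory_order_release);
	}
	return NULL;
}

static void *consumer(void *arg)
{
	int got = 0;
	(void)arg;
	while (got < 5) {                   /* strlen("ping\n") */
		unsigned int co = atomic_load_explicit(&cons, memory_order_relaxed);
		/* "mb": only read bytes the producer has published */
		if (co == atomic_load_explicit(&prod, memory_order_acquire))
			continue;
		putchar(ring[RING_MASK(co)]);
		atomic_store_explicit(&cons, co + 1, memory_order_release);
		got++;
	}
	return NULL;
}

int main(void)
{
	pthread_t p, c;
	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

(Compile with -pthread.)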
-diff -Nurp pristine-linux-2.6.12/drivers/xen/evtchn/evtchn.c linux-2.6.12-xen/drivers/xen/evtchn/evtchn.c
---- pristine-linux-2.6.12/drivers/xen/evtchn/evtchn.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/evtchn/evtchn.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,459 @@
-+/******************************************************************************
-+ * evtchn.c
-+ * 
-+ * Driver for receiving and demuxing event-channel signals.
-+ * 
-+ * Copyright (c) 2004-2005, K A Fraser
-+ * Multi-process extensions Copyright (c) 2004, Steven Smith
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/fs.h>
-+#include <linux/errno.h>
-+#include <linux/miscdevice.h>
-+#include <linux/major.h>
-+#include <linux/proc_fs.h>
-+#include <linux/stat.h>
-+#include <linux/poll.h>
-+#include <linux/irq.h>
-+#include <linux/init.h>
-+#include <linux/gfp.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/linux-public/evtchn.h>
-+
-+struct per_user_data {
-+	/* Notification ring, accessed via /dev/xen/evtchn. */
-+#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
-+#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
-+	evtchn_port_t *ring;
-+	unsigned int ring_cons, ring_prod, ring_overflow;
-+
-+	/* Processes wait on this queue when ring is empty. */
-+	wait_queue_head_t evtchn_wait;
-+	struct fasync_struct *evtchn_async_queue;
-+};
-+
-+/* Who's bound to each port? */
-+static struct per_user_data *port_user[NR_EVENT_CHANNELS];
-+static spinlock_t port_user_lock;
-+
-+void evtchn_device_upcall(int port)
-+{
-+	struct per_user_data *u;
-+
-+	spin_lock(&port_user_lock);
-+
-+	mask_evtchn(port);
-+	clear_evtchn(port);
-+
-+	if ((u = port_user[port]) != NULL) {
-+		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
-+			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
-+			if (u->ring_cons == u->ring_prod++) {
-+				wake_up_interruptible(&u->evtchn_wait);
-+				kill_fasync(&u->evtchn_async_queue,
-+					    SIGIO, POLL_IN);
-+			}
-+		} else {
-+			u->ring_overflow = 1;
-+		}
-+	}
-+
-+	spin_unlock(&port_user_lock);
-+}
-+
-+static ssize_t evtchn_read(struct file *file, char __user *buf,
-+                           size_t count, loff_t *ppos)
-+{
-+	int rc;
-+	unsigned int c, p, bytes1 = 0, bytes2 = 0;
-+	struct per_user_data *u = file->private_data;
-+
-+	/* Whole number of ports. */
-+	count &= ~(sizeof(evtchn_port_t)-1);
-+
-+	if (count == 0)
-+		return 0;
-+
-+	if (count > PAGE_SIZE)
-+		count = PAGE_SIZE;
-+
-+	for (;;) {
-+		if (u->ring_overflow)
-+			return -EFBIG;
-+
-+		if ((c = u->ring_cons) != (p = u->ring_prod))
-+			break;
-+
-+		if (file->f_flags & O_NONBLOCK)
-+			return -EAGAIN;
-+
-+		rc = wait_event_interruptible(
-+			u->evtchn_wait, u->ring_cons != u->ring_prod);
-+		if (rc)
-+			return rc;
-+	}
-+
-+	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
-+	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
-+		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
-+			sizeof(evtchn_port_t);
-+		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
-+	} else {
-+		bytes1 = (p - c) * sizeof(evtchn_port_t);
-+		bytes2 = 0;
-+	}
-+
-+	/* Truncate chunks according to caller's maximum byte count. */
-+	if (bytes1 > count) {
-+		bytes1 = count;
-+		bytes2 = 0;
-+	} else if ((bytes1 + bytes2) > count) {
-+		bytes2 = count - bytes1;
-+	}
-+
-+	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
-+	    ((bytes2 != 0) &&
-+	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
-+		return -EFAULT;
-+
-+	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
-+
-+	return bytes1 + bytes2;
-+}
-+
-+static ssize_t evtchn_write(struct file *file, const char __user *buf,
-+                            size_t count, loff_t *ppos)
-+{
-+	int  rc, i;
-+	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-+	struct per_user_data *u = file->private_data;
-+
-+	if (kbuf == NULL)
-+		return -ENOMEM;
-+
-+	/* Whole number of ports. */
-+	count &= ~(sizeof(evtchn_port_t)-1);
-+
-+	if (count == 0) {
-+		rc = 0;
-+		goto out;
-+	}
-+
-+	if (count > PAGE_SIZE)
-+		count = PAGE_SIZE;
-+
-+	if (copy_from_user(kbuf, buf, count) != 0) {
-+		rc = -EFAULT;
-+		goto out;
-+	}
-+
-+	spin_lock_irq(&port_user_lock);
-+	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
-+		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
-+			unmask_evtchn(kbuf[i]);
-+	spin_unlock_irq(&port_user_lock);
-+
-+	rc = count;
-+
-+ out:
-+	free_page((unsigned long)kbuf);
-+	return rc;
-+}
-+
-+static void evtchn_bind_to_user(struct per_user_data *u, int port)
-+{
-+	spin_lock_irq(&port_user_lock);
-+	BUG_ON(port_user[port] != NULL);
-+	port_user[port] = u;
-+	unmask_evtchn(port);
-+	spin_unlock_irq(&port_user_lock);
-+}
-+
-+static int evtchn_ioctl(struct inode *inode, struct file *file,
-+                        unsigned int cmd, unsigned long arg)
-+{
-+	int rc;
-+	struct per_user_data *u = file->private_data;
-+	void __user *uarg = (void __user *) arg;
-+	evtchn_op_t op = { 0 };
-+
-+	switch (cmd) {
-+	case IOCTL_EVTCHN_BIND_VIRQ: {
-+		struct ioctl_evtchn_bind_virq bind;
-+
-+		rc = -EFAULT;
-+		if (copy_from_user(&bind, uarg, sizeof(bind)))
-+			break;
-+
-+		op.cmd = EVTCHNOP_bind_virq;
-+		op.u.bind_virq.virq = bind.virq;
-+		op.u.bind_virq.vcpu = 0;
-+		rc = HYPERVISOR_event_channel_op(&op);
-+		if (rc != 0)
-+			break;
-+
-+		rc = op.u.bind_virq.port;
-+		evtchn_bind_to_user(u, rc);
-+		break;
-+	}
-+
-+	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
-+		struct ioctl_evtchn_bind_interdomain bind;
-+
-+		rc = -EFAULT;
-+		if (copy_from_user(&bind, uarg, sizeof(bind)))
-+			break;
-+
-+		op.cmd = EVTCHNOP_bind_interdomain;
-+		op.u.bind_interdomain.remote_dom  = bind.remote_domain;
-+		op.u.bind_interdomain.remote_port = bind.remote_port;
-+		rc = HYPERVISOR_event_channel_op(&op);
-+		if (rc != 0)
-+			break;
-+
-+		rc = op.u.bind_interdomain.local_port;
-+		evtchn_bind_to_user(u, rc);
-+		break;
-+	}
-+
-+	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
-+		struct ioctl_evtchn_bind_unbound_port bind;
-+
-+		rc = -EFAULT;
-+		if (copy_from_user(&bind, uarg, sizeof(bind)))
-+			break;
-+
-+		op.cmd = EVTCHNOP_alloc_unbound;
-+		op.u.alloc_unbound.dom        = DOMID_SELF;
-+		op.u.alloc_unbound.remote_dom = bind.remote_domain;
-+		rc = HYPERVISOR_event_channel_op(&op);
-+		if (rc != 0)
-+			break;
-+
-+		rc = op.u.alloc_unbound.port;
-+		evtchn_bind_to_user(u, rc);
-+		break;
-+	}
-+
-+	case IOCTL_EVTCHN_UNBIND: {
-+		struct ioctl_evtchn_unbind unbind;
-+		int ret;
-+
-+		rc = -EFAULT;
-+		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
-+			break;
-+
-+		rc = -EINVAL;
-+		if (unbind.port >= NR_EVENT_CHANNELS)
-+			break;
-+
-+		spin_lock_irq(&port_user_lock);
-+    
-+		rc = -ENOTCONN;
-+		if (port_user[unbind.port] != u) {
-+			spin_unlock_irq(&port_user_lock);
-+			break;
-+		}
-+
-+		port_user[unbind.port] = NULL;
-+		mask_evtchn(unbind.port);
-+
-+		spin_unlock_irq(&port_user_lock);
-+
-+		op.cmd = EVTCHNOP_close;
-+		op.u.close.port = unbind.port;
-+		ret = HYPERVISOR_event_channel_op(&op);
-+		BUG_ON(ret);
-+
-+		rc = 0;
-+		break;
-+	}
-+
-+	case IOCTL_EVTCHN_NOTIFY: {
-+		struct ioctl_evtchn_notify notify;
-+
-+		rc = -EFAULT;
-+		if (copy_from_user(&notify, uarg, sizeof(notify)))
-+			break;
-+
-+		if (notify.port >= NR_EVENT_CHANNELS) {
-+			rc = -EINVAL;
-+		} else if (port_user[notify.port] != u) {
-+			rc = -ENOTCONN;
-+		} else {
-+			notify_remote_via_evtchn(notify.port);
-+			rc = 0;
-+		}
-+		break;
-+	}
-+
-+	case IOCTL_EVTCHN_RESET: {
-+		/* Initialise the ring to empty. Clear errors. */
-+		spin_lock_irq(&port_user_lock);
-+		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
-+		spin_unlock_irq(&port_user_lock);
-+		rc = 0;
-+		break;
-+	}
-+
-+	default:
-+		rc = -ENOSYS;
-+		break;
-+	}
-+
-+	return rc;
-+}
-+
-+static unsigned int evtchn_poll(struct file *file, poll_table *wait)
-+{
-+	unsigned int mask = POLLOUT | POLLWRNORM;
-+	struct per_user_data *u = file->private_data;
-+
-+	poll_wait(file, &u->evtchn_wait, wait);
-+	if (u->ring_cons != u->ring_prod)
-+		mask |= POLLIN | POLLRDNORM;
-+	if (u->ring_overflow)
-+		mask = POLLERR;
-+	return mask;
-+}
-+
-+static int evtchn_fasync(int fd, struct file *filp, int on)
-+{
-+	struct per_user_data *u = filp->private_data;
-+	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
-+}
-+
-+static int evtchn_open(struct inode *inode, struct file *filp)
-+{
-+	struct per_user_data *u;
-+
-+	if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
-+		return -ENOMEM;
-+
-+	memset(u, 0, sizeof(*u));
-+	init_waitqueue_head(&u->evtchn_wait);
-+
-+	u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
-+	if (u->ring == NULL) {
-+		kfree(u);
-+		return -ENOMEM;
-+	}
-+
-+	filp->private_data = u;
-+
-+	return 0;
-+}
-+
-+static int evtchn_release(struct inode *inode, struct file *filp)
-+{
-+	int i;
-+	struct per_user_data *u = filp->private_data;
-+	evtchn_op_t op = { 0 };
-+
-+	spin_lock_irq(&port_user_lock);
-+
-+	free_page((unsigned long)u->ring);
-+
-+	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
-+		int ret;
-+		if (port_user[i] != u)
-+			continue;
-+
-+		port_user[i] = NULL;
-+		mask_evtchn(i);
-+
-+		op.cmd = EVTCHNOP_close;
-+		op.u.close.port = i;
-+		ret = HYPERVISOR_event_channel_op(&op);
-+		BUG_ON(ret);
-+	}
-+
-+	spin_unlock_irq(&port_user_lock);
-+
-+	kfree(u);
-+
-+	return 0;
-+}
-+
-+static struct file_operations evtchn_fops = {
-+	.owner   = THIS_MODULE,
-+	.read    = evtchn_read,
-+	.write   = evtchn_write,
-+	.ioctl   = evtchn_ioctl,
-+	.poll    = evtchn_poll,
-+	.fasync  = evtchn_fasync,
-+	.open    = evtchn_open,
-+	.release = evtchn_release,
-+};
-+
-+static struct miscdevice evtchn_miscdev = {
-+	.minor        = EVTCHN_MINOR,
-+	.name         = "evtchn",
-+	.fops         = &evtchn_fops,
-+	.devfs_name   = "misc/evtchn",
-+};
-+
-+static int __init evtchn_init(void)
-+{
-+	int err;
-+
-+	spin_lock_init(&port_user_lock);
-+	memset(port_user, 0, sizeof(port_user));
-+
-+	/* Create '/dev/misc/evtchn'. */
-+	err = misc_register(&evtchn_miscdev);
-+	if (err != 0) {
-+		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
-+		return err;
-+	}
-+
-+	printk("Event-channel device installed.\n");
-+
-+	return 0;
-+}
-+
-+static void evtchn_cleanup(void)
-+{
-+	misc_deregister(&evtchn_miscdev);
-+}
-+
-+module_init(evtchn_init);
-+module_exit(evtchn_cleanup);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
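
As an aside, a minimal user-space sketch of how a client might drive the evtchn device implemented above. This is not part of the patch: it assumes the ioctl interface declared in asm-xen/linux-public/evtchn.h (IOCTL_EVTCHN_BIND_VIRQ and struct ioctl_evtchn_bind_virq, as used by evtchn_ioctl() above), assumes evtchn_port_t is 32 bits wide, and the device node path (/dev/xen/evtchn here, /dev/misc/evtchn under devfs) depends on the local setup.

/* Hypothetical sketch only -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "evtchn.h"   /* asm-xen/linux-public/evtchn.h from this patch; adjust the path */

int main(void)
{
	int fd = open("/dev/xen/evtchn", O_RDWR);   /* path is an assumption */
	if (fd < 0) { perror("open"); return 1; }

	/* Bind a VIRQ; on success the ioctl returns the local port number. */
	struct ioctl_evtchn_bind_virq bind = { .virq = 0 /* VIRQ_TIMER in Xen's public headers */ };
	int port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
	if (port < 0) { perror("ioctl"); return 1; }

	for (;;) {
		/* read() returns a whole number of pending ports (see evtchn_read()). */
		uint32_t ports[8];
		ssize_t n = read(fd, ports, sizeof(ports));
		if (n <= 0) break;
		for (ssize_t i = 0; i < n / (ssize_t)sizeof(ports[0]); i++)
			printf("event on port %u\n", ports[i]);
		/* Ports are masked on delivery; writing them back unmasks them. */
		if (write(fd, ports, n) != n) break;
	}
	return 0;
}
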
-diff -Nurp pristine-linux-2.6.12/drivers/xen/evtchn/Makefile linux-2.6.12-xen/drivers/xen/evtchn/Makefile
---- pristine-linux-2.6.12/drivers/xen/evtchn/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/evtchn/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+obj-y	:= evtchn.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/Makefile linux-2.6.12-xen/drivers/xen/Makefile
---- pristine-linux-2.6.12/drivers/xen/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,19 @@
-+
-+obj-y	+= net_driver_util.o
-+obj-y	+= util.o
-+
-+obj-y	+= char/
-+obj-y	+= console/
-+obj-y	+= evtchn/
-+obj-y	+= balloon/
-+obj-y	+= privcmd/
-+obj-y	+= xenbus/
-+
-+obj-$(CONFIG_XEN_BLKDEV_BACKEND)	+= blkback/
-+obj-$(CONFIG_XEN_NETDEV_BACKEND)	+= netback/
-+obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmback/
-+obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= blkfront/
-+obj-$(CONFIG_XEN_NETDEV_FRONTEND)	+= netfront/
-+obj-$(CONFIG_XEN_BLKDEV_TAP)    	+= blktap/
-+obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront/
-+
-diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/common.h linux-2.6.12-xen/drivers/xen/netback/common.h
---- pristine-linux-2.6.12/drivers/xen/netback/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/netback/common.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,110 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/common.h
-+ */
-+
-+#ifndef __NETIF__BACKEND__COMMON_H__
-+#define __NETIF__BACKEND__COMMON_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <linux/ip.h>
-+#include <linux/in.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/xen-public/io/netif.h>
-+#include <asm/io.h>
-+#include <asm/pgalloc.h>
-+#include <asm-xen/xen-public/grant_table.h>
-+#include <asm-xen/gnttab.h>
-+#include <asm-xen/driver_util.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+                                    __FILE__ , __LINE__ , ## _a )
-+#define IPRINTK(fmt, args...) \
-+    printk(KERN_INFO "xen_net: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+    printk(KERN_WARNING "xen_net: " fmt, ##args)
-+
-+typedef struct netif_st {
-+	/* Unique identifier for this interface. */
-+	domid_t          domid;
-+	unsigned int     handle;
-+
-+	u8               fe_dev_addr[6];
-+
-+	/* Physical parameters of the comms window. */
-+	grant_handle_t   tx_shmem_handle;
-+	grant_ref_t      tx_shmem_ref; 
-+	grant_handle_t   rx_shmem_handle;
-+	grant_ref_t      rx_shmem_ref; 
-+	unsigned int     evtchn;
-+	unsigned int     irq;
-+
-+	/* The shared rings and indexes. */
-+	netif_tx_back_ring_t tx;
-+	netif_rx_back_ring_t rx;
-+	struct vm_struct *tx_comms_area;
-+	struct vm_struct *rx_comms_area;
-+
-+	/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
-+	RING_IDX rx_req_cons_peek;
-+
-+	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-+	unsigned long   credit_bytes;
-+	unsigned long   credit_usec;
-+	unsigned long   remaining_credit;
-+	struct timer_list credit_timeout;
-+
-+	/* Miscellaneous private stuff. */
-+	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-+	int active;
-+	struct list_head list;  /* scheduling list */
-+	atomic_t         refcnt;
-+	struct net_device *dev;
-+	struct net_device_stats stats;
-+
-+	struct work_struct free_work;
-+} netif_t;
-+
-+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
-+
-+void netif_creditlimit(netif_t *netif);
-+void netif_disconnect(netif_t *netif);
-+
-+netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]);
-+void free_netif(netif_t *netif);
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+	      unsigned long rx_ring_ref, unsigned int evtchn);
-+
-+#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define netif_put(_b)						\
-+	do {							\
-+		if ( atomic_dec_and_test(&(_b)->refcnt) )	\
-+			free_netif(_b);				\
-+	} while (0)
-+
-+void netif_xenbus_init(void);
-+
-+void netif_schedule_work(netif_t *netif);
-+void netif_deschedule_work(netif_t *netif);
-+
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev);
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+
-+#endif /* __NETIF__BACKEND__COMMON_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
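
A note on the netif_get()/netif_put() macros and the free_work member above: the final put defers the actual teardown to process context via a work item, presumably because the teardown path (free_netif_callback() in interface.c below) calls functions that may sleep while the last reference can be dropped from atomic context. A minimal user-space analogue of the same refcount-with-deferred-free pattern, using C11 atomics and an illustrative 'struct object' (a sketch only, not the driver's code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_int refcnt;
	/* ... payload ... */
};

/* Stands in for the deferred work item that really frees the object. */
static void object_free(struct object *obj)
{
	printf("last reference dropped; freeing\n");
	free(obj);
}

static inline void object_get(struct object *obj)
{
	atomic_fetch_add(&obj->refcnt, 1);
}

static inline void object_put(struct object *obj)
{
	/* fetch_sub returns the previous value: 1 means this was the last reference. */
	if (atomic_fetch_sub(&obj->refcnt, 1) == 1)
		object_free(obj);
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));
	atomic_init(&obj->refcnt, 1);
	object_get(obj);
	object_put(obj);
	object_put(obj);   /* count reaches zero here and obj is freed */
	return 0;
}
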
-diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/interface.c linux-2.6.12-xen/drivers/xen/netback/interface.c
---- pristine-linux-2.6.12/drivers/xen/netback/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/netback/interface.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,320 @@
-+/******************************************************************************
-+ * arch/xen/drivers/netif/backend/interface.c
-+ * 
-+ * Network-device interface management.
-+ * 
-+ * Copyright (c) 2004-2005, Keir Fraser
-+ */
-+
-+#include "common.h"
-+#include <linux/rtnetlink.h>
-+
-+static void __netif_up(netif_t *netif)
-+{
-+	struct net_device *dev = netif->dev;
-+	spin_lock_bh(&dev->xmit_lock);
-+	netif->active = 1;
-+	spin_unlock_bh(&dev->xmit_lock);
-+	enable_irq(netif->irq);
-+	netif_schedule_work(netif);
-+}
-+
-+static void __netif_down(netif_t *netif)
-+{
-+	struct net_device *dev = netif->dev;
-+	disable_irq(netif->irq);
-+	spin_lock_bh(&dev->xmit_lock);
-+	netif->active = 0;
-+	spin_unlock_bh(&dev->xmit_lock);
-+	netif_deschedule_work(netif);
-+}
-+
-+static int net_open(struct net_device *dev)
-+{
-+	netif_t *netif = netdev_priv(dev);
-+	if (netif->status == CONNECTED)
-+		__netif_up(netif);
-+	netif_start_queue(dev);
-+	return 0;
-+}
-+
-+static int net_close(struct net_device *dev)
-+{
-+	netif_t *netif = netdev_priv(dev);
-+	netif_stop_queue(dev);
-+	if (netif->status == CONNECTED)
-+		__netif_down(netif);
-+	return 0;
-+}
-+
-+netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN])
-+{
-+	int err = 0, i;
-+	struct net_device *dev;
-+	netif_t *netif;
-+	char name[IFNAMSIZ] = {};
-+
-+	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-+	dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
-+	if (dev == NULL) {
-+		DPRINTK("Could not create netif: out of memory\n");
-+		return ERR_PTR(-ENOMEM);
-+	}
-+
-+	netif = netdev_priv(dev);
-+	memset(netif, 0, sizeof(*netif));
-+	netif->domid  = domid;
-+	netif->handle = handle;
-+	netif->status = DISCONNECTED;
-+	atomic_set(&netif->refcnt, 0);
-+	netif->dev = dev;
-+
-+	netif->credit_bytes = netif->remaining_credit = ~0UL;
-+	netif->credit_usec  = 0UL;
-+	init_timer(&netif->credit_timeout);
-+
-+	dev->hard_start_xmit = netif_be_start_xmit;
-+	dev->get_stats       = netif_be_get_stats;
-+	dev->open            = net_open;
-+	dev->stop            = net_close;
-+	dev->features        = NETIF_F_NO_CSUM;
-+
-+	/* Disable queuing. */
-+	dev->tx_queue_len = 0;
-+
-+	for (i = 0; i < ETH_ALEN; i++)
-+		if (be_mac[i] != 0)
-+			break;
-+	if (i == ETH_ALEN) {
-+		/*
-+		 * Initialise a dummy MAC address. We choose the numerically
-+		 * largest non-broadcast address to prevent the address getting
-+		 * stolen by an Ethernet bridge for STP purposes.
-+                 * (FE:FF:FF:FF:FF:FF) 
-+		 */ 
-+		memset(dev->dev_addr, 0xFF, ETH_ALEN);
-+		dev->dev_addr[0] &= ~0x01;
-+	} else
-+		memcpy(dev->dev_addr, be_mac, ETH_ALEN);
-+
-+	rtnl_lock();
-+	err = register_netdevice(dev);
-+	rtnl_unlock();
-+	if (err) {
-+		DPRINTK("Could not register new net device %s: err=%d\n",
-+			dev->name, err);
-+		free_netdev(dev);
-+		return ERR_PTR(err);
-+	}
-+
-+	DPRINTK("Successfully created netif\n");
-+	return netif;
-+}
-+
-+static int map_frontend_pages(
-+	netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
-+{
-+	struct gnttab_map_grant_ref op;
-+	int ret;
-+
-+	op.host_addr = (unsigned long)netif->tx_comms_area->addr;
-+	op.flags     = GNTMAP_host_map;
-+	op.ref       = tx_ring_ref;
-+	op.dom       = netif->domid;
-+    
-+	lock_vm_area(netif->tx_comms_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+	unlock_vm_area(netif->tx_comms_area);
-+	BUG_ON(ret);
-+
-+	if (op.status) { 
-+		DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
-+		return op.status;
-+	}
-+
-+	netif->tx_shmem_ref    = tx_ring_ref;
-+	netif->tx_shmem_handle = op.handle;
-+
-+	op.host_addr = (unsigned long)netif->rx_comms_area->addr;
-+	op.flags     = GNTMAP_host_map;
-+	op.ref       = rx_ring_ref;
-+	op.dom       = netif->domid;
-+
-+	lock_vm_area(netif->rx_comms_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+	unlock_vm_area(netif->rx_comms_area);
-+	BUG_ON(ret);
-+
-+	if (op.status) {
-+		DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
-+		return op.status;
-+	}
-+
-+	netif->rx_shmem_ref    = rx_ring_ref;
-+	netif->rx_shmem_handle = op.handle;
-+
-+	return 0;
-+}
-+
-+static void unmap_frontend_pages(netif_t *netif)
-+{
-+	struct gnttab_unmap_grant_ref op;
-+	int ret;
-+
-+	op.host_addr    = (unsigned long)netif->tx_comms_area->addr;
-+	op.handle       = netif->tx_shmem_handle;
-+	op.dev_bus_addr = 0;
-+
-+	lock_vm_area(netif->tx_comms_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+	unlock_vm_area(netif->tx_comms_area);
-+	BUG_ON(ret);
-+
-+	op.host_addr    = (unsigned long)netif->rx_comms_area->addr;
-+	op.handle       = netif->rx_shmem_handle;
-+	op.dev_bus_addr = 0;
-+
-+	lock_vm_area(netif->rx_comms_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+	unlock_vm_area(netif->rx_comms_area);
-+	BUG_ON(ret);
-+}
-+
-+int netif_map(netif_t *netif, unsigned long tx_ring_ref,
-+	      unsigned long rx_ring_ref, unsigned int evtchn)
-+{
-+	int err = -ENOMEM;
-+	netif_tx_sring_t *txs;
-+	netif_rx_sring_t *rxs;
-+	evtchn_op_t op = {
-+		.cmd = EVTCHNOP_bind_interdomain,
-+		.u.bind_interdomain.remote_dom = netif->domid,
-+		.u.bind_interdomain.remote_port = evtchn };
-+
-+	/* Already connected through? */
-+	if (netif->irq)
-+		return 0;
-+
-+	netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
-+	if (netif->tx_comms_area == NULL)
-+		return -ENOMEM;
-+	netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
-+	if (netif->rx_comms_area == NULL)
-+		goto err_rx;
-+
-+	err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
-+	if (err)
-+		goto err_map;
-+
-+	err = HYPERVISOR_event_channel_op(&op);
-+	if (err)
-+		goto err_hypervisor;
-+
-+	netif->evtchn = op.u.bind_interdomain.local_port;
-+
-+	netif->irq = bind_evtchn_to_irqhandler(
-+		netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
-+	disable_irq(netif->irq);
-+
-+	txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
-+	BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
-+
-+	rxs = (netif_rx_sring_t *)
-+		((char *)netif->rx_comms_area->addr);
-+	BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
-+
-+	netif->rx_req_cons_peek = 0;
-+
-+	netif_get(netif);
-+	wmb(); /* Other CPUs see new state before interface is started. */
-+
-+	rtnl_lock();
-+	netif->status = CONNECTED;
-+	wmb();
-+	if (netif_running(netif->dev))
-+		__netif_up(netif);
-+	rtnl_unlock();
-+
-+	return 0;
-+err_hypervisor:
-+	unmap_frontend_pages(netif);
-+err_map:
-+	free_vm_area(netif->rx_comms_area);
-+err_rx:
-+	free_vm_area(netif->tx_comms_area);
-+	return err;
-+}
-+
-+static void free_netif_callback(void *arg)
-+{
-+	netif_t *netif = (netif_t *)arg;
-+
-+	if (netif->irq)
-+		unbind_from_irqhandler(netif->irq, netif);
-+	
-+	unregister_netdev(netif->dev);
-+
-+	if (netif->tx.sring) {
-+		unmap_frontend_pages(netif);
-+		free_vm_area(netif->tx_comms_area);
-+		free_vm_area(netif->rx_comms_area);
-+	}
-+
-+	free_netdev(netif->dev);
-+}
-+
-+void free_netif(netif_t *netif)
-+{
-+	INIT_WORK(&netif->free_work, free_netif_callback, (void *)netif);
-+	schedule_work(&netif->free_work);
-+}
-+
-+void netif_creditlimit(netif_t *netif)
-+{
-+#if 0
-+	/* Set the credit limit (reset remaining credit to new limit). */
-+	netif->credit_bytes     = creditlimit->credit_bytes;
-+	netif->remaining_credit = creditlimit->credit_bytes;
-+	netif->credit_usec      = creditlimit->period_usec;
-+
-+	if (netif->status == CONNECTED) {
-+		/*
-+		 * Schedule work so that any packets waiting under previous
-+		 * credit limit are dealt with (acts as a replenishment point).
-+		 */
-+		netif->credit_timeout.expires = jiffies;
-+		netif_schedule_work(netif);
-+	}
-+#endif
-+}
-+
-+void netif_disconnect(netif_t *netif)
-+{
-+	switch (netif->status) {
-+	case CONNECTED:
-+		rtnl_lock();
-+		netif->status = DISCONNECTING;
-+		wmb();
-+		if (netif_running(netif->dev))
-+			__netif_down(netif);
-+		rtnl_unlock();
-+		netif_put(netif);
-+		break;
-+	case DISCONNECTED:
-+		BUG_ON(atomic_read(&netif->refcnt) != 0);
-+		free_netif(netif);
-+		break;
-+	default:
-+		BUG();
-+	}
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/loopback.c linux-2.6.12-xen/drivers/xen/netback/loopback.c
---- pristine-linux-2.6.12/drivers/xen/netback/loopback.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/netback/loopback.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,198 @@
-+/******************************************************************************
-+ * netback/loopback.c
-+ * 
-+ * A two-interface loopback device to emulate a local netfront-netback
-+ * connection. This ensures that local packet delivery looks identical
-+ * to inter-domain delivery. Most importantly, packets delivered locally
-+ * originating from other domains will get *copied* when they traverse this
-+ * driver. This prevents unbounded delays in socket-buffer queues from
-+ * causing the netback driver to "seize up".
-+ * 
-+ * This driver creates a symmetric pair of loopback interfaces with names
-+ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
-+ * bridge, just like a proper netback interface, while a local IP interface
-+ * is configured on 'veth0'.
-+ * 
-+ * As with a real netback interface, vif0.0 is configured with a suitable
-+ * dummy MAC address. No default is provided for veth0: a reasonable strategy
-+ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
-+ * (to avoid confusing the Etherbridge).
-+ * 
-+ * Copyright (c) 2005 K A Fraser
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/ethtool.h>
-+#include <net/dst.h>
-+
-+static int nloopbacks = 8;
-+module_param(nloopbacks, int, 0);
-+MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
-+
-+struct net_private {
-+	struct net_device *loopback_dev;
-+	struct net_device_stats stats;
-+};
-+
-+static int loopback_open(struct net_device *dev)
-+{
-+	struct net_private *np = netdev_priv(dev);
-+	memset(&np->stats, 0, sizeof(np->stats));
-+	netif_start_queue(dev);
-+	return 0;
-+}
-+
-+static int loopback_close(struct net_device *dev)
-+{
-+	netif_stop_queue(dev);
-+	return 0;
-+}
-+
-+static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+	struct net_private *np = netdev_priv(dev);
-+
-+	dst_release(skb->dst);
-+	skb->dst = NULL;
-+
-+	skb_orphan(skb);
-+
-+	np->stats.tx_bytes += skb->len;
-+	np->stats.tx_packets++;
-+
-+	/* Switch to loopback context. */
-+	dev = np->loopback_dev;
-+	np  = netdev_priv(dev);
-+
-+	np->stats.rx_bytes += skb->len;
-+	np->stats.rx_packets++;
-+
-+	if (skb->ip_summed == CHECKSUM_HW) {
-+		/* Defer checksum calculation. */
-+		skb->proto_csum_blank = 1;
-+		/* Must be a local packet: assert its integrity. */
-+		skb->proto_csum_valid = 1;
-+	}
-+
-+	skb->ip_summed = skb->proto_csum_valid ?
-+		CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
-+
-+	skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
-+	skb->protocol = eth_type_trans(skb, dev);
-+	skb->dev      = dev;
-+	dev->last_rx  = jiffies;
-+	netif_rx(skb);
-+
-+	return 0;
-+}
-+
-+static struct net_device_stats *loopback_get_stats(struct net_device *dev)
-+{
-+	struct net_private *np = netdev_priv(dev);
-+	return &np->stats;
-+}
-+
-+static void loopback_construct(struct net_device *dev, struct net_device *lo)
-+{
-+	struct net_private *np = netdev_priv(dev);
-+
-+	np->loopback_dev     = lo;
-+
-+	dev->open            = loopback_open;
-+	dev->stop            = loopback_close;
-+	dev->hard_start_xmit = loopback_start_xmit;
-+	dev->get_stats       = loopback_get_stats;
-+
-+	dev->tx_queue_len    = 0;
-+
-+	dev->features        = NETIF_F_HIGHDMA | NETIF_F_LLTX;
-+
-+	/*
-+	 * We do not set a jumbo MTU on the interface. Otherwise the network
-+	 * stack will try to send large packets that will get dropped by the
-+	 * Ethernet bridge (unless the physical Ethernet interface is
-+	 * configured to transfer jumbo packets). If a larger MTU is desired
-+	 * then the system administrator can specify it using the 'ifconfig'
-+	 * command.
-+	 */
-+	/*dev->mtu             = 16*1024;*/
-+}
-+
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+	.get_tx_csum = ethtool_op_get_tx_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
-+};
-+
-+static int __init make_loopback(int i)
-+{
-+	struct net_device *dev1, *dev2;
-+	char dev_name[IFNAMSIZ];
-+	int err = -ENOMEM;
-+
-+	sprintf(dev_name, "vif0.%d", i);
-+	dev1 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
-+	sprintf(dev_name, "veth%d", i);
-+	dev2 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
-+	if ((dev1 == NULL) || (dev2 == NULL))
-+		goto fail;
-+
-+	loopback_construct(dev1, dev2);
-+	loopback_construct(dev2, dev1);
-+
-+	dev1->features |= NETIF_F_NO_CSUM;
-+	dev2->features |= NETIF_F_IP_CSUM;
-+
-+	SET_ETHTOOL_OPS(dev2, &network_ethtool_ops);
-+
-+	/*
-+	 * Initialise a dummy MAC address for the 'dummy backend' interface. We
-+	 * choose the numerically largest non-broadcast address to prevent the
-+	 * address getting stolen by an Ethernet bridge for STP purposes.
-+	 */
-+	memset(dev1->dev_addr, 0xFF, ETH_ALEN);
-+	dev1->dev_addr[0] &= ~0x01;
-+
-+	if ((err = register_netdev(dev1)) != 0)
-+		goto fail;
-+
-+	if ((err = register_netdev(dev2)) != 0) {
-+		unregister_netdev(dev1);
-+		goto fail;
-+	}
-+
-+	return 0;
-+
-+ fail:
-+	kfree(dev1);
-+	kfree(dev2);
-+	return err;
-+}
-+
-+static int __init loopback_init(void)
-+{
-+	int i, err = 0;
-+
-+	for (i = 0; i < nloopbacks; i++)
-+		if ((err = make_loopback(i)) != 0)
-+			break;
-+
-+	return err;
-+}
-+
-+module_init(loopback_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/Makefile linux-2.6.12-xen/drivers/xen/netback/Makefile
---- pristine-linux-2.6.12/drivers/xen/netback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/netback/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+obj-y	:= netback.o xenbus.o interface.o loopback.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/netback.c linux-2.6.12-xen/drivers/xen/netback/netback.c
---- pristine-linux-2.6.12/drivers/xen/netback/netback.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/netback/netback.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,828 @@
-+/******************************************************************************
-+ * drivers/xen/netback/netback.c
-+ * 
-+ * Back-end of the driver for virtual network devices. This portion of the
-+ * driver exports a 'unified' network-device interface that can be accessed
-+ * by any operating system that implements a compatible front end. A 
-+ * reference front-end implementation can be found in:
-+ *  drivers/xen/netfront/netfront.c
-+ * 
-+ * Copyright (c) 2002-2005, K A Fraser
-+ */
-+
-+#include "common.h"
-+#include <asm-xen/balloon.h>
-+#include <asm-xen/xen-public/memory.h>
-+
-+/*#define NETBE_DEBUG_INTERRUPT*/
-+
-+static void netif_idx_release(u16 pending_idx);
-+static void netif_page_release(struct page *page);
-+static void make_tx_response(netif_t *netif, 
-+                             u16      id,
-+                             s8       st);
-+static int  make_rx_response(netif_t *netif, 
-+                             u16      id, 
-+                             s8       st,
-+                             u16      offset,
-+                             u16      size,
-+                             u16      flags);
-+
-+static void net_tx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
-+
-+static void net_rx_action(unsigned long unused);
-+static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
-+
-+static struct timer_list net_timer;
-+
-+#define MAX_PENDING_REQS 256
-+
-+static struct sk_buff_head rx_queue;
-+static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
-+static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-+static gnttab_transfer_t grant_rx_op[NET_RX_RING_SIZE];
-+static unsigned char rx_notify[NR_IRQS];
-+
-+static unsigned long mmap_vstart;
-+#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
-+
-+#define PKT_PROT_LEN 64
-+
-+static struct {
-+	netif_tx_request_t req;
-+	netif_t *netif;
-+} pending_tx_info[MAX_PENDING_REQS];
-+static u16 pending_ring[MAX_PENDING_REQS];
-+typedef unsigned int PEND_RING_IDX;
-+#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
-+static PEND_RING_IDX pending_prod, pending_cons;
-+#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
-+
-+/* Freed TX SKBs get batched on this ring before return to pending_ring. */
-+static u16 dealloc_ring[MAX_PENDING_REQS];
-+static PEND_RING_IDX dealloc_prod, dealloc_cons;
-+
-+static struct sk_buff_head tx_queue;
-+
-+static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
-+static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
-+static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
-+
-+static struct list_head net_schedule_list;
-+static spinlock_t net_schedule_list_lock;
-+
-+#define MAX_MFN_ALLOC 64
-+static unsigned long mfn_list[MAX_MFN_ALLOC];
-+static unsigned int alloc_index = 0;
-+static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
-+
-+static unsigned long alloc_mfn(void)
-+{
-+	unsigned long mfn = 0, flags;
-+	struct xen_memory_reservation reservation = {
-+		.extent_start = mfn_list,
-+		.nr_extents   = MAX_MFN_ALLOC,
-+		.extent_order = 0,
-+		.domid        = DOMID_SELF
-+	};
-+	spin_lock_irqsave(&mfn_lock, flags);
-+	if ( unlikely(alloc_index == 0) )
-+		alloc_index = HYPERVISOR_memory_op(
-+			XENMEM_increase_reservation, &reservation);
-+	if ( alloc_index != 0 )
-+		mfn = mfn_list[--alloc_index];
-+	spin_unlock_irqrestore(&mfn_lock, flags);
-+	return mfn;
-+}
-+
-+static inline void maybe_schedule_tx_action(void)
-+{
-+	smp_mb();
-+	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-+	    !list_empty(&net_schedule_list))
-+		tasklet_schedule(&net_tx_tasklet);
-+}
-+
-+/*
-+ * A gross way of confirming the origin of an skb data page. The slab
-+ * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
-+ */
-+static inline int is_xen_skb(struct sk_buff *skb)
-+{
-+	extern kmem_cache_t *skbuff_cachep;
-+	kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
-+	return (cp == skbuff_cachep);
-+}
-+
-+int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+	netif_t *netif = netdev_priv(dev);
-+
-+	BUG_ON(skb->dev != dev);
-+
-+	/* Drop the packet if the target domain has no receive buffers. */
-+	if (!netif->active || 
-+	    (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
-+	    ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
-+	     NET_RX_RING_SIZE))
-+		goto drop;
-+
-+	/*
-+	 * We do not copy the packet unless:
-+	 *  1. The data is shared; or
-+	 *  2. The data is not allocated from our special cache.
-+	 * NB. We also cannot cope with fragmented packets, but we won't get
-+	 *     any because we do not advertise the NETIF_F_SG feature.
-+	 */
-+	if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
-+		int hlen = skb->data - skb->head;
-+		int ret;
-+		struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
-+		if ( unlikely(nskb == NULL) )
-+			goto drop;
-+		skb_reserve(nskb, hlen);
-+		__skb_put(nskb, skb->len);
-+		ret = skb_copy_bits(skb, -hlen, nskb->data - hlen,
-+				     skb->len + hlen);
-+		BUG_ON(ret);
-+		nskb->dev = skb->dev;
-+		nskb->proto_csum_valid = skb->proto_csum_valid;
-+		dev_kfree_skb(skb);
-+		skb = nskb;
-+	}
-+
-+	netif->rx_req_cons_peek++;
-+	netif_get(netif);
-+
-+	skb_queue_tail(&rx_queue, skb);
-+	tasklet_schedule(&net_rx_tasklet);
-+
-+	return 0;
-+
-+ drop:
-+	netif->stats.tx_dropped++;
-+	dev_kfree_skb(skb);
-+	return 0;
-+}
-+
-+#if 0
-+static void xen_network_done_notify(void)
-+{
-+	static struct net_device *eth0_dev = NULL;
-+	if (unlikely(eth0_dev == NULL))
-+		eth0_dev = __dev_get_by_name("eth0");
-+	netif_rx_schedule(eth0_dev);
-+}
-+/* 
-+ * Add following to poll() function in NAPI driver (Tigon3 is example):
-+ *  if ( xen_network_done() )
-+ *      tg3_enable_ints(tp); 
-+ */
-+int xen_network_done(void)
-+{
-+	return skb_queue_empty(&rx_queue);
-+}
-+#endif
-+
-+static void net_rx_action(unsigned long unused)
-+{
-+	netif_t *netif = NULL; 
-+	s8 status;
-+	u16 size, id, irq;
-+	multicall_entry_t *mcl;
-+	mmu_update_t *mmu;
-+	gnttab_transfer_t *gop;
-+	unsigned long vdata, old_mfn, new_mfn;
-+	struct sk_buff_head rxq;
-+	struct sk_buff *skb;
-+	u16 notify_list[NET_RX_RING_SIZE];
-+	int notify_nr = 0;
-+	int ret;
-+
-+	skb_queue_head_init(&rxq);
-+
-+	mcl = rx_mcl;
-+	mmu = rx_mmu;
-+	gop = grant_rx_op;
-+
-+	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
-+		netif   = netdev_priv(skb->dev);
-+		vdata   = (unsigned long)skb->data;
-+		old_mfn = virt_to_mfn(vdata);
-+
-+		/* Memory squeeze? Back off for an arbitrary while. */
-+		if ((new_mfn = alloc_mfn()) == 0) {
-+			if ( net_ratelimit() )
-+				WPRINTK("Memory squeeze in netback driver.\n");
-+			mod_timer(&net_timer, jiffies + HZ);
-+			skb_queue_head(&rx_queue, skb);
-+			break;
-+		}
-+		/*
-+		 * Set the new P2M table entry before reassigning the old data
-+		 * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
-+		 */
-+		set_phys_to_machine(__pa(skb->data) >> PAGE_SHIFT, new_mfn);
-+
-+		MULTI_update_va_mapping(mcl, vdata,
-+					pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
-+		mcl++;
-+
-+		gop->mfn = old_mfn;
-+		gop->domid = netif->domid;
-+		gop->ref = RING_GET_REQUEST(
-+			&netif->rx, netif->rx.req_cons)->gref;
-+		netif->rx.req_cons++;
-+		gop++;
-+
-+		mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
-+			MMU_MACHPHYS_UPDATE;
-+		mmu->val = __pa(vdata) >> PAGE_SHIFT;  
-+		mmu++;
-+
-+		__skb_queue_tail(&rxq, skb);
-+
-+		/* Filled the batch queue? */
-+		if ((gop - grant_rx_op) == ARRAY_SIZE(grant_rx_op))
-+			break;
-+	}
-+
-+	if (mcl == rx_mcl)
-+		return;
-+
-+	mcl->op = __HYPERVISOR_mmu_update;
-+	mcl->args[0] = (unsigned long)rx_mmu;
-+	mcl->args[1] = mmu - rx_mmu;
-+	mcl->args[2] = 0;
-+	mcl->args[3] = DOMID_SELF;
-+	mcl++;
-+
-+	mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-+	ret = HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
-+	BUG_ON(ret != 0);
-+
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
-+					gop - grant_rx_op);
-+	BUG_ON(ret != 0);
-+
-+	mcl = rx_mcl;
-+	gop = grant_rx_op;
-+	while ((skb = __skb_dequeue(&rxq)) != NULL) {
-+		netif   = netdev_priv(skb->dev);
-+		size    = skb->tail - skb->data;
-+
-+		/* Rederive the machine addresses. */
-+		new_mfn = mcl->args[1] >> PAGE_SHIFT;
-+		old_mfn = gop->mfn;
-+		atomic_set(&(skb_shinfo(skb)->dataref), 1);
-+		skb_shinfo(skb)->nr_frags = 0;
-+		skb_shinfo(skb)->frag_list = NULL;
-+
-+		netif->stats.tx_bytes += size;
-+		netif->stats.tx_packets++;
-+
-+		/* The update_va_mapping() must not fail. */
-+		BUG_ON(mcl->result != 0);
-+
-+		/* Check the reassignment error code. */
-+		status = NETIF_RSP_OKAY;
-+		if (gop->status != 0) { 
-+			DPRINTK("Bad status %d from grant transfer to DOM%u\n",
-+				gop->status, netif->domid);
-+			/*
-+                         * Page no longer belongs to us unless GNTST_bad_page,
-+                         * but that should be a fatal error anyway.
-+                         */
-+			BUG_ON(gop->status == GNTST_bad_page);
-+			status = NETIF_RSP_ERROR; 
-+		}
-+		irq = netif->irq;
-+		id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id;
-+		if (make_rx_response(netif, id, status,
-+				     (unsigned long)skb->data & ~PAGE_MASK,
-+				     size, skb->proto_csum_valid ?
-+				     NETRXF_csum_valid : 0) &&
-+		    (rx_notify[irq] == 0)) {
-+			rx_notify[irq] = 1;
-+			notify_list[notify_nr++] = irq;
-+		}
-+
-+		netif_put(netif);
-+		dev_kfree_skb(skb);
-+		mcl++;
-+		gop++;
-+	}
-+
-+	while (notify_nr != 0) {
-+		irq = notify_list[--notify_nr];
-+		rx_notify[irq] = 0;
-+		notify_remote_via_irq(irq);
-+	}
-+
-+	/* More work to do? */
-+	if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
-+		tasklet_schedule(&net_rx_tasklet);
-+#if 0
-+	else
-+		xen_network_done_notify();
-+#endif
-+}
-+
-+static void net_alarm(unsigned long unused)
-+{
-+	tasklet_schedule(&net_rx_tasklet);
-+}
-+
-+struct net_device_stats *netif_be_get_stats(struct net_device *dev)
-+{
-+	netif_t *netif = netdev_priv(dev);
-+	return &netif->stats;
-+}
-+
-+static int __on_net_schedule_list(netif_t *netif)
-+{
-+	return netif->list.next != NULL;
-+}
-+
-+static void remove_from_net_schedule_list(netif_t *netif)
-+{
-+	spin_lock_irq(&net_schedule_list_lock);
-+	if (likely(__on_net_schedule_list(netif))) {
-+		list_del(&netif->list);
-+		netif->list.next = NULL;
-+		netif_put(netif);
-+	}
-+	spin_unlock_irq(&net_schedule_list_lock);
-+}
-+
-+static void add_to_net_schedule_list_tail(netif_t *netif)
-+{
-+	if (__on_net_schedule_list(netif))
-+		return;
-+
-+	spin_lock_irq(&net_schedule_list_lock);
-+	if (!__on_net_schedule_list(netif) && netif->active) {
-+		list_add_tail(&netif->list, &net_schedule_list);
-+		netif_get(netif);
-+	}
-+	spin_unlock_irq(&net_schedule_list_lock);
-+}
-+
-+/*
-+ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
-+ * If this driver is pipelining transmit requests then we can be very
-+ * aggressive in avoiding new-packet notifications -- frontend only needs to
-+ * send a notification if there are no outstanding unreceived responses.
-+ * If we may be buffering transmit packets for any reason then we must be rather
-+ * more conservative and treat this as the final check for pending work.
-+ */
-+void netif_schedule_work(netif_t *netif)
-+{
-+	int more_to_do;
-+
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+	more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
-+#else
-+	RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+#endif
-+
-+	if (more_to_do) {
-+		add_to_net_schedule_list_tail(netif);
-+		maybe_schedule_tx_action();
-+	}
-+}
-+
-+void netif_deschedule_work(netif_t *netif)
-+{
-+	remove_from_net_schedule_list(netif);
-+}
-+
-+
-+static void tx_credit_callback(unsigned long data)
-+{
-+	netif_t *netif = (netif_t *)data;
-+	netif->remaining_credit = netif->credit_bytes;
-+	netif_schedule_work(netif);
-+}
-+
-+inline static void net_tx_action_dealloc(void)
-+{
-+	gnttab_unmap_grant_ref_t *gop;
-+	u16 pending_idx;
-+	PEND_RING_IDX dc, dp;
-+	netif_t *netif;
-+	int ret;
-+
-+	dc = dealloc_cons;
-+	dp = dealloc_prod;
-+
-+	/*
-+	 * Free up any grants we have finished using
-+	 */
-+	gop = tx_unmap_ops;
-+	while (dc != dp) {
-+		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-+		gop->host_addr    = MMAP_VADDR(pending_idx);
-+		gop->dev_bus_addr = 0;
-+		gop->handle       = grant_tx_handle[pending_idx];
-+		gop++;
-+	}
-+	ret = HYPERVISOR_grant_table_op(
-+		GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
-+	BUG_ON(ret);
-+
-+	while (dealloc_cons != dp) {
-+		pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
-+
-+		netif = pending_tx_info[pending_idx].netif;
-+
-+		make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
-+				 NETIF_RSP_OKAY);
-+        
-+		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-+
-+		netif_put(netif);
-+	}
-+}
-+
-+/* Called after netfront has transmitted */
-+static void net_tx_action(unsigned long unused)
-+{
-+	struct list_head *ent;
-+	struct sk_buff *skb;
-+	netif_t *netif;
-+	netif_tx_request_t txreq;
-+	u16 pending_idx;
-+	RING_IDX i;
-+	gnttab_map_grant_ref_t *mop;
-+	unsigned int data_len;
-+	int ret, work_to_do;
-+
-+	if (dealloc_cons != dealloc_prod)
-+		net_tx_action_dealloc();
-+
-+	mop = tx_map_ops;
-+	while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
-+		!list_empty(&net_schedule_list)) {
-+		/* Get a netif from the list with work to do. */
-+		ent = net_schedule_list.next;
-+		netif = list_entry(ent, netif_t, list);
-+		netif_get(netif);
-+		remove_from_net_schedule_list(netif);
-+
-+		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
-+		if (!work_to_do) {
-+			netif_put(netif);
-+			continue;
-+		}
-+
-+		i = netif->tx.req_cons;
-+		rmb(); /* Ensure that we see the request before we copy it. */
-+		memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
-+		/* Credit-based scheduling. */
-+		if (txreq.size > netif->remaining_credit) {
-+			unsigned long now = jiffies;
-+			unsigned long next_credit = 
-+				netif->credit_timeout.expires +
-+				msecs_to_jiffies(netif->credit_usec / 1000);
-+
-+			/* Timer could already be pending in rare cases. */
-+			if (timer_pending(&netif->credit_timeout))
-+				break;
-+
-+			/* Passed the point where we can replenish credit? */
-+			if (time_after_eq(now, next_credit)) {
-+				netif->credit_timeout.expires = now;
-+				netif->remaining_credit = netif->credit_bytes;
-+			}
-+
-+			/* Still too big to send right now? Set a callback. */
-+			if (txreq.size > netif->remaining_credit) {
-+				netif->remaining_credit = 0;
-+				netif->credit_timeout.expires  = 
-+					next_credit;
-+				netif->credit_timeout.data     =
-+					(unsigned long)netif;
-+				netif->credit_timeout.function =
-+					tx_credit_callback;
-+				add_timer_on(&netif->credit_timeout,
-+					     smp_processor_id());
-+				break;
-+			}
-+		}
-+		netif->remaining_credit -= txreq.size;
-+
-+		netif->tx.req_cons++;
-+
-+		netif_schedule_work(netif);
-+
-+		if (unlikely(txreq.size < ETH_HLEN) || 
-+		    unlikely(txreq.size > ETH_FRAME_LEN)) {
-+			DPRINTK("Bad packet size: %d\n", txreq.size);
-+			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-+			netif_put(netif);
-+			continue; 
-+		}
-+
-+		/* No crossing a page as the payload mustn't fragment. */
-+		if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
-+			DPRINTK("txreq.offset: %x, size: %u, end: %lu\n", 
-+				txreq.offset, txreq.size, 
-+				(txreq.offset &~PAGE_MASK) + txreq.size);
-+			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-+			netif_put(netif);
-+			continue;
-+		}
-+
-+		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-+
-+		data_len = (txreq.size > PKT_PROT_LEN) ?
-+			PKT_PROT_LEN : txreq.size;
-+
-+		skb = alloc_skb(data_len+16, GFP_ATOMIC);
-+		if (unlikely(skb == NULL)) {
-+			DPRINTK("Can't allocate a skb in start_xmit.\n");
-+			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-+			netif_put(netif);
-+			break;
-+		}
-+
-+		/* Packets passed to netif_rx() must have some headroom. */
-+		skb_reserve(skb, 16);
-+
-+		mop->host_addr = MMAP_VADDR(pending_idx);
-+		mop->dom       = netif->domid;
-+		mop->ref       = txreq.gref;
-+		mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
-+		mop++;
-+
-+		memcpy(&pending_tx_info[pending_idx].req,
-+		       &txreq, sizeof(txreq));
-+		pending_tx_info[pending_idx].netif = netif;
-+		*((u16 *)skb->data) = pending_idx;
-+
-+		__skb_queue_tail(&tx_queue, skb);
-+
-+		pending_cons++;
-+
-+		if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
-+			break;
-+	}
-+
-+	if (mop == tx_map_ops)
-+		return;
-+
-+	ret = HYPERVISOR_grant_table_op(
-+		GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
-+	BUG_ON(ret);
-+
-+	mop = tx_map_ops;
-+	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
-+		pending_idx = *((u16 *)skb->data);
-+		netif       = pending_tx_info[pending_idx].netif;
-+		memcpy(&txreq, &pending_tx_info[pending_idx].req,
-+		       sizeof(txreq));
-+
-+		/* Check the remap error code. */
-+		if (unlikely(mop->status)) {
-+			printk(KERN_ALERT "#### netback grant fails\n");
-+			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-+			netif_put(netif);
-+			kfree_skb(skb);
-+			mop++;
-+			pending_ring[MASK_PEND_IDX(pending_prod++)] =
-+				pending_idx;
-+			continue;
-+		}
-+		set_phys_to_machine(
-+			__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT,
-+			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
-+		grant_tx_handle[pending_idx] = mop->handle;
-+
-+		data_len = (txreq.size > PKT_PROT_LEN) ?
-+			PKT_PROT_LEN : txreq.size;
-+
-+		__skb_put(skb, data_len);
-+		memcpy(skb->data, 
-+		       (void *)(MMAP_VADDR(pending_idx)|txreq.offset),
-+		       data_len);
-+		if (data_len < txreq.size) {
-+			/* Append the packet payload as a fragment. */
-+			skb_shinfo(skb)->frags[0].page        = 
-+				virt_to_page(MMAP_VADDR(pending_idx));
-+			skb_shinfo(skb)->frags[0].size        =
-+				txreq.size - data_len;
-+			skb_shinfo(skb)->frags[0].page_offset = 
-+				txreq.offset + data_len;
-+			skb_shinfo(skb)->nr_frags = 1;
-+		} else {
-+			/* Schedule a response immediately. */
-+			netif_idx_release(pending_idx);
-+		}
-+
-+		skb->data_len  = txreq.size - data_len;
-+		skb->len      += skb->data_len;
-+
-+		skb->dev      = netif->dev;
-+		skb->protocol = eth_type_trans(skb, skb->dev);
-+
-+		/*
-+                 * No checking needed on localhost, but remember the field is
-+                 * blank. 
-+                 */
-+		skb->ip_summed        = CHECKSUM_UNNECESSARY;
-+		skb->proto_csum_valid = 1;
-+		skb->proto_csum_blank = !!(txreq.flags & NETTXF_csum_blank);
-+
-+		netif->stats.rx_bytes += txreq.size;
-+		netif->stats.rx_packets++;
-+
-+		netif_rx(skb);
-+		netif->dev->last_rx = jiffies;
-+
-+		mop++;
-+	}
-+}
-+
-+static void netif_idx_release(u16 pending_idx)
-+{
-+	static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&_lock, flags);
-+	dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
-+	spin_unlock_irqrestore(&_lock, flags);
-+
-+	tasklet_schedule(&net_tx_tasklet);
-+}
-+
-+static void netif_page_release(struct page *page)
-+{
-+	u16 pending_idx = page - virt_to_page(mmap_vstart);
-+
-+	/* Ready for next use. */
-+	set_page_count(page, 1);
-+
-+	netif_idx_release(pending_idx);
-+}
-+
-+irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	netif_t *netif = dev_id;
-+	add_to_net_schedule_list_tail(netif);
-+	maybe_schedule_tx_action();
-+	return IRQ_HANDLED;
-+}
-+
-+static void make_tx_response(netif_t *netif, 
-+                             u16      id,
-+                             s8       st)
-+{
-+	RING_IDX i = netif->tx.rsp_prod_pvt;
-+	netif_tx_response_t *resp;
-+	int notify;
-+
-+	resp = RING_GET_RESPONSE(&netif->tx, i);
-+	resp->id     = id;
-+	resp->status = st;
-+
-+	netif->tx.rsp_prod_pvt = ++i;
-+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
-+	if (notify)
-+		notify_remote_via_irq(netif->irq);
-+
-+#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+	if (i == netif->tx.req_cons) {
-+		int more_to_do;
-+		RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
-+		if (more_to_do)
-+			add_to_net_schedule_list_tail(netif);
-+	}
-+#endif
-+}
-+
-+static int make_rx_response(netif_t *netif, 
-+                            u16      id, 
-+                            s8       st,
-+                            u16      offset,
-+                            u16      size,
-+                            u16      flags)
-+{
-+	RING_IDX i = netif->rx.rsp_prod_pvt;
-+	netif_rx_response_t *resp;
-+	int notify;
-+
-+	resp = RING_GET_RESPONSE(&netif->rx, i);
-+	resp->offset     = offset;
-+	resp->flags      = flags;
-+	resp->id         = id;
-+	resp->status     = (s16)size;
-+	if (st < 0)
-+		resp->status = (s16)st;
-+
-+	netif->rx.rsp_prod_pvt = ++i;
-+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, notify);
-+
-+	return notify;
-+}
-+
-+#ifdef NETBE_DEBUG_INTERRUPT
-+static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	struct list_head *ent;
-+	netif_t *netif;
-+	int i = 0;
-+
-+	printk(KERN_ALERT "netif_schedule_list:\n");
-+	spin_lock_irq(&net_schedule_list_lock);
-+
-+	list_for_each (ent, &net_schedule_list) {
-+		netif = list_entry(ent, netif_t, list);
-+		printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
-+		       "rx_resp_prod=%08x\n",
-+		       i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
-+		printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
-+		       netif->tx.req_cons, netif->tx.rsp_prod_pvt);
-+		printk(KERN_ALERT "   shared(rx_req_prod=%08x "
-+		       "rx_resp_prod=%08x\n",
-+		       netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
-+		printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
-+		       netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
-+		printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
-+		       netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
-+		i++;
-+	}
-+
-+	spin_unlock_irq(&net_schedule_list_lock);
-+	printk(KERN_ALERT " ** End of netif_schedule_list **\n");
-+
-+	return IRQ_HANDLED;
-+}
-+#endif
-+
-+static int __init netback_init(void)
-+{
-+	int i;
-+	struct page *page;
-+
-+	/* We can increase reservation by this much in net_rx_action(). */
-+	balloon_update_driver_allowance(NET_RX_RING_SIZE);
-+
-+	skb_queue_head_init(&rx_queue);
-+	skb_queue_head_init(&tx_queue);
-+
-+	init_timer(&net_timer);
-+	net_timer.data = 0;
-+	net_timer.function = net_alarm;
-+    
-+	page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
-+	BUG_ON(page == NULL);
-+	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-+
-+	for (i = 0; i < MAX_PENDING_REQS; i++) {
-+		page = virt_to_page(MMAP_VADDR(i));
-+		set_page_count(page, 1);
-+		SetPageForeign(page, netif_page_release);
-+	}
-+
-+	pending_cons = 0;
-+	pending_prod = MAX_PENDING_REQS;
-+	for (i = 0; i < MAX_PENDING_REQS; i++)
-+		pending_ring[i] = i;
-+
-+	spin_lock_init(&net_schedule_list_lock);
-+	INIT_LIST_HEAD(&net_schedule_list);
-+
-+	netif_xenbus_init();
-+
-+#ifdef NETBE_DEBUG_INTERRUPT
-+	(void)bind_virq_to_irqhandler(
-+		VIRQ_DEBUG,
-+		0,
-+		netif_be_dbg,
-+		SA_SHIRQ, 
-+		"net-be-dbg",
-+		&netif_be_dbg);
-+#endif
-+
-+	return 0;
-+}
-+
-+static void netback_cleanup(void)
-+{
-+	BUG();
-+}
-+
-+module_init(netback_init);
-+module_exit(netback_cleanup);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
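
The credit-based scheduling block in net_tx_action() above behaves like a simple token bucket: 'credit_bytes' may be spent per 'credit_usec' window, and a request that does not fit is deferred until the window expires. The following stand-alone, stripped-down rendering of that decision is a sketch only (not part of the patch), with jiffies and credit_timeout replaced by an assumed millisecond clock:

#include <stdbool.h>
#include <stdio.h>

struct credit {
	unsigned long credit_bytes;    /* budget per window */
	unsigned long period_ms;       /* window length */
	unsigned long remaining;       /* budget left in the current window */
	unsigned long window_end_ms;   /* when the budget is replenished */
};

/* Returns true if a request of 'size' bytes may be sent at time 'now_ms';
 * mirrors the "replenish or defer" logic of net_tx_action() in simplified form. */
static bool credit_check(struct credit *c, unsigned long size, unsigned long now_ms)
{
	if (size <= c->remaining) {
		c->remaining -= size;
		return true;
	}
	if (now_ms >= c->window_end_ms) {
		/* Passed the replenish point: open a new window. */
		c->remaining = c->credit_bytes;
		c->window_end_ms = now_ms + c->period_ms;
		if (size <= c->remaining) {
			c->remaining -= size;
			return true;
		}
	}
	/* Still too big: the real driver arms credit_timeout here and retries later. */
	return false;
}

int main(void)
{
	struct credit c = { .credit_bytes = 1500, .period_ms = 10,
			    .remaining = 1500, .window_end_ms = 10 };
	printf("%d\n", credit_check(&c, 1000, 0));   /* 1: fits in the budget     */
	printf("%d\n", credit_check(&c, 1000, 5));   /* 0: must wait for replenish */
	printf("%d\n", credit_check(&c, 1000, 12));  /* 1: window replenished      */
	return 0;
}
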
-diff -Nurp pristine-linux-2.6.12/drivers/xen/netback/xenbus.c linux-2.6.12-xen/drivers/xen/netback/xenbus.c
---- pristine-linux-2.6.12/drivers/xen/netback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/netback/xenbus.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,329 @@
-+/*  Xenbus code for netif backend
-+    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
-+    Copyright (C) 2005 XenSource Ltd
-+
-+    This program is free software; you can redistribute it and/or modify
-+    it under the terms of the GNU General Public License as published by
-+    the Free Software Foundation; either version 2 of the License, or
-+    (at your option) any later version.
-+
-+    This program is distributed in the hope that it will be useful,
-+    but WITHOUT ANY WARRANTY; without even the implied warranty of
-+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+    GNU General Public License for more details.
-+
-+    You should have received a copy of the GNU General Public License
-+    along with this program; if not, write to the Free Software
-+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+*/
-+
-+
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <asm-xen/xenbus.h>
-+#include <asm-xen/net_driver_util.h>
-+#include "common.h"
-+
-+
-+#if 0
-+#undef DPRINTK
-+#define DPRINTK(fmt, args...) \
-+    printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+#endif
-+
-+
-+struct backend_info
-+{
-+	struct xenbus_device *dev;
-+	netif_t *netif;
-+	struct xenbus_watch backend_watch;
-+	XenbusState frontend_state;
-+};
-+
-+
-+static int connect_rings(struct backend_info *);
-+static void connect(struct backend_info *);
-+static void maybe_connect(struct backend_info *);
-+static void backend_changed(struct xenbus_watch *, const char **,
-+			    unsigned int);
-+
-+
-+static int netback_remove(struct xenbus_device *dev)
-+{
-+	struct backend_info *be = dev->data;
-+
-+	if (be->backend_watch.node) {
-+		unregister_xenbus_watch(&be->backend_watch);
-+		kfree(be->backend_watch.node);
-+		be->backend_watch.node = NULL;
-+	}
-+	if (be->netif) {
-+		netif_disconnect(be->netif);
-+		be->netif = NULL;
-+	}
-+	kfree(be);
-+	dev->data = NULL;
-+	return 0;
-+}
-+
-+
-+/**
-+ * Entry point to this code when a new device is created.  Allocate the basic
-+ * structures, and watch the store waiting for the hotplug scripts to tell us
-+ * the device's handle.  Switch to InitWait.
-+ */
-+static int netback_probe(struct xenbus_device *dev,
-+			 const struct xenbus_device_id *id)
-+{
-+	int err;
-+	struct backend_info *be = kmalloc(sizeof(struct backend_info),
-+					  GFP_KERNEL);
-+	if (!be) {
-+		xenbus_dev_fatal(dev, -ENOMEM,
-+				 "allocating backend structure");
-+		return -ENOMEM;
-+	}
-+	memset(be, 0, sizeof(*be));
-+
-+	be->dev = dev;
-+	dev->data = be;
-+
-+	err = xenbus_watch_path2(dev, dev->nodename, "handle",
-+				 &be->backend_watch, backend_changed);
-+	if (err)
-+		goto fail;
-+
-+	err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
-+	if (err) {
-+		goto fail;
-+	}
-+
-+	return 0;
-+
-+fail:
-+	DPRINTK("failed");
-+	netback_remove(dev);
-+	return err;
-+}
-+
-+
-+/**
-+ * Handle the creation of the hotplug script environment.  We add the script
-+ * and vif variables to the environment, for the benefit of the vif-* hotplug
-+ * scripts.
-+ */
-+static int netback_hotplug(struct xenbus_device *xdev, char **envp,
-+			   int num_envp, char *buffer, int buffer_size)
-+{
-+	struct backend_info *be = xdev->data;
-+	netif_t *netif = be->netif;
-+	int i = 0, length = 0;
-+	char *val;
-+
-+	DPRINTK("netback_hotplug");
-+
-+	val = xenbus_read(XBT_NULL, xdev->nodename, "script", NULL);
-+	if (IS_ERR(val)) {
-+		int err = PTR_ERR(val);
-+		xenbus_dev_fatal(xdev, err, "reading script");
-+		return err;
-+	}
-+	else {
-+		add_hotplug_env_var(envp, num_envp, &i,
-+				    buffer, buffer_size, &length,
-+				    "script=%s", val);
-+		kfree(val);
-+	}
-+
-+	add_hotplug_env_var(envp, num_envp, &i,
-+			    buffer, buffer_size, &length,
-+			    "vif=%s", netif->dev->name);
-+
-+	envp[i] = NULL;
-+
-+	return 0;
-+}
-+
-+
-+/**
-+ * Callback received when the hotplug scripts have placed the handle node.
-+ * Read it, and create a netif structure.  If the frontend is ready, connect.
-+ */
-+static void backend_changed(struct xenbus_watch *watch,
-+			    const char **vec, unsigned int len)
-+{
-+	int err;
-+	long handle;
-+	struct backend_info *be
-+		= container_of(watch, struct backend_info, backend_watch);
-+	struct xenbus_device *dev = be->dev;
-+
-+	DPRINTK("");
-+
-+	err = xenbus_scanf(XBT_NULL, dev->nodename, "handle", "%li", &handle);
-+	if (XENBUS_EXIST_ERR(err)) {
-+		/* Since this watch will fire once immediately after it is
-+		   registered, we expect this.  Ignore it, and wait for the
-+		   hotplug scripts. */
-+		return;
-+	}
-+	if (err != 1) {
-+		xenbus_dev_fatal(dev, err, "reading handle");
-+		return;
-+	}
-+
-+	if (be->netif == NULL) {
-+		u8 be_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
-+
-+		be->netif = alloc_netif(dev->otherend_id, handle, be_mac);
-+		if (IS_ERR(be->netif)) {
-+			err = PTR_ERR(be->netif);
-+			be->netif = NULL;
-+			xenbus_dev_fatal(dev, err, "creating interface");
-+			return;
-+		}
-+
-+		kobject_hotplug(&dev->dev.kobj, KOBJ_ONLINE);
-+
-+		maybe_connect(be);
-+	}
-+}
-+
-+
-+/**
-+ * Callback received when the frontend's state changes.
-+ */
-+static void frontend_changed(struct xenbus_device *dev,
-+			     XenbusState frontend_state)
-+{
-+	struct backend_info *be = dev->data;
-+
-+	DPRINTK("");
-+
-+	be->frontend_state = frontend_state;
-+
-+	switch (frontend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateInitialised:
-+		break;
-+
-+	case XenbusStateConnected:
-+		maybe_connect(be);
-+		break;
-+
-+	case XenbusStateClosing:
-+		xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
-+		break;
-+
-+	case XenbusStateClosed:
-+		kobject_hotplug(&dev->dev.kobj, KOBJ_OFFLINE);
-+		device_unregister(&dev->dev);
-+		break;
-+
-+	case XenbusStateUnknown:
-+	case XenbusStateInitWait:
-+	default:
-+		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
-+				 frontend_state);
-+		break;
-+	}
-+}
-+
-+
-+/* ** Connection ** */
-+
-+
-+static void maybe_connect(struct backend_info *be)
-+{
-+	if (be->netif != NULL && be->frontend_state == XenbusStateConnected) {
-+		connect(be);
-+	}
-+}
-+
-+
-+static void connect(struct backend_info *be)
-+{
-+	int err;
-+	struct xenbus_device *dev = be->dev;
-+
-+	err = connect_rings(be);
-+	if (err)
-+		return;
-+
-+	err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-+		return;
-+	}
-+
-+	xenbus_switch_state(dev, XBT_NULL, XenbusStateConnected);
-+}
-+
-+
-+static int connect_rings(struct backend_info *be)
-+{
-+	struct xenbus_device *dev = be->dev;
-+	unsigned long tx_ring_ref, rx_ring_ref;
-+	unsigned int evtchn;
-+	int err;
-+
-+	DPRINTK("");
-+
-+	err = xenbus_gather(XBT_NULL, dev->otherend,
-+			    "tx-ring-ref", "%lu", &tx_ring_ref,
-+			    "rx-ring-ref", "%lu", &rx_ring_ref,
-+			    "event-channel", "%u", &evtchn, NULL);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err,
-+				 "reading %s/ring-ref and event-channel",
-+				 dev->otherend);
-+		return err;
-+	}
-+
-+	/* Map the shared frame, irq etc. */
-+	err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err,
-+				 "mapping shared-frames %lu/%lu port %u",
-+				 tx_ring_ref, rx_ring_ref, evtchn);
-+		return err;
-+	}
-+	return 0;
-+}
-+
-+
-+/* ** Driver Registration ** */
-+
-+
-+static struct xenbus_device_id netback_ids[] = {
-+	{ "vif" },
-+	{ "" }
-+};
-+
-+
-+static struct xenbus_driver netback = {
-+	.name = "vif",
-+	.owner = THIS_MODULE,
-+	.ids = netback_ids,
-+	.probe = netback_probe,
-+	.remove = netback_remove,
-+	.hotplug = netback_hotplug,
-+	.otherend_changed = frontend_changed,
-+};
-+
-+
-+void netif_xenbus_init(void)
-+{
-+	xenbus_register_backend(&netback);
-+}
-+
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/net_driver_util.c linux-2.6.12-xen/drivers/xen/net_driver_util.c
---- pristine-linux-2.6.12/drivers/xen/net_driver_util.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/net_driver_util.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,67 @@
-+/*****************************************************************************
-+ *
-+ * Utility functions for Xen network devices.
-+ *
-+ * Copyright (c) 2005 XenSource Ltd.
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following
-+ * license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject
-+ * to the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+
-+#include <linux/if_ether.h>
-+#include <linux/err.h>
-+#include <asm-xen/net_driver_util.h>
-+
-+
-+int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
-+{
-+	char *s;
-+	int i;
-+	char *e;
-+	char *macstr = xenbus_read(XBT_NULL, dev->nodename, "mac", NULL);
-+	if (IS_ERR(macstr)) {
-+		return PTR_ERR(macstr);
-+	}
-+	s = macstr;
-+	for (i = 0; i < ETH_ALEN; i++) {
-+		mac[i] = simple_strtoul(s, &e, 16);
-+		if (s == e || (e[0] != ':' && e[0] != 0)) {
-+			kfree(macstr);
-+			return -ENOENT;
-+		}
-+		s = &e[1];
-+	}
-+	kfree(macstr);
-+	return 0;
-+}
-+
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/netfront/Makefile linux-2.6.12-xen/drivers/xen/netfront/Makefile
---- pristine-linux-2.6.12/drivers/xen/netfront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/netfront/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,4 @@
-+
-+obj-$(CONFIG_XEN_NETDEV_FRONTEND)	:= xennet.o
-+
-+xennet-objs := netfront.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/netfront/netfront.c linux-2.6.12-xen/drivers/xen/netfront/netfront.c
---- pristine-linux-2.6.12/drivers/xen/netfront/netfront.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/netfront/netfront.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1486 @@
-+/******************************************************************************
-+ * Virtual network driver for conversing with remote driver backends.
-+ * 
-+ * Copyright (c) 2002-2005, K A Fraser
-+ * Copyright (c) 2005, XenSource Ltd
-+ * 
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License version 2
-+ * as published by the Free Software Foundation; or, when distributed
-+ * separately from the Linux kernel or incorporated into other
-+ * software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <linux/init.h>
-+#include <linux/bitops.h>
-+#include <linux/proc_fs.h>
-+#include <linux/ethtool.h>
-+#include <net/sock.h>
-+#include <net/pkt_sched.h>
-+#include <net/arp.h>
-+#include <net/route.h>
-+#include <asm/io.h>
-+#include <asm/uaccess.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/xenbus.h>
-+#include <asm-xen/xen-public/io/netif.h>
-+#include <asm-xen/xen-public/memory.h>
-+#include <asm-xen/balloon.h>
-+#include <asm/page.h>
-+#include <asm/uaccess.h>
-+#include <asm-xen/xen-public/grant_table.h>
-+#include <asm-xen/gnttab.h>
-+#include <asm-xen/net_driver_util.h>
-+
-+#define GRANT_INVALID_REF	0
-+
-+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
-+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
-+
-+#ifndef __GFP_NOWARN
-+#define __GFP_NOWARN 0
-+#endif
-+#define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
-+
-+#define init_skb_shinfo(_skb)                         \
-+    do {                                              \
-+        atomic_set(&(skb_shinfo(_skb)->dataref), 1);  \
-+        skb_shinfo(_skb)->nr_frags = 0;               \
-+        skb_shinfo(_skb)->frag_list = NULL;           \
-+    } while (0)
-+
-+static unsigned long rx_pfn_array[NET_RX_RING_SIZE];
-+static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
-+static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
-+
-+struct netfront_info
-+{
-+	struct list_head list;
-+	struct net_device *netdev;
-+
-+	struct net_device_stats stats;
-+	unsigned int tx_full;
-+    
-+	netif_tx_front_ring_t tx;
-+	netif_rx_front_ring_t rx;
-+
-+	spinlock_t   tx_lock;
-+	spinlock_t   rx_lock;
-+
-+	unsigned int handle;
-+	unsigned int evtchn, irq;
-+
-+	/* What is the status of our connection to the remote backend? */
-+#define BEST_CLOSED       0
-+#define BEST_DISCONNECTED 1
-+#define BEST_CONNECTED    2
-+	unsigned int backend_state;
-+
-+	/* Is this interface open or closed (down or up)? */
-+#define UST_CLOSED        0
-+#define UST_OPEN          1
-+	unsigned int user_state;
-+
-+	/* Receive-ring batched refills. */
-+#define RX_MIN_TARGET 8
-+#define RX_MAX_TARGET NET_RX_RING_SIZE
-+	int rx_min_target, rx_max_target, rx_target;
-+	struct sk_buff_head rx_batch;
-+
-+	struct timer_list rx_refill_timer;
-+
-+	/*
-+	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
-+	 * array is an index into a chain of free entries.
-+	 */
-+	struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
-+	struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
-+
-+	grant_ref_t gref_tx_head;
-+	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; 
-+	grant_ref_t gref_rx_head;
-+	grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1]; 
-+
-+	struct xenbus_device *xbdev;
-+	int tx_ring_ref;
-+	int rx_ring_ref;
-+	u8 mac[ETH_ALEN];
-+};
-+
-+/* Access macros for acquiring and freeing slots in {tx,rx}_skbs[]. */
-+#define ADD_ID_TO_FREELIST(_list, _id)			\
-+	(_list)[(_id)] = (_list)[0];			\
-+	(_list)[0]     = (void *)(unsigned long)(_id);
-+#define GET_ID_FROM_FREELIST(_list)				\
-+	({ unsigned long _id = (unsigned long)(_list)[0];	\
-+	   (_list)[0]  = (_list)[_id];				\
-+	   (unsigned short)_id; })
-+
-+#ifdef DEBUG
-+static char *be_state_name[] = {
-+	[BEST_CLOSED]       = "closed",
-+	[BEST_DISCONNECTED] = "disconnected",
-+	[BEST_CONNECTED]    = "connected",
-+};
-+#endif
-+
-+#define DPRINTK(fmt, args...) pr_debug("netfront (%s:%d) " fmt, \
-+                                       __FUNCTION__, __LINE__, ##args)
-+#define IPRINTK(fmt, args...)				\
-+	printk(KERN_INFO "netfront: " fmt, ##args)
-+#define WPRINTK(fmt, args...)				\
-+	printk(KERN_WARNING "netfront: " fmt, ##args)
-+
-+
-+static int talk_to_backend(struct xenbus_device *, struct netfront_info *);
-+static int setup_device(struct xenbus_device *, struct netfront_info *);
-+static int create_netdev(int, struct xenbus_device *, struct net_device **);
-+
-+static void netfront_closing(struct xenbus_device *);
-+
-+static void end_access(int, void *);
-+static void netif_disconnect_backend(struct netfront_info *);
-+static void close_netdev(struct netfront_info *);
-+static void netif_free(struct netfront_info *);
-+
-+static void show_device(struct netfront_info *);
-+
-+static void network_connect(struct net_device *);
-+static void network_tx_buf_gc(struct net_device *);
-+static void network_alloc_rx_buffers(struct net_device *);
-+static int send_fake_arp(struct net_device *);
-+
-+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
-+
-+#ifdef CONFIG_PROC_FS
-+static int xennet_proc_init(void);
-+static int xennet_proc_addif(struct net_device *dev);
-+static void xennet_proc_delif(struct net_device *dev);
-+#else
-+#define xennet_proc_init()   (0)
-+#define xennet_proc_addif(d) (0)
-+#define xennet_proc_delif(d) ((void)0)
-+#endif
-+
-+
-+/**
-+ * Entry point to this code when a new device is created.  Allocate the basic
-+ * structures and the ring buffers for communication with the backend, and
-+ * inform the backend of the appropriate details for those.  Switch to
-+ * Connected state.
-+ */
-+static int netfront_probe(struct xenbus_device *dev,
-+			  const struct xenbus_device_id *id)
-+{
-+	int err;
-+	struct net_device *netdev;
-+	struct netfront_info *info;
-+	unsigned int handle;
-+
-+	err = xenbus_scanf(XBT_NULL, dev->nodename, "handle", "%u", &handle);
-+	if (err != 1) {
-+		xenbus_dev_fatal(dev, err, "reading handle");
-+		return err;
-+	}
-+
-+	err = create_netdev(handle, dev, &netdev);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "creating netdev");
-+		return err;
-+	}
-+
-+	info = netdev_priv(netdev);
-+	dev->data = info;
-+
-+	err = talk_to_backend(dev, info);
-+	if (err) {
-+		kfree(info);
-+		dev->data = NULL;
-+		return err;
-+	}
-+
-+	return 0;
-+}
-+
-+
-+/**
-+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
-+ * driver restart.  We tear down our netif structure and recreate it, but
-+ * leave the device-layer structures intact so that this is transparent to the
-+ * rest of the kernel.
-+ */
-+static int netfront_resume(struct xenbus_device *dev)
-+{
-+	struct netfront_info *info = dev->data;
-+
-+	DPRINTK("%s\n", dev->nodename);
-+
-+	netif_disconnect_backend(info);
-+	return talk_to_backend(dev, info);
-+}
-+
-+
-+/* Common code used when first setting up, and when resuming. */
-+static int talk_to_backend(struct xenbus_device *dev,
-+			   struct netfront_info *info)
-+{
-+	const char *message;
-+	xenbus_transaction_t xbt;
-+	int err;
-+
-+	err = xen_net_read_mac(dev, info->mac);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-+		goto out;
-+	}
-+
-+	/* Create shared ring, alloc event channel. */
-+	err = setup_device(dev, info);
-+	if (err)
-+		goto out;
-+
-+again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "starting transaction");
-+		goto destroy_ring;
-+	}
-+
-+	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
-+			    info->tx_ring_ref);
-+	if (err) {
-+		message = "writing tx ring-ref";
-+		goto abort_transaction;
-+	}
-+	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
-+			    info->rx_ring_ref);
-+	if (err) {
-+		message = "writing rx ring-ref";
-+		goto abort_transaction;
-+	}
-+	err = xenbus_printf(xbt, dev->nodename,
-+			    "event-channel", "%u", info->evtchn);
-+	if (err) {
-+		message = "writing event-channel";
-+		goto abort_transaction;
-+	}
-+
-+	err = xenbus_printf(xbt, dev->nodename,
-+			    "state", "%d", XenbusStateConnected);
-+	if (err) {
-+		message = "writing frontend XenbusStateConnected";
-+		goto abort_transaction;
-+	}
-+
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err) {
-+		if (err == -EAGAIN)
-+			goto again;
-+		xenbus_dev_fatal(dev, err, "completing transaction");
-+		goto destroy_ring;
-+	}
-+
-+	return 0;
-+
-+ abort_transaction:
-+	xenbus_transaction_end(xbt, 1);
-+	xenbus_dev_fatal(dev, err, "%s", message);
-+ destroy_ring:
-+	netif_free(info);
-+ out:
-+	return err;
-+}
-+
-+
-+static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
-+{
-+	netif_tx_sring_t *txs;
-+	netif_rx_sring_t *rxs;
-+	int err;
-+	struct net_device *netdev = info->netdev;
-+
-+	info->tx_ring_ref = GRANT_INVALID_REF;
-+	info->rx_ring_ref = GRANT_INVALID_REF;
-+	info->rx.sring = NULL;
-+	info->tx.sring = NULL;
-+	info->irq = 0;
-+
-+	txs = (netif_tx_sring_t *)__get_free_page(GFP_KERNEL);
-+	if (!txs) {
-+		err = -ENOMEM;
-+		xenbus_dev_fatal(dev, err, "allocating tx ring page");
-+		goto fail;
-+	}
-+	rxs = (netif_rx_sring_t *)__get_free_page(GFP_KERNEL);
-+	if (!rxs) {
-+		err = -ENOMEM;
-+		xenbus_dev_fatal(dev, err, "allocating rx ring page");
-+		goto fail;
-+	}
-+	memset(txs, 0, PAGE_SIZE);
-+	memset(rxs, 0, PAGE_SIZE);
-+	info->backend_state = BEST_DISCONNECTED;
-+
-+	SHARED_RING_INIT(txs);
-+	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
-+
-+	SHARED_RING_INIT(rxs);
-+	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
-+
-+	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
-+	if (err < 0)
-+		goto fail;
-+	info->tx_ring_ref = err;
-+
-+	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
-+	if (err < 0)
-+		goto fail;
-+	info->rx_ring_ref = err;
-+
-+	err = xenbus_alloc_evtchn(dev, &info->evtchn);
-+	if (err)
-+		goto fail;
-+
-+	memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
-+	network_connect(netdev);
-+	info->irq = bind_evtchn_to_irqhandler(
-+		info->evtchn, netif_int, SA_SAMPLE_RANDOM, netdev->name,
-+		netdev);
-+	(void)send_fake_arp(netdev);
-+	show_device(info);
-+
-+	return 0;
-+
-+ fail:
-+	netif_free(info);
-+	return err;
-+}
-+
-+
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+			    XenbusState backend_state)
-+{
-+	DPRINTK("\n");
-+
-+	switch (backend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateInitWait:
-+	case XenbusStateInitialised:
-+	case XenbusStateConnected:
-+	case XenbusStateUnknown:
-+	case XenbusStateClosed:
-+		break;
-+
-+	case XenbusStateClosing:
-+		netfront_closing(dev);
-+		break;
-+	}
-+}
-+
-+
-+/** Send a packet on a net device to encourage switches to learn the
-+ * MAC. We send a fake ARP request.
-+ *
-+ * @param dev device
-+ * @return 0 on success, error code otherwise
-+ */
-+static int send_fake_arp(struct net_device *dev)
-+{
-+	struct sk_buff *skb;
-+	u32             src_ip, dst_ip;
-+
-+	dst_ip = INADDR_BROADCAST;
-+	src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
-+
-+	/* No IP? Then nothing to do. */
-+	if (src_ip == 0)
-+		return 0;
-+
-+	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
-+			 dst_ip, dev, src_ip,
-+			 /*dst_hw*/ NULL, /*src_hw*/ NULL, 
-+			 /*target_hw*/ dev->dev_addr);
-+	if (skb == NULL)
-+		return -ENOMEM;
-+
-+	return dev_queue_xmit(skb);
-+}
-+
-+
-+static int network_open(struct net_device *dev)
-+{
-+	struct netfront_info *np = netdev_priv(dev);
-+
-+	memset(&np->stats, 0, sizeof(np->stats));
-+
-+	np->user_state = UST_OPEN;
-+
-+	network_alloc_rx_buffers(dev);
-+	np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
-+
-+	netif_start_queue(dev);
-+
-+	return 0;
-+}
-+
-+static void network_tx_buf_gc(struct net_device *dev)
-+{
-+	RING_IDX i, prod;
-+	unsigned short id;
-+	struct netfront_info *np = netdev_priv(dev);
-+	struct sk_buff *skb;
-+
-+	if (np->backend_state != BEST_CONNECTED)
-+		return;
-+
-+	do {
-+		prod = np->tx.sring->rsp_prod;
-+		rmb(); /* Ensure we see responses up to 'rp'. */
-+
-+		for (i = np->tx.rsp_cons; i != prod; i++) {
-+			id  = RING_GET_RESPONSE(&np->tx, i)->id;
-+			skb = np->tx_skbs[id];
-+			if (unlikely(gnttab_query_foreign_access(
-+				np->grant_tx_ref[id]) != 0)) {
-+				printk(KERN_ALERT "network_tx_buf_gc: warning "
-+				       "-- grant still in use by backend "
-+				       "domain.\n");
-+				goto out; 
-+			}
-+			gnttab_end_foreign_access_ref(
-+				np->grant_tx_ref[id], GNTMAP_readonly);
-+			gnttab_release_grant_reference(
-+				&np->gref_tx_head, np->grant_tx_ref[id]);
-+			np->grant_tx_ref[id] = GRANT_INVALID_REF;
-+			ADD_ID_TO_FREELIST(np->tx_skbs, id);
-+			dev_kfree_skb_irq(skb);
-+		}
-+        
-+		np->tx.rsp_cons = prod;
-+        
-+		/*
-+		 * Set a new event, then check for race with update of tx_cons.
-+		 * Note that it is essential to schedule a callback, no matter
-+		 * how few buffers are pending. Even if there is space in the
-+		 * transmit ring, higher layers may be blocked because too much
-+		 * data is outstanding: in such cases notification from Xen is
-+		 * likely to be the only kick that we'll get.
-+		 */
-+		np->tx.sring->rsp_event =
-+			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
-+		mb();
-+	} while (prod != np->tx.sring->rsp_prod);
-+
-+ out: 
-+	if (np->tx_full &&
-+	    ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
-+		np->tx_full = 0;
-+		if (np->user_state == UST_OPEN)
-+			netif_wake_queue(dev);
-+	}
-+}
-+
-+
-+static void rx_refill_timeout(unsigned long data)
-+{
-+	struct net_device *dev = (struct net_device *)data;
-+	netif_rx_schedule(dev);
-+}
-+
-+
-+static void network_alloc_rx_buffers(struct net_device *dev)
-+{
-+	unsigned short id;
-+	struct netfront_info *np = netdev_priv(dev);
-+	struct sk_buff *skb;
-+	int i, batch_target;
-+	RING_IDX req_prod = np->rx.req_prod_pvt;
-+	struct xen_memory_reservation reservation;
-+	grant_ref_t ref;
-+
-+	if (unlikely(np->backend_state != BEST_CONNECTED))
-+		return;
-+
-+	/*
-+	 * Allocate skbuffs greedily, even though we batch updates to the
-+	 * receive ring. This creates a less bursty demand on the memory
-+	 * allocator, so should reduce the chance of failed allocation requests
-+	 * both for ourselves and for other kernel subsystems.
-+	 */
-+	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
-+	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-+		/*
-+		 * Subtract dev_alloc_skb headroom (16 bytes) and shared info
-+		 * tailroom then round down to SKB_DATA_ALIGN boundary.
-+		 */
-+		skb = alloc_xen_skb(
-+			((PAGE_SIZE - sizeof(struct skb_shared_info)) &
-+			 (-SKB_DATA_ALIGN(1))) - 16);
-+		if (skb == NULL) {
-+			/* Any skbuffs queued for refill? Force them out. */
-+			if (i != 0)
-+				goto refill;
-+			/* Could not allocate any skbuffs. Try again later. */
-+			mod_timer(&np->rx_refill_timer,
-+				  jiffies + (HZ/10));
-+			return;
-+		}
-+		__skb_queue_tail(&np->rx_batch, skb);
-+	}
-+
-+	/* Is the batch large enough to be worthwhile? */
-+	if (i < (np->rx_target/2))
-+		return;
-+
-+	/* Adjust our fill target if we risked running out of buffers. */
-+	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
-+	    ((np->rx_target *= 2) > np->rx_max_target))
-+		np->rx_target = np->rx_max_target;
-+
-+ refill:
-+	for (i = 0; ; i++) {
-+		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
-+			break;
-+
-+		skb->dev = dev;
-+
-+		id = GET_ID_FROM_FREELIST(np->rx_skbs);
-+
-+		np->rx_skbs[id] = skb;
-+        
-+		RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
-+		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
-+		BUG_ON((signed short)ref < 0);
-+		np->grant_rx_ref[id] = ref;
-+		gnttab_grant_foreign_transfer_ref(ref,
-+						  np->xbdev->otherend_id);
-+		RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
-+		rx_pfn_array[i] = virt_to_mfn(skb->head);
-+
-+		/* Remove this page from map before passing back to Xen. */
-+		set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
-+				    INVALID_P2M_ENTRY);
-+
-+		MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
-+					__pte(0), 0);
-+	}
-+
-+	/* After all PTEs have been zapped we blow away stale TLB entries. */
-+	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-+
-+	/* Give away a batch of pages. */
-+	rx_mcl[i].op = __HYPERVISOR_memory_op;
-+	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
-+	rx_mcl[i].args[1] = (unsigned long)&reservation;
-+
-+	reservation.extent_start = rx_pfn_array;
-+	reservation.nr_extents   = i;
-+	reservation.extent_order = 0;
-+	reservation.address_bits = 0;
-+	reservation.domid        = DOMID_SELF;
-+
-+	/* Tell the balloon driver what is going on. */
-+	balloon_update_driver_allowance(i);
-+
-+	/* Zap PTEs and give away pages in one big multicall. */
-+	(void)HYPERVISOR_multicall(rx_mcl, i+1);
-+
-+	/* Check return status of HYPERVISOR_memory_op(). */
-+	if (unlikely(rx_mcl[i].result != i))
-+		panic("Unable to reduce memory reservation\n");
-+
-+	/* Above is a suitable barrier to ensure backend will see requests. */
-+	np->rx.req_prod_pvt = req_prod + i;
-+	RING_PUSH_REQUESTS(&np->rx);
-+}
-+
-+
-+static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
-+{
-+	unsigned short id;
-+	struct netfront_info *np = netdev_priv(dev);
-+	netif_tx_request_t *tx;
-+	RING_IDX i;
-+	grant_ref_t ref;
-+	unsigned long mfn;
-+	int notify;
-+
-+	if (unlikely(np->tx_full)) {
-+		printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
-+		       dev->name);
-+		netif_stop_queue(dev);
-+		goto drop;
-+	}
-+
-+	if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
-+		     PAGE_SIZE)) {
-+		struct sk_buff *nskb;
-+		if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
-+			goto drop;
-+		skb_put(nskb, skb->len);
-+		memcpy(nskb->data, skb->data, skb->len);
-+		nskb->dev = skb->dev;
-+		dev_kfree_skb(skb);
-+		skb = nskb;
-+	}
-+    
-+	spin_lock_irq(&np->tx_lock);
-+
-+	if (np->backend_state != BEST_CONNECTED) {
-+		spin_unlock_irq(&np->tx_lock);
-+		goto drop;
-+	}
-+
-+	i = np->tx.req_prod_pvt;
-+
-+	id = GET_ID_FROM_FREELIST(np->tx_skbs);
-+	np->tx_skbs[id] = skb;
-+
-+	tx = RING_GET_REQUEST(&np->tx, i);
-+
-+	tx->id   = id;
-+	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-+	BUG_ON((signed short)ref < 0);
-+	mfn = virt_to_mfn(skb->data);
-+	gnttab_grant_foreign_access_ref(
-+		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
-+	tx->gref = np->grant_tx_ref[id] = ref;
-+	tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
-+	tx->size = skb->len;
-+	tx->flags = (skb->ip_summed == CHECKSUM_HW) ? NETTXF_csum_blank : 0;
-+
-+	np->tx.req_prod_pvt = i + 1;
-+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
-+	if (notify)
-+		notify_remote_via_irq(np->irq);
-+
-+	network_tx_buf_gc(dev);
-+
-+	if (RING_FULL(&np->tx)) {
-+		np->tx_full = 1;
-+		netif_stop_queue(dev);
-+	}
-+
-+	spin_unlock_irq(&np->tx_lock);
-+
-+	np->stats.tx_bytes += skb->len;
-+	np->stats.tx_packets++;
-+
-+	return 0;
-+
-+ drop:
-+	np->stats.tx_dropped++;
-+	dev_kfree_skb(skb);
-+	return 0;
-+}
-+
-+static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
-+{
-+	struct net_device *dev = dev_id;
-+	struct netfront_info *np = netdev_priv(dev);
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&np->tx_lock, flags);
-+	network_tx_buf_gc(dev);
-+	spin_unlock_irqrestore(&np->tx_lock, flags);
-+
-+	if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx) &&
-+	    (np->user_state == UST_OPEN))
-+		netif_rx_schedule(dev);
-+
-+	return IRQ_HANDLED;
-+}
-+
-+
-+static int netif_poll(struct net_device *dev, int *pbudget)
-+{
-+	struct netfront_info *np = netdev_priv(dev);
-+	struct sk_buff *skb, *nskb;
-+	netif_rx_response_t *rx;
-+	RING_IDX i, rp;
-+	mmu_update_t *mmu = rx_mmu;
-+	multicall_entry_t *mcl = rx_mcl;
-+	int work_done, budget, more_to_do = 1;
-+	struct sk_buff_head rxq;
-+	unsigned long flags;
-+	unsigned long mfn;
-+	grant_ref_t ref;
-+
-+	spin_lock(&np->rx_lock);
-+
-+	if (np->backend_state != BEST_CONNECTED) {
-+		spin_unlock(&np->rx_lock);
-+		return 0;
-+	}
-+
-+	skb_queue_head_init(&rxq);
-+
-+	if ((budget = *pbudget) > dev->quota)
-+		budget = dev->quota;
-+	rp = np->rx.sring->rsp_prod;
-+	rmb(); /* Ensure we see queued responses up to 'rp'. */
-+
-+	for (i = np->rx.rsp_cons, work_done = 0; 
-+	     (i != rp) && (work_done < budget);
-+	     i++, work_done++) {
-+		rx = RING_GET_RESPONSE(&np->rx, i);
-+
-+		/*
-+		 * This definitely indicates a bug, either in this driver or
-+		 * in the backend driver. In future this should flag the bad
-+		 * situation to the system controller to reboot the backend.
-+		 */
-+		if ((ref = np->grant_rx_ref[rx->id]) == GRANT_INVALID_REF) {
-+			WPRINTK("Bad rx response id %d.\n", rx->id);
-+			work_done--;
-+			continue;
-+		}
-+
-+		/* Memory pressure, insufficient buffer headroom, ... */
-+		if ((mfn = gnttab_end_foreign_transfer_ref(ref)) == 0) {
-+			if (net_ratelimit())
-+				WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
-+					rx->id, rx->status);
-+			RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
-+				rx->id;
-+			RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref =
-+				ref;
-+			np->rx.req_prod_pvt++;
-+			RING_PUSH_REQUESTS(&np->rx);
-+			work_done--;
-+			continue;
-+		}
-+
-+		gnttab_release_grant_reference(&np->gref_rx_head, ref);
-+		np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
-+
-+		skb = np->rx_skbs[rx->id];
-+		ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
-+
-+		/* NB. We handle skb overflow later. */
-+		skb->data = skb->head + rx->offset;
-+		skb->len  = rx->status;
-+		skb->tail = skb->data + skb->len;
-+
-+		if (rx->flags & NETRXF_csum_valid)
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+		np->stats.rx_packets++;
-+		np->stats.rx_bytes += rx->status;
-+
-+		/* Remap the page. */
-+		mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-+		mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
-+		mmu++;
-+		MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
-+					pfn_pte_ma(mfn, PAGE_KERNEL), 0);
-+		mcl++;
-+
-+		set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT, mfn);
-+
-+		__skb_queue_tail(&rxq, skb);
-+	}
-+
-+	/* Some pages are no longer absent... */
-+	balloon_update_driver_allowance(-work_done);
-+
-+	/* Do all the remapping work, and M2P updates, in one big hypercall. */
-+	if (likely((mcl - rx_mcl) != 0)) {
-+		mcl->op = __HYPERVISOR_mmu_update;
-+		mcl->args[0] = (unsigned long)rx_mmu;
-+		mcl->args[1] = mmu - rx_mmu;
-+		mcl->args[2] = 0;
-+		mcl->args[3] = DOMID_SELF;
-+		mcl++;
-+		(void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
-+	}
-+
-+	while ((skb = __skb_dequeue(&rxq)) != NULL) {
-+		if (skb->len > (dev->mtu + ETH_HLEN)) {
-+			if (net_ratelimit())
-+				printk(KERN_INFO "Received packet too big for "
-+				       "MTU (%d > %d)\n",
-+				       skb->len - ETH_HLEN, dev->mtu);
-+			skb->len  = 0;
-+			skb->tail = skb->data;
-+			init_skb_shinfo(skb);
-+			dev_kfree_skb(skb);
-+			continue;
-+		}
-+
-+		/*
-+		 * Enough room in skbuff for the data we were passed? Also,
-+		 * Linux expects at least 16 bytes headroom in each rx buffer.
-+		 */
-+		if (unlikely(skb->tail > skb->end) || 
-+		    unlikely((skb->data - skb->head) < 16)) {
-+			if (net_ratelimit()) {
-+				if (skb->tail > skb->end)
-+					printk(KERN_INFO "Received packet "
-+					       "is %zd bytes beyond tail.\n",
-+					       skb->tail - skb->end);
-+				else
-+					printk(KERN_INFO "Received packet "
-+					       "is %zd bytes before head.\n",
-+					       16 - (skb->data - skb->head));
-+			}
-+
-+			nskb = alloc_xen_skb(skb->len + 2);
-+			if (nskb != NULL) {
-+				skb_reserve(nskb, 2);
-+				skb_put(nskb, skb->len);
-+				memcpy(nskb->data, skb->data, skb->len);
-+				nskb->dev = skb->dev;
-+				nskb->ip_summed = skb->ip_summed;
-+			}
-+
-+			/* Reinitialise and then destroy the old skbuff. */
-+			skb->len  = 0;
-+			skb->tail = skb->data;
-+			init_skb_shinfo(skb);
-+			dev_kfree_skb(skb);
-+
-+			/* Switch old for new, if we copied the buffer. */
-+			if ((skb = nskb) == NULL)
-+				continue;
-+		}
-+        
-+		/* Set the shinfo area, which is hidden behind the data. */
-+		init_skb_shinfo(skb);
-+		/* Ethernet work: Delayed to here as it peeks the header. */
-+		skb->protocol = eth_type_trans(skb, dev);
-+
-+		/* Pass it up. */
-+		netif_receive_skb(skb);
-+		dev->last_rx = jiffies;
-+	}
-+
-+	np->rx.rsp_cons = i;
-+
-+	/* If we get a callback with very few responses, reduce fill target. */
-+	/* NB. Note exponential increase, linear decrease. */
-+	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
-+	     ((3*np->rx_target) / 4)) &&
-+	    (--np->rx_target < np->rx_min_target))
-+		np->rx_target = np->rx_min_target;
-+
-+	network_alloc_rx_buffers(dev);
-+
-+	*pbudget   -= work_done;
-+	dev->quota -= work_done;
-+
-+	if (work_done < budget) {
-+		local_irq_save(flags);
-+
-+		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
-+		if (!more_to_do)
-+			__netif_rx_complete(dev);
-+
-+		local_irq_restore(flags);
-+	}
-+
-+	spin_unlock(&np->rx_lock);
-+
-+	return more_to_do;
-+}
-+
-+
-+static int network_close(struct net_device *dev)
-+{
-+	struct netfront_info *np = netdev_priv(dev);
-+	np->user_state = UST_CLOSED;
-+	netif_stop_queue(np->netdev);
-+	return 0;
-+}
-+
-+
-+static struct net_device_stats *network_get_stats(struct net_device *dev)
-+{
-+	struct netfront_info *np = netdev_priv(dev);
-+	return &np->stats;
-+}
-+
-+static void network_connect(struct net_device *dev)
-+{
-+	struct netfront_info *np;
-+	int i, requeue_idx;
-+	netif_tx_request_t *tx;
-+	struct sk_buff *skb;
-+
-+	np = netdev_priv(dev);
-+	spin_lock_irq(&np->tx_lock);
-+	spin_lock(&np->rx_lock);
-+
-+	/* Recovery procedure: */
-+
-+	/* Step 1: Reinitialise variables. */
-+	np->tx_full = 0;
-+
-+	/*
-+	 * Step 2: Rebuild the RX and TX ring contents.
-+	 * NB. We could just free the queued TX packets now but we hope
-+	 * that sending them out might do some good.  We have to rebuild
-+	 * the RX ring because some of our pages are currently flipped out
-+	 * so we can't just free the RX skbs.
-+	 * NB2. Freelist index entries are always going to be less than
-+	 *  __PAGE_OFFSET, whereas pointers to skbs will always be equal or
-+	 * greater than __PAGE_OFFSET: we use this property to distinguish
-+	 * them.
-+	 */
-+
-+	/*
-+	 * Rebuild the TX buffer freelist and the TX ring itself.
-+	 * NB. This reorders packets.  We could keep more private state
-+	 * to avoid this but maybe it doesn't matter so much given the
-+	 * interface has been down.
-+	 */
-+	for (requeue_idx = 0, i = 1; i <= NET_TX_RING_SIZE; i++) {
-+		if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
-+			continue;
-+
-+		skb = np->tx_skbs[i];
-+
-+		tx = RING_GET_REQUEST(&np->tx, requeue_idx);
-+		requeue_idx++;
-+
-+		tx->id = i;
-+		gnttab_grant_foreign_access_ref(
-+			np->grant_tx_ref[i], np->xbdev->otherend_id, 
-+			virt_to_mfn(np->tx_skbs[i]->data),
-+			GNTMAP_readonly); 
-+		tx->gref = np->grant_tx_ref[i];
-+		tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
-+		tx->size = skb->len;
-+		tx->flags = (skb->ip_summed == CHECKSUM_HW) ?
-+			NETTXF_csum_blank : 0;
-+
-+		np->stats.tx_bytes += skb->len;
-+		np->stats.tx_packets++;
-+	}
-+
-+	np->tx.req_prod_pvt = requeue_idx;
-+	RING_PUSH_REQUESTS(&np->tx);
-+
-+	/* Rebuild the RX buffer freelist and the RX ring itself. */
-+	for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) { 
-+		if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
-+			continue;
-+		gnttab_grant_foreign_transfer_ref(
-+			np->grant_rx_ref[i], np->xbdev->otherend_id);
-+		RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
-+			np->grant_rx_ref[i];
-+		RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
-+		requeue_idx++; 
-+	}
-+
-+	np->rx.req_prod_pvt = requeue_idx;
-+	RING_PUSH_REQUESTS(&np->rx);
-+
-+	/*
-+	 * Step 3: All public and private state should now be sane.  Get
-+	 * ready to start sending and receiving packets and give the driver
-+	 * domain a kick because we've probably just requeued some
-+	 * packets.
-+	 */
-+	np->backend_state = BEST_CONNECTED;
-+	notify_remote_via_irq(np->irq);
-+	network_tx_buf_gc(dev);
-+
-+	if (np->user_state == UST_OPEN)
-+		netif_start_queue(dev);
-+
-+	spin_unlock(&np->rx_lock);
-+	spin_unlock_irq(&np->tx_lock);
-+}
-+
-+static void show_device(struct netfront_info *np)
-+{
-+#ifdef DEBUG
-+	if (np) {
-+		IPRINTK("<vif handle=%u %s(%s) evtchn=%u tx=%p rx=%p>\n",
-+			np->handle,
-+			be_state_name[np->backend_state],
-+			np->user_state ? "open" : "closed",
-+			np->evtchn,
-+			np->tx,
-+			np->rx);
-+	} else {
-+		IPRINTK("<vif NULL>\n");
-+	}
-+#endif
-+}
-+
-+static void netif_uninit(struct net_device *dev)
-+{
-+	struct netfront_info *np = netdev_priv(dev);
-+	gnttab_free_grant_references(np->gref_tx_head);
-+	gnttab_free_grant_references(np->gref_rx_head);
-+}
-+
-+static struct ethtool_ops network_ethtool_ops =
-+{
-+	.get_tx_csum = ethtool_op_get_tx_csum,
-+	.set_tx_csum = ethtool_op_set_tx_csum,
-+};
-+
-+/** Create a network device.
-+ * @param handle device handle
-+ * @param val return parameter for created device
-+ * @return 0 on success, error code otherwise
-+ */
-+static int create_netdev(int handle, struct xenbus_device *dev,
-+			 struct net_device **val)
-+{
-+	int i, err = 0;
-+	struct net_device *netdev = NULL;
-+	struct netfront_info *np = NULL;
-+
-+	if ((netdev = alloc_etherdev(sizeof(struct netfront_info))) == NULL) {
-+		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
-+		       __FUNCTION__);
-+		err = -ENOMEM;
-+		goto exit;
-+	}
-+
-+	np                = netdev_priv(netdev);
-+	np->backend_state = BEST_CLOSED;
-+	np->user_state    = UST_CLOSED;
-+	np->handle        = handle;
-+	np->xbdev         = dev;
-+
-+	spin_lock_init(&np->tx_lock);
-+	spin_lock_init(&np->rx_lock);
-+
-+	skb_queue_head_init(&np->rx_batch);
-+	np->rx_target     = RX_MIN_TARGET;
-+	np->rx_min_target = RX_MIN_TARGET;
-+	np->rx_max_target = RX_MAX_TARGET;
-+
-+	init_timer(&np->rx_refill_timer);
-+	np->rx_refill_timer.data = (unsigned long)netdev;
-+	np->rx_refill_timer.function = rx_refill_timeout;
-+
-+	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
-+	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
-+		np->tx_skbs[i] = (void *)((unsigned long) i+1);
-+		np->grant_tx_ref[i] = GRANT_INVALID_REF;
-+	}
-+
-+	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
-+		np->rx_skbs[i] = (void *)((unsigned long) i+1);
-+		np->grant_rx_ref[i] = GRANT_INVALID_REF;
-+	}
-+
-+	/* A grant for every tx ring slot */
-+	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
-+					  &np->gref_tx_head) < 0) {
-+		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
-+		err = -ENOMEM;
-+		goto exit;
-+	}
-+	/* A grant for every rx ring slot */
-+	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
-+					  &np->gref_rx_head) < 0) {
-+		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
-+		gnttab_free_grant_references(np->gref_tx_head);
-+		err = -ENOMEM;
-+		goto exit;
-+	}
-+
-+	netdev->open            = network_open;
-+	netdev->hard_start_xmit = network_start_xmit;
-+	netdev->stop            = network_close;
-+	netdev->get_stats       = network_get_stats;
-+	netdev->poll            = netif_poll;
-+	netdev->uninit          = netif_uninit;
-+	netdev->weight          = 64;
-+	netdev->features        = NETIF_F_IP_CSUM;
-+
-+	SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
-+	SET_MODULE_OWNER(netdev);
-+	SET_NETDEV_DEV(netdev, &dev->dev);
-+    
-+	if ((err = register_netdev(netdev)) != 0) {
-+		printk(KERN_WARNING "%s> register_netdev err=%d\n",
-+		       __FUNCTION__, err);
-+		goto exit_free_grefs;
-+	}
-+
-+	if ((err = xennet_proc_addif(netdev)) != 0) {
-+		unregister_netdev(netdev);
-+		goto exit_free_grefs;
-+	}
-+
-+	np->netdev = netdev;
-+
-+ exit:
-+	if (err != 0)
-+		kfree(netdev);
-+	else if (val != NULL)
-+		*val = netdev;
-+	return err;
-+
-+ exit_free_grefs:
-+	gnttab_free_grant_references(np->gref_tx_head);
-+	gnttab_free_grant_references(np->gref_rx_head);
-+	goto exit;
-+}
-+
-+/*
-+ * We use this notifier to send out a fake ARP reply to reset switches and
-+ * router ARP caches when an IP interface is brought up on a VIF.
-+ */
-+static int 
-+inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
-+{
-+	struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
-+	struct net_device *dev = ifa->ifa_dev->dev;
-+
-+	/* UP event and is it one of our devices? */
-+	if (event == NETDEV_UP && dev->open == network_open)
-+		(void)send_fake_arp(dev);
-+        
-+	return NOTIFY_DONE;
-+}
-+
-+
-+/* ** Close down ** */
-+
-+
-+/**
-+ * Handle the change of state of the backend to Closing.  We must delete our
-+ * device-layer structures now, to ensure that writes are flushed through to
-+ * the backend.  Once this is done, we can switch to Closed in
-+ * acknowledgement.
-+ */
-+static void netfront_closing(struct xenbus_device *dev)
-+{
-+	struct netfront_info *info = dev->data;
-+
-+	DPRINTK("netfront_closing: %s removed\n", dev->nodename);
-+
-+	close_netdev(info);
-+
-+	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
-+}
-+
-+
-+static int netfront_remove(struct xenbus_device *dev)
-+{
-+	struct netfront_info *info = dev->data;
-+
-+	DPRINTK("%s\n", dev->nodename);
-+
-+	netif_disconnect_backend(info);
-+	free_netdev(info->netdev);
-+
-+	return 0;
-+}
-+
-+
-+static void close_netdev(struct netfront_info *info)
-+{
-+	spin_lock_irq(&info->netdev->xmit_lock);
-+	netif_stop_queue(info->netdev);
-+	spin_unlock_irq(&info->netdev->xmit_lock);
-+
-+#ifdef CONFIG_PROC_FS
-+	xennet_proc_delif(info->netdev);
-+#endif
-+
-+	del_timer_sync(&info->rx_refill_timer);
-+
-+	unregister_netdev(info->netdev);
-+}
-+
-+
-+static void netif_disconnect_backend(struct netfront_info *info)
-+{
-+	/* Stop old i/f to prevent errors whilst we rebuild the state. */
-+	spin_lock_irq(&info->tx_lock);
-+	spin_lock(&info->rx_lock);
-+	info->backend_state = BEST_DISCONNECTED;
-+	spin_unlock(&info->rx_lock);
-+	spin_unlock_irq(&info->tx_lock);
-+
-+	if (info->irq)
-+		unbind_from_irqhandler(info->irq, info->netdev);
-+	info->evtchn = info->irq = 0;
-+
-+	end_access(info->tx_ring_ref, info->tx.sring);
-+	end_access(info->rx_ring_ref, info->rx.sring);
-+	info->tx_ring_ref = GRANT_INVALID_REF;
-+	info->rx_ring_ref = GRANT_INVALID_REF;
-+	info->tx.sring = NULL;
-+	info->rx.sring = NULL;
-+}
-+
-+
-+static void netif_free(struct netfront_info *info)
-+{
-+	close_netdev(info);
-+	netif_disconnect_backend(info);
-+	free_netdev(info->netdev);
-+}
-+
-+
-+static void end_access(int ref, void *page)
-+{
-+	if (ref != GRANT_INVALID_REF)
-+		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
-+}
-+
-+
-+/* ** Driver registration ** */
-+
-+
-+static struct xenbus_device_id netfront_ids[] = {
-+	{ "vif" },
-+	{ "" }
-+};
-+
-+
-+static struct xenbus_driver netfront = {
-+	.name = "vif",
-+	.owner = THIS_MODULE,
-+	.ids = netfront_ids,
-+	.probe = netfront_probe,
-+	.remove = netfront_remove,
-+	.resume = netfront_resume,
-+	.otherend_changed = backend_changed,
-+};
-+
-+
-+static struct notifier_block notifier_inetdev = {
-+	.notifier_call  = inetdev_notify,
-+	.next           = NULL,
-+	.priority       = 0
-+};
-+
-+static int __init netif_init(void)
-+{
-+	int err = 0;
-+
-+	if (xen_start_info->flags & SIF_INITDOMAIN)
-+		return 0;
-+
-+	if ((err = xennet_proc_init()) != 0)
-+		return err;
-+
-+	IPRINTK("Initialising virtual ethernet driver.\n");
-+
-+	(void)register_inetaddr_notifier(&notifier_inetdev);
-+
-+	return xenbus_register_frontend(&netfront);
-+}
-+module_init(netif_init);
-+
-+
-+static void netif_exit(void)
-+{
-+	unregister_inetaddr_notifier(&notifier_inetdev);
-+
-+	return xenbus_unregister_driver(&netfront);
-+}
-+module_exit(netif_exit);
-+
-+MODULE_LICENSE("Dual BSD/GPL");
-+ 
-+ 
-+/* ** /proc **/
-+
-+
-+#ifdef CONFIG_PROC_FS
-+
-+#define TARGET_MIN 0UL
-+#define TARGET_MAX 1UL
-+#define TARGET_CUR 2UL
-+
-+static int xennet_proc_read(
-+	char *page, char **start, off_t off, int count, int *eof, void *data)
-+{
-+	struct net_device *dev =
-+		(struct net_device *)((unsigned long)data & ~3UL);
-+	struct netfront_info *np = netdev_priv(dev);
-+	int len = 0, which_target = (long)data & 3;
-+    
-+	switch (which_target)
-+	{
-+	case TARGET_MIN:
-+		len = sprintf(page, "%d\n", np->rx_min_target);
-+		break;
-+	case TARGET_MAX:
-+		len = sprintf(page, "%d\n", np->rx_max_target);
-+		break;
-+	case TARGET_CUR:
-+		len = sprintf(page, "%d\n", np->rx_target);
-+		break;
-+	}
-+
-+	*eof = 1;
-+	return len;
-+}
-+
-+static int xennet_proc_write(
-+	struct file *file, const char __user *buffer,
-+	unsigned long count, void *data)
-+{
-+	struct net_device *dev =
-+		(struct net_device *)((unsigned long)data & ~3UL);
-+	struct netfront_info *np = netdev_priv(dev);
-+	int which_target = (long)data & 3;
-+	char string[64];
-+	long target;
-+
-+	if (!capable(CAP_SYS_ADMIN))
-+		return -EPERM;
-+
-+	if (count <= 1)
-+		return -EBADMSG; /* runt */
-+	if (count > sizeof(string))
-+		return -EFBIG;   /* too long */
-+
-+	if (copy_from_user(string, buffer, count))
-+		return -EFAULT;
-+	string[sizeof(string)-1] = '\0';
-+
-+	target = simple_strtol(string, NULL, 10);
-+	if (target < RX_MIN_TARGET)
-+		target = RX_MIN_TARGET;
-+	if (target > RX_MAX_TARGET)
-+		target = RX_MAX_TARGET;
-+
-+	spin_lock(&np->rx_lock);
-+
-+	switch (which_target)
-+	{
-+	case TARGET_MIN:
-+		if (target > np->rx_max_target)
-+			np->rx_max_target = target;
-+		np->rx_min_target = target;
-+		if (target > np->rx_target)
-+			np->rx_target = target;
-+		break;
-+	case TARGET_MAX:
-+		if (target < np->rx_min_target)
-+			np->rx_min_target = target;
-+		np->rx_max_target = target;
-+		if (target < np->rx_target)
-+			np->rx_target = target;
-+		break;
-+	case TARGET_CUR:
-+		break;
-+	}
-+
-+	network_alloc_rx_buffers(dev);
-+
-+	spin_unlock(&np->rx_lock);
-+
-+	return count;
-+}
-+
-+static int xennet_proc_init(void)
-+{
-+	if (proc_mkdir("xen/net", NULL) == NULL)
-+		return -ENOMEM;
-+	return 0;
-+}
-+
-+static int xennet_proc_addif(struct net_device *dev)
-+{
-+	struct proc_dir_entry *dir, *min, *max, *cur;
-+	char name[30];
-+
-+	sprintf(name, "xen/net/%s", dev->name);
-+
-+	dir = proc_mkdir(name, NULL);
-+	if (!dir)
-+		goto nomem;
-+
-+	min = create_proc_entry("rxbuf_min", 0644, dir);
-+	max = create_proc_entry("rxbuf_max", 0644, dir);
-+	cur = create_proc_entry("rxbuf_cur", 0444, dir);
-+	if (!min || !max || !cur)
-+		goto nomem;
-+
-+	min->read_proc  = xennet_proc_read;
-+	min->write_proc = xennet_proc_write;
-+	min->data       = (void *)((unsigned long)dev | TARGET_MIN);
-+
-+	max->read_proc  = xennet_proc_read;
-+	max->write_proc = xennet_proc_write;
-+	max->data       = (void *)((unsigned long)dev | TARGET_MAX);
-+
-+	cur->read_proc  = xennet_proc_read;
-+	cur->write_proc = xennet_proc_write;
-+	cur->data       = (void *)((unsigned long)dev | TARGET_CUR);
-+
-+	return 0;
-+
-+ nomem:
-+	xennet_proc_delif(dev);
-+	return -ENOMEM;
-+}
-+
-+static void xennet_proc_delif(struct net_device *dev)
-+{
-+	char name[30];
-+
-+	sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
-+	remove_proc_entry(name, NULL);
-+
-+	sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
-+	remove_proc_entry(name, NULL);
-+
-+	sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
-+	remove_proc_entry(name, NULL);
-+
-+	sprintf(name, "xen/net/%s", dev->name);
-+	remove_proc_entry(name, NULL);
-+}
-+
-+#endif
-+
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/privcmd/Makefile linux-2.6.12-xen/drivers/xen/privcmd/Makefile
---- pristine-linux-2.6.12/drivers/xen/privcmd/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/privcmd/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+obj-y	:= privcmd.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/privcmd/privcmd.c linux-2.6.12-xen/drivers/xen/privcmd/privcmd.c
---- pristine-linux-2.6.12/drivers/xen/privcmd/privcmd.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/privcmd/privcmd.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,279 @@
-+/******************************************************************************
-+ * privcmd.c
-+ * 
-+ * Interface to privileged domain-0 commands.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/errno.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/swap.h>
-+#include <linux/smp_lock.h>
-+#include <linux/highmem.h>
-+#include <linux/pagemap.h>
-+#include <linux/seq_file.h>
-+#include <linux/kthread.h>
-+#include <asm/hypervisor.h>
-+
-+#include <asm/pgalloc.h>
-+#include <asm/pgtable.h>
-+#include <asm/uaccess.h>
-+#include <asm/tlb.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/linux-public/privcmd.h>
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/dom0_ops.h>
-+#include <asm-xen/xen_proc.h>
-+
-+static struct proc_dir_entry *privcmd_intf;
-+static struct proc_dir_entry *capabilities_intf;
-+
-+static int privcmd_ioctl(struct inode *inode, struct file *file,
-+                         unsigned int cmd, unsigned long data)
-+{
-+	int ret = -ENOSYS;
-+	void __user *udata = (void __user *) data;
-+
-+	switch (cmd) {
-+	case IOCTL_PRIVCMD_HYPERCALL: {
-+		privcmd_hypercall_t hypercall;
-+  
-+		if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
-+			return -EFAULT;
-+
-+#if defined(__i386__)
-+		__asm__ __volatile__ (
-+			"pushl %%ebx; pushl %%ecx; pushl %%edx; "
-+			"pushl %%esi; pushl %%edi; "
-+			"movl  4(%%eax),%%ebx ;"
-+			"movl  8(%%eax),%%ecx ;"
-+			"movl 12(%%eax),%%edx ;"
-+			"movl 16(%%eax),%%esi ;"
-+			"movl 20(%%eax),%%edi ;"
-+			"movl   (%%eax),%%eax ;"
-+			"shll $5,%%eax ;"
-+			"addl $hypercall_page,%%eax ;"
-+			"call *%%eax ;"
-+			"popl %%edi; popl %%esi; popl %%edx; "
-+			"popl %%ecx; popl %%ebx"
-+			: "=a" (ret) : "0" (&hypercall) : "memory" );
-+#elif defined (__x86_64__)
-+		{
-+			long ign1, ign2, ign3;
-+			__asm__ __volatile__ (
-+				"movq %8,%%r10; movq %9,%%r8;"
-+				"shlq $5,%%rax ;"
-+				"addq $hypercall_page,%%rax ;"
-+				"call *%%rax"
-+				: "=a" (ret), "=D" (ign1),
-+				  "=S" (ign2), "=d" (ign3)
-+				: "0" ((unsigned long)hypercall.op), 
-+				"1" ((unsigned long)hypercall.arg[0]), 
-+				"2" ((unsigned long)hypercall.arg[1]),
-+				"3" ((unsigned long)hypercall.arg[2]), 
-+				"g" ((unsigned long)hypercall.arg[3]),
-+				"g" ((unsigned long)hypercall.arg[4])
-+				: "r8", "r10", "memory" );
-+		}
-+#elif defined (__ia64__)
-+		__asm__ __volatile__ (
-+			";; mov r14=%2; mov r15=%3; "
-+			"mov r16=%4; mov r17=%5; mov r18=%6;"
-+			"mov r2=%1; break 0x1000;; mov %0=r8 ;;"
-+			: "=r" (ret)
-+			: "r" (hypercall.op),
-+			"r" (hypercall.arg[0]),
-+			"r" (hypercall.arg[1]),
-+			"r" (hypercall.arg[2]),
-+			"r" (hypercall.arg[3]),
-+			"r" (hypercall.arg[4])
-+			: "r14","r15","r16","r17","r18","r2","r8","memory");
-+#endif
-+	}
-+	break;
-+
-+#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
-+	case IOCTL_PRIVCMD_MMAP: {
-+#define PRIVCMD_MMAP_SZ 32
-+		privcmd_mmap_t mmapcmd;
-+		privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ];
-+		privcmd_mmap_entry_t __user *p;
-+		int i, rc;
-+
-+		if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
-+			return -EFAULT;
-+
-+		p = mmapcmd.entry;
-+
-+		for (i = 0; i < mmapcmd.num;
-+		     i += PRIVCMD_MMAP_SZ, p += PRIVCMD_MMAP_SZ) {
-+			int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
-+				PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
-+
-+			if (copy_from_user(&msg, p,
-+					   n*sizeof(privcmd_mmap_entry_t)))
-+				return -EFAULT;
-+     
-+			for (j = 0; j < n; j++) {
-+				struct vm_area_struct *vma = 
-+					find_vma( current->mm, msg[j].va );
-+
-+				if (!vma)
-+					return -EINVAL;
-+
-+				if (msg[j].va > PAGE_OFFSET)
-+					return -EINVAL;
-+
-+				if ((msg[j].va + (msg[j].npages << PAGE_SHIFT))
-+				    > vma->vm_end )
-+					return -EINVAL;
-+
-+				if ((rc = direct_remap_pfn_range(
-+					vma,
-+					msg[j].va&PAGE_MASK, 
-+					msg[j].mfn, 
-+					msg[j].npages<<PAGE_SHIFT, 
-+					vma->vm_page_prot,
-+					mmapcmd.dom)) < 0)
-+					return rc;
-+			}
-+		}
-+		ret = 0;
-+	}
-+	break;
-+
-+	case IOCTL_PRIVCMD_MMAPBATCH: {
-+		mmu_update_t u;
-+		privcmd_mmapbatch_t m;
-+		struct vm_area_struct *vma = NULL;
-+		unsigned long __user *p;
-+		unsigned long addr, mfn; 
-+		uint64_t ptep;
-+		int i;
-+
-+		if (copy_from_user(&m, udata, sizeof(m))) {
-+			ret = -EFAULT;
-+			goto batch_err;
-+		}
-+
-+		vma = find_vma( current->mm, m.addr );
-+		if (!vma) {
-+			ret = -EINVAL;
-+			goto batch_err;
-+		}
-+
-+		if (m.addr > PAGE_OFFSET) {
-+			ret = -EFAULT;
-+			goto batch_err;
-+		}
-+
-+		if ((m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end) {
-+			ret = -EFAULT;
-+			goto batch_err;
-+		}
-+
-+		p = m.arr;
-+		addr = m.addr;
-+		for (i = 0; i < m.num; i++, addr += PAGE_SIZE, p++) {
-+			if (get_user(mfn, p))
-+				return -EFAULT;
-+#ifdef __ia64__
-+			ret = remap_pfn_range(vma,
-+					      addr&PAGE_MASK,
-+					      mfn,
-+					      1<<PAGE_SHIFT,
-+					      vma->vm_page_prot);
-+			if (ret < 0)
-+			    goto batch_err;
-+#else
-+
-+			ret = create_lookup_pte_addr(vma->vm_mm, addr, &ptep);
-+			if (ret)
-+				goto batch_err;
-+
-+			u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot));
-+			u.ptr = ptep;
-+
-+			if (HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0)
-+				put_user(0xF0000000 | mfn, p);
-+#endif
-+		}
-+
-+		ret = 0;
-+		break;
-+
-+	batch_err:
-+		printk("batch_err ret=%d vma=%p addr=%lx "
-+		       "num=%d arr=%p %lx-%lx\n", 
-+		       ret, vma, m.addr, m.num, m.arr,
-+		       vma ? vma->vm_start : 0, vma ? vma->vm_end : 0);
-+		break;
-+	}
-+	break;
-+#endif
-+
-+	default:
-+		ret = -EINVAL;
-+		break;
-+	}
-+
-+	return ret;
-+}
-+
-+static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
-+{
-+	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
-+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-+
-+	return 0;
-+}
-+
-+static struct file_operations privcmd_file_ops = {
-+	.ioctl = privcmd_ioctl,
-+	.mmap  = privcmd_mmap,
-+};
-+
-+static int capabilities_read(char *page, char **start, off_t off,
-+                        int count, int *eof, void *data)
-+{
-+	int len = 0;
-+	*page = 0;
-+
-+	if (xen_start_info->flags & SIF_INITDOMAIN)
-+		len = sprintf( page, "control_d\n" );
-+
-+	*eof = 1;
-+	return len;
-+}
-+
-+static int __init privcmd_init(void)
-+{
-+	privcmd_intf = create_xen_proc_entry("privcmd", 0400);
-+	if (privcmd_intf != NULL)
-+		privcmd_intf->proc_fops = &privcmd_file_ops;
-+
-+	capabilities_intf = create_xen_proc_entry("capabilities", 0400 );
-+	if (capabilities_intf != NULL)
-+		capabilities_intf->read_proc = capabilities_read;
-+
-+	return 0;
-+}
-+
-+__initcall(privcmd_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/common.h linux-2.6.12-xen/drivers/xen/tpmback/common.h
---- pristine-linux-2.6.12/drivers/xen/tpmback/common.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/tpmback/common.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,89 @@
-+/******************************************************************************
-+ * drivers/xen/tpmback/common.h
-+ */
-+
-+#ifndef __NETIF__BACKEND__COMMON_H__
-+#define __NETIF__BACKEND__COMMON_H__
-+
-+#include <linux/config.h>
-+#include <linux/version.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/slab.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/driver_util.h>
-+#include <asm-xen/xen-public/grant_table.h>
-+#include <asm-xen/xen-public/io/tpmif.h>
-+#include <asm/io.h>
-+#include <asm/pgalloc.h>
-+
-+#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
-+                                    __FILE__ , __LINE__ , ## _a )
-+
-+typedef struct tpmif_st {
-+	struct list_head tpmif_list;
-+	/* Unique identifier for this interface. */
-+	domid_t domid;
-+	unsigned int handle;
-+
-+	/* Physical parameters of the comms window. */
-+	unsigned int evtchn;
-+	unsigned int irq;
-+
-+	/* The shared rings and indexes. */
-+	tpmif_tx_interface_t *tx;
-+	struct vm_struct *tx_area;
-+
-+	/* Miscellaneous private stuff. */
-+	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-+	int active;
-+
-+	struct tpmif_st *hash_next;
-+	struct list_head list;	/* scheduling list */
-+	atomic_t refcnt;
-+
-+	long int tpm_instance;
-+	unsigned long mmap_vstart;
-+
-+	struct work_struct work;
-+
-+	grant_handle_t shmem_handle;
-+	grant_ref_t shmem_ref;
-+} tpmif_t;
-+
-+void tpmif_disconnect_complete(tpmif_t * tpmif);
-+tpmif_t *tpmif_find(domid_t domid, long int instance);
-+void tpmif_interface_init(void);
-+void tpmif_schedule_work(tpmif_t * tpmif);
-+void tpmif_deschedule_work(tpmif_t * tpmif);
-+void tpmif_xenbus_init(void);
-+int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
-+irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
-+int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domain, u32 instance);
-+int tpmif_vtpm_close(u32 instance);
-+
-+int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
-+
-+#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
-+#define tpmif_put(_b)                             \
-+    do {                                          \
-+        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-+            tpmif_disconnect_complete(_b);        \
-+    } while (0)
-+
-+
-+extern int num_frontends;
-+
-+#define MMAP_VADDR(t,_req) ((t)->mmap_vstart + ((_req) * PAGE_SIZE))
-+
-+#endif /* __TPMIF__BACKEND__COMMON_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/interface.c linux-2.6.12-xen/drivers/xen/tpmback/interface.c
---- pristine-linux-2.6.12/drivers/xen/tpmback/interface.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/tpmback/interface.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,200 @@
-+ /*****************************************************************************
-+ * drivers/xen/tpmback/interface.c
-+ *
-+ * Virtual TPM interface management.
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb at us.ibm.com
-+ *
-+ * This code has been derived from drivers/xen/netback/interface.c
-+ * Copyright (c) 2004, Keir Fraser
-+ */
-+
-+#include "common.h"
-+#include <asm-xen/balloon.h>
-+
-+#define TPMIF_HASHSZ (2 << 5)
-+#define TPMIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(TPMIF_HASHSZ-1))
-+
-+static kmem_cache_t *tpmif_cachep;
-+int num_frontends = 0;
-+
-+LIST_HEAD(tpmif_list);
-+
-+tpmif_t *
-+alloc_tpmif(domid_t domid, long int instance)
-+{
-+	struct page *page;
-+	tpmif_t *tpmif;
-+
-+	tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
-+	if (!tpmif)
-+		return ERR_PTR(-ENOMEM);
-+
-+	memset(tpmif, 0, sizeof (*tpmif));
-+	tpmif->domid = domid;
-+	tpmif->status = DISCONNECTED;
-+	tpmif->tpm_instance = instance;
-+	atomic_set(&tpmif->refcnt, 1);
-+
-+	page = balloon_alloc_empty_page_range(TPMIF_TX_RING_SIZE);
-+	BUG_ON(page == NULL);
-+	tpmif->mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-+
-+	list_add(&tpmif->tpmif_list, &tpmif_list);
-+	num_frontends++;
-+
-+	return tpmif;
-+}
-+
-+void
-+free_tpmif(tpmif_t * tpmif)
-+{
-+	num_frontends--;
-+	list_del(&tpmif->tpmif_list);
-+	kmem_cache_free(tpmif_cachep, tpmif);
-+}
-+
-+tpmif_t *
-+tpmif_find(domid_t domid, long int instance)
-+{
-+	tpmif_t *tpmif;
-+
-+	list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
-+		if (tpmif->tpm_instance == instance) {
-+			if (tpmif->domid == domid) {
-+				tpmif_get(tpmif);
-+				return tpmif;
-+			} else {
-+				return ERR_PTR(-EEXIST);
-+			}
-+		}
-+	}
-+
-+	return alloc_tpmif(domid, instance);
-+}
-+
-+static int
-+map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
-+{
-+	int ret;
-+	struct gnttab_map_grant_ref op = {
-+		.host_addr = (unsigned long)tpmif->tx_area->addr,
-+		.flags = GNTMAP_host_map,
-+		.ref = shared_page,
-+		.dom = tpmif->domid,
-+	};
-+
-+	lock_vm_area(tpmif->tx_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
-+	unlock_vm_area(tpmif->tx_area);
-+	BUG_ON(ret);
-+
-+	if (op.status) {
-+		DPRINTK(" Grant table operation failure !\n");
-+		return op.status;
-+	}
-+
-+	tpmif->shmem_ref = shared_page;
-+	tpmif->shmem_handle = op.handle;
-+
-+	return 0;
-+}
-+
-+static void
-+unmap_frontend_page(tpmif_t *tpmif)
-+{
-+	struct gnttab_unmap_grant_ref op;
-+	int ret;
-+
-+	op.host_addr    = (unsigned long)tpmif->tx_area->addr;
-+	op.handle       = tpmif->shmem_handle;
-+	op.dev_bus_addr = 0;
-+
-+	lock_vm_area(tpmif->tx_area);
-+	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
-+	unlock_vm_area(tpmif->tx_area);
-+	BUG_ON(ret);
-+}
-+
-+int
-+tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
-+{
-+	int err;
-+	evtchn_op_t op = {
-+		.cmd = EVTCHNOP_bind_interdomain,
-+		.u.bind_interdomain.remote_dom = tpmif->domid,
-+		.u.bind_interdomain.remote_port = evtchn };
-+
-+        if (tpmif->irq) {
-+                return 0;
-+        }
-+
-+	if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
-+		return -ENOMEM;
-+
-+	err = map_frontend_page(tpmif, shared_page);
-+	if (err) {
-+		free_vm_area(tpmif->tx_area);
-+		return err;
-+	}
-+
-+	err = HYPERVISOR_event_channel_op(&op);
-+	if (err) {
-+		unmap_frontend_page(tpmif);
-+		free_vm_area(tpmif->tx_area);
-+		return err;
-+	}
-+
-+	tpmif->evtchn = op.u.bind_interdomain.local_port;
-+
-+	tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
-+
-+	tpmif->irq = bind_evtchn_to_irqhandler(
-+		tpmif->evtchn, tpmif_be_int, 0, "tpmif-backend", tpmif);
-+	tpmif->shmem_ref = shared_page;
-+	tpmif->active = 1;
-+
-+	return 0;
-+}
-+
-+static void
-+__tpmif_disconnect_complete(void *arg)
-+{
-+	tpmif_t *tpmif = (tpmif_t *) arg;
-+
-+	if (tpmif->irq)
-+		unbind_from_irqhandler(tpmif->irq, tpmif);
-+
-+	if (tpmif->tx) {
-+		unmap_frontend_page(tpmif);
-+		free_vm_area(tpmif->tx_area);
-+	}
-+
-+	free_tpmif(tpmif);
-+}
-+
-+void
-+tpmif_disconnect_complete(tpmif_t * tpmif)
-+{
-+	INIT_WORK(&tpmif->work, __tpmif_disconnect_complete, (void *)tpmif);
-+	schedule_work(&tpmif->work);
-+}
-+
-+void __init
-+tpmif_interface_init(void)
-+{
-+	tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
-+					 0, 0, NULL, NULL);
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/Makefile linux-2.6.12-xen/drivers/xen/tpmback/Makefile
---- pristine-linux-2.6.12/drivers/xen/tpmback/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/tpmback/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,4 @@
-+
-+obj-$(CONFIG_XEN_TPMDEV_BACKEND)	+= tpmbk.o
-+
-+tpmbk-y += tpmback.o interface.o xenbus.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/tpmback.c linux-2.6.12-xen/drivers/xen/tpmback/tpmback.c
---- pristine-linux-2.6.12/drivers/xen/tpmback/tpmback.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/tpmback/tpmback.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1109 @@
-+/******************************************************************************
-+ * drivers/xen/tpmback/tpmback.c
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb at us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from drivers/xen/netback/netback.c
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ */
-+
-+#include "common.h"
-+#include <asm-xen/evtchn.h>
-+
-+#include <linux/types.h>
-+#include <linux/list.h>
-+#include <linux/miscdevice.h>
-+#include <linux/poll.h>
-+#include <asm/uaccess.h>
-+#include <asm-xen/xenbus.h>
-+#include <asm-xen/xen-public/grant_table.h>
-+
-+
-+/* local data structures */
-+struct data_exchange {
-+	struct list_head pending_pak;
-+	struct list_head current_pak;
-+	unsigned int copied_so_far;
-+	u8 has_opener;
-+	rwlock_t pak_lock;  // protects all of the previous fields
-+	wait_queue_head_t wait_queue;
-+};
-+
-+struct packet {
-+	struct list_head next;
-+	unsigned int data_len;
-+	u8 *data_buffer;
-+	tpmif_t *tpmif;
-+	u32 tpm_instance;
-+	u8 req_tag;
-+	u32 last_read;
-+	u8 flags;
-+	struct timer_list processing_timer;
-+};
-+
-+enum {
-+	PACKET_FLAG_DISCARD_RESPONSE = 1,
-+	PACKET_FLAG_CHECK_RESPONSESTATUS = 2,
-+};
-+
-+static struct data_exchange dataex;
-+
-+/* local function prototypes */
-+static int vtpm_queue_packet(struct packet *pak);
-+static int _packet_write(struct packet *pak,
-+                         const char *data, size_t size,
-+                         int userbuffer);
-+static void processing_timeout(unsigned long ptr);
-+static int  packet_read_shmem(struct packet *pak,
-+                              tpmif_t *tpmif,
-+                              u32 offset,
-+                              char *buffer,
-+                              int isuserbuffer,
-+                              u32 left);
-+
-+
-+#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE
-+
-+#define MIN(x,y)  ((x) < (y) ? (x) : (y))
-+
-+
-+/***************************************************************
-+ Buffer copying
-+***************************************************************/
-+static inline int
-+copy_from_buffer(void *to,
-+                 const void *from,
-+                 unsigned long size,
-+                 int userbuffer)
-+{
-+	if (userbuffer) {
-+		if (copy_from_user(to, from, size))
-+			return -EFAULT;
-+	} else {
-+		memcpy(to, from, size);
-+	}
-+	return 0;
-+}
-+
-+/***************************************************************
-+ Packet-related functions
-+***************************************************************/
-+
-+static struct packet *
-+packet_find_instance(struct list_head *head, u32 tpm_instance)
-+{
-+	struct packet *pak;
-+	struct list_head *p;
-+	/*
-+	 * traverse the list of packets and return the first
-+	 * one with the given instance number
-+	 */
-+	list_for_each(p, head) {
-+		pak = list_entry(p, struct packet, next);
-+		if (pak->tpm_instance == tpm_instance) {
-+			return pak;
-+		}
-+	}
-+	return NULL;
-+}
-+
-+static struct packet *
-+packet_find_packet(struct list_head *head, void *packet)
-+{
-+	struct packet *pak;
-+	struct list_head *p;
-+	/*
-+	 * traverse the list of packets and return the given
-+	 * packet if it is found on the list
-+	 */
-+	list_for_each(p, head) {
-+		pak = list_entry(p, struct packet, next);
-+		if (pak == packet) {
-+			return pak;
-+		}
-+	}
-+	return NULL;
-+}
-+
-+static struct packet *
-+packet_alloc(tpmif_t *tpmif, u32 size, u8 req_tag, u8 flags)
-+{
-+	struct packet *pak = NULL;
-+	pak = kmalloc(sizeof(struct packet),
-+                      GFP_KERNEL);
-+	if (NULL != pak) {
-+		memset(pak, 0x0, sizeof(*pak));
-+		if (tpmif) {
-+			pak->tpmif = tpmif;
-+			pak->tpm_instance = tpmif->tpm_instance;
-+		}
-+		pak->data_len  = size;
-+		pak->req_tag   = req_tag;
-+		pak->last_read = 0;
-+		pak->flags     = flags;
-+
-+		/*
-+		 * cannot do tpmif_get(tpmif); bad things happen
-+		 * on the last tpmif_put()
-+		 */
-+		init_timer(&pak->processing_timer);
-+		pak->processing_timer.function = processing_timeout;
-+		pak->processing_timer.data = (unsigned long)pak;
-+	}
-+	return pak;
-+}
-+
-+static inline void
-+packet_reset(struct packet *pak)
-+{
-+	pak->last_read = 0;
-+}
-+
-+static inline void
-+packet_free(struct packet *pak)
-+{
-+	del_singleshot_timer_sync(&pak->processing_timer);
-+	kfree(pak->data_buffer);
-+	/*
-+	 * cannot do tpmif_put(pak->tpmif); bad things happen
-+	 * on the last tpmif_put()
-+	 */
-+	kfree(pak);
-+}
-+
-+static int
-+packet_set(struct packet *pak,
-+           const unsigned char *buffer, u32 size)
-+{
-+	int rc = 0;
-+	unsigned char *buf = kmalloc(size, GFP_KERNEL);
-+	if (NULL != buf) {
-+		pak->data_buffer = buf;
-+		memcpy(buf, buffer, size);
-+		pak->data_len = size;
-+	} else {
-+		rc = -ENOMEM;
-+	}
-+	return rc;
-+}
-+
-+
-+/*
-+ * Write data to the shared memory and send it to the FE.
-+ */
-+static int
-+packet_write(struct packet *pak,
-+             const char *data, size_t size,
-+             int userbuffer)
-+{
-+	int rc = 0;
-+
-+	DPRINTK("Supposed to send %zu bytes to front-end!\n",
-+	        size);
-+
-+	if (0 != (pak->flags & PACKET_FLAG_CHECK_RESPONSESTATUS)) {
-+#ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
-+		u32 res;
-+		if (copy_from_buffer(&res,
-+		                     &data[2+4],
-+		                     sizeof(res),
-+		                     userbuffer)) {
-+			return -EFAULT;
-+		}
-+
-+		if (res != 0) {
-+			/*
-+			 * Close down this device. The FE should be
-+			 * notified about the closure.
-+			 */
-+			if (!pak->tpmif) {
-+				return -EFAULT;
-+			}
-+			pak->tpmif->status = DISCONNECTING;
-+		}
-+#endif
-+	}
-+
-+	if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
-+		/* Don't send a response to this packet. Just acknowledge it. */
-+		rc = size;
-+	} else {
-+		rc = _packet_write(pak, data, size, userbuffer);
-+	}
-+
-+	return rc;
-+}
-+
-+
-+static int
-+_packet_write(struct packet *pak,
-+              const char *data, size_t size,
-+              int userbuffer)
-+{
-+	/*
-+	 * Write into the shared memory pages directly
-+	 * and send it to the front end.
-+	 */
-+	tpmif_t *tpmif = pak->tpmif;
-+	grant_handle_t handle;
-+	int rc = 0;
-+	unsigned int i = 0;
-+	unsigned int offset = 0;
-+
-+	if (tpmif == NULL) {
-+		return -EFAULT;
-+        }
-+
-+	if (tpmif->status == DISCONNECTED) {
-+		return size;
-+	}
-+
-+	while (offset < size && i < TPMIF_TX_RING_SIZE) {
-+		unsigned int tocopy;
-+		struct gnttab_map_grant_ref map_op;
-+		struct gnttab_unmap_grant_ref unmap_op;
-+		tpmif_tx_request_t *tx;
-+
-+		tx = &tpmif->tx->ring[i].req;
-+
-+		if (0 == tx->addr) {
-+			DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
-+			return 0;
-+		}
-+
-+		map_op.host_addr  = MMAP_VADDR(tpmif, i);
-+		map_op.flags      = GNTMAP_host_map;
-+		map_op.ref        = tx->ref;
-+		map_op.dom        = tpmif->domid;
-+
-+		if(unlikely(
-+		    HYPERVISOR_grant_table_op(
-+		        GNTTABOP_map_grant_ref,
-+		        &map_op,
-+		        1))) {
-+			BUG();
-+		}
-+
-+		handle = map_op.handle;
-+
-+		if (map_op.status) {
-+			DPRINTK(" Grant table operation failure !\n");
-+			return 0;
-+		}
-+		set_phys_to_machine(__pa(MMAP_VADDR(tpmif,i)) >> PAGE_SHIFT,
-+			FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT));
-+
-+		tocopy = MIN(size - offset, PAGE_SIZE);
-+
-+		if (copy_from_buffer((void *)(MMAP_VADDR(tpmif,i)|
-+		                     (tx->addr & ~PAGE_MASK)),
-+		                     &data[offset],
-+		                     tocopy,
-+		                     userbuffer)) {
-+			tpmif_put(tpmif);
-+			return -EFAULT;
-+		}
-+		tx->size = tocopy;
-+
-+		unmap_op.host_addr    = MMAP_VADDR(tpmif, i);
-+		unmap_op.handle       = handle;
-+		unmap_op.dev_bus_addr = 0;
-+
-+		if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
-+		                                      &unmap_op,
-+		                                      1))) {
-+			BUG();
-+		}
-+
-+		offset += tocopy;
-+		i++;
-+	}
-+
-+	rc = offset;
-+	DPRINTK("Notifying frontend via irq %d\n",
-+	        tpmif->irq);
-+	notify_remote_via_irq(tpmif->irq);
-+
-+	return rc;
-+}
-+
-+/*
-+ * Read data from the shared memory and copy it directly into the
-+ * provided buffer. Advance the read_last indicator which tells
-+ * how many bytes have already been read.
-+ */
-+static int
-+packet_read(struct packet *pak, size_t numbytes,
-+            char *buffer, size_t buffersize,
-+            int userbuffer)
-+{
-+	tpmif_t *tpmif = pak->tpmif;
-+	/*
-+	 * I am supposed to read 'numbytes' of data from the
-+	 * buffer.
-+	 * The first 4 bytes that are read are the instance number in
-+	 * network byte order, after that comes the data from the
-+	 * shared memory buffer.
-+	 */
-+	u32 to_copy;
-+	u32 offset = 0;
-+	u32 room_left = buffersize;
-+	/*
-+	 * Ensure that we see the request when we copy it.
-+	 */
-+	mb();
-+
-+	if (pak->last_read < 4) {
-+		/*
-+		 * copy the instance number into the buffer
-+		 */
-+		u32 instance_no = htonl(pak->tpm_instance);
-+		u32 last_read = pak->last_read;
-+		to_copy = MIN(4 - last_read, numbytes);
-+
-+		if (userbuffer) {
-+			if (copy_to_user(&buffer[0],
-+			                 &(((u8 *)&instance_no)[last_read]),
-+			                 to_copy)) {
-+				return -EFAULT;
-+			}
-+		} else {
-+			memcpy(&buffer[0],
-+			       &(((u8 *)&instance_no)[last_read]),
-+			       to_copy);
-+		}
-+
-+		pak->last_read += to_copy;
-+		offset += to_copy;
-+		room_left -= to_copy;
-+	}
-+
-+	/*
-+	 * If the packet has a data buffer appended, read from it...
-+	 */
-+
-+	if (room_left > 0) {
-+		if (pak->data_buffer) {
-+			u32 to_copy = MIN(pak->data_len - offset, room_left);
-+			u32 last_read = pak->last_read - 4;
-+			if (userbuffer) {
-+				if (copy_to_user(&buffer[offset],
-+				                 &pak->data_buffer[last_read],
-+				                 to_copy)) {
-+					return -EFAULT;
-+				}
-+			} else {
-+				memcpy(&buffer[offset],
-+				       &pak->data_buffer[last_read],
-+				       to_copy);
-+			}
-+			pak->last_read += to_copy;
-+			offset += to_copy;
-+		} else {
-+			offset = packet_read_shmem(pak,
-+			                           tpmif,
-+			                           offset,
-+			                           buffer,
-+			                           userbuffer,
-+			                           room_left);
-+		}
-+	}
-+	return offset;
-+}
-+
-+
-+static int
-+packet_read_shmem(struct packet *pak,
-+                  tpmif_t *tpmif,
-+                  u32 offset,
-+                  char *buffer,
-+                  int isuserbuffer,
-+                  u32 room_left) {
-+	u32 last_read = pak->last_read - 4;
-+	u32 i = (last_read / PAGE_SIZE);
-+	u32 pg_offset = last_read & (PAGE_SIZE - 1);
-+	u32 to_copy;
-+	grant_handle_t handle;
-+
-+	tpmif_tx_request_t *tx;
-+	tx = &tpmif->tx->ring[0].req;
-+	/*
-+	 * Start copying data at the page with index 'index'
-+	 * and within that page at offset 'offset'.
-+	 * Copy a maximum of 'room_left' bytes.
-+	 */
-+	to_copy = MIN(PAGE_SIZE - pg_offset, room_left);
-+	while (to_copy > 0) {
-+		void *src;
-+		struct gnttab_map_grant_ref map_op;
-+		struct gnttab_unmap_grant_ref unmap_op;
-+
-+		tx = &tpmif->tx->ring[i].req;
-+
-+		map_op.host_addr = MMAP_VADDR(tpmif, i);
-+		map_op.flags     = GNTMAP_host_map;
-+		map_op.ref       = tx->ref;
-+		map_op.dom       = tpmif->domid;
-+
-+		if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
-+		                                      &map_op,
-+		                                      1))) {
-+			BUG();
-+		}
-+
-+		if (map_op.status) {
-+			DPRINTK(" Grant table operation failure !\n");
-+			return -EFAULT;
-+		}
-+
-+		handle = map_op.handle;
-+
-+		if (to_copy > tx->size) {
-+			/*
-+			 * This is the case when the user wants to read more
-+			 * than what we have. So we just give him what we
-+			 * have.
-+			 */
-+			to_copy = MIN(tx->size, to_copy);
-+		}
-+
-+		DPRINTK("Copying from mapped memory at %08lx\n",
-+		        (unsigned long)(MMAP_VADDR(tpmif,i) |
-+			(tx->addr & ~PAGE_MASK)));
-+
-+		src = (void *)(MMAP_VADDR(tpmif,i) | ((tx->addr & ~PAGE_MASK) + pg_offset));
-+		if (isuserbuffer) {
-+			if (copy_to_user(&buffer[offset],
-+			                 src,
-+			                 to_copy)) {
-+				return -EFAULT;
-+			}
-+		} else {
-+			memcpy(&buffer[offset],
-+			       src,
-+			       to_copy);
-+		}
-+
-+
-+		DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
-+		        tpmif->domid, buffer[offset], buffer[offset+1],buffer[offset+2],buffer[offset+3]);
-+
-+		unmap_op.host_addr    = MMAP_VADDR(tpmif, i);
-+		unmap_op.handle       = handle;
-+		unmap_op.dev_bus_addr = 0;
-+
-+		if(unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
-+		                                      &unmap_op,
-+		                                      1))) {
-+			BUG();
-+		}
-+
-+		offset += to_copy;
-+		pg_offset = 0;
-+		last_read += to_copy;
-+		room_left -= to_copy;
-+
-+		to_copy = MIN(PAGE_SIZE, room_left);
-+		i++;
-+	} /* while (to_copy > 0) */
-+	/*
-+	 * Adjust the last_read pointer
-+	 */
-+	pak->last_read = last_read + 4;
-+	return offset;
-+}
-+
-+
-+/* ============================================================
-+ * The file layer for reading data from this device
-+ * ============================================================
-+ */
-+static int
-+vtpm_op_open(struct inode *inode, struct file *f)
-+{
-+	int rc = 0;
-+	unsigned long flags;
-+
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+	if (dataex.has_opener == 0) {
-+		dataex.has_opener = 1;
-+	} else {
-+		rc = -EPERM;
-+	}
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
-+	return rc;
-+}
-+
-+static ssize_t
-+vtpm_op_read(struct file *file,
-+	     char __user * data, size_t size, loff_t * offset)
-+{
-+	int ret_size = -ENODATA;
-+	struct packet *pak = NULL;
-+	unsigned long flags;
-+
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+
-+	if (list_empty(&dataex.pending_pak)) {
-+		write_unlock_irqrestore(&dataex.pak_lock, flags);
-+		wait_event_interruptible(dataex.wait_queue,
-+		                         !list_empty(&dataex.pending_pak));
-+		write_lock_irqsave(&dataex.pak_lock, flags);
-+	}
-+
-+	if (!list_empty(&dataex.pending_pak)) {
-+		unsigned int left;
-+		pak = list_entry(dataex.pending_pak.next, struct packet, next);
-+
-+		left = pak->data_len - dataex.copied_so_far;
-+
-+		DPRINTK("size given by app: %zu, available: %u\n", size, left);
-+
-+		ret_size = MIN(size,left);
-+
-+		ret_size = packet_read(pak, ret_size, data, size, 1);
-+		if (ret_size < 0) {
-+			ret_size = -EFAULT;
-+		} else {
-+			DPRINTK("Copied %d bytes to user buffer\n", ret_size);
-+
-+			dataex.copied_so_far += ret_size;
-+			if (dataex.copied_so_far >= pak->data_len + 4) {
-+				DPRINTK("All data from this packet given to app.\n");
-+				/* All data given to app */
-+
-+				del_singleshot_timer_sync(&pak->processing_timer);
-+				list_del(&pak->next);
-+				list_add_tail(&pak->next, &dataex.current_pak);
-+				/*
-+				 * The more frontends that are handled at the same time,
-+				 * the more time we give the TPM to process the request.
-+				 */
-+				mod_timer(&pak->processing_timer,
-+				          jiffies + (num_frontends * 60 * HZ));
-+				dataex.copied_so_far = 0;
-+			}
-+		}
-+	}
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
-+
-+	DPRINTK("Returning result from read to app: %d\n", ret_size);
-+
-+	return ret_size;
-+}
-+
-+/*
-+ * Write operation - only works after a previous read operation!
-+ */
-+static ssize_t
-+vtpm_op_write(struct file *file, const char __user * data, size_t size,
-+	      loff_t * offset)
-+{
-+	struct packet *pak;
-+	int rc = 0;
-+	unsigned int off = 4;
-+	unsigned long flags;
-+	u32 instance_no = 0;
-+	u32 len_no = 0;
-+
-+	/*
-+	 * Minimum required packet size is:
-+	 * 4 bytes for instance number
-+	 * 2 bytes for tag
-+	 * 4 bytes for paramSize
-+	 * 4 bytes for the ordinal
-+	 * sum: 14 bytes
-+	 */
-+	if ( size < off + 10 ) {
-+		return -EFAULT;
-+	}
-+
-+	if (copy_from_user(&instance_no,
-+	                   (void __user *)&data[0],
-+	                   4)) {
-+		return -EFAULT;
-+	}
-+
-+	if (copy_from_user(&len_no,
-+	                   (void __user *)&data[off+2],
-+	                   4) ||
-+	    (off + ntohl(len_no) != size)) {
-+		return -EFAULT;
-+	}
-+
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+	pak = packet_find_instance(&dataex.current_pak, ntohl(instance_no));
-+
-+	if (pak == NULL) {
-+		write_unlock_irqrestore(&dataex.pak_lock, flags);
-+		printk(KERN_ALERT "No associated packet!\n");
-+		return -EFAULT;
-+	} else {
-+		del_singleshot_timer_sync(&pak->processing_timer);
-+		list_del(&pak->next);
-+	}
-+
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
-+
-+	/*
-+	 * The first 'off' bytes are the instance number, which is
-+	 * taken from the packet itself rather than from this buffer.
-+	 */
-+	size -= off;
-+	data = &data[off];
-+
-+	rc = packet_write(pak, data, size, 1);
-+
-+	if (rc > 0) {
-+		/* account for the 4 instance-number bytes skipped above */
-+		rc += off;
-+	}
-+	packet_free(pak);
-+	return rc;
-+}
-+
-+static int
-+vtpm_op_release(struct inode *inode, struct file *file)
-+{
-+	unsigned long flags;
-+	vtpm_release_packets(NULL, 1);
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+	dataex.has_opener = 0;
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
-+	return 0;
-+}
-+
-+static unsigned int
-+vtpm_op_poll(struct file *file, struct poll_table_struct *pts)
-+{
-+	unsigned int flags = POLLOUT | POLLWRNORM;
-+	poll_wait(file, &dataex.wait_queue, pts);
-+	if (!list_empty(&dataex.pending_pak)) {
-+		flags |= POLLIN | POLLRDNORM;
-+	}
-+	return flags;
-+}
-+
-+static struct file_operations vtpm_ops = {
-+	.owner = THIS_MODULE,
-+	.llseek = no_llseek,
-+	.open = vtpm_op_open,
-+	.read = vtpm_op_read,
-+	.write = vtpm_op_write,
-+	.release = vtpm_op_release,
-+	.poll = vtpm_op_poll,
-+};
-+
-+static struct miscdevice ibmvtpms_miscdevice = {
-+	.minor = 225,
-+	.name = "vtpm",
-+	.fops = &vtpm_ops,
-+};
-+
-+
-+/***************************************************************
-+ Virtual TPM functions and data structures
-+***************************************************************/
-+
-+static u8 create_cmd[] = {
-+        1,193,		/* 0: TPM_TAG_RQU_COMMAND */
-+        0,0,0,19,	/* 2: length */
-+        0,0,0,0x1,	/* 6: VTPM_ORD_OPEN */
-+        0,		/* 10: VTPM type */
-+        0,0,0,0,	/* 11: domain id */
-+        0,0,0,0		/* 15: instance id */
-+};
-+
-+static u8 destroy_cmd[] = {
-+        1,193,		/* 0: TPM_TAG_RQU_COMMAND */
-+        0,0,0,14,	/* 2: length */
-+        0,0,0,0x2,	/* 6: VTPM_ORD_CLOSE */
-+        0,0,0,0		/* 10: instance id */
-+};
-+
-+int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domid, u32 instance)
-+{
-+	int rc = 0;
-+	struct packet *pak;
-+
-+	pak = packet_alloc(tpmif,
-+	                   sizeof(create_cmd),
-+	                   create_cmd[0],
-+	                   PACKET_FLAG_DISCARD_RESPONSE|
-+	                   PACKET_FLAG_CHECK_RESPONSESTATUS);
-+	if (pak) {
-+		u8 buf[sizeof(create_cmd)];
-+		u32 domid_no = htonl((u32)domid);
-+		u32 instance_no = htonl(instance);
-+		memcpy(buf, create_cmd, sizeof(create_cmd));
-+
-+		memcpy(&buf[11], &domid_no, sizeof(u32));
-+		memcpy(&buf[15], &instance_no, sizeof(u32));
-+
-+		/* copy the buffer into the packet */
-+		rc = packet_set(pak, buf, sizeof(buf));
-+
-+		if (rc == 0) {
-+			pak->tpm_instance = 0;
-+			rc = vtpm_queue_packet(pak);
-+		}
-+		if (rc < 0) {
-+			/* could not be queued or built */
-+			packet_free(pak);
-+		}
-+	} else {
-+		rc = -ENOMEM;
-+	}
-+	return rc;
-+}
-+
-+int tpmif_vtpm_close(u32 instid)
-+{
-+	int rc = 0;
-+	struct packet *pak;
-+
-+	pak = packet_alloc(NULL,
-+	                   sizeof(destroy_cmd),
-+	                   destroy_cmd[0],
-+	                   PACKET_FLAG_DISCARD_RESPONSE);
-+	if (pak) {
-+		u8 buf[sizeof(destroy_cmd)];
-+		u32 instid_no = htonl(instid);
-+		memcpy(buf, destroy_cmd, sizeof(destroy_cmd));
-+		memcpy(&buf[10], &instid_no, sizeof(u32));
-+
-+		/* copy the buffer into the packet */
-+		rc = packet_set(pak, buf, sizeof(buf));
-+
-+		if (rc == 0) {
-+			pak->tpm_instance = 0;
-+			rc = vtpm_queue_packet(pak);
-+		}
-+		if (rc < 0) {
-+			/* could not be queued or built */
-+			packet_free(pak);
-+		}
-+	} else {
-+		rc = -ENOMEM;
-+	}
-+	return rc;
-+}
-+
-+
-+/***************************************************************
-+ Utility functions
-+***************************************************************/
-+
-+static int
-+tpm_send_fail_message(struct packet *pak, u8 req_tag)
-+{
-+	int rc;
-+	static const unsigned char tpm_error_message_fail[] = {
-+		0x00, 0x00,
-+		0x00, 0x00, 0x00, 0x0a,
-+		0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
-+	};
-+	unsigned char buffer[sizeof(tpm_error_message_fail)];
-+
-+	memcpy(buffer, tpm_error_message_fail, sizeof(tpm_error_message_fail));
-+	/*
-+	 * Insert the right response tag depending on the given tag
-+	 * All response tags are '+3' to the request tag.
-+	 */
-+	buffer[1] = req_tag + 3;
-+
-+	/*
-+	 * Write the data to shared memory and notify the front-end
-+	 */
-+	rc = packet_write(pak, buffer, sizeof(buffer), 0);
-+
-+	return rc;
-+}
-+
-+
-+static void
-+_vtpm_release_packets(struct list_head *head, tpmif_t *tpmif,
-+                      int send_msgs)
-+{
-+	struct packet *pak;
-+	struct list_head *pos, *tmp;
-+
-+	list_for_each_safe(pos, tmp, head) {
-+		pak = list_entry(pos, struct packet, next);
-+		if (tpmif == NULL || pak->tpmif == tpmif) {
-+			int can_send = 0;
-+			del_singleshot_timer_sync(&pak->processing_timer);
-+			list_del(&pak->next);
-+
-+			if (pak->tpmif && pak->tpmif->status == CONNECTED) {
-+				can_send = 1;
-+			}
-+
-+			if (send_msgs && can_send) {
-+				tpm_send_fail_message(pak, pak->req_tag);
-+			}
-+			packet_free(pak);
-+		}
-+	}
-+}
-+
-+
-+int
-+vtpm_release_packets(tpmif_t *tpmif, int send_msgs)
-+{
-+	unsigned long flags;
-+
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+
-+	_vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
-+	_vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
-+
-+	write_unlock_irqrestore(&dataex.pak_lock,
-+	                        flags);
-+	return 0;
-+}
-+
-+
-+static int vtpm_queue_packet(struct packet *pak)
-+{
-+	int rc = 0;
-+	if (dataex.has_opener) {
-+		unsigned long flags;
-+		write_lock_irqsave(&dataex.pak_lock, flags);
-+		list_add_tail(&pak->next, &dataex.pending_pak);
-+		/* give the TPM some time to pick up the request */
-+		mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
-+		write_unlock_irqrestore(&dataex.pak_lock,
-+		                        flags);
-+
-+		wake_up_interruptible(&dataex.wait_queue);
-+	} else {
-+		rc = -EFAULT;
-+	}
-+	return rc;
-+}
-+
-+
-+static int vtpm_receive(tpmif_t *tpmif, u32 size)
-+{
-+	int rc = 0;
-+	unsigned char buffer[10];
-+	__be32 *native_size;
-+
-+	struct packet *pak = packet_alloc(tpmif, size, buffer[4], 0);
-+	if (NULL == pak) {
-+		return -ENOMEM;
-+	}
-+	/*
-+	 * Read 10 bytes from the received buffer to test its
-+	 * content for validity.
-+	 */
-+	if (sizeof(buffer) != packet_read(pak,
-+	                                  sizeof(buffer), buffer,
-+	                                  sizeof(buffer), 0)) {
-+		goto failexit;
-+	}
-+	/*
-+	 * Reset the packet read pointer so we can read all its
-+	 * contents again.
-+	 */
-+	packet_reset(pak);
-+
-+	native_size = (__force __be32 *)(&buffer[4+2]);
-+	/*
-+	 * Verify that the size of the packet is correct
-+	 * as indicated and that there's actually someone reading packets.
-+	 * The minimum size of the packet is '10' for tag, size indicator
-+	 * and ordinal.
-+	 */
-+	if (size < 10 ||
-+	    be32_to_cpu(*native_size) != size ||
-+	    0 == dataex.has_opener ||
-+	    tpmif->status != CONNECTED) {
-+	    	rc = -EINVAL;
-+	    	goto failexit;
-+	} else {
-+		if ((rc = vtpm_queue_packet(pak)) < 0) {
-+			goto failexit;
-+		}
-+	}
-+	return 0;
-+
-+failexit:
-+	if (pak) {
-+		tpm_send_fail_message(pak, buffer[4+1]);
-+		packet_free(pak);
-+	}
-+	return rc;
-+}
-+
-+
-+/*
-+ * Timeout function that gets invoked when a packet has not been processed
-+ * during the timeout period.
-+ * The packet must be on a list when this function is invoked. This
-+ * also means that once it's taken off a list, the timer must be
-+ * destroyed as well.
-+ */
-+static void processing_timeout(unsigned long ptr)
-+{
-+	struct packet *pak = (struct packet *)ptr;
-+	unsigned long flags;
-+	write_lock_irqsave(&dataex.pak_lock, flags);
-+	/*
-+	 * The packet needs to be searched whether it
-+	 * is still on the list.
-+	 */
-+	if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
-+	    pak == packet_find_packet(&dataex.current_pak, pak) ) {
-+		list_del(&pak->next);
-+		tpm_send_fail_message(pak, pak->req_tag);
-+		packet_free(pak);
-+	}
-+
-+	write_unlock_irqrestore(&dataex.pak_lock, flags);
-+}
-+
-+
-+
-+static void tpm_tx_action(unsigned long unused);
-+static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
-+
-+#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE
-+
-+static struct list_head tpm_schedule_list;
-+static spinlock_t tpm_schedule_list_lock;
-+
-+static inline void
-+maybe_schedule_tx_action(void)
-+{
-+	smp_mb();
-+	tasklet_schedule(&tpm_tx_tasklet);
-+}
-+
-+static inline int
-+__on_tpm_schedule_list(tpmif_t * tpmif)
-+{
-+	return tpmif->list.next != NULL;
-+}
-+
-+static void
-+remove_from_tpm_schedule_list(tpmif_t * tpmif)
-+{
-+	spin_lock_irq(&tpm_schedule_list_lock);
-+	if (likely(__on_tpm_schedule_list(tpmif))) {
-+		list_del(&tpmif->list);
-+		tpmif->list.next = NULL;
-+		tpmif_put(tpmif);
-+	}
-+	spin_unlock_irq(&tpm_schedule_list_lock);
-+}
-+
-+static void
-+add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
-+{
-+	if (__on_tpm_schedule_list(tpmif))
-+		return;
-+
-+	spin_lock_irq(&tpm_schedule_list_lock);
-+	if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
-+		list_add_tail(&tpmif->list, &tpm_schedule_list);
-+		tpmif_get(tpmif);
-+	}
-+	spin_unlock_irq(&tpm_schedule_list_lock);
-+}
-+
-+void
-+tpmif_schedule_work(tpmif_t * tpmif)
-+{
-+	add_to_tpm_schedule_list_tail(tpmif);
-+	maybe_schedule_tx_action();
-+}
-+
-+void
-+tpmif_deschedule_work(tpmif_t * tpmif)
-+{
-+	remove_from_tpm_schedule_list(tpmif);
-+}
-+
-+
-+static void
-+tpm_tx_action(unsigned long unused)
-+{
-+	struct list_head *ent;
-+	tpmif_t *tpmif;
-+	tpmif_tx_request_t *tx;
-+
-+	DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
-+
-+	while (!list_empty(&tpm_schedule_list)) {
-+		/* Get a tpmif from the list with work to do. */
-+		ent = tpm_schedule_list.next;
-+		tpmif = list_entry(ent, tpmif_t, list);
-+		tpmif_get(tpmif);
-+		remove_from_tpm_schedule_list(tpmif);
-+		/*
-+		 * Ensure that we see the request when we read from it.
-+		 */
-+		mb();
-+
-+		tx = &tpmif->tx->ring[0].req;
-+
-+		/* pass it up */
-+		vtpm_receive(tpmif, tx->size);
-+
-+		tpmif_put(tpmif);
-+	}
-+}
-+
-+irqreturn_t
-+tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
-+{
-+	tpmif_t *tpmif = dev_id;
-+	add_to_tpm_schedule_list_tail(tpmif);
-+	maybe_schedule_tx_action();
-+	return IRQ_HANDLED;
-+}
-+
-+static int __init
-+tpmback_init(void)
-+{
-+	int rc;
-+
-+	if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) {
-+		printk(KERN_ALERT "Could not register misc device for TPM BE.\n");
-+		return rc;
-+	}
-+
-+	INIT_LIST_HEAD(&dataex.pending_pak);
-+	INIT_LIST_HEAD(&dataex.current_pak);
-+	dataex.has_opener = 0;
-+	rwlock_init(&dataex.pak_lock);
-+	init_waitqueue_head(&dataex.wait_queue);
-+
-+	spin_lock_init(&tpm_schedule_list_lock);
-+	INIT_LIST_HEAD(&tpm_schedule_list);
-+
-+	tpmif_interface_init();
-+	tpmif_xenbus_init();
-+
-+	printk(KERN_INFO "Successfully initialized TPM backend driver.\n");
-+
-+	return 0;
-+}
-+
-+__initcall(tpmback_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
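
The misc device registered above ("vtpm", minor 225) is the interface a userspace vTPM
manager uses to talk to this backend: every read() yields the 4-byte vTPM instance number
in network byte order followed by the TPM request, and the reply written back must again
start with that 4-byte instance number followed by a well-formed TPM response. A minimal
userspace sketch of that loop (the /dev/vtpm node path and the canned TPM_FAIL reply are
illustrative assumptions, not part of this patch):

/* Hypothetical vTPM-manager loop for the tpmback misc device;
 * assumes each request fits into a single read(). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned char buf[4096];
	uint32_t instance;
	ssize_t n;
	int fd = open("/dev/vtpm", O_RDWR);	/* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		/* vtpm_op_read() prepends the 4-byte instance number
		 * (network byte order) to every request. */
		n = read(fd, buf, sizeof(buf));
		if (n < 4)
			break;
		memcpy(&instance, buf, 4);
		printf("instance %u: %zd request bytes\n",
		       (unsigned)ntohl(instance), n - 4);

		/* Echo the instance back, followed by a canned 10-byte
		 * TPM_FAIL response; a real manager would pass the
		 * command to a (virtual) TPM here instead. */
		{
			unsigned char rsp[4 + 10] = {
				0, 0, 0, 0,             /* instance, filled in below */
				0x00, 0xc4,             /* TPM_TAG_RSP_COMMAND */
				0x00, 0x00, 0x00, 0x0a, /* paramSize = 10 */
				0x00, 0x00, 0x00, 0x09  /* TPM_FAIL */
			};
			memcpy(rsp, &instance, 4);
			if (write(fd, rsp, sizeof(rsp)) < 0)
				perror("write");
		}
	}
	close(fd);
	return 0;
}

Note that vtpm_op_write() requires the written length to equal 4 plus the paramSize field
of the response, so whatever is echoed back has to be internally consistent.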
-diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmback/xenbus.c linux-2.6.12-xen/drivers/xen/tpmback/xenbus.c
---- pristine-linux-2.6.12/drivers/xen/tpmback/xenbus.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/tpmback/xenbus.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,328 @@
-+/*  Xenbus code for tpmif backend
-+    Copyright (C) 2005 IBM Corporation
-+    Copyright (C) 2005 Rusty Russell <rusty at rustcorp.com.au>
-+
-+    This program is free software; you can redistribute it and/or modify
-+    it under the terms of the GNU General Public License as published by
-+    the Free Software Foundation; either version 2 of the License, or
-+    (at your option) any later version.
-+
-+    This program is distributed in the hope that it will be useful,
-+    but WITHOUT ANY WARRANTY; without even the implied warranty of
-+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+    GNU General Public License for more details.
-+
-+    You should have received a copy of the GNU General Public License
-+    along with this program; if not, write to the Free Software
-+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-+*/
-+#include <stdarg.h>
-+#include <linux/module.h>
-+#include <asm-xen/xenbus.h>
-+#include "common.h"
-+
-+struct backend_info
-+{
-+	struct xenbus_device *dev;
-+
-+	/* our communications channel */
-+	tpmif_t *tpmif;
-+
-+	long int frontend_id;
-+	long int instance; // instance of TPM
-+	u8 is_instance_set;// whether instance number has been set
-+
-+	/* watch front end for changes */
-+	struct xenbus_watch backend_watch;
-+	XenbusState frontend_state;
-+};
-+
-+static void maybe_connect(struct backend_info *be);
-+static void connect(struct backend_info *be);
-+static int connect_ring(struct backend_info *be);
-+static void backend_changed(struct xenbus_watch *watch,
-+                            const char **vec, unsigned int len);
-+static void frontend_changed(struct xenbus_device *dev,
-+                             XenbusState frontend_state);
-+
-+static int tpmback_remove(struct xenbus_device *dev)
-+{
-+	struct backend_info *be = dev->data;
-+
-+	if (be->backend_watch.node) {
-+		unregister_xenbus_watch(&be->backend_watch);
-+		kfree(be->backend_watch.node);
-+		be->backend_watch.node = NULL;
-+	}
-+	if (be->tpmif) {
-+		tpmif_put(be->tpmif);
-+		be->tpmif = NULL;
-+	}
-+	kfree(be);
-+	dev->data = NULL;
-+	return 0;
-+}
-+
-+static int tpmback_probe(struct xenbus_device *dev,
-+                         const struct xenbus_device_id *id)
-+{
-+	int err;
-+	struct backend_info *be = kmalloc(sizeof(struct backend_info),
-+	                                  GFP_KERNEL);
-+
-+	if (!be) {
-+		xenbus_dev_fatal(dev, -ENOMEM,
-+		                 "allocating backend structure");
-+		return -ENOMEM;
-+	}
-+
-+	memset(be, 0, sizeof(*be));
-+
-+	be->is_instance_set = 0;
-+	be->dev = dev;
-+	dev->data = be;
-+
-+	err = xenbus_watch_path2(dev, dev->nodename,
-+	                        "instance", &be->backend_watch,
-+	                        backend_changed);
-+	if (err) {
-+		goto fail;
-+	}
-+
-+	err = xenbus_switch_state(dev, XBT_NULL, XenbusStateInitWait);
-+	if (err) {
-+		goto fail;
-+	}
-+	return 0;
-+fail:
-+	tpmback_remove(dev);
-+	return err;
-+}
-+
-+
-+static void backend_changed(struct xenbus_watch *watch,
-+                            const char **vec, unsigned int len)
-+{
-+	int err;
-+	long instance;
-+	struct backend_info *be
-+		= container_of(watch, struct backend_info, backend_watch);
-+	struct xenbus_device *dev = be->dev;
-+
-+	err = xenbus_scanf(XBT_NULL, dev->nodename,
-+	                   "instance","%li", &instance);
-+	if (XENBUS_EXIST_ERR(err)) {
-+		return;
-+	}
-+
-+	if (err != 1) {
-+		xenbus_dev_fatal(dev, err, "reading instance");
-+		return;
-+	}
-+
-+	if (be->is_instance_set != 0 && be->instance != instance) {
-+		printk(KERN_WARNING
-+		       "tpmback: changing instance (from %ld to %ld) "
-+		       "not allowed.\n",
-+		       be->instance, instance);
-+		return;
-+	}
-+
-+	if (be->is_instance_set == 0) {
-+		be->tpmif = tpmif_find(dev->otherend_id,
-+		                       instance);
-+		if (IS_ERR(be->tpmif)) {
-+			err = PTR_ERR(be->tpmif);
-+			be->tpmif = NULL;
-+			xenbus_dev_fatal(dev, err, "creating tpm interface");
-+			return;
-+		}
-+		be->instance = instance;
-+		be->is_instance_set = 1;
-+
-+		/*
-+		 * There's an unfortunate problem:
-+		 * Sometimes after a suspend/resume the
-+		 * state switch to XenbusStateInitialised happens
-+		 * *before* we get to this point. In that case the
-+		 * earlier connect_ring() must have failed (be->tpmif
-+		 * was still NULL), so call it again here indirectly.
-+		 */
-+		if (be->frontend_state == XenbusStateInitialised) {
-+			frontend_changed(dev, be->frontend_state);
-+		}
-+	}
-+}
-+
-+
-+static void frontend_changed(struct xenbus_device *dev,
-+                             XenbusState frontend_state)
-+{
-+	struct backend_info *be = dev->data;
-+	int err;
-+
-+	be->frontend_state = frontend_state;
-+
-+	switch (frontend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateConnected:
-+		break;
-+
-+	case XenbusStateInitialised:
-+		err = connect_ring(be);
-+		if (err) {
-+			return;
-+		}
-+		maybe_connect(be);
-+		break;
-+
-+	case XenbusStateClosing:
-+		xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
-+		break;
-+
-+	case XenbusStateClosed:
-+		/*
-+		 * Notify the vTPM manager about the front-end
-+		 * having left.
-+		 */
-+		tpmif_vtpm_close(be->instance);
-+		device_unregister(&be->dev->dev);
-+		break;
-+
-+	case XenbusStateUnknown:
-+	case XenbusStateInitWait:
-+	default:
-+		xenbus_dev_fatal(dev, -EINVAL,
-+		                 "saw state %d at frontend",
-+		                 frontend_state);
-+		break;
-+	}
-+}
-+
-+
-+
-+static void maybe_connect(struct backend_info *be)
-+{
-+	int err;
-+
-+	if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
-+		return;
-+
-+	connect(be);
-+
-+	/*
-+	 * Notify the vTPM manager about a new front-end.
-+	 */
-+	err = tpmif_vtpm_open(be->tpmif,
-+	                      be->frontend_id,
-+	                      be->instance);
-+	if (err) {
-+		xenbus_dev_error(be->dev, err,
-+		                 "queueing vtpm open packet");
-+		/*
-+		 * Should close down this device and notify FE
-+		 * about closure.
-+		 */
-+		return;
-+	}
-+}
-+
-+
-+static void connect(struct backend_info *be)
-+{
-+	xenbus_transaction_t xbt;
-+	int err;
-+	struct xenbus_device *dev = be->dev;
-+	unsigned long ready = 1;
-+
-+again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err) {
-+		xenbus_dev_fatal(be->dev, err, "starting transaction");
-+		return;
-+	}
-+
-+	err = xenbus_printf(xbt, be->dev->nodename,
-+	                    "ready", "%lu", ready);
-+	if (err) {
-+		xenbus_dev_fatal(be->dev, err, "writing 'ready'");
-+		goto abort;
-+	}
-+
-+	err = xenbus_switch_state(dev, xbt, XenbusStateConnected);
-+	if (err)
-+		goto abort;
-+
-+	be->tpmif->status = CONNECTED;
-+
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN)
-+		goto again;
-+	if (err) {
-+		xenbus_dev_fatal(be->dev, err, "end of transaction");
-+	}
-+	return;
-+abort:
-+	xenbus_transaction_end(xbt, 1);
-+}
-+
-+
-+static int connect_ring(struct backend_info *be)
-+{
-+	struct xenbus_device *dev = be->dev;
-+	unsigned long ring_ref;
-+	unsigned int evtchn;
-+	int err;
-+
-+	err = xenbus_gather(XBT_NULL, dev->otherend,
-+	                    "ring-ref", "%lu", &ring_ref,
-+			    "event-channel", "%u", &evtchn, NULL);
-+	if (err) {
-+		xenbus_dev_error(dev, err,
-+				 "reading %s/ring-ref and event-channel",
-+				 dev->otherend);
-+		return err;
-+	}
-+	if (be->tpmif != NULL) {
-+		err = tpmif_map(be->tpmif, ring_ref, evtchn);
-+		if (err) {
-+			xenbus_dev_error(dev, err,
-+			    	         "mapping shared-frame %lu port %u",
-+				         ring_ref, evtchn);
-+			return err;
-+		}
-+	}
-+	return 0;
-+}
-+
-+
-+static struct xenbus_device_id tpmback_ids[] = {
-+	{ "vtpm" },
-+	{ "" }
-+};
-+
-+
-+static struct xenbus_driver tpmback = {
-+	.name = "vtpm",
-+	.owner = THIS_MODULE,
-+	.ids = tpmback_ids,
-+	.probe = tpmback_probe,
-+	.remove = tpmback_remove,
-+	.otherend_changed = frontend_changed,
-+};
-+
-+
-+void tpmif_xenbus_init(void)
-+{
-+	xenbus_register_backend(&tpmback);
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmfront/Makefile linux-2.6.12-xen/drivers/xen/tpmfront/Makefile
---- pristine-linux-2.6.12/drivers/xen/tpmfront/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/tpmfront/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+obj-$(CONFIG_XEN_TPMDEV_FRONTEND)	+= tpmfront.o
-diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmfront/tpmfront.c linux-2.6.12-xen/drivers/xen/tpmfront/tpmfront.c
---- pristine-linux-2.6.12/drivers/xen/tpmfront/tpmfront.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/tpmfront/tpmfront.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,703 @@
-+/*
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb at us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from drivers/xen/netfront/netfront.c
-+ *
-+ * Copyright (c) 2002-2004, K A Fraser
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/module.h>
-+#include <linux/version.h>
-+#include <linux/kernel.h>
-+#include <linux/slab.h>
-+#include <linux/errno.h>
-+#include <linux/interrupt.h>
-+#include <linux/init.h>
-+#include <asm-xen/tpmfe.h>
-+#include <linux/err.h>
-+
-+#include <asm/semaphore.h>
-+#include <asm/io.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/xen-public/grant_table.h>
-+#include <asm-xen/xen-public/io/tpmif.h>
-+#include <asm/uaccess.h>
-+#include <asm-xen/xenbus.h>
-+#include <asm-xen/xen-public/grant_table.h>
-+
-+#include "tpmfront.h"
-+
-+#undef DEBUG
-+
-+/* locally visible variables */
-+static grant_ref_t gref_head;
-+static struct tpm_private my_private;
-+
-+/* local function prototypes */
-+static irqreturn_t tpmif_int(int irq,
-+                             void *tpm_priv,
-+                             struct pt_regs *ptregs);
-+static void tpmif_rx_action(unsigned long unused);
-+static void tpmif_connect(u16 evtchn, domid_t domid);
-+static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
-+static int tpm_allocate_buffers(struct tpm_private *tp);
-+static void tpmif_set_connected_state(struct tpm_private *tp,
-+                                      u8 newstate);
-+static int tpm_xmit(struct tpm_private *tp,
-+                    const u8 * buf, size_t count, int userbuffer,
-+                    void *remember);
-+
-+#define DPRINTK(fmt, args...) \
-+    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
-+#define IPRINTK(fmt, args...) \
-+    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
-+#define WPRINTK(fmt, args...) \
-+    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
-+
-+
-+static inline int
-+tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
-+               int isuserbuffer)
-+{
-+	int copied = len;
-+
-+	if (len > txb->size) {
-+		copied = txb->size;
-+	}
-+	if (isuserbuffer) {
-+		if (copy_from_user(txb->data, src, copied))
-+			return -EFAULT;
-+	} else {
-+		memcpy(txb->data, src, copied);
-+	}
-+	txb->len = len;
-+	return copied;
-+}
-+
-+static inline struct tx_buffer *tx_buffer_alloc(void)
-+{
-+	struct tx_buffer *txb = kmalloc(sizeof (struct tx_buffer),
-+					GFP_KERNEL);
-+
-+	if (txb) {
-+		txb->len = 0;
-+		txb->size = PAGE_SIZE;
-+		txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
-+		if (txb->data == NULL) {
-+			kfree(txb);
-+			txb = NULL;
-+		}
-+	}
-+	return txb;
-+}
-+
-+
-+/**************************************************************
-+
-+ The interface to let the tpm plugin register its callback
-+ function and send data to another partition using this module
-+
-+**************************************************************/
-+
-+static DECLARE_MUTEX(upperlayer_lock);
-+static DECLARE_MUTEX(suspend_lock);
-+static struct tpmfe_device *upperlayer_tpmfe;
-+
-+/*
-+ * Send data via this module by calling this function
-+ */
-+int tpm_fe_send(const u8 * buf, size_t count, void *ptr)
-+{
-+	int sent = 0;
-+	struct tpm_private *tp = &my_private;
-+
-+	down(&suspend_lock);
-+	sent = tpm_xmit(tp, buf, count, 0, ptr);
-+	up(&suspend_lock);
-+
-+	return sent;
-+}
-+EXPORT_SYMBOL(tpm_fe_send);
-+
-+/*
-+ * Register a callback for receiving data from this module
-+ */
-+int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
-+{
-+	int rc = 0;
-+
-+	down(&upperlayer_lock);
-+	if (NULL == upperlayer_tpmfe) {
-+		upperlayer_tpmfe = tpmfe_dev;
-+		tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
-+	} else {
-+		rc = -EBUSY;
-+	}
-+	up(&upperlayer_lock);
-+	return rc;
-+}
-+EXPORT_SYMBOL(tpm_fe_register_receiver);
-+
-+/*
-+ * Unregister the callback for receiving data from this module
-+ */
-+void tpm_fe_unregister_receiver(void)
-+{
-+	down(&upperlayer_lock);
-+	upperlayer_tpmfe = NULL;
-+	up(&upperlayer_lock);
-+}
-+EXPORT_SYMBOL(tpm_fe_unregister_receiver);
-+
-+/*
-+ * Call this function to send data to the upper layer's
-+ * registered receiver function.
-+ */
-+static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
-+                                  const void *ptr)
-+{
-+	int rc = 0;
-+
-+	down(&upperlayer_lock);
-+
-+	if (upperlayer_tpmfe && upperlayer_tpmfe->receive)
-+		rc = upperlayer_tpmfe->receive(buf, count, ptr);
-+
-+	up(&upperlayer_lock);
-+	return rc;
-+}
-+
-+/**************************************************************
-+ XENBUS support code
-+**************************************************************/
-+
-+static int setup_tpmring(struct xenbus_device *dev,
-+                         struct tpmfront_info * info)
-+{
-+	tpmif_tx_interface_t *sring;
-+	struct tpm_private *tp = &my_private;
-+	int err;
-+
-+	sring = (void *)__get_free_page(GFP_KERNEL);
-+	if (!sring) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-+		return -ENOMEM;
-+	}
-+	tp->tx = sring;
-+
-+	tpm_allocate_buffers(tp);
-+
-+	err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
-+	if (err < 0) {
-+		free_page((unsigned long)sring);
-+		tp->tx = NULL;
-+		xenbus_dev_fatal(dev, err, "allocating grant reference");
-+		goto fail;
-+	}
-+	info->ring_ref = err;
-+
-+	err = xenbus_alloc_evtchn(dev, &tp->evtchn);
-+	if (err)
-+		goto fail;
-+
-+	tpmif_connect(tp->evtchn, dev->otherend_id);
-+
-+	return 0;
-+fail:
-+	return err;
-+}
-+
-+
-+static void destroy_tpmring(struct tpmfront_info *info, struct tpm_private *tp)
-+{
-+	tpmif_set_connected_state(tp, 0);
-+	if (tp->tx != NULL) {
-+		gnttab_end_foreign_access(info->ring_ref, 0,
-+					  (unsigned long)tp->tx);
-+		tp->tx = NULL;
-+	}
-+
-+	if (tp->irq)
-+		unbind_from_irqhandler(tp->irq, NULL);
-+	tp->evtchn = tp->irq = 0;
-+}
-+
-+
-+static int talk_to_backend(struct xenbus_device *dev,
-+                           struct tpmfront_info *info)
-+{
-+	const char *message = NULL;
-+	int err;
-+	xenbus_transaction_t xbt;
-+
-+	err = setup_tpmring(dev, info);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "setting up ring");
-+		goto out;
-+	}
-+
-+again:
-+	err = xenbus_transaction_start(&xbt);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "starting transaction");
-+		goto destroy_tpmring;
-+	}
-+
-+	err = xenbus_printf(xbt, dev->nodename,
-+	                    "ring-ref","%u", info->ring_ref);
-+	if (err) {
-+		message = "writing ring-ref";
-+		goto abort_transaction;
-+	}
-+
-+	err = xenbus_printf(xbt, dev->nodename,
-+			    "event-channel", "%u", my_private.evtchn);
-+	if (err) {
-+		message = "writing event-channel";
-+		goto abort_transaction;
-+	}
-+
-+	err = xenbus_switch_state(dev, xbt, XenbusStateInitialised);
-+	if (err) {
-+		goto abort_transaction;
-+	}
-+
-+	err = xenbus_transaction_end(xbt, 0);
-+	if (err == -EAGAIN)
-+		goto again;
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "completing transaction");
-+		goto destroy_tpmring;
-+	}
-+	return 0;
-+
-+abort_transaction:
-+	xenbus_transaction_end(xbt, 1);
-+	if (message)
-+		xenbus_dev_error(dev, err, "%s", message);
-+destroy_tpmring:
-+	destroy_tpmring(info, &my_private);
-+out:
-+	return err;
-+}
-+
-+/**
-+ * Callback received when the backend's state changes.
-+ */
-+static void backend_changed(struct xenbus_device *dev,
-+			    XenbusState backend_state)
-+{
-+	struct tpm_private *tp = &my_private;
-+	DPRINTK("\n");
-+
-+	switch (backend_state) {
-+	case XenbusStateInitialising:
-+	case XenbusStateInitWait:
-+	case XenbusStateInitialised:
-+	case XenbusStateUnknown:
-+		break;
-+
-+	case XenbusStateConnected:
-+		tpmif_set_connected_state(tp, 1);
-+		break;
-+
-+	case XenbusStateClosing:
-+		tpmif_set_connected_state(tp, 0);
-+		break;
-+
-+	case XenbusStateClosed:
-+        	if (tp->is_suspended == 0) {
-+        	        device_unregister(&dev->dev);
-+        	}
-+	        break;
-+	}
-+}
-+
-+
-+static int tpmfront_probe(struct xenbus_device *dev,
-+                          const struct xenbus_device_id *id)
-+{
-+	int err;
-+	struct tpmfront_info *info;
-+	int handle;
-+
-+	err = xenbus_scanf(XBT_NULL, dev->nodename,
-+	                   "handle", "%i", &handle);
-+	if (XENBUS_EXIST_ERR(err))
-+		return err;
-+
-+	if (err < 0) {
-+		xenbus_dev_fatal(dev, err, "reading handle");
-+		return err;
-+	}
-+
-+	info = kmalloc(sizeof(*info), GFP_KERNEL);
-+	if (!info) {
-+		err = -ENOMEM;
-+		xenbus_dev_fatal(dev,err,"allocating info structure");
-+		return err;
-+	}
-+	memset(info, 0x0, sizeof(*info));
-+
-+	info->dev = dev;
-+	dev->data = info;
-+
-+	err = talk_to_backend(dev, info);
-+	if (err) {
-+		kfree(info);
-+		dev->data = NULL;
-+		return err;
-+	}
-+	return 0;
-+}
-+
-+
-+static int tpmfront_remove(struct xenbus_device *dev)
-+{
-+	struct tpmfront_info *info = dev->data;
-+
-+	destroy_tpmring(info, &my_private);
-+
-+	kfree(info);
-+	return 0;
-+}
-+
-+static int
-+tpmfront_suspend(struct xenbus_device *dev)
-+{
-+	struct tpm_private *tp = &my_private;
-+	u32 ctr;
-+
-+	/* lock, so no app can send */
-+	down(&suspend_lock);
-+	tp->is_suspended = 1;
-+
-+	for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
-+		if ((ctr % 10) == 0)
-+			printk(KERN_INFO "TPM-FE [INFO]: Waiting for outstanding request.\n");
-+		/*
-+		 * Wait for a request to be responded to.
-+		 */
-+		interruptible_sleep_on_timeout(&tp->wait_q, 100);
-+	}
-+
-+	if (atomic_read(&tp->tx_busy)) {
-+		/*
-+		 * A temporary work-around.
-+		 */
-+		printk(KERN_WARNING "TPM-FE [WARNING]: Resetting busy flag.\n");
-+		atomic_set(&tp->tx_busy, 0);
-+	}
-+
-+	return 0;
-+}
-+
-+static int
-+tpmfront_resume(struct xenbus_device *dev)
-+{
-+	struct tpmfront_info *info = dev->data;
-+	return talk_to_backend(dev, info);
-+}
-+
-+static void
-+tpmif_connect(u16 evtchn, domid_t domid)
-+{
-+	int err;
-+	struct tpm_private *tp = &my_private;
-+
-+	tp->evtchn = evtchn;
-+	tp->backend_id = domid;
-+
-+	err = bind_evtchn_to_irqhandler(tp->evtchn,
-+					tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
-+					tp);
-+	if (err <= 0) {
-+		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
-+		return;
-+	}
-+
-+	tp->irq = err;
-+}
-+
-+static struct xenbus_device_id tpmfront_ids[] = {
-+	{ "vtpm" },
-+	{ "" }
-+};
-+
-+static struct xenbus_driver tpmfront = {
-+	.name = "vtpm",
-+	.owner = THIS_MODULE,
-+	.ids = tpmfront_ids,
-+	.probe = tpmfront_probe,
-+	.remove =  tpmfront_remove,
-+	.resume = tpmfront_resume,
-+	.otherend_changed = backend_changed,
-+	.suspend = tpmfront_suspend,
-+};
-+
-+static void __init init_tpm_xenbus(void)
-+{
-+	xenbus_register_frontend(&tpmfront);
-+}
-+
-+
-+static int
-+tpm_allocate_buffers(struct tpm_private *tp)
-+{
-+	unsigned int i;
-+
-+	for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
-+		tp->tx_buffers[i] = tx_buffer_alloc();
-+	return 1;
-+}
-+
-+static void
-+tpmif_rx_action(unsigned long unused)
-+{
-+	struct tpm_private *tp = &my_private;
-+
-+	int i = 0;
-+	unsigned int received;
-+	unsigned int offset = 0;
-+	u8 *buffer;
-+	tpmif_tx_request_t *tx;
-+	tx = &tp->tx->ring[i].req;
-+
-+	received = tx->size;
-+
-+	buffer = kmalloc(received, GFP_KERNEL);
-+	if (NULL == buffer) {
-+		goto exit;
-+	}
-+
-+	for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
-+		struct tx_buffer *txb = tp->tx_buffers[i];
-+		tpmif_tx_request_t *tx;
-+		unsigned int tocopy;
-+
-+		tx = &tp->tx->ring[i].req;
-+		tocopy = tx->size;
-+		if (tocopy > PAGE_SIZE) {
-+			tocopy = PAGE_SIZE;
-+		}
-+
-+		memcpy(&buffer[offset], txb->data, tocopy);
-+
-+		gnttab_release_grant_reference(&gref_head, tx->ref);
-+
-+		offset += tocopy;
-+	}
-+
-+	tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
-+	kfree(buffer);
-+
-+exit:
-+	atomic_set(&tp->tx_busy, 0);
-+	wake_up_interruptible(&tp->wait_q);
-+}
-+
-+
-+static irqreturn_t
-+tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
-+{
-+	struct tpm_private *tp = tpm_priv;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&tp->tx_lock, flags);
-+	tasklet_schedule(&tpmif_rx_tasklet);
-+	spin_unlock_irqrestore(&tp->tx_lock, flags);
-+
-+	return IRQ_HANDLED;
-+}
-+
-+
-+static int
-+tpm_xmit(struct tpm_private *tp,
-+         const u8 * buf, size_t count, int isuserbuffer,
-+         void *remember)
-+{
-+	tpmif_tx_request_t *tx;
-+	TPMIF_RING_IDX i;
-+	unsigned int offset = 0;
-+
-+	spin_lock_irq(&tp->tx_lock);
-+
-+	if (unlikely(atomic_read(&tp->tx_busy))) {
-+		printk(KERN_WARNING "tpm_xmit: There's an outstanding request/response "
-+		       "on the way!\n");
-+		spin_unlock_irq(&tp->tx_lock);
-+		return -EBUSY;
-+	}
-+
-+	if (tp->is_connected != 1) {
-+		spin_unlock_irq(&tp->tx_lock);
-+		return -EIO;
-+	}
-+
-+	for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
-+		struct tx_buffer *txb = tp->tx_buffers[i];
-+		int copied;
-+
-+		if (NULL == txb) {
-+			DPRINTK("txb (i=%d) is NULL. buffers initialized?\n"
-+				"Not transmitting anything!\n", i);
-+			spin_unlock_irq(&tp->tx_lock);
-+			return -EFAULT;
-+		}
-+		copied = tx_buffer_copy(txb, &buf[offset], count,
-+		                        isuserbuffer);
-+		if (copied < 0) {
-+			/* An error occurred */
-+			spin_unlock_irq(&tp->tx_lock);
-+			return copied;
-+		}
-+		count -= copied;
-+		offset += copied;
-+
-+		tx = &tp->tx->ring[i].req;
-+
-+		tx->id = i;
-+		tx->addr = virt_to_machine(txb->data);
-+		tx->size = txb->len;
-+
-+		DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
-+		        txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
-+
-+		/* get the granttable reference for this page */
-+		tx->ref = gnttab_claim_grant_reference(&gref_head);
-+
-+		if (-ENOSPC == tx->ref) {
-+			spin_unlock_irq(&tp->tx_lock);
-+			DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
-+			return -ENOSPC;
-+		}
-+		gnttab_grant_foreign_access_ref( tx->ref,
-+		                                 tp->backend_id,
-+		                                 (tx->addr >> PAGE_SHIFT),
-+		                                 0 /*RW*/);
-+		wmb();
-+	}
-+
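-+	/* Mark the request as in flight; the barrier orders this update
-+	 * ahead of the event-channel notification below. */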
-+	atomic_set(&tp->tx_busy, 1);
-+	tp->tx_remember = remember;
-+	mb();
-+
-+	DPRINTK("Notifying backend via event channel %d\n",
-+	        tp->evtchn);
-+
-+	notify_remote_via_irq(tp->irq);
-+
-+	spin_unlock_irq(&tp->tx_lock);
-+	return offset;
-+}
-+
-+
-+static void tpmif_notify_upperlayer(struct tpm_private *tp)
-+{
-+	/*
-+	 * Notify upper layer about the state of the connection
-+	 * to the BE.
-+	 */
-+	down(&upperlayer_lock);
-+
-+	if (upperlayer_tpmfe != NULL) {
-+		if (tp->is_connected) {
-+			upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
-+		} else {
-+			upperlayer_tpmfe->status(0);
-+		}
-+	}
-+	up(&upperlayer_lock);
-+}
-+
-+
-+static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
-+{
-+	/*
-+	 * Don't notify the upper layer if we are in suspend mode and
-+	 * should disconnect - the assumption is that we will resume.
-+	 * The semaphore keeps apps from sending.
-+	 */
-+	if (is_connected == 0 && tp->is_suspended == 1) {
-+		return;
-+	}
-+
-+	/*
-+	 * Unlock the semaphore if we are connected again
-+	 * after being suspended - now resuming.
-+	 * This also removes the suspend state.
-+	 */
-+	if (is_connected == 1 && tp->is_suspended == 1) {
-+		tp->is_suspended = 0;
-+		/* unlock, so apps can resume sending */
-+		up(&suspend_lock);
-+	}
-+
-+	if (is_connected != tp->is_connected) {
-+		tp->is_connected = is_connected;
-+		tpmif_notify_upperlayer(tp);
-+	}
-+}
-+
-+
-+/* =================================================================
-+ * Initialization function.
-+ * =================================================================
-+ */
-+
-+static int __init
-+tpmif_init(void)
-+{
-+	IPRINTK("Initialising the vTPM driver.\n");
-+	if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
-+	                                     &gref_head ) < 0) {
-+		return -EFAULT;
-+	}
-+	/*
-+	 * Only refrain from sending the driver status when we are
-+	 * in the INIT domain.
-+	 */
-+	spin_lock_init(&my_private.tx_lock);
-+	init_waitqueue_head(&my_private.wait_q);
-+
-+	init_tpm_xenbus();
-+
-+	return 0;
-+}
-+
-+__initcall(tpmif_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/tpmfront/tpmfront.h linux-2.6.12-xen/drivers/xen/tpmfront/tpmfront.h
---- pristine-linux-2.6.12/drivers/xen/tpmfront/tpmfront.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/tpmfront/tpmfront.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,43 @@
-+#ifndef TPM_FRONT_H
-+#define TPM_FRONT_H
-+
-+struct tpm_private {
-+	tpmif_tx_interface_t *tx;
-+	unsigned int evtchn;
-+	unsigned int irq;
-+	u8 is_connected;
-+	u8 is_suspended;
-+
-+	spinlock_t tx_lock;
-+
-+	struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
-+
-+	atomic_t tx_busy;
-+	void *tx_remember;
-+	domid_t backend_id;
-+	wait_queue_head_t wait_q;
-+
-+};
-+
-+struct tpmfront_info {
-+	struct xenbus_device *dev;
-+	int ring_ref;
-+};
-+
-+struct tx_buffer {
-+	unsigned int size;	/* available space in data */
-+	unsigned int len;	/* used space in data */
-+	unsigned char *data;	/* pointer to a page */
-+};
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/util.c linux-2.6.12-xen/drivers/xen/util.c
---- pristine-linux-2.6.12/drivers/xen/util.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/util.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,80 @@
-+#include <linux/config.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <asm/uaccess.h>
-+#include <asm-xen/driver_util.h>
-+
-+static int f(pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-+{
-+	/* generic_page_range() does all the hard work. */
-+	return 0;
-+}
-+
-+struct vm_struct *alloc_vm_area(unsigned long size)
-+{
-+	struct vm_struct *area;
-+
-+	area = get_vm_area(size, VM_IOREMAP);
-+	if (area == NULL)
-+		return NULL;
-+
-+	/*
-+	 * This ensures that page tables are constructed for this region
-+	 * of kernel virtual address space and mapped into init_mm.
-+	 */
-+	if (generic_page_range(&init_mm, (unsigned long)area->addr,
-+			       area->size, f, NULL)) {
-+		free_vm_area(area);
-+		return NULL;
-+	}
-+
-+	return area;
-+}
-+EXPORT_SYMBOL_GPL(alloc_vm_area);
-+
-+void free_vm_area(struct vm_struct *area)
-+{
-+	struct vm_struct *ret;
-+	ret = remove_vm_area(area->addr);
-+	BUG_ON(ret != area);
-+	kfree(area);
-+}
-+EXPORT_SYMBOL_GPL(free_vm_area);
-+
-+void lock_vm_area(struct vm_struct *area)
-+{
-+	unsigned long i;
-+	char c;
-+
-+	/*
-+	 * Prevent context switch to a lazy mm that doesn't have this area
-+	 * mapped into its page tables.
-+	 */
-+	preempt_disable();
-+
-+	/*
-+	 * Ensure that the page tables are mapped into the current mm. The
-+	 * page-fault path will copy the page directory pointers from init_mm.
-+	 */
-+	for (i = 0; i < area->size; i += PAGE_SIZE)
-+		(void)__get_user(c, (char __user *)area->addr + i);
-+}
-+EXPORT_SYMBOL_GPL(lock_vm_area);
-+
-+void unlock_vm_area(struct vm_struct *area)
-+{
-+	preempt_enable();
-+}
-+EXPORT_SYMBOL_GPL(unlock_vm_area);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/Makefile linux-2.6.12-xen/drivers/xen/xenbus/Makefile
---- pristine-linux-2.6.12/drivers/xen/xenbus/Makefile	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/xenbus/Makefile	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,8 @@
-+obj-y	+= xenbus.o
-+
-+xenbus-objs =
-+xenbus-objs += xenbus_client.o 
-+xenbus-objs += xenbus_comms.o
-+xenbus-objs += xenbus_xs.o
-+xenbus-objs += xenbus_probe.o 
-+xenbus-objs += xenbus_dev.o 
-diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_client.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_client.c
---- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_client.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_client.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,238 @@
-+/******************************************************************************
-+ * Client-facing interface for the Xenbus driver.  In other words, the
-+ * interface between the Xenbus and the device-specific code, be it the
-+ * frontend or the backend of that driver.
-+ *
-+ * Copyright (C) 2005 XenSource Ltd
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/gnttab.h>
-+#include <asm-xen/xenbus.h>
-+
-+/* xenbus_probe.c */
-+extern char *kasprintf(const char *fmt, ...);
-+
-+#define DPRINTK(fmt, args...) \
-+    pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+
-+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
-+		      struct xenbus_watch *watch, 
-+		      void (*callback)(struct xenbus_watch *,
-+				       const char **, unsigned int))
-+{
-+	int err;
-+
-+	watch->node = path;
-+	watch->callback = callback;
-+
-+	err = register_xenbus_watch(watch);
-+
-+	if (err) {
-+		watch->node = NULL;
-+		watch->callback = NULL;
-+		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
-+	}
-+
-+	return err;
-+}
-+EXPORT_SYMBOL(xenbus_watch_path);
-+
-+
-+int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
-+		       const char *path2, struct xenbus_watch *watch, 
-+		       void (*callback)(struct xenbus_watch *,
-+					const char **, unsigned int))
-+{
-+	int err;
-+	char *state = kasprintf("%s/%s", path, path2);
-+	if (!state) {
-+		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
-+		return -ENOMEM;
-+	}
-+	err = xenbus_watch_path(dev, state, watch, callback);
-+
-+	if (err) {
-+		kfree(state);
-+	}
-+	return err;
-+}
-+EXPORT_SYMBOL(xenbus_watch_path2);
-+
-+
-+int xenbus_switch_state(struct xenbus_device *dev,
-+			xenbus_transaction_t xbt,
-+			XenbusState state)
-+{
-+	/* We check whether the state is currently set to the given value, and
-+	   if not, then the state is set.  We don't want to unconditionally
-+	   write the given state, because we don't want to fire watches
-+	   unnecessarily.  Furthermore, if the node has gone, we don't write
-+	   to it, as the device will be tearing down, and we don't want to
-+	   resurrect that directory.
-+	 */
-+
-+	int current_state;
-+
-+	int err = xenbus_scanf(xbt, dev->nodename, "state", "%d",
-+			       &current_state);
-+	if ((err == 1 && (XenbusState)current_state == state) ||
-+	    err == -ENOENT)
-+		return 0;
-+
-+	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
-+	if (err) {
-+		xenbus_dev_fatal(dev, err, "writing new state");
-+		return err;
-+	}
-+	return 0;
-+}
-+EXPORT_SYMBOL(xenbus_switch_state);
-+
-+
-+/**
-+ * Return the path to the error node for the given device, or NULL on failure.
-+ * If the value returned is non-NULL, then it is the caller's to kfree.
-+ */
-+static char *error_path(struct xenbus_device *dev)
-+{
-+	return kasprintf("error/%s", dev->nodename);
-+}
-+
-+
-+void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+		va_list ap)
-+{
-+	int ret;
-+	unsigned int len;
-+	char *printf_buffer = NULL, *path_buffer = NULL;
-+
-+#define PRINTF_BUFFER_SIZE 4096
-+	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
-+	if (printf_buffer == NULL)
-+		goto fail;
-+
-+	len = sprintf(printf_buffer, "%i ", -err);
-+	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
-+
-+	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
-+	dev->has_error = 1;
-+
-+	path_buffer = error_path(dev);
-+
-+	if (path_buffer == NULL) {
-+		printk("xenbus: failed to write error node for %s (%s)\n",
-+		       dev->nodename, printf_buffer);
-+		goto fail;
-+	}
-+
-+	if (xenbus_write(XBT_NULL, path_buffer, "error", printf_buffer) != 0) {
-+		printk("xenbus: failed to write error node for %s (%s)\n",
-+		       dev->nodename, printf_buffer);
-+		goto fail;
-+	}
-+
-+fail:
-+	if (printf_buffer)
-+		kfree(printf_buffer);
-+	if (path_buffer)
-+		kfree(path_buffer);
-+}
-+
-+
-+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+		      ...)
-+{
-+	va_list ap;
-+
-+	va_start(ap, fmt);
-+	_dev_error(dev, err, fmt, ap);
-+	va_end(ap);
-+}
-+EXPORT_SYMBOL(xenbus_dev_error);
-+
-+
-+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
-+		      ...)
-+{
-+	va_list ap;
-+
-+	va_start(ap, fmt);
-+	_dev_error(dev, err, fmt, ap);
-+	va_end(ap);
-+	
-+	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosing);
-+}
-+EXPORT_SYMBOL(xenbus_dev_fatal);
-+
-+
-+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
-+{
-+	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
-+	if (err < 0)
-+		xenbus_dev_fatal(dev, err, "granting access to ring page");
-+	return err;
-+}
-+EXPORT_SYMBOL(xenbus_grant_ring);
-+
-+
-+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
-+{
-+	evtchn_op_t op = {
-+		.cmd = EVTCHNOP_alloc_unbound,
-+		.u.alloc_unbound.dom = DOMID_SELF,
-+		.u.alloc_unbound.remote_dom = dev->otherend_id };
-+
-+	int err = HYPERVISOR_event_channel_op(&op);
-+	if (err)
-+		xenbus_dev_fatal(dev, err, "allocating event channel");
-+	else
-+		*port = op.u.alloc_unbound.port;
-+	return err;
-+}
-+EXPORT_SYMBOL(xenbus_alloc_evtchn);
-+
-+
-+XenbusState xenbus_read_driver_state(const char *path)
-+{
-+	XenbusState result;
-+
-+	int err = xenbus_gather(XBT_NULL, path, "state", "%d", &result, NULL);
-+	if (err)
-+		result = XenbusStateClosed;
-+
-+	return result;
-+}
-+EXPORT_SYMBOL(xenbus_read_driver_state);
-+
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_comms.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_comms.c
---- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_comms.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_comms.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,203 @@
-+/******************************************************************************
-+ * xenbus_comms.c
-+ *
-+ * Low level code that talks to the Xen Store: ring buffer and event channel.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <asm/hypervisor.h>
-+#include <asm-xen/evtchn.h>
-+#include <linux/wait.h>
-+#include <linux/interrupt.h>
-+#include <linux/sched.h>
-+#include <linux/err.h>
-+#include <asm-xen/xenbus.h>
-+#include "xenbus_comms.h"
-+
-+static int xenbus_irq;
-+
-+extern void xenbus_probe(void *); 
-+extern int xenstored_ready; 
-+static DECLARE_WORK(probe_work, xenbus_probe, NULL);
-+
-+DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
-+
-+static inline struct xenstore_domain_interface *xenstore_domain_interface(void)
-+{
-+	return mfn_to_virt(xen_start_info->store_mfn);
-+}
-+
-+static irqreturn_t wake_waiting(int irq, void *unused, struct pt_regs *regs)
-+{
-+	if (unlikely(xenstored_ready == 0)) {
-+		xenstored_ready = 1; 
-+		schedule_work(&probe_work); 
-+	} 
-+
-+	wake_up(&xb_waitq);
-+	return IRQ_HANDLED;
-+}
-+
-+static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
-+{
-+	return ((prod - cons) <= XENSTORE_RING_SIZE);
-+}
-+
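-+/* Return the largest contiguous chunk that can be produced at 'prod'
-+ * without overtaking 'cons' or wrapping past the end of the ring. */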
-+static void *get_output_chunk(XENSTORE_RING_IDX cons,
-+			      XENSTORE_RING_IDX prod,
-+			      char *buf, uint32_t *len)
-+{
-+	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
-+	if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
-+		*len = XENSTORE_RING_SIZE - (prod - cons);
-+	return buf + MASK_XENSTORE_IDX(prod);
-+}
-+
-+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
-+				   XENSTORE_RING_IDX prod,
-+				   const char *buf, uint32_t *len)
-+{
-+	*len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
-+	if ((prod - cons) < *len)
-+		*len = prod - cons;
-+	return buf + MASK_XENSTORE_IDX(cons);
-+}
-+
-+int xb_write(const void *data, unsigned len)
-+{
-+	struct xenstore_domain_interface *intf = xenstore_domain_interface();
-+	XENSTORE_RING_IDX cons, prod;
-+
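-+	/* Produce the request into the shared ring a chunk at a time,
-+	 * sleeping whenever the ring is full. */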
-+	while (len != 0) {
-+		void *dst;
-+		unsigned int avail;
-+
-+		wait_event_interruptible(xb_waitq,
-+					 (intf->req_prod - intf->req_cons) !=
-+					 XENSTORE_RING_SIZE);
-+
-+		/* Read indexes, then verify. */
-+		cons = intf->req_cons;
-+		prod = intf->req_prod;
-+		mb();
-+		if (!check_indexes(cons, prod))
-+			return -EIO;
-+
-+		dst = get_output_chunk(cons, prod, intf->req, &avail);
-+		if (avail == 0)
-+			continue;
-+		if (avail > len)
-+			avail = len;
-+
-+		memcpy(dst, data, avail);
-+		data += avail;
-+		len -= avail;
-+
-+		/* Other side must not see new header until data is there. */
-+		wmb();
-+		intf->req_prod += avail;
-+
-+		/* This implies mb() before other side sees interrupt. */
-+		notify_remote_via_evtchn(xen_start_info->store_evtchn);
-+	}
-+
-+	return 0;
-+}
-+
-+int xb_read(void *data, unsigned len)
-+{
-+	struct xenstore_domain_interface *intf = xenstore_domain_interface();
-+	XENSTORE_RING_IDX cons, prod;
-+
-+	while (len != 0) {
-+		unsigned int avail;
-+		const char *src;
-+
-+		wait_event_interruptible(xb_waitq,
-+					 intf->rsp_cons != intf->rsp_prod);
-+
-+		/* Read indexes, then verify. */
-+		cons = intf->rsp_cons;
-+		prod = intf->rsp_prod;
-+		mb();
-+		if (!check_indexes(cons, prod))
-+			return -EIO;
-+
-+		src = get_input_chunk(cons, prod, intf->rsp, &avail);
-+		if (avail == 0)
-+			continue;
-+		if (avail > len)
-+			avail = len;
-+
-+		/* We must read header before we read data. */
-+		rmb();
-+
-+		memcpy(data, src, avail);
-+		data += avail;
-+		len -= avail;
-+
-+		/* Other side must not see free space until we've copied out */
-+		mb();
-+		intf->rsp_cons += avail;
-+
-+		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
-+
-+		/* Implies mb(): they will see new header. */
-+		notify_remote_via_evtchn(xen_start_info->store_evtchn);
-+	}
-+
-+	return 0;
-+}
-+
-+/* Set up interrupt handler off store event channel. */
-+int xb_init_comms(void)
-+{
-+	int err;
-+
-+	if (xenbus_irq)
-+		unbind_from_irqhandler(xenbus_irq, &xb_waitq);
-+
-+	err = bind_evtchn_to_irqhandler(
-+		xen_start_info->store_evtchn, wake_waiting,
-+		0, "xenbus", &xb_waitq);
-+	if (err <= 0) {
-+		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
-+		return err;
-+	}
-+
-+	xenbus_irq = err;
-+
-+	return 0;
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_comms.h linux-2.6.12-xen/drivers/xen/xenbus/xenbus_comms.h
---- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_comms.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_comms.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,50 @@
-+/*
-+ * Private include for xenbus communications.
-+ * 
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ *
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef _XENBUS_COMMS_H
-+#define _XENBUS_COMMS_H
-+
-+int xs_init(void);
-+int xb_init_comms(void);
-+
-+/* Low level routines. */
-+int xb_write(const void *data, unsigned len);
-+int xb_read(void *data, unsigned len);
-+int xs_input_avail(void);
-+extern wait_queue_head_t xb_waitq;
-+
-+#endif /* _XENBUS_COMMS_H */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_dev.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_dev.c
---- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_dev.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_dev.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,238 @@
-+/*
-+ * xenbus_dev.c
-+ * 
-+ * Driver giving user-space access to the kernel's xenbus connection
-+ * to xenstore.
-+ * 
-+ * Copyright (c) 2005, Christian Limpach
-+ * Copyright (c) 2005, Rusty Russell, IBM Corporation
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/errno.h>
-+#include <linux/uio.h>
-+#include <linux/notifier.h>
-+#include <linux/wait.h>
-+#include <linux/fs.h>
-+
-+#include "xenbus_comms.h"
-+
-+#include <asm/uaccess.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xenbus.h>
-+#include <asm-xen/xen_proc.h>
-+#include <asm/hypervisor.h>
-+
-+struct xenbus_dev_transaction {
-+	struct list_head list;
-+	xenbus_transaction_t handle;
-+};
-+
-+struct xenbus_dev_data {
-+	/* In-progress transaction. */
-+	struct list_head transactions;
-+
-+	/* Partial request. */
-+	unsigned int len;
-+	union {
-+		struct xsd_sockmsg msg;
-+		char buffer[PAGE_SIZE];
-+	} u;
-+
-+	/* Response queue. */
-+#define MASK_READ_IDX(idx) ((idx)&(PAGE_SIZE-1))
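-+	/* read_cons and read_prod are free-running indices into a
-+	 * PAGE_SIZE ring; they are masked only when the buffer is
-+	 * accessed. */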
-+	char read_buffer[PAGE_SIZE];
-+	unsigned int read_cons, read_prod;
-+	wait_queue_head_t read_waitq;
-+};
-+
-+static struct proc_dir_entry *xenbus_dev_intf;
-+
-+static ssize_t xenbus_dev_read(struct file *filp,
-+			       char __user *ubuf,
-+			       size_t len, loff_t *ppos)
-+{
-+	struct xenbus_dev_data *u = filp->private_data;
-+	int i;
-+
-+	if (wait_event_interruptible(u->read_waitq,
-+				     u->read_prod != u->read_cons))
-+		return -EINTR;
-+
-+	for (i = 0; i < len; i++) {
-+		if (u->read_cons == u->read_prod)
-+			break;
-+		put_user(u->read_buffer[MASK_READ_IDX(u->read_cons)], ubuf+i);
-+		u->read_cons++;
-+	}
-+
-+	return i;
-+}
-+
-+static void queue_reply(struct xenbus_dev_data *u,
-+			char *data, unsigned int len)
-+{
-+	int i;
-+
-+	for (i = 0; i < len; i++, u->read_prod++)
-+		u->read_buffer[MASK_READ_IDX(u->read_prod)] = data[i];
-+
-+	BUG_ON((u->read_prod - u->read_cons) > sizeof(u->read_buffer));
-+
-+	wake_up(&u->read_waitq);
-+}
-+
-+static ssize_t xenbus_dev_write(struct file *filp,
-+				const char __user *ubuf,
-+				size_t len, loff_t *ppos)
-+{
-+	struct xenbus_dev_data *u = filp->private_data;
-+	struct xenbus_dev_transaction *trans = NULL;
-+	void *reply;
-+
-+	if ((len + u->len) > sizeof(u->u.buffer))
-+		return -EINVAL;
-+
-+	if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0)
-+		return -EFAULT;
-+
-+	u->len += len;
-+	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
-+		return len;
-+
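-+	/* A complete message is buffered: forward it to xenstore, queue the
-+	 * reply for the reader, and track transaction handles so they can
-+	 * be aborted when the file is closed. */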
-+	switch (u->u.msg.type) {
-+	case XS_TRANSACTION_START:
-+	case XS_TRANSACTION_END:
-+	case XS_DIRECTORY:
-+	case XS_READ:
-+	case XS_GET_PERMS:
-+	case XS_RELEASE:
-+	case XS_GET_DOMAIN_PATH:
-+	case XS_WRITE:
-+	case XS_MKDIR:
-+	case XS_RM:
-+	case XS_SET_PERMS:
-+		if (u->u.msg.type == XS_TRANSACTION_START) {
-+			trans = kmalloc(sizeof(*trans), GFP_KERNEL);
-+			if (!trans)
-+				return -ENOMEM;
-+		}
-+
-+		reply = xenbus_dev_request_and_reply(&u->u.msg);
-+		if (IS_ERR(reply)) {
-+			kfree(trans);
-+			return PTR_ERR(reply);
-+		}
-+
-+		if (u->u.msg.type == XS_TRANSACTION_START) {
-+			trans->handle = simple_strtoul(reply, NULL, 0);
-+			list_add(&trans->list, &u->transactions);
-+		} else if (u->u.msg.type == XS_TRANSACTION_END) {
-+			list_for_each_entry(trans, &u->transactions, list)
-+				if (trans->handle == u->u.msg.tx_id)
-+					break;
-+			BUG_ON(&trans->list == &u->transactions);
-+			list_del(&trans->list);
-+			kfree(trans);
-+		}
-+		queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
-+		queue_reply(u, (char *)reply, u->u.msg.len);
-+		kfree(reply);
-+		break;
-+
-+	default:
-+		return -EINVAL;
-+	}
-+
-+	u->len = 0;
-+	return len;
-+}
-+
-+static int xenbus_dev_open(struct inode *inode, struct file *filp)
-+{
-+	struct xenbus_dev_data *u;
-+
-+	if (xen_start_info->store_evtchn == 0)
-+		return -ENOENT;
-+
-+	nonseekable_open(inode, filp);
-+
-+	u = kmalloc(sizeof(*u), GFP_KERNEL);
-+	if (u == NULL)
-+		return -ENOMEM;
-+
-+	memset(u, 0, sizeof(*u));
-+	INIT_LIST_HEAD(&u->transactions);
-+	init_waitqueue_head(&u->read_waitq);
-+
-+	filp->private_data = u;
-+
-+	return 0;
-+}
-+
-+static int xenbus_dev_release(struct inode *inode, struct file *filp)
-+{
-+	struct xenbus_dev_data *u = filp->private_data;
-+	struct xenbus_dev_transaction *trans, *tmp;
-+
-+	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
-+		xenbus_transaction_end(trans->handle, 1);
-+		list_del(&trans->list);
-+		kfree(trans);
-+	}
-+
-+	kfree(u);
-+
-+	return 0;
-+}
-+
-+static struct file_operations xenbus_dev_file_ops = {
-+	.read = xenbus_dev_read,
-+	.write = xenbus_dev_write,
-+	.open = xenbus_dev_open,
-+	.release = xenbus_dev_release,
-+};
-+
-+static int __init
-+xenbus_dev_init(void)
-+{
-+	xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400);
-+	if (xenbus_dev_intf)
-+		xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops;
-+
-+	return 0;
-+}
-+
-+__initcall(xenbus_dev_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_probe.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_probe.c
---- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_probe.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_probe.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,1016 @@
-+/******************************************************************************
-+ * Talks to Xen Store to figure out what devices we have.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
-+ * Copyright (C) 2005 XenSource Ltd
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#define DPRINTK(fmt, args...) \
-+    pr_debug("xenbus_probe (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
-+
-+#include <linux/kernel.h>
-+#include <linux/err.h>
-+#include <linux/string.h>
-+#include <linux/ctype.h>
-+#include <linux/fcntl.h>
-+#include <linux/mm.h>
-+#include <linux/notifier.h>
-+#include <linux/kthread.h>
-+
-+#include <asm/io.h>
-+#include <asm/page.h>
-+#include <asm/pgtable.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xenbus.h>
-+#include <asm-xen/xen_proc.h>
-+#include <asm-xen/balloon.h>
-+#include <asm-xen/evtchn.h>
-+#include <asm-xen/linux-public/evtchn.h>
-+
-+#include "xenbus_comms.h"
-+
-+extern struct semaphore xenwatch_mutex;
-+
-+#define streq(a, b) (strcmp((a), (b)) == 0)
-+
-+static struct notifier_block *xenstore_chain;
-+
-+/* If something in array of ids matches this device, return it. */
-+static const struct xenbus_device_id *
-+match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
-+{
-+	for (; !streq(arr->devicetype, ""); arr++) {
-+		if (streq(arr->devicetype, dev->devicetype))
-+			return arr;
-+	}
-+	return NULL;
-+}
-+
-+static int xenbus_match(struct device *_dev, struct device_driver *_drv)
-+{
-+	struct xenbus_driver *drv = to_xenbus_driver(_drv);
-+
-+	if (!drv->ids)
-+		return 0;
-+
-+	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
-+}
-+
-+struct xen_bus_type
-+{
-+	char *root;
-+	unsigned int levels;
-+	int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
-+	int (*probe)(const char *type, const char *dir);
-+	struct bus_type bus;
-+	struct device dev;
-+};
-+
-+
-+/* device/<type>/<id> => <type>-<id> */
-+static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
-+{
-+	nodename = strchr(nodename, '/');
-+	if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
-+		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
-+		return -EINVAL;
-+	}
-+
-+	strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
-+	if (!strchr(bus_id, '/')) {
-+		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
-+		return -EINVAL;
-+	}
-+	*strchr(bus_id, '/') = '-';
-+	return 0;
-+}
-+
-+
-+static int read_otherend_details(struct xenbus_device *xendev,
-+				 char *id_node, char *path_node)
-+{
-+	int err = xenbus_gather(XBT_NULL, xendev->nodename,
-+				id_node, "%i", &xendev->otherend_id,
-+				path_node, NULL, &xendev->otherend,
-+				NULL);
-+	if (err) {
-+		xenbus_dev_fatal(xendev, err,
-+				 "reading other end details from %s",
-+				 xendev->nodename);
-+		return err;
-+	}
-+	if (strlen(xendev->otherend) == 0 ||
-+	    !xenbus_exists(XBT_NULL, xendev->otherend, "")) {
-+		xenbus_dev_fatal(xendev, -ENOENT, "missing other end from %s",
-+				 xendev->nodename);
-+		kfree(xendev->otherend);
-+		xendev->otherend = NULL;
-+		return -ENOENT;
-+	}
-+
-+	return 0;
-+}
-+
-+
-+static int read_backend_details(struct xenbus_device *xendev)
-+{
-+	return read_otherend_details(xendev, "backend-id", "backend");
-+}
-+
-+
-+static int read_frontend_details(struct xenbus_device *xendev)
-+{
-+	return read_otherend_details(xendev, "frontend-id", "frontend");
-+}
-+
-+
-+static void free_otherend_details(struct xenbus_device *dev)
-+{
-+	kfree(dev->otherend);
-+	dev->otherend = NULL;
-+}
-+
-+
-+static void free_otherend_watch(struct xenbus_device *dev)
-+{
-+	if (dev->otherend_watch.node) {
-+		unregister_xenbus_watch(&dev->otherend_watch);
-+		kfree(dev->otherend_watch.node);
-+		dev->otherend_watch.node = NULL;
-+	}
-+}
-+
-+
-+/* Bus type for frontend drivers. */
-+static int xenbus_probe_frontend(const char *type, const char *name);
-+static struct xen_bus_type xenbus_frontend = {
-+	.root = "device",
-+	.levels = 2, 		/* device/type/<id> */
-+	.get_bus_id = frontend_bus_id,
-+	.probe = xenbus_probe_frontend,
-+	.bus = {
-+		.name  = "xen",
-+		.match = xenbus_match,
-+	},
-+	.dev = {
-+		.bus_id = "xen",
-+	},
-+};
-+
-+/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
-+static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
-+{
-+	int domid, err;
-+	const char *devid, *type, *frontend;
-+	unsigned int typelen;
-+
-+	type = strchr(nodename, '/');
-+	if (!type)
-+		return -EINVAL;
-+	type++;
-+	typelen = strcspn(type, "/");
-+	if (!typelen || type[typelen] != '/')
-+		return -EINVAL;
-+
-+	devid = strrchr(nodename, '/') + 1;
-+
-+	err = xenbus_gather(XBT_NULL, nodename, "frontend-id", "%i", &domid,
-+			    "frontend", NULL, &frontend,
-+			    NULL);
-+	if (err)
-+		return err;
-+	if (strlen(frontend) == 0)
-+		err = -ERANGE;
-+	if (!err && !xenbus_exists(XBT_NULL, frontend, ""))
-+		err = -ENOENT;
-+
-+	kfree(frontend);
-+
-+	if (err)
-+		return err;
-+
-+	if (snprintf(bus_id, BUS_ID_SIZE,
-+		     "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE)
-+		return -ENOSPC;
-+	return 0;
-+}
-+
-+static int xenbus_hotplug_backend(struct device *dev, char **envp,
-+				  int num_envp, char *buffer, int buffer_size);
-+static int xenbus_probe_backend(const char *type, const char *domid);
-+static struct xen_bus_type xenbus_backend = {
-+	.root = "backend",
-+	.levels = 3, 		/* backend/type/<frontend>/<id> */
-+	.get_bus_id = backend_bus_id,
-+	.probe = xenbus_probe_backend,
-+	.bus = {
-+		.name  = "xen-backend",
-+		.match = xenbus_match,
-+		.hotplug = xenbus_hotplug_backend,
-+	},
-+	.dev = {
-+		.bus_id = "xen-backend",
-+	},
-+};
-+
-+static int xenbus_hotplug_backend(struct device *dev, char **envp,
-+				  int num_envp, char *buffer, int buffer_size)
-+{
-+	struct xenbus_device *xdev;
-+	struct xenbus_driver *drv;
-+	int i = 0;
-+	int length = 0;
-+
-+	DPRINTK("");
-+
-+	if (dev == NULL)
-+		return -ENODEV;
-+
-+	xdev = to_xenbus_device(dev);
-+	if (xdev == NULL)
-+		return -ENODEV;
-+
-+	/* stuff we want to pass to /sbin/hotplug */
-+	add_hotplug_env_var(envp, num_envp, &i,
-+			    buffer, buffer_size, &length,
-+			    "XENBUS_TYPE=%s", xdev->devicetype);
-+
-+	add_hotplug_env_var(envp, num_envp, &i,
-+			    buffer, buffer_size, &length,
-+			    "XENBUS_PATH=%s", xdev->nodename);
-+
-+	add_hotplug_env_var(envp, num_envp, &i,
-+			    buffer, buffer_size, &length,
-+			    "XENBUS_BASE_PATH=%s", xenbus_backend.root);
-+
-+	/* terminate, set to next free slot, shrink available space */
-+	envp[i] = NULL;
-+	envp = &envp[i];
-+	num_envp -= i;
-+	buffer = &buffer[length];
-+	buffer_size -= length;
-+
-+	if (dev->driver) {
-+		drv = to_xenbus_driver(dev->driver);
-+		if (drv && drv->hotplug)
-+			return drv->hotplug(xdev, envp, num_envp, buffer,
-+					    buffer_size);
-+	}
-+
-+	return 0;
-+}
-+
-+static void otherend_changed(struct xenbus_watch *watch,
-+			     const char **vec, unsigned int len)
-+{
-+	struct xenbus_device *dev =
-+		container_of(watch, struct xenbus_device, otherend_watch);
-+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-+	XenbusState state;
-+
-+	/* Protect us against watches firing on old details when the otherend
-+	   details change, say immediately after a resume. */
-+	if (!dev->otherend ||
-+	    strncmp(dev->otherend, vec[XS_WATCH_PATH],
-+		    strlen(dev->otherend))) {
-+		DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
-+		return;
-+	}
-+
-+	state = xenbus_read_driver_state(dev->otherend);
-+
-+	DPRINTK("state is %d, %s, %s",
-+		state, dev->otherend_watch.node, vec[XS_WATCH_PATH]);
-+	if (drv->otherend_changed)
-+		drv->otherend_changed(dev, state);
-+}
-+
-+
-+static int talk_to_otherend(struct xenbus_device *dev)
-+{
-+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-+
-+	free_otherend_watch(dev);
-+	free_otherend_details(dev);
-+
-+	return drv->read_otherend_details(dev);
-+}
-+
-+
-+static int watch_otherend(struct xenbus_device *dev)
-+{
-+	return xenbus_watch_path2(dev, dev->otherend, "state",
-+				  &dev->otherend_watch, otherend_changed);
-+}
-+
-+
-+static int xenbus_dev_probe(struct device *_dev)
-+{
-+	struct xenbus_device *dev = to_xenbus_device(_dev);
-+	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
-+	const struct xenbus_device_id *id;
-+	int err;
-+
-+	DPRINTK("");
-+
-+	if (!drv->probe) {
-+		err = -ENODEV;
-+		goto fail;
-+	}
-+
-+	id = match_device(drv->ids, dev);
-+	if (!id) {
-+		err = -ENODEV;
-+		goto fail;
-+	}
-+
-+	err = talk_to_otherend(dev);
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "xenbus_probe: talk_to_otherend on %s failed.\n",
-+		       dev->nodename);
-+		return err;
-+	}
-+
-+	err = drv->probe(dev, id);
-+	if (err)
-+		goto fail;
-+
-+	err = watch_otherend(dev);
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "xenbus_probe: watch_otherend on %s failed.\n",
-+		       dev->nodename);
-+		return err;
-+	}
-+
-+	return 0;
-+fail:
-+	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
-+	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
-+	return -ENODEV;
-+	
-+}
-+
-+static int xenbus_dev_remove(struct device *_dev)
-+{
-+	struct xenbus_device *dev = to_xenbus_device(_dev);
-+	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
-+
-+	DPRINTK("");
-+
-+	free_otherend_watch(dev);
-+	free_otherend_details(dev);
-+
-+	if (drv->remove)
-+		drv->remove(dev);
-+
-+	xenbus_switch_state(dev, XBT_NULL, XenbusStateClosed);
-+	return 0;
-+}
-+
-+static int xenbus_register_driver_common(struct xenbus_driver *drv,
-+					 struct xen_bus_type *bus)
-+{
-+	int ret;
-+
-+	drv->driver.name = drv->name;
-+	drv->driver.bus = &bus->bus;
-+	drv->driver.owner = drv->owner;
-+	drv->driver.probe = xenbus_dev_probe;
-+	drv->driver.remove = xenbus_dev_remove;
-+
-+	down(&xenwatch_mutex);
-+	ret = driver_register(&drv->driver);
-+	up(&xenwatch_mutex);
-+	return ret;
-+}
-+
-+int xenbus_register_frontend(struct xenbus_driver *drv)
-+{
-+	drv->read_otherend_details = read_backend_details;
-+
-+	return xenbus_register_driver_common(drv, &xenbus_frontend);
-+}
-+EXPORT_SYMBOL(xenbus_register_frontend);
-+
-+int xenbus_register_backend(struct xenbus_driver *drv)
-+{
-+	drv->read_otherend_details = read_frontend_details;
-+
-+	return xenbus_register_driver_common(drv, &xenbus_backend);
-+}
-+EXPORT_SYMBOL(xenbus_register_backend);
-+
-+void xenbus_unregister_driver(struct xenbus_driver *drv)
-+{
-+	driver_unregister(&drv->driver);
-+}
-+EXPORT_SYMBOL(xenbus_unregister_driver);
-+
-+struct xb_find_info
-+{
-+	struct xenbus_device *dev;
-+	const char *nodename;
-+};
-+
-+static int cmp_dev(struct device *dev, void *data)
-+{
-+	struct xenbus_device *xendev = to_xenbus_device(dev);
-+	struct xb_find_info *info = data;
-+
-+	if (streq(xendev->nodename, info->nodename)) {
-+		info->dev = xendev;
-+		get_device(dev);
-+		return 1;
-+	}
-+	return 0;
-+}
-+
-+struct xenbus_device *xenbus_device_find(const char *nodename,
-+					 struct bus_type *bus)
-+{
-+	struct xb_find_info info = { .dev = NULL, .nodename = nodename };
-+
-+	bus_for_each_dev(bus, NULL, &info, cmp_dev);
-+	return info.dev;
-+}
-+
-+static int cleanup_dev(struct device *dev, void *data)
-+{
-+	struct xenbus_device *xendev = to_xenbus_device(dev);
-+	struct xb_find_info *info = data;
-+	int len = strlen(info->nodename);
-+
-+	DPRINTK("%s", info->nodename);
-+
-+	/* Match the info->nodename path, or any subdirectory of that path. */
-+	if (strncmp(xendev->nodename, info->nodename, len))
-+		return 0;
-+
-+	/* If the node name is longer, ensure it really is a subdirectory. */
-+	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
-+		return 0;
-+
-+	info->dev = xendev;
-+	get_device(dev);
-+	return 1;
-+}
-+
-+static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
-+{
-+	struct xb_find_info info = { .nodename = path };
-+
-+	do {
-+		info.dev = NULL;
-+		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
-+		if (info.dev) {
-+			device_unregister(&info.dev->dev);
-+			put_device(&info.dev->dev);
-+		}
-+	} while (info.dev);
-+}
-+
-+static void xenbus_dev_free(struct xenbus_device *xendev)
-+{
-+	kfree(xendev);
-+}
-+
-+static void xenbus_dev_release(struct device *dev)
-+{
-+	if (dev) {
-+		xenbus_dev_free(to_xenbus_device(dev));
-+	}
-+}
-+
-+/* Simplified asprintf. */
-+char *kasprintf(const char *fmt, ...)
-+{
-+	va_list ap;
-+	unsigned int len;
-+	char *p, dummy[1];
-+
-+	va_start(ap, fmt);
-+	/* FIXME: vsnprintf has a bug, NULL should work */
-+	len = vsnprintf(dummy, 0, fmt, ap);
-+	va_end(ap);
-+
-+	p = kmalloc(len + 1, GFP_KERNEL);
-+	if (!p)
-+		return NULL;
-+	va_start(ap, fmt);
-+	vsprintf(p, fmt, ap);
-+	va_end(ap);
-+	return p;
-+}
-+
-+static ssize_t xendev_show_nodename(struct device *dev, char *buf)
-+{
-+	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
-+}
-+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
-+
-+static ssize_t xendev_show_devtype(struct device *dev, char *buf)
-+{
-+	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
-+}
-+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
-+
-+
-+static int xenbus_probe_node(struct xen_bus_type *bus,
-+			     const char *type,
-+			     const char *nodename)
-+{
-+	int err;
-+	struct xenbus_device *xendev;
-+	size_t stringlen;
-+	char *tmpstring;
-+
-+	XenbusState state = xenbus_read_driver_state(nodename);
-+
-+	if (state != XenbusStateInitialising) {
-+		/* Device is not new, so ignore it.  This can happen if a
-+		   device is going away after switching to Closed.  */
-+		return 0;
-+	}
-+
-+	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
-+	xendev = kmalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
-+	if (!xendev)
-+		return -ENOMEM;
-+	memset(xendev, 0, sizeof(*xendev));
-+
-+	/* Copy the strings into the extra space. */
-+
-+	tmpstring = (char *)(xendev + 1);
-+	strcpy(tmpstring, nodename);
-+	xendev->nodename = tmpstring;
-+
-+	tmpstring += strlen(tmpstring) + 1;
-+	strcpy(tmpstring, type);
-+	xendev->devicetype = tmpstring;
-+
-+	xendev->dev.parent = &bus->dev;
-+	xendev->dev.bus = &bus->bus;
-+	xendev->dev.release = xenbus_dev_release;
-+
-+	err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
-+	if (err)
-+		goto fail;
-+
-+	/* Register with generic device framework. */
-+	err = device_register(&xendev->dev);
-+	if (err)
-+		goto fail;
-+
-+	device_create_file(&xendev->dev, &dev_attr_nodename);
-+	device_create_file(&xendev->dev, &dev_attr_devtype);
-+
-+	return 0;
-+fail:
-+	xenbus_dev_free(xendev);
-+	return err;
-+}
-+
-+/* device/<typename>/<name> */
-+static int xenbus_probe_frontend(const char *type, const char *name)
-+{
-+	char *nodename;
-+	int err;
-+
-+	nodename = kasprintf("%s/%s/%s", xenbus_frontend.root, type, name);
-+	if (!nodename)
-+		return -ENOMEM;
-+	
-+	DPRINTK("%s", nodename);
-+
-+	err = xenbus_probe_node(&xenbus_frontend, type, nodename);
-+	kfree(nodename);
-+	return err;
-+}
-+
-+/* backend/<typename>/<frontend-uuid>/<name> */
-+static int xenbus_probe_backend_unit(const char *dir,
-+				     const char *type,
-+				     const char *name)
-+{
-+	char *nodename;
-+	int err;
-+
-+	nodename = kasprintf("%s/%s", dir, name);
-+	if (!nodename)
-+		return -ENOMEM;
-+
-+	DPRINTK("%s\n", nodename);
-+
-+	err = xenbus_probe_node(&xenbus_backend, type, nodename);
-+	kfree(nodename);
-+	return err;
-+}
-+
-+/* backend/<typename>/<frontend-domid> */
-+static int xenbus_probe_backend(const char *type, const char *domid)
-+{
-+	char *nodename;
-+	int err = 0;
-+	char **dir;
-+	unsigned int i, dir_n = 0;
-+
-+	DPRINTK("");
-+
-+	nodename = kasprintf("%s/%s/%s", xenbus_backend.root, type, domid);
-+	if (!nodename)
-+		return -ENOMEM;
-+
-+	dir = xenbus_directory(XBT_NULL, nodename, "", &dir_n);
-+	if (IS_ERR(dir)) {
-+		kfree(nodename);
-+		return PTR_ERR(dir);
-+	}
-+
-+	for (i = 0; i < dir_n; i++) {
-+		err = xenbus_probe_backend_unit(nodename, type, dir[i]);
-+		if (err)
-+			break;
-+	}
-+	kfree(dir);
-+	kfree(nodename);
-+	return err;
-+}
-+
-+static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
-+{
-+	int err = 0;
-+	char **dir;
-+	unsigned int dir_n = 0;
-+	int i;
-+
-+	dir = xenbus_directory(XBT_NULL, bus->root, type, &dir_n);
-+	if (IS_ERR(dir))
-+		return PTR_ERR(dir);
-+
-+	for (i = 0; i < dir_n; i++) {
-+		err = bus->probe(type, dir[i]);
-+		if (err)
-+			break;
-+	}
-+	kfree(dir);
-+	return err;
-+}
-+
-+static int xenbus_probe_devices(struct xen_bus_type *bus)
-+{
-+	int err = 0;
-+	char **dir;
-+	unsigned int i, dir_n;
-+
-+	dir = xenbus_directory(XBT_NULL, bus->root, "", &dir_n);
-+	if (IS_ERR(dir))
-+		return PTR_ERR(dir);
-+
-+	for (i = 0; i < dir_n; i++) {
-+		err = xenbus_probe_device_type(bus, dir[i]);
-+		if (err)
-+			break;
-+	}
-+	kfree(dir);
-+	return err;
-+}
-+
-+static unsigned int char_count(const char *str, char c)
-+{
-+	unsigned int i, ret = 0;
-+
-+	for (i = 0; str[i]; i++)
-+		if (str[i] == c)
-+			ret++;
-+	return ret;
-+}
-+
-+static int strsep_len(const char *str, char c, unsigned int len)
-+{
-+	unsigned int i;
-+
-+	for (i = 0; str[i]; i++)
-+		if (str[i] == c) {
-+			if (len == 0)
-+				return i;
-+			len--;
-+		}
-+	return (len == 0) ? i : -ERANGE;
-+}
-+
-+static void dev_changed(const char *node, struct xen_bus_type *bus)
-+{
-+	int exists, rootlen;
-+	struct xenbus_device *dev;
-+	char type[BUS_ID_SIZE];
-+	const char *p, *root;
-+
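-+	/* Work out the device root (bus->levels path components deep) for
-+	 * the node that changed, then register or clean up the device. */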
-+	if (char_count(node, '/') < 2)
-+ 		return;
-+
-+	exists = xenbus_exists(XBT_NULL, node, "");
-+	if (!exists) {
-+		xenbus_cleanup_devices(node, &bus->bus);
-+		return;
-+	}
-+
-+	/* backend/<type>/... or device/<type>/... */
-+	p = strchr(node, '/') + 1;
-+	snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
-+	type[BUS_ID_SIZE-1] = '\0';
-+
-+	rootlen = strsep_len(node, '/', bus->levels);
-+	if (rootlen < 0)
-+		return;
-+	root = kasprintf("%.*s", rootlen, node);
-+	if (!root)
-+		return;
-+
-+	dev = xenbus_device_find(root, &bus->bus);
-+	if (!dev)
-+		xenbus_probe_node(bus, type, root);
-+	else
-+		put_device(&dev->dev);
-+
-+	kfree(root);
-+}
-+
-+static void frontend_changed(struct xenbus_watch *watch,
-+			     const char **vec, unsigned int len)
-+{
-+	DPRINTK("");
-+
-+	dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
-+}
-+
-+static void backend_changed(struct xenbus_watch *watch,
-+			    const char **vec, unsigned int len)
-+{
-+	DPRINTK("");
-+
-+	dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
-+}
-+
-+/* We watch for devices appearing and vanishing. */
-+static struct xenbus_watch fe_watch = {
-+	.node = "device",
-+	.callback = frontend_changed,
-+};
-+
-+static struct xenbus_watch be_watch = {
-+	.node = "backend",
-+	.callback = backend_changed,
-+};
-+
-+static int suspend_dev(struct device *dev, void *data)
-+{
-+	int err = 0;
-+	struct xenbus_driver *drv;
-+	struct xenbus_device *xdev;
-+
-+	DPRINTK("");
-+
-+	if (dev->driver == NULL)
-+		return 0;
-+	drv = to_xenbus_driver(dev->driver);
-+	xdev = container_of(dev, struct xenbus_device, dev);
-+	if (drv->suspend)
-+		err = drv->suspend(xdev);
-+	if (err)
-+		printk(KERN_WARNING
-+		       "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
-+	return 0;
-+}
-+
-+static int resume_dev(struct device *dev, void *data)
-+{
-+	int err;
-+	struct xenbus_driver *drv;
-+	struct xenbus_device *xdev;
-+
-+	DPRINTK("");
-+
-+	if (dev->driver == NULL)
-+		return 0;
-+	drv = to_xenbus_driver(dev->driver);
-+	xdev = container_of(dev, struct xenbus_device, dev);
-+
-+	err = talk_to_otherend(xdev);
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "xenbus: resume (talk_to_otherend) %s failed: %i\n",
-+		       dev->bus_id, err);
-+		return err;
-+	}
-+
-+	err = watch_otherend(xdev);
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "xenbus_probe: resume (watch_otherend) %s failed: "
-+		       "%d.\n", dev->bus_id, err);
-+		return err;
-+	}
-+
-+	if (drv->resume)
-+		err = drv->resume(xdev);
-+	if (err)
-+		printk(KERN_WARNING
-+		       "xenbus: resume %s failed: %i\n", dev->bus_id, err);
-+	return err;
-+}
-+
-+void xenbus_suspend(void)
-+{
-+	DPRINTK("");
-+
-+	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
-+	bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, suspend_dev);
-+	xs_suspend();
-+}
-+EXPORT_SYMBOL(xenbus_suspend);
-+
-+void xenbus_resume(void)
-+{
-+	xb_init_comms();
-+	xs_resume();
-+	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
-+	bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, resume_dev);
-+}
-+EXPORT_SYMBOL(xenbus_resume);
-+
-+
-+/* A flag to determine if xenstored is 'ready' (i.e. has started) */
-+int xenstored_ready = 0; 
-+
-+
-+int register_xenstore_notifier(struct notifier_block *nb)
-+{
-+	int ret = 0;
-+
-+	if (xenstored_ready > 0) 
-+		ret = nb->notifier_call(nb, 0, NULL);
-+	else 
-+		notifier_chain_register(&xenstore_chain, nb);
-+
-+	return ret;
-+}
-+EXPORT_SYMBOL(register_xenstore_notifier);
-+
-+void unregister_xenstore_notifier(struct notifier_block *nb)
-+{
-+	notifier_chain_unregister(&xenstore_chain, nb);
-+}
-+EXPORT_SYMBOL(unregister_xenstore_notifier);
-+
-+
-+
-+void xenbus_probe(void *unused)
-+{
-+	BUG_ON((xenstored_ready <= 0)); 
-+
-+	/* Enumerate devices in xenstore. */
-+	xenbus_probe_devices(&xenbus_frontend);
-+	xenbus_probe_devices(&xenbus_backend);
-+
-+	/* Watch for changes. */
-+	register_xenbus_watch(&fe_watch);
-+	register_xenbus_watch(&be_watch);
-+
-+	/* Notify others that xenstore is up */
-+	notifier_call_chain(&xenstore_chain, 0, NULL);
-+}
-+
-+
-+static struct proc_dir_entry *xsd_mfn_intf;
-+static struct proc_dir_entry *xsd_port_intf;
-+
-+
-+static int xsd_mfn_read(char *page, char **start, off_t off,
-+                        int count, int *eof, void *data)
-+{
-+	int len; 
-+	len  = sprintf(page, "%ld", xen_start_info->store_mfn); 
-+	*eof = 1; 
-+	return len; 
-+}
-+
-+static int xsd_port_read(char *page, char **start, off_t off,
-+			 int count, int *eof, void *data)
-+{
-+	int len; 
-+
-+	len  = sprintf(page, "%d", xen_start_info->store_evtchn); 
-+	*eof = 1; 
-+	return len; 
-+}
-+
-+
-+static int __init xenbus_probe_init(void)
-+{
-+	int err = 0, dom0;
-+
-+	DPRINTK("");
-+
-+	if (xen_init() < 0) {
-+		DPRINTK("failed");
-+		return -ENODEV;
-+	}
-+
-+	/* Register ourselves with the kernel bus & device subsystems */
-+	bus_register(&xenbus_frontend.bus);
-+	bus_register(&xenbus_backend.bus);
-+	device_register(&xenbus_frontend.dev);
-+	device_register(&xenbus_backend.dev);
-+
-+	/*
-+	** Domain0 doesn't have a store_evtchn or store_mfn yet.
-+	*/
-+	dom0 = (xen_start_info->store_evtchn == 0);
-+
-+	if (dom0) {
-+
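-+		/*
-+		 * Domain 0 is not handed a xenstore page or event channel
-+		 * by the tools, so allocate them here and publish them via
-+		 * /proc/xen for xenstored to pick up.
-+		 */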
-+		unsigned long page;
-+		evtchn_op_t op = { 0 };
-+		int ret;
-+
-+
-+		/* Allocate page. */
-+		page = get_zeroed_page(GFP_KERNEL);
-+		if (!page) 
-+			return -ENOMEM; 
-+
-+		/* We don't refcnt properly, so set reserved on page.
-+		 * (this allocation is permanent) */
-+		SetPageReserved(virt_to_page(page));
-+
-+		xen_start_info->store_mfn =
-+			pfn_to_mfn(virt_to_phys((void *)page) >>
-+				   PAGE_SHIFT);
-+		
-+		/* Next allocate a local port which xenstored can bind to */
-+		op.cmd = EVTCHNOP_alloc_unbound;
-+		op.u.alloc_unbound.dom        = DOMID_SELF;
-+		op.u.alloc_unbound.remote_dom = 0; 
-+
-+		ret = HYPERVISOR_event_channel_op(&op);
-+		BUG_ON(ret); 
-+		xen_start_info->store_evtchn = op.u.alloc_unbound.port;
-+
-+		/* And finally publish the above info in /proc/xen */
-+		if((xsd_mfn_intf = create_xen_proc_entry("xsd_mfn", 0400)))
-+			xsd_mfn_intf->read_proc = xsd_mfn_read; 
-+		if((xsd_port_intf = create_xen_proc_entry("xsd_port", 0400)))
-+			xsd_port_intf->read_proc = xsd_port_read;
-+	}
-+
-+	/* Initialize the interface to xenstore. */
-+	err = xs_init(); 
-+	if (err) {
-+		printk(KERN_WARNING
-+		       "XENBUS: Error initializing xenstore comms: %i\n", err);
-+		return err; 
-+	}
-+
-+	if (!dom0) {
-+		xenstored_ready = 1;
-+		xenbus_probe(NULL);
-+	}
-+
-+	return 0;
-+}
-+
-+postcore_initcall(xenbus_probe_init);
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_xs.c linux-2.6.12-xen/drivers/xen/xenbus/xenbus_xs.c
---- pristine-linux-2.6.12/drivers/xen/xenbus/xenbus_xs.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/drivers/xen/xenbus/xenbus_xs.c	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,821 @@
-+/******************************************************************************
-+ * xenbus_xs.c
-+ *
-+ * This is the kernel equivalent of the "xs" library.  We don't need everything
-+ * and we use xenbus_comms for communication.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#include <linux/unistd.h>
-+#include <linux/errno.h>
-+#include <linux/types.h>
-+#include <linux/uio.h>
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/err.h>
-+#include <linux/slab.h>
-+#include <linux/fcntl.h>
-+#include <linux/kthread.h>
-+#include <asm-xen/xenbus.h>
-+#include "xenbus_comms.h"
-+
-+/* xenbus_probe.c */
-+extern char *kasprintf(const char *fmt, ...);
-+
-+#define streq(a, b) (strcmp((a), (b)) == 0)
-+
-+struct xs_stored_msg {
-+	struct list_head list;
-+
-+	struct xsd_sockmsg hdr;
-+
-+	union {
-+		/* Queued replies. */
-+		struct {
-+			char *body;
-+		} reply;
-+
-+		/* Queued watch events. */
-+		struct {
-+			struct xenbus_watch *handle;
-+			char **vec;
-+			unsigned int vec_size;
-+		} watch;
-+	} u;
-+};
-+
-+struct xs_handle {
-+	/* A list of replies. Currently only one will ever be outstanding. */
-+	struct list_head reply_list;
-+	spinlock_t reply_lock;
-+	wait_queue_head_t reply_waitq;
-+
-+	/* One request at a time. */
-+	struct semaphore request_mutex;
-+
-+	/* Protect transactions against save/restore. */
-+	struct rw_semaphore suspend_mutex;
-+};
-+
-+static struct xs_handle xs_state;
-+
-+/* List of registered watches, and a lock to protect it. */
-+static LIST_HEAD(watches);
-+static DEFINE_SPINLOCK(watches_lock);
-+
-+/* List of pending watch callback events, and a lock to protect it. */
-+static LIST_HEAD(watch_events);
-+static DEFINE_SPINLOCK(watch_events_lock);
-+
-+/*
-+ * Details of the xenwatch callback kernel thread. The thread waits on the
-+ * watch_events_waitq for work to do (queued on watch_events list). When it
-+ * wakes up it acquires the xenwatch_mutex before reading the list and
-+ * carrying out work.
-+ */
-+static pid_t xenwatch_pid;
-+/* static */ DECLARE_MUTEX(xenwatch_mutex);
-+static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
-+
-+static int get_error(const char *errorstring)
-+{
-+	unsigned int i;
-+
-+	for (i = 0; !streq(errorstring, xsd_errors[i].errstring); i++) {
-+		if (i == ARRAY_SIZE(xsd_errors) - 1) {
-+			printk(KERN_WARNING
-+			       "XENBUS xen store gave: unknown error %s",
-+			       errorstring);
-+			return EINVAL;
-+		}
-+	}
-+	return xsd_errors[i].errnum;
-+}
-+
-+static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
-+{
-+	struct xs_stored_msg *msg;
-+	char *body;
-+
-+	spin_lock(&xs_state.reply_lock);
-+
-+	while (list_empty(&xs_state.reply_list)) {
-+		spin_unlock(&xs_state.reply_lock);
-+		wait_event_interruptible(xs_state.reply_waitq,
-+					 !list_empty(&xs_state.reply_list));
-+		spin_lock(&xs_state.reply_lock);
-+	}
-+
-+	msg = list_entry(xs_state.reply_list.next,
-+			 struct xs_stored_msg, list);
-+	list_del(&msg->list);
-+
-+	spin_unlock(&xs_state.reply_lock);
-+
-+	*type = msg->hdr.type;
-+	if (len)
-+		*len = msg->hdr.len;
-+	body = msg->u.reply.body;
-+
-+	kfree(msg);
-+
-+	return body;
-+}
-+
-+/* Emergency write. */
-+void xenbus_debug_write(const char *str, unsigned int count)
-+{
-+	struct xsd_sockmsg msg = { 0 };
-+
-+	msg.type = XS_DEBUG;
-+	msg.len = sizeof("print") + count + 1;
-+
-+	down(&xs_state.request_mutex);
-+	xb_write(&msg, sizeof(msg));
-+	xb_write("print", sizeof("print"));
-+	xb_write(str, count);
-+	xb_write("", 1);
-+	up(&xs_state.request_mutex);
-+}
-+
-+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
-+{
-+	void *ret;
-+	struct xsd_sockmsg req_msg = *msg;
-+	int err;
-+
-+	if (req_msg.type == XS_TRANSACTION_START)
-+		down_read(&xs_state.suspend_mutex);
-+
-+	down(&xs_state.request_mutex);
-+
-+	err = xb_write(msg, sizeof(*msg) + msg->len);
-+	if (err) {
-+		msg->type = XS_ERROR;
-+		ret = ERR_PTR(err);
-+	} else {
-+		ret = read_reply(&msg->type, &msg->len);
-+	}
-+
-+	up(&xs_state.request_mutex);
-+
-+	if ((msg->type == XS_TRANSACTION_END) ||
-+	    ((req_msg.type == XS_TRANSACTION_START) &&
-+	     (msg->type == XS_ERROR)))
-+		up_read(&xs_state.suspend_mutex);
-+
-+	return ret;
-+}
-+
-+/* Send message to xs, get kmalloc'ed reply.  ERR_PTR() on error. */
-+static void *xs_talkv(xenbus_transaction_t t,
-+		      enum xsd_sockmsg_type type,
-+		      const struct kvec *iovec,
-+		      unsigned int num_vecs,
-+		      unsigned int *len)
-+{
-+	struct xsd_sockmsg msg;
-+	void *ret = NULL;
-+	unsigned int i;
-+	int err;
-+
-+	msg.tx_id = t;
-+	msg.req_id = 0;
-+	msg.type = type;
-+	msg.len = 0;
-+	for (i = 0; i < num_vecs; i++)
-+		msg.len += iovec[i].iov_len;
-+
-+	down(&xs_state.request_mutex);
-+
-+	err = xb_write(&msg, sizeof(msg));
-+	if (err) {
-+		up(&xs_state.request_mutex);
-+		return ERR_PTR(err);
-+	}
-+
-+	for (i = 0; i < num_vecs; i++) {
-+		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
-+		if (err) {
-+			up(&xs_state.request_mutex);
-+			return ERR_PTR(err);
-+		}
-+	}
-+
-+	ret = read_reply(&msg.type, len);
-+
-+	up(&xs_state.request_mutex);
-+
-+	if (IS_ERR(ret))
-+		return ret;
-+
-+	if (msg.type == XS_ERROR) {
-+		err = get_error(ret);
-+		kfree(ret);
-+		return ERR_PTR(-err);
-+	}
-+
-+	BUG_ON(msg.type != type);
-+	return ret;
-+}
-+
-+/* Simplified version of xs_talkv: single message. */
-+static void *xs_single(xenbus_transaction_t t,
-+		       enum xsd_sockmsg_type type,
-+		       const char *string,
-+		       unsigned int *len)
-+{
-+	struct kvec iovec;
-+
-+	iovec.iov_base = (void *)string;
-+	iovec.iov_len = strlen(string) + 1;
-+	return xs_talkv(t, type, &iovec, 1, len);
-+}
-+
-+/* Many commands only need an ack, don't care what it says. */
-+static int xs_error(char *reply)
-+{
-+	if (IS_ERR(reply))
-+		return PTR_ERR(reply);
-+	kfree(reply);
-+	return 0;
-+}
-+
-+static unsigned int count_strings(const char *strings, unsigned int len)
-+{
-+	unsigned int num;
-+	const char *p;
-+
-+	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
-+		num++;
-+
-+	return num;
-+}
-+
-+/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ 
-+static char *join(const char *dir, const char *name)
-+{
-+	char *buffer;
-+
-+	if (strlen(name) == 0)
-+		buffer = kasprintf("%s", dir);
-+	else
-+		buffer = kasprintf("%s/%s", dir, name);
-+	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
-+}
-+
-+static char **split(char *strings, unsigned int len, unsigned int *num)
-+{
-+	char *p, **ret;
-+
-+	/* Count the strings. */
-+	*num = count_strings(strings, len);
-+
-+	/* Transfer to one big alloc for easy freeing. */
-+	ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
-+	if (!ret) {
-+		kfree(strings);
-+		return ERR_PTR(-ENOMEM);
-+	}
-+	memcpy(&ret[*num], strings, len);
-+	kfree(strings);
-+
-+	strings = (char *)&ret[*num];
-+	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
-+		ret[(*num)++] = p;
-+
-+	return ret;
-+}
-+
-+char **xenbus_directory(xenbus_transaction_t t,
-+			const char *dir, const char *node, unsigned int *num)
-+{
-+	char *strings, *path;
-+	unsigned int len;
-+
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return (char **)path;
-+
-+	strings = xs_single(t, XS_DIRECTORY, path, &len);
-+	kfree(path);
-+	if (IS_ERR(strings))
-+		return (char **)strings;
-+
-+	return split(strings, len, num);
-+}
-+EXPORT_SYMBOL(xenbus_directory);
-+
-+/* Check if a path exists. Return 1 if it does. */
-+int xenbus_exists(xenbus_transaction_t t,
-+		  const char *dir, const char *node)
-+{
-+	char **d;
-+	int dir_n;
-+
-+	d = xenbus_directory(t, dir, node, &dir_n);
-+	if (IS_ERR(d))
-+		return 0;
-+	kfree(d);
-+	return 1;
-+}
-+EXPORT_SYMBOL(xenbus_exists);
-+
-+/* Get the value of a single file.
-+ * Returns a kmalloced value: call kfree() on it after use.
-+ * len indicates length in bytes.
-+ */
-+void *xenbus_read(xenbus_transaction_t t,
-+		  const char *dir, const char *node, unsigned int *len)
-+{
-+	char *path;
-+	void *ret;
-+
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return (void *)path;
-+
-+	ret = xs_single(t, XS_READ, path, len);
-+	kfree(path);
-+	return ret;
-+}
-+EXPORT_SYMBOL(xenbus_read);
-+
-+/* Write the value of a single file.
-+ * Returns -err on failure.
-+ */
-+int xenbus_write(xenbus_transaction_t t,
-+		 const char *dir, const char *node, const char *string)
-+{
-+	const char *path;
-+	struct kvec iovec[2];
-+	int ret;
-+
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return PTR_ERR(path);
-+
-+	iovec[0].iov_base = (void *)path;
-+	iovec[0].iov_len = strlen(path) + 1;
-+	iovec[1].iov_base = (void *)string;
-+	iovec[1].iov_len = strlen(string);
-+
-+	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
-+	kfree(path);
-+	return ret;
-+}
-+EXPORT_SYMBOL(xenbus_write);
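
[Reviewer note: a minimal usage sketch of xenbus_read()/xenbus_write() as
defined above. It is illustrative only and not part of the patch; the
"device/vif/0" path, the node names and the printk are invented, and the
usual <asm-xen/xenbus.h> and <linux/err.h> includes are assumed.]

  /* Sketch, not from the patch: read one node, then write another. */
  static int example_read_write(void)
  {
  	char *mac;
  	int err;

  	mac = xenbus_read(XBT_NULL, "device/vif/0", "mac", NULL);
  	if (IS_ERR(mac))
  		return PTR_ERR(mac);
  	printk(KERN_INFO "mac = %s\n", mac);
  	kfree(mac);

  	err = xenbus_write(XBT_NULL, "device/vif/0", "state", "3");
  	return err;
  }
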
-+
-+/* Create a new directory. */
-+int xenbus_mkdir(xenbus_transaction_t t,
-+		 const char *dir, const char *node)
-+{
-+	char *path;
-+	int ret;
-+
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return PTR_ERR(path);
-+
-+	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
-+	kfree(path);
-+	return ret;
-+}
-+EXPORT_SYMBOL(xenbus_mkdir);
-+
-+/* Destroy a file or directory (directories must be empty). */
-+int xenbus_rm(xenbus_transaction_t t, const char *dir, const char *node)
-+{
-+	char *path;
-+	int ret;
-+
-+	path = join(dir, node);
-+	if (IS_ERR(path))
-+		return PTR_ERR(path);
-+
-+	ret = xs_error(xs_single(t, XS_RM, path, NULL));
-+	kfree(path);
-+	return ret;
-+}
-+EXPORT_SYMBOL(xenbus_rm);
-+
-+/* Start a transaction: changes by others will not be seen during this
-+ * transaction, and changes will not be visible to others until end.
-+ */
-+int xenbus_transaction_start(xenbus_transaction_t *t)
-+{
-+	char *id_str;
-+
-+	down_read(&xs_state.suspend_mutex);
-+
-+	id_str = xs_single(XBT_NULL, XS_TRANSACTION_START, "", NULL);
-+	if (IS_ERR(id_str)) {
-+		up_read(&xs_state.suspend_mutex);
-+		return PTR_ERR(id_str);
-+	}
-+
-+	*t = simple_strtoul(id_str, NULL, 0);
-+	kfree(id_str);
-+	return 0;
-+}
-+EXPORT_SYMBOL(xenbus_transaction_start);
-+
-+/* End a transaction.
-+ * If abort is true, the transaction is discarded instead of committed.
-+ */
-+int xenbus_transaction_end(xenbus_transaction_t t, int abort)
-+{
-+	char abortstr[2];
-+	int err;
-+
-+	if (abort)
-+		strcpy(abortstr, "F");
-+	else
-+		strcpy(abortstr, "T");
-+
-+	err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
-+
-+	up_read(&xs_state.suspend_mutex);
-+
-+	return err;
-+}
-+EXPORT_SYMBOL(xenbus_transaction_end);
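
[Reviewer note: a sketch of how a caller would typically use the
transaction calls above, retrying on -EAGAIN, which is what xenstored
reports when the transaction conflicted with another writer. Illustrative
only, not part of the patch; the "device/example/0" path and the state
value are invented.]

  static int example_transaction(void)
  {
  	xenbus_transaction_t xbt;
  	int err;

  again:
  	err = xenbus_transaction_start(&xbt);
  	if (err)
  		return err;

  	err = xenbus_write(xbt, "device/example/0", "state", "4");
  	if (err) {
  		xenbus_transaction_end(xbt, 1);	/* abort */
  		return err;
  	}

  	err = xenbus_transaction_end(xbt, 0);	/* commit */
  	if (err == -EAGAIN)
  		goto again;	/* lost the race with another writer: retry */
  	return err;
  }
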
-+
-+/* Single read and scanf: returns -errno or num scanned. */
-+int xenbus_scanf(xenbus_transaction_t t,
-+		 const char *dir, const char *node, const char *fmt, ...)
-+{
-+	va_list ap;
-+	int ret;
-+	char *val;
-+
-+	val = xenbus_read(t, dir, node, NULL);
-+	if (IS_ERR(val))
-+		return PTR_ERR(val);
-+
-+	va_start(ap, fmt);
-+	ret = vsscanf(val, fmt, ap);
-+	va_end(ap);
-+	kfree(val);
-+	/* Distinctive errno. */
-+	if (ret == 0)
-+		return -ERANGE;
-+	return ret;
-+}
-+EXPORT_SYMBOL(xenbus_scanf);
-+
-+/* Single printf and write: returns -errno or 0. */
-+int xenbus_printf(xenbus_transaction_t t,
-+		  const char *dir, const char *node, const char *fmt, ...)
-+{
-+	va_list ap;
-+	int ret;
-+#define PRINTF_BUFFER_SIZE 4096
-+	char *printf_buffer;
-+
-+	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
-+	if (printf_buffer == NULL)
-+		return -ENOMEM;
-+
-+	va_start(ap, fmt);
-+	ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
-+	va_end(ap);
-+
-+	BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
-+	ret = xenbus_write(t, dir, node, printf_buffer);
-+
-+	kfree(printf_buffer);
-+
-+	return ret;
-+}
-+EXPORT_SYMBOL(xenbus_printf);
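
[Reviewer note: the same for the formatted helpers just defined.
xenbus_scanf() returns the number of items converted (a count of 0 is
mapped to -ERANGE), and xenbus_printf() returns 0 or -errno. Sketch only;
path and node names are invented.]

  static int example_scanf_printf(xenbus_transaction_t xbt)
  {
  	unsigned int ring_ref;
  	int err;

  	err = xenbus_scanf(xbt, "device/example/0", "ring-ref", "%u", &ring_ref);
  	if (err < 0)
  		return err;

  	return xenbus_printf(xbt, "device/example/0", "event-channel", "%u", 123);
  }
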
-+
-+/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
-+int xenbus_gather(xenbus_transaction_t t, const char *dir, ...)
-+{
-+	va_list ap;
-+	const char *name;
-+	int ret = 0;
-+
-+	va_start(ap, dir);
-+	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
-+		const char *fmt = va_arg(ap, char *);
-+		void *result = va_arg(ap, void *);
-+		char *p;
-+
-+		p = xenbus_read(t, dir, name, NULL);
-+		if (IS_ERR(p)) {
-+			ret = PTR_ERR(p);
-+			break;
-+		}
-+		if (fmt) {
-+			if (sscanf(p, fmt, result) == 0)
-+				ret = -EINVAL;
-+			kfree(p);
-+		} else
-+			*(char **)result = p;
-+	}
-+	va_end(ap);
-+	return ret;
-+}
-+EXPORT_SYMBOL(xenbus_gather);
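
[Reviewer note: xenbus_gather() above reads several nodes in one call.
Each tuple is a node name, a scanf format (or NULL to receive the raw,
kmalloc'ed string), and a result pointer, with a final NULL terminating
the list. A sketch with invented node names, not part of the patch.]

  static int example_gather(xenbus_transaction_t xbt)
  {
  	unsigned long ring_ref;
  	unsigned int evtchn;
  	char *protocol;
  	int err;

  	err = xenbus_gather(xbt, "device/example/0",
  			    "ring-ref", "%lu", &ring_ref,
  			    "event-channel", "%u", &evtchn,
  			    "protocol", NULL, &protocol,	/* raw string: kfree() it */
  			    NULL);
  	if (err)
  		return err;
  	kfree(protocol);
  	return 0;
  }
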
-+
-+static int xs_watch(const char *path, const char *token)
-+{
-+	struct kvec iov[2];
-+
-+	iov[0].iov_base = (void *)path;
-+	iov[0].iov_len = strlen(path) + 1;
-+	iov[1].iov_base = (void *)token;
-+	iov[1].iov_len = strlen(token) + 1;
-+
-+	return xs_error(xs_talkv(XBT_NULL, XS_WATCH, iov,
-+				 ARRAY_SIZE(iov), NULL));
-+}
-+
-+static int xs_unwatch(const char *path, const char *token)
-+{
-+	struct kvec iov[2];
-+
-+	iov[0].iov_base = (char *)path;
-+	iov[0].iov_len = strlen(path) + 1;
-+	iov[1].iov_base = (char *)token;
-+	iov[1].iov_len = strlen(token) + 1;
-+
-+	return xs_error(xs_talkv(XBT_NULL, XS_UNWATCH, iov,
-+				 ARRAY_SIZE(iov), NULL));
-+}
-+
-+static struct xenbus_watch *find_watch(const char *token)
-+{
-+	struct xenbus_watch *i, *cmp;
-+
-+	cmp = (void *)simple_strtoul(token, NULL, 16);
-+
-+	list_for_each_entry(i, &watches, list)
-+		if (i == cmp)
-+			return i;
-+
-+	return NULL;
-+}
-+
-+/* Register callback to watch this node. */
-+int register_xenbus_watch(struct xenbus_watch *watch)
-+{
-+	/* Pointer in ascii is the token. */
-+	char token[sizeof(watch) * 2 + 1];
-+	int err;
-+
-+	sprintf(token, "%lX", (long)watch);
-+
-+	down_read(&xs_state.suspend_mutex);
-+
-+	spin_lock(&watches_lock);
-+	BUG_ON(find_watch(token));
-+	list_add(&watch->list, &watches);
-+	spin_unlock(&watches_lock);
-+
-+	err = xs_watch(watch->node, token);
-+
-+	/* Ignore errors due to multiple registration. */
-+	if ((err != 0) && (err != -EEXIST)) {
-+		spin_lock(&watches_lock);
-+		list_del(&watch->list);
-+		spin_unlock(&watches_lock);
-+	}
-+
-+	up_read(&xs_state.suspend_mutex);
-+
-+	return err;
-+}
-+EXPORT_SYMBOL(register_xenbus_watch);
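
[Reviewer note: a sketch of registering a watch with the call above. It
assumes the struct xenbus_watch layout implied by this file (a .node path,
a .callback and an embedded list head, declared in <asm-xen/xenbus.h>) and
the XS_WATCH_PATH index from the xenstore wire-protocol header; the
watched path is invented. Not part of the patch.]

  static void example_changed(struct xenbus_watch *watch,
  			    const char **vec, unsigned int len)
  {
  	printk(KERN_INFO "xenstore node %s changed\n", vec[XS_WATCH_PATH]);
  }

  static struct xenbus_watch example_watch = {
  	.node     = "device/example/0/state",
  	.callback = example_changed,
  };

  static int example_setup_watch(void)
  {
  	return register_xenbus_watch(&example_watch);
  }
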
-+
-+void unregister_xenbus_watch(struct xenbus_watch *watch)
-+{
-+	struct xs_stored_msg *msg, *tmp;
-+	char token[sizeof(watch) * 2 + 1];
-+	int err;
-+
-+	sprintf(token, "%lX", (long)watch);
-+
-+	down_read(&xs_state.suspend_mutex);
-+
-+	spin_lock(&watches_lock);
-+	BUG_ON(!find_watch(token));
-+	list_del(&watch->list);
-+	spin_unlock(&watches_lock);
-+
-+	err = xs_unwatch(watch->node, token);
-+	if (err)
-+		printk(KERN_WARNING
-+		       "XENBUS Failed to release watch %s: %i\n",
-+		       watch->node, err);
-+
-+	up_read(&xs_state.suspend_mutex);
-+
-+	/* Cancel pending watch events. */
-+	spin_lock(&watch_events_lock);
-+	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
-+		if (msg->u.watch.handle != watch)
-+			continue;
-+		list_del(&msg->list);
-+		kfree(msg->u.watch.vec);
-+		kfree(msg);
-+	}
-+	spin_unlock(&watch_events_lock);
-+
-+	/* Flush any currently-executing callback, unless we are it. :-) */
-+	if (current->pid != xenwatch_pid) {
-+		down(&xenwatch_mutex);
-+		up(&xenwatch_mutex);
-+	}
-+}
-+EXPORT_SYMBOL(unregister_xenbus_watch);
-+
-+void xs_suspend(void)
-+{
-+	down_write(&xs_state.suspend_mutex);
-+	down(&xs_state.request_mutex);
-+}
-+
-+void xs_resume(void)
-+{
-+	struct xenbus_watch *watch;
-+	char token[sizeof(watch) * 2 + 1];
-+
-+	up(&xs_state.request_mutex);
-+
-+	/* No need for watches_lock: the suspend_mutex is sufficient. */
-+	list_for_each_entry(watch, &watches, list) {
-+		sprintf(token, "%lX", (long)watch);
-+		xs_watch(watch->node, token);
-+	}
-+
-+	up_write(&xs_state.suspend_mutex);
-+}
-+
-+static int xenwatch_thread(void *unused)
-+{
-+	struct list_head *ent;
-+	struct xs_stored_msg *msg;
-+
-+	for (;;) {
-+		wait_event_interruptible(watch_events_waitq,
-+					 !list_empty(&watch_events));
-+
-+		down(&xenwatch_mutex);
-+
-+		spin_lock(&watch_events_lock);
-+		ent = watch_events.next;
-+		if (ent != &watch_events)
-+			list_del(ent);
-+		spin_unlock(&watch_events_lock);
-+
-+		if (ent != &watch_events) {
-+			msg = list_entry(ent, struct xs_stored_msg, list);
-+			msg->u.watch.handle->callback(
-+				msg->u.watch.handle,
-+				(const char **)msg->u.watch.vec,
-+				msg->u.watch.vec_size);
-+			kfree(msg->u.watch.vec);
-+			kfree(msg);
-+		}
-+
-+		up(&xenwatch_mutex);
-+	}
-+}
-+
-+static int process_msg(void)
-+{
-+	struct xs_stored_msg *msg;
-+	char *body;
-+	int err;
-+
-+	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
-+	if (msg == NULL)
-+		return -ENOMEM;
-+
-+	err = xb_read(&msg->hdr, sizeof(msg->hdr));
-+	if (err) {
-+		kfree(msg);
-+		return err;
-+	}
-+
-+	body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
-+	if (body == NULL) {
-+		kfree(msg);
-+		return -ENOMEM;
-+	}
-+
-+	err = xb_read(body, msg->hdr.len);
-+	if (err) {
-+		kfree(body);
-+		kfree(msg);
-+		return err;
-+	}
-+	body[msg->hdr.len] = '\0';
-+
-+	if (msg->hdr.type == XS_WATCH_EVENT) {
-+		msg->u.watch.vec = split(body, msg->hdr.len,
-+					 &msg->u.watch.vec_size);
-+		if (IS_ERR(msg->u.watch.vec)) {
-+			kfree(msg);
-+			return PTR_ERR(msg->u.watch.vec);
-+		}
-+
-+		spin_lock(&watches_lock);
-+		msg->u.watch.handle = find_watch(
-+			msg->u.watch.vec[XS_WATCH_TOKEN]);
-+		if (msg->u.watch.handle != NULL) {
-+			spin_lock(&watch_events_lock);
-+			list_add_tail(&msg->list, &watch_events);
-+			wake_up(&watch_events_waitq);
-+			spin_unlock(&watch_events_lock);
-+		} else {
-+			kfree(msg->u.watch.vec);
-+			kfree(msg);
-+		}
-+		spin_unlock(&watches_lock);
-+	} else {
-+		msg->u.reply.body = body;
-+		spin_lock(&xs_state.reply_lock);
-+		list_add_tail(&msg->list, &xs_state.reply_list);
-+		spin_unlock(&xs_state.reply_lock);
-+		wake_up(&xs_state.reply_waitq);
-+	}
-+
-+	return 0;
-+}
-+
-+static int xenbus_thread(void *unused)
-+{
-+	int err;
-+
-+	for (;;) {
-+		err = process_msg();
-+		if (err)
-+			printk(KERN_WARNING "XENBUS error %d while reading "
-+			       "message\n", err);
-+	}
-+}
-+
-+int xs_init(void)
-+{
-+	int err;
-+	struct task_struct *task;
-+
-+	INIT_LIST_HEAD(&xs_state.reply_list);
-+	spin_lock_init(&xs_state.reply_lock);
-+	init_waitqueue_head(&xs_state.reply_waitq);
-+
-+	init_MUTEX(&xs_state.request_mutex);
-+	init_rwsem(&xs_state.suspend_mutex);
-+
-+	/* Initialize the shared memory rings to talk to xenstored */
-+	err = xb_init_comms();
-+	if (err)
-+		return err;
-+
-+	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
-+	if (IS_ERR(task))
-+		return PTR_ERR(task);
-+	xenwatch_pid = task->pid;
-+
-+	task = kthread_run(xenbus_thread, NULL, "xenbus");
-+	if (IS_ERR(task))
-+		return PTR_ERR(task);
-+
-+	return 0;
-+}
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/fs/bio.c linux-2.6.12-xen/fs/bio.c
---- pristine-linux-2.6.12/fs/bio.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/fs/bio.c	2006-02-25 00:12:33.378053625 +0100
-@@ -261,6 +261,7 @@ inline void __bio_clone(struct bio *bio,
- 	 */
- 	bio->bi_vcnt = bio_src->bi_vcnt;
- 	bio->bi_size = bio_src->bi_size;
-+	bio->bi_idx = bio_src->bi_idx;
- 	bio_phys_segments(q, bio);
- 	bio_hw_segments(q, bio);
- }
-diff -Nurp pristine-linux-2.6.12/fs/char_dev.c linux-2.6.12-xen/fs/char_dev.c
---- pristine-linux-2.6.12/fs/char_dev.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/fs/char_dev.c	2006-02-25 00:12:33.764995302 +0100
-@@ -139,7 +139,7 @@ __unregister_chrdev_region(unsigned majo
- 	struct char_device_struct *cd = NULL, **cp;
- 	int i = major_to_index(major);
- 
--	up(&chrdevs_lock);
-+	down(&chrdevs_lock);
- 	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
- 		if ((*cp)->major == major &&
- 		    (*cp)->baseminor == baseminor &&
-diff -Nurp pristine-linux-2.6.12/fs/exec.c linux-2.6.12-xen/fs/exec.c
---- pristine-linux-2.6.12/fs/exec.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/fs/exec.c	2006-02-25 00:12:33.765995151 +0100
-@@ -649,6 +649,7 @@ static inline int de_thread(struct task_
- 	}
- 	sig->group_exit_task = NULL;
- 	sig->notify_count = 0;
-+	sig->real_timer.data = (unsigned long)current;
- 	spin_unlock_irq(lock);
- 
- 	/*
-diff -Nurp pristine-linux-2.6.12/fs/isofs/compress.c linux-2.6.12-xen/fs/isofs/compress.c
---- pristine-linux-2.6.12/fs/isofs/compress.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/fs/isofs/compress.c	2006-02-25 00:12:33.765995151 +0100
-@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *
- 	cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
- 	brelse(bh);
- 
-+	if (cstart > cend)
-+		goto eio;
-+		
- 	csize = cend-cstart;
- 
-+	if (csize > deflateBound(1UL << zisofs_block_shift))
-+		goto eio;
-+
- 	/* Now page[] contains an array of pages, any of which can be NULL,
- 	   and the locks on which we hold.  We should now read the data and
- 	   release the pages.  If the pages are NULL the decompressed data
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/atomic.h linux-2.6.12-xen/include/asm-i386/atomic.h
---- pristine-linux-2.6.12/include/asm-i386/atomic.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/atomic.h	2006-02-25 00:12:33.832985054 +0100
-@@ -4,18 +4,13 @@
- #include <linux/config.h>
- #include <linux/compiler.h>
- #include <asm/processor.h>
-+#include <asm/smp_alt.h>
- 
- /*
-  * Atomic operations that C can't guarantee us.  Useful for
-  * resource counting etc..
-  */
- 
--#ifdef CONFIG_SMP
--#define LOCK "lock ; "
--#else
--#define LOCK ""
--#endif
--
- /*
-  * Make sure gcc doesn't try to be clever and move things around
-  * on us. We need to use _exactly_ the address the user gave us,
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/bitops.h linux-2.6.12-xen/include/asm-i386/bitops.h
---- pristine-linux-2.6.12/include/asm-i386/bitops.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/bitops.h	2006-02-25 00:12:33.832985054 +0100
-@@ -7,6 +7,7 @@
- 
- #include <linux/config.h>
- #include <linux/compiler.h>
-+#include <asm/smp_alt.h>
- 
- /*
-  * These have to be done with inline assembly: that way the bit-setting
-@@ -16,12 +17,6 @@
-  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
-  */
- 
--#ifdef CONFIG_SMP
--#define LOCK_PREFIX "lock ; "
--#else
--#define LOCK_PREFIX ""
--#endif
--
- #define ADDR (*(volatile long *) addr)
- 
- /**
-@@ -41,7 +36,7 @@
-  */
- static inline void set_bit(int nr, volatile unsigned long * addr)
- {
--	__asm__ __volatile__( LOCK_PREFIX
-+	__asm__ __volatile__( LOCK
- 		"btsl %1,%0"
- 		:"=m" (ADDR)
- 		:"Ir" (nr));
-@@ -76,7 +71,7 @@ static inline void __set_bit(int nr, vol
-  */
- static inline void clear_bit(int nr, volatile unsigned long * addr)
- {
--	__asm__ __volatile__( LOCK_PREFIX
-+	__asm__ __volatile__( LOCK
- 		"btrl %1,%0"
- 		:"=m" (ADDR)
- 		:"Ir" (nr));
-@@ -121,7 +116,7 @@ static inline void __change_bit(int nr, 
-  */
- static inline void change_bit(int nr, volatile unsigned long * addr)
- {
--	__asm__ __volatile__( LOCK_PREFIX
-+	__asm__ __volatile__( LOCK
- 		"btcl %1,%0"
- 		:"=m" (ADDR)
- 		:"Ir" (nr));
-@@ -140,7 +135,7 @@ static inline int test_and_set_bit(int n
- {
- 	int oldbit;
- 
--	__asm__ __volatile__( LOCK_PREFIX
-+	__asm__ __volatile__( LOCK
- 		"btsl %2,%1\n\tsbbl %0,%0"
- 		:"=r" (oldbit),"=m" (ADDR)
- 		:"Ir" (nr) : "memory");
-@@ -180,7 +175,7 @@ static inline int test_and_clear_bit(int
- {
- 	int oldbit;
- 
--	__asm__ __volatile__( LOCK_PREFIX
-+	__asm__ __volatile__( LOCK
- 		"btrl %2,%1\n\tsbbl %0,%0"
- 		:"=r" (oldbit),"=m" (ADDR)
- 		:"Ir" (nr) : "memory");
-@@ -231,7 +226,7 @@ static inline int test_and_change_bit(in
- {
- 	int oldbit;
- 
--	__asm__ __volatile__( LOCK_PREFIX
-+	__asm__ __volatile__( LOCK
- 		"btcl %2,%1\n\tsbbl %0,%0"
- 		:"=r" (oldbit),"=m" (ADDR)
- 		:"Ir" (nr) : "memory");
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/cpu.h linux-2.6.12-xen/include/asm-i386/cpu.h
---- pristine-linux-2.6.12/include/asm-i386/cpu.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/cpu.h	2006-02-25 00:12:33.814987766 +0100
-@@ -5,6 +5,7 @@
- #include <linux/cpu.h>
- #include <linux/topology.h>
- #include <linux/nodemask.h>
-+#include <linux/percpu.h>
- 
- #include <asm/node.h>
- 
-@@ -16,4 +17,5 @@ extern int arch_register_cpu(int num);
- extern void arch_unregister_cpu(int);
- #endif
- 
-+DECLARE_PER_CPU(int, cpu_state);
- #endif /* _ASM_I386_CPU_H_ */
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/irq.h linux-2.6.12-xen/include/asm-i386/irq.h
---- pristine-linux-2.6.12/include/asm-i386/irq.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/irq.h	2006-02-25 00:12:33.814987766 +0100
-@@ -38,4 +38,8 @@ extern void release_vm86_irqs(struct tas
- extern int irqbalance_disable(char *str);
- #endif
- 
-+#ifdef CONFIG_HOTPLUG_CPU
-+extern void fixup_irqs(cpumask_t map);
-+#endif
-+
- #endif /* _ASM_IRQ_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/mach-default/mach_traps.h linux-2.6.12-xen/include/asm-i386/mach-default/mach_traps.h
---- pristine-linux-2.6.12/include/asm-i386/mach-default/mach_traps.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/mach-default/mach_traps.h	2006-02-25 00:12:33.819987013 +0100
-@@ -15,6 +15,18 @@ static inline void clear_mem_error(unsig
- 	outb(reason, 0x61);
- }
- 
-+static inline void clear_io_check_error(unsigned char reason)
-+{
-+	unsigned long i;
-+
-+	reason = (reason & 0xf) | 8;
-+	outb(reason, 0x61);
-+	i = 2000;
-+	while (--i) udelay(1000);
-+	reason &= ~8;
-+	outb(reason, 0x61);
-+}
-+
- static inline unsigned char get_nmi_reason(void)
- {
- 	return inb(0x61);
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/pgtable-2level-defs.h linux-2.6.12-xen/include/asm-i386/pgtable-2level-defs.h
---- pristine-linux-2.6.12/include/asm-i386/pgtable-2level-defs.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/pgtable-2level-defs.h	2006-02-25 00:12:33.824986259 +0100
-@@ -1,6 +1,8 @@
- #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
- #define _I386_PGTABLE_2LEVEL_DEFS_H
- 
-+#define HAVE_SHARED_KERNEL_PMD 0
-+
- /*
-  * traditional i386 two-level paging structure:
-  */
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/pgtable-3level-defs.h linux-2.6.12-xen/include/asm-i386/pgtable-3level-defs.h
---- pristine-linux-2.6.12/include/asm-i386/pgtable-3level-defs.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/pgtable-3level-defs.h	2006-02-25 00:12:33.825986109 +0100
-@@ -1,6 +1,8 @@
- #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
- #define _I386_PGTABLE_3LEVEL_DEFS_H
- 
-+#define HAVE_SHARED_KERNEL_PMD 1
-+
- /*
-  * PGDIR_SHIFT determines what a top-level page table entry can map
-  */
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/rwsem.h linux-2.6.12-xen/include/asm-i386/rwsem.h
---- pristine-linux-2.6.12/include/asm-i386/rwsem.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/rwsem.h	2006-02-25 00:12:33.833984903 +0100
-@@ -40,6 +40,7 @@
- 
- #include <linux/list.h>
- #include <linux/spinlock.h>
-+#include <asm/smp_alt.h>
- 
- struct rwsem_waiter;
- 
-@@ -99,7 +100,7 @@ static inline void __down_read(struct rw
- {
- 	__asm__ __volatile__(
- 		"# beginning down_read\n\t"
--LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
-+LOCK	        "  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
- 		"  js        2f\n\t" /* jump if we weren't granted the lock */
- 		"1:\n\t"
- 		LOCK_SECTION_START("")
-@@ -130,7 +131,7 @@ static inline int __down_read_trylock(st
- 		"  movl	     %1,%2\n\t"
- 		"  addl      %3,%2\n\t"
- 		"  jle	     2f\n\t"
--LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
-+LOCK	        "  cmpxchgl  %2,%0\n\t"
- 		"  jnz	     1b\n\t"
- 		"2:\n\t"
- 		"# ending __down_read_trylock\n\t"
-@@ -150,7 +151,7 @@ static inline void __down_write(struct r
- 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
- 	__asm__ __volatile__(
- 		"# beginning down_write\n\t"
--LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
-+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
- 		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
- 		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
- 		"1:\n\t"
-@@ -188,7 +189,7 @@ static inline void __up_read(struct rw_s
- 	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
- 	__asm__ __volatile__(
- 		"# beginning __up_read\n\t"
--LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
-+LOCK	        "  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
- 		"  js        2f\n\t" /* jump if the lock is being waited upon */
- 		"1:\n\t"
- 		LOCK_SECTION_START("")
-@@ -214,7 +215,7 @@ static inline void __up_write(struct rw_
- 	__asm__ __volatile__(
- 		"# beginning __up_write\n\t"
- 		"  movl      %2,%%edx\n\t"
--LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
-+LOCK	        "  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
- 		"  jnz       2f\n\t" /* jump if the lock is being waited upon */
- 		"1:\n\t"
- 		LOCK_SECTION_START("")
-@@ -239,7 +240,7 @@ static inline void __downgrade_write(str
- {
- 	__asm__ __volatile__(
- 		"# beginning __downgrade_write\n\t"
--LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
-+LOCK	        "  addl      %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
- 		"  js        2f\n\t" /* jump if the lock is being waited upon */
- 		"1:\n\t"
- 		LOCK_SECTION_START("")
-@@ -263,7 +264,7 @@ LOCK_PREFIX	"  addl      %2,(%%eax)\n\t"
- static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
- {
- 	__asm__ __volatile__(
--LOCK_PREFIX	"addl %1,%0"
-+LOCK	          "addl %1,%0"
- 		: "=m"(sem->count)
- 		: "ir"(delta), "m"(sem->count));
- }
-@@ -276,7 +277,7 @@ static inline int rwsem_atomic_update(in
- 	int tmp = delta;
- 
- 	__asm__ __volatile__(
--LOCK_PREFIX	"xadd %0,(%2)"
-+LOCK  	          "xadd %0,(%2)"
- 		: "+r"(tmp), "=m"(sem->count)
- 		: "r"(sem), "m"(sem->count)
- 		: "memory");
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/smp_alt.h linux-2.6.12-xen/include/asm-i386/smp_alt.h
---- pristine-linux-2.6.12/include/asm-i386/smp_alt.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-i386/smp_alt.h	2006-02-25 00:12:33.833984903 +0100
-@@ -0,0 +1,32 @@
-+#ifndef __ASM_SMP_ALT_H__
-+#define __ASM_SMP_ALT_H__
-+
-+#include <linux/config.h>
-+
-+#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
-+#define LOCK \
-+        "6677: nop\n" \
-+	".section __smp_alternatives,\"a\"\n" \
-+	".long 6677b\n" \
-+	".long 6678f\n" \
-+	".previous\n" \
-+	".section __smp_replacements,\"a\"\n" \
-+	"6678: .byte 1\n" \
-+	".byte 1\n" \
-+	".byte 0\n" \
-+        ".byte 1\n" \
-+	".byte -1\n" \
-+	"lock\n" \
-+	"nop\n" \
-+	".previous\n"
-+void prepare_for_smp(void);
-+void unprepare_for_smp(void);
-+#else
-+#define LOCK "lock ; "
-+#endif
-+#else
-+#define LOCK ""
-+#endif
-+
-+#endif /* __ASM_SMP_ALT_H__ */
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/smp.h linux-2.6.12-xen/include/asm-i386/smp.h
---- pristine-linux-2.6.12/include/asm-i386/smp.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/smp.h	2006-02-25 00:12:33.814987766 +0100
-@@ -83,6 +83,9 @@ static __inline int logical_smp_processo
- }
- 
- #endif
-+
-+extern int __cpu_disable(void);
-+extern void __cpu_die(unsigned int cpu);
- #endif /* !__ASSEMBLY__ */
- 
- #define NO_PROC_ID		0xFF		/* No processor magic marker */
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/spinlock.h linux-2.6.12-xen/include/asm-i386/spinlock.h
---- pristine-linux-2.6.12/include/asm-i386/spinlock.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/spinlock.h	2006-02-25 00:12:33.834984752 +0100
-@@ -6,6 +6,7 @@
- #include <asm/page.h>
- #include <linux/config.h>
- #include <linux/compiler.h>
-+#include <asm/smp_alt.h>
- 
- asmlinkage int printk(const char * fmt, ...)
- 	__attribute__ ((format (printf, 1, 2)));
-@@ -47,8 +48,9 @@ typedef struct {
- #define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
- 
- #define spin_lock_string \
--	"\n1:\t" \
--	"lock ; decb %0\n\t" \
-+        "1:\n" \
-+	LOCK \
-+	"decb %0\n\t" \
- 	"jns 3f\n" \
- 	"2:\t" \
- 	"rep;nop\n\t" \
-@@ -58,8 +60,9 @@ typedef struct {
- 	"3:\n\t"
- 
- #define spin_lock_string_flags \
--	"\n1:\t" \
--	"lock ; decb %0\n\t" \
-+        "1:\n" \
-+	LOCK \
-+	"decb %0\n\t" \
- 	"jns 4f\n\t" \
- 	"2:\t" \
- 	"testl $0x200, %1\n\t" \
-@@ -121,10 +124,34 @@ static inline void _raw_spin_unlock(spin
- static inline int _raw_spin_trylock(spinlock_t *lock)
- {
- 	char oldval;
-+#ifdef CONFIG_SMP_ALTERNATIVES
- 	__asm__ __volatile__(
--		"xchgb %b0,%1"
-+		"1:movb %1,%b0\n"
-+		"movb $0,%1\n"
-+		"2:"
-+		".section __smp_alternatives,\"a\"\n"
-+		".long 1b\n"
-+		".long 3f\n"
-+		".previous\n"
-+		".section __smp_replacements,\"a\"\n"
-+		"3: .byte 2b - 1b\n"
-+		".byte 5f-4f\n"
-+		".byte 0\n"
-+		".byte 6f-5f\n"
-+		".byte -1\n"
-+		"4: xchgb %b0,%1\n"
-+		"5: movb %1,%b0\n"
-+		"movb $0,%1\n"
-+		"6:\n"
-+		".previous\n"
- 		:"=q" (oldval), "=m" (lock->slock)
- 		:"0" (0) : "memory");
-+#else
-+	__asm__ __volatile__(
-+		"xchgb %b0,%1\n"
-+		:"=q" (oldval), "=m" (lock->slock)
-+		:"0" (0) : "memory");
-+#endif
- 	return oldval > 0;
- }
- 
-@@ -225,8 +252,8 @@ static inline void _raw_write_lock(rwloc
- 	__build_write_lock(rw, "__write_lock_failed");
- }
- 
--#define _raw_read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
--#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-+#define _raw_read_unlock(rw)	asm volatile(LOCK "incl %0" :"=m" ((rw)->lock) : : "memory")
-+#define _raw_write_unlock(rw)	asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
- 
- static inline int _raw_read_trylock(rwlock_t *lock)
- {
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/string.h linux-2.6.12-xen/include/asm-i386/string.h
---- pristine-linux-2.6.12/include/asm-i386/string.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/string.h	2006-02-25 00:12:33.766995000 +0100
-@@ -116,7 +116,8 @@ __asm__ __volatile__(
- 	"orb $1,%%al\n"
- 	"3:"
- 	:"=a" (__res), "=&S" (d0), "=&D" (d1)
--		     :"1" (cs),"2" (ct));
-+	:"1" (cs),"2" (ct)
-+	:"memory");
- return __res;
- }
- 
-@@ -138,8 +139,9 @@ __asm__ __volatile__(
- 	"3:\tsbbl %%eax,%%eax\n\t"
- 	"orb $1,%%al\n"
- 	"4:"
--		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
--		     :"1" (cs),"2" (ct),"3" (count));
-+	:"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
-+	:"1" (cs),"2" (ct),"3" (count)
-+	:"memory");
- return __res;
- }
- 
-@@ -158,7 +160,9 @@ __asm__ __volatile__(
- 	"movl $1,%1\n"
- 	"2:\tmovl %1,%0\n\t"
- 	"decl %0"
--	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
-+	:"=a" (__res), "=&S" (d0)
-+	:"1" (s),"0" (c)
-+	:"memory");
- return __res;
- }
- 
-@@ -175,7 +179,9 @@ __asm__ __volatile__(
- 	"leal -1(%%esi),%0\n"
- 	"2:\ttestb %%al,%%al\n\t"
- 	"jne 1b"
--	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
-+	:"=g" (__res), "=&S" (d0), "=&a" (d1)
-+	:"0" (0),"1" (s),"2" (c)
-+	:"memory");
- return __res;
- }
- 
-@@ -189,7 +195,9 @@ __asm__ __volatile__(
- 	"scasb\n\t"
- 	"notl %0\n\t"
- 	"decl %0"
--	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffffu));
-+	:"=c" (__res), "=&D" (d0)
-+	:"1" (s),"a" (0), "0" (0xffffffffu)
-+	:"memory");
- return __res;
- }
- 
-@@ -333,7 +341,9 @@ __asm__ __volatile__(
- 	"je 1f\n\t"
- 	"movl $1,%0\n"
- 	"1:\tdecl %0"
--	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
-+	:"=D" (__res), "=&c" (d0)
-+	:"a" (c),"0" (cs),"1" (count)
-+	:"memory");
- return __res;
- }
- 
-@@ -369,7 +379,7 @@ __asm__ __volatile__(
- 	"je 2f\n\t"
- 	"stosb\n"
- 	"2:"
--	: "=&c" (d0), "=&D" (d1)
-+	:"=&c" (d0), "=&D" (d1)
- 	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
- 	:"memory");
- return (s);	
-@@ -392,7 +402,8 @@ __asm__ __volatile__(
- 	"jne 1b\n"
- 	"3:\tsubl %2,%0"
- 	:"=a" (__res), "=&d" (d0)
--	:"c" (s),"1" (count));
-+	:"c" (s),"1" (count)
-+	:"memory");
- return __res;
- }
- /* end of additional stuff */
-@@ -473,7 +484,8 @@ static inline void * memscan(void * addr
- 		"dec %%edi\n"
- 		"1:"
- 		: "=D" (addr), "=c" (size)
--		: "0" (addr), "1" (size), "a" (c));
-+		: "0" (addr), "1" (size), "a" (c)
-+		: "memory");
- 	return addr;
- }
- 
-diff -Nurp pristine-linux-2.6.12/include/asm-i386/system.h linux-2.6.12-xen/include/asm-i386/system.h
---- pristine-linux-2.6.12/include/asm-i386/system.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-i386/system.h	2006-02-25 00:12:33.834984752 +0100
-@@ -5,7 +5,7 @@
- #include <linux/kernel.h>
- #include <asm/segment.h>
- #include <asm/cpufeature.h>
--#include <linux/bitops.h> /* for LOCK_PREFIX */
-+#include <asm/smp_alt.h>
- 
- #ifdef __KERNEL__
- 
-@@ -249,19 +249,19 @@ static inline unsigned long __cmpxchg(vo
- 	unsigned long prev;
- 	switch (size) {
- 	case 1:
--		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-+		__asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
- 				     : "=a"(prev)
- 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
- 				     : "memory");
- 		return prev;
- 	case 2:
--		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-+		__asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
- 				     : "=a"(prev)
- 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
- 				     : "memory");
- 		return prev;
- 	case 4:
--		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-+		__asm__ __volatile__(LOCK "cmpxchgl %1,%2"
- 				     : "=a"(prev)
- 				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
- 				     : "memory");
-@@ -425,11 +425,55 @@ struct alt_instr { 
- #endif
- 
- #ifdef CONFIG_SMP
--#define smp_mb()	mb()
--#define smp_rmb()	rmb()
- #define smp_wmb()	wmb()
--#define smp_read_barrier_depends()	read_barrier_depends()
-+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
-+#define smp_alt_mb(instr)                                           \
-+__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
-+		     ".section __smp_alternatives,\"a\"\n"          \
-+		     ".long 6667b\n"                                \
-+                     ".long 6673f\n"                                \
-+		     ".previous\n"                                  \
-+		     ".section __smp_replacements,\"a\"\n"          \
-+		     "6673:.byte 6668b-6667b\n"                     \
-+		     ".byte 6670f-6669f\n"                          \
-+		     ".byte 6671f-6670f\n"                          \
-+                     ".byte 0\n"                                    \
-+		     ".byte %c0\n"                                  \
-+		     "6669:lock;addl $0,0(%%esp)\n"                 \
-+		     "6670:" instr "\n"                             \
-+		     "6671:\n"                                      \
-+		     ".previous\n"                                  \
-+		     :                                              \
-+		     : "i" (X86_FEATURE_XMM2)                       \
-+		     : "memory")
-+#define smp_rmb() smp_alt_mb("lfence")
-+#define smp_mb()  smp_alt_mb("mfence")
-+#define set_mb(var, value) do {                                     \
-+unsigned long __set_mb_temp;                                        \
-+__asm__ __volatile__("6667:movl %1, %0\n6668:\n"                    \
-+		     ".section __smp_alternatives,\"a\"\n"          \
-+		     ".long 6667b\n"                                \
-+		     ".long 6673f\n"                                \
-+		     ".previous\n"                                  \
-+		     ".section __smp_replacements,\"a\"\n"          \
-+		     "6673: .byte 6668b-6667b\n"                    \
-+		     ".byte 6670f-6669f\n"                          \
-+		     ".byte 0\n"                                    \
-+		     ".byte 6671f-6670f\n"                          \
-+		     ".byte -1\n"                                   \
-+		     "6669: xchg %1, %0\n"                          \
-+		     "6670:movl %1, %0\n"                           \
-+		     "6671:\n"                                      \
-+		     ".previous\n"                                  \
-+		     : "=m" (var), "=r" (__set_mb_temp)             \
-+		     : "1" (value)                                  \
-+		     : "memory"); } while (0)
-+#else
-+#define smp_rmb()	rmb()
-+#define smp_mb()	mb()
- #define set_mb(var, value) do { xchg(&var, value); } while (0)
-+#endif
-+#define smp_read_barrier_depends()	read_barrier_depends()
- #else
- #define smp_mb()	barrier()
- #define smp_rmb()	barrier()
-diff -Nurp pristine-linux-2.6.12/include/asm-ia64/gcc_intrin.h linux-2.6.12-xen/include/asm-ia64/gcc_intrin.h
---- pristine-linux-2.6.12/include/asm-ia64/gcc_intrin.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-ia64/gcc_intrin.h	2006-02-16 23:44:08.000000000 +0100
-@@ -26,7 +26,7 @@ extern void ia64_bad_param_for_getreg (v
- 
- register unsigned long ia64_r13 asm ("r13") __attribute_used__;
- 
--#define ia64_setreg(regnum, val)						\
-+#define __ia64_setreg(regnum, val)						\
- ({										\
- 	switch (regnum) {							\
- 	    case _IA64_REG_PSR_L:						\
-@@ -55,7 +55,7 @@ register unsigned long ia64_r13 asm ("r1
- 	}									\
- })
- 
--#define ia64_getreg(regnum)							\
-+#define __ia64_getreg(regnum)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 										\
-@@ -92,7 +92,7 @@ register unsigned long ia64_r13 asm ("r1
- 
- #define ia64_hint_pause 0
- 
--#define ia64_hint(mode)						\
-+#define __ia64_hint(mode)						\
- ({								\
- 	switch (mode) {						\
- 	case ia64_hint_pause:					\
-@@ -374,7 +374,7 @@ register unsigned long ia64_r13 asm ("r1
- 
- #define ia64_invala() asm volatile ("invala" ::: "memory")
- 
--#define ia64_thash(addr)							\
-+#define __ia64_thash(addr)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
-@@ -394,18 +394,18 @@ register unsigned long ia64_r13 asm ("r1
- 
- #define ia64_nop(x)	asm volatile ("nop %0"::"i"(x));
- 
--#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
-+#define __ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")
- 
--#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
-+#define __ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")
- 
- 
--#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
-+#define __ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"			\
- 					     :: "r"(trnum), "r"(addr) : "memory")
- 
--#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
-+#define __ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"			\
- 					     :: "r"(trnum), "r"(addr) : "memory")
- 
--#define ia64_tpa(addr)								\
-+#define __ia64_tpa(addr)							\
- ({										\
- 	__u64 ia64_pa;								\
- 	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
-@@ -415,22 +415,22 @@ register unsigned long ia64_r13 asm ("r1
- #define __ia64_set_dbr(index, val)						\
- 	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_ibr(index, val)						\
-+#define __ia64_set_ibr(index, val)						\
- 	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_pkr(index, val)						\
-+#define __ia64_set_pkr(index, val)						\
- 	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_pmc(index, val)						\
-+#define __ia64_set_pmc(index, val)						\
- 	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_pmd(index, val)						\
-+#define __ia64_set_pmd(index, val)						\
- 	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")
- 
--#define ia64_set_rr(index, val)							\
-+#define __ia64_set_rr(index, val)							\
- 	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");
- 
--#define ia64_get_cpuid(index)								\
-+#define __ia64_get_cpuid(index)								\
- ({											\
- 	__u64 ia64_intri_res;								\
- 	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
-@@ -444,21 +444,21 @@ register unsigned long ia64_r13 asm ("r1
- 	ia64_intri_res;								\
- })
- 
--#define ia64_get_ibr(index)							\
-+#define __ia64_get_ibr(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
- 	ia64_intri_res;								\
- })
- 
--#define ia64_get_pkr(index)							\
-+#define __ia64_get_pkr(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
- 	ia64_intri_res;								\
- })
- 
--#define ia64_get_pmc(index)							\
-+#define __ia64_get_pmc(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
-@@ -466,48 +466,48 @@ register unsigned long ia64_r13 asm ("r1
- })
- 
- 
--#define ia64_get_pmd(index)							\
-+#define __ia64_get_pmd(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
- 	ia64_intri_res;								\
- })
- 
--#define ia64_get_rr(index)							\
-+#define __ia64_get_rr(index)							\
- ({										\
- 	__u64 ia64_intri_res;							\
- 	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
- 	ia64_intri_res;								\
- })
- 
--#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
-+#define __ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")
- 
- 
- #define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")
- 
--#define ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
--#define ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
-+#define __ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
-+#define __ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
- #define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
- #define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")
- 
--#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
-+#define __ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
- 
--#define ia64_ptcga(addr, size)							\
-+#define __ia64_ptcga(addr, size)							\
- do {										\
- 	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
- 	ia64_dv_serialize_data();						\
- } while (0)
- 
--#define ia64_ptcl(addr, size)							\
-+#define __ia64_ptcl(addr, size)							\
- do {										\
- 	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
- 	ia64_dv_serialize_data();						\
- } while (0)
- 
--#define ia64_ptri(addr, size)						\
-+#define __ia64_ptri(addr, size)						\
- 	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")
- 
--#define ia64_ptrd(addr, size)						\
-+#define __ia64_ptrd(addr, size)						\
- 	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
- 
- /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
-@@ -589,7 +589,7 @@ do {										\
-         }								\
- })
- 
--#define ia64_intrin_local_irq_restore(x)			\
-+#define __ia64_intrin_local_irq_restore(x)			\
- do {								\
- 	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
- 		      "(p6) ssm psr.i;"				\
-@@ -598,4 +598,6 @@ do {								\
- 		      :: "r"((x)) : "p6", "p7", "memory");	\
- } while (0)
- 
-+#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
-+
- #endif /* _ASM_IA64_GCC_INTRIN_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-ia64/intel_intrin.h linux-2.6.12-xen/include/asm-ia64/intel_intrin.h
---- pristine-linux-2.6.12/include/asm-ia64/intel_intrin.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-ia64/intel_intrin.h	2006-02-16 23:44:08.000000000 +0100
-@@ -119,10 +119,10 @@ __s64 _m64_popcnt(__s64 a);
- 		 	 * intrinsic
- 		 	 */
- 
--#define ia64_getreg		__getReg
--#define ia64_setreg		__setReg
-+#define __ia64_getreg		__getReg
-+#define __ia64_setreg		__setReg
- 
--#define ia64_hint(x)
-+#define __ia64_hint(x)
- 
- #define ia64_mux1_brcst	 0
- #define ia64_mux1_mix		 8
-@@ -135,16 +135,16 @@ __s64 _m64_popcnt(__s64 a);
- #define ia64_getf_exp		__getf_exp
- #define ia64_shrp		_m64_shrp
- 
--#define ia64_tpa		__tpa
-+#define __ia64_tpa		__tpa
- #define ia64_invala		__invala
- #define ia64_invala_gr		__invala_gr
- #define ia64_invala_fr		__invala_fr
- #define ia64_nop		__nop
- #define ia64_sum		__sum
--#define ia64_ssm		__ssm
-+#define __ia64_ssm		__ssm
- #define ia64_rum		__rum
--#define ia64_rsm		__rsm
--#define ia64_fc 		__fc
-+#define __ia64_rsm		__rsm
-+#define __ia64_fc 		__fc
- 
- #define ia64_ldfs		__ldfs
- #define ia64_ldfd		__ldfd
-@@ -182,24 +182,24 @@ __s64 _m64_popcnt(__s64 a);
- 
- #define __ia64_set_dbr(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_DBR, index, val)
--#define ia64_set_ibr(index, val)	\
-+#define __ia64_set_ibr(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_IBR, index, val)
--#define ia64_set_pkr(index, val)	\
-+#define __ia64_set_pkr(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_PKR, index, val)
--#define ia64_set_pmc(index, val)	\
-+#define __ia64_set_pmc(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_PMC, index, val)
--#define ia64_set_pmd(index, val)	\
-+#define __ia64_set_pmd(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_PMD, index, val)
--#define ia64_set_rr(index, val)	\
-+#define __ia64_set_rr(index, val)	\
- 		__setIndReg(_IA64_REG_INDR_RR, index, val)
- 
--#define ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
-+#define __ia64_get_cpuid(index) 	__getIndReg(_IA64_REG_INDR_CPUID, index)
- #define __ia64_get_dbr(index) 	__getIndReg(_IA64_REG_INDR_DBR, index)
--#define ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
--#define ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
--#define ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
--#define ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
--#define ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
-+#define __ia64_get_ibr(index) 	__getIndReg(_IA64_REG_INDR_IBR, index)
-+#define __ia64_get_pkr(index) 	__getIndReg(_IA64_REG_INDR_PKR, index)
-+#define __ia64_get_pmc(index) 	__getIndReg(_IA64_REG_INDR_PMC, index)
-+#define __ia64_get_pmd(index)  	__getIndReg(_IA64_REG_INDR_PMD, index)
-+#define __ia64_get_rr(index) 	__getIndReg(_IA64_REG_INDR_RR, index)
- 
- #define ia64_srlz_d		__dsrlz
- #define ia64_srlz_i		__isrlz
-@@ -218,18 +218,18 @@ __s64 _m64_popcnt(__s64 a);
- #define ia64_ld8_acq		__ld8_acq
- 
- #define ia64_sync_i		__synci
--#define ia64_thash		__thash
--#define ia64_ttag		__ttag
--#define ia64_itcd		__itcd
--#define ia64_itci		__itci
--#define ia64_itrd		__itrd
--#define ia64_itri		__itri
--#define ia64_ptce		__ptce
--#define ia64_ptcl		__ptcl
--#define ia64_ptcg		__ptcg
--#define ia64_ptcga		__ptcga
--#define ia64_ptri		__ptri
--#define ia64_ptrd		__ptrd
-+#define __ia64_thash		__thash
-+#define __ia64_ttag		__ttag
-+#define __ia64_itcd		__itcd
-+#define __ia64_itci		__itci
-+#define __ia64_itrd		__itrd
-+#define __ia64_itri		__itri
-+#define __ia64_ptce		__ptce
-+#define __ia64_ptcl		__ptcl
-+#define __ia64_ptcg		__ptcg
-+#define __ia64_ptcga		__ptcga
-+#define __ia64_ptri		__ptri
-+#define __ia64_ptrd		__ptrd
- #define ia64_dep_mi		_m64_dep_mi
- 
- /* Values for lfhint in __lfetch and __lfetch_fault */
-@@ -244,14 +244,16 @@ __s64 _m64_popcnt(__s64 a);
- #define ia64_lfetch_fault	__lfetch_fault
- #define ia64_lfetch_fault_excl	__lfetch_fault_excl
- 
--#define ia64_intrin_local_irq_restore(x)		\
-+#define __ia64_intrin_local_irq_restore(x)		\
- do {							\
- 	if ((x) != 0) {					\
--		ia64_ssm(IA64_PSR_I);			\
-+		__ia64_ssm(IA64_PSR_I);			\
- 		ia64_srlz_d();				\
- 	} else {					\
--		ia64_rsm(IA64_PSR_I);			\
-+		__ia64_rsm(IA64_PSR_I);			\
- 	}						\
- } while (0)
- 
-+#define __ia64_get_psr_i()	(__ia64_getreg(_IA64_REG_PSR) & 0x4000UL)
-+
- #endif /* _ASM_IA64_INTEL_INTRIN_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-ia64/pal.h linux-2.6.12-xen/include/asm-ia64/pal.h
---- pristine-linux-2.6.12/include/asm-ia64/pal.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-ia64/pal.h	2006-02-16 23:44:08.000000000 +0100
-@@ -79,6 +79,7 @@
- #ifndef __ASSEMBLY__
- 
- #include <linux/types.h>
-+#include <asm/processor.h>
- #include <asm/fpu.h>
- 
- /*
-diff -Nurp pristine-linux-2.6.12/include/asm-ia64/privop.h linux-2.6.12-xen/include/asm-ia64/privop.h
---- pristine-linux-2.6.12/include/asm-ia64/privop.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-ia64/privop.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,59 @@
-+#ifndef _ASM_IA64_PRIVOP_H
-+#define _ASM_IA64_PRIVOP_H
-+
-+/*
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ *	Dan Magenheimer <dan.magenheimer at hp.com>
-+ *
-+ */
-+
-+#include <linux/config.h>
-+#ifdef CONFIG_XEN
-+#include <asm/xen/privop.h>
-+#endif
-+
-+#ifndef __ASSEMBLY
-+
-+#ifndef IA64_PARAVIRTUALIZED
-+
-+#define ia64_getreg			__ia64_getreg
-+#define ia64_setreg			__ia64_setreg
-+#define ia64_hint			__ia64_hint
-+#define ia64_thash			__ia64_thash
-+#define ia64_itci			__ia64_itci
-+#define ia64_itcd			__ia64_itcd
-+#define ia64_itri			__ia64_itri
-+#define ia64_itrd			__ia64_itrd
-+#define ia64_tpa			__ia64_tpa
-+#define ia64_set_ibr			__ia64_set_ibr
-+#define ia64_set_pkr			__ia64_set_pkr
-+#define ia64_set_pmc			__ia64_set_pmc
-+#define ia64_set_pmd			__ia64_set_pmd
-+#define ia64_set_rr			__ia64_set_rr
-+#define ia64_get_cpuid			__ia64_get_cpuid
-+#define ia64_get_ibr			__ia64_get_ibr
-+#define ia64_get_pkr			__ia64_get_pkr
-+#define ia64_get_pmc			__ia64_get_pmc
-+#define ia64_get_pmd			__ia64_get_pmd
-+#define ia64_get_rr			__ia64_get_rr
-+#define ia64_fc				__ia64_fc
-+#define ia64_ssm			__ia64_ssm
-+#define ia64_rsm			__ia64_rsm
-+#define ia64_ptce			__ia64_ptce
-+#define ia64_ptcga			__ia64_ptcga
-+#define ia64_ptcl			__ia64_ptcl
-+#define ia64_ptri			__ia64_ptri
-+#define ia64_ptrd			__ia64_ptrd
-+#define	ia64_get_psr_i			__ia64_get_psr_i
-+#define ia64_intrin_local_irq_restore	__ia64_intrin_local_irq_restore
-+#define ia64_pal_halt_light		__ia64_pal_halt_light
-+#define	ia64_leave_kernel		__ia64_leave_kernel
-+#define	ia64_leave_syscall		__ia64_leave_syscall
-+#define	ia64_switch_to			__ia64_switch_to
-+#define	ia64_pal_call_static		__ia64_pal_call_static
-+
-+#endif /* !IA64_PARAVIRTUALIZED */
-+
-+#endif /* !__ASSEMBLY */
-+
-+#endif /* _ASM_IA64_PRIVOP_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-ia64/processor.h linux-2.6.12-xen/include/asm-ia64/processor.h
---- pristine-linux-2.6.12/include/asm-ia64/processor.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-ia64/processor.h	2006-02-16 23:44:08.000000000 +0100
-@@ -19,6 +19,7 @@
- #include <asm/kregs.h>
- #include <asm/ptrace.h>
- #include <asm/ustack.h>
-+#include <asm/privop.h>
- 
- /* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
- #define ARCH_HAS_SCHED_DOMAIN
-diff -Nurp pristine-linux-2.6.12/include/asm-ia64/system.h linux-2.6.12-xen/include/asm-ia64/system.h
---- pristine-linux-2.6.12/include/asm-ia64/system.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-ia64/system.h	2006-02-16 23:44:08.000000000 +0100
-@@ -124,7 +124,7 @@ extern struct ia64_boot_param {
- #define __local_irq_save(x)			\
- do {						\
- 	ia64_stop();				\
--	(x) = ia64_getreg(_IA64_REG_PSR);	\
-+	(x) = ia64_get_psr_i();			\
- 	ia64_stop();				\
- 	ia64_rsm(IA64_PSR_I);			\
- } while (0)
-@@ -172,7 +172,7 @@ do {								\
- #endif /* !CONFIG_IA64_DEBUG_IRQ */
- 
- #define local_irq_enable()	({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); })
--#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); })
-+#define local_save_flags(flags)	({ ia64_stop(); (flags) = ia64_get_psr_i(); })
- 
- #define irqs_disabled()				\
- ({						\
-diff -Nurp pristine-linux-2.6.12/include/asm-ia64/xen/privop.h linux-2.6.12-xen/include/asm-ia64/xen/privop.h
---- pristine-linux-2.6.12/include/asm-ia64/xen/privop.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-ia64/xen/privop.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,272 @@
-+#ifndef _ASM_IA64_XEN_PRIVOP_H
-+#define _ASM_IA64_XEN_PRIVOP_H
-+
-+/*
-+ * Copyright (C) 2005 Hewlett-Packard Co
-+ *	Dan Magenheimer <dan.magenheimer at hp.com>
-+ *
-+ * Paravirtualizations of privileged operations for Xen/ia64
-+ *
-+ */
-+
-+
-+#include <asm/xen/asm-xsi-offsets.h>
-+
-+#define IA64_PARAVIRTUALIZED
-+
-+#ifdef __ASSEMBLY__
-+#define	XEN_HYPER_RFI			break 0x1
-+#define	XEN_HYPER_RSM_PSR_DT		break 0x2
-+#define	XEN_HYPER_SSM_PSR_DT		break 0x3
-+#define	XEN_HYPER_COVER			break 0x4
-+#define	XEN_HYPER_ITC_D			break 0x5
-+#define	XEN_HYPER_ITC_I			break 0x6
-+#define	XEN_HYPER_SSM_I			break 0x7
-+#define	XEN_HYPER_GET_IVR		break 0x8
-+#define	XEN_HYPER_GET_TPR		break 0x9
-+#define	XEN_HYPER_SET_TPR		break 0xa
-+#define	XEN_HYPER_EOI			break 0xb
-+#define	XEN_HYPER_SET_ITM		break 0xc
-+#define	XEN_HYPER_THASH			break 0xd
-+#define	XEN_HYPER_PTC_GA		break 0xe
-+#define	XEN_HYPER_ITR_D			break 0xf
-+#define	XEN_HYPER_GET_RR		break 0x10
-+#define	XEN_HYPER_SET_RR		break 0x11
-+#define	XEN_HYPER_SET_KR		break 0x12
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+#ifdef MODULE
-+extern int is_running_on_xen(void);
-+#define running_on_xen (is_running_on_xen())
-+#else
-+extern int running_on_xen;
-+#endif
-+
-+#define	XEN_HYPER_SSM_I			asm("break 0x7");
-+#define	XEN_HYPER_GET_IVR		asm("break 0x8");
-+
-+/************************************************/
-+/* Instructions paravirtualized for correctness */
-+/************************************************/
-+
-+/* "fc" and "thash" are privilege-sensitive instructions, meaning they
-+ *  may have different semantics depending on whether they are executed
-+ *  at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
-+ *  be allowed to execute directly, lest incorrect semantics result. */
-+extern unsigned long xen_fc(unsigned long addr);
-+#define ia64_fc(addr)			xen_fc((unsigned long)(addr))
-+extern unsigned long xen_thash(unsigned long addr);
-+#define ia64_thash(addr)		xen_thash((unsigned long)(addr))
-+/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
-+ * is not currently used (though it may be in a long-format VHPT system!)
-+ * and the semantics of cover only change if psr.ic is off which is very
-+ * rare (and currently non-existent outside of assembly code). */
-+
-+/* There are also privilege-sensitive registers.  These registers are
-+ * readable at any privilege level but only writable at PL0. */
-+extern unsigned long xen_get_cpuid(int index);
-+#define	ia64_get_cpuid(i)		xen_get_cpuid(i)
-+extern unsigned long xen_get_pmd(int index);
-+#define	ia64_get_pmd(i)			xen_get_pmd(i)
-+extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
-+extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
-+
-+/************************************************/
-+/* Instructions paravirtualized for performance */
-+/************************************************/
-+
-+/* Xen uses memory-mapped virtual privileged registers for access to many
-+ * performance-sensitive privileged registers.  Some, like the processor
-+ * status register (psr), are broken up into multiple memory locations.
-+ * Others, like "pend", are abstractions based on privileged registers.
-+ * "Pend" is guaranteed to be set if reading cr.ivr would return a
-+ * (non-spurious) interrupt. */
-+#define xen_get_virtual_psr_i()		(*(int *)(XSI_PSR_I))
-+#define xen_set_virtual_psr_i(_val)	({ *(int *)(XSI_PSR_I) = _val ? 1:0; })
-+#define xen_set_virtual_psr_ic(_val)	({ *(int *)(XSI_PSR_IC) = _val ? 1:0; })
-+#define xen_get_virtual_pend()		(*(int *)(XSI_PEND))
-+
-+/* Hyperprivops are "break" instructions with a well-defined API.
-+ * In particular, the virtual psr.ic bit must be off; in this way
-+ * it is guaranteed to never conflict with a linux break instruction.
-+ * Normally, this is done in a xen stub but this one is frequent enough
-+ * that we inline it */
-+#define xen_hyper_ssm_i()						\
-+({									\
-+	xen_set_virtual_psr_i(0);					\
-+	xen_set_virtual_psr_ic(0);					\
-+	XEN_HYPER_SSM_I;						\
-+})
-+
-+/* turning off interrupts can be paravirtualized simply by writing
-+ * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
-+#define xen_rsm_i()	xen_set_virtual_psr_i(0)
-+
-+/* turning on interrupts is a bit more complicated.. write to the
-+ * memory-mapped virtual psr.i bit first (to avoid race condition),
-+ * then if any interrupts were pending, we have to execute a hyperprivop
-+ * to ensure the pending interrupt gets delivered; else we're done! */
-+#define xen_ssm_i()							\
-+({									\
-+	int old = xen_get_virtual_psr_i();				\
-+	xen_set_virtual_psr_i(1);					\
-+	if (!old && xen_get_virtual_pend()) xen_hyper_ssm_i();		\
-+})
-+
-+#define xen_ia64_intrin_local_irq_restore(x)				\
-+{									\
-+     if (running_on_xen) {						\
-+	if ((x) & IA64_PSR_I) { xen_ssm_i(); }				\
-+	else { xen_rsm_i(); }						\
-+    }									\
-+    else __ia64_intrin_local_irq_restore((x));				\
-+}
-+
-+#define	xen_get_psr_i()							\
-+(									\
-+	(running_on_xen) ?						\
-+		(xen_get_virtual_psr_i() ? IA64_PSR_I : 0)		\
-+		: __ia64_get_psr_i()					\
-+)
-+
-+#define xen_ia64_ssm(mask)						\
-+{									\
-+	if ((mask)==IA64_PSR_I) {					\
-+		if (running_on_xen) { xen_ssm_i(); }			\
-+		else { __ia64_ssm(mask); }				\
-+	}								\
-+	else { __ia64_ssm(mask); }					\
-+}
-+
-+#define xen_ia64_rsm(mask)						\
-+{									\
-+	if ((mask)==IA64_PSR_I) {					\
-+		if (running_on_xen) { xen_rsm_i(); }			\
-+		else { __ia64_rsm(mask); }				\
-+	}								\
-+	else { __ia64_rsm(mask); }					\
-+}
-+
-+
-+/* Although all privileged operations can be left to trap and will
-+ * be properly handled by Xen, some are frequent enough that we use
-+ * hyperprivops for performance. */
-+
-+extern unsigned long xen_get_ivr(void);
-+extern unsigned long xen_get_tpr(void);
-+extern void xen_set_itm(unsigned long);
-+extern void xen_set_tpr(unsigned long);
-+extern void xen_eoi(void);
-+extern void xen_set_rr(unsigned long index, unsigned long val);
-+extern unsigned long xen_get_rr(unsigned long index);
-+extern void xen_set_kr(unsigned long index, unsigned long val);
-+
-+/* Note: It may look wrong to test for running_on_xen in each case.
-+ * However regnum is always a constant so, as written, the compiler
-+ * eliminates the switch statement, whereas running_on_xen must be
-+ * tested dynamically. */
-+#define xen_ia64_getreg(regnum)						\
-+({									\
-+	__u64 ia64_intri_res;						\
-+									\
-+	switch(regnum) {						\
-+	case _IA64_REG_CR_IVR:						\
-+		ia64_intri_res = (running_on_xen) ?			\
-+			xen_get_ivr() :					\
-+			__ia64_getreg(regnum);				\
-+		break;							\
-+	case _IA64_REG_CR_TPR:						\
-+		ia64_intri_res = (running_on_xen) ?			\
-+			xen_get_tpr() :					\
-+			__ia64_getreg(regnum);				\
-+		break;							\
-+	case _IA64_REG_AR_EFLAG:					\
-+		ia64_intri_res = (running_on_xen) ?			\
-+			xen_get_eflag() :				\
-+			__ia64_getreg(regnum);				\
-+		break;							\
-+	default:							\
-+		ia64_intri_res = __ia64_getreg(regnum);			\
-+		break;							\
-+	}								\
-+	ia64_intri_res;							\
-+})
-+
-+#define xen_ia64_setreg(regnum,val)					\
-+({									\
-+	switch(regnum) {						\
-+	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:			\
-+		(running_on_xen) ?					\
-+			xen_set_kr((regnum-_IA64_REG_AR_KR0), val) :	\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	case _IA64_REG_CR_ITM:						\
-+		(running_on_xen) ?					\
-+			xen_set_itm(val) :				\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	case _IA64_REG_CR_TPR:						\
-+		(running_on_xen) ?					\
-+			xen_set_tpr(val) :				\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	case _IA64_REG_CR_EOI:						\
-+		(running_on_xen) ?					\
-+			xen_eoi() :					\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	case _IA64_REG_AR_EFLAG:					\
-+		(running_on_xen) ?					\
-+			xen_set_eflag(val) :				\
-+			__ia64_setreg(regnum,val);			\
-+		break;							\
-+	default:							\
-+		__ia64_setreg(regnum,val);				\
-+		break;							\
-+	}								\
-+})
-+
-+#define ia64_ssm			xen_ia64_ssm
-+#define ia64_rsm			xen_ia64_rsm
-+#define ia64_intrin_local_irq_restore	xen_ia64_intrin_local_irq_restore
-+#define	ia64_ptcga			xen_ptcga
-+#define	ia64_set_rr(index,val)		xen_set_rr(index,val)
-+#define	ia64_get_rr(index)		xen_get_rr(index)
-+#define ia64_getreg			xen_ia64_getreg
-+#define ia64_setreg			xen_ia64_setreg
-+#define	ia64_get_psr_i			xen_get_psr_i
-+
-+/* the remainder of these are not performance-sensitive so it's
-+ * OK to not paravirtualize and just take a privop trap and emulate */
-+#define ia64_hint			__ia64_hint
-+#define ia64_set_pmd			__ia64_set_pmd
-+#define ia64_itci			__ia64_itci
-+#define ia64_itcd			__ia64_itcd
-+#define ia64_itri			__ia64_itri
-+#define ia64_itrd			__ia64_itrd
-+#define ia64_tpa			__ia64_tpa
-+#define ia64_set_ibr			__ia64_set_ibr
-+#define ia64_set_pkr			__ia64_set_pkr
-+#define ia64_set_pmc			__ia64_set_pmc
-+#define ia64_get_ibr			__ia64_get_ibr
-+#define ia64_get_pkr			__ia64_get_pkr
-+#define ia64_get_pmc			__ia64_get_pmc
-+#define ia64_ptce			__ia64_ptce
-+#define ia64_ptcl			__ia64_ptcl
-+#define ia64_ptri			__ia64_ptri
-+#define ia64_ptrd			__ia64_ptrd
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+/* these routines utilize privilege-sensitive or performance-sensitive
-+ * privileged instructions so the code must be replaced with
-+ * paravirtualized versions */
-+#define ia64_pal_halt_light		xen_pal_halt_light
-+#define	ia64_leave_kernel		xen_leave_kernel
-+#define	ia64_leave_syscall		xen_leave_syscall
-+#define	ia64_trace_syscall		xen_trace_syscall
-+#define	ia64_switch_to			xen_switch_to
-+#define	ia64_pal_call_static		xen_pal_call_static
-+
-+#endif /* _ASM_IA64_XEN_PRIVOP_H */
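
The xen_rsm_i()/xen_ssm_i() paths above reduce interrupt masking to plain
writes to the memory-mapped XSI_PSR_I flag; only the enable path ever needs
a hyperprivop, and only when an event is already pending.  A minimal
user-space model of that sequence follows; all names are illustrative and
none of this is the real Xen/ia64 interface:

/* Models the paravirtualized interrupt enable/disable sequence above:
 * masking is a plain memory write, enabling writes the flag first and
 * only then (if something is pending) invokes a modelled hyperprivop. */
#include <stdio.h>

static int virtual_psr_i;	/* stands in for *(int *)XSI_PSR_I */
static int virtual_pend;	/* stands in for *(int *)XSI_PEND  */

static void model_hyper_ssm_i(void)
{
	/* stands in for the XEN_HYPER_SSM_I break instruction */
	printf("hyperprivop: deliver pending interrupt\n");
	virtual_pend = 0;
}

static void model_rsm_i(void)		/* interrupts off */
{
	virtual_psr_i = 0;		/* just a memory write */
}

static void model_ssm_i(void)		/* interrupts on */
{
	int old = virtual_psr_i;

	virtual_psr_i = 1;		/* write the flag first */
	if (!old && virtual_pend)	/* trap only if something is pending */
		model_hyper_ssm_i();
}

int main(void)
{
	model_rsm_i();
	virtual_pend = 1;		/* an event arrives while masked */
	model_ssm_i();			/* enable: one modelled hyperprivop */
	model_ssm_i();			/* already enabled: no trap */
	return 0;
}

Setting the flag before testing the pend bit is what closes the race the
xen_ssm_i() comment mentions: an event arriving between the two steps is
still seen as pending and forces the delivery path.
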
-diff -Nurp pristine-linux-2.6.12/include/asm-x86_64/smp.h linux-2.6.12-xen/include/asm-x86_64/smp.h
---- pristine-linux-2.6.12/include/asm-x86_64/smp.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/asm-x86_64/smp.h	2006-02-25 00:12:33.766995000 +0100
-@@ -46,6 +46,8 @@ extern int pic_mode;
- extern int smp_num_siblings;
- extern void smp_flush_tlb(void);
- extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-+extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
-+				     int retry, int wait);
- extern void smp_send_reschedule(int cpu);
- extern void smp_invalidate_rcv(void);		/* Process an NMI */
- extern void zap_low_mappings(void);
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/agp.h linux-2.6.12-xen/include/asm-xen/asm-i386/agp.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/agp.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/agp.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,37 @@
-+#ifndef AGP_H
-+#define AGP_H 1
-+
-+#include <asm/pgtable.h>
-+#include <asm/cacheflush.h>
-+#include <asm/system.h>
-+
-+/* 
-+ * Functions to keep the agpgart mappings coherent with the MMU.
-+ * The GART gives the CPU a physical alias of pages in memory. The alias region is
-+ * mapped uncacheable. Make sure there are no conflicting mappings
-+ * with different cacheability attributes for the same page. This avoids
-+ * data corruption on some CPUs.
-+ */
-+
-+int map_page_into_agp(struct page *page);
-+int unmap_page_from_agp(struct page *page);
-+#define flush_agp_mappings() global_flush_tlb()
-+
-+/* Could use CLFLUSH here if the cpu supports it. But then it would
-+   need to be called for each cacheline of the whole page so it may not be 
-+   worth it. Would need a page for it. */
-+#define flush_agp_cache() wbinvd()
-+
-+/* Convert a physical address to an address suitable for the GART. */
-+#define phys_to_gart(x) phys_to_machine(x)
-+#define gart_to_phys(x) machine_to_phys(x)
-+
-+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
-+#define alloc_gatt_pages(order)	({                                          \
-+	char *_t; dma_addr_t _d;                                            \
-+	_t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL);    \
-+	_t; })
-+#define free_gatt_pages(table, order)	\
-+	dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/desc.h linux-2.6.12-xen/include/asm-xen/asm-i386/desc.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/desc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/desc.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,148 @@
-+#ifndef __ARCH_DESC_H
-+#define __ARCH_DESC_H
-+
-+#include <asm/ldt.h>
-+#include <asm/segment.h>
-+
-+#define CPU_16BIT_STACK_SIZE 1024
-+
-+#ifndef __ASSEMBLY__
-+
-+#include <linux/preempt.h>
-+#include <linux/smp.h>
-+
-+#include <asm/mmu.h>
-+
-+extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
-+
-+DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-+
-+struct Xgt_desc_struct {
-+	unsigned short size;
-+	unsigned long address __attribute__((packed));
-+	unsigned short pad;
-+} __attribute__ ((packed));
-+
-+extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
-+
-+#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
-+#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
-+
-+#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)cpu_gdt_descr[(_cpu)].address)
-+
-+/*
-+ * This is the ldt that every process will get unless we need
-+ * something other than this.
-+ */
-+extern struct desc_struct default_ldt[];
-+extern void set_intr_gate(unsigned int irq, void * addr);
-+
-+#define _set_tssldt_desc(n,addr,limit,type) \
-+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-+	"movw %%ax,2(%2)\n\t" \
-+	"rorl $16,%%eax\n\t" \
-+	"movb %%al,4(%2)\n\t" \
-+	"movb %4,5(%2)\n\t" \
-+	"movb $0,6(%2)\n\t" \
-+	"movb %%ah,7(%2)\n\t" \
-+	"rorl $16,%%eax" \
-+	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
-+
-+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
-+{
-+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
-+		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
-+}
-+
-+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
-+
-+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
-+{
-+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
-+	    (int)addr, ((size << 3)-1), 0x82);
-+}
-+
-+#define LDT_entry_a(info) \
-+	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-+
-+#define LDT_entry_b(info) \
-+	(((info)->base_addr & 0xff000000) | \
-+	(((info)->base_addr & 0x00ff0000) >> 16) | \
-+	((info)->limit & 0xf0000) | \
-+	(((info)->read_exec_only ^ 1) << 9) | \
-+	((info)->contents << 10) | \
-+	(((info)->seg_not_present ^ 1) << 15) | \
-+	((info)->seg_32bit << 22) | \
-+	((info)->limit_in_pages << 23) | \
-+	((info)->useable << 20) | \
-+	0x7000)
-+
-+#define LDT_empty(info) (\
-+	(info)->base_addr	== 0	&& \
-+	(info)->limit		== 0	&& \
-+	(info)->contents	== 0	&& \
-+	(info)->read_exec_only	== 1	&& \
-+	(info)->seg_32bit	== 0	&& \
-+	(info)->limit_in_pages	== 0	&& \
-+	(info)->seg_not_present	== 1	&& \
-+	(info)->useable		== 0	)
-+
-+extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
-+
-+#if TLS_SIZE != 24
-+# error update this code.
-+#endif
-+
-+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-+{
-+#define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), *(u64 *)&t->tls_array[i])
-+	C(0); C(1); C(2);
-+#undef C
-+}
-+
-+static inline void clear_LDT(void)
-+{
-+	int cpu = get_cpu();
-+
-+	/*
-+	 * NB. We load the default_ldt for lcall7/27 handling on demand, as
-+	 * it slows down context switching. Noone uses it anyway.
-+	 * it slows down context switching. No one uses it anyway.
-+	cpu = cpu;		/* XXX avoid compiler warning */
-+	xen_set_ldt(0UL, 0);
-+	put_cpu();
-+}
-+
-+/*
-+ * load one particular LDT into the current CPU
-+ */
-+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
-+{
-+	void *segments = pc->ldt;
-+	int count = pc->size;
-+
-+	if (likely(!count))
-+		segments = NULL;
-+
-+	xen_set_ldt((unsigned long)segments, count);
-+}
-+
-+static inline void load_LDT(mm_context_t *pc)
-+{
-+	int cpu = get_cpu();
-+	load_LDT_nolock(pc, cpu);
-+	put_cpu();
-+}
-+
-+static inline unsigned long get_desc_base(unsigned long *desc)
-+{
-+	unsigned long base;
-+	base = ((desc[0] >> 16)  & 0x0000ffff) |
-+		((desc[1] << 16) & 0x00ff0000) |
-+		(desc[1] & 0xff000000);
-+	return base;
-+}
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif
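
The LDT_entry_a()/LDT_entry_b() macros above pack a segment description into
the two 32-bit words of an x86 descriptor.  A stand-alone illustration of
that packing; the struct below only mirrors the fields the macros touch and
is not the kernel's struct user_desc:

/* Packs an example segment description the same way the macros above do
 * and prints the resulting descriptor words. */
#include <stdio.h>

struct seg_info {			/* illustrative subset of user_desc */
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;
	unsigned int seg_not_present:1;
	unsigned int useable:1;
};

#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

#define LDT_entry_b(info) \
	(((info)->base_addr & 0xff000000) | \
	(((info)->base_addr & 0x00ff0000) >> 16) | \
	((info)->limit & 0xf0000) | \
	(((info)->read_exec_only ^ 1) << 9) | \
	((info)->contents << 10) | \
	(((info)->seg_not_present ^ 1) << 15) | \
	((info)->seg_32bit << 22) | \
	((info)->limit_in_pages << 23) | \
	((info)->useable << 20) | \
	0x7000)

int main(void)
{
	struct seg_info d = {
		.base_addr = 0x12345678, .limit = 0xfffff,
		.seg_32bit = 1, .limit_in_pages = 1, .useable = 1,
	};

	printf("word a = %#010x, word b = %#010x\n",
	       LDT_entry_a(&d), LDT_entry_b(&d));
	return 0;
}
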
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/dma-mapping.h linux-2.6.12-xen/include/asm-xen/asm-i386/dma-mapping.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/dma-mapping.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/dma-mapping.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,156 @@
-+#ifndef _ASM_I386_DMA_MAPPING_H
-+#define _ASM_I386_DMA_MAPPING_H
-+
-+/*
-+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
-+ * documentation.
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/mm.h>
-+#include <asm/cache.h>
-+#include <asm/io.h>
-+#include <asm/scatterlist.h>
-+#include <asm-i386/swiotlb.h>
-+
-+static inline int
-+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-+{
-+	dma_addr_t mask = 0xffffffff;
-+	/* If the device has a mask, use it, otherwise default to 32 bits */
-+	if (hwdev && hwdev->dma_mask)
-+		mask = *hwdev->dma_mask;
-+	return (addr & ~mask) != 0;
-+}
-+
-+static inline int
-+range_straddles_page_boundary(void *p, size_t size)
-+{
-+	extern unsigned long *contiguous_bitmap;
-+	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-+		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
-+}
-+
-+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-+
-+void *dma_alloc_coherent(struct device *dev, size_t size,
-+			   dma_addr_t *dma_handle, unsigned int __nocast flag);
-+
-+void dma_free_coherent(struct device *dev, size_t size,
-+			 void *vaddr, dma_addr_t dma_handle);
-+
-+extern dma_addr_t
-+dma_map_single(struct device *dev, void *ptr, size_t size,
-+	       enum dma_data_direction direction);
-+
-+extern void
-+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-+		 enum dma_data_direction direction);
-+
-+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-+		      int nents, enum dma_data_direction direction);
-+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+			 int nents, enum dma_data_direction direction);
-+
-+extern dma_addr_t
-+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-+	     size_t size, enum dma_data_direction direction);
-+
-+extern void
-+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-+	       enum dma_data_direction direction);
-+
-+extern void
-+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-+			enum dma_data_direction direction);
-+
-+extern void
-+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-+                           enum dma_data_direction direction);
-+
-+static inline void
-+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-+			      unsigned long offset, size_t size,
-+			      enum dma_data_direction direction)
-+{
-+	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-+}
-+
-+static inline void
-+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-+				 unsigned long offset, size_t size,
-+				 enum dma_data_direction direction)
-+{
-+	dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-+}
-+
-+static inline void
-+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-+		    enum dma_data_direction direction)
-+{
-+	if (swiotlb)
-+		swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
-+	flush_write_buffers();
-+}
-+
-+static inline void
-+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-+		    enum dma_data_direction direction)
-+{
-+	if (swiotlb)
-+		swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
-+	flush_write_buffers();
-+}
-+
-+extern int
-+dma_mapping_error(dma_addr_t dma_addr);
-+
-+extern int
-+dma_supported(struct device *dev, u64 mask);
-+
-+static inline int
-+dma_set_mask(struct device *dev, u64 mask)
-+{
-+	if(!dev->dma_mask || !dma_supported(dev, mask))
-+		return -EIO;
-+
-+	*dev->dma_mask = mask;
-+
-+	return 0;
-+}
-+
-+#ifdef __i386__
-+static inline int
-+dma_get_cache_alignment(void)
-+{
-+	/* no easy way to get cache size on all x86, so return the
-+	 * maximum possible, to be safe */
-+	return (1 << L1_CACHE_SHIFT_MAX);
-+}
-+#else
-+extern int dma_get_cache_alignment(void);
-+#endif
-+
-+#define dma_is_consistent(d)	(1)
-+
-+static inline void
-+dma_cache_sync(void *vaddr, size_t size,
-+	       enum dma_data_direction direction)
-+{
-+	flush_write_buffers();
-+}
-+
-+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-+extern int
-+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
-+			    dma_addr_t device_addr, size_t size, int flags);
-+
-+extern void
-+dma_release_declared_memory(struct device *dev);
-+
-+extern void *
-+dma_mark_declared_memory_occupied(struct device *dev,
-+				  dma_addr_t device_addr, size_t size);
-+
-+#endif
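
range_straddles_page_boundary() above combines two tests: does the buffer
cross a page boundary at all, and if so, are the underlying machine frames
still contiguous according to contiguous_bitmap.  The first test in
isolation, as a runnable user-space sketch:

/* Reports whether a buffer of 'size' bytes starting at pseudo-address 'p'
 * crosses a 4k page boundary, mirroring the first clause of
 * range_straddles_page_boundary() above. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int crosses_page(uintptr_t p, unsigned long size)
{
	return ((p & ~PAGE_MASK) + size) > PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", crosses_page(0x1000, 4096));	/* 0: exactly one page */
	printf("%d\n", crosses_page(0x1ff0, 32));	/* 1: straddles 0x2000 */
	return 0;
}
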
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/fixmap.h linux-2.6.12-xen/include/asm-xen/asm-i386/fixmap.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/fixmap.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/fixmap.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,167 @@
-+/*
-+ * fixmap.h: compile-time virtual memory allocation
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1998 Ingo Molnar
-+ *
-+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
-+ */
-+
-+#ifndef _ASM_FIXMAP_H
-+#define _ASM_FIXMAP_H
-+
-+#include <linux/config.h>
-+
-+/* used by vmalloc.c, vsyscall.lds.S.
-+ *
-+ * Leave one empty page between vmalloc'ed areas and
-+ * the start of the fixmap.
-+ */
-+#define __FIXADDR_TOP	(HYPERVISOR_VIRT_START - 2 * PAGE_SIZE)
-+
-+#ifndef __ASSEMBLY__
-+#include <linux/kernel.h>
-+#include <asm/acpi.h>
-+#include <asm/apicdef.h>
-+#include <asm/page.h>
-+#include <asm-xen/gnttab.h>
-+#ifdef CONFIG_HIGHMEM
-+#include <linux/threads.h>
-+#include <asm/kmap_types.h>
-+#endif
-+
-+/*
-+ * Here we define all the compile-time 'special' virtual
-+ * addresses. The point is to have a constant address at
-+ * compile time, but to set the physical address only
-+ * in the boot process. We allocate these special addresses
-+ * from the end of virtual memory (0xfffff000) backwards.
-+ * Also this lets us do fail-safe vmalloc(), we
-+ * can guarantee that these special addresses and
-+ * vmalloc()-ed addresses never overlap.
-+ *
-+ * these 'compile-time allocated' memory buffers are
-+ * fixed-size 4k pages. (or larger if used with an increment
-+ * higher than 1) use fixmap_set(idx,phys) to associate
-+ * physical memory with fixmap indices.
-+ *
-+ * TLB entries of such buffers will not be flushed across
-+ * task switches.
-+ */
-+enum fixed_addresses {
-+	FIX_HOLE,
-+	FIX_VSYSCALL,
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
-+	FIX_IO_APIC_BASE_0,
-+	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-+#endif
-+#ifdef CONFIG_X86_VISWS_APIC
-+	FIX_CO_CPU,	/* Cobalt timer */
-+	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */ 
-+	FIX_LI_PCIA,	/* Lithium PCI Bridge A */
-+	FIX_LI_PCIB,	/* Lithium PCI Bridge B */
-+#endif
-+#ifdef CONFIG_X86_F00F_BUG
-+	FIX_F00F_IDT,	/* Virtual mapping for IDT */
-+#endif
-+#ifdef CONFIG_X86_CYCLONE_TIMER
-+	FIX_CYCLONE_TIMER, /*cyclone timer register*/
-+#endif 
-+#ifdef CONFIG_HIGHMEM
-+	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
-+	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-+#endif
-+#ifdef CONFIG_ACPI_BOOT
-+	FIX_ACPI_BEGIN,
-+	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-+#endif
-+#ifdef CONFIG_PCI_MMCONFIG
-+	FIX_PCIE_MCFG,
-+#endif
-+	FIX_SHARED_INFO,
-+	FIX_GNTTAB_BEGIN,
-+	FIX_GNTTAB_END = FIX_GNTTAB_BEGIN + NR_GRANT_FRAMES - 1,
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+#define NR_FIX_ISAMAPS	256
-+	FIX_ISAMAP_END,
-+	FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-+#endif
-+	__end_of_permanent_fixed_addresses,
-+	/* temporary boot-time mappings, used before ioremap() is functional */
-+#define NR_FIX_BTMAPS	16
-+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
-+	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
-+	FIX_WP_TEST,
-+	__end_of_fixed_addresses
-+};
-+
-+extern void __set_fixmap(
-+	enum fixed_addresses idx, maddr_t phys, pgprot_t flags);
-+
-+#define set_fixmap(idx, phys) \
-+		__set_fixmap(idx, phys, PAGE_KERNEL)
-+/*
-+ * Some hardware wants to get fixmapped without caching.
-+ */
-+#define set_fixmap_nocache(idx, phys) \
-+		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-+
-+#define clear_fixmap(idx) \
-+		__set_fixmap(idx, 0, __pgprot(0))
-+
-+#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
-+
-+#define __FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-+#define __FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
-+#define FIXADDR_START		(FIXADDR_TOP - __FIXADDR_SIZE)
-+#define FIXADDR_BOOT_START	(FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
-+
-+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
-+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-+
-+/*
-+ * This is the range that is readable by user mode, and things
-+ * acting like user mode such as get_user_pages.
-+ */
-+#define FIXADDR_USER_START	(__fix_to_virt(FIX_VSYSCALL))
-+#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
-+
-+
-+extern void __this_fixmap_does_not_exist(void);
-+
-+/*
-+ * 'index to address' translation. If anyone tries to use the idx
-+ * directly without translation, we catch the bug with a NULL-dereference
-+ * kernel oops. Illegal ranges of incoming indices are caught too.
-+ */
-+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-+{
-+	/*
-+	 * this branch gets completely eliminated after inlining,
-+	 * except when someone tries to use fixaddr indices in an
-+	 * illegal way. (such as mixing up address types or using
-+	 * out-of-range indices).
-+	 *
-+	 * If it doesn't get removed, the linker will complain
-+	 * loudly with a reasonably clear error message..
-+	 */
-+	if (idx >= __end_of_fixed_addresses)
-+		__this_fixmap_does_not_exist();
-+
-+        return __fix_to_virt(idx);
-+}
-+
-+static inline unsigned long virt_to_fix(const unsigned long vaddr)
-+{
-+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-+	return __virt_to_fix(vaddr);
-+}
-+
-+#endif /* !__ASSEMBLY__ */
-+#endif
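
fix_to_virt() above is pure compile-time arithmetic: fixmap slot N lives N
pages below FIXADDR_TOP, and the __this_fixmap_does_not_exist() trick only
guards the index range.  A stand-alone illustration of that arithmetic, with
FIXADDR_TOP set to an arbitrary example value (the real header derives it
from HYPERVISOR_VIRT_START):

/* Round-trips a fixmap index through the fix_to_virt()/virt_to_fix()
 * arithmetic used above. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define FIXADDR_TOP	0xfffff000UL	/* illustrative value only */

#define __fix_to_virt(x)	(FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))
#define __virt_to_fix(x)	((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long va = __fix_to_virt(3);	/* fourth fixmap slot */

	printf("index 3 -> %#lx -> index %lu\n", va, __virt_to_fix(va));
	return 0;
}
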
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/floppy.h linux-2.6.12-xen/include/asm-xen/asm-i386/floppy.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/floppy.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/floppy.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,147 @@
-+/*
-+ * Architecture specific parts of the Floppy driver
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1995
-+ *
-+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
-+ */
-+#ifndef __ASM_XEN_I386_FLOPPY_H
-+#define __ASM_XEN_I386_FLOPPY_H
-+
-+#include <linux/vmalloc.h>
-+
-+/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
-+#include <asm/dma.h>
-+#undef MAX_DMA_ADDRESS
-+#define MAX_DMA_ADDRESS 0
-+#define CROSS_64KB(a,s) (0)
-+
-+#define fd_inb(port)			inb_p(port)
-+#define fd_outb(value,port)		outb_p(value,port)
-+
-+#define fd_request_dma()        (0)
-+#define fd_free_dma()           ((void)0)
-+#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
-+#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
-+#define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)
-+#define fd_get_dma_residue()    (virtual_dma_count + virtual_dma_residue)
-+#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
-+/*
-+ * Do not use vmalloc/vfree: floppy_release_irq_and_dma() gets called from
-+ * softirq context via motor_off_callback. A generic bug we happen to trigger.
-+ */
-+#define fd_dma_mem_alloc(size)	__get_free_pages(GFP_KERNEL, get_order(size))
-+#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
-+
-+static int virtual_dma_count;
-+static int virtual_dma_residue;
-+static char *virtual_dma_addr;
-+static int virtual_dma_mode;
-+static int doing_pdma;
-+
-+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
-+{
-+	register unsigned char st;
-+	register int lcount;
-+	register char *lptr;
-+
-+	if (!doing_pdma)
-+		return floppy_interrupt(irq, dev_id, regs);
-+
-+	st = 1;
-+	for(lcount=virtual_dma_count, lptr=virtual_dma_addr; 
-+	    lcount; lcount--, lptr++) {
-+		st=inb(virtual_dma_port+4) & 0xa0 ;
-+		if(st != 0xa0) 
-+			break;
-+		if(virtual_dma_mode)
-+			outb_p(*lptr, virtual_dma_port+5);
-+		else
-+			*lptr = inb_p(virtual_dma_port+5);
-+	}
-+	virtual_dma_count = lcount;
-+	virtual_dma_addr = lptr;
-+	st = inb(virtual_dma_port+4);
-+
-+	if(st == 0x20)
-+		return IRQ_HANDLED;
-+	if(!(st & 0x20)) {
-+		virtual_dma_residue += virtual_dma_count;
-+		virtual_dma_count=0;
-+		doing_pdma = 0;
-+		floppy_interrupt(irq, dev_id, regs);
-+		return IRQ_HANDLED;
-+	}
-+	return IRQ_HANDLED;
-+}
-+
-+static void fd_disable_dma(void)
-+{
-+	doing_pdma = 0;
-+	virtual_dma_residue += virtual_dma_count;
-+	virtual_dma_count=0;
-+}
-+
-+static int fd_request_irq(void)
-+{
-+	return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
-+					   "floppy", NULL);
-+}
-+
-+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
-+{
-+	doing_pdma = 1;
-+	virtual_dma_port = io;
-+	virtual_dma_mode = (mode  == DMA_MODE_WRITE);
-+	virtual_dma_addr = addr;
-+	virtual_dma_count = size;
-+	virtual_dma_residue = 0;
-+	return 0;
-+}
-+
-+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
-+#define FDC1 xen_floppy_init()
-+static int FDC2 = -1;
-+
-+static int xen_floppy_init(void)
-+{
-+	use_virtual_dma = 1;
-+	can_use_virtual_dma = 1;
-+	return 0x3f0;
-+}
-+
-+/*
-+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
-+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
-+ * coincides with another rtc CMOS user.		Paul G.
-+ */
-+#define FLOPPY0_TYPE	({				\
-+	unsigned long flags;				\
-+	unsigned char val;				\
-+	spin_lock_irqsave(&rtc_lock, flags);		\
-+	val = (CMOS_READ(0x10) >> 4) & 15;		\
-+	spin_unlock_irqrestore(&rtc_lock, flags);	\
-+	val;						\
-+})
-+
-+#define FLOPPY1_TYPE	({				\
-+	unsigned long flags;				\
-+	unsigned char val;				\
-+	spin_lock_irqsave(&rtc_lock, flags);		\
-+	val = CMOS_READ(0x10) & 15;			\
-+	spin_unlock_irqrestore(&rtc_lock, flags);	\
-+	val;						\
-+})
-+
-+#define N_FDC 2
-+#define N_DRIVE 8
-+
-+#define FLOPPY_MOTOR_MASK 0xf0
-+
-+#define EXTRA_FLOPPY_PARAMS
-+
-+#endif /* __ASM_XEN_I386_FLOPPY_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/highmem.h linux-2.6.12-xen/include/asm-xen/asm-i386/highmem.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/highmem.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/highmem.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,80 @@
-+/*
-+ * highmem.h: virtual kernel memory mappings for high memory
-+ *
-+ * Used in CONFIG_HIGHMEM systems for memory pages which
-+ * are not addressable by direct kernel virtual addresses.
-+ *
-+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
-+ *		      Gerhard.Wichert at pdb.siemens.de
-+ *
-+ *
-+ * Redesigned the x86 32-bit VM architecture to deal with 
-+ * up to 16 Terabyte physical memory. With current x86 CPUs
-+ * we now support up to 64 Gigabytes physical RAM.
-+ *
-+ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-+ */
-+
-+#ifndef _ASM_HIGHMEM_H
-+#define _ASM_HIGHMEM_H
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/config.h>
-+#include <linux/interrupt.h>
-+#include <linux/threads.h>
-+#include <asm/kmap_types.h>
-+#include <asm/tlbflush.h>
-+
-+/* declarations for highmem.c */
-+extern unsigned long highstart_pfn, highend_pfn;
-+
-+extern pte_t *kmap_pte;
-+extern pgprot_t kmap_prot;
-+extern pte_t *pkmap_page_table;
-+
-+/*
-+ * Right now we initialize only a single pte table. It can be extended
-+ * easily, subsequent pte tables have to be allocated in one physical
-+ * chunk of RAM.
-+ */
-+#ifdef CONFIG_X86_PAE
-+#define LAST_PKMAP 512
-+#else
-+#define LAST_PKMAP 1024
-+#endif
-+/*
-+ * Ordering is:
-+ *
-+ * FIXADDR_TOP
-+ * 			fixed_addresses
-+ * FIXADDR_START
-+ * 			temp fixed addresses
-+ * FIXADDR_BOOT_START
-+ * 			Persistent kmap area
-+ * PKMAP_BASE
-+ * VMALLOC_END
-+ * 			Vmalloc area
-+ * VMALLOC_START
-+ * high_memory
-+ */
-+#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
-+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-+#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-+#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-+
-+extern void * FASTCALL(kmap_high(struct page *page));
-+extern void FASTCALL(kunmap_high(struct page *page));
-+
-+void *kmap(struct page *page);
-+void kunmap(struct page *page);
-+void *kmap_atomic(struct page *page, enum km_type type);
-+void *kmap_atomic_pte(struct page *page, enum km_type type);
-+void kunmap_atomic(void *kvaddr, enum km_type type);
-+struct page *kmap_atomic_to_page(void *ptr);
-+
-+#define flush_cache_kmaps()	do { } while (0)
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* _ASM_HIGHMEM_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/hw_irq.h linux-2.6.12-xen/include/asm-xen/asm-i386/hw_irq.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/hw_irq.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/hw_irq.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,71 @@
-+#ifndef _ASM_HW_IRQ_H
-+#define _ASM_HW_IRQ_H
-+
-+/*
-+ *	linux/include/asm/hw_irq.h
-+ *
-+ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ *	moved some of the old arch/i386/kernel/irq.h to here. VY
-+ *
-+ *	IRQ/IPI changes taken from work by Thomas Radke
-+ *	<tomsoft at informatik.tu-chemnitz.de>
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/profile.h>
-+#include <asm/atomic.h>
-+#include <asm/irq.h>
-+#include <asm/sections.h>
-+
-+/*
-+ * Various low-level irq details needed by irq.c, process.c,
-+ * time.c, io_apic.c and smp.c
-+ *
-+ * Interrupt entry/exit code at both C and assembly level
-+ */
-+
-+extern u8 irq_vector[NR_IRQ_VECTORS];
-+#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
-+#define AUTO_ASSIGN		-1
-+
-+extern void (*interrupt[NR_IRQS])(void);
-+
-+#ifdef CONFIG_SMP
-+fastcall void reschedule_interrupt(void);
-+fastcall void invalidate_interrupt(void);
-+fastcall void call_function_interrupt(void);
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+fastcall void apic_timer_interrupt(void);
-+fastcall void error_interrupt(void);
-+fastcall void spurious_interrupt(void);
-+fastcall void thermal_interrupt(struct pt_regs *);
-+#define platform_legacy_irq(irq)	((irq) < 16)
-+#endif
-+
-+void disable_8259A_irq(unsigned int irq);
-+void enable_8259A_irq(unsigned int irq);
-+int i8259A_irq_pending(unsigned int irq);
-+void make_8259A_irq(unsigned int irq);
-+void init_8259A(int aeoi);
-+void FASTCALL(send_IPI_self(int vector));
-+void init_VISWS_APIC_irqs(void);
-+void setup_IO_APIC(void);
-+void disable_IO_APIC(void);
-+void print_IO_APIC(void);
-+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-+void send_IPI(int dest, int vector);
-+void setup_ioapic_dest(void);
-+
-+extern unsigned long io_apic_irqs;
-+
-+extern atomic_t irq_err_count;
-+extern atomic_t irq_mis_count;
-+
-+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-+
-+extern void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i);
-+
-+#endif /* _ASM_HW_IRQ_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/hypercall.h linux-2.6.12-xen/include/asm-xen/asm-i386/hypercall.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/hypercall.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,323 @@
-+/******************************************************************************
-+ * hypercall.h
-+ * 
-+ * Linux-specific hypervisor handling.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
-+
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/sched.h>
-+#include <asm-xen/xen-public/nmi.h>
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#define _hypercall0(type, name)			\
-+({						\
-+	long __res;				\
-+	asm volatile (				\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res)			\
-+		:				\
-+		: "memory" );			\
-+	(type)__res;				\
-+})
-+
-+#define _hypercall1(type, name, a1)				\
-+({								\
-+	long __res, __ign1;					\
-+	asm volatile (						\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=b" (__ign1)			\
-+		: "1" ((long)(a1))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
-+
-+#define _hypercall2(type, name, a1, a2)				\
-+({								\
-+	long __res, __ign1, __ign2;				\
-+	asm volatile (						\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2)	\
-+		: "1" ((long)(a1)), "2" ((long)(a2))		\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
-+
-+#define _hypercall3(type, name, a1, a2, a3)			\
-+({								\
-+	long __res, __ign1, __ign2, __ign3;			\
-+	asm volatile (						\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2), 	\
-+		"=d" (__ign3)					\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
-+
-+#define _hypercall4(type, name, a1, a2, a3, a4)			\
-+({								\
-+	long __res, __ign1, __ign2, __ign3, __ign4;		\
-+	asm volatile (						\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
-+		"=d" (__ign3), "=S" (__ign4)			\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3)), "4" ((long)(a4))		\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
-+
-+#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
-+({								\
-+	long __res, __ign1, __ign2, __ign3, __ign4, __ign5;	\
-+	asm volatile (						\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=b" (__ign1), "=c" (__ign2),	\
-+		"=d" (__ign3), "=S" (__ign4), "=D" (__ign5)	\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3)), "4" ((long)(a4)),		\
-+		"5" ((long)(a5))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
-+
-+static inline int
-+HYPERVISOR_set_trap_table(
-+	trap_info_t *table)
-+{
-+	return _hypercall1(int, set_trap_table, table);
-+}
-+
-+static inline int
-+HYPERVISOR_mmu_update(
-+	mmu_update_t *req, int count, int *success_count, domid_t domid)
-+{
-+	return _hypercall4(int, mmu_update, req, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_mmuext_op(
-+	struct mmuext_op *op, int count, int *success_count, domid_t domid)
-+{
-+	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_set_gdt(
-+	unsigned long *frame_list, int entries)
-+{
-+	return _hypercall2(int, set_gdt, frame_list, entries);
-+}
-+
-+static inline int
-+HYPERVISOR_stack_switch(
-+	unsigned long ss, unsigned long esp)
-+{
-+	return _hypercall2(int, stack_switch, ss, esp);
-+}
-+
-+static inline int
-+HYPERVISOR_set_callbacks(
-+	unsigned long event_selector, unsigned long event_address,
-+	unsigned long failsafe_selector, unsigned long failsafe_address)
-+{
-+	return _hypercall4(int, set_callbacks,
-+			   event_selector, event_address,
-+			   failsafe_selector, failsafe_address);
-+}
-+
-+static inline int
-+HYPERVISOR_fpu_taskswitch(
-+	int set)
-+{
-+	return _hypercall1(int, fpu_taskswitch, set);
-+}
-+
-+static inline int
-+HYPERVISOR_sched_op(
-+	int cmd, unsigned long arg)
-+{
-+	return _hypercall2(int, sched_op, cmd, arg);
-+}
-+
-+static inline long
-+HYPERVISOR_set_timer_op(
-+	u64 timeout)
-+{
-+	unsigned long timeout_hi = (unsigned long)(timeout>>32);
-+	unsigned long timeout_lo = (unsigned long)timeout;
-+	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
-+}
-+
-+static inline int
-+HYPERVISOR_dom0_op(
-+	dom0_op_t *dom0_op)
-+{
-+	dom0_op->interface_version = DOM0_INTERFACE_VERSION;
-+	return _hypercall1(int, dom0_op, dom0_op);
-+}
-+
-+static inline int
-+HYPERVISOR_set_debugreg(
-+	int reg, unsigned long value)
-+{
-+	return _hypercall2(int, set_debugreg, reg, value);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_get_debugreg(
-+	int reg)
-+{
-+	return _hypercall1(unsigned long, get_debugreg, reg);
-+}
-+
-+static inline int
-+HYPERVISOR_update_descriptor(
-+	u64 ma, u64 desc)
-+{
-+	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
-+}
-+
-+static inline int
-+HYPERVISOR_memory_op(
-+	unsigned int cmd, void *arg)
-+{
-+	return _hypercall2(int, memory_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_multicall(
-+	void *call_list, int nr_calls)
-+{
-+	return _hypercall2(int, multicall, call_list, nr_calls);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping(
-+	unsigned long va, pte_t new_val, unsigned long flags)
-+{
-+	unsigned long pte_hi = 0;
-+#ifdef CONFIG_X86_PAE
-+	pte_hi = new_val.pte_high;
-+#endif
-+	return _hypercall4(int, update_va_mapping, va,
-+			   new_val.pte_low, pte_hi, flags);
-+}
-+
-+static inline int
-+HYPERVISOR_event_channel_op(
-+	void *op)
-+{
-+	return _hypercall1(int, event_channel_op, op);
-+}
-+
-+static inline int
-+HYPERVISOR_xen_version(
-+	int cmd, void *arg)
-+{
-+	return _hypercall2(int, xen_version, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_console_io(
-+	int cmd, int count, char *str)
-+{
-+	return _hypercall3(int, console_io, cmd, count, str);
-+}
-+
-+static inline int
-+HYPERVISOR_physdev_op(
-+	void *physdev_op)
-+{
-+	return _hypercall1(int, physdev_op, physdev_op);
-+}
-+
-+static inline int
-+HYPERVISOR_grant_table_op(
-+	unsigned int cmd, void *uop, unsigned int count)
-+{
-+	return _hypercall3(int, grant_table_op, cmd, uop, count);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping_otherdomain(
-+	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+	unsigned long pte_hi = 0;
-+#ifdef CONFIG_X86_PAE
-+	pte_hi = new_val.pte_high;
-+#endif
-+	return _hypercall5(int, update_va_mapping_otherdomain, va,
-+			   new_val.pte_low, pte_hi, flags, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_vm_assist(
-+	unsigned int cmd, unsigned int type)
-+{
-+	return _hypercall2(int, vm_assist, cmd, type);
-+}
-+
-+static inline int
-+HYPERVISOR_vcpu_op(
-+	int cmd, int vcpuid, void *extra_args)
-+{
-+	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-+}
-+
-+static inline int
-+HYPERVISOR_suspend(
-+	unsigned long srec)
-+{
-+	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-+			   SHUTDOWN_suspend, srec);
-+}
-+
-+static inline int
-+HYPERVISOR_nmi_op(
-+	unsigned long op,
-+	unsigned long arg)
-+{
-+	return _hypercall2(int, nmi_op, op, arg);
-+}
-+
-+#endif /* __HYPERCALL_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
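
In the _hypercallN macros above the result comes back in EAX and up to five
arguments travel in EBX, ECX, EDX, ESI and EDI, with the call landing in a
fixed 32-byte slot of hypercall_page.  One detail worth spelling out is how
HYPERVISOR_set_timer_op() splits its 64-bit deadline across two 32-bit
arguments; the sketch below mirrors that split, with uint32_t standing in
for the 32-bit unsigned long of i386:

/* Splits a 64-bit timer deadline into the low/high halves that
 * HYPERVISOR_set_timer_op() above passes as separate hypercall arguments. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t timeout = 0x123456789abcdef0ULL;	/* example deadline */
	uint32_t timeout_hi = (uint32_t)(timeout >> 32);
	uint32_t timeout_lo = (uint32_t)timeout;

	printf("lo=%#x hi=%#x\n", (unsigned)timeout_lo, (unsigned)timeout_hi);
	return 0;
}
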
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/hypervisor.h linux-2.6.12-xen/include/asm-xen/asm-i386/hypervisor.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/hypervisor.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,175 @@
-+/******************************************************************************
-+ * hypervisor.h
-+ * 
-+ * Linux-specific hypervisor handling.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERVISOR_H__
-+#define __HYPERVISOR_H__
-+
-+#include <linux/config.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/version.h>
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/dom0_ops.h>
-+#include <asm/ptrace.h>
-+#include <asm/page.h>
-+#if defined(__i386__)
-+#  ifdef CONFIG_X86_PAE
-+#   include <asm-generic/pgtable-nopud.h>
-+#  else
-+#   include <asm-generic/pgtable-nopmd.h>
-+#  endif
-+#endif
-+
-+extern shared_info_t *HYPERVISOR_shared_info;
-+
-+/* arch/xen/i386/kernel/setup.c */
-+extern start_info_t *xen_start_info;
-+
-+/* arch/xen/kernel/evtchn.c */
-+/* Force a proper event-channel callback from Xen. */
-+void force_evtchn_callback(void);
-+
-+/* arch/xen/kernel/process.c */
-+void xen_cpu_idle (void);
-+
-+/* arch/xen/i386/kernel/hypervisor.c */
-+void do_hypervisor_callback(struct pt_regs *regs);
-+
-+/* arch/xen/i386/kernel/head.S */
-+void lgdt_finish(void);
-+
-+/* arch/xen/i386/mm/hypervisor.c */
-+/*
-+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should be already
-+ * be MACHINE addresses.
-+ */
-+
-+void xen_pt_switch(unsigned long ptr);
-+void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
-+void xen_load_gs(unsigned int selector); /* x86_64 only */
-+void xen_tlb_flush(void);
-+void xen_invlpg(unsigned long ptr);
-+
-+#ifndef CONFIG_XEN_SHADOW_MODE
-+void xen_l1_entry_update(pte_t *ptr, pte_t val);
-+void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
-+void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
-+void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
-+void xen_pgd_pin(unsigned long ptr);
-+void xen_pgd_unpin(unsigned long ptr);
-+void xen_pud_pin(unsigned long ptr); /* x86_64 only */
-+void xen_pud_unpin(unsigned long ptr); /* x86_64 only */
-+void xen_pmd_pin(unsigned long ptr); /* x86_64 only */
-+void xen_pmd_unpin(unsigned long ptr); /* x86_64 only */
-+void xen_pte_pin(unsigned long ptr);
-+void xen_pte_unpin(unsigned long ptr);
-+#else
-+#define xen_l1_entry_update(_p, _v) set_pte((_p), (_v))
-+#define xen_l2_entry_update(_p, _v) set_pgd((_p), (_v))
-+#define xen_pgd_pin(_p)   ((void)0)
-+#define xen_pgd_unpin(_p) ((void)0)
-+#define xen_pte_pin(_p)   ((void)0)
-+#define xen_pte_unpin(_p) ((void)0)
-+#endif
-+
-+void xen_set_ldt(unsigned long ptr, unsigned long bytes);
-+void xen_machphys_update(unsigned long mfn, unsigned long pfn);
-+
-+#ifdef CONFIG_SMP
-+#include <linux/cpumask.h>
-+void xen_tlb_flush_all(void);
-+void xen_invlpg_all(unsigned long ptr);
-+void xen_tlb_flush_mask(cpumask_t *mask);
-+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
-+#endif
-+
-+/* Returns zero on success else negative errno. */
-+int xen_create_contiguous_region(
-+    unsigned long vstart, unsigned int order, unsigned int address_bits);
-+void xen_destroy_contiguous_region(
-+    unsigned long vstart, unsigned int order);
-+
-+#include <asm/hypercall.h>
-+
-+#if defined(CONFIG_X86_64)
-+#define MULTI_UVMFLAGS_INDEX 2
-+#define MULTI_UVMDOMID_INDEX 3
-+#else
-+#define MULTI_UVMFLAGS_INDEX 3
-+#define MULTI_UVMDOMID_INDEX 4
-+#endif
-+
-+#define xen_init()	(0)
-+
-+static inline void
-+MULTI_update_va_mapping(
-+    multicall_entry_t *mcl, unsigned long va,
-+    pte_t new_val, unsigned long flags)
-+{
-+    mcl->op = __HYPERVISOR_update_va_mapping;
-+    mcl->args[0] = va;
-+#if defined(CONFIG_X86_64)
-+    mcl->args[1] = new_val.pte;
-+    mcl->args[2] = flags;
-+#elif defined(CONFIG_X86_PAE)
-+    mcl->args[1] = new_val.pte_low;
-+    mcl->args[2] = new_val.pte_high;
-+    mcl->args[3] = flags;
-+#else
-+    mcl->args[1] = new_val.pte_low;
-+    mcl->args[2] = 0;
-+    mcl->args[3] = flags;
-+#endif
-+}
-+
-+static inline void
-+MULTI_update_va_mapping_otherdomain(
-+    multicall_entry_t *mcl, unsigned long va,
-+    pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+    mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
-+    mcl->args[0] = va;
-+#if defined(CONFIG_X86_64)
-+    mcl->args[1] = new_val.pte;
-+    mcl->args[2] = flags;
-+    mcl->args[3] = domid;
-+#elif defined(CONFIG_X86_PAE)
-+    mcl->args[1] = new_val.pte_low;
-+    mcl->args[2] = new_val.pte_high;
-+    mcl->args[3] = flags;
-+    mcl->args[4] = domid;
-+#else
-+    mcl->args[1] = new_val.pte_low;
-+    mcl->args[2] = 0;
-+    mcl->args[3] = flags;
-+    mcl->args[4] = domid;
-+#endif
-+}
-+
-+#endif /* __HYPERVISOR_H__ */
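
MULTI_update_va_mapping() above only fills in one multicall slot; a caller
batches several slots and submits them in one go with HYPERVISOR_multicall().
A user-space sketch of the non-PAE i386 argument layout, using a stand-in
structure and a purely illustrative op number:

/* Fills two model multicall slots the way MULTI_update_va_mapping() does
 * for non-PAE i386 (args[1] = pte_low, args[2] = 0, args[3] = flags). */
#include <stdio.h>

struct model_multicall_entry {		/* stand-in for multicall_entry_t */
	unsigned long op;
	unsigned long args[6];
};

#define MODEL_OP_UPDATE_VA_MAPPING 14	/* illustrative op number */

static void model_update_va_mapping(struct model_multicall_entry *mcl,
				    unsigned long va, unsigned long pte_low,
				    unsigned long flags)
{
	mcl->op = MODEL_OP_UPDATE_VA_MAPPING;
	mcl->args[0] = va;
	mcl->args[1] = pte_low;
	mcl->args[2] = 0;		/* high PTE word: zero without PAE */
	mcl->args[3] = flags;
}

int main(void)
{
	struct model_multicall_entry batch[2];
	int i;

	model_update_va_mapping(&batch[0], 0xc0000000UL, 0x1000UL | 0x63, 0);
	model_update_va_mapping(&batch[1], 0xc0001000UL, 0x2000UL | 0x63, 0);
	/* a real caller would now submit 'batch' via HYPERVISOR_multicall(batch, 2) */
	for (i = 0; i < 2; i++)
		printf("slot %d: op=%lu va=%#lx pte=%#lx flags=%#lx\n", i,
		       batch[i].op, batch[i].args[0], batch[i].args[1],
		       batch[i].args[3]);
	return 0;
}
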
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/io.h linux-2.6.12-xen/include/asm-xen/asm-i386/io.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/io.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/io.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,400 @@
-+#ifndef _ASM_IO_H
-+#define _ASM_IO_H
-+
-+#include <linux/config.h>
-+#include <linux/string.h>
-+#include <linux/compiler.h>
-+
-+/*
-+ * This file contains the definitions for the x86 IO instructions
-+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
-+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
-+ * versions of the single-IO instructions (inb_p/inw_p/..).
-+ *
-+ * This file is not meant to be obfuscating: it's just complicated
-+ * to (a) handle it all in a way that makes gcc able to optimize it
-+ * as well as possible and (b) trying to avoid writing the same thing
-+ * over and over again with slight variations and possibly making a
-+ * mistake somewhere.
-+ */
-+
-+/*
-+ * Thanks to James van Artsdalen for a better timing-fix than
-+ * the two short jumps: using outb's to a nonexistent port seems
-+ * to guarantee better timings even on fast machines.
-+ *
-+ * On the other hand, I'd like to be sure of a non-existent port:
-+ * I feel a bit unsafe about using 0x80 (should be safe, though)
-+ *
-+ *		Linus
-+ */
-+
-+ /*
-+  *  Bit simplified and optimized by Jan Hubicka
-+  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
-+  *
-+  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
-+  *  isa_read[wl] and isa_write[wl] fixed
-+  *  - Arnaldo Carvalho de Melo <acme at conectiva.com.br>
-+  */
-+
-+#define IO_SPACE_LIMIT 0xffff
-+
-+#define XQUAD_PORTIO_BASE 0xfe400000
-+#define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
-+
-+#ifdef __KERNEL__
-+
-+#include <asm-generic/iomap.h>
-+
-+#include <linux/vmalloc.h>
-+#include <asm/fixmap.h>
-+
-+/*
-+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
-+ * access
-+ */
-+#define xlate_dev_mem_ptr(p)	__va(p)
-+
-+/*
-+ * Convert a virtual cached pointer to an uncached pointer
-+ */
-+#define xlate_dev_kmem_ptr(p)	p
-+
-+/**
-+ *	virt_to_phys	-	map virtual addresses to physical
-+ *	@address: address to remap
-+ *
-+ *	The returned physical address is the physical (CPU) mapping for
-+ *	the memory address given. It is only valid to use this function on
-+ *	addresses directly mapped or allocated via kmalloc. 
-+ *
-+ *	This function does not give bus mappings for DMA transfers. In
-+ *	almost all conceivable cases a device driver should not be using
-+ *	this function
-+ */
-+ 
-+static inline unsigned long virt_to_phys(volatile void * address)
-+{
-+	return __pa(address);
-+}
-+
-+/**
-+ *	phys_to_virt	-	map physical address to virtual
-+ *	@address: address to remap
-+ *
-+ *	The returned virtual address is a current CPU mapping for
-+ *	the memory address given. It is only valid to use this function on
-+ *	addresses that have a kernel mapping
-+ *
-+ *	This function does not handle bus mappings for DMA transfers. In
-+ *	almost all conceivable cases a device driver should not be using
-+ *	this function
-+ */
-+
-+static inline void * phys_to_virt(unsigned long address)
-+{
-+	return __va(address);
-+}
-+
-+/*
-+ * Change "struct page" to physical address.
-+ */
-+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
-+
-+#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
-+				  (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
-+				  (unsigned long) (bv)->bv_offset)
-+
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-+	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-+	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-+	  bvec_to_pseudophys((vec2))))
-+
-+extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-+
-+/**
-+ * ioremap     -   map bus memory into CPU space
-+ * @offset:    bus address of the memory
-+ * @size:      size of the resource to map
-+ *
-+ * ioremap performs a platform specific sequence of operations to
-+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
-+ * writew/writel functions and the other mmio helpers. The returned
-+ * address is not guaranteed to be usable directly as a virtual
-+ * address. 
-+ */
-+
-+static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
-+{
-+	return __ioremap(offset, size, 0);
-+}
-+
-+extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size);
-+extern void iounmap(volatile void __iomem *addr);
-+
-+/*
-+ * bt_ioremap() and bt_iounmap() are for temporary early boot-time
-+ * mappings, before the real ioremap() is functional.
-+ * A boot-time mapping is currently limited to at most 16 pages.
-+ */
-+extern void *bt_ioremap(unsigned long offset, unsigned long size);
-+extern void bt_iounmap(void *addr, unsigned long size);
-+
-+/*
-+ * ISA I/O bus memory addresses are 1:1 with the physical address.
-+ */
-+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
-+#else
-+#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
-+#endif
-+
-+/*
-+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
-+ * are forbidden in portable PCI drivers.
-+ *
-+ * Allow them on x86 for legacy drivers, though.
-+ */
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+
-+/*
-+ * readX/writeX() are used to access memory mapped devices. On some
-+ * architectures the memory mapped IO stuff needs to be accessed
-+ * differently. On the x86 architecture, we just read/write the
-+ * memory location directly.
-+ */
-+
-+static inline unsigned char readb(const volatile void __iomem *addr)
-+{
-+	return *(volatile unsigned char __force *) addr;
-+}
-+static inline unsigned short readw(const volatile void __iomem *addr)
-+{
-+	return *(volatile unsigned short __force *) addr;
-+}
-+static inline unsigned int readl(const volatile void __iomem *addr)
-+{
-+	return *(volatile unsigned int __force *) addr;
-+}
-+#define readb_relaxed(addr) readb(addr)
-+#define readw_relaxed(addr) readw(addr)
-+#define readl_relaxed(addr) readl(addr)
-+#define __raw_readb readb
-+#define __raw_readw readw
-+#define __raw_readl readl
-+
-+static inline void writeb(unsigned char b, volatile void __iomem *addr)
-+{
-+	*(volatile unsigned char __force *) addr = b;
-+}
-+static inline void writew(unsigned short b, volatile void __iomem *addr)
-+{
-+	*(volatile unsigned short __force *) addr = b;
-+}
-+static inline void writel(unsigned int b, volatile void __iomem *addr)
-+{
-+	*(volatile unsigned int __force *) addr = b;
-+}
-+#define __raw_writeb writeb
-+#define __raw_writew writew
-+#define __raw_writel writel
-+
-+#define mmiowb()
-+
-+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
-+{
-+	memset((void __force *) addr, val, count);
-+}
-+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
-+{
-+	__memcpy(dst, (void __force *) src, count);
-+}
-+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
-+{
-+	__memcpy((void __force *) dst, src, count);
-+}
-+
-+/*
-+ * ISA space is 'always mapped' on a typical x86 system, no need to
-+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
-+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
-+ * are physical addresses. The following constant pointer can be
-+ * used as the IO-area pointer (it can be iounmapped as well, so the
-+ * analogy with PCI is quite large):
-+ */
-+#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
-+
-+#define isa_readb(a) readb(__ISA_IO_base + (a))
-+#define isa_readw(a) readw(__ISA_IO_base + (a))
-+#define isa_readl(a) readl(__ISA_IO_base + (a))
-+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-+#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
-+#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
-+#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))
-+
-+
-+/*
-+ * Again, i386 does not require memory-mapped I/O specific functions.
-+ */
-+
-+#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void __force *)(b),(c),(d))
-+#define isa_eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(void __force *)(__ISA_IO_base + (b)),(c),(d))
-+
-+/**
-+ *	check_signature		-	find BIOS signatures
-+ *	@io_addr: mmio address to check 
-+ *	@signature:  signature block
-+ *	@length: length of signature
-+ *
-+ *	Perform a signature comparison with the mmio address io_addr. This
-+ *	address should have been obtained by ioremap.
-+ *	Returns 1 on a match.
-+ */
-+ 
-+static inline int check_signature(volatile void __iomem * io_addr,
-+	const unsigned char *signature, int length)
-+{
-+	int retval = 0;
-+	do {
-+		if (readb(io_addr) != *signature)
-+			goto out;
-+		io_addr++;
-+		signature++;
-+		length--;
-+	} while (length);
-+	retval = 1;
-+out:
-+	return retval;
-+}
-+
-+/*
-+ *	Cache management
-+ *
-+ *	This is needed for two cases
-+ *	1. Out of order aware processors
-+ *	2. Accidentally out of order processors (PPro errata #51)
-+ */
-+ 
-+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
-+
-+static inline void flush_write_buffers(void)
-+{
-+	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
-+}
-+
-+#define dma_cache_inv(_start,_size)		flush_write_buffers()
-+#define dma_cache_wback(_start,_size)		flush_write_buffers()
-+#define dma_cache_wback_inv(_start,_size)	flush_write_buffers()
-+
-+#else
-+
-+/* Nothing to do */
-+
-+#define dma_cache_inv(_start,_size)		do { } while (0)
-+#define dma_cache_wback(_start,_size)		do { } while (0)
-+#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-+#define flush_write_buffers()
-+
-+#endif
-+
-+#endif /* __KERNEL__ */
-+
-+#ifdef SLOW_IO_BY_JUMPING
-+#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
-+#else
-+#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-+#endif
-+
-+static inline void slow_down_io(void) {
-+	__asm__ __volatile__(
-+		__SLOW_DOWN_IO
-+#ifdef REALLY_SLOW_IO
-+		__SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-+#endif
-+		: : );
-+}
-+
-+#ifdef CONFIG_X86_NUMAQ
-+extern void *xquad_portio;    /* Where the IO area was mapped */
-+#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
-+#define __BUILDIO(bwl,bw,type) \
-+static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
-+	if (xquad_portio) \
-+		write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
-+	else \
-+		out##bwl##_local(value, port); \
-+} \
-+static inline void out##bwl(unsigned type value, int port) { \
-+	out##bwl##_quad(value, port, 0); \
-+} \
-+static inline unsigned type in##bwl##_quad(int port, int quad) { \
-+	if (xquad_portio) \
-+		return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
-+	else \
-+		return in##bwl##_local(port); \
-+} \
-+static inline unsigned type in##bwl(int port) { \
-+	return in##bwl##_quad(port, 0); \
-+}
-+#else
-+#define __BUILDIO(bwl,bw,type) \
-+static inline void out##bwl(unsigned type value, int port) { \
-+	out##bwl##_local(value, port); \
-+} \
-+static inline unsigned type in##bwl(int port) { \
-+	return in##bwl##_local(port); \
-+}
-+#endif
-+
-+
-+#define BUILDIO(bwl,bw,type) \
-+static inline void out##bwl##_local(unsigned type value, int port) { \
-+	__asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \
-+} \
-+static inline unsigned type in##bwl##_local(int port) { \
-+	unsigned type value; \
-+	__asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \
-+	return value; \
-+} \
-+static inline void out##bwl##_local_p(unsigned type value, int port) { \
-+	out##bwl##_local(value, port); \
-+	slow_down_io(); \
-+} \
-+static inline unsigned type in##bwl##_local_p(int port) { \
-+	unsigned type value = in##bwl##_local(port); \
-+	slow_down_io(); \
-+	return value; \
-+} \
-+__BUILDIO(bwl,bw,type) \
-+static inline void out##bwl##_p(unsigned type value, int port) { \
-+	out##bwl(value, port); \
-+	slow_down_io(); \
-+} \
-+static inline unsigned type in##bwl##_p(int port) { \
-+	unsigned type value = in##bwl(port); \
-+	slow_down_io(); \
-+	return value; \
-+} \
-+static inline void outs##bwl(int port, const void *addr, unsigned long count) { \
-+	__asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \
-+} \
-+static inline void ins##bwl(int port, void *addr, unsigned long count) { \
-+	__asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \
-+}
-+
-+BUILDIO(b,b,char)
-+BUILDIO(w,w,short)
-+BUILDIO(l,,int)
-+
-+/* We will be supplying our own /dev/mem implementation */
-+#define ARCH_HAS_DEV_MEM
-+
-+#endif
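
As an aside for anyone reading the hunk above: the Xen-specific twist in this io.h is that bus addresses are machine addresses, so BIOVEC_PHYS_MERGEABLE has to require contiguity in the pseudo-physical *and* the machine address space. A standalone userspace sketch of that check follows; the toy p2m table and frame numbers are made up and it is not part of the patch.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* toy physical-to-machine table: pfns 0 and 1 are adjacent in pseudo-physical
 * space but are backed by non-adjacent machine frames, as can happen under Xen */
static unsigned long p2m[] = { 0x100, 0x37f, 0x101 };

static unsigned long phys_to_machine(unsigned long phys)
{
	return (p2m[phys >> PAGE_SHIFT] << PAGE_SHIFT) | (phys & (PAGE_SIZE - 1));
}

/* two buffers may only be merged if they are contiguous in BOTH spaces */
static int biovec_phys_mergeable(unsigned long phys1, unsigned long len1,
				 unsigned long phys2)
{
	return (phys1 + len1 == phys2) &&
	       (phys_to_machine(phys1) + len1 == phys_to_machine(phys2));
}

int main(void)
{
	/* pfn 0 -> pfn 1: pseudo-physically contiguous, machine-discontiguous */
	printf("%d\n", biovec_phys_mergeable(0x0000, PAGE_SIZE, 0x1000)); /* prints 0 */
	/* two halves of the same page: trivially mergeable */
	printf("%d\n", biovec_phys_mergeable(0x0000, 0x200, 0x200));      /* prints 1 */
	return 0;
}
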
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/kmap_types.h linux-2.6.12-xen/include/asm-xen/asm-i386/kmap_types.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/kmap_types.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/kmap_types.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,32 @@
-+#ifndef _ASM_KMAP_TYPES_H
-+#define _ASM_KMAP_TYPES_H
-+
-+#include <linux/config.h>
-+
-+#ifdef CONFIG_DEBUG_HIGHMEM
-+# define D(n) __KM_FENCE_##n ,
-+#else
-+# define D(n)
-+#endif
-+
-+enum km_type {
-+D(0)	KM_BOUNCE_READ,
-+D(1)	KM_SKB_SUNRPC_DATA,
-+D(2)	KM_SKB_DATA_SOFTIRQ,
-+D(3)	KM_USER0,
-+D(4)	KM_USER1,
-+D(5)	KM_BIO_SRC_IRQ,
-+D(6)	KM_BIO_DST_IRQ,
-+D(7)	KM_PTE0,
-+D(8)	KM_PTE1,
-+D(9)	KM_IRQ0,
-+D(10)	KM_IRQ1,
-+D(11)	KM_SOFTIRQ0,
-+D(12)	KM_SOFTIRQ1,
-+D(13)	KM_SWIOTLB,
-+D(14)	KM_TYPE_NR
-+};
-+
-+#undef D
-+
-+#endif
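
The D(n) wrapper above is easy to misread; the following comment-only sketch of the expansion is illustrative and not part of the patch.

/*
 * With CONFIG_DEBUG_HIGHMEM, D(n) puts an unused guard member in front of
 * every real kmap type, so the enum expands roughly to
 *
 *	enum km_type {
 *		__KM_FENCE_0,  KM_BOUNCE_READ,
 *		__KM_FENCE_1,  KM_SKB_SUNRPC_DATA,
 *		...
 *		__KM_FENCE_14, KM_TYPE_NR
 *	};
 *
 * leaving a guard slot between consecutive kmap_atomic mappings so a stray
 * access is likelier to hit an unmapped page.  Without the option, D(n)
 * expands to nothing and the numbering is the usual dense 0..KM_TYPE_NR.
 * The Xen addition relative to stock i386 is the KM_SWIOTLB slot.
 */
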
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/irq_vectors.h linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/irq_vectors.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,125 @@
-+/*
-+ * This file should contain #defines for all of the interrupt vector
-+ * numbers used by this architecture.
-+ *
-+ * In addition, there are some standard defines:
-+ *
-+ *	FIRST_EXTERNAL_VECTOR:
-+ *		The first free place for external interrupts
-+ *
-+ *	SYSCALL_VECTOR:
-+ *		The IRQ vector a syscall makes the user to kernel transition
-+ *		under.
-+ *
-+ *	TIMER_IRQ:
-+ *		The IRQ number the timer interrupt comes in at.
-+ *
-+ *	NR_IRQS:
-+ *		The total number of interrupt vectors (including all the
-+ *		architecture specific interrupts) needed.
-+ *
-+ */			
-+#ifndef _ASM_IRQ_VECTORS_H
-+#define _ASM_IRQ_VECTORS_H
-+
-+/*
-+ * IDT vectors usable for external interrupt sources start
-+ * at 0x20:
-+ */
-+#define FIRST_EXTERNAL_VECTOR	0x20
-+
-+#define SYSCALL_VECTOR		0x80
-+
-+/*
-+ * Vectors 0x20-0x2f are used for ISA interrupts.
-+ */
-+
-+#if 0
-+/*
-+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
-+ *
-+ *  some of the following vectors are 'rare', they are merged
-+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
-+ *  TLB, reschedule and local APIC vectors are performance-critical.
-+ *
-+ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
-+ */
-+#define SPURIOUS_APIC_VECTOR	0xff
-+#define ERROR_APIC_VECTOR	0xfe
-+#define INVALIDATE_TLB_VECTOR	0xfd
-+#define RESCHEDULE_VECTOR	0xfc
-+#define CALL_FUNCTION_VECTOR	0xfb
-+
-+#define THERMAL_APIC_VECTOR	0xf0
-+/*
-+ * Local APIC timer IRQ vector is on a different priority level,
-+ * to work around the 'lost local interrupt if more than 2 IRQ
-+ * sources per level' errata.
-+ */
-+#define LOCAL_TIMER_VECTOR	0xef
-+#endif
-+
-+#define SPURIOUS_APIC_VECTOR	0xff
-+#define ERROR_APIC_VECTOR	0xfe
-+
-+/*
-+ * First APIC vector available to drivers: (vectors 0x30-0xee)
-+ * we start at 0x31 to spread out vectors evenly between priority
-+ * levels. (0x80 is the syscall vector)
-+ */
-+#define FIRST_DEVICE_VECTOR	0x31
-+#define FIRST_SYSTEM_VECTOR	0xef
-+
-+/*
-+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
-+ * Right now the APIC is mostly only used for SMP.
-+ * 256 vectors is an architectural limit. (we can have
-+ * more than 256 devices theoretically, but they will
-+ * have to use shared interrupts)
-+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
-+ * the usable vector space is 0x20-0xff (224 vectors)
-+ */
-+
-+#define RESCHEDULE_VECTOR	0
-+#define CALL_FUNCTION_VECTOR	1
-+#define NR_IPIS			2
-+
-+/*
-+ * The maximum number of vectors supported by i386 processors
-+ * is limited to 256. For processors other than i386, NR_VECTORS
-+ * should be changed accordingly.
-+ */
-+#define NR_VECTORS 256
-+
-+#define FPU_IRQ			13
-+
-+#define	FIRST_VM86_IRQ		3
-+#define LAST_VM86_IRQ		15
-+#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
-+
-+/*
-+ * The flat IRQ space is divided into two regions:
-+ *  1. A one-to-one mapping of real physical IRQs. This space is only used
-+ *     if we have physical device-access privilege. This region is at the 
-+ *     start of the IRQ space so that existing device drivers do not need
-+ *     to be modified to translate physical IRQ numbers into our IRQ space.
-+ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
-+ *     are bound using the provided bind/unbind functions.
-+ */
-+
-+#define PIRQ_BASE		0
-+#define NR_PIRQS		256
-+
-+#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
-+#define NR_DYNIRQS		256
-+
-+#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
-+#define NR_IRQ_VECTORS		NR_IRQS
-+
-+#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
-+#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
-+
-+#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
-+#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
-+
-+#endif /* _ASM_IRQ_VECTORS_H */
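
The numbers above give a flat 512-entry IRQ space: physical IRQs keep their usual numbers at the bottom, dynamically bound event-channel IRQs start at 256. A small standalone check of the conversion macros, with the values copied from the header (userspace only, not part of the patch):

#include <assert.h>
#include <stdio.h>

#define PIRQ_BASE     0
#define NR_PIRQS      256
#define DYNIRQ_BASE   (PIRQ_BASE + NR_PIRQS)
#define NR_DYNIRQS    256
#define NR_IRQS       (NR_PIRQS + NR_DYNIRQS)

#define pirq_to_irq(x)    ((x) + PIRQ_BASE)
#define dynirq_to_irq(x)  ((x) + DYNIRQ_BASE)
#define irq_to_dynirq(x)  ((x) - DYNIRQ_BASE)

int main(void)
{
	assert(NR_IRQS == 512);
	assert(pirq_to_irq(14) == 14);      /* a legacy IDE IRQ keeps its number */
	assert(dynirq_to_irq(0) == 256);    /* first dynamically bound IRQ */
	assert(irq_to_dynirq(300) == 44);
	printf("flat IRQ space: 0..%d pirqs, %d..%d dynirqs\n",
	       NR_PIRQS - 1, DYNIRQ_BASE, NR_IRQS - 1);
	return 0;
}
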
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/mach_traps.h linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/mach_traps.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/mach_traps.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/mach_traps.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,33 @@
-+/*
-+ *  include/asm-xen/asm-i386/mach-xen/mach_traps.h
-+ *
-+ *  Machine specific NMI handling for Xen
-+ */
-+#ifndef _MACH_TRAPS_H
-+#define _MACH_TRAPS_H
-+
-+#include <linux/bitops.h>
-+#include <asm-xen/xen-public/nmi.h>
-+
-+static inline void clear_mem_error(unsigned char reason) {}
-+static inline void clear_io_check_error(unsigned char reason) {}
-+
-+static inline unsigned char get_nmi_reason(void)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	unsigned char reason = 0;
-+
-+	/* construct a value which looks like it came from
-+	 * port 0x61.
-+	 */
-+	if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
-+		reason |= 0x40;
-+	if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
-+		reason |= 0x80;
-+
-+        return reason;
-+}
-+
-+static inline void reassert_nmi(void) {}
-+
-+#endif /* !_MACH_TRAPS_H */
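
For reference, the two bits synthesised above are the ones legacy NMI code expects from a read of port 0x61: bit 6 is the I/O channel check, bit 7 the memory parity error. A toy re-implementation of that folding, userspace only and not part of the patch:

#include <assert.h>

#define NMI_IOCHK   0x40	/* bit 6 of port 0x61: I/O channel check   */
#define NMI_PARITY  0x80	/* bit 7 of port 0x61: memory parity error */

/* mimic get_nmi_reason(): build a port-0x61 style byte from Xen's flags */
static unsigned char fake_port61(int io_error, int parity_error)
{
	unsigned char reason = 0;

	if (io_error)
		reason |= NMI_IOCHK;
	if (parity_error)
		reason |= NMI_PARITY;
	return reason;
}

int main(void)
{
	assert(fake_port61(1, 0) == 0x40);
	assert(fake_port61(1, 1) == 0xc0);	/* both causes pending at once */
	assert(fake_port61(0, 0) == 0x00);	/* unknown NMI: neither bit set */
	return 0;
}
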
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,43 @@
-+/**
-+ * machine_specific_memory_setup - Hook for machine specific memory setup.
-+ *
-+ * Description:
-+ *	This is included late in kernel/setup.c so that it can make
-+ *	use of all of the static functions.
-+ **/
-+
-+static char * __init machine_specific_memory_setup(void)
-+{
-+	unsigned long max_pfn = xen_start_info->nr_pages;
-+
-+	e820.nr_map = 0;
-+	add_memory_region(0, PFN_PHYS(max_pfn), E820_RAM);
-+
-+	return "Xen";
-+}
-+
-+void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
-+{
-+	clear_bit(X86_FEATURE_VME, c->x86_capability);
-+	clear_bit(X86_FEATURE_DE, c->x86_capability);
-+	clear_bit(X86_FEATURE_PSE, c->x86_capability);
-+	clear_bit(X86_FEATURE_PGE, c->x86_capability);
-+	clear_bit(X86_FEATURE_SEP, c->x86_capability);
-+	if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+		clear_bit(X86_FEATURE_MTRR, c->x86_capability);
-+}
-+
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void nmi(void);
-+
-+static void __init machine_specific_arch_setup(void)
-+{
-+	HYPERVISOR_set_callbacks(
-+	    __KERNEL_CS, (unsigned long)hypervisor_callback,
-+	    __KERNEL_CS, (unsigned long)failsafe_callback);
-+
-+	HYPERVISOR_nmi_op(XENNMI_register_callback, (unsigned long)&nmi);
-+
-+	machine_specific_modify_cpu_capabilities(&boot_cpu_data);
-+}
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,5 @@
-+/* Hook to call BIOS initialisation function */
-+
-+#define ARCH_SETUP machine_specific_arch_setup();
-+
-+static void __init machine_specific_arch_setup(void);
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mmu_context.h linux-2.6.12-xen/include/asm-xen/asm-i386/mmu_context.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/mmu_context.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,108 @@
-+#ifndef __I386_SCHED_H
-+#define __I386_SCHED_H
-+
-+#include <linux/config.h>
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/pgalloc.h>
-+#include <asm/tlbflush.h>
-+
-+/*
-+ * Used for LDT copy/destruction.
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-+void destroy_context(struct mm_struct *mm);
-+
-+
-+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-+{
-+#if 0 /* XEN: no lazy tlb */
-+	unsigned cpu = smp_processor_id();
-+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
-+#endif
-+}
-+
-+#define prepare_arch_switch(rq,next)	__prepare_arch_switch()
-+#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
-+#define task_running(rq, p)		((rq)->curr == (p))
-+
-+static inline void __prepare_arch_switch(void)
-+{
-+	/*
-+	 * Save away %fs and %gs. No need to save %es and %ds, as those
-+	 * are always kernel segments while inside the kernel. Must
-+	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
-+	 */
-+	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
-+		: "=m" (current->thread.fs),
-+		  "=m" (current->thread.gs));
-+	asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
-+		: : "r" (0) );
-+}
-+
-+extern void mm_pin(struct mm_struct *mm);
-+extern void mm_unpin(struct mm_struct *mm);
-+void mm_pin_all(void);
-+
-+static inline void switch_mm(struct mm_struct *prev,
-+			     struct mm_struct *next,
-+			     struct task_struct *tsk)
-+{
-+	int cpu = smp_processor_id();
-+	struct mmuext_op _op[2], *op = _op;
-+
-+	if (likely(prev != next)) {
-+		if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
-+			mm_pin(next);
-+
-+		/* stop flush ipis for the previous mm */
-+		cpu_clear(cpu, prev->cpu_vm_mask);
-+#if 0 /* XEN: no lazy tlb */
-+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-+		per_cpu(cpu_tlbstate, cpu).active_mm = next;
-+#endif
-+		cpu_set(cpu, next->cpu_vm_mask);
-+
-+		/* Re-load page tables: load_cr3(next->pgd) */
-+		per_cpu(cur_pgd, cpu) = next->pgd;
-+		op->cmd = MMUEXT_NEW_BASEPTR;
-+		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
-+		op++;
-+
-+		/*
-+		 * load the LDT, if the LDT is different:
-+		 */
-+		if (unlikely(prev->context.ldt != next->context.ldt)) {
-+			/* load_LDT_nolock(&next->context, cpu) */
-+			op->cmd = MMUEXT_SET_LDT;
-+			op->arg1.linear_addr = (unsigned long)next->context.ldt;
-+			op->arg2.nr_ents     = next->context.size;
-+			op++;
-+		}
-+
-+		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
-+	}
-+#if 0 /* XEN: no lazy tlb */
-+	else {
-+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-+		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
-+
-+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-+			/* We were in lazy tlb mode and leave_mm disabled 
-+			 * tlb flush IPI delivery. We must reload %cr3.
-+			 */
-+			load_cr3(next->pgd);
-+			load_LDT_nolock(&next->context, cpu);
-+		}
-+	}
-+#endif
-+}
-+
-+#define deactivate_mm(tsk, mm) \
-+	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
-+
-+#define activate_mm(prev, next) \
-+	switch_mm((prev),(next),NULL)
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/mmu.h linux-2.6.12-xen/include/asm-xen/asm-i386/mmu.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/mmu.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/mmu.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,21 @@
-+#ifndef __i386_MMU_H
-+#define __i386_MMU_H
-+
-+#include <asm/semaphore.h>
-+/*
-+ * The i386 doesn't have a mmu context, but
-+ * we put the segment information here.
-+ *
-+ * cpu_vm_mask is used to optimize ldt flushing.
-+ */
-+typedef struct { 
-+	int size;
-+	struct semaphore sem;
-+	void *ldt;
-+} mm_context_t;
-+
-+/* mm/memory.c:exit_mmap hook */
-+extern void _arch_exit_mmap(struct mm_struct *mm);
-+#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/page.h linux-2.6.12-xen/include/asm-xen/asm-i386/page.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/page.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/page.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,276 @@
-+#ifndef _I386_PAGE_H
-+#define _I386_PAGE_H
-+
-+/* PAGE_SHIFT determines the page size */
-+#define PAGE_SHIFT	12
-+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
-+#define PAGE_MASK	(~(PAGE_SIZE-1))
-+
-+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
-+
-+#ifdef __KERNEL__
-+#ifndef __ASSEMBLY__
-+
-+#include <linux/config.h>
-+#include <linux/string.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <asm/bug.h>
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/foreign_page.h>
-+
-+#define arch_free_page(_page,_order)			\
-+({	int foreign = PageForeign(_page);		\
-+	if (foreign)					\
-+		(PageForeignDestructor(_page))(_page);	\
-+	foreign;					\
-+})
-+#define HAVE_ARCH_FREE_PAGE
-+
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-+#else
-+#define scrub_pages(_p,_n) ((void)0)
-+#endif
-+
-+#ifdef CONFIG_X86_USE_3DNOW
-+
-+#include <asm/mmx.h>
-+
-+#define clear_page(page)	mmx_clear_page((void *)(page))
-+#define copy_page(to,from)	mmx_copy_page(to,from)
-+
-+#else
-+
-+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-+
-+/*
-+ *	On older X86 processors it's not a win to use MMX here it seems.
-+ *	Maybe the K6-III ?
-+ */
-+ 
-+#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
-+#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-+
-+#endif
-+
-+#define clear_user_page(page, vaddr, pg)	clear_page(page)
-+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
-+
-+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-+#define INVALID_P2M_ENTRY	(~0UL)
-+#define FOREIGN_FRAME(m)	((m) | (1UL<<31))
-+extern unsigned long *phys_to_machine_mapping;
-+#define pfn_to_mfn(pfn)	\
-+(phys_to_machine_mapping[(unsigned int)(pfn)] & ~(1UL<<31))
-+#define	phys_to_machine_mapping_valid(pfn) \
-+	(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
-+static inline unsigned long mfn_to_pfn(unsigned long mfn)
-+{
-+	unsigned long pfn;
-+
-+	/*
-+	 * The array access can fail (e.g., device space beyond end of RAM).
-+	 * In such cases it doesn't matter what we return (we return garbage),
-+	 * but we must handle the fault without crashing!
-+	 */
-+	asm (
-+		"1:	movl %1,%0\n"
-+		"2:\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4\n"
-+		"	.long 1b,2b\n"
-+		".previous"
-+		: "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );
-+
-+	return pfn;
-+}
-+
-+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-+{
-+	phys_to_machine_mapping[pfn] = mfn;
-+}
-+
-+/* Definitions for machine and pseudophysical addresses. */
-+#ifdef CONFIG_X86_PAE
-+typedef unsigned long long paddr_t;
-+typedef unsigned long long maddr_t;
-+#else
-+typedef unsigned long paddr_t;
-+typedef unsigned long maddr_t;
-+#endif
-+
-+static inline maddr_t phys_to_machine(paddr_t phys)
-+{
-+	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-+	return machine;
-+}
-+static inline paddr_t machine_to_phys(maddr_t machine)
-+{
-+	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-+	return phys;
-+}
-+
-+/*
-+ * These are used to make use of C type-checking..
-+ */
-+extern int nx_enabled;
-+#ifdef CONFIG_X86_PAE
-+extern unsigned long long __supported_pte_mask;
-+typedef struct { unsigned long pte_low, pte_high; } pte_t;
-+typedef struct { unsigned long long pmd; } pmd_t;
-+typedef struct { unsigned long long pgd; } pgd_t;
-+typedef struct { unsigned long long pgprot; } pgprot_t;
-+#define __pte(x) ({ unsigned long long _x = (x);        \
-+    if (_x & 1) _x = phys_to_machine(_x);               \
-+    ((pte_t) {(unsigned long)(_x), (unsigned long)(_x>>32)}); })
-+#define __pgd(x) ({ unsigned long long _x = (x); \
-+    (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
-+#define __pmd(x) ({ unsigned long long _x = (x); \
-+    (((_x)&1) ? ((pmd_t) {phys_to_machine(_x)}) : ((pmd_t) {(_x)})); })
-+static inline unsigned long long pte_val(pte_t x)
-+{
-+	unsigned long long ret;
-+
-+	if (x.pte_low) {
-+		ret = x.pte_low | (unsigned long long)x.pte_high << 32;
-+		ret = machine_to_phys(ret) | 1;
-+	} else {
-+		ret = 0;
-+	}
-+	return ret;
-+}
-+static inline unsigned long long pmd_val(pmd_t x)
-+{
-+	unsigned long long ret = x.pmd;
-+	if (ret) ret = machine_to_phys(ret) | 1;
-+	return ret;
-+}
-+static inline unsigned long long pgd_val(pgd_t x)
-+{
-+	unsigned long long ret = x.pgd;
-+	if (ret) ret = machine_to_phys(ret) | 1;
-+	return ret;
-+}
-+static inline unsigned long long pte_val_ma(pte_t x)
-+{
-+	return (unsigned long long)x.pte_high << 32 | x.pte_low;
-+}
-+#define HPAGE_SHIFT	21
-+#else
-+typedef struct { unsigned long pte_low; } pte_t;
-+typedef struct { unsigned long pgd; } pgd_t;
-+typedef struct { unsigned long pgprot; } pgprot_t;
-+#define boot_pte_t pte_t /* or would you rather have a typedef */
-+#define pte_val(x)	(((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
-+			 (x).pte_low)
-+#define pte_val_ma(x)	((x).pte_low)
-+#define __pte(x) ({ unsigned long _x = (x); \
-+    (((_x)&1) ? ((pte_t) {phys_to_machine(_x)}) : ((pte_t) {(_x)})); })
-+#define __pgd(x) ({ unsigned long _x = (x); \
-+    (((_x)&1) ? ((pgd_t) {phys_to_machine(_x)}) : ((pgd_t) {(_x)})); })
-+static inline unsigned long pgd_val(pgd_t x)
-+{
-+	unsigned long ret = x.pgd;
-+	if (ret) ret = machine_to_phys(ret) | 1;
-+	return ret;
-+}
-+#define HPAGE_SHIFT	22
-+#endif
-+#define PTE_MASK	PAGE_MASK
-+
-+#ifdef CONFIG_HUGETLB_PAGE
-+#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
-+#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
-+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-+#endif
-+
-+#define pgprot_val(x)	((x).pgprot)
-+
-+#define __pte_ma(x)	((pte_t) { (x) } )
-+#define __pgprot(x)	((pgprot_t) { (x) } )
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+/* to align the pointer to the (next) page boundary */
-+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
-+
-+/*
-+ * This handles the memory map.. We could make this a config
-+ * option, but too many people screw it up, and too few need
-+ * it.
-+ *
-+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
-+ * a virtual address space of one gigabyte, which limits the
-+ * amount of physical memory you can use to about 950MB. 
-+ *
-+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
-+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
-+ */
-+
-+#ifndef __ASSEMBLY__
-+
-+/*
-+ * This much address space is reserved for vmalloc() and iomap()
-+ * as well as fixmap mappings.
-+ */
-+extern unsigned int __VMALLOC_RESERVE;
-+
-+/* Pure 2^n version of get_order */
-+static __inline__ int get_order(unsigned long size)
-+{
-+	int order;
-+
-+	size = (size-1) >> (PAGE_SHIFT-1);
-+	order = -1;
-+	do {
-+		size >>= 1;
-+		order++;
-+	} while (size);
-+	return order;
-+}
-+
-+extern int sysctl_legacy_va_layout;
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#ifdef __ASSEMBLY__
-+#define __PAGE_OFFSET		(0xC0000000)
-+#else
-+#define __PAGE_OFFSET		(0xC0000000UL)
-+#endif
-+
-+
-+#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
-+#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
-+#define MAXMEM			(HYPERVISOR_VIRT_START-__PAGE_OFFSET-__VMALLOC_RESERVE)
-+#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
-+#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
-+#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
-+#ifndef CONFIG_DISCONTIGMEM
-+#define pfn_to_page(pfn)	(mem_map + (pfn))
-+#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
-+#define pfn_valid(pfn)		((pfn) < max_mapnr)
-+#endif /* !CONFIG_DISCONTIGMEM */
-+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-+
-+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-+
-+#define VM_DATA_DEFAULT_FLAGS \
-+	(VM_READ | VM_WRITE | \
-+	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-+		 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+
-+/* VIRT <-> MACHINE conversion */
-+#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
-+#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
-+#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* _I386_PAGE_H */
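
The least obvious arithmetic in this header is get_order(); below is a userspace copy of the same algorithm with a few spot checks, included purely as an illustration (not part of the patch).

#include <assert.h>

#define PAGE_SHIFT 12

/* same pure 2^n algorithm as the get_order() above */
static int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	assert(get_order(1)    == 0);	/* anything up to one page is order 0      */
	assert(get_order(4096) == 0);
	assert(get_order(4097) == 1);	/* just over a page rounds up to 2 pages   */
	assert(get_order(8192) == 1);
	assert(get_order(8193) == 2);	/* 3 pages round up to the next power of 2 */
	return 0;
}
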
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/param.h linux-2.6.12-xen/include/asm-xen/asm-i386/param.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/param.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/param.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,23 @@
-+#ifndef _ASMi386_PARAM_H
-+#define _ASMi386_PARAM_H
-+
-+#ifdef __KERNEL__
-+# define HZ		100		/* Internal kernel timer frequency */
-+# define USER_HZ	100		/* .. some user interfaces are in "ticks" */
-+# define CLOCKS_PER_SEC		(USER_HZ)	/* like times() */
-+#endif
-+
-+#ifndef HZ
-+#define HZ 100
-+#endif
-+
-+#define EXEC_PAGESIZE	4096
-+
-+#ifndef NOGROUP
-+#define NOGROUP		(-1)
-+#endif
-+
-+#define MAXHOSTNAMELEN	64	/* max length of hostname */
-+#define COMMAND_LINE_SIZE 256
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pci.h linux-2.6.12-xen/include/asm-xen/asm-i386/pci.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/pci.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/pci.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,140 @@
-+#ifndef __i386_PCI_H
-+#define __i386_PCI_H
-+
-+#include <linux/config.h>
-+
-+#ifdef __KERNEL__
-+#include <linux/mm.h>		/* for struct page */
-+
-+/* Can be used to override the logic in pci_scan_bus for skipping
-+   already-configured bus numbers - to be used for buggy BIOSes
-+   or architectures with incomplete PCI setup by the loader */
-+
-+#ifdef CONFIG_PCI
-+extern unsigned int pcibios_assign_all_busses(void);
-+#else
-+#define pcibios_assign_all_busses()	0
-+#endif
-+#define pcibios_scan_all_fns(a, b)	0
-+
-+extern unsigned long pci_mem_start;
-+#define PCIBIOS_MIN_IO		0x1000
-+#define PCIBIOS_MIN_MEM		(pci_mem_start)
-+
-+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
-+
-+void pcibios_config_init(void);
-+struct pci_bus * pcibios_scan_root(int bus);
-+
-+void pcibios_set_master(struct pci_dev *dev);
-+void pcibios_penalize_isa_irq(int irq);
-+struct irq_routing_table *pcibios_get_irq_routing_table(void);
-+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-+
-+/* Dynamic DMA mapping stuff.
-+ * i386 has everything mapped statically.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <asm/scatterlist.h>
-+#include <linux/string.h>
-+#include <asm/io.h>
-+
-+struct pci_dev;
-+
-+#ifdef CONFIG_SWIOTLB
-+
-+
-+/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
-+#define PCI_DMA_BUS_IS_PHYS	(0)
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-+	dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-+	__u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME)			\
-+	((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
-+	(((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME)			\
-+	((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
-+	(((PTR)->LEN_NAME) = (VAL))
-+
-+#else
-+
-+/* The PCI address space does equal the physical memory
-+ * address space.  The networking and block device layers use
-+ * this boolean for bounce buffer decisions.
-+ */
-+#define PCI_DMA_BUS_IS_PHYS	(1)
-+
-+/* pci_unmap_{page,single} is a nop so... */
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
-+#define pci_unmap_len(PTR, LEN_NAME)		(0)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
-+
-+#endif
-+
-+/* This is always fine. */
-+#define pci_dac_dma_supported(pci_dev, mask)	(1)
-+
-+static inline dma64_addr_t
-+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-+{
-+	return ((dma64_addr_t) page_to_phys(page) +
-+		(dma64_addr_t) offset);
-+}
-+
-+static inline struct page *
-+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return pfn_to_page(dma_addr >> PAGE_SHIFT);
-+}
-+
-+static inline unsigned long
-+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return (dma_addr & ~PAGE_MASK);
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+	flush_write_buffers();
-+}
-+
-+#define HAVE_PCI_MMAP
-+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-+			       enum pci_mmap_state mmap_state, int write_combine);
-+
-+
-+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-+{
-+}
-+
-+#endif /* __KERNEL__ */
-+
-+/* implement the pci_ DMA API in terms of the generic device dma_ one */
-+#include <asm-generic/pci-dma-compat.h>
-+
-+/* generic pci stuff */
-+#include <asm-generic/pci.h>
-+
-+/* On Xen we have to scan all functions since Xen hides bridges from
-+ * us.  If a bridge is at fn=0 and that slot has a multifunction
-+ * device, we won't find the additional devices without scanning all
-+ * functions. */
-+#undef pcibios_scan_all_fns
-+#define pcibios_scan_all_fns(a, b)	1
-+
-+#endif /* __i386_PCI_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgalloc.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgalloc.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgalloc.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,64 @@
-+#ifndef _I386_PGALLOC_H
-+#define _I386_PGALLOC_H
-+
-+#include <linux/config.h>
-+#include <asm/fixmap.h>
-+#include <linux/threads.h>
-+#include <linux/mm.h>		/* for struct page */
-+#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
-+
-+/* Is this pagetable pinned? */
-+#define PG_pinned	PG_arch_1
-+
-+#define pmd_populate_kernel(mm, pmd, pte) \
-+		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
-+
-+#define pmd_populate(mm, pmd, pte) 					\
-+do {									\
-+	if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) {	\
-+		if (!PageHighMem(pte))					\
-+			BUG_ON(HYPERVISOR_update_va_mapping(		\
-+			  (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT),\
-+			  pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));\
-+		set_pmd(pmd, __pmd(_PAGE_TABLE +			\
-+			((unsigned long long)page_to_pfn(pte) <<	\
-+				(unsigned long long) PAGE_SHIFT)));	\
-+	} else {							\
-+		*(pmd) = __pmd(_PAGE_TABLE +				\
-+			((unsigned long long)page_to_pfn(pte) <<	\
-+				(unsigned long long) PAGE_SHIFT));	\
-+	}								\
-+} while (0)
-+
-+/*
-+ * Allocate and free page tables.
-+ */
-+extern pgd_t *pgd_alloc(struct mm_struct *);
-+extern void pgd_free(pgd_t *pgd);
-+
-+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-+
-+static inline void pte_free_kernel(pte_t *pte)
-+{
-+	free_page((unsigned long)pte);
-+	make_page_writable(pte, XENFEAT_writable_page_tables);
-+}
-+
-+extern void pte_free(struct page *pte);
-+
-+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
-+
-+#ifdef CONFIG_X86_PAE
-+/*
-+ * In the PAE case we free the pmds as part of the pgd.
-+ */
-+#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
-+#define pmd_free(x)			do { } while (0)
-+#define __pmd_free_tlb(tlb,x)		do { } while (0)
-+#define pud_populate(mm, pmd, pte)	BUG()
-+#endif
-+
-+#define check_pgt_cache()	do { } while (0)
-+
-+#endif /* _I386_PGALLOC_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-2level-defs.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-2level-defs.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-2level-defs.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-2level-defs.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,21 @@
-+#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
-+#define _I386_PGTABLE_2LEVEL_DEFS_H
-+
-+#define HAVE_SHARED_KERNEL_PMD 0
-+
-+/*
-+ * traditional i386 two-level paging structure:
-+ */
-+
-+#define PGDIR_SHIFT	22
-+#define PTRS_PER_PGD	1024
-+#define PTRS_PER_PGD_NO_HV	(HYPERVISOR_VIRT_START >> PGDIR_SHIFT)
-+
-+/*
-+ * the i386 is two-level, so we don't really have any
-+ * PMD directory physically.
-+ */
-+
-+#define PTRS_PER_PTE	1024
-+
-+#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-2level.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-2level.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-2level.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-2level.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,119 @@
-+#ifndef _I386_PGTABLE_2LEVEL_H
-+#define _I386_PGTABLE_2LEVEL_H
-+
-+#include <asm-generic/pgtable-nopmd.h>
-+
-+#define pte_ERROR(e) \
-+	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
-+#define pgd_ERROR(e) \
-+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-+
-+/*
-+ * Certain architectures need to do special things when PTEs
-+ * within a page table are directly modified.  Thus, the following
-+ * hook is made available.
-+ */
-+#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
-+
-+#define set_pte_at(_mm,addr,ptep,pteval) do {				\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
-+		set_pte((ptep), (pteval));				\
-+} while (0)
-+
-+#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
-+		set_pte((ptep), (pteval));				\
-+		xen_invlpg((addr));					\
-+	}								\
-+} while (0)
-+
-+#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
-+
-+#ifndef CONFIG_XEN_SHADOW_MODE
-+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
-+#else
-+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
-+#endif
-+
-+#define ptep_get_and_clear(mm,addr,xp)	__pte_ma(xchg(&(xp)->pte_low, 0))
-+#define pte_same(a, b)		((a).pte_low == (b).pte_low)
-+/*
-+ * We detect special mappings in one of two ways:
-+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
-+ *     to be outside our maximum possible pseudophys range.
-+ *  2. If the MFN belongs to a different domain then we will certainly
-+ *     not have MFN in our p2m table. Conversely, if the page is ours,
-+ *     then we'll have p2m(m2p(MFN))==MFN.
-+ * If we detect a special mapping then it doesn't have a 'struct page'.
-+ * We force !pfn_valid() by returning an out-of-range pointer.
-+ *
-+ * NB. These checks require that, for any MFN that is not in our reservation,
-+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
-+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
-+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
-+ * 
-+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
-+ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
-+ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
-+ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
-+ */
-+#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
-+#define pte_pfn(_pte)							\
-+({									\
-+	unsigned long mfn = pte_mfn(_pte);				\
-+	unsigned long pfn = mfn_to_pfn(mfn);				\
-+	if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
-+		pfn = max_mapnr; /* special: force !pfn_valid() */	\
-+	pfn;								\
-+})
-+
-+#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
-+
-+#define pte_none(x)		(!(x).pte_low)
-+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+#define pfn_pte_ma(pfn, prot)	__pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-+
-+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-+
-+#define pmd_page_kernel(pmd) \
-+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-+
-+/*
-+ * All present user pages are user-executable:
-+ */
-+static inline int pte_exec(pte_t pte)
-+{
-+	return pte_user(pte);
-+}
-+
-+/*
-+ * All present pages are kernel-executable:
-+ */
-+static inline int pte_exec_kernel(pte_t pte)
-+{
-+	return 1;
-+}
-+
-+/*
-+ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
-+ * into this range:
-+ */
-+#define PTE_FILE_MAX_BITS	29
-+
-+#define pte_to_pgoff(pte) \
-+	((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
-+
-+#define pgoff_to_pte(off) \
-+	((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
-+
-+/* Encode and de-code a swap entry */
-+#define __swp_type(x)			(((x).val >> 1) & 0x1f)
-+#define __swp_offset(x)			((x).val >> 8)
-+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-+#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
-+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
-+
-+#endif /* _I386_PGTABLE_2LEVEL_H */
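
The pte_to_pgoff()/pgoff_to_pte() packing above has to route the 29-bit file offset around pte bits 0, 6 and 7. A standalone round-trip check of the same bit layout (userspace only, not part of the patch):

#include <assert.h>

#define _PAGE_FILE 0x040

/* same packing as pgoff_to_pte()/pte_to_pgoff() above: the low 5 offset bits
 * go into pte bits 1..5, the rest start at bit 8, bits 0/6/7 stay reserved */
static unsigned long pgoff_to_ptelow(unsigned long off)
{
	return ((off & 0x1f) << 1) + ((off >> 5) << 8) + _PAGE_FILE;
}

static unsigned long ptelow_to_pgoff(unsigned long pte_low)
{
	return ((pte_low >> 1) & 0x1f) + ((pte_low >> 8) << 5);
}

int main(void)
{
	unsigned long off;

	/* the round trip is lossless for any offset below 2^29 ... */
	for (off = 0; off < (1UL << 29); off += 12345)
		assert(ptelow_to_pgoff(pgoff_to_ptelow(off)) == off);

	/* ... and never touches the bits the packing is designed to avoid */
	assert((pgoff_to_ptelow(0x1fffffff) & 0x01) == 0);	/* present bit */
	assert((pgoff_to_ptelow(0x1fffffff) & 0x80) == 0);	/* bit 7       */
	return 0;
}
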
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-3level-defs.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-3level-defs.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-3level-defs.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-3level-defs.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,25 @@
-+#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
-+#define _I386_PGTABLE_3LEVEL_DEFS_H
-+
-+#define HAVE_SHARED_KERNEL_PMD 0
-+
-+/*
-+ * PGDIR_SHIFT determines what a top-level page table entry can map
-+ */
-+#define PGDIR_SHIFT	30
-+#define PTRS_PER_PGD	4
-+#define PTRS_PER_PGD_NO_HV 4
-+
-+/*
-+ * PMD_SHIFT determines the size of the area a middle-level
-+ * page table can map
-+ */
-+#define PMD_SHIFT	21
-+#define PTRS_PER_PMD	512
-+
-+/*
-+ * entries per page directory level
-+ */
-+#define PTRS_PER_PTE	512
-+
-+#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-3level.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-3level.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable-3level.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable-3level.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,200 @@
-+#ifndef _I386_PGTABLE_3LEVEL_H
-+#define _I386_PGTABLE_3LEVEL_H
-+
-+#include <asm-generic/pgtable-nopud.h>
-+
-+/*
-+ * Intel Physical Address Extension (PAE) Mode - three-level page
-+ * tables on PPro+ CPUs.
-+ *
-+ * Copyright (C) 1999 Ingo Molnar <mingo at redhat.com>
-+ */
-+
-+#define pte_ERROR(e) \
-+	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
-+#define pmd_ERROR(e) \
-+	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-+#define pgd_ERROR(e) \
-+	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-+
-+#define pud_none(pud)				0
-+#define pud_bad(pud)				0
-+#define pud_present(pud)			1
-+
-+/*
-+ * Is the pte executable?
-+ */
-+static inline int pte_x(pte_t pte)
-+{
-+	return !(pte_val(pte) & _PAGE_NX);
-+}
-+
-+/*
-+ * All present user-pages with !NX bit are user-executable:
-+ */
-+static inline int pte_exec(pte_t pte)
-+{
-+	return pte_user(pte) && pte_x(pte);
-+}
-+/*
-+ * All present pages with !NX bit are kernel-executable:
-+ */
-+static inline int pte_exec_kernel(pte_t pte)
-+{
-+	return pte_x(pte);
-+}
-+
-+/* Rules for using set_pte: the pte being assigned *must* be
-+ * either not present or in a state where the hardware will
-+ * not attempt to update the pte.  In places where this is
-+ * not possible, use pte_get_and_clear to obtain the old pte
-+ * value and then use set_pte to update it.  -ben
-+ */
-+#define __HAVE_ARCH_SET_PTE_ATOMIC
-+
-+#if 1
-+/* use writable pagetables */
-+static inline void set_pte(pte_t *ptep, pte_t pte)
-+{
-+	ptep->pte_high = pte.pte_high;
-+	smp_wmb();
-+	ptep->pte_low = pte.pte_low;
-+}
-+# define set_pte_atomic(pteptr,pteval) \
-+		set_64bit((unsigned long long *)(pteptr),pte_val_ma(pteval))
-+#else
-+/* no writable pagetables */
-+# define set_pte(pteptr,pteval)				\
-+		xen_l1_entry_update((pteptr), (pteval))
-+# define set_pte_atomic(pteptr,pteval) set_pte(pteptr,pteval)
-+#endif
-+
-+#define set_pte_at(_mm,addr,ptep,pteval) do {				\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
-+		set_pte((ptep), (pteval));				\
-+} while (0)
-+
-+#define set_pte_at_sync(_mm,addr,ptep,pteval) do {			\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
-+		set_pte((ptep), (pteval));				\
-+		xen_invlpg((addr));					\
-+	}								\
-+} while (0)
-+
-+#ifdef CONFIG_XEN_SHADOW_MODE
-+# define set_pmd(pmdptr,pmdval) \
-+		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
-+# define set_pud(pudptr,pudval) \
-+		set_64bit((unsigned long long *)(pudptr),pud_val(pudval))
-+#else
-+# define set_pmd(pmdptr,pmdval)				\
-+		xen_l2_entry_update((pmdptr), (pmdval))
-+# define set_pud(pudptr,pudval) \
-+		xen_l3_entry_update((pudptr), (pudval))
-+#endif
-+
-+/*
-+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
-+ * the TLB via cr3 if the top-level pgd is changed...
-+ * We do not let the generic code free and clear pgd entries due to
-+ * this erratum.
-+ */
-+static inline void pud_clear (pud_t * pud) { }
-+
-+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-+
-+#define pmd_page_kernel(pmd) \
-+((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-+
-+#define pud_page(pud) \
-+((struct page *) __va(pud_val(pud) & PAGE_MASK))
-+
-+#define pud_page_kernel(pud) \
-+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-+
-+
-+/* Find an entry in the second-level page table.. */
-+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
-+			pmd_index(address))
-+
-+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+	pte_t res;
-+
-+	/* xchg acts as a barrier before the setting of the high bits */
-+	res.pte_low = xchg(&ptep->pte_low, 0);
-+	res.pte_high = ptep->pte_high;
-+	ptep->pte_high = 0;
-+
-+	return res;
-+}
-+
-+static inline int pte_same(pte_t a, pte_t b)
-+{
-+	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
-+}
-+
-+#define pte_page(x)	pfn_to_page(pte_pfn(x))
-+
-+static inline int pte_none(pte_t pte)
-+{
-+	return !pte.pte_low && !pte.pte_high;
-+}
-+
-+#define pte_mfn(_pte) ( ((_pte).pte_low >> PAGE_SHIFT) |\
-+		        (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)) )
-+#define pte_pfn(_pte)                                                  \
-+({                                                                     \
-+       unsigned long mfn = pte_mfn(_pte);                              \
-+       unsigned long pfn = mfn_to_pfn(mfn);                            \
-+       if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
-+               pfn = max_mapnr; /* special: force !pfn_valid() */      \
-+       pfn;                                                            \
-+})
-+
-+extern unsigned long long __supported_pte_mask;
-+
-+static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
-+{
-+	pte_t pte;
-+
-+	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
-+					(pgprot_val(pgprot) >> 32);
-+	pte.pte_high &= (__supported_pte_mask >> 32);
-+	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
-+							__supported_pte_mask;
-+	return pte;
-+}
-+
-+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-+{
-+	return pfn_pte_ma(pfn_to_mfn(page_nr), pgprot);
-+}
-+
-+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
-+{
-+	BUG(); panic("needs review");
-+	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \
-+			pgprot_val(pgprot)) & __supported_pte_mask);
-+}
-+
-+/*
-+ * Bits 0, 6 and 7 are taken in the low part of the pte,
-+ * put the 32 bits of offset into the high part.
-+ */
-+#define pte_to_pgoff(pte) ((pte).pte_high)
-+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
-+#define PTE_FILE_MAX_BITS       32
-+
-+/* Encode and de-code a swap entry */
-+#define __swp_type(x)			(((x).val) & 0x1f)
-+#define __swp_offset(x)			((x).val >> 5)
-+#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
-+#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
-+#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
-+
-+#define __pmd_free_tlb(tlb, x)		do { } while (0)
-+
-+#endif /* _I386_PGTABLE_3LEVEL_H */
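
A worked example of the PAE high/low split done by pfn_pte_ma() and undone by pte_mfn() above, for a machine frame that sits just above 4GB. The frame number and flag value are made up and this userspace re-implementation is not part of the patch.

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* mimic the 32-bit pte_low/pte_high split used by the PAE code above */
struct pae_pte { uint32_t pte_low, pte_high; };

static struct pae_pte make_pte_ma(uint32_t mfn, uint32_t flags)
{
	struct pae_pte pte;

	pte.pte_high = mfn >> (32 - PAGE_SHIFT);	/* frame bits 20 and up   */
	pte.pte_low  = (mfn << PAGE_SHIFT) | flags;	/* frame bits 0..19 + flags */
	return pte;
}

static uint32_t pte_mfn(struct pae_pte pte)
{
	return (pte.pte_low >> PAGE_SHIFT) |
	       ((pte.pte_high & 0xfff) << (32 - PAGE_SHIFT));
}

int main(void)
{
	/* machine frame 0x123456 lies just above 4GB (machine addr 0x1_23456_000) */
	struct pae_pte pte = make_pte_ma(0x123456, 0x063 /* present|rw|accessed|dirty */);

	assert(pte.pte_high == 0x1);
	assert(pte.pte_low  == 0x23456063);
	assert(pte_mfn(pte) == 0x123456);
	return 0;
}
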
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable.h linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/pgtable.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/pgtable.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,478 @@
-+#ifndef _I386_PGTABLE_H
-+#define _I386_PGTABLE_H
-+
-+#include <linux/config.h>
-+#include <asm/hypervisor.h>
-+
-+/*
-+ * The Linux memory management assumes a three-level page table setup. On
-+ * the i386, we use that, but "fold" the mid level into the top-level page
-+ * table, so that we physically have the same two-level page table as the
-+ * i386 mmu expects.
-+ *
-+ * This file contains the functions and defines necessary to modify and use
-+ * the i386 page table tree.
-+ */
-+#ifndef __ASSEMBLY__
-+#include <asm/processor.h>
-+#include <asm/fixmap.h>
-+#include <linux/threads.h>
-+
-+#ifndef _I386_BITOPS_H
-+#include <asm/bitops.h>
-+#endif
-+
-+#include <linux/slab.h>
-+#include <linux/list.h>
-+#include <linux/spinlock.h>
-+
-+/*
-+ * ZERO_PAGE is a global shared page that is always zero: used
-+ * for zero-mapped memory areas etc..
-+ */
-+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-+extern unsigned long empty_zero_page[1024];
-+extern pgd_t *swapper_pg_dir;
-+extern kmem_cache_t *pgd_cache;
-+extern kmem_cache_t *pmd_cache;
-+extern spinlock_t pgd_lock;
-+extern struct page *pgd_list;
-+
-+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-+void pgd_dtor(void *, kmem_cache_t *, unsigned long);
-+void pgtable_cache_init(void);
-+void paging_init(void);
-+
-+/*
-+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
-+ * implements both the traditional 2-level x86 page tables and the
-+ * newer 3-level PAE-mode page tables.
-+ */
-+#ifdef CONFIG_X86_PAE
-+# include <asm/pgtable-3level-defs.h>
-+# define PMD_SIZE	(1UL << PMD_SHIFT)
-+# define PMD_MASK	(~(PMD_SIZE-1))
-+#else
-+# include <asm/pgtable-2level-defs.h>
-+#endif
-+
-+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
-+
-+#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
-+#define FIRST_USER_ADDRESS	0
-+
-+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-+
-+#define TWOLEVEL_PGDIR_SHIFT	22
-+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
-+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-+
-+/* Just any arbitrary offset to the start of the vmalloc VM area: the
-+ * current 8MB value just means that there will be a 8MB "hole" after the
-+ * physical memory until the kernel virtual memory starts.  That means that
-+ * any out-of-bounds memory accesses will hopefully be caught.
-+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
-+ * area for the same reason. ;)
-+ */
-+#define VMALLOC_OFFSET	(8*1024*1024)
-+#define VMALLOC_START	(((unsigned long) high_memory + vmalloc_earlyreserve + \
-+			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
-+#ifdef CONFIG_HIGHMEM
-+# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
-+#else
-+# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
-+#endif
-+
-+/*
-+ * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
-+ * of the Pentium details, but assuming intel did the straightforward
-+ * thing, this bit set in the page directory entry just means that
-+ * the page directory entry points directly to a 4MB-aligned block of
-+ * memory. 
-+ */
-+#define _PAGE_BIT_PRESENT	0
-+#define _PAGE_BIT_RW		1
-+#define _PAGE_BIT_USER		2
-+#define _PAGE_BIT_PWT		3
-+#define _PAGE_BIT_PCD		4
-+#define _PAGE_BIT_ACCESSED	5
-+#define _PAGE_BIT_DIRTY		6
-+#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
-+#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
-+#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
-+#define _PAGE_BIT_UNUSED2	10
-+#define _PAGE_BIT_UNUSED3	11
-+#define _PAGE_BIT_NX		63
-+
-+#define _PAGE_PRESENT	0x001
-+#define _PAGE_RW	0x002
-+#define _PAGE_USER	0x004
-+#define _PAGE_PWT	0x008
-+#define _PAGE_PCD	0x010
-+#define _PAGE_ACCESSED	0x020
-+#define _PAGE_DIRTY	0x040
-+#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
-+#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
-+#define _PAGE_UNUSED1	0x200	/* available for programmer */
-+#define _PAGE_UNUSED2	0x400
-+#define _PAGE_UNUSED3	0x800
-+
-+#define _PAGE_FILE	0x040	/* set:pagecache unset:swap */
-+#define _PAGE_PROTNONE	0x080	/* If not present */
-+#ifdef CONFIG_X86_PAE
-+#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
-+#else
-+#define _PAGE_NX	0
-+#endif
-+
-+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-+
-+#define PAGE_NONE \
-+	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-+#define PAGE_SHARED \
-+	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-+
-+#define PAGE_SHARED_EXEC \
-+	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_COPY_NOEXEC \
-+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_COPY_EXEC \
-+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_COPY \
-+	PAGE_COPY_NOEXEC
-+#define PAGE_READONLY \
-+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_READONLY_EXEC \
-+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+
-+#define _PAGE_KERNEL \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-+#define _PAGE_KERNEL_EXEC \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-+
-+extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
-+#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
-+#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
-+#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
-+#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
-+
-+#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
-+#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
-+#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
-+#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
-+#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
-+#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
-+
-+/*
-+ * The i386 can't do separate page protection for execute; it treats execute
-+ * permission the same as read. Also, write permissions imply read permissions.
-+ * This is the closest we can get..
-+ */
-+#define __P000	PAGE_NONE
-+#define __P001	PAGE_READONLY
-+#define __P010	PAGE_COPY
-+#define __P011	PAGE_COPY
-+#define __P100	PAGE_READONLY_EXEC
-+#define __P101	PAGE_READONLY_EXEC
-+#define __P110	PAGE_COPY_EXEC
-+#define __P111	PAGE_COPY_EXEC
-+
-+#define __S000	PAGE_NONE
-+#define __S001	PAGE_READONLY
-+#define __S010	PAGE_SHARED
-+#define __S011	PAGE_SHARED
-+#define __S100	PAGE_READONLY_EXEC
-+#define __S101	PAGE_READONLY_EXEC
-+#define __S110	PAGE_SHARED_EXEC
-+#define __S111	PAGE_SHARED_EXEC
-+
-+/*
-+ * Define this if things work differently on an i386 and an i486:
-+ * it will (on an i486) warn about kernel memory accesses that are
-+ * done without a 'access_ok(VERIFY_WRITE,..)'
-+ */
-+#undef TEST_ACCESS_OK
-+
-+/* The boot page tables (all created as a single array) */
-+extern unsigned long pg0[];
-+
-+#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-+#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-+
-+#define pmd_none(x)	(!pmd_val(x))
-+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
-+   can temporarily clear it. */
-+#define pmd_present(x)	(pmd_val(x))
-+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-+#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
-+
-+
-+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-+
-+/*
-+ * The following only work if pte_present() is true.
-+ * Undefined behaviour if not..
-+ */
-+static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
-+static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
-+static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
-+static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
-+static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
-+
-+/*
-+ * The following only works if pte_present() is not true.
-+ */
-+static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }
-+
-+static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
-+static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
-+static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
-+static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
-+static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
-+static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
-+
-+#ifdef CONFIG_X86_PAE
-+# include <asm/pgtable-3level.h>
-+#else
-+# include <asm/pgtable-2level.h>
-+#endif
-+
-+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-+{
-+	if (!pte_dirty(*ptep))
-+		return 0;
-+	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
-+}
-+
-+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-+{
-+	if (!pte_young(*ptep))
-+		return 0;
-+	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
-+}
-+
-+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+	if (pte_write(*ptep))
-+		clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
-+}
-+
-+/*
-+ * Macro to mark a page protection value as "uncacheable".  On processors which do not support
-+ * it, this is a no-op.
-+ */
-+#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
-+				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
-+
-+/*
-+ * Conversion functions: convert a page and protection to a page entry,
-+ * and a page entry and page directory to the page they refer to.
-+ */
-+
-+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-+#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
-+
-+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-+{
-+	pte.pte_low &= _PAGE_CHG_MASK;
-+	pte.pte_low |= pgprot_val(newprot);
-+#ifdef CONFIG_X86_PAE
-+	/*
-+	 * Chop off the NX bit (if present), and add the NX portion of
-+	 * the newprot (if present):
-+	 */
-+	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-+	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
-+					(__supported_pte_mask >> 32);
-+#endif
-+	return pte;
-+}
-+
-+#define page_pte(page) page_pte_prot(page, __pgprot(0))
-+
-+#define pmd_large(pmd) \
-+((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
-+
-+/*
-+ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
-+ *
-+ * this macro returns the index of the entry in the pgd page which would
-+ * control the given virtual address
-+ */
-+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-+#define pgd_index_k(addr) pgd_index(addr)
-+
-+/*
-+ * pgd_offset() returns a (pgd_t *)
-+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
-+ */
-+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-+
-+/*
-+ * a shortcut which implies the use of the kernel's pgd, instead
-+ * of a process's
-+ */
-+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-+
-+/*
-+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
-+ *
-+ * this macro returns the index of the entry in the pmd page which would
-+ * control the given virtual address
-+ */
-+#define pmd_index(address) \
-+		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-+
-+/*
-+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
-+ *
-+ * this macro returns the index of the entry in the pte page which would
-+ * control the given virtual address
-+ */
-+#define pte_index(address) \
-+		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-+#define pte_offset_kernel(dir, address) \
-+	((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))
-+
-+/*
-+ * Helper function that returns the kernel pagetable entry controlling
-+ * the virtual address 'address'. NULL means no pagetable entry present.
-+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
-+ * as a pte too.
-+ */
-+extern pte_t *lookup_address(unsigned long address);
-+
-+/*
-+ * Make a given kernel text page executable/non-executable.
-+ * Returns the previous executability setting of that page (which
-+ * is used to restore the previous state). Used by the SMP bootup code.
-+ * NOTE: this is an __init function for security reasons.
-+ */
-+#ifdef CONFIG_X86_PAE
-+ extern int set_kernel_exec(unsigned long vaddr, int enable);
-+#else
-+ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
-+#endif
-+
-+extern void noexec_setup(const char *str);
-+
-+#if defined(CONFIG_HIGHPTE)
-+#define pte_offset_map(dir, address) \
-+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
-+	 pte_index(address))
-+#define pte_offset_map_nested(dir, address) \
-+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
-+	 pte_index(address))
-+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
-+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
-+#else
-+#define pte_offset_map(dir, address) \
-+	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
-+#define pte_unmap(pte) do { } while (0)
-+#define pte_unmap_nested(pte) do { } while (0)
-+#endif
-+
-+/*
-+ * The i386 doesn't have any external MMU info: the kernel page
-+ * tables contain all the necessary information.
-+ *
-+ * Also, we only update the dirty/accessed state if we set
-+ * the dirty bit by hand in the kernel, since the hardware
-+ * will do the accessed bit for us, and we don't want to
-+ * race with other CPU's that might be updating the dirty
-+ * bit at the same time.
-+ */
-+#define update_mmu_cache(vma,address,pte) do { } while (0)
-+#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-+	do {								  \
-+		if (__dirty) {						  \
-+		        if ( likely((__vma)->vm_mm == current->mm) ) {    \
-+			    BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
-+			} else {                                          \
-+                            xen_l1_entry_update((__ptep), (__entry)); \
-+			    flush_tlb_page((__vma), (__address));         \
-+			}                                                 \
-+		}							  \
-+	} while (0)
-+
-+#define __HAVE_ARCH_PTEP_ESTABLISH
-+#define ptep_establish(__vma, __address, __ptep, __entry)		\
-+do {				  					\
-+	ptep_set_access_flags(__vma, __address, __ptep, __entry, 1);	\
-+} while (0)
-+
-+#include <asm-xen/features.h>
-+void make_lowmem_page_readonly(void *va, unsigned int feature);
-+void make_lowmem_page_writable(void *va, unsigned int feature);
-+void make_page_readonly(void *va, unsigned int feature);
-+void make_page_writable(void *va, unsigned int feature);
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
-+
-+#define virt_to_ptep(__va)						\
-+({									\
-+	pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));		\
-+	pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));	\
-+	pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));	\
-+	pte_offset_kernel(__pmd, (unsigned long)(__va));		\
-+})
-+
-+#define arbitrary_virt_to_machine(__va)					\
-+({									\
-+	maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
-+	m | ((unsigned long)(__va) & (PAGE_SIZE-1));			\
-+})
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#ifndef CONFIG_DISCONTIGMEM
-+#define kern_addr_valid(addr)	(1)
-+#endif /* !CONFIG_DISCONTIGMEM */
-+
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+                           unsigned long address, 
-+                           unsigned long mfn,
-+                           unsigned long size, 
-+                           pgprot_t prot,
-+                           domid_t  domid);
-+int direct_kernel_remap_pfn_range(unsigned long address, 
-+				  unsigned long mfn,
-+				  unsigned long size, 
-+				  pgprot_t prot,
-+				  domid_t  domid);
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+                           unsigned long address,
-+                           uint64_t *ptep);
-+int touch_pte_range(struct mm_struct *mm,
-+                    unsigned long address,
-+                    unsigned long size);
-+
-+#define io_remap_page_range(vma,from,phys,size,prot) \
-+direct_remap_pfn_range(vma,from,(phys)>>PAGE_SHIFT,size,prot,DOMID_IO)
-+
-+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-+direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
-+
-+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
-+#define GET_IOSPACE(pfn)		0
-+#define GET_PFN(pfn)			(pfn)
-+
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-+#define __HAVE_ARCH_PTE_SAME
-+#include <asm-generic/pgtable.h>
-+
-+#endif /* _I386_PGTABLE_H */
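
As a rough illustration of how the pgd_index()/pte_index() macros above carve up a virtual address, here is a standalone sketch; it assumes the 2-level non-PAE layout (PGDIR_SHIFT 22, PAGE_SHIFT 12, 1024 entries per level) and a made-up address just above PAGE_OFFSET:

	#include <stdio.h>

	/* assumes the non-PAE constants: PGDIR_SHIFT 22, PAGE_SHIFT 12,
	 * PTRS_PER_PGD == PTRS_PER_PTE == 1024 */
	int main(void)
	{
		unsigned long va = 0xC0001000UL;		/* PAGE_OFFSET plus one page */
		unsigned long pgd_idx = (va >> 22) & 1023;	/* 0x300: kernel half of the pgd */
		unsigned long pte_idx = (va >> 12) & 1023;	/* 0x001: second pte in that table */

		printf("pgd_index=%#lx pte_index=%#lx\n", pgd_idx, pte_idx);
		return 0;
	}
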
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/processor.h linux-2.6.12-xen/include/asm-xen/asm-i386/processor.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/processor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/processor.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,703 @@
-+/*
-+ * include/asm-i386/processor.h
-+ *
-+ * Copyright (C) 1994 Linus Torvalds
-+ */
-+
-+#ifndef __ASM_I386_PROCESSOR_H
-+#define __ASM_I386_PROCESSOR_H
-+
-+#include <asm/vm86.h>
-+#include <asm/math_emu.h>
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/types.h>
-+#include <asm/sigcontext.h>
-+#include <asm/cpufeature.h>
-+#include <asm/msr.h>
-+#include <asm/system.h>
-+#include <linux/cache.h>
-+#include <linux/config.h>
-+#include <linux/threads.h>
-+#include <asm/percpu.h>
-+
-+/* flag for disabling the tsc */
-+extern int tsc_disable;
-+
-+struct desc_struct {
-+	unsigned long a,b;
-+};
-+
-+#define desc_empty(desc) \
-+		(!((desc)->a + (desc)->b))
-+
-+#define desc_equal(desc1, desc2) \
-+		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-+/*
-+ * Default implementation of macro that returns current
-+ * instruction pointer ("program counter").
-+ */
-+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
-+
-+/*
-+ *  CPU type and hardware bug flags. Kept separately for each CPU.
-+ *  Members of this structure are referenced in head.S, so think twice
-+ *  before touching them. [mj]
-+ */
-+
-+struct cpuinfo_x86 {
-+	__u8	x86;		/* CPU family */
-+	__u8	x86_vendor;	/* CPU vendor */
-+	__u8	x86_model;
-+	__u8	x86_mask;
-+	char	wp_works_ok;	/* It doesn't on 386's */
-+	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
-+	char	hard_math;
-+	char	rfu;
-+       	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
-+	unsigned long	x86_capability[NCAPINTS];
-+	char	x86_vendor_id[16];
-+	char	x86_model_id[64];
-+	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
-+				    call  */
-+	int 	x86_cache_alignment;	/* In bytes */
-+	int	fdiv_bug;
-+	int	f00f_bug;
-+	int	coma_bug;
-+	unsigned long loops_per_jiffy;
-+	unsigned char x86_num_cores;
-+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-+
-+#define X86_VENDOR_INTEL 0
-+#define X86_VENDOR_CYRIX 1
-+#define X86_VENDOR_AMD 2
-+#define X86_VENDOR_UMC 3
-+#define X86_VENDOR_NEXGEN 4
-+#define X86_VENDOR_CENTAUR 5
-+#define X86_VENDOR_RISE 6
-+#define X86_VENDOR_TRANSMETA 7
-+#define X86_VENDOR_NSC 8
-+#define X86_VENDOR_NUM 9
-+#define X86_VENDOR_UNKNOWN 0xff
-+
-+/*
-+ * capabilities of CPUs
-+ */
-+
-+extern struct cpuinfo_x86 boot_cpu_data;
-+extern struct cpuinfo_x86 new_cpu_data;
-+extern struct tss_struct doublefault_tss;
-+DECLARE_PER_CPU(struct tss_struct, init_tss);
-+DECLARE_PER_CPU(pgd_t *, cur_pgd);
-+
-+#ifdef CONFIG_SMP
-+extern struct cpuinfo_x86 cpu_data[];
-+#define current_cpu_data cpu_data[smp_processor_id()]
-+#else
-+#define cpu_data (&boot_cpu_data)
-+#define current_cpu_data boot_cpu_data
-+#endif
-+
-+extern	int phys_proc_id[NR_CPUS];
-+extern	int cpu_core_id[NR_CPUS];
-+extern char ignore_fpu_irq;
-+
-+extern void identify_cpu(struct cpuinfo_x86 *);
-+extern void print_cpu_info(struct cpuinfo_x86 *);
-+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-+
-+#ifdef CONFIG_X86_HT
-+extern void detect_ht(struct cpuinfo_x86 *c);
-+#else
-+static inline void detect_ht(struct cpuinfo_x86 *c) {}
-+#endif
-+
-+/*
-+ * EFLAGS bits
-+ */
-+#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
-+#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
-+#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
-+#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
-+#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
-+#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
-+#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
-+#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
-+#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
-+#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
-+#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
-+#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
-+#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
-+#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
-+#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
-+#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
-+#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
-+
-+/*
-+ * Generic CPUID function
-+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
-+ * resulting in stale register contents being returned.
-+ */
-+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-+{
-+	__asm__("cpuid"
-+		: "=a" (*eax),
-+		  "=b" (*ebx),
-+		  "=c" (*ecx),
-+		  "=d" (*edx)
-+		: "0" (op), "c"(0));
-+}
-+
-+/* Some CPUID calls want 'count' to be placed in ecx */
-+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-+	       	int *edx)
-+{
-+	__asm__("cpuid"
-+		: "=a" (*eax),
-+		  "=b" (*ebx),
-+		  "=c" (*ecx),
-+		  "=d" (*edx)
-+		: "0" (op), "c" (count));
-+}
-+
-+/*
-+ * CPUID functions returning a single datum
-+ */
-+static inline unsigned int cpuid_eax(unsigned int op)
-+{
-+	unsigned int eax;
-+
-+	__asm__("cpuid"
-+		: "=a" (eax)
-+		: "0" (op)
-+		: "bx", "cx", "dx");
-+	return eax;
-+}
-+static inline unsigned int cpuid_ebx(unsigned int op)
-+{
-+	unsigned int eax, ebx;
-+
-+	__asm__("cpuid"
-+		: "=a" (eax), "=b" (ebx)
-+		: "0" (op)
-+		: "cx", "dx" );
-+	return ebx;
-+}
-+static inline unsigned int cpuid_ecx(unsigned int op)
-+{
-+	unsigned int eax, ecx;
-+
-+	__asm__("cpuid"
-+		: "=a" (eax), "=c" (ecx)
-+		: "0" (op)
-+		: "bx", "dx" );
-+	return ecx;
-+}
-+static inline unsigned int cpuid_edx(unsigned int op)
-+{
-+	unsigned int eax, edx;
-+
-+	__asm__("cpuid"
-+		: "=a" (eax), "=d" (edx)
-+		: "0" (op)
-+		: "bx", "cx");
-+	return edx;
-+}
-+
-+#define load_cr3(pgdir) do {				\
-+	xen_pt_switch(__pa(pgdir));			\
-+	per_cpu(cur_pgd, smp_processor_id()) = pgdir;	\
-+} while (/* CONSTCOND */0)
-+
-+
-+/*
-+ * Intel CPU features in CR4
-+ */
-+#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
-+#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
-+#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
-+#define X86_CR4_DE		0x0008	/* enable debugging extensions */
-+#define X86_CR4_PSE		0x0010	/* enable page size extensions */
-+#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
-+#define X86_CR4_MCE		0x0040	/* Machine check enable */
-+#define X86_CR4_PGE		0x0080	/* enable global pages */
-+#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
-+#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
-+#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
-+
-+/*
-+ * Save the cr4 feature set we're using (ie
-+ * Pentium 4MB enable and PPro Global page
-+ * enable), so that any CPU's that boot up
-+ * after us can get the correct flags.
-+ */
-+extern unsigned long mmu_cr4_features;
-+
-+static inline void set_in_cr4 (unsigned long mask)
-+{
-+	mmu_cr4_features |= mask;
-+	switch (mask) {
-+	case X86_CR4_OSFXSR:
-+	case X86_CR4_OSXMMEXCPT:
-+		break;
-+	default:
-+		do {
-+			const char *msg = "Xen unsupported cr4 update\n";
-+			(void)HYPERVISOR_console_io(
-+				CONSOLEIO_write, __builtin_strlen(msg),
-+				(char *)msg);
-+			BUG();
-+		} while (0);
-+	}
-+}
-+
-+static inline void clear_in_cr4 (unsigned long mask)
-+{
-+	mmu_cr4_features &= ~mask;
-+	__asm__("movl %%cr4,%%eax\n\t"
-+		"andl %0,%%eax\n\t"
-+		"movl %%eax,%%cr4\n"
-+		: : "irg" (~mask)
-+		:"ax");
-+}
-+
-+/*
-+ *      NSC/Cyrix CPU configuration register indexes
-+ */
-+
-+#define CX86_PCR0 0x20
-+#define CX86_GCR  0xb8
-+#define CX86_CCR0 0xc0
-+#define CX86_CCR1 0xc1
-+#define CX86_CCR2 0xc2
-+#define CX86_CCR3 0xc3
-+#define CX86_CCR4 0xe8
-+#define CX86_CCR5 0xe9
-+#define CX86_CCR6 0xea
-+#define CX86_CCR7 0xeb
-+#define CX86_PCR1 0xf0
-+#define CX86_DIR0 0xfe
-+#define CX86_DIR1 0xff
-+#define CX86_ARR_BASE 0xc4
-+#define CX86_RCR_BASE 0xdc
-+
-+/*
-+ *      NSC/Cyrix CPU indexed register access macros
-+ */
-+
-+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-+
-+#define setCx86(reg, data) do { \
-+	outb((reg), 0x22); \
-+	outb((data), 0x23); \
-+} while (0)
-+
-+static inline void __monitor(const void *eax, unsigned long ecx,
-+		unsigned long edx)
-+{
-+	/* "monitor %eax,%ecx,%edx;" */
-+	asm volatile(
-+		".byte 0x0f,0x01,0xc8;"
-+		: :"a" (eax), "c" (ecx), "d"(edx));
-+}
-+
-+static inline void __mwait(unsigned long eax, unsigned long ecx)
-+{
-+	/* "mwait %eax,%ecx;" */
-+	asm volatile(
-+		".byte 0x0f,0x01,0xc9;"
-+		: :"a" (eax), "c" (ecx));
-+}
-+
-+/* from system description table in BIOS.  Mostly for MCA use, but
-+others may find it useful. */
-+extern unsigned int machine_id;
-+extern unsigned int machine_submodel_id;
-+extern unsigned int BIOS_revision;
-+extern unsigned int mca_pentium_flag;
-+
-+/* Boot loader type from the setup header */
-+extern int bootloader_type;
-+
-+/*
-+ * User space process size: 3GB (default).
-+ */
-+#define TASK_SIZE	(PAGE_OFFSET)
-+
-+/* This decides where the kernel will search for a free chunk of vm
-+ * space during mmap's.
-+ */
-+#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
-+
-+#define HAVE_ARCH_PICK_MMAP_LAYOUT
-+
-+/*
-+ * Size of io_bitmap.
-+ */
-+#define IO_BITMAP_BITS  65536
-+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-+#define INVALID_IO_BITMAP_OFFSET 0x8000
-+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
-+
-+struct i387_fsave_struct {
-+	long	cwd;
-+	long	swd;
-+	long	twd;
-+	long	fip;
-+	long	fcs;
-+	long	foo;
-+	long	fos;
-+	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-+	long	status;		/* software status information */
-+};
-+
-+struct i387_fxsave_struct {
-+	unsigned short	cwd;
-+	unsigned short	swd;
-+	unsigned short	twd;
-+	unsigned short	fop;
-+	long	fip;
-+	long	fcs;
-+	long	foo;
-+	long	fos;
-+	long	mxcsr;
-+	long	mxcsr_mask;
-+	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-+	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
-+	long	padding[56];
-+} __attribute__ ((aligned (16)));
-+
-+struct i387_soft_struct {
-+	long	cwd;
-+	long	swd;
-+	long	twd;
-+	long	fip;
-+	long	fcs;
-+	long	foo;
-+	long	fos;
-+	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
-+	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
-+	struct info	*info;
-+	unsigned long	entry_eip;
-+};
-+
-+union i387_union {
-+	struct i387_fsave_struct	fsave;
-+	struct i387_fxsave_struct	fxsave;
-+	struct i387_soft_struct soft;
-+};
-+
-+typedef struct {
-+	unsigned long seg;
-+} mm_segment_t;
-+
-+struct thread_struct;
-+
-+struct tss_struct {
-+	unsigned short	back_link,__blh;
-+	unsigned long	esp0;
-+	unsigned short	ss0,__ss0h;
-+	unsigned long	esp1;
-+	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
-+	unsigned long	esp2;
-+	unsigned short	ss2,__ss2h;
-+	unsigned long	__cr3;
-+	unsigned long	eip;
-+	unsigned long	eflags;
-+	unsigned long	eax,ecx,edx,ebx;
-+	unsigned long	esp;
-+	unsigned long	ebp;
-+	unsigned long	esi;
-+	unsigned long	edi;
-+	unsigned short	es, __esh;
-+	unsigned short	cs, __csh;
-+	unsigned short	ss, __ssh;
-+	unsigned short	ds, __dsh;
-+	unsigned short	fs, __fsh;
-+	unsigned short	gs, __gsh;
-+	unsigned short	ldt, __ldth;
-+	unsigned short	trace, io_bitmap_base;
-+	/*
-+	 * The extra 1 is there because the CPU will access an
-+	 * additional byte beyond the end of the IO permission
-+	 * bitmap. The extra byte must be all 1 bits, and must
-+	 * be within the limit.
-+	 */
-+	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
-+	/*
-+	 * Cache the current maximum and the last task that used the bitmap:
-+	 */
-+	unsigned long io_bitmap_max;
-+	struct thread_struct *io_bitmap_owner;
-+	/*
-+	 * pads the TSS to be cacheline-aligned (size is 0x100)
-+	 */
-+	unsigned long __cacheline_filler[35];
-+	/*
-+	 * .. and then another 0x100 bytes for emergency kernel stack
-+	 */
-+	unsigned long stack[64];
-+} __attribute__((packed));
-+
-+#define ARCH_MIN_TASKALIGN	16
-+
-+struct thread_struct {
-+/* cached TLS descriptors. */
-+	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-+	unsigned long	esp0;
-+	unsigned long	sysenter_cs;
-+	unsigned long	eip;
-+	unsigned long	esp;
-+	unsigned long	fs;
-+	unsigned long	gs;
-+	unsigned int	io_pl;
-+/* Hardware debugging registers */
-+	unsigned long	debugreg[8];  /* %%db0-7 debug registers */
-+/* fault info */
-+	unsigned long	cr2, trap_no, error_code;
-+/* floating point info */
-+	union i387_union	i387;
-+/* virtual 86 mode info */
-+	struct vm86_struct __user * vm86_info;
-+	unsigned long		screen_bitmap;
-+	unsigned long		v86flags, v86mask, saved_esp0;
-+	unsigned int		saved_fs, saved_gs;
-+/* IO permissions */
-+	unsigned long	*io_bitmap_ptr;
-+/* max allowed port in the bitmap, in bytes: */
-+	unsigned long	io_bitmap_max;
-+};
-+
-+#define INIT_THREAD  {							\
-+	.vm86_info = NULL,						\
-+	.sysenter_cs = __KERNEL_CS,					\
-+	.io_bitmap_ptr = NULL,						\
-+}
-+
-+/*
-+ * Note that the .io_bitmap member must be extra-big. This is because
-+ * the CPU will access an additional byte beyond the end of the IO
-+ * permission bitmap. The extra byte must be all 1 bits, and must
-+ * be within the limit.
-+ */
-+#define INIT_TSS  {							\
-+	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
-+	.ss0		= __KERNEL_DS,					\
-+	.ss1		= __KERNEL_CS,					\
-+	.ldt		= GDT_ENTRY_LDT,				\
-+	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
-+	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
-+}
-+
-+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-+{
-+	tss->esp0 = thread->esp0;
-+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-+		tss->ss1 = thread->sysenter_cs;
-+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-+	}
-+	HYPERVISOR_stack_switch(tss->ss0, tss->esp0);
-+}
-+
-+#define start_thread(regs, new_eip, new_esp) do {		\
-+	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
-+	set_fs(USER_DS);					\
-+	regs->xds = __USER_DS;					\
-+	regs->xes = __USER_DS;					\
-+	regs->xss = __USER_DS;					\
-+	regs->xcs = __USER_CS;					\
-+	regs->eip = new_eip;					\
-+	regs->esp = new_esp;					\
-+} while (0)
-+
-+/*
-+ * This special macro can be used to load a debugging register
-+ */
-+#define loaddebug(thread,register) \
-+		HYPERVISOR_set_debugreg((register), \
-+					((thread)->debugreg[register]))
-+
-+/* Forward declaration, a strange C thing */
-+struct task_struct;
-+struct mm_struct;
-+
-+/* Free all resources held by a thread. */
-+extern void release_thread(struct task_struct *);
-+
-+/* Prepare to copy thread state - unlazy all lazy status */
-+extern void prepare_to_copy(struct task_struct *tsk);
-+
-+/*
-+ * create a kernel thread without removing it from tasklists
-+ */
-+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-+
-+extern unsigned long thread_saved_pc(struct task_struct *tsk);
-+void show_trace(struct task_struct *task, unsigned long *stack);
-+
-+unsigned long get_wchan(struct task_struct *p);
-+
-+#define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
-+#define KSTK_TOP(info)                                                 \
-+({                                                                     \
-+       unsigned long *__ptr = (unsigned long *)(info);                 \
-+       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
-+})
-+
-+#define task_pt_regs(task)                                             \
-+({                                                                     \
-+       struct pt_regs *__regs__;                                       \
-+       __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info);     \
-+       __regs__ - 1;                                                   \
-+})
-+
-+#define KSTK_EIP(task) (task_pt_regs(task)->eip)
-+#define KSTK_ESP(task) (task_pt_regs(task)->esp)
-+
-+
-+struct microcode_header {
-+	unsigned int hdrver;
-+	unsigned int rev;
-+	unsigned int date;
-+	unsigned int sig;
-+	unsigned int cksum;
-+	unsigned int ldrver;
-+	unsigned int pf;
-+	unsigned int datasize;
-+	unsigned int totalsize;
-+	unsigned int reserved[3];
-+};
-+
-+struct microcode {
-+	struct microcode_header hdr;
-+	unsigned int bits[0];
-+};
-+
-+typedef struct microcode microcode_t;
-+typedef struct microcode_header microcode_header_t;
-+
-+/* microcode format is extended from prescott processors */
-+struct extended_signature {
-+	unsigned int sig;
-+	unsigned int pf;
-+	unsigned int cksum;
-+};
-+
-+struct extended_sigtable {
-+	unsigned int count;
-+	unsigned int cksum;
-+	unsigned int reserved[3];
-+	struct extended_signature sigs[0];
-+};
-+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-+#define MICROCODE_IOCFREE	_IO('6',0)
-+
-+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-+static inline void rep_nop(void)
-+{
-+	__asm__ __volatile__("rep;nop": : :"memory");
-+}
-+
-+#define cpu_relax()	rep_nop()
-+
-+/* generic versions from gas */
-+#define GENERIC_NOP1	".byte 0x90\n"
-+#define GENERIC_NOP2    	".byte 0x89,0xf6\n"
-+#define GENERIC_NOP3        ".byte 0x8d,0x76,0x00\n"
-+#define GENERIC_NOP4        ".byte 0x8d,0x74,0x26,0x00\n"
-+#define GENERIC_NOP5        GENERIC_NOP1 GENERIC_NOP4
-+#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
-+#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
-+#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7
-+
-+/* Opteron nops */
-+#define K8_NOP1 GENERIC_NOP1
-+#define K8_NOP2	".byte 0x66,0x90\n" 
-+#define K8_NOP3	".byte 0x66,0x66,0x90\n" 
-+#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" 
-+#define K8_NOP5	K8_NOP3 K8_NOP2 
-+#define K8_NOP6	K8_NOP3 K8_NOP3
-+#define K8_NOP7	K8_NOP4 K8_NOP3
-+#define K8_NOP8	K8_NOP4 K8_NOP4
-+
-+/* K7 nops */
-+/* uses eax dependencies (arbitrary choice) */
-+#define K7_NOP1  GENERIC_NOP1
-+#define K7_NOP2	".byte 0x8b,0xc0\n" 
-+#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
-+#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
-+#define K7_NOP5	K7_NOP4 ASM_NOP1
-+#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
-+#define K7_NOP7        ".byte 0x8D,0x04,0x05,0,0,0,0\n"
-+#define K7_NOP8        K7_NOP7 ASM_NOP1
-+
-+#ifdef CONFIG_MK8
-+#define ASM_NOP1 K8_NOP1
-+#define ASM_NOP2 K8_NOP2
-+#define ASM_NOP3 K8_NOP3
-+#define ASM_NOP4 K8_NOP4
-+#define ASM_NOP5 K8_NOP5
-+#define ASM_NOP6 K8_NOP6
-+#define ASM_NOP7 K8_NOP7
-+#define ASM_NOP8 K8_NOP8
-+#elif defined(CONFIG_MK7)
-+#define ASM_NOP1 K7_NOP1
-+#define ASM_NOP2 K7_NOP2
-+#define ASM_NOP3 K7_NOP3
-+#define ASM_NOP4 K7_NOP4
-+#define ASM_NOP5 K7_NOP5
-+#define ASM_NOP6 K7_NOP6
-+#define ASM_NOP7 K7_NOP7
-+#define ASM_NOP8 K7_NOP8
-+#else
-+#define ASM_NOP1 GENERIC_NOP1
-+#define ASM_NOP2 GENERIC_NOP2
-+#define ASM_NOP3 GENERIC_NOP3
-+#define ASM_NOP4 GENERIC_NOP4
-+#define ASM_NOP5 GENERIC_NOP5
-+#define ASM_NOP6 GENERIC_NOP6
-+#define ASM_NOP7 GENERIC_NOP7
-+#define ASM_NOP8 GENERIC_NOP8
-+#endif
-+
-+#define ASM_NOP_MAX 8
-+
-+/* Prefetch instructions for Pentium III and AMD Athlon */
-+/* It's not worth caring about 3dnow! prefetches for the K6
-+   because they are microcoded there and very slow.
-+   However we don't do prefetches for pre-XP Athlons currently.
-+   That should be fixed. */
-+#define ARCH_HAS_PREFETCH
-+extern inline void prefetch(const void *x)
-+{
-+	alternative_input(ASM_NOP4,
-+			  "prefetchnta (%1)",
-+			  X86_FEATURE_XMM,
-+			  "r" (x));
-+}
-+
-+#define ARCH_HAS_PREFETCH
-+#define ARCH_HAS_PREFETCHW
-+#define ARCH_HAS_SPINLOCK_PREFETCH
-+
-+/* 3dnow! prefetch to get an exclusive cache line. Useful for 
-+   spinlocks to avoid one state transition in the cache coherency protocol. */
-+extern inline void prefetchw(const void *x)
-+{
-+	alternative_input(ASM_NOP4,
-+			  "prefetchw (%1)",
-+			  X86_FEATURE_3DNOW,
-+			  "r" (x));
-+}
-+#define spin_lock_prefetch(x)	prefetchw(x)
-+
-+extern void select_idle_routine(const struct cpuinfo_x86 *c);
-+
-+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-+
-+extern unsigned long boot_option_idle_override;
-+
-+#endif /* __ASM_I386_PROCESSOR_H */
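
A minimal sketch of what the cpuid() wrapper above is typically used for: leaf 0 returns the 12-byte vendor string in EBX, EDX, ECX. show_vendor() is a hypothetical helper name, and the fragment assumes kernel code that already includes this header plus the usual printk()/memcpy() declarations:

	/* hypothetical helper built on the cpuid() wrapper above */
	static void show_vendor(void)
	{
		unsigned int eax, regs[3];
		char vendor[13];

		/* order the pointers so regs[] ends up as EBX, EDX, ECX */
		cpuid(0, &eax, &regs[0], &regs[2], &regs[1]);
		memcpy(vendor, regs, 12);
		vendor[12] = '\0';
		printk("cpu vendor: %s\n", vendor);
	}
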
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/ptrace.h linux-2.6.12-xen/include/asm-xen/asm-i386/ptrace.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/ptrace.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/ptrace.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,69 @@
-+#ifndef _I386_PTRACE_H
-+#define _I386_PTRACE_H
-+
-+#define EBX 0
-+#define ECX 1
-+#define EDX 2
-+#define ESI 3
-+#define EDI 4
-+#define EBP 5
-+#define EAX 6
-+#define DS 7
-+#define ES 8
-+#define FS 9
-+#define GS 10
-+#define ORIG_EAX 11
-+#define EIP 12
-+#define CS  13
-+#define EFL 14
-+#define UESP 15
-+#define SS   16
-+#define FRAME_SIZE 17
-+
-+/* this struct defines the way the registers are stored on the 
-+   stack during a system call. */
-+
-+struct pt_regs {
-+	long ebx;
-+	long ecx;
-+	long edx;
-+	long esi;
-+	long edi;
-+	long ebp;
-+	long eax;
-+	int  xds;
-+	int  xes;
-+	long orig_eax;
-+	long eip;
-+	int  xcs;
-+	long eflags;
-+	long esp;
-+	int  xss;
-+};
-+
-+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-+#define PTRACE_GETREGS            12
-+#define PTRACE_SETREGS            13
-+#define PTRACE_GETFPREGS          14
-+#define PTRACE_SETFPREGS          15
-+#define PTRACE_GETFPXREGS         18
-+#define PTRACE_SETFPXREGS         19
-+
-+#define PTRACE_OLDSETOPTIONS         21
-+
-+#define PTRACE_GET_THREAD_AREA    25
-+#define PTRACE_SET_THREAD_AREA    26
-+
-+#ifdef __KERNEL__
-+struct task_struct;
-+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
-+#define user_mode(regs) ((VM_MASK & (regs)->eflags) || (2 & (regs)->xcs))
-+#define instruction_pointer(regs) ((regs)->eip)
-+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+extern unsigned long profile_pc(struct pt_regs *regs);
-+#else
-+#define profile_pc(regs) instruction_pointer(regs)
-+#endif
-+#endif
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/scatterlist.h linux-2.6.12-xen/include/asm-xen/asm-i386/scatterlist.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/scatterlist.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/scatterlist.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,22 @@
-+#ifndef _I386_SCATTERLIST_H
-+#define _I386_SCATTERLIST_H
-+
-+struct scatterlist {
-+    struct page		*page;
-+    unsigned int	offset;
-+    unsigned int	length;
-+    dma_addr_t		dma_address;
-+    unsigned int	dma_length;
-+};
-+
-+/* These macros should be used after a pci_map_sg call has been done
-+ * to get bus addresses of each of the SG entries and their lengths.
-+ * You should only work with the number of sg entries pci_map_sg
-+ * returns.
-+ */
-+#define sg_dma_address(sg)	((sg)->dma_address)
-+#define sg_dma_len(sg)		((sg)->dma_length)
-+
-+#define ISA_DMA_THRESHOLD (0x00ffffff)
-+
-+#endif /* !(_I386_SCATTERLIST_H) */
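
A hypothetical driver fragment showing the usage pattern the comment above prescribes: only the count returned by pci_map_sg() is walked, and each mapped entry is read through sg_dma_address()/sg_dma_len(). setup_one_descriptor() is a made-up stand-in for the device-specific programming:

	#include <linux/pci.h>

	static void program_dma(struct pci_dev *pdev, struct scatterlist *sg, int nents)
	{
		int i, count;

		count = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
		for (i = 0; i < count; i++)
			/* made-up hook: hand one bus address/length pair to the device */
			setup_one_descriptor(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
	}
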
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/segment.h linux-2.6.12-xen/include/asm-xen/asm-i386/segment.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/segment.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/segment.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,99 @@
-+#ifndef _ASM_SEGMENT_H
-+#define _ASM_SEGMENT_H
-+
-+/*
-+ * The layout of the per-CPU GDT under Linux:
-+ *
-+ *   0 - null
-+ *   1 - reserved
-+ *   2 - reserved
-+ *   3 - reserved
-+ *
-+ *   4 - unused			<==== new cacheline
-+ *   5 - unused
-+ *
-+ *  ------- start of TLS (Thread-Local Storage) segments:
-+ *
-+ *   6 - TLS segment #1			[ glibc's TLS segment ]
-+ *   7 - TLS segment #2			[ Wine's %fs Win32 segment ]
-+ *   8 - TLS segment #3
-+ *   9 - reserved
-+ *  10 - reserved
-+ *  11 - reserved
-+ *
-+ *  ------- start of kernel segments:
-+ *
-+ *  12 - kernel code segment		<==== new cacheline
-+ *  13 - kernel data segment
-+ *  14 - default user CS
-+ *  15 - default user DS
-+ *  16 - TSS
-+ *  17 - LDT
-+ *  18 - PNPBIOS support (16->32 gate)
-+ *  19 - PNPBIOS support
-+ *  20 - PNPBIOS support
-+ *  21 - PNPBIOS support
-+ *  22 - PNPBIOS support
-+ *  23 - APM BIOS support
-+ *  24 - APM BIOS support
-+ *  25 - APM BIOS support 
-+ *
-+ *  26 - ESPFIX small SS
-+ *  27 - unused
-+ *  28 - unused
-+ *  29 - unused
-+ *  30 - unused
-+ *  31 - TSS for double fault handler
-+ */
-+#define GDT_ENTRY_TLS_ENTRIES	3
-+#define GDT_ENTRY_TLS_MIN	6
-+#define GDT_ENTRY_TLS_MAX 	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-+
-+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-+
-+#define GDT_ENTRY_DEFAULT_USER_CS	14
-+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
-+
-+#define GDT_ENTRY_DEFAULT_USER_DS	15
-+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
-+
-+#define GDT_ENTRY_KERNEL_BASE	12
-+
-+#define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE + 0)
-+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8 + 1)
-+
-+#define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE + 1)
-+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8 + 1)
-+
-+#define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE + 4)
-+#define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE + 5)
-+
-+#define GDT_ENTRY_PNPBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 6)
-+#define GDT_ENTRY_APMBIOS_BASE		(GDT_ENTRY_KERNEL_BASE + 11)
-+
-+#define GDT_ENTRY_ESPFIX_SS		(GDT_ENTRY_KERNEL_BASE + 14)
-+#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
-+
-+#define GDT_ENTRY_DOUBLEFAULT_TSS	31
-+
-+/*
-+ * The GDT has 32 entries
-+ */
-+#define GDT_ENTRIES 32
-+
-+#define GDT_SIZE (GDT_ENTRIES * 8)
-+
-+/* Simple and small GDT entries for booting only */
-+
-+#define __BOOT_CS	FLAT_KERNEL_CS
-+
-+#define __BOOT_DS	FLAT_KERNEL_DS
-+
-+/*
-+ * The interrupt descriptor table has room for 256 entries;
-+ * the global descriptor table is dependent on the number
-+ * of tasks we can have.
-+ */
-+#define IDT_ENTRIES 256
-+
-+#endif
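
Worked out from the defines above: a selector is the GDT index times 8 plus the requested privilege level, so __USER_CS = 14*8 + 3 = 0x73 and __USER_DS = 15*8 + 3 = 0x7b, while the kernel selectors carry RPL 1 (__KERNEL_CS = 12*8 + 1 = 0x61, __KERNEL_DS = 13*8 + 1 = 0x69), presumably because the paravirtualised kernel runs in ring 1 under Xen instead of ring 0.
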
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/setup.h linux-2.6.12-xen/include/asm-xen/asm-i386/setup.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/setup.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/setup.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,66 @@
-+/*
-+ *	Just a place holder. We don't want to have to test x86 before
-+ *	we include stuff
-+ */
-+
-+#ifndef _i386_SETUP_H
-+#define _i386_SETUP_H
-+
-+#define PFN_UP(x)	(((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-+#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
-+#define PFN_PHYS(x)	((unsigned long long)(x) << PAGE_SHIFT)
-+
-+/*
-+ * Reserved space for vmalloc and iomap - defined in asm/page.h
-+ */
-+#define MAXMEM_PFN	PFN_DOWN(MAXMEM)
-+#define MAX_NONPAE_PFN	(1 << 20)
-+
-+#define PARAM_SIZE 4096
-+#define COMMAND_LINE_SIZE 256
-+
-+#define OLD_CL_MAGIC_ADDR	0x90020
-+#define OLD_CL_MAGIC		0xA33F
-+#define OLD_CL_BASE_ADDR	0x90000
-+#define OLD_CL_OFFSET		0x90022
-+#define NEW_CL_POINTER		0x228	/* Relative to real mode data */
-+
-+#ifndef __ASSEMBLY__
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+extern unsigned char boot_params[PARAM_SIZE];
-+
-+#define PARAM	(boot_params)
-+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-+#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
-+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-+#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
-+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-+#define IST_INFO   (*(struct ist_info *) (PARAM+0x60))
-+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-+#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
-+#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
-+#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
-+#define EFI_MEMMAP ((efi_memory_desc_t *) *((unsigned long *)(PARAM+0x1d0)))
-+#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4)))
-+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-+#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-+#define KERNEL_START (*(unsigned long *) (PARAM+0x214))
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
-+#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
-+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-+#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif /* _i386_SETUP_H */
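
A quick worked example of the PFN helpers above with 4 KB pages: PFN_DOWN(0x1801) = 0x1801 >> 12 = 1, PFN_UP(0x1801) = (0x1801 + 0xfff) >> 12 = 2, and PFN_PHYS(2) = 2 << 12 = 0x2000; in other words PFN_UP rounds a byte address up to the next whole frame while PFN_DOWN truncates.
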
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/smp.h linux-2.6.12-xen/include/asm-xen/asm-i386/smp.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/smp.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/smp.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,93 @@
-+#ifndef __ASM_SMP_H
-+#define __ASM_SMP_H
-+
-+/*
-+ * We need the APIC definitions automatically as part of 'smp.h'
-+ */
-+#ifndef __ASSEMBLY__
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#ifndef __ASSEMBLY__
-+#include <asm/fixmap.h>
-+#include <asm/bitops.h>
-+#include <asm/mpspec.h>
-+#ifdef CONFIG_X86_IO_APIC
-+#include <asm/io_apic.h>
-+#endif
-+#include <asm/apic.h>
-+#endif
-+#endif
-+
-+#define BAD_APICID 0xFFu
-+#ifdef CONFIG_SMP
-+#ifndef __ASSEMBLY__
-+
-+/*
-+ * Private routines/data
-+ */
-+ 
-+extern void smp_alloc_memory(void);
-+extern int pic_mode;
-+extern int smp_num_siblings;
-+extern cpumask_t cpu_sibling_map[];
-+extern cpumask_t cpu_core_map[];
-+
-+extern void smp_flush_tlb(void);
-+extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-+extern void smp_invalidate_rcv(void);		/* Process an NMI */
-+extern void (*mtrr_hook) (void);
-+extern void zap_low_mappings (void);
-+
-+#define MAX_APICID 256
-+extern u8 x86_cpu_to_apicid[];
-+
-+/*
-+ * This function is needed by all SMP systems. It must _always_ be valid
-+ * from the initial startup. We map APIC_BASE very early in page_setup(),
-+ * so this is correct in the x86 case.
-+ */
-+#define __smp_processor_id() (current_thread_info()->cpu)
-+
-+extern cpumask_t cpu_possible_map;
-+#define cpu_callin_map cpu_possible_map
-+
-+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-+static inline int num_booting_cpus(void)
-+{
-+	return cpus_weight(cpu_possible_map);
-+}
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+
-+#ifdef APIC_DEFINITION
-+extern int hard_smp_processor_id(void);
-+#else
-+#include <mach_apicdef.h>
-+static inline int hard_smp_processor_id(void)
-+{
-+	/* we don't want to mark this access volatile - bad code generation */
-+	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
-+}
-+#endif
-+
-+static __inline int logical_smp_processor_id(void)
-+{
-+	/* we don't want to mark this access volatile - bad code generation */
-+	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-+}
-+
-+#endif
-+
-+extern int __cpu_disable(void);
-+extern void __cpu_die(unsigned int cpu);
-+#endif /* !__ASSEMBLY__ */
-+
-+#define NO_PROC_ID		0xFF		/* No processor magic marker */
-+
-+#endif
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/spinlock.h linux-2.6.12-xen/include/asm-xen/asm-i386/spinlock.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/spinlock.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/spinlock.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,277 @@
-+#ifndef __ASM_SPINLOCK_H
-+#define __ASM_SPINLOCK_H
-+
-+#include <asm/atomic.h>
-+#include <asm/rwlock.h>
-+#include <asm/page.h>
-+#include <linux/config.h>
-+#include <linux/compiler.h>
-+#include <asm/smp_alt.h>
-+
-+asmlinkage int printk(const char * fmt, ...)
-+	__attribute__ ((format (printf, 1, 2)));
-+
-+/*
-+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
-+ */
-+
-+typedef struct {
-+	volatile unsigned int slock;
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	unsigned magic;
-+#endif
-+#ifdef CONFIG_PREEMPT
-+	unsigned int break_lock;
-+#endif
-+} spinlock_t;
-+
-+#define SPINLOCK_MAGIC	0xdead4ead
-+
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
-+#else
-+#define SPINLOCK_MAGIC_INIT	/* */
-+#endif
-+
-+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-+
-+#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-+
-+/*
-+ * Simple spin lock operations.  There are two variants: one clears IRQs
-+ * on the local processor, one does not.
-+ *
-+ * We make no fairness assumptions. They have a cost.
-+ */
-+
-+#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) <= 0)
-+#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
-+
-+#define spin_lock_string \
-+        "1:\n" \
-+	LOCK \
-+	"decb %0\n\t" \
-+	"jns 3f\n" \
-+	"2:\t" \
-+	"rep;nop\n\t" \
-+	"cmpb $0,%0\n\t" \
-+	"jle 2b\n\t" \
-+	"jmp 1b\n" \
-+	"3:\n\t"
-+
-+#define spin_lock_string_flags \
-+        "1:\n" \
-+	LOCK \
-+	"decb %0\n\t" \
-+	"jns 4f\n\t" \
-+	"2:\t" \
-+	"testl $0x200, %1\n\t" \
-+	"jz 3f\n\t" \
-+	"#sti\n\t" \
-+	"3:\t" \
-+	"rep;nop\n\t" \
-+	"cmpb $0, %0\n\t" \
-+	"jle 3b\n\t" \
-+	"#cli\n\t" \
-+	"jmp 1b\n" \
-+	"4:\n\t"
-+
-+/*
-+ * This works. Despite all the confusion.
-+ * (except on PPro SMP or if we are using OOSTORE)
-+ * (PPro errata 66, 92)
-+ */
-+
-+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-+
-+#define spin_unlock_string \
-+	"movb $1,%0" \
-+		:"=m" (lock->slock) : : "memory"
-+
-+
-+static inline void _raw_spin_unlock(spinlock_t *lock)
-+{
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	BUG_ON(lock->magic != SPINLOCK_MAGIC);
-+	BUG_ON(!spin_is_locked(lock));
-+#endif
-+	__asm__ __volatile__(
-+		spin_unlock_string
-+	);
-+}
-+
-+#else
-+
-+#define spin_unlock_string \
-+	"xchgb %b0, %1" \
-+		:"=q" (oldval), "=m" (lock->slock) \
-+		:"0" (oldval) : "memory"
-+
-+static inline void _raw_spin_unlock(spinlock_t *lock)
-+{
-+	char oldval = 1;
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	BUG_ON(lock->magic != SPINLOCK_MAGIC);
-+	BUG_ON(!spin_is_locked(lock));
-+#endif
-+	__asm__ __volatile__(
-+		spin_unlock_string
-+	);
-+}
-+
-+#endif
-+
-+static inline int _raw_spin_trylock(spinlock_t *lock)
-+{
-+	char oldval;
-+#ifdef CONFIG_SMP_ALTERNATIVES
-+	__asm__ __volatile__(
-+		"1:movb %1,%b0\n"
-+		"movb $0,%1\n"
-+		"2:"
-+		".section __smp_alternatives,\"a\"\n"
-+		".long 1b\n"
-+		".long 3f\n"
-+		".previous\n"
-+		".section __smp_replacements,\"a\"\n"
-+		"3: .byte 2b - 1b\n"
-+		".byte 5f-4f\n"
-+		".byte 0\n"
-+		".byte 6f-5f\n"
-+		".byte -1\n"
-+		"4: xchgb %b0,%1\n"
-+		"5: movb %1,%b0\n"
-+		"movb $0,%1\n"
-+		"6:\n"
-+		".previous\n"
-+		:"=q" (oldval), "=m" (lock->slock)
-+		:"0" (0) : "memory");
-+#else
-+	__asm__ __volatile__(
-+		"xchgb %b0,%1\n"
-+		:"=q" (oldval), "=m" (lock->slock)
-+		:"0" (0) : "memory");
-+#endif
-+	return oldval > 0;
-+}
-+
-+static inline void _raw_spin_lock(spinlock_t *lock)
-+{
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-+		printk("eip: %p\n", __builtin_return_address(0));
-+		BUG();
-+	}
-+#endif
-+	__asm__ __volatile__(
-+		spin_lock_string
-+		:"=m" (lock->slock) : : "memory");
-+}
-+
-+static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
-+{
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
-+		printk("eip: %p\n", __builtin_return_address(0));
-+		BUG();
-+	}
-+#endif
-+	__asm__ __volatile__(
-+		spin_lock_string_flags
-+		:"=m" (lock->slock) : "r" (flags) : "memory");
-+}
-+
-+/*
-+ * Read-write spinlocks, allowing multiple readers
-+ * but only one writer.
-+ *
-+ * NOTE! it is quite common to have readers in interrupts
-+ * but no interrupt writers. For those circumstances we
-+ * can "mix" irq-safe locks - any writer needs to get a
-+ * irq-safe write-lock, but readers can get non-irqsafe
-+ * read-locks.
-+ */
-+typedef struct {
-+	volatile unsigned int lock;
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	unsigned magic;
-+#endif
-+#ifdef CONFIG_PREEMPT
-+	unsigned int break_lock;
-+#endif
-+} rwlock_t;
-+
-+#define RWLOCK_MAGIC	0xdeaf1eed
-+
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
-+#else
-+#define RWLOCK_MAGIC_INIT	/* */
-+#endif
-+
-+#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-+
-+#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-+
-+/**
-+ * read_can_lock - would read_trylock() succeed?
-+ * @lock: the rwlock in question.
-+ */
-+#define read_can_lock(x) ((int)(x)->lock > 0)
-+
-+/**
-+ * write_can_lock - would write_trylock() succeed?
-+ * @lock: the rwlock in question.
-+ */
-+#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-+
-+/*
-+ * On x86, we implement read-write locks as a 32-bit counter
-+ * with the high bit (sign) being the "contended" bit.
-+ *
-+ * The inline assembly is non-obvious. Think about it.
-+ *
-+ * Changed to use the same technique as rw semaphores.  See
-+ * semaphore.h for details.  -ben
-+ */
-+/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
-+
-+static inline void _raw_read_lock(rwlock_t *rw)
-+{
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	BUG_ON(rw->magic != RWLOCK_MAGIC);
-+#endif
-+	__build_read_lock(rw, "__read_lock_failed");
-+}
-+
-+static inline void _raw_write_lock(rwlock_t *rw)
-+{
-+#ifdef CONFIG_DEBUG_SPINLOCK
-+	BUG_ON(rw->magic != RWLOCK_MAGIC);
-+#endif
-+	__build_write_lock(rw, "__write_lock_failed");
-+}
-+
-+#define _raw_read_unlock(rw)	asm volatile(LOCK "incl %0" :"=m" ((rw)->lock) : : "memory")
-+#define _raw_write_unlock(rw)	asm volatile(LOCK "addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-+
-+static inline int _raw_read_trylock(rwlock_t *lock)
-+{
-+	atomic_t *count = (atomic_t *)lock;
-+	atomic_dec(count);
-+	if (atomic_read(count) >= 0)
-+		return 1;
-+	atomic_inc(count);
-+	return 0;
-+}
-+
-+static inline int _raw_write_trylock(rwlock_t *lock)
-+{
-+	atomic_t *count = (atomic_t *)lock;
-+	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
-+		return 1;
-+	atomic_add(RW_LOCK_BIAS, count);
-+	return 0;
-+}
-+
-+#endif /* __ASM_SPINLOCK_H */
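
A hypothetical fragment illustrating the two variants mentioned in the locking comment above: the irq-saving form is used where the same data is also touched from interrupt context, the plain form inside the handler itself. demo_lock, demo_events and the two function names are made up, and a driver would normally include <linux/spinlock.h> rather than this header directly:

	#include <linux/spinlock.h>

	static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;	/* made-up example lock */
	static unsigned long demo_events;

	static void touch_from_process_context(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);	/* also masks local interrupts */
		demo_events++;
		spin_unlock_irqrestore(&demo_lock, flags);
	}

	static void touch_from_irq_handler(void)
	{
		/* plain lock is enough here: the process-context side masked
		 * interrupts, so the handler can never self-deadlock */
		spin_lock(&demo_lock);
		demo_events++;
		spin_unlock(&demo_lock);
	}
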
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/swiotlb.h linux-2.6.12-xen/include/asm-xen/asm-i386/swiotlb.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/swiotlb.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/swiotlb.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,42 @@
-+#ifndef _ASM_SWIOTLB_H
-+#define _ASM_SWIOTLB_H 1
-+
-+#include <linux/config.h>
-+
-+/* SWIOTLB interface */
-+
-+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
-+				      int dir);
-+extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-+				  size_t size, int dir);
-+extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
-+					 dma_addr_t dev_addr,
-+					 size_t size, int dir);
-+extern void swiotlb_sync_single_for_device(struct device *hwdev,
-+					    dma_addr_t dev_addr,
-+					    size_t size, int dir);
-+extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
-+				     struct scatterlist *sg, int nelems,
-+				     int dir);
-+extern void swiotlb_sync_sg_for_device(struct device *hwdev,
-+					struct scatterlist *sg, int nelems,
-+					int dir);
-+extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
-+		      int nents, int direction);
-+extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-+			 int nents, int direction);
-+extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-+extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
-+                                   unsigned long offset, size_t size,
-+                                   enum dma_data_direction direction);
-+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
-+                               size_t size, enum dma_data_direction direction);
-+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-+
-+#ifdef CONFIG_SWIOTLB
-+extern int swiotlb;
-+#else
-+#define swiotlb 0
-+#endif
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/synch_bitops.h linux-2.6.12-xen/include/asm-xen/asm-i386/synch_bitops.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/synch_bitops.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,140 @@
-+#ifndef __XEN_SYNCH_BITOPS_H__
-+#define __XEN_SYNCH_BITOPS_H__
-+
-+/*
-+ * Copyright 1992, Linus Torvalds.
-+ * Heavily modified to provide guaranteed strong synchronisation
-+ * when communicating with Xen or other guest OSes running on other CPUs.
-+ */
-+
-+#include <linux/config.h>
-+
-+#define ADDR (*(volatile long *) addr)
-+
-+static __inline__ void synch_set_bit(int nr, volatile void * addr)
-+{
-+    __asm__ __volatile__ ( 
-+        "lock btsl %1,%0"
-+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-+}
-+
-+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-+{
-+    __asm__ __volatile__ (
-+        "lock btrl %1,%0"
-+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-+}
-+
-+static __inline__ void synch_change_bit(int nr, volatile void * addr)
-+{
-+    __asm__ __volatile__ (
-+        "lock btcl %1,%0"
-+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
-+}
-+
-+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-+{
-+    int oldbit;
-+    __asm__ __volatile__ (
-+        "lock btsl %2,%1\n\tsbbl %0,%0"
-+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-+    return oldbit;
-+}
-+
-+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-+{
-+    int oldbit;
-+    __asm__ __volatile__ (
-+        "lock btrl %2,%1\n\tsbbl %0,%0"
-+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-+    return oldbit;
-+}
-+
-+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-+{
-+    int oldbit;
-+
-+    __asm__ __volatile__ (
-+        "lock btcl %2,%1\n\tsbbl %0,%0"
-+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
-+    return oldbit;
-+}
-+
-+struct __synch_xchg_dummy { unsigned long a[100]; };
-+#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
-+
-+#define synch_cmpxchg(ptr, old, new) \
-+((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
-+                                     (unsigned long)(old), \
-+                                     (unsigned long)(new), \
-+                                     sizeof(*(ptr))))
-+
-+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
-+					    unsigned long old,
-+					    unsigned long new, int size)
-+{
-+	unsigned long prev;
-+	switch (size) {
-+	case 1:
-+		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 2:
-+		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
-+#ifdef CONFIG_X86_64
-+	case 4:
-+		__asm__ __volatile__("lock; cmpxchgl %k1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 8:
-+		__asm__ __volatile__("lock; cmpxchgq %1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
-+#else
-+	case 4:
-+		__asm__ __volatile__("lock; cmpxchgl %1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__synch_xg(ptr)),
-+				       "0"(old)
-+				     : "memory");
-+		return prev;
-+#endif
-+	}
-+	return old;
-+}
-+
-+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-+{
-+    return ((1UL << (nr & 31)) & 
-+            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-+}
-+
-+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-+{
-+    int oldbit;
-+    __asm__ __volatile__ (
-+        "btl %2,%1\n\tsbbl %0,%0"
-+        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
-+    return oldbit;
-+}
-+
-+#define synch_test_bit(nr,addr) \
-+(__builtin_constant_p(nr) ? \
-+ synch_const_test_bit((nr),(addr)) : \
-+ synch_var_test_bit((nr),(addr)))
-+
-+#endif /* __XEN_SYNCH_BITOPS_H__ */
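
As a rough sketch of the cross-domain handoff the comment at the top of this header alludes to: a consumer drains a bitmap shared with another guest, and the lock-prefixed test-and-clear guarantees each set bit is claimed exactly once even though the producer runs on another (virtual) CPU. drain_pending() and handle_event() are hypothetical names, not Xen API:

	static void drain_pending(volatile unsigned long *shared_bitmap, int nr_bits)
	{
		int bit;

		for (bit = 0; bit < nr_bits; bit++)
			/* atomic claim: only one side ever sees the bit set */
			if (synch_test_and_clear_bit(bit, (void *)shared_bitmap))
				handle_event(bit);	/* hypothetical per-event handler */
	}
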
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/system.h linux-2.6.12-xen/include/asm-xen/asm-i386/system.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/system.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/system.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,588 @@
-+#ifndef __ASM_SYSTEM_H
-+#define __ASM_SYSTEM_H
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/bitops.h>
-+#include <asm/synch_bitops.h>
-+#include <asm/segment.h>
-+#include <asm/cpufeature.h>
-+#include <asm/hypervisor.h>
-+#include <asm/smp_alt.h>
-+
-+#ifdef __KERNEL__
-+
-+struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
-+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-+
-+#define switch_to(prev,next,last) do {					\
-+	unsigned long esi,edi;						\
-+	asm volatile("pushfl\n\t"					\
-+		     "pushl %%ebp\n\t"					\
-+		     "movl %%esp,%0\n\t"	/* save ESP */		\
-+		     "movl %5,%%esp\n\t"	/* restore ESP */	\
-+		     "movl $1f,%1\n\t"		/* save EIP */		\
-+		     "pushl %6\n\t"		/* restore EIP */	\
-+		     "jmp __switch_to\n"				\
-+		     "1:\t"						\
-+		     "popl %%ebp\n\t"					\
-+		     "popfl"						\
-+		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
-+		      "=a" (last),"=S" (esi),"=D" (edi)			\
-+		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
-+		      "2" (prev), "d" (next));				\
-+} while (0)
-+
-+#define _set_base(addr,base) do { unsigned long __pr; \
-+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-+	"rorl $16,%%edx\n\t" \
-+	"movb %%dl,%2\n\t" \
-+	"movb %%dh,%3" \
-+	:"=&d" (__pr) \
-+	:"m" (*((addr)+2)), \
-+	 "m" (*((addr)+4)), \
-+	 "m" (*((addr)+7)), \
-+         "0" (base) \
-+        ); } while(0)
-+
-+#define _set_limit(addr,limit) do { unsigned long __lr; \
-+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-+	"rorl $16,%%edx\n\t" \
-+	"movb %2,%%dh\n\t" \
-+	"andb $0xf0,%%dh\n\t" \
-+	"orb %%dh,%%dl\n\t" \
-+	"movb %%dl,%2" \
-+	:"=&d" (__lr) \
-+	:"m" (*(addr)), \
-+	 "m" (*((addr)+6)), \
-+	 "0" (limit) \
-+        ); } while(0)
-+
-+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
-+
-+static inline unsigned long _get_base(char * addr)
-+{
-+	unsigned long __base;
-+	__asm__("movb %3,%%dh\n\t"
-+		"movb %2,%%dl\n\t"
-+		"shll $16,%%edx\n\t"
-+		"movw %1,%%dx"
-+		:"=&d" (__base)
-+		:"m" (*((addr)+2)),
-+		 "m" (*((addr)+4)),
-+		 "m" (*((addr)+7)));
-+	return __base;
-+}
-+
-+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
-+
-+/*
-+ * Load a segment. Fall back on loading the zero
-+ * segment if something goes wrong..
-+ */
-+#define loadsegment(seg,value)			\
-+	asm volatile("\n"			\
-+		"1:\t"				\
-+		"mov %0,%%" #seg "\n"		\
-+		"2:\n"				\
-+		".section .fixup,\"ax\"\n"	\
-+		"3:\t"				\
-+		"pushl $0\n\t"			\
-+		"popl %%" #seg "\n\t"		\
-+		"jmp 2b\n"			\
-+		".previous\n"			\
-+		".section __ex_table,\"a\"\n\t"	\
-+		".align 4\n\t"			\
-+		".long 1b,3b\n"			\
-+		".previous"			\
-+		: :"m" (value))
-+
-+/*
-+ * Save a segment register away
-+ */
-+#define savesegment(seg, value) \
-+	asm volatile("mov %%" #seg ",%0":"=m" (value))
-+
-+/*
-+ * Clear and set 'TS' bit respectively
-+ */
-+#define clts() (HYPERVISOR_fpu_taskswitch(0))
-+#define read_cr0() ({ \
-+	unsigned int __dummy; \
-+	__asm__( \
-+		"movl %%cr0,%0\n\t" \
-+		:"=r" (__dummy)); \
-+	__dummy; \
-+})
-+#define write_cr0(x) \
-+	__asm__("movl %0,%%cr0": :"r" (x));
-+
-+#define read_cr4() ({ \
-+	unsigned int __dummy; \
-+	__asm__( \
-+		"movl %%cr4,%0\n\t" \
-+		:"=r" (__dummy)); \
-+	__dummy; \
-+})
-+#define write_cr4(x) \
-+	__asm__("movl %0,%%cr4": :"r" (x));
-+#define stts() (HYPERVISOR_fpu_taskswitch(1))
-+
-+#endif	/* __KERNEL__ */
-+
-+#define wbinvd() \
-+	__asm__ __volatile__ ("wbinvd": : :"memory");
-+
-+static inline unsigned long get_limit(unsigned long segment)
-+{
-+	unsigned long __limit;
-+	__asm__("lsll %1,%0"
-+		:"=r" (__limit):"r" (segment));
-+	return __limit+1;
-+}
-+
-+#define nop() __asm__ __volatile__ ("nop")
-+
-+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-+
-+#define tas(ptr) (xchg((ptr),1))
-+
-+struct __xchg_dummy { unsigned long a[100]; };
-+#define __xg(x) ((struct __xchg_dummy *)(x))
-+
-+
-+/*
-+ * The semantics of CMPXCHG8B are a bit strange; this is why
-+ * there is a loop and the loading of %%eax and %%edx has to
-+ * be inside. This inlines well in most cases; the cached
-+ * cost is around ~38 cycles. (In the future we might want
-+ * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
-+ * might have an implicit FPU save as a cost, so it's not
-+ * clear which path to take.)
-+ *
-+ * cmpxchg8b must be used with the lock prefix here to allow
-+ * the instruction to be executed atomically, see page 3-102
-+ * of the instruction set reference 24319102.pdf. We need
-+ * the reader side to see the coherent 64bit value.
-+ */
-+static inline void __set_64bit (unsigned long long * ptr,
-+		unsigned int low, unsigned int high)
-+{
-+	__asm__ __volatile__ (
-+		"\n1:\t"
-+		"movl (%0), %%eax\n\t"
-+		"movl 4(%0), %%edx\n\t"
-+		"lock cmpxchg8b (%0)\n\t"
-+		"jnz 1b"
-+		: /* no outputs */
-+		:	"D"(ptr),
-+			"b"(low),
-+			"c"(high)
-+		:	"ax","dx","memory");
-+}
-+
-+static inline void __set_64bit_constant (unsigned long long *ptr,
-+						 unsigned long long value)
-+{
-+	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-+}
-+#define ll_low(x)	*(((unsigned int*)&(x))+0)
-+#define ll_high(x)	*(((unsigned int*)&(x))+1)
-+
-+static inline void __set_64bit_var (unsigned long long *ptr,
-+			 unsigned long long value)
-+{
-+	__set_64bit(ptr,ll_low(value), ll_high(value));
-+}
-+
-+#define set_64bit(ptr,value) \
-+(__builtin_constant_p(value) ? \
-+ __set_64bit_constant(ptr, value) : \
-+ __set_64bit_var(ptr, value) )
-+
-+#define _set_64bit(ptr,value) \
-+(__builtin_constant_p(value) ? \
-+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
-+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
-+
-+/*
-+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
-+ * Note 2: xchg has a side effect, so the volatile attribute is necessary;
-+ *	  strictly speaking the constraints are imprecise, since *ptr is also an output argument. --ANK
-+ */
-+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-+{
-+	switch (size) {
-+		case 1:
-+			__asm__ __volatile__("xchgb %b0,%1"
-+				:"=q" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 2:
-+			__asm__ __volatile__("xchgw %w0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 4:
-+			__asm__ __volatile__("xchgl %0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+	}
-+	return x;
-+}
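
A short usage sketch (not part of the patch): xchg() suits "take and clear" patterns, e.g. snapshotting a pending-work word. The 'pending' pointer is hypothetical.

static unsigned long take_pending(unsigned long *pending)
{
	/* Atomically fetch the old value and leave zero behind. */
	return xchg(pending, 0UL);
}
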
-+
-+/*
-+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
-+ * store NEW in MEM.  Return the initial value in MEM.  Success is
-+ * indicated by comparing RETURN with OLD.
-+ */
-+
-+#ifdef CONFIG_X86_CMPXCHG
-+#define __HAVE_ARCH_CMPXCHG 1
-+#endif
-+
-+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-+				      unsigned long new, int size)
-+{
-+	unsigned long prev;
-+	switch (size) {
-+	case 1:
-+		__asm__ __volatile__(LOCK "cmpxchgb %b1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 2:
-+		__asm__ __volatile__(LOCK "cmpxchgw %w1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 4:
-+		__asm__ __volatile__(LOCK "cmpxchgl %1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	}
-+	return old;
-+}
-+
-+#define cmpxchg(ptr,o,n)\
-+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-+					(unsigned long)(n),sizeof(*(ptr))))
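
A usage sketch (not part of the patch) of the usual compare-and-swap retry loop built on cmpxchg(); 'counter' and 'limit' are hypothetical.

static int inc_unless_at_limit(unsigned int *counter, unsigned int limit)
{
	unsigned int old, new;

	do {
		old = *counter;
		if (old >= limit)
			return 0;	/* refuse to go past the limit */
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);

	return 1;			/* successfully incremented */
}
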
-+    
-+#ifdef __KERNEL__
-+struct alt_instr { 
-+	__u8 *instr; 		/* original instruction */
-+	__u8 *replacement;
-+	__u8  cpuid;		/* cpuid bit set for replacement */
-+	__u8  instrlen;		/* length of original instruction */
-+	__u8  replacementlen; 	/* length of new instruction, <= instrlen */ 
-+	__u8  pad;
-+}; 
-+#endif
-+
-+/* 
-+ * Alternative instructions for different CPU types or capabilities.
-+ * 
-+ * This allows the use of optimized instructions even on generic binary
-+ * kernels.
-+ * 
-+ * The length of oldinstr must be greater than or equal to the length of
-+ * newinstr; it can be padded with nops as needed.
-+ * 
-+ * For non-barrier-like inlines, please define new variants
-+ * without volatile and the memory clobber.
-+ */
-+#define alternative(oldinstr, newinstr, feature) 	\
-+	asm volatile ("661:\n\t" oldinstr "\n662:\n" 		     \
-+		      ".section .altinstructions,\"a\"\n"     	     \
-+		      "  .align 4\n"				       \
-+		      "  .long 661b\n"            /* label */          \
-+		      "  .long 663f\n"		  /* new instruction */ 	\
-+		      "  .byte %c0\n"             /* feature bit */    \
-+		      "  .byte 662b-661b\n"       /* sourcelen */      \
-+		      "  .byte 664f-663f\n"       /* replacementlen */ \
-+		      ".previous\n"						\
-+		      ".section .altinstr_replacement,\"ax\"\n"			\
-+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */    \
-+		      ".previous" :: "i" (feature) : "memory")  
-+
-+/*
-+ * Alternative inline assembly with input.
-+ * 
-+ * Peculiarities:
-+ * No memory clobber here.
-+ * Argument numbers start with 1.
-+ * It is best to use constraints that are fixed size (like (%1) ... "r").
-+ * If you use variable-sized constraints like "m" or "g" in the
-+ * replacement, make sure to pad to the worst-case length.
-+ */
-+#define alternative_input(oldinstr, newinstr, feature, input...)		\
-+	asm volatile ("661:\n\t" oldinstr "\n662:\n"				\
-+		      ".section .altinstructions,\"a\"\n"			\
-+		      "  .align 4\n"						\
-+		      "  .long 661b\n"            /* label */			\
-+		      "  .long 663f\n"		  /* new instruction */ 	\
-+		      "  .byte %c0\n"             /* feature bit */		\
-+		      "  .byte 662b-661b\n"       /* sourcelen */		\
-+		      "  .byte 664f-663f\n"       /* replacementlen */ 		\
-+		      ".previous\n"						\
-+		      ".section .altinstr_replacement,\"ax\"\n"			\
-+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ 	\
-+		      ".previous" :: "i" (feature), ##input)
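
For illustration (not part of the patch), this is roughly how the stock i386 prefetch() in <asm/processor.h> consumes alternative_input(); ASM_NOP4 and X86_FEATURE_XMM come from <asm/processor.h> and <asm/cpufeature.h>.

static inline void prefetch_line(const void *x)
{
	alternative_input(ASM_NOP4,		/* oldinstr: 4 bytes of nop        */
			  "prefetchnta (%1)",	/* newinstr, patched in when SSE   */
			  X86_FEATURE_XMM,
			  "r" (x));		/* input %1 (%0 is the feature bit) */
}
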
-+
-+/*
-+ * Force strict CPU ordering.
-+ * And yes, this is required on UP too when we're talking
-+ * to devices.
-+ *
-+ * For now, "wmb()" doesn't actually do anything, as all
-+ * Intel CPUs follow what Intel calls *Processor Order*,
-+ * in which all writes are seen in program order even
-+ * outside the CPU.
-+ *
-+ * I expect future Intel CPUs to have a weaker ordering,
-+ * but I'd also expect them to finally get their act together
-+ * and add some real memory barriers if so.
-+ *
-+ * Some non-Intel clones support out-of-order stores; wmb() ceases to be a
-+ * no-op for these.
-+ */
-+ 
-+
-+/* 
-+ * Actually only lfence would be needed for mb() because all stores done 
-+ * by the kernel should be already ordered. But keep a full barrier for now. 
-+ */
-+
-+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-+
-+/**
-+ * read_barrier_depends - Flush all pending reads that subsequent reads
-+ * depend on.
-+ *
-+ * No data-dependent reads from memory-like regions are ever reordered
-+ * over this barrier.  All reads preceding this primitive are guaranteed
-+ * to access memory (but not necessarily other CPUs' caches) before any
-+ * reads following this primitive that depend on the data returned by
-+ * any of the preceding reads.  This primitive is much lighter weight than
-+ * rmb() on most CPUs, and is never heavier weight than
-+ * rmb().
-+ *
-+ * These ordering constraints are respected by both the local CPU
-+ * and the compiler.
-+ *
-+ * Ordering is not guaranteed by anything other than these primitives,
-+ * not even by data dependencies.  See the documentation for
-+ * memory_barrier() for examples and URLs to more information.
-+ *
-+ * For example, the following code would force ordering (the initial
-+ * value of "a" is zero, "b" is one, and "p" is "&a"):
-+ *
-+ * <programlisting>
-+ *	CPU 0				CPU 1
-+ *
-+ *	b = 2;
-+ *	memory_barrier();
-+ *	p = &b;				q = p;
-+ *					read_barrier_depends();
-+ *					d = *q;
-+ * </programlisting>
-+ *
-+ * because the read of "*q" depends on the read of "p" and these
-+ * two reads are separated by a read_barrier_depends().  However,
-+ * the following code, with the same initial values for "a" and "b":
-+ *
-+ * <programlisting>
-+ *	CPU 0				CPU 1
-+ *
-+ *	a = 2;
-+ *	memory_barrier();
-+ *	b = 3;				y = b;
-+ *					read_barrier_depends();
-+ *					x = a;
-+ * </programlisting>
-+ *
-+ * does not enforce ordering, since there is no data dependency between
-+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
-+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
-+ * in cases like this where there are no data dependencies.
-+ **/
-+
-+#define read_barrier_depends()	do { } while(0)
-+
-+#ifdef CONFIG_X86_OOSTORE
-+/* Actually there are no OOO-store-capable CPUs that do SSE for now,
-+   but allow for the possibility already. */
-+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-+#else
-+#define wmb()	__asm__ __volatile__ ("": : :"memory")
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#define smp_wmb()	wmb()
-+#if defined(CONFIG_SMP_ALTERNATIVES) && !defined(MODULE)
-+#define smp_alt_mb(instr)                                           \
-+__asm__ __volatile__("6667:\nnop\nnop\nnop\nnop\nnop\nnop\n6668:\n" \
-+		     ".section __smp_alternatives,\"a\"\n"          \
-+		     ".long 6667b\n"                                \
-+                     ".long 6673f\n"                                \
-+		     ".previous\n"                                  \
-+		     ".section __smp_replacements,\"a\"\n"          \
-+		     "6673:.byte 6668b-6667b\n"                     \
-+		     ".byte 6670f-6669f\n"                          \
-+		     ".byte 6671f-6670f\n"                          \
-+                     ".byte 0\n"                                    \
-+		     ".byte %c0\n"                                  \
-+		     "6669:lock;addl $0,0(%%esp)\n"                 \
-+		     "6670:" instr "\n"                             \
-+		     "6671:\n"                                      \
-+		     ".previous\n"                                  \
-+		     :                                              \
-+		     : "i" (X86_FEATURE_XMM2)                       \
-+		     : "memory")
-+#define smp_rmb() smp_alt_mb("lfence")
-+#define smp_mb()  smp_alt_mb("mfence")
-+#define set_mb(var, value) do {                                     \
-+unsigned long __set_mb_temp;                                        \
-+__asm__ __volatile__("6667:movl %1, %0\n6668:\n"                    \
-+		     ".section __smp_alternatives,\"a\"\n"          \
-+		     ".long 6667b\n"                                \
-+		     ".long 6673f\n"                                \
-+		     ".previous\n"                                  \
-+		     ".section __smp_replacements,\"a\"\n"          \
-+		     "6673: .byte 6668b-6667b\n"                    \
-+		     ".byte 6670f-6669f\n"                          \
-+		     ".byte 0\n"                                    \
-+		     ".byte 6671f-6670f\n"                          \
-+		     ".byte -1\n"                                   \
-+		     "6669: xchg %1, %0\n"                          \
-+		     "6670:movl %1, %0\n"                           \
-+		     "6671:\n"                                      \
-+		     ".previous\n"                                  \
-+		     : "=m" (var), "=r" (__set_mb_temp)             \
-+		     : "1" (value)                                  \
-+		     : "memory"); } while (0)
-+#else
-+#define smp_rmb()	rmb()
-+#define smp_mb()	mb()
-+#define set_mb(var, value) do { xchg(&var, value); } while (0)
-+#endif
-+#define smp_read_barrier_depends()	read_barrier_depends()
-+#else
-+#define smp_mb()	barrier()
-+#define smp_rmb()	barrier()
-+#define smp_wmb()	barrier()
-+#define smp_read_barrier_depends()	do { } while(0)
-+#define set_mb(var, value) do { var = value; barrier(); } while (0)
-+#endif
-+
-+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
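
A hedged producer/consumer sketch (not part of the patch) showing the usual smp_wmb()/smp_rmb() pairing these macros are meant for; the ring structure is hypothetical.

#define EX_RING_SIZE 32			/* hypothetical */

struct ex_req  { unsigned long data; };

struct ex_ring {
	struct ex_req entries[EX_RING_SIZE];
	unsigned int  prod_idx, cons_idx;
};

static void ex_publish(struct ex_ring *r, const struct ex_req *req)
{
	unsigned int i = r->prod_idx;

	r->entries[i % EX_RING_SIZE] = *req;
	smp_wmb();			/* make the entry visible before the index */
	r->prod_idx = i + 1;
}

static int ex_consume(struct ex_ring *r, struct ex_req *out)
{
	if (r->cons_idx == r->prod_idx)
		return 0;
	smp_rmb();			/* read the index before the entry */
	*out = r->entries[r->cons_idx % EX_RING_SIZE];
	r->cons_idx++;
	return 1;
}
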
-+
-+/* interrupt control.. */
-+
-+/* 
-+ * The use of 'barrier' in the following reflects these macros' role as
-+ * local-lock operations. Reentrancy must be prevented (e.g., by __cli())
-+ * /before/ the following critical operations are executed. All critical
-+ * operations must complete /before/ reentrancy is permitted (e.g., by
-+ * __sti()). The Alpha architecture also includes these barriers, for example.
-+ */
-+
-+#define __cli()								\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	_vcpu->evtchn_upcall_mask = 1;					\
-+	preempt_enable_no_resched();					\
-+	barrier();							\
-+} while (0)
-+
-+#define __sti()								\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	barrier();							\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	_vcpu->evtchn_upcall_mask = 0;					\
-+	barrier(); /* unmask then check (avoid races) */		\
-+	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
-+		force_evtchn_callback();				\
-+	preempt_enable();						\
-+} while (0)
-+
-+#define __save_flags(x)							\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	(x) = _vcpu->evtchn_upcall_mask;				\
-+	preempt_enable();						\
-+} while (0)
-+
-+#define __restore_flags(x)						\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	barrier();							\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
-+		barrier(); /* unmask then check (avoid races) */	\
-+		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
-+			force_evtchn_callback();			\
-+		preempt_enable();					\
-+	} else								\
-+		preempt_enable_no_resched();				\
-+} while (0)
-+
-+#define safe_halt()		((void)0)
-+
-+#define __save_and_cli(x)						\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	(x) = _vcpu->evtchn_upcall_mask;				\
-+	_vcpu->evtchn_upcall_mask = 1;					\
-+	preempt_enable_no_resched();					\
-+	barrier();							\
-+} while (0)
-+
-+#define local_irq_save(x)	__save_and_cli(x)
-+#define local_irq_restore(x)	__restore_flags(x)
-+#define local_save_flags(x)	__save_flags(x)
-+#define local_irq_disable()	__cli()
-+#define local_irq_enable()	__sti()
-+
-+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
-+#define irqs_disabled()							\
-+({	int ___x;							\
-+	vcpu_info_t *_vcpu;						\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	___x = (_vcpu->evtchn_upcall_mask != 0);			\
-+	preempt_enable_no_resched();					\
-+	___x; })
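
Usage is unchanged from native Linux; only the implementation differs, in that the macros toggle evtchn_upcall_mask instead of executing cli/sti. A hedged sketch (not part of the patch), with a hypothetical singly-linked list:

struct ex_node { struct ex_node *next; };

static void ex_push(struct ex_node **head, struct ex_node *n)
{
	unsigned long flags;

	local_irq_save(flags);		/* masks event-channel upcalls */
	n->next = *head;
	*head = n;
	local_irq_restore(flags);	/* may call force_evtchn_callback() */
}
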
-+
-+/*
-+ * disable hlt during certain critical i/o operations
-+ */
-+#define HAVE_DISABLE_HLT
-+void disable_hlt(void);
-+void enable_hlt(void);
-+
-+extern int es7000_plat;
-+void cpu_idle_wait(void);
-+
-+extern unsigned long arch_align_stack(unsigned long sp);
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/tlbflush.h linux-2.6.12-xen/include/asm-xen/asm-i386/tlbflush.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/tlbflush.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,102 @@
-+#ifndef _I386_TLBFLUSH_H
-+#define _I386_TLBFLUSH_H
-+
-+#include <linux/config.h>
-+#include <linux/mm.h>
-+#include <asm/processor.h>
-+
-+#define __flush_tlb() xen_tlb_flush()
-+#define __flush_tlb_global() xen_tlb_flush()
-+#define __flush_tlb_all() xen_tlb_flush()
-+
-+extern unsigned long pgkern_mask;
-+
-+#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
-+
-+#define __flush_tlb_single(addr) xen_invlpg(addr)
-+
-+#define __flush_tlb_one(addr) __flush_tlb_single(addr)
-+
-+/*
-+ * TLB flushing:
-+ *
-+ *  - flush_tlb() flushes the current mm struct TLBs
-+ *  - flush_tlb_all() flushes all processes TLBs
-+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
-+ *  - flush_tlb_page(vma, vmaddr) flushes one page
-+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
-+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
-+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
-+ *
-+ * ..but the i386 has somewhat limited tlb flushing capabilities,
-+ * and page-granular flushes are available only on i486 and up.
-+ */
-+
-+#ifndef CONFIG_SMP
-+
-+#define flush_tlb() __flush_tlb()
-+#define flush_tlb_all() __flush_tlb_all()
-+#define local_flush_tlb() __flush_tlb()
-+
-+static inline void flush_tlb_mm(struct mm_struct *mm)
-+{
-+	if (mm == current->active_mm)
-+		__flush_tlb();
-+}
-+
-+static inline void flush_tlb_page(struct vm_area_struct *vma,
-+	unsigned long addr)
-+{
-+	if (vma->vm_mm == current->active_mm)
-+		__flush_tlb_one(addr);
-+}
-+
-+static inline void flush_tlb_range(struct vm_area_struct *vma,
-+	unsigned long start, unsigned long end)
-+{
-+	if (vma->vm_mm == current->active_mm)
-+		__flush_tlb();
-+}
-+
-+#else
-+
-+#include <asm/smp.h>
-+
-+#define local_flush_tlb() \
-+	__flush_tlb()
-+
-+extern void flush_tlb_all(void);
-+extern void flush_tlb_current_task(void);
-+extern void flush_tlb_mm(struct mm_struct *);
-+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-+
-+#define flush_tlb()	flush_tlb_current_task()
-+
-+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-+{
-+	flush_tlb_mm(vma->vm_mm);
-+}
-+
-+#define TLBSTATE_OK	1
-+#define TLBSTATE_LAZY	2
-+
-+struct tlb_state
-+{
-+	struct mm_struct *active_mm;
-+	int state;
-+	char __cacheline_padding[L1_CACHE_BYTES-8];
-+};
-+DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
-+
-+
-+#endif
-+
-+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
-+
-+static inline void flush_tlb_pgtables(struct mm_struct *mm,
-+				      unsigned long start, unsigned long end)
-+{
-+	/* i386 does not keep any page table caches in TLB */
-+}
-+
-+#endif /* _I386_TLBFLUSH_H */
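
A hedged sketch (not part of the patch) of the contract these helpers assume: update the PTE first, then flush the single address. set_pte() and pte_wrprotect() are the generic 2.6.12 page-table helpers, used here purely for illustration.

static void ex_wrprotect_page(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, pte_wrprotect(*ptep));
	flush_tlb_page(vma, addr);	/* drop the now-stale TLB entry */
}
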
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-i386/vga.h linux-2.6.12-xen/include/asm-xen/asm-i386/vga.h
---- pristine-linux-2.6.12/include/asm-xen/asm-i386/vga.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-i386/vga.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,20 @@
-+/*
-+ *	Access to VGA videoram
-+ *
-+ *	(c) 1998 Martin Mares <mj at ucw.cz>
-+ */
-+
-+#ifndef _LINUX_ASM_VGA_H_
-+#define _LINUX_ASM_VGA_H_
-+
-+/*
-+ *	On the PC, we can just recalculate addresses and then
-+ *	access the videoram directly without any black magic.
-+ */
-+
-+#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
-+
-+#define vga_readb(x) (*(x))
-+#define vga_writeb(x,y) (*(y) = (x))
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-ia64/fixmap.h linux-2.6.12-xen/include/asm-xen/asm-ia64/fixmap.h
---- pristine-linux-2.6.12/include/asm-xen/asm-ia64/fixmap.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-ia64/fixmap.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+#define clear_fixmap(x)	do {} while (0)
-+#define	set_fixmap(x,y)	do {} while (0)
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-ia64/hypercall.h linux-2.6.12-xen/include/asm-xen/asm-ia64/hypercall.h
---- pristine-linux-2.6.12/include/asm-xen/asm-ia64/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-ia64/hypercall.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,500 @@
-+/******************************************************************************
-+ * hypercall.h
-+ * 
-+ * Linux-specific hypervisor handling.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
-+
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/sched.h>
-+
-+/* FIXME: temp place to hold these page related macros */
-+#include <asm/page.h>
-+#define virt_to_machine(v) __pa(v)
-+#define machine_to_virt(m) __va(m)
-+//#define virt_to_mfn(v)	(__pa(v) >> 14)
-+//#define mfn_to_virt(m)	(__va(m << 14))
-+#define virt_to_mfn(v)	((__pa(v)) >> PAGE_SHIFT)
-+#define mfn_to_virt(m)	(__va((m) << PAGE_SHIFT))
-+
-+/*
-+ * Assembler stubs for hyper-calls.
-+ */
-+
-+#if 0
-+static inline int
-+HYPERVISOR_set_trap_table(
-+    trap_info_t *table)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ignore;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ignore)
-+	: "0" (__HYPERVISOR_set_trap_table), "1" (table)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_mmu_update(
-+    mmu_update_t *req, int count, int *success_count, domid_t domid)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2, ign3, ign4;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-+	: "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
-+        "3" (success_count), "4" (domid)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_mmuext_op(
-+    struct mmuext_op *op, int count, int *success_count, domid_t domid)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2, ign3, ign4;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-+	: "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count),
-+        "3" (success_count), "4" (domid)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_set_gdt(
-+    unsigned long *frame_list, int entries)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-+	: "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
-+	: "memory" );
-+
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_stack_switch(
-+    unsigned long ss, unsigned long esp)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-+	: "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_set_callbacks(
-+    unsigned long event_selector, unsigned long event_address,
-+    unsigned long failsafe_selector, unsigned long failsafe_address)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2, ign3, ign4;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-+	: "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
-+	  "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_fpu_taskswitch(
-+    int set)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign)
-+        : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
-+        : "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_sched_op(
-+    int cmd, unsigned long arg)
-+{
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_suspend(
-+    unsigned long srec)
-+{
-+    return 1;
-+}
-+
-+static inline long
-+HYPERVISOR_set_timer_op(
-+    u64 timeout)
-+{
-+#if 0
-+    int ret;
-+    unsigned long timeout_hi = (unsigned long)(timeout>>32);
-+    unsigned long timeout_lo = (unsigned long)timeout;
-+    unsigned long ign1, ign2;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-+	: "0" (__HYPERVISOR_set_timer_op), "b" (timeout_lo), "c" (timeout_hi)
-+	: "memory");
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_dom0_op(
-+    dom0_op_t *dom0_op)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1;
-+
-+    dom0_op->interface_version = DOM0_INTERFACE_VERSION;
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1)
-+	: "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
-+	: "memory");
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_set_debugreg(
-+    int reg, unsigned long value)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2;
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-+	: "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_get_debugreg(
-+    int reg)
-+{
-+#if 0
-+    unsigned long ret;
-+    unsigned long ign;
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign)
-+	: "0" (__HYPERVISOR_get_debugreg), "1" (reg)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_update_descriptor(
-+    unsigned long ma, unsigned long word1, unsigned long word2)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2, ign3;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
-+	: "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
-+	  "3" (word2)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_set_fast_trap(
-+    int idx)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign)
-+	: "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_dom_mem_op(
-+    unsigned int op, unsigned long *extent_list,
-+    unsigned long nr_extents, unsigned int extent_order)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2, ign3, ign4, ign5;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
-+	  "=D" (ign5)
-+	: "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
-+	  "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
-+        : "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_multicall(
-+    void *call_list, int nr_calls)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-+	: "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+#endif
-+
-+static inline int
-+HYPERVISOR_update_va_mapping(
-+    unsigned long va, pte_t new_val, unsigned long flags)
-+{
-+    /* no-op */
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_memory_op(
-+    unsigned int cmd, void *arg)
-+{
-+    int ret;
-+    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
-+        : "=r" (ret)
-+        : "i" (__HYPERVISOR_memory_op), "r"(cmd), "r"(arg)
-+        : "r14","r15","r2","r8","memory" );
-+    return ret;
-+}
-+
-+static inline int
-+HYPERVISOR_event_channel_op(
-+    void *op)
-+{
-+    int ret;
-+    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
-+        : "=r" (ret)
-+        : "i" (__HYPERVISOR_event_channel_op), "r"(op)
-+        : "r14","r2","r8","memory" );
-+    return ret;
-+}
-+
-+#if 0
-+static inline int
-+HYPERVISOR_xen_version(
-+    int cmd)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ignore;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ignore)
-+	: "0" (__HYPERVISOR_xen_version), "1" (cmd)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+#endif
-+
-+static inline int
-+HYPERVISOR_console_io(
-+    int cmd, int count, char *str)
-+{
-+    int ret;
-+    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r16=%4 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
-+        : "=r" (ret)
-+        : "i" (__HYPERVISOR_console_io), "r"(cmd), "r"(count), "r"(str)
-+        : "r14","r15","r16","r2","r8","memory" );
-+    return ret;
-+}
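
A hedged sketch (not part of the patch): writing a string to the hypervisor console through this stub. CONSOLEIO_write is defined in asm-xen/xen-public/xen.h; strlen() comes from <linux/string.h>.

static void ex_xen_puts(const char *msg)
{
	(void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), (char *)msg);
}
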
-+
-+#if 0
-+static inline int
-+HYPERVISOR_physdev_op(
-+    void *physdev_op)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign)
-+	: "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+#endif
-+
-+static inline int
-+HYPERVISOR_grant_table_op(
-+    unsigned int cmd, void *uop, unsigned int count)
-+{
-+    int ret;
-+    __asm__ __volatile__ ( ";; mov r14=%2 ; mov r15=%3 ; mov r16=%4 ; mov r2=%1 ; break 0x1000 ;; mov %0=r8 ;;"
-+        : "=r" (ret)
-+        : "i" (__HYPERVISOR_grant_table_op), "r"(cmd), "r"(uop), "r"(count)
-+        : "r14","r15","r16","r2","r8","memory" );
-+    return ret;
-+}
-+
-+#if 0
-+static inline int
-+HYPERVISOR_update_va_mapping_otherdomain(
-+    unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2, ign3, ign4;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
-+	: "0" (__HYPERVISOR_update_va_mapping_otherdomain),
-+          "1" (va), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
-+        "memory" );
-+    
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+static inline int
-+HYPERVISOR_vm_assist(
-+    unsigned int cmd, unsigned int type)
-+{
-+#if 0
-+    int ret;
-+    unsigned long ign1, ign2;
-+
-+    __asm__ __volatile__ (
-+        TRAP_INSTR
-+        : "=a" (ret), "=b" (ign1), "=c" (ign2)
-+	: "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
-+	: "memory" );
-+
-+    return ret;
-+#endif
-+    return 1;
-+}
-+
-+#endif
-+
-+#endif /* __HYPERCALL_H__ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-ia64/hypervisor.h linux-2.6.12-xen/include/asm-xen/asm-ia64/hypervisor.h
---- pristine-linux-2.6.12/include/asm-xen/asm-ia64/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-ia64/hypervisor.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,70 @@
-+/******************************************************************************
-+ * hypervisor.h
-+ * 
-+ * Linux-specific hypervisor handling.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERVISOR_H__
-+#define __HYPERVISOR_H__
-+
-+#include <linux/config.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/version.h>
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/dom0_ops.h>
-+#include <asm/ptrace.h>
-+#include <asm/page.h>
-+
-+extern shared_info_t *HYPERVISOR_shared_info;
-+extern start_info_t *xen_start_info;
-+
-+void force_evtchn_callback(void);
-+
-+#include <asm/hypercall.h>
-+
-+// for drivers/xen/privcmd/privcmd.c
-+#define direct_remap_pfn_range(a,b,c,d,e,f) remap_pfn_range(a,b,c,d,e)
-+#define	pfn_to_mfn(x)	(x)
-+#define	mfn_to_pfn(x)	(x)
-+#define machine_to_phys_mapping 0
-+
-+// for drivers/xen/balloon/balloon.c
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-+#else
-+#define scrub_pages(_p,_n) ((void)0)
-+#endif
-+#define	pte_mfn(_x)	pte_pfn(_x)
-+#define INVALID_P2M_ENTRY	(~0UL)
-+#define __pte_ma(_x)	((pte_t) {(_x)})
-+#define phys_to_machine_mapping_valid(_x)	(1)
-+#define	kmap_flush_unused()	do {} while (0)
-+#define set_phys_to_machine(_x,_y)	do {} while (0)
-+#define xen_machphys_update(_x,_y)	do {} while (0)
-+#define pfn_pte_ma(_x,_y)	__pte_ma(0)
-+
-+#endif /* __HYPERVISOR_H__ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-ia64/synch_bitops.h linux-2.6.12-xen/include/asm-xen/asm-ia64/synch_bitops.h
---- pristine-linux-2.6.12/include/asm-xen/asm-ia64/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-ia64/synch_bitops.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,61 @@
-+#ifndef __XEN_SYNCH_BITOPS_H__
-+#define __XEN_SYNCH_BITOPS_H__
-+
-+/*
-+ * Copyright 1992, Linus Torvalds.
-+ * Heavily modified to provide guaranteed strong synchronisation
-+ * when communicating with Xen or other guest OSes running on other CPUs.
-+ */
-+
-+#include <linux/config.h>
-+
-+#define ADDR (*(volatile long *) addr)
-+
-+static __inline__ void synch_set_bit(int nr, volatile void * addr)
-+{
-+	set_bit(nr, addr);
-+}
-+
-+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
-+{
-+	clear_bit(nr, addr);
-+}
-+
-+static __inline__ void synch_change_bit(int nr, volatile void * addr)
-+{
-+	change_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
-+{
-+    return test_and_set_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
-+{
-+    return test_and_clear_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
-+{
-+    return test_and_change_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
-+{
-+    return test_bit(nr, addr);
-+}
-+
-+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
-+{
-+    return test_bit(nr, addr);
-+}
-+
-+#define synch_cmpxchg	ia64_cmpxchg4_acq
-+
-+#define synch_test_bit(nr,addr) \
-+(__builtin_constant_p(nr) ? \
-+ synch_const_test_bit((nr),(addr)) : \
-+ synch_var_test_bit((nr),(addr)))
-+
-+#endif /* __XEN_SYNCH_BITOPS_H__ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/arch_hooks.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/arch_hooks.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/arch_hooks.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/arch_hooks.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,27 @@
-+#ifndef _ASM_ARCH_HOOKS_H
-+#define _ASM_ARCH_HOOKS_H
-+
-+#include <linux/interrupt.h>
-+
-+/*
-+ *	linux/include/asm/arch_hooks.h
-+ *
-+ *	define the architecture specific hooks 
-+ */
-+
-+/* these aren't arch hooks, they are generic routines
-+ * that can be used by the hooks */
-+extern void init_ISA_irqs(void);
-+extern void apic_intr_init(void);
-+extern void smp_intr_init(void);
-+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-+
-+/* these are the defined hooks */
-+extern void intr_init_hook(void);
-+extern void pre_intr_init_hook(void);
-+extern void pre_setup_arch_hook(void);
-+extern void trap_init_hook(void);
-+extern void time_init_hook(void);
-+extern void mca_nmi_hook(void);
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/bootsetup.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/bootsetup.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/bootsetup.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/bootsetup.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,42 @@
-+
-+#ifndef _X86_64_BOOTSETUP_H
-+#define _X86_64_BOOTSETUP_H 1
-+
-+#define BOOT_PARAM_SIZE		4096
-+extern char x86_boot_params[BOOT_PARAM_SIZE];
-+
-+/*
-+ * This is set up by the setup-routine at boot-time
-+ */
-+#define PARAM	((unsigned char *)x86_boot_params)
-+#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
-+#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
-+#define ALT_MEM_K (*(unsigned int *) (PARAM+0x1e0))
-+#define E820_MAP_NR (*(char*) (PARAM+E820NR))
-+#define E820_MAP    ((struct e820entry *) (PARAM+E820MAP))
-+#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
-+#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-+#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
-+#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2))
-+#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8))
-+#define SAVED_VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA))
-+#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC))
-+#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF))
-+#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210))
-+#define KERNEL_START (*(unsigned int *) (PARAM+0x214))
-+
-+#define INITRD_START (__pa(xen_start_info->mod_start))
-+#define INITRD_SIZE (xen_start_info->mod_len)
-+#define EDID_INFO   (*(struct edid_info *) (PARAM+0x440))
-+
-+#define EDD_NR     (*(unsigned char *) (PARAM+EDDNR))
-+#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF))
-+#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
-+#define EDD_BUF     ((struct edd_info *) (PARAM+EDDBUF))
-+#define COMMAND_LINE saved_command_line
-+
-+#define RAMDISK_IMAGE_START_MASK  	0x07FF
-+#define RAMDISK_PROMPT_FLAG		0x8000
-+#define RAMDISK_LOAD_FLAG		0x4000	
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/desc.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/desc.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/desc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/desc.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,240 @@
-+/* Written 2000 by Andi Kleen */ 
-+#ifndef __ARCH_DESC_H
-+#define __ARCH_DESC_H
-+
-+#include <linux/threads.h>
-+#include <asm/ldt.h>
-+
-+#ifndef __ASSEMBLY__
-+
-+#include <linux/string.h>
-+#include <asm/segment.h>
-+#include <asm/mmu.h>
-+
-+// 8 byte segment descriptor
-+struct desc_struct { 
-+	u16 limit0;
-+	u16 base0;
-+	unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
-+	unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
-+} __attribute__((packed)); 
-+
-+struct n_desc_struct { 
-+	unsigned int a,b;
-+}; 	
-+
-+enum { 
-+	GATE_INTERRUPT = 0xE, 
-+	GATE_TRAP = 0xF, 	
-+	GATE_CALL = 0xC,
-+}; 	
-+
-+// 16byte gate
-+struct gate_struct {          
-+	u16 offset_low;
-+	u16 segment; 
-+	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
-+	u16 offset_middle;
-+	u32 offset_high;
-+	u32 zero1; 
-+} __attribute__((packed));
-+
-+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) 
-+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
-+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
-+
-+enum { 
-+	DESC_TSS = 0x9,
-+	DESC_LDT = 0x2,
-+}; 
-+
-+// LDT or TSS descriptor in the GDT. 16 bytes.
-+struct ldttss_desc { 
-+	u16 limit0;
-+	u16 base0;
-+	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
-+	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
-+	u32 base3;
-+	u32 zero1; 
-+} __attribute__((packed)); 
-+
-+struct desc_ptr {
-+	unsigned short size;
-+	unsigned long address;
-+} __attribute__((packed)) ;
-+
-+extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
-+
-+extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
-+
-+#define get_cpu_gdt_table(_cpu) ((struct desc_struct *)(cpu_gdt_descr[(_cpu)].address))
-+
-+#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
-+#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
-+
-+static inline void clear_LDT(void)
-+{
-+	int cpu = get_cpu();
-+
-+	/*
-+	 * NB. We load the default_ldt for lcall7/27 handling on demand, as
-+	 * it slows down context switching. Noone uses it anyway.
-+	 * it slows down context switching. No one uses it anyway.
-+	cpu = cpu;              /* XXX avoid compiler warning */
-+	xen_set_ldt(0UL, 0);
-+	put_cpu();
-+}
-+
-+/*
-+ * This is the ldt that every process will get unless we need
-+ * something other than this.
-+ */
-+extern struct desc_struct default_ldt[];
-+extern struct gate_struct idt_table[]; 
-+
-+static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)  
-+{
-+	struct gate_struct s; 	
-+	s.offset_low = PTR_LOW(func); 
-+	s.segment = __KERNEL_CS;
-+	s.ist = ist; 
-+	s.p = 1;
-+	s.dpl = dpl; 
-+	s.zero0 = 0;
-+	s.zero1 = 0; 
-+	s.type = type; 
-+	s.offset_middle = PTR_MIDDLE(func); 
-+	s.offset_high = PTR_HIGH(func); 
-+	/* does not need to be atomic because it is only done once at setup time */ 
-+	memcpy(adr, &s, 16); 
-+} 
-+
-+static inline void set_intr_gate(int nr, void *func) 
-+{ 
-+	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0); 
-+} 
-+
-+static inline void set_intr_gate_ist(int nr, void *func, unsigned ist) 
-+{ 
-+	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist); 
-+} 
-+
-+static inline void set_system_gate(int nr, void *func) 
-+{ 
-+	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0); 
-+} 
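
For illustration (not part of the patch), trap-initialisation code consumes these helpers along the following lines; 'ex_divide_error' stands in for the real assembly entry points declared in traps.c and is purely hypothetical.

asmlinkage void ex_divide_error(void);		/* hypothetical entry stub */

static void __init ex_install_gates(void)
{
	set_intr_gate(0, &ex_divide_error);	/* dpl 0, default stack      */
	set_system_gate(3, &ex_divide_error);	/* dpl 3, callable from CPL3 */
}
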
-+
-+static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type, 
-+					 unsigned size) 
-+{ 
-+	struct ldttss_desc d;
-+	memset(&d,0,sizeof(d)); 
-+	d.limit0 = size & 0xFFFF;
-+	d.base0 = PTR_LOW(tss); 
-+	d.base1 = PTR_MIDDLE(tss) & 0xFF; 
-+	d.type = type;
-+	d.p = 1; 
-+	d.limit1 = (size >> 16) & 0xF;
-+	d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF; 
-+	d.base3 = PTR_HIGH(tss); 
-+	memcpy(ptr, &d, 16); 
-+}
-+
-+static inline void set_tss_desc(unsigned cpu, void *addr)
-+{ 
-+        set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS], 
-+                              (unsigned long)addr, 
-+                              DESC_TSS,
-+                              sizeof(struct tss_struct) - 1);
-+} 
-+
-+static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
-+{ 
-+	set_tssldt_descriptor((struct ldttss_desc *)&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], 
-+                              (unsigned long)addr, 
-+			      DESC_LDT, size * 8 - 1);
-+}
-+
-+static inline void set_seg_base(unsigned cpu, int entry, void *base)
-+{ 
-+	struct desc_struct *d = (struct desc_struct *)&get_cpu_gdt_table(cpu)[entry];
-+	u32 addr = (u32)(u64)base;
-+	BUG_ON((u64)base >> 32); 
-+	d->base0 = addr & 0xffff;
-+	d->base1 = (addr >> 16) & 0xff;
-+	d->base2 = (addr >> 24) & 0xff;
-+} 
-+
-+#define LDT_entry_a(info) \
-+	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
-+/* Don't allow setting of the lm bit. It is useless anyway because 
-+   64bit system calls require __USER_CS. */ 
-+#define LDT_entry_b(info) \
-+	(((info)->base_addr & 0xff000000) | \
-+	(((info)->base_addr & 0x00ff0000) >> 16) | \
-+	((info)->limit & 0xf0000) | \
-+	(((info)->read_exec_only ^ 1) << 9) | \
-+	((info)->contents << 10) | \
-+	(((info)->seg_not_present ^ 1) << 15) | \
-+	((info)->seg_32bit << 22) | \
-+	((info)->limit_in_pages << 23) | \
-+	((info)->useable << 20) | \
-+	/* ((info)->lm << 21) | */ \
-+	0x7000)
-+
-+#define LDT_empty(info) (\
-+	(info)->base_addr	== 0	&& \
-+	(info)->limit		== 0	&& \
-+	(info)->contents	== 0	&& \
-+	(info)->read_exec_only	== 1	&& \
-+	(info)->seg_32bit	== 0	&& \
-+	(info)->limit_in_pages	== 0	&& \
-+	(info)->seg_not_present	== 1	&& \
-+	(info)->useable		== 0	&& \
-+	(info)->lm		== 0)
-+
-+#if TLS_SIZE != 24
-+# error update this code.
-+#endif
-+
-+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-+{
-+#if 0
-+	u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
-+	gdt[0] = t->tls_array[0];
-+	gdt[1] = t->tls_array[1];
-+	gdt[2] = t->tls_array[2];
-+#endif
-+#define C(i) \
-+	HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), t->tls_array[i])
-+
-+	C(0); C(1); C(2);
-+#undef C
-+} 
-+
-+/*
-+ * load one particular LDT into the current CPU
-+ */
-+extern inline void load_LDT_nolock (mm_context_t *pc, int cpu)
-+{
-+        void *segments = pc->ldt;
-+        int count = pc->size;
-+
-+        if (likely(!count))
-+                segments = NULL;
-+
-+        xen_set_ldt((unsigned long)segments, count);
-+}
-+
-+static inline void load_LDT(mm_context_t *pc)
-+{
-+	int cpu = get_cpu();
-+	load_LDT_nolock(pc, cpu);
-+	put_cpu();
-+}
-+
-+extern struct desc_ptr idt_descr;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/dma-mapping.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/dma-mapping.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/dma-mapping.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/dma-mapping.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1 @@
-+#include <asm-i386/dma-mapping.h>
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/fixmap.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/fixmap.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/fixmap.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/fixmap.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,113 @@
-+/*
-+ * fixmap.h: compile-time virtual memory allocation
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1998 Ingo Molnar
-+ */
-+
-+#ifndef _ASM_FIXMAP_H
-+#define _ASM_FIXMAP_H
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <asm/apicdef.h>
-+#include <asm-xen/gnttab.h>
-+#include <asm/page.h>
-+#include <asm/vsyscall.h>
-+#include <asm/vsyscall32.h>
-+#include <asm/acpi.h>
-+
-+/*
-+ * Here we define all the compile-time 'special' virtual
-+ * addresses. The point is to have a constant address at
-+ * compile time, but to set the physical address only
-+ * in the boot process.
-+ *
-+ * These 'compile-time allocated' memory buffers are
-+ * fixed-size 4k pages (or larger if used with an increment
-+ * higher than 1). Use set_fixmap(idx,phys) to associate
-+ * physical memory with fixmap indices.
-+ *
-+ * TLB entries of such buffers will not be flushed across
-+ * task switches.
-+ */
-+
-+enum fixed_addresses {
-+	VSYSCALL_LAST_PAGE,
-+	VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
-+	VSYSCALL_HPET,
-+	FIX_HPET_BASE,
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
-+#endif
-+#ifdef CONFIG_X86_IO_APIC
-+	FIX_IO_APIC_BASE_0,
-+	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-+#endif
-+#ifdef CONFIG_ACPI_BOOT
-+	FIX_ACPI_BEGIN,
-+	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-+#endif
-+	FIX_SHARED_INFO,
-+	FIX_GNTTAB_BEGIN,
-+	FIX_GNTTAB_END = FIX_GNTTAB_BEGIN + NR_GRANT_FRAMES - 1,
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+#define NR_FIX_ISAMAPS	256
-+	FIX_ISAMAP_END,
-+	FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
-+#endif
-+	__end_of_fixed_addresses
-+};
-+
-+extern void __set_fixmap (enum fixed_addresses idx,
-+					unsigned long phys, pgprot_t flags);
-+
-+#define set_fixmap(idx, phys) \
-+		__set_fixmap(idx, phys, PAGE_KERNEL)
-+/*
-+ * Some hardware wants to get fixmapped without caching.
-+ */
-+#define set_fixmap_nocache(idx, phys) \
-+		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-+
-+#define clear_fixmap(idx) \
-+                __set_fixmap(idx, 0, __pgprot(0))
-+
-+#define FIXADDR_TOP	(VSYSCALL_END-PAGE_SIZE)
-+#define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
-+#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
-+
-+/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
-+#define FIXADDR_USER_START	((unsigned long)VSYSCALL32_VSYSCALL)
-+#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
-+
-+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
-+
-+extern void __this_fixmap_does_not_exist(void);
-+
-+/*
-+ * 'index to address' translation. If anyone tries to use the idx
-+ * directly without translation, we catch the bug with a NULL-dereference
-+ * kernel oops. Illegal ranges of incoming indices are caught too.
-+ */
-+extern inline unsigned long fix_to_virt(const unsigned int idx)
-+{
-+	/*
-+	 * this branch gets completely eliminated after inlining,
-+	 * except when someone tries to use fixaddr indices in an
-+	 * illegal way (such as mixing up address types or using
-+	 * out-of-range indices).
-+	 *
-+	 * If it doesn't get removed, the linker will complain
-+	 * loudly with a reasonably clear error message..
-+	 */
-+	if (idx >= __end_of_fixed_addresses)
-+		__this_fixmap_does_not_exist();
-+
-+        return __fix_to_virt(idx);
-+}
-+
-+#endif
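
A hedged sketch (not part of the patch) of the usual fixmap pattern: bind a frame to its compile-time slot once, then use the constant virtual address. Mapping the shared-info page into FIX_SHARED_INFO is paraphrased from the arch setup code rather than quoted.

static void ex_map_shared_info(unsigned long shared_info_maddr)
{
	set_fixmap(FIX_SHARED_INFO, shared_info_maddr);
	HYPERVISOR_shared_info =
		(shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
}
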
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/floppy.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/floppy.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/floppy.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/floppy.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,204 @@
-+/*
-+ * Architecture specific parts of the Floppy driver
-+ *
-+ * This file is subject to the terms and conditions of the GNU General Public
-+ * License.  See the file "COPYING" in the main directory of this archive
-+ * for more details.
-+ *
-+ * Copyright (C) 1995
-+ *
-+ * Modifications for Xen are Copyright (c) 2004, Keir Fraser.
-+ */
-+#ifndef __ASM_XEN_X86_64_FLOPPY_H
-+#define __ASM_XEN_X86_64_FLOPPY_H
-+
-+#include <linux/vmalloc.h>
-+
-+
-+/*
-+ * The DMA channel used by the floppy controller cannot access data at
-+ * addresses >= 16MB
-+ *
-+ * Went back to the 1MB limit, as some people had problems with the floppy
-+ * driver otherwise. It doesn't matter much for performance anyway, as most
-+ * floppy accesses go through the track buffer.
-+ */
-+#define _CROSS_64KB(a,s,vdma) \
-+(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
-+
-+#include <linux/vmalloc.h>
-+
-+/* XEN: Hit DMA paths on the head. This trick from asm-m68k/floppy.h. */
-+#include <asm/dma.h>
-+#undef MAX_DMA_ADDRESS
-+#define MAX_DMA_ADDRESS 0
-+#define CROSS_64KB(a,s) (0)
-+
-+#define fd_inb(port)			inb_p(port)
-+#define fd_outb(value,port)		outb_p(value,port)
-+
-+#define fd_request_dma()        (0)
-+#define fd_free_dma()           ((void)0)
-+#define fd_enable_irq()         enable_irq(FLOPPY_IRQ)
-+#define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
-+#define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)
-+#define fd_get_dma_residue()    vdma_get_dma_residue(FLOPPY_DMA)
-+#define fd_dma_mem_alloc(size)	vdma_mem_alloc(size)
-+#define fd_dma_mem_free(addr, size) vdma_mem_free(addr, size) 
-+#define fd_dma_setup(addr, size, mode, io) vdma_dma_setup(addr, size, mode, io)
-+
-+static int virtual_dma_count;
-+static int virtual_dma_residue;
-+static char *virtual_dma_addr;
-+static int virtual_dma_mode;
-+static int doing_pdma;
-+
-+static irqreturn_t floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
-+{
-+	register unsigned char st;
-+
-+#undef TRACE_FLPY_INT
-+
-+#ifdef TRACE_FLPY_INT
-+	static int calls=0;
-+	static int bytes=0;
-+	static int dma_wait=0;
-+#endif
-+	if (!doing_pdma)
-+		return floppy_interrupt(irq, dev_id, regs);
-+
-+#ifdef TRACE_FLPY_INT
-+	if(!calls)
-+		bytes = virtual_dma_count;
-+#endif
-+
-+	{
-+		register int lcount;
-+		register char *lptr;
-+
-+		st = 1;
-+		for(lcount=virtual_dma_count, lptr=virtual_dma_addr; 
-+		    lcount; lcount--, lptr++) {
-+			st=inb(virtual_dma_port+4) & 0xa0 ;
-+			if(st != 0xa0) 
-+				break;
-+			if(virtual_dma_mode)
-+				outb_p(*lptr, virtual_dma_port+5);
-+			else
-+				*lptr = inb_p(virtual_dma_port+5);
-+		}
-+		virtual_dma_count = lcount;
-+		virtual_dma_addr = lptr;
-+		st = inb(virtual_dma_port+4);
-+	}
-+
-+#ifdef TRACE_FLPY_INT
-+	calls++;
-+#endif
-+	if(st == 0x20)
-+		return IRQ_HANDLED;
-+	if(!(st & 0x20)) {
-+		virtual_dma_residue += virtual_dma_count;
-+		virtual_dma_count=0;
-+#ifdef TRACE_FLPY_INT
-+		printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", 
-+		       virtual_dma_count, virtual_dma_residue, calls, bytes,
-+		       dma_wait);
-+		calls = 0;
-+		dma_wait=0;
-+#endif
-+		doing_pdma = 0;
-+		floppy_interrupt(irq, dev_id, regs);
-+		return IRQ_HANDLED;
-+	}
-+#ifdef TRACE_FLPY_INT
-+	if(!virtual_dma_count)
-+		dma_wait++;
-+#endif
-+	return IRQ_HANDLED;
-+}
-+
-+static void fd_disable_dma(void)
-+{
-+	doing_pdma = 0;
-+	virtual_dma_residue += virtual_dma_count;
-+	virtual_dma_count=0;
-+}
-+
-+static int vdma_get_dma_residue(unsigned int dummy)
-+{
-+	return virtual_dma_count + virtual_dma_residue;
-+}
-+
-+
-+static int fd_request_irq(void)
-+{
-+	return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
-+					   "floppy", NULL);
-+}
-+
-+
-+static unsigned long vdma_mem_alloc(unsigned long size)
-+{
-+	return (unsigned long) vmalloc(size);
-+
-+}
-+
-+static void vdma_mem_free(unsigned long addr, unsigned long size)
-+{
-+	vfree((void *)addr);
-+}
-+
-+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
-+{
-+	doing_pdma = 1;
-+	virtual_dma_port = io;
-+	virtual_dma_mode = (mode  == DMA_MODE_WRITE);
-+	virtual_dma_addr = addr;
-+	virtual_dma_count = size;
-+	virtual_dma_residue = 0;
-+	return 0;
-+}
-+
-+/* XEN: This trick to force 'virtual DMA' is from include/asm-m68k/floppy.h. */
-+#define FDC1 xen_floppy_init()
-+static int FDC2 = -1;
-+
-+static int xen_floppy_init(void)
-+{
-+	use_virtual_dma = 1;
-+	can_use_virtual_dma = 1;
-+	return 0x340;
-+}
-+
-+/*
-+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
-+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
-+ * coincides with another rtc CMOS user.		Paul G.
-+ */
-+#define FLOPPY0_TYPE	({				\
-+	unsigned long flags;				\
-+	unsigned char val;				\
-+	spin_lock_irqsave(&rtc_lock, flags);		\
-+	val = (CMOS_READ(0x10) >> 4) & 15;		\
-+	spin_unlock_irqrestore(&rtc_lock, flags);	\
-+	val;						\
-+})
-+
-+#define FLOPPY1_TYPE	({				\
-+	unsigned long flags;				\
-+	unsigned char val;				\
-+	spin_lock_irqsave(&rtc_lock, flags);		\
-+	val = CMOS_READ(0x10) & 15;			\
-+	spin_unlock_irqrestore(&rtc_lock, flags);	\
-+	val;						\
-+})
-+
-+#define N_FDC 2
-+#define N_DRIVE 8
-+
-+#define FLOPPY_MOTOR_MASK 0xf0
-+
-+#define EXTRA_FLOPPY_PARAMS
-+
-+#endif /* __ASM_XEN_X86_64_FLOPPY_H */
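
FLOPPY0_TYPE and FLOPPY1_TYPE above unpack a single CMOS byte (register 0x10) that stores both drive types, high nibble for drive 0 and low nibble for drive 1. A small worked example of that unpacking (the 0x45 register value is just an illustration):

  #include <assert.h>

  int main(void)
  {
          unsigned char cmos_0x10 = 0x45;                /* example CMOS contents */
          unsigned char drive0 = (cmos_0x10 >> 4) & 15;  /* as in FLOPPY0_TYPE -> 4 */
          unsigned char drive1 = cmos_0x10 & 15;         /* as in FLOPPY1_TYPE -> 5 */
          assert(drive0 == 4 && drive1 == 5);
          return 0;
  }
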
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hw_irq.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/hw_irq.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hw_irq.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/hw_irq.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,138 @@
-+#ifndef _ASM_HW_IRQ_H
-+#define _ASM_HW_IRQ_H
-+
-+/*
-+ *	linux/include/asm/hw_irq.h
-+ *
-+ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ *	moved some of the old arch/i386/kernel/irq.h to here. VY
-+ *
-+ *	IRQ/IPI changes taken from work by Thomas Radke
-+ *	<tomsoft at informatik.tu-chemnitz.de>
-+ *
-+ *	hacked by Andi Kleen for x86-64.
-+ * 
-+ *  $Id: hw_irq.h,v 1.24 2001/09/14 20:55:03 vojtech Exp $
-+ */
-+
-+#ifndef __ASSEMBLY__
-+#include <linux/config.h>
-+#include <asm/atomic.h>
-+#include <asm/irq.h>
-+#include <linux/profile.h>
-+#include <linux/smp.h>
-+
-+struct hw_interrupt_type;
-+#endif
-+
-+/*
-+ * IDT vectors usable for external interrupt sources start
-+ * at 0x20:
-+ */
-+#define FIRST_EXTERNAL_VECTOR	0x20
-+
-+#define IA32_SYSCALL_VECTOR	0x80
-+
-+
-+/*
-+ * Vectors 0x20-0x2f are used for ISA interrupts.
-+ */
-+
-+/*
-+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
-+ *
-+ *  some of the following vectors are 'rare', they are merged
-+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
-+ *  TLB, reschedule and local APIC vectors are performance-critical.
-+ *
-+ *  Vectors 0xf0-0xf9 are free (reserved for future Linux use).
-+ */
-+#ifndef CONFIG_XEN
-+#define SPURIOUS_APIC_VECTOR	0xff
-+#define ERROR_APIC_VECTOR	0xfe
-+#define INVALIDATE_TLB_VECTOR	0xfd
-+#define RESCHEDULE_VECTOR	0xfc
-+#define TASK_MIGRATION_VECTOR	0xfb
-+#define CALL_FUNCTION_VECTOR	0xfa
-+#define KDB_VECTOR	0xf9
-+
-+#define THERMAL_APIC_VECTOR	0xf0
-+#endif
-+
-+/*
-+ * Local APIC timer IRQ vector is on a different priority level,
-+ * to work around the 'lost local interrupt if more than 2 IRQ
-+ * sources per level' errata.
-+ */
-+#define LOCAL_TIMER_VECTOR	0xef
-+
-+/*
-+ * First APIC vector available to drivers: (vectors 0x30-0xee)
-+ * we start at 0x31 to spread out vectors evenly between priority
-+ * levels. (0x80 is the syscall vector)
-+ */
-+#define FIRST_DEVICE_VECTOR	0x31
-+#define FIRST_SYSTEM_VECTOR	0xef   /* duplicated in irq.h */
-+
-+
-+#ifndef __ASSEMBLY__
-+extern u8 irq_vector[NR_IRQ_VECTORS];
-+#define IO_APIC_VECTOR(irq)	(irq_vector[irq])
-+#define AUTO_ASSIGN		-1
-+
-+/*
-+ * Various low-level irq details needed by irq.c, process.c,
-+ * time.c, io_apic.c and smp.c
-+ *
-+ * Interrupt entry/exit code at both C and assembly level
-+ */
-+
-+extern void disable_8259A_irq(unsigned int irq);
-+extern void enable_8259A_irq(unsigned int irq);
-+extern int i8259A_irq_pending(unsigned int irq);
-+extern void make_8259A_irq(unsigned int irq);
-+extern void init_8259A(int aeoi);
-+extern void FASTCALL(send_IPI_self(int vector));
-+extern void init_VISWS_APIC_irqs(void);
-+extern void setup_IO_APIC(void);
-+extern void disable_IO_APIC(void);
-+extern void print_IO_APIC(void);
-+extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
-+extern void send_IPI(int dest, int vector);
-+extern void setup_ioapic_dest(void);
-+
-+extern unsigned long io_apic_irqs;
-+
-+extern atomic_t irq_err_count;
-+extern atomic_t irq_mis_count;
-+
-+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#include <asm/ptrace.h>
-+
-+#define IRQ_NAME2(nr) nr##_interrupt(void)
-+#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
-+
-+/*
-+ *	SMP has a few special interrupts for IPI messages
-+ */
-+
-+#define BUILD_IRQ(nr) \
-+asmlinkage void IRQ_NAME(nr); \
-+__asm__( \
-+"\n.p2align\n" \
-+"IRQ" #nr "_interrupt:\n\t" \
-+	"push $" #nr "-256 ; " \
-+	"jmp common_interrupt");
-+
-+extern void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i);
-+
-+#define platform_legacy_irq(irq)	((irq) < 16)
-+
-+#endif
-+
-+#endif /* _ASM_HW_IRQ_H */
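
BUILD_IRQ(nr) above stamps out one tiny assembly stub per IRQ: it pushes the vector number biased by -256 and jumps to the shared common_interrupt entry point. Expanded by hand for IRQ 14, it comes out as roughly:

  asmlinkage void IRQ14_interrupt(void);
  __asm__(
  "\n.p2align\n"
  "IRQ14_interrupt:\n\t"
          "push $14-256 ; "
          "jmp common_interrupt");
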
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hypercall.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/hypercall.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hypercall.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/hypercall.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,323 @@
-+/******************************************************************************
-+ * hypercall.h
-+ * 
-+ * Linux-specific hypervisor handling.
-+ * 
-+ * Copyright (c) 2002-2004, K A Fraser
-+ * 
-+ * 64-bit updates:
-+ *   Benjamin Liu <benjamin.liu at intel.com>
-+ *   Jun Nakajima <jun.nakajima at intel.com>
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __HYPERCALL_H__
-+#define __HYPERCALL_H__
-+
-+#include <asm-xen/xen-public/xen.h>
-+#include <asm-xen/xen-public/sched.h>
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#define _hypercall0(type, name)			\
-+({						\
-+	long __res;				\
-+	asm volatile (				\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res)			\
-+		:				\
-+		: "memory" );			\
-+	(type)__res;				\
-+})
-+
-+#define _hypercall1(type, name, a1)				\
-+({								\
-+	long __res, __ign1;					\
-+	asm volatile (						\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=D" (__ign1)			\
-+		: "1" ((long)(a1))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
-+
-+#define _hypercall2(type, name, a1, a2)				\
-+({								\
-+	long __res, __ign1, __ign2;				\
-+	asm volatile (						\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=D" (__ign1), "=S" (__ign2)	\
-+		: "1" ((long)(a1)), "2" ((long)(a2))		\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
-+
-+#define _hypercall3(type, name, a1, a2, a3)			\
-+({								\
-+	long __res, __ign1, __ign2, __ign3;			\
-+	asm volatile (						\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=D" (__ign1), "=S" (__ign2), 	\
-+		"=d" (__ign3)					\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3))				\
-+		: "memory" );					\
-+	(type)__res;						\
-+})
-+
-+#define _hypercall4(type, name, a1, a2, a3, a4)			\
-+({								\
-+	long __res, __ign1, __ign2, __ign3;			\
-+	asm volatile (						\
-+		"movq %7,%%r10; "				\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
-+		"=d" (__ign3)					\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3)), "g" ((long)(a4))		\
-+		: "memory", "r10" );				\
-+	(type)__res;						\
-+})
-+
-+#define _hypercall5(type, name, a1, a2, a3, a4, a5)		\
-+({								\
-+	long __res, __ign1, __ign2, __ign3;			\
-+	asm volatile (						\
-+		"movq %7,%%r10; movq %8,%%r8; "			\
-+		"call hypercall_page + ("STR(__HYPERVISOR_##name)" * 32)"\
-+		: "=a" (__res), "=D" (__ign1), "=S" (__ign2),	\
-+		"=d" (__ign3)					\
-+		: "1" ((long)(a1)), "2" ((long)(a2)),		\
-+		"3" ((long)(a3)), "g" ((long)(a4)),		\
-+		"g" ((long)(a5))				\
-+		: "memory", "r10", "r8" );			\
-+	(type)__res;						\
-+})
-+
-+static inline int
-+HYPERVISOR_set_trap_table(
-+	trap_info_t *table)
-+{
-+	return _hypercall1(int, set_trap_table, table);
-+}
-+
-+static inline int
-+HYPERVISOR_mmu_update(
-+	mmu_update_t *req, int count, int *success_count, domid_t domid)
-+{
-+	return _hypercall4(int, mmu_update, req, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_mmuext_op(
-+	struct mmuext_op *op, int count, int *success_count, domid_t domid)
-+{
-+	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_set_gdt(
-+	unsigned long *frame_list, int entries)
-+{
-+	return _hypercall2(int, set_gdt, frame_list, entries);
-+}
-+
-+static inline int
-+HYPERVISOR_stack_switch(
-+	unsigned long ss, unsigned long esp)
-+{
-+	return _hypercall2(int, stack_switch, ss, esp);
-+}
-+
-+static inline int
-+HYPERVISOR_set_callbacks(
-+	unsigned long event_address, unsigned long failsafe_address, 
-+	unsigned long syscall_address)
-+{
-+	return _hypercall3(int, set_callbacks,
-+			   event_address, failsafe_address, syscall_address);
-+}
-+
-+static inline int
-+HYPERVISOR_fpu_taskswitch(
-+	int set)
-+{
-+	return _hypercall1(int, fpu_taskswitch, set);
-+}
-+
-+static inline int
-+HYPERVISOR_sched_op(
-+	int cmd, unsigned long arg)
-+{
-+	return _hypercall2(int, sched_op, cmd, arg);
-+}
-+
-+static inline long
-+HYPERVISOR_set_timer_op(
-+	u64 timeout)
-+{
-+	return _hypercall1(long, set_timer_op, timeout);
-+}
-+
-+static inline int
-+HYPERVISOR_dom0_op(
-+	dom0_op_t *dom0_op)
-+{
-+	dom0_op->interface_version = DOM0_INTERFACE_VERSION;
-+	return _hypercall1(int, dom0_op, dom0_op);
-+}
-+
-+static inline int
-+HYPERVISOR_set_debugreg(
-+	int reg, unsigned long value)
-+{
-+	return _hypercall2(int, set_debugreg, reg, value);
-+}
-+
-+static inline unsigned long
-+HYPERVISOR_get_debugreg(
-+	int reg)
-+{
-+	return _hypercall1(unsigned long, get_debugreg, reg);
-+}
-+
-+static inline int
-+HYPERVISOR_update_descriptor(
-+	unsigned long ma, unsigned long word)
-+{
-+	return _hypercall2(int, update_descriptor, ma, word);
-+}
-+
-+static inline int
-+HYPERVISOR_memory_op(
-+	unsigned int cmd, void *arg)
-+{
-+	return _hypercall2(int, memory_op, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_multicall(
-+	void *call_list, int nr_calls)
-+{
-+	return _hypercall2(int, multicall, call_list, nr_calls);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping(
-+	unsigned long va, pte_t new_val, unsigned long flags)
-+{
-+	return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
-+}
-+
-+static inline int
-+HYPERVISOR_event_channel_op(
-+	void *op)
-+{
-+	return _hypercall1(int, event_channel_op, op);
-+}
-+
-+static inline int
-+HYPERVISOR_xen_version(
-+	int cmd, void *arg)
-+{
-+	return _hypercall2(int, xen_version, cmd, arg);
-+}
-+
-+static inline int
-+HYPERVISOR_console_io(
-+	int cmd, int count, char *str)
-+{
-+	return _hypercall3(int, console_io, cmd, count, str);
-+}
-+
-+static inline int
-+HYPERVISOR_physdev_op(
-+	void *physdev_op)
-+{
-+	return _hypercall1(int, physdev_op, physdev_op);
-+}
-+
-+static inline int
-+HYPERVISOR_grant_table_op(
-+	unsigned int cmd, void *uop, unsigned int count)
-+{
-+	return _hypercall3(int, grant_table_op, cmd, uop, count);
-+}
-+
-+static inline int
-+HYPERVISOR_update_va_mapping_otherdomain(
-+	unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
-+{
-+	return _hypercall4(int, update_va_mapping_otherdomain, va,
-+			   new_val.pte, flags, domid);
-+}
-+
-+static inline int
-+HYPERVISOR_vm_assist(
-+	unsigned int cmd, unsigned int type)
-+{
-+	return _hypercall2(int, vm_assist, cmd, type);
-+}
-+
-+static inline int
-+HYPERVISOR_vcpu_op(
-+	int cmd, int vcpuid, void *extra_args)
-+{
-+	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
-+}
-+
-+static inline int
-+HYPERVISOR_set_segment_base(
-+	int reg, unsigned long value)
-+{
-+	return _hypercall2(int, set_segment_base, reg, value);
-+}
-+
-+static inline int
-+HYPERVISOR_suspend(
-+	unsigned long srec)
-+{
-+	return _hypercall3(int, sched_op, SCHEDOP_shutdown,
-+			   SHUTDOWN_suspend, srec);
-+}
-+
-+static inline int
-+HYPERVISOR_nmi_op(
-+	unsigned long op,
-+	unsigned long arg)
-+{
-+	return _hypercall2(int, nmi_op, op, arg);
-+}
-+
-+#endif /* __HYPERCALL_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hypervisor.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/hypervisor.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/hypervisor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/hypervisor.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+#include <asm-i386/hypervisor.h>
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/io.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/io.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/io.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/io.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,374 @@
-+#ifndef _ASM_IO_H
-+#define _ASM_IO_H
-+
-+#include <linux/config.h>
-+#include <asm/fixmap.h>
-+/*
-+ * This file contains the definitions for the x86 IO instructions
-+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
-+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
-+ * versions of the single-IO instructions (inb_p/inw_p/..).
-+ *
-+ * This file is not meant to be obfuscating: it's just complicated
-+ * to (a) handle it all in a way that makes gcc able to optimize it
-+ * as well as possible and (b) trying to avoid writing the same thing
-+ * over and over again with slight variations and possibly making a
-+ * mistake somewhere.
-+ */
-+
-+/*
-+ * Thanks to James van Artsdalen for a better timing-fix than
-+ * the two short jumps: using outb's to a nonexistent port seems
-+ * to guarantee better timings even on fast machines.
-+ *
-+ * On the other hand, I'd like to be sure of a non-existent port:
-+ * I feel a bit unsafe about using 0x80 (should be safe, though)
-+ *
-+ *		Linus
-+ */
-+
-+ /*
-+  *  Bit simplified and optimized by Jan Hubicka
-+  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
-+  *
-+  *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
-+  *  isa_read[wl] and isa_write[wl] fixed
-+  *  - Arnaldo Carvalho de Melo <acme at conectiva.com.br>
-+  */
-+
-+#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-+
-+#ifdef REALLY_SLOW_IO
-+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-+#else
-+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-+#endif
-+
-+/*
-+ * Talk about misusing macros..
-+ */
-+#define __OUT1(s,x) \
-+extern inline void out##s(unsigned x value, unsigned short port) {
-+
-+#define __OUT2(s,s1,s2) \
-+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-+
-+#define __OUT(s,s1,x) \
-+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-+__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
-+
-+#define __IN1(s) \
-+extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-+
-+#define __IN2(s,s1,s2) \
-+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-+
-+#define __IN(s,s1,i...) \
-+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-+__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-+
-+#define __INS(s) \
-+extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-+{ __asm__ __volatile__ ("rep ; ins" #s \
-+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-+
-+#define __OUTS(s) \
-+extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-+{ __asm__ __volatile__ ("rep ; outs" #s \
-+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-+
-+#define RETURN_TYPE unsigned char
-+__IN(b,"")
-+#undef RETURN_TYPE
-+#define RETURN_TYPE unsigned short
-+__IN(w,"")
-+#undef RETURN_TYPE
-+#define RETURN_TYPE unsigned int
-+__IN(l,"")
-+#undef RETURN_TYPE
-+
-+__OUT(b,"b",char)
-+__OUT(w,"w",short)
-+__OUT(l,,int)
-+
-+__INS(b)
-+__INS(w)
-+__INS(l)
-+
-+__OUTS(b)
-+__OUTS(w)
-+__OUTS(l)
-+
-+#define IO_SPACE_LIMIT 0xffff
-+
-+#if defined(__KERNEL__) && __x86_64__
-+
-+#include <linux/vmalloc.h>
-+
-+#ifndef __i386__
-+/*
-+ * Change virtual addresses to physical addresses and vv.
-+ * These are pretty trivial
-+ */
-+extern inline unsigned long virt_to_phys(volatile void * address)
-+{
-+	return __pa(address);
-+}
-+
-+extern inline void * phys_to_virt(unsigned long address)
-+{
-+	return __va(address);
-+}
-+
-+
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+#endif
-+
-+/*
-+ * Change "struct page" to physical address.
-+ */
-+#ifdef CONFIG_DISCONTIGMEM
-+#include <asm/mmzone.h>
-+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
-+
-+#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
-+				  (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
-+				  (unsigned long) (bv)->bv_offset)
-+
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-+	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-+	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-+	  bvec_to_pseudophys((vec2))))
-+#else
-+// #define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
-+#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-+#define page_to_phys(page)	 (phys_to_machine(page_to_pseudophys(page)))
-+
-+#define bio_to_pseudophys(bio)	 (page_to_pseudophys(bio_page((bio))) + \
-+				  (unsigned long) bio_offset((bio)))
-+#define bvec_to_pseudophys(bv)	 (page_to_pseudophys((bv)->bv_page) + \
-+				  (unsigned long) (bv)->bv_offset)
-+
-+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-+	(((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-+	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
-+	  bvec_to_pseudophys((vec2))))
-+#endif
-+
-+#include <asm-generic/iomap.h>
-+
-+extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-+
-+extern inline void __iomem * ioremap (unsigned long offset, unsigned long size)
-+{
-+	return __ioremap(offset, size, 0);
-+}
-+
-+/*
-+ * This one maps high address device memory and turns off caching for that area.
-+ * it's useful if some control registers are in such an area and write combining
-+ * or read caching is not desirable:
-+ */
-+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
-+extern void iounmap(volatile void __iomem *addr);
-+
-+/*
-+ * ISA I/O bus memory addresses are 1:1 with the physical address.
-+ */
-+
-+#define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
-+#define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
-+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
-+#define isa_bus_to_virt(_x) (void *)(__fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))
-+#else
-+#define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
-+#endif
-+/*
-+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
-+ * are forbidden in portable PCI drivers.
-+ *
-+ * Allow them on x86 for legacy drivers, though.
-+ */
-+#define virt_to_bus(_x) phys_to_machine(__pa(_x))
-+#define bus_to_virt(_x) __va(machine_to_phys(_x))
-+
-+/*
-+ * readX/writeX() are used to access memory mapped devices. On some
-+ * architectures the memory mapped IO stuff needs to be accessed
-+ * differently. On the x86 architecture, we just read/write the
-+ * memory location directly.
-+ */
-+
-+static inline __u8 __readb(const volatile void __iomem *addr)
-+{
-+	return *(__force volatile __u8 *)addr;
-+}
-+static inline __u16 __readw(const volatile void __iomem *addr)
-+{
-+	return *(__force volatile __u16 *)addr;
-+}
-+static inline __u32 __readl(const volatile void __iomem *addr)
-+{
-+	return *(__force volatile __u32 *)addr;
-+}
-+static inline __u64 __readq(const volatile void __iomem *addr)
-+{
-+	return *(__force volatile __u64 *)addr;
-+}
-+#define readb(x) __readb(x)
-+#define readw(x) __readw(x)
-+#define readl(x) __readl(x)
-+#define readq(x) __readq(x)
-+#define readb_relaxed(a) readb(a)
-+#define readw_relaxed(a) readw(a)
-+#define readl_relaxed(a) readl(a)
-+#define readq_relaxed(a) readq(a)
-+#define __raw_readb readb
-+#define __raw_readw readw
-+#define __raw_readl readl
-+#define __raw_readq readq
-+
-+#define mmiowb()
-+
-+#ifdef CONFIG_UNORDERED_IO
-+static inline void __writel(__u32 val, volatile void __iomem *addr)
-+{
-+	volatile __u32 __iomem *target = addr;
-+	asm volatile("movnti %1,%0"
-+		     : "=m" (*target)
-+		     : "r" (val) : "memory");
-+}
-+
-+static inline void __writeq(__u64 val, volatile void __iomem *addr)
-+{
-+	volatile __u64 __iomem *target = addr;
-+	asm volatile("movnti %1,%0"
-+		     : "=m" (*target)
-+		     : "r" (val) : "memory");
-+}
-+#else
-+static inline void __writel(__u32 b, volatile void __iomem *addr)
-+{
-+	*(__force volatile __u32 *)addr = b;
-+}
-+static inline void __writeq(__u64 b, volatile void __iomem *addr)
-+{
-+	*(__force volatile __u64 *)addr = b;
-+}
-+#endif
-+static inline void __writeb(__u8 b, volatile void __iomem *addr)
-+{
-+	*(__force volatile __u8 *)addr = b;
-+}
-+static inline void __writew(__u16 b, volatile void __iomem *addr)
-+{
-+	*(__force volatile __u16 *)addr = b;
-+}
-+#define writeq(val,addr) __writeq((val),(addr))
-+#define writel(val,addr) __writel((val),(addr))
-+#define writew(val,addr) __writew((val),(addr))
-+#define writeb(val,addr) __writeb((val),(addr))
-+#define __raw_writeb writeb
-+#define __raw_writew writew
-+#define __raw_writel writel
-+#define __raw_writeq writeq
-+
-+void __memcpy_fromio(void*,unsigned long,unsigned);
-+void __memcpy_toio(unsigned long,const void*,unsigned);
-+
-+static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
-+{
-+	__memcpy_fromio(to,(unsigned long)from,len);
-+}
-+static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
-+{
-+	__memcpy_toio((unsigned long)to,from,len);
-+}
-+
-+void memset_io(volatile void __iomem *a, int b, size_t c);
-+
-+/*
-+ * ISA space is 'always mapped' on a typical x86 system, no need to
-+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
-+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
-+ * are physical addresses. The following constant pointer can be
-+ * used as the IO-area pointer (it can be iounmapped as well, so the
-+ * analogy with PCI is quite large):
-+ */
-+#define __ISA_IO_base ((char __iomem *)(fix_to_virt(FIX_ISAMAP_BEGIN)))
-+
-+#define isa_readb(a) readb(__ISA_IO_base + (a))
-+#define isa_readw(a) readw(__ISA_IO_base + (a))
-+#define isa_readl(a) readl(__ISA_IO_base + (a))
-+#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-+#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-+#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-+#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
-+#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
-+#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))
-+
-+
-+/*
-+ * Again, x86-64 does not require memory-mapped IO specific functions.
-+ */
-+
-+#define eth_io_copy_and_sum(a,b,c,d)		eth_copy_and_sum((a),(void *)(b),(c),(d))
-+#define isa_eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))
-+
-+/**
-+ *	check_signature		-	find BIOS signatures
-+ *	@io_addr: mmio address to check 
-+ *	@signature:  signature block
-+ *	@length: length of signature
-+ *
-+ *	Perform a signature comparison with the mmio address io_addr. This
-+ *	address should have been obtained by ioremap.
-+ *	Returns 1 on a match.
-+ */
-+ 
-+static inline int check_signature(void __iomem *io_addr,
-+	const unsigned char *signature, int length)
-+{
-+	int retval = 0;
-+	do {
-+		if (readb(io_addr) != *signature)
-+			goto out;
-+		io_addr++;
-+		signature++;
-+		length--;
-+	} while (length);
-+	retval = 1;
-+out:
-+	return retval;
-+}
-+
-+/* Nothing to do */
-+
-+#define dma_cache_inv(_start,_size)		do { } while (0)
-+#define dma_cache_wback(_start,_size)		do { } while (0)
-+#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-+
-+#define flush_write_buffers() 
-+
-+extern int iommu_bio_merge;
-+#define BIO_VMERGE_BOUNDARY iommu_bio_merge
-+
-+/*
-+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
-+ * access
-+ */
-+#define xlate_dev_mem_ptr(p)	__va(p)
-+
-+/*
-+ * Convert a virtual cached pointer to an uncached pointer
-+ */
-+#define xlate_dev_kmem_ptr(p)	p
-+
-+#endif /* __KERNEL__ */
-+
-+#define ARCH_HAS_DEV_MEM
-+
-+#endif
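
The __OUT/__IN macro pairs above generate the whole inb/outb family, plus the _p "pausing" variants that append a dummy write to port 0x80. Expanded by hand, the plain outb() comes out as roughly:

  extern inline void outb(unsigned char value, unsigned short port)
  {
          /* value forced into %al ("a"), port into %dx or an immediate ("Nd") */
          __asm__ __volatile__ ("outb %b0,%w1" : : "a" (value), "Nd" (port));
  }
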
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/irq.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/irq.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/irq.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/irq.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,36 @@
-+#ifndef _ASM_IRQ_H
-+#define _ASM_IRQ_H
-+
-+/*
-+ *	linux/include/asm/irq.h
-+ *
-+ *	(C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
-+ *
-+ *	IRQ/IPI changes taken from work by Thomas Radke
-+ *	<tomsoft at informatik.tu-chemnitz.de>
-+ */
-+
-+#include <linux/config.h>
-+#include <linux/sched.h>
-+/* include comes from machine specific directory */
-+#include "irq_vectors.h"
-+#include <asm/thread_info.h>
-+
-+static __inline__ int irq_canonicalize(int irq)
-+{
-+	return ((irq == 2) ? 9 : irq);
-+}
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#define ARCH_HAS_NMI_WATCHDOG		/* See include/linux/nmi.h */
-+#endif
-+
-+#define KDB_VECTOR	0xf9
-+
-+# define irq_ctx_init(cpu) do { } while (0)
-+
-+struct irqaction;
-+struct pt_regs;
-+int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-+
-+#endif /* _ASM_IRQ_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/io_ports.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/io_ports.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/io_ports.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/io_ports.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,30 @@
-+/*
-+ *  arch/i386/mach-generic/io_ports.h
-+ *
-+ *  Machine specific IO port address definition for generic.
-+ *  Written by Osamu Tomita <tomita at cinet.co.jp>
-+ */
-+#ifndef _MACH_IO_PORTS_H
-+#define _MACH_IO_PORTS_H
-+
-+/* i8253A PIT registers */
-+#define PIT_MODE		0x43
-+#define PIT_CH0			0x40
-+#define PIT_CH2			0x42
-+
-+/* i8259A PIC registers */
-+#define PIC_MASTER_CMD		0x20
-+#define PIC_MASTER_IMR		0x21
-+#define PIC_MASTER_ISR		PIC_MASTER_CMD
-+#define PIC_MASTER_POLL		PIC_MASTER_ISR
-+#define PIC_MASTER_OCW3		PIC_MASTER_ISR
-+#define PIC_SLAVE_CMD		0xa0
-+#define PIC_SLAVE_IMR		0xa1
-+
-+/* i8259A PIC related value */
-+#define PIC_CASCADE_IR		2
-+#define MASTER_ICW4_DEFAULT	0x01
-+#define SLAVE_ICW4_DEFAULT	0x01
-+#define PIC_ICW4_AEOI		2
-+
-+#endif /* !_MACH_IO_PORTS_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/irq_vectors.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,123 @@
-+/*
-+ * This file should contain #defines for all of the interrupt vector
-+ * numbers used by this architecture.
-+ *
-+ * In addition, there are some standard defines:
-+ *
-+ *	FIRST_EXTERNAL_VECTOR:
-+ *		The first free place for external interrupts
-+ *
-+ *	SYSCALL_VECTOR:
-+ *		The IRQ vector under which a syscall makes the user-to-kernel
-+ *		transition.
-+ *
-+ *	TIMER_IRQ:
-+ *		The IRQ number the timer interrupt comes in at.
-+ *
-+ *	NR_IRQS:
-+ *		The total number of interrupt vectors (including all the
-+ *		architecture specific interrupts) needed.
-+ *
-+ */			
-+#ifndef _ASM_IRQ_VECTORS_H
-+#define _ASM_IRQ_VECTORS_H
-+
-+/*
-+ * IDT vectors usable for external interrupt sources start
-+ * at 0x20:
-+ */
-+#define FIRST_EXTERNAL_VECTOR	0x20
-+
-+#define SYSCALL_VECTOR		0x80
-+
-+/*
-+ * Vectors 0x20-0x2f are used for ISA interrupts.
-+ */
-+
-+#if 0
-+/*
-+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
-+ *
-+ *  some of the following vectors are 'rare', they are merged
-+ *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
-+ *  TLB, reschedule and local APIC vectors are performance-critical.
-+ *
-+ *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
-+ */
-+#define INVALIDATE_TLB_VECTOR	0xfd
-+#define RESCHEDULE_VECTOR	0xfc
-+#define CALL_FUNCTION_VECTOR	0xfb
-+
-+#define THERMAL_APIC_VECTOR	0xf0
-+/*
-+ * Local APIC timer IRQ vector is on a different priority level,
-+ * to work around the 'lost local interrupt if more than 2 IRQ
-+ * sources per level' errata.
-+ */
-+#define LOCAL_TIMER_VECTOR	0xef
-+#endif
-+
-+#define SPURIOUS_APIC_VECTOR	0xff
-+#define ERROR_APIC_VECTOR	0xfe
-+
-+/*
-+ * First APIC vector available to drivers: (vectors 0x30-0xee)
-+ * we start at 0x31 to spread out vectors evenly between priority
-+ * levels. (0x80 is the syscall vector)
-+ */
-+#define FIRST_DEVICE_VECTOR	0x31
-+#define FIRST_SYSTEM_VECTOR	0xef
-+
-+/*
-+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
-+ * Right now the APIC is mostly only used for SMP.
-+ * 256 vectors is an architectural limit. (we can have
-+ * more than 256 devices theoretically, but they will
-+ * have to use shared interrupts)
-+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
-+ * the usable vector space is 0x20-0xff (224 vectors)
-+ */
-+
-+#define RESCHEDULE_VECTOR	0
-+#define CALL_FUNCTION_VECTOR	1
-+#define NR_IPIS			2
-+
-+/*
-+ * The maximum number of vectors supported by i386 processors
-+ * is limited to 256. For processors other than i386, NR_VECTORS
-+ * should be changed accordingly.
-+ */
-+#define NR_VECTORS 256
-+
-+#define FPU_IRQ			13
-+
-+#define	FIRST_VM86_IRQ		3
-+#define LAST_VM86_IRQ		15
-+#define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
-+
-+/*
-+ * The flat IRQ space is divided into two regions:
-+ *  1. A one-to-one mapping of real physical IRQs. This space is only used
-+ *     if we have physical device-access privilege. This region is at the 
-+ *     start of the IRQ space so that existing device drivers do not need
-+ *     to be modified to translate physical IRQ numbers into our IRQ space.
-+ *  2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
-+ *     are bound using the provided bind/unbind functions.
-+ */
-+
-+#define PIRQ_BASE		0
-+#define NR_PIRQS		256
-+
-+#define DYNIRQ_BASE		(PIRQ_BASE + NR_PIRQS)
-+#define NR_DYNIRQS		256
-+
-+#define NR_IRQS			(NR_PIRQS + NR_DYNIRQS)
-+#define NR_IRQ_VECTORS		NR_IRQS
-+
-+#define pirq_to_irq(_x)		((_x) + PIRQ_BASE)
-+#define irq_to_pirq(_x)		((_x) - PIRQ_BASE)
-+
-+#define dynirq_to_irq(_x)	((_x) + DYNIRQ_BASE)
-+#define irq_to_dynirq(_x)	((_x) - DYNIRQ_BASE)
-+
-+#endif /* _ASM_IRQ_VECTORS_H */
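
With PIRQ_BASE 0 and NR_PIRQS 256 as defined above, physical IRQs keep their usual numbers and dynamically bound (event-channel) IRQs start at 256, for NR_IRQS = 512 in total. A tiny self-check of that arithmetic:

  #include <assert.h>

  #define PIRQ_BASE     0
  #define NR_PIRQS      256
  #define DYNIRQ_BASE   (PIRQ_BASE + NR_PIRQS)

  #define pirq_to_irq(x)    ((x) + PIRQ_BASE)
  #define dynirq_to_irq(x)  ((x) + DYNIRQ_BASE)

  int main(void)
  {
          assert(pirq_to_irq(14)  == 14);    /* physical IRQs map one-to-one */
          assert(dynirq_to_irq(3) == 259);   /* dynamic IRQs live above the physical range */
          return 0;
  }
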
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/mach_time.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/mach_time.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/mach_time.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/mach_time.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,122 @@
-+/*
-+ *  include/asm-i386/mach-default/mach_time.h
-+ *
-+ *  Machine specific set RTC function for generic.
-+ *  Split out from time.c by Osamu Tomita <tomita at cinet.co.jp>
-+ */
-+#ifndef _MACH_TIME_H
-+#define _MACH_TIME_H
-+
-+#include <asm-i386/mc146818rtc.h>
-+
-+/* for check timing call set_rtc_mmss() 500ms     */
-+/* used in arch/i386/time.c::do_timer_interrupt() */
-+#define USEC_AFTER	500000
-+#define USEC_BEFORE	500000
-+
-+/*
-+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
-+ * called 500 ms after the second nowtime has started, because when
-+ * nowtime is written into the registers of the CMOS clock, it will
-+ * jump to the next second precisely 500 ms later. Check the Motorola
-+ * MC146818A or Dallas DS12887 data sheet for details.
-+ *
-+ * BUG: This routine does not handle hour overflow properly; it just
-+ *      sets the minutes. Usually you'll only notice that after reboot!
-+ */
-+static inline int mach_set_rtc_mmss(unsigned long nowtime)
-+{
-+	int retval = 0;
-+	int real_seconds, real_minutes, cmos_minutes;
-+	unsigned char save_control, save_freq_select;
-+
-+	save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
-+	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
-+
-+	save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
-+	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
-+
-+	cmos_minutes = CMOS_READ(RTC_MINUTES);
-+	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-+		BCD_TO_BIN(cmos_minutes);
-+
-+	/*
-+	 * since we're only adjusting minutes and seconds,
-+	 * don't interfere with hour overflow. This avoids
-+	 * messing with unknown time zones but requires your
-+	 * RTC not to be off by more than 15 minutes
-+	 */
-+	real_seconds = nowtime % 60;
-+	real_minutes = nowtime / 60;
-+	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
-+		real_minutes += 30;		/* correct for half hour time zone */
-+	real_minutes %= 60;
-+
-+	if (abs(real_minutes - cmos_minutes) < 30) {
-+		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
-+			BIN_TO_BCD(real_seconds);
-+			BIN_TO_BCD(real_minutes);
-+		}
-+		CMOS_WRITE(real_seconds,RTC_SECONDS);
-+		CMOS_WRITE(real_minutes,RTC_MINUTES);
-+	} else {
-+		printk(KERN_WARNING
-+		       "set_rtc_mmss: can't update from %d to %d\n",
-+		       cmos_minutes, real_minutes);
-+		retval = -1;
-+	}
-+
-+	/* The following flags have to be released exactly in this order,
-+	 * otherwise the DS12887 (popular MC146818A clone with integrated
-+	 * battery and quartz) will not reset the oscillator and will not
-+	 * update precisely 500 ms later. You won't find this mentioned in
-+	 * the Dallas Semiconductor data sheets, but who believes data
-+	 * sheets anyway ...                           -- Markus Kuhn
-+	 */
-+	CMOS_WRITE(save_control, RTC_CONTROL);
-+	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-+
-+	return retval;
-+}
-+
-+static inline unsigned long mach_get_cmos_time(void)
-+{
-+	unsigned int year, mon, day, hour, min, sec;
-+	int i;
-+
-+	/* The Linux interpretation of the CMOS clock register contents:
-+	 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
-+	 * RTC registers show the second which has precisely just started.
-+	 * Let's hope other operating systems interpret the RTC the same way.
-+	 */
-+	/* read RTC exactly on falling edge of update flag */
-+	for (i = 0 ; i < 1000000 ; i++)	/* may take up to 1 second... */
-+		if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
-+			break;
-+	for (i = 0 ; i < 1000000 ; i++)	/* must try at least 2.228 ms */
-+		if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
-+			break;
-+	do { /* Isn't this overkill ? UIP above should guarantee consistency */
-+		sec = CMOS_READ(RTC_SECONDS);
-+		min = CMOS_READ(RTC_MINUTES);
-+		hour = CMOS_READ(RTC_HOURS);
-+		day = CMOS_READ(RTC_DAY_OF_MONTH);
-+		mon = CMOS_READ(RTC_MONTH);
-+		year = CMOS_READ(RTC_YEAR);
-+	} while (sec != CMOS_READ(RTC_SECONDS));
-+	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-+	  {
-+	    BCD_TO_BIN(sec);
-+	    BCD_TO_BIN(min);
-+	    BCD_TO_BIN(hour);
-+	    BCD_TO_BIN(day);
-+	    BCD_TO_BIN(mon);
-+	    BCD_TO_BIN(year);
-+	  }
-+	if ((year += 1900) < 1970)
-+		year += 100;
-+
-+	return mktime(year, mon, day, hour, min, sec);
-+}
-+
-+#endif /* !_MACH_TIME_H */
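
Both routines above juggle BCD-coded RTC registers (unless the RTC runs in binary mode): each byte carries two decimal digits, one per nibble. A standalone sketch of the conversion that BCD_TO_BIN/BIN_TO_BCD perform:

  #include <assert.h>

  static unsigned int bcd_to_bin(unsigned int v) { return (v & 15) + (v >> 4) * 10; }
  static unsigned int bin_to_bcd(unsigned int v) { return ((v / 10) << 4) | (v % 10); }

  int main(void)
  {
          assert(bcd_to_bin(0x59) == 59);    /* e.g. the RTC seconds register at :59 */
          assert(bin_to_bcd(59) == 0x59);
          return 0;
  }
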
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/mach_timer.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,48 @@
-+/*
-+ *  include/asm-i386/mach-default/mach_timer.h
-+ *
-+ *  Machine specific calibrate_tsc() for generic.
-+ *  Split out from timer_tsc.c by Osamu Tomita <tomita at cinet.co.jp>
-+ */
-+/* ------ Calibrate the TSC ------- 
-+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
-+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
-+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
-+ * output busy loop as low as possible. We avoid reading the CTC registers
-+ * directly because of the awkward 8-bit access mechanism of the 82C54
-+ * device.
-+ */
-+#ifndef _MACH_TIMER_H
-+#define _MACH_TIMER_H
-+
-+#define CALIBRATE_LATCH	(5 * LATCH)
-+
-+static inline void mach_prepare_counter(void)
-+{
-+       /* Set the Gate high, disable speaker */
-+	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
-+
-+	/*
-+	 * Now let's take care of CTC channel 2
-+	 *
-+	 * Set the Gate high, program CTC channel 2 for mode 0,
-+	 * (interrupt on terminal count mode), binary count,
-+	 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
-+	 *
-+	 * Some devices need a delay here.
-+	 */
-+	outb(0xb0, 0x43);			/* binary, mode 0, LSB/MSB, Ch 2 */
-+	outb_p(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
-+	outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
-+}
-+
-+static inline void mach_countup(unsigned long *count_p)
-+{
-+	unsigned long count = 0;
-+	do {
-+		count++;
-+	} while ((inb_p(0x61) & 0x20) == 0);
-+	*count_p = count;
-+}
-+
-+#endif /* !_MACH_TIMER_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/setup_arch_post.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,52 @@
-+/**
-+ * machine_specific_memory_setup - Hook for machine specific memory setup.
-+ *
-+ * Description:
-+ *	This is included late in kernel/setup.c so that it can make
-+ *	use of all of the static functions.
-+ **/
-+
-+static char * __init machine_specific_memory_setup(void)
-+{
-+	char *who;
-+	unsigned long start_pfn, max_pfn;
-+
-+	who = "Xen";
-+
-+	start_pfn = 0;
-+	max_pfn = xen_start_info->nr_pages;
-+
-+	e820.nr_map = 0;
-+	add_memory_region(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn) - PFN_PHYS(start_pfn), E820_RAM);
-+
-+	return who;
-+}
-+
-+void __init machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c)
-+{
-+	clear_bit(X86_FEATURE_VME, c->x86_capability);
-+	clear_bit(X86_FEATURE_DE, c->x86_capability);
-+	clear_bit(X86_FEATURE_PSE, c->x86_capability);
-+	clear_bit(X86_FEATURE_PGE, c->x86_capability);
-+	clear_bit(X86_FEATURE_SEP, c->x86_capability);
-+	if (!(xen_start_info->flags & SIF_PRIVILEGED))
-+		clear_bit(X86_FEATURE_MTRR, c->x86_capability);
-+}
-+
-+extern void hypervisor_callback(void);
-+extern void failsafe_callback(void);
-+extern void nmi(void);
-+
-+static void __init machine_specific_arch_setup(void)
-+{
-+	HYPERVISOR_set_callbacks(
-+                (unsigned long) hypervisor_callback,
-+                (unsigned long) failsafe_callback,
-+                (unsigned long) system_call);
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+	HYPERVISOR_nmi_op(XENNMI_register_callback, (unsigned long)&nmi);
-+#endif
-+
-+	machine_specific_modify_cpu_capabilities(&boot_cpu_data);
-+}
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mach-xen/setup_arch_pre.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,5 @@
-+/* Hook to call BIOS initialisation function */
-+
-+#define ARCH_SETUP machine_specific_arch_setup();
-+
-+static void __init machine_specific_arch_setup(void);
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mmu_context.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mmu_context.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mmu_context.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mmu_context.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,132 @@
-+#ifndef __X86_64_MMU_CONTEXT_H
-+#define __X86_64_MMU_CONTEXT_H
-+
-+#include <linux/config.h>
-+#include <asm/desc.h>
-+#include <asm/atomic.h>
-+#include <asm/pgalloc.h>
-+#include <asm/page.h>
-+#include <asm/pda.h>
-+#include <asm/pgtable.h>
-+#include <asm/tlbflush.h>
-+
-+/*
-+ * possibly do the LDT unload here?
-+ */
-+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-+void destroy_context(struct mm_struct *mm);
-+
-+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-+{
-+#if 0 /*  XEN: no lazy tlb */
-+	if (read_pda(mmu_state) == TLBSTATE_OK) 
-+		write_pda(mmu_state, TLBSTATE_LAZY);
-+#endif
-+}
-+
-+#define prepare_arch_switch(rq,next)	__prepare_arch_switch()
-+#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
-+#define task_running(rq, p)		((rq)->curr == (p))
-+
-+static inline void __prepare_arch_switch(void)
-+{
-+	/*
-+	 * Save away %es, %ds, %fs and %gs. Must happen before reload
-+	 * of cr3/ldt (i.e., not in __switch_to).
-+	 */
-+	__asm__ __volatile__ (
-+		"mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
-+		: "=m" (current->thread.es),
-+		  "=m" (current->thread.ds),
-+		  "=m" (current->thread.fsindex),
-+		  "=m" (current->thread.gsindex) );
-+
-+	if (current->thread.ds)
-+		__asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
-+
-+	if (current->thread.es)
-+		__asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
-+
-+	if (current->thread.fsindex) {
-+		__asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
-+		current->thread.fs = 0;
-+	}
-+
-+	if (current->thread.gsindex) {
-+		load_gs_index(0);
-+		current->thread.gs = 0;
-+	}
-+}
-+
-+extern void mm_pin(struct mm_struct *mm);
-+extern void mm_unpin(struct mm_struct *mm);
-+void mm_pin_all(void);
-+
-+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
-+			     struct task_struct *tsk)
-+{
-+	unsigned cpu = smp_processor_id();
-+	struct mmuext_op _op[3], *op = _op;
-+
-+	if (likely(prev != next)) {
-+		if (!next->context.pinned)
-+			mm_pin(next);
-+
-+		/* stop flush ipis for the previous mm */
-+		clear_bit(cpu, &prev->cpu_vm_mask);
-+#if 0  /* XEN: no lazy tlb */
-+		write_pda(mmu_state, TLBSTATE_OK);
-+		write_pda(active_mm, next);
-+#endif
-+		set_bit(cpu, &next->cpu_vm_mask);
-+
-+		/* load_cr3(next->pgd) */
-+		per_cpu(cur_pgd, smp_processor_id()) = next->pgd;
-+		op->cmd = MMUEXT_NEW_BASEPTR;
-+		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
-+		op++;
-+
-+		/* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
-+		op->cmd = MMUEXT_NEW_USER_BASEPTR;
-+		op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
-+		op++;
-+		
-+		if (unlikely(next->context.ldt != prev->context.ldt)) {
-+			/* load_LDT_nolock(&next->context, cpu) */
-+			op->cmd = MMUEXT_SET_LDT;
-+			op->arg1.linear_addr = (unsigned long)next->context.ldt;
-+			op->arg2.nr_ents     = next->context.size;
-+			op++;
-+		}
-+
-+		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
-+	}
-+
-+#if 0 /* XEN: no lazy tlb */
-+	else {
-+		write_pda(mmu_state, TLBSTATE_OK);
-+		if (read_pda(active_mm) != next)
-+			out_of_line_bug();
-+		if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
-+			/* We were in lazy tlb mode and leave_mm disabled 
-+			 * tlb flush IPI delivery. We must reload CR3
-+			 * to make sure to use no freed page tables.
-+			 */
-+                        load_cr3(next->pgd);
-+                        xen_new_user_pt(__pa(__user_pgd(next->pgd)));		
-+			load_LDT_nolock(&next->context, cpu);
-+		}
-+	}
-+#endif
-+}
-+
-+#define deactivate_mm(tsk,mm)	do { \
-+	load_gs_index(0); \
-+	asm volatile("movl %0,%%fs"::"r"(0));  \
-+} while(0)
-+
-+#define activate_mm(prev, next) do {		\
-+	switch_mm((prev),(next),NULL);		\
-+} while (0)
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mmu.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/mmu.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/mmu.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/mmu.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,33 @@
-+#ifndef __x86_64_MMU_H
-+#define __x86_64_MMU_H
-+
-+#include <linux/spinlock.h>
-+#include <asm/semaphore.h>
-+
-+/*
-+ * The x86_64 doesn't have a mmu context, but
-+ * we put the segment information here.
-+ *
-+ * cpu_vm_mask is used to optimize ldt flushing.
-+ */
-+typedef struct { 
-+	void *ldt;
-+	rwlock_t ldtlock; 
-+	int size;
-+	struct semaphore sem; 
-+#ifdef CONFIG_XEN
-+	unsigned pinned:1;
-+	struct list_head unpinned;
-+#endif
-+} mm_context_t;
-+
-+#ifdef CONFIG_XEN
-+extern struct list_head mm_unpinned;
-+extern spinlock_t mm_unpinned_lock;
-+
-+/* mm/memory.c:exit_mmap hook */
-+extern void _arch_exit_mmap(struct mm_struct *mm);
-+#define arch_exit_mmap(_mm) _arch_exit_mmap(_mm)
-+#endif
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/nmi.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/nmi.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/nmi.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/nmi.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,75 @@
-+/*
-+ *  linux/include/asm-i386/nmi.h
-+ */
-+#ifndef ASM_NMI_H
-+#define ASM_NMI_H
-+
-+#include <linux/pm.h>
-+
-+#include <asm-xen/xen-public/nmi.h>
-+
-+struct pt_regs;
-+ 
-+typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-+ 
-+/** 
-+ * set_nmi_callback
-+ *
-+ * Set a handler for an NMI. Only one handler may be
-+ * set. Return 1 if the NMI was handled.
-+ */
-+void set_nmi_callback(nmi_callback_t callback);
-+ 
-+/** 
-+ * unset_nmi_callback
-+ *
-+ * Remove the handler previously set.
-+ */
-+void unset_nmi_callback(void);
-+ 
-+#ifdef CONFIG_PM
-+ 
-+/** Replace the PM callback routine for NMI. */
-+struct pm_dev * set_nmi_pm_callback(pm_callback callback);
-+
-+/** Unset the PM callback routine back to the default. */
-+void unset_nmi_pm_callback(struct pm_dev * dev);
-+
-+#else
-+
-+static inline struct pm_dev * set_nmi_pm_callback(pm_callback callback)
-+{
-+	return 0;
-+} 
-+ 
-+static inline void unset_nmi_pm_callback(struct pm_dev * dev)
-+{
-+}
-+
-+#endif /* CONFIG_PM */
-+ 
-+extern void default_do_nmi(struct pt_regs *);
-+extern void die_nmi(char *str, struct pt_regs *regs);
-+
-+static inline unsigned char get_nmi_reason(void)
-+{
-+        shared_info_t *s = HYPERVISOR_shared_info;
-+        unsigned char reason = 0;
-+
-+        /* construct a value which looks like it came from
-+         * port 0x61.
-+         */
-+        if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
-+                reason |= 0x40;
-+        if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
-+                reason |= 0x80;
-+
-+        return reason;
-+}
-+
-+extern int panic_on_timeout;
-+extern int unknown_nmi_panic;
-+
-+extern int check_nmi_watchdog(void);
-+ 
-+#endif /* ASM_NMI_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/page.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/page.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/page.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/page.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,260 @@
-+#ifndef _X86_64_PAGE_H
-+#define _X86_64_PAGE_H
-+
-+#include <linux/config.h>
-+/* #include <linux/string.h> */
-+#ifndef __ASSEMBLY__
-+#include <linux/types.h>
-+#endif
-+#include <asm-xen/xen-public/xen.h> 
-+#include <asm-xen/foreign_page.h>
-+
-+#define arch_free_page(_page,_order)			\
-+({	int foreign = PageForeign(_page);		\
-+	if (foreign)					\
-+		(PageForeignDestructor(_page))(_page);	\
-+	foreign;					\
-+})
-+#define HAVE_ARCH_FREE_PAGE
-+
-+#ifdef CONFIG_XEN_SCRUB_PAGES
-+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
-+#else
-+#define scrub_pages(_p,_n) ((void)0)
-+#endif
-+
-+/* PAGE_SHIFT determines the page size */
-+#define PAGE_SHIFT	12
-+#ifdef __ASSEMBLY__
-+#define PAGE_SIZE	(0x1 << PAGE_SHIFT)
-+#else
-+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
-+#endif
-+#define PAGE_MASK	(~(PAGE_SIZE-1))
-+#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))
-+
-+#define THREAD_ORDER 1 
-+#ifdef __ASSEMBLY__
-+#define THREAD_SIZE  (1 << (PAGE_SHIFT + THREAD_ORDER))
-+#else
-+#define THREAD_SIZE  (1UL << (PAGE_SHIFT + THREAD_ORDER))
-+#endif
-+#define CURRENT_MASK (~(THREAD_SIZE-1))
-+
-+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
-+
-+#define HPAGE_SHIFT PMD_SHIFT
-+#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
-+#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
-+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-+
-+#ifdef __KERNEL__
-+#ifndef __ASSEMBLY__
-+
-+void clear_page(void *);
-+void copy_page(void *, void *);
-+
-+#define clear_user_page(page, vaddr, pg)	clear_page(page)
-+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
-+
-+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
-+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-+
-+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
-+#define INVALID_P2M_ENTRY	(~0UL)
-+#define FOREIGN_FRAME(m)	((m) | (1UL<<63))
-+extern unsigned long *phys_to_machine_mapping;
-+#define pfn_to_mfn(pfn)	\
-+(phys_to_machine_mapping[(unsigned int)(pfn)] & ~(1UL << 63))
-+#define	phys_to_machine_mapping_valid(pfn) \
-+	(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
-+static inline unsigned long mfn_to_pfn(unsigned long mfn)
-+{
-+	unsigned long pfn;
-+
-+	/*
-+	 * The array access can fail (e.g., device space beyond end of RAM).
-+	 * In such cases it doesn't matter what we return (we return garbage),
-+	 * but we must handle the fault without crashing!
-+	 */
-+	asm (
-+		"1:	movq %1,%0\n"
-+		"2:\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 8\n"
-+		"	.quad 1b,2b\n"
-+		".previous"
-+		: "=r" (pfn) : "m" (machine_to_phys_mapping[mfn]) );
-+
-+	return pfn;
-+}
-+
-+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-+{
-+	phys_to_machine_mapping[pfn] = mfn;
-+}
-+
-+/* Definitions for machine and pseudophysical addresses. */
-+typedef unsigned long paddr_t;
-+typedef unsigned long maddr_t;
-+
-+static inline maddr_t phys_to_machine(paddr_t phys)
-+{
-+	maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-+	machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-+	return machine;
-+}
-+
-+static inline paddr_t machine_to_phys(maddr_t machine)
-+{
-+	paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-+	phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-+	return phys;
-+}
-+
-+/*
-+ * These are used to make use of C type-checking..
-+ */
-+typedef struct { unsigned long pte; } pte_t;
-+typedef struct { unsigned long pmd; } pmd_t;
-+typedef struct { unsigned long pud; } pud_t;
-+typedef struct { unsigned long pgd; } pgd_t;
-+#define PTE_MASK	PHYSICAL_PAGE_MASK
-+
-+typedef struct { unsigned long pgprot; } pgprot_t;
-+
-+#define pte_val(x)	(((x).pte & 1) ? machine_to_phys((x).pte) : \
-+			 (x).pte)
-+#define pte_val_ma(x)	((x).pte)
-+
-+static inline unsigned long pmd_val(pmd_t x)
-+{
-+	unsigned long ret = x.pmd;
-+	if (ret) ret = machine_to_phys(ret);
-+	return ret;
-+}
-+
-+static inline unsigned long pud_val(pud_t x)
-+{
-+	unsigned long ret = x.pud;
-+	if (ret) ret = machine_to_phys(ret);
-+	return ret;
-+}
-+
-+static inline unsigned long pgd_val(pgd_t x)
-+{
-+	unsigned long ret = x.pgd;
-+	if (ret) ret = machine_to_phys(ret);
-+	return ret;
-+}
-+
-+#define pgprot_val(x)	((x).pgprot)
-+
-+#define __pte_ma(x)     ((pte_t) { (x) } )
-+
-+static inline pte_t __pte(unsigned long x)
-+{
-+	if (x & 1) x = phys_to_machine(x);
-+	return ((pte_t) { (x) });
-+}
-+
-+static inline pmd_t __pmd(unsigned long x)
-+{
-+	if ((x & 1)) x = phys_to_machine(x);
-+	return ((pmd_t) { (x) });
-+}
-+
-+static inline pud_t __pud(unsigned long x)
-+{
-+	if ((x & 1)) x = phys_to_machine(x);
-+	return ((pud_t) { (x) });
-+}
-+
-+static inline pgd_t __pgd(unsigned long x)
-+{
-+	if ((x & 1)) x = phys_to_machine(x);
-+	return ((pgd_t) { (x) });
-+}
-+
-+#define __pgprot(x)	((pgprot_t) { (x) } )
-+
-+#define __START_KERNEL		0xffffffff80100000UL
-+#define __START_KERNEL_map	0xffffffff80000000UL
-+#define __PAGE_OFFSET           0xffff880000000000UL	
-+
-+#else
-+#define __START_KERNEL		0xffffffff80100000
-+#define __START_KERNEL_map	0xffffffff80000000
-+#define __PAGE_OFFSET           0xffff880000000000
-+#endif /* !__ASSEMBLY__ */
-+
-+/* to align the pointer to the (next) page boundary */
-+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
-+
-+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
-+#define __PHYSICAL_MASK_SHIFT	46
-+#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
-+#define __VIRTUAL_MASK_SHIFT	48
-+#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
-+
-+#define KERNEL_TEXT_SIZE  (40UL*1024*1024)
-+#define KERNEL_TEXT_START 0xffffffff80000000UL 
-+
-+#ifndef __ASSEMBLY__
-+
-+#include <asm/bug.h>
-+
-+/* Pure 2^n version of get_order */
-+extern __inline__ int get_order(unsigned long size)
-+{
-+	int order;
-+
-+	size = (size-1) >> (PAGE_SHIFT-1);
-+	order = -1;
-+	do {
-+		size >>= 1;
-+		order++;
-+	} while (size);
-+	return order;
-+}
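-+
-+/*
-+ * Illustrative values (for size >= 1): get_order(PAGE_SIZE) == 0,
-+ * get_order(2*PAGE_SIZE) == 1, get_order(3*PAGE_SIZE) == 2, i.e. the
-+ * smallest order such that (PAGE_SIZE << order) >= size.
-+ */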
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
-+
-+/* Note: __pa(&symbol_visible_to_c) should always be replaced with __pa_symbol.
-+   Otherwise you risk miscompilation. */ 
-+#define __pa(x)			(((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
-+/* __pa_symbol should be used for C visible symbols.
-+   This seems to be the official gcc blessed way to do such arithmetic. */ 
-+#define __pa_symbol(x)		\
-+	({unsigned long v;  \
-+	  asm("" : "=r" (v) : "0" (x)); \
-+	  __pa(v); })
-+
-+#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
-+#ifndef CONFIG_DISCONTIGMEM
-+#define pfn_to_page(pfn)	(mem_map + (pfn))
-+#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
-+#define pfn_valid(pfn)		((pfn) < max_mapnr)
-+#endif
-+
-+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-+#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
-+
-+/* VIRT <-> MACHINE conversion */
-+#define virt_to_machine(v)	(phys_to_machine(__pa(v)))
-+#define virt_to_mfn(v)		(pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
-+#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
-+
-+#define VM_DATA_DEFAULT_FLAGS \
-+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+
-+#define __HAVE_ARCH_GATE_AREA 1	
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /* _X86_64_PAGE_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/param.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/param.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/param.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/param.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,22 @@
-+#ifndef _ASMx86_64_PARAM_H
-+#define _ASMx86_64_PARAM_H
-+
-+#ifdef __KERNEL__
-+# define HZ		100		/* Internal kernel timer frequency */
-+# define USER_HZ	100		/* .. some user interfaces are in "ticks" */
-+# define CLOCKS_PER_SEC		(USER_HZ)	/* like times() */
-+#endif
-+
-+#ifndef HZ
-+#define HZ 100
-+#endif
-+
-+#define EXEC_PAGESIZE	4096
-+
-+#ifndef NOGROUP
-+#define NOGROUP		(-1)
-+#endif
-+
-+#define MAXHOSTNAMELEN	64	/* max length of hostname */
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pci.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/pci.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pci.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/pci.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,167 @@
-+#ifndef __x8664_PCI_H
-+#define __x8664_PCI_H
-+
-+#include <linux/config.h>
-+#include <asm/io.h>
-+
-+#ifdef __KERNEL__
-+
-+#include <linux/mm.h> /* for struct page */
-+
-+/* Can be used to override the logic in pci_scan_bus for skipping
-+   already-configured bus numbers - to be used for buggy BIOSes
-+   or architectures with incomplete PCI setup by the loader */
-+
-+#ifdef CONFIG_PCI
-+extern unsigned int pcibios_assign_all_busses(void);
-+#else
-+#define pcibios_assign_all_busses()	0
-+#endif
-+#define pcibios_scan_all_fns(a, b)	0
-+
-+extern int no_iommu, force_iommu;
-+
-+extern unsigned long pci_mem_start;
-+#define PCIBIOS_MIN_IO		0x1000
-+#define PCIBIOS_MIN_MEM		(pci_mem_start)
-+
-+#define PCIBIOS_MIN_CARDBUS_IO	0x4000
-+
-+void pcibios_config_init(void);
-+struct pci_bus * pcibios_scan_root(int bus);
-+extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
-+extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
-+
-+void pcibios_set_master(struct pci_dev *dev);
-+void pcibios_penalize_isa_irq(int irq);
-+struct irq_routing_table *pcibios_get_irq_routing_table(void);
-+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-+
-+#include <linux/types.h>
-+#include <linux/slab.h>
-+#include <asm/scatterlist.h>
-+#include <linux/string.h>
-+#include <asm/page.h>
-+
-+extern int iommu_setup(char *opt);
-+
-+#ifdef CONFIG_GART_IOMMU
-+/* The PCI address space does equal the physical memory
-+ * address space.  The networking and block device layers use
-+ * this boolean for bounce buffer decisions
-+ *
-+ * On AMD64 it mostly equals, but we set it to zero to tell some subsystems
-+ * that an IOMMU is available.
-+ */
-+#define PCI_DMA_BUS_IS_PHYS	(no_iommu ? 1 : 0)
-+
-+/*
-+ * x86-64 always supports DAC, but sometimes it is useful to force
-+ * devices through the IOMMU to get automatic sg list merging.
-+ * Optional right now.
-+ */
-+extern int iommu_sac_force;
-+#define pci_dac_dma_supported(pci_dev, mask)	(!iommu_sac_force)
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-+	dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-+	__u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME)			\
-+	((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
-+	(((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME)			\
-+	((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
-+	(((PTR)->LEN_NAME) = (VAL))
-+
-+#elif defined(CONFIG_SWIOTLB)
-+
-+#define PCI_DMA_BUS_IS_PHYS	0
-+
-+#define pci_dac_dma_supported(pci_dev, mask)    1
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-+	dma_addr_t ADDR_NAME;
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
-+	__u32 LEN_NAME;
-+#define pci_unmap_addr(PTR, ADDR_NAME)			\
-+	((PTR)->ADDR_NAME)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)		\
-+	(((PTR)->ADDR_NAME) = (VAL))
-+#define pci_unmap_len(PTR, LEN_NAME)			\
-+	((PTR)->LEN_NAME)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)		\
-+	(((PTR)->LEN_NAME) = (VAL))
-+
-+#else
-+/* No IOMMU */
-+
-+#define PCI_DMA_BUS_IS_PHYS	1
-+#define pci_dac_dma_supported(pci_dev, mask)    1
-+
-+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-+#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
-+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
-+#define pci_unmap_len(PTR, LEN_NAME)		(0)
-+#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
-+
-+#endif
-+
-+#include <asm-generic/pci-dma-compat.h>
-+
-+static inline dma64_addr_t
-+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-+{
-+	return ((dma64_addr_t) page_to_phys(page) +
-+		(dma64_addr_t) offset);
-+}
-+
-+static inline struct page *
-+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return virt_to_page(__va(dma_addr)); 	
-+}
-+
-+static inline unsigned long
-+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-+{
-+	return (dma_addr & ~PAGE_MASK);
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+}
-+
-+static inline void
-+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-+{
-+	flush_write_buffers();
-+}
-+
-+#define HAVE_PCI_MMAP
-+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-+			       enum pci_mmap_state mmap_state, int write_combine);
-+
-+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-+{
-+}
-+
-+#endif /* __KERNEL__ */
-+
-+/* generic pci stuff */
-+#ifdef CONFIG_PCI
-+#include <asm-generic/pci.h>
-+#endif
-+
-+/* On Xen we have to scan all functions since Xen hides bridges from
-+ * us.  If a bridge is at fn=0 and that slot has a multifunction
-+ * device, we won't find the additional devices without scanning all
-+ * functions. */
-+#undef pcibios_scan_all_fns
-+#define pcibios_scan_all_fns(a, b)	1
-+
-+#endif /* __x8664_PCI_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pgalloc.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/pgalloc.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pgalloc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/pgalloc.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,199 @@
-+#ifndef _X86_64_PGALLOC_H
-+#define _X86_64_PGALLOC_H
-+
-+#include <asm/fixmap.h>
-+#include <asm/pda.h>
-+#include <linux/threads.h>
-+#include <linux/mm.h>
-+#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
-+
-+#include <asm-xen/features.h>
-+void make_page_readonly(void *va, unsigned int feature);
-+void make_page_writable(void *va, unsigned int feature);
-+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
-+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
-+
-+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
-+
-+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
-+{
-+	set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
-+}
-+
-+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-+{
-+	if (unlikely((mm)->context.pinned)) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
-+			       pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
-+		set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
-+	} else {
-+		*(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
-+	}
-+}
-+
-+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-+{
-+	if (unlikely((mm)->context.pinned)) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)pmd,
-+			       pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, 
-+				       PAGE_KERNEL_RO), 0));
-+		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
-+	} else {
-+		*(pud) =  __pud(_PAGE_TABLE | __pa(pmd));
-+	}
-+}
-+
-+/*
-+ * We need to use the batch mode here, but pgd_populate() won't be
-+ * called frequently.
-+ */
-+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
-+{
-+	if (unlikely((mm)->context.pinned)) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)pud,
-+			       pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, 
-+				       PAGE_KERNEL_RO), 0));
-+		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
-+		set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
-+	} else {
-+		*(pgd) =  __pgd(_PAGE_TABLE | __pa(pud));
-+		*(__user_pgd(pgd)) = *(pgd);
-+	}
-+}
-+
-+extern __inline__ void pmd_free(pmd_t *pmd)
-+{
-+	pte_t *ptep = virt_to_ptep(pmd);
-+
-+	if (!pte_write(*ptep)) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			(unsigned long)pmd,
-+			pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, PAGE_KERNEL),
-+			0));
-+	}
-+	free_page((unsigned long)pmd);
-+}
-+
-+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-+{
-+        pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-+        return pmd;
-+}
-+
-+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-+{
-+        pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-+        return pud;
-+}
-+
-+static inline void pud_free(pud_t *pud)
-+{
-+	pte_t *ptep = virt_to_ptep(pud);
-+
-+	if (!pte_write(*ptep)) {
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			(unsigned long)pud,
-+			pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL),
-+			0));
-+	}
-+	free_page((unsigned long)pud);
-+}
-+
-+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-+{
-+        /*
-+         * We allocate two contiguous pages for kernel and user.
-+         */
-+        unsigned boundary;
-+	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
-+
-+	if (!pgd)
-+		return NULL;
-+	/*
-+	 * Copy kernel pointers in from init.
-+	 * Could keep a freelist or slab cache of those because the kernel
-+	 * part never changes.
-+	 */
-+	boundary = pgd_index(__PAGE_OFFSET);
-+	memset(pgd, 0, boundary * sizeof(pgd_t));
-+	memcpy(pgd + boundary,
-+	       init_level4_pgt + boundary,
-+	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-+
-+	memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
-+        /*
-+         * Set level3_user_pgt for vsyscall area
-+         */
-+	set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START), 
-+                mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
-+	return pgd;
-+}
-+
-+static inline void pgd_free(pgd_t *pgd)
-+{
-+	pte_t *ptep = virt_to_ptep(pgd);
-+
-+	if (!pte_write(*ptep)) {
-+		xen_pgd_unpin(__pa(pgd));
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)pgd,
-+			       pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
-+			       0));
-+	}
-+
-+	ptep = virt_to_ptep(__user_pgd(pgd));
-+
-+	if (!pte_write(*ptep)) {
-+		xen_pgd_unpin(__pa(__user_pgd(pgd)));
-+		BUG_ON(HYPERVISOR_update_va_mapping(
-+			       (unsigned long)__user_pgd(pgd),
-+			       pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT, 
-+				       PAGE_KERNEL),
-+			       0));
-+	}
-+
-+	free_pages((unsigned long)pgd, 1);
-+}
-+
-+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-+{
-+        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-+        if (pte)
-+		make_page_readonly(pte, XENFEAT_writable_page_tables);
-+
-+	return pte;
-+}
-+
-+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-+{
-+	struct page *pte;
-+
-+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-+	return pte;
-+}
-+
-+/* Should really implement gc for free page table pages. This could be
-+   done with a reference count in struct page. */
-+
-+extern __inline__ void pte_free_kernel(pte_t *pte)
-+{
-+	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-+        xen_pte_unpin(__pa(pte));
-+        make_page_writable(pte, XENFEAT_writable_page_tables);
-+	free_page((unsigned long)pte); 
-+}
-+
-+extern void pte_free(struct page *pte);
-+
-+//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) 
-+//#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-+//#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-+
-+#define __pte_free_tlb(tlb,x)   pte_free((x))
-+#define __pmd_free_tlb(tlb,x)   pmd_free((x))
-+#define __pud_free_tlb(tlb,x)   pud_free((x))
-+
-+#endif /* _X86_64_PGALLOC_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pgtable.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/pgtable.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/pgtable.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/pgtable.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,570 @@
-+#ifndef _X86_64_PGTABLE_H
-+#define _X86_64_PGTABLE_H
-+
-+/*
-+ * This file contains the functions and defines necessary to modify and use
-+ * the x86-64 page table tree.
-+ */
-+#include <asm/processor.h>
-+#include <asm/fixmap.h>
-+#include <asm/bitops.h>
-+#include <linux/threads.h>
-+#include <linux/sched.h>
-+#include <asm/pda.h>
-+#ifdef CONFIG_XEN
-+#include <asm/hypervisor.h>
-+
-+extern pud_t level3_user_pgt[512];
-+extern pud_t init_level4_user_pgt[];
-+
-+extern void xen_init_pt(void);
-+
-+#define virt_to_ptep(__va)						\
-+({									\
-+	pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));		\
-+	pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));	\
-+	pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));	\
-+	pte_offset_kernel(__pmd, (unsigned long)(__va));		\
-+})
-+
-+#define arbitrary_virt_to_machine(__va)					\
-+({									\
-+	maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
-+	m | ((unsigned long)(__va) & (PAGE_SIZE-1));			\
-+})
-+#endif
-+
-+extern pud_t level3_kernel_pgt[512];
-+extern pud_t level3_physmem_pgt[512];
-+extern pud_t level3_ident_pgt[512];
-+extern pmd_t level2_kernel_pgt[512];
-+extern pgd_t init_level4_pgt[];
-+extern unsigned long __supported_pte_mask;
-+
-+#define swapper_pg_dir init_level4_pgt
-+
-+extern int nonx_setup(char *str);
-+extern void paging_init(void);
-+extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
-+
-+extern unsigned long pgkern_mask;
-+
-+/*
-+ * ZERO_PAGE is a global shared page that is always zero: used
-+ * for zero-mapped memory areas etc..
-+ */
-+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-+
-+/*
-+ * PGDIR_SHIFT determines what a top-level page table entry can map
-+ */
-+#define PGDIR_SHIFT	39
-+#define PTRS_PER_PGD	512
-+
-+/*
-+ * 3rd level page
-+ */
-+#define PUD_SHIFT	30
-+#define PTRS_PER_PUD	512
-+
-+/*
-+ * PMD_SHIFT determines the size of the area a middle-level
-+ * page table can map
-+ */
-+#define PMD_SHIFT	21
-+#define PTRS_PER_PMD	512
-+
-+/*
-+ * entries per page directory level
-+ */
-+#define PTRS_PER_PTE	512
-+
-+#define pte_ERROR(e) \
-+	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
-+#define pmd_ERROR(e) \
-+	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
-+#define pud_ERROR(e) \
-+	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
-+#define pgd_ERROR(e) \
-+	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-+
-+#define pgd_none(x)	(!pgd_val(x))
-+#define pud_none(x)	(!pud_val(x))
-+
-+#define set_pte_batched(pteptr, pteval) \
-+	queue_l1_entry_update(pteptr, (pteval))
-+
-+extern inline int pud_present(pud_t pud)	{ return !pud_none(pud); }
-+
-+static inline void set_pte(pte_t *dst, pte_t val)
-+{
-+	*dst = val;
-+}
-+
-+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
-+#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
-+#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
-+
-+extern inline void pud_clear (pud_t * pud)
-+{
-+	set_pud(pud, __pud(0));
-+}
-+
-+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
-+
-+extern inline void pgd_clear (pgd_t * pgd)
-+{
-+        set_pgd(pgd, __pgd(0));
-+        set_pgd(__user_pgd(pgd), __pgd(0));
-+}
-+
-+#define pud_page(pud) \
-+    ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
-+
-+/*
-+ * A note on implementation of this atomic 'get-and-clear' operation.
-+ * This is actually very simple because Xen Linux can only run on a single
-+ * processor. Therefore, we cannot race other processors setting the 'accessed'
-+ * or 'dirty' bits on a page-table entry.
-+ * Even if pages are shared between domains, that is not a problem because
-+ * each domain will have separate page tables, with their own versions of
-+ * accessed & dirty state.
-+ */
-+#define ptep_get_and_clear(mm,addr,xp)	__pte_ma(xchg(&(xp)->pte, 0))
-+
-+#if 0
-+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
-+{
-+        pte_t pte = *xp;
-+        if (pte.pte)
-+                set_pte(xp, __pte_ma(0));
-+        return pte;
-+}
-+#endif
-+
-+#define pte_same(a, b)		((a).pte == (b).pte)
-+
-+#define PMD_SIZE	(1UL << PMD_SHIFT)
-+#define PMD_MASK	(~(PMD_SIZE-1))
-+#define PUD_SIZE	(1UL << PUD_SHIFT)
-+#define PUD_MASK	(~(PUD_SIZE-1))
-+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
-+
-+#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
-+#define FIRST_USER_ADDRESS	0
-+
-+#ifndef __ASSEMBLY__
-+#define MAXMEM		 0x3fffffffffffUL
-+#define VMALLOC_START    0xffffc20000000000UL
-+#define VMALLOC_END      0xffffe1ffffffffffUL
-+#define MODULES_VADDR    0xffffffff88000000UL
-+#define MODULES_END      0xfffffffffff00000UL
-+#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
-+
-+#define _PAGE_BIT_PRESENT	0
-+#define _PAGE_BIT_RW		1
-+#define _PAGE_BIT_USER		2
-+#define _PAGE_BIT_PWT		3
-+#define _PAGE_BIT_PCD		4
-+#define _PAGE_BIT_ACCESSED	5
-+#define _PAGE_BIT_DIRTY		6
-+#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
-+#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
-+#define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
-+
-+#define _PAGE_PRESENT	0x001
-+#define _PAGE_RW	0x002
-+#define _PAGE_USER	0x004
-+#define _PAGE_PWT	0x008
-+#define _PAGE_PCD	0x010
-+#define _PAGE_ACCESSED	0x020
-+#define _PAGE_DIRTY	0x040
-+#define _PAGE_PSE	0x080	/* 2MB page */
-+#define _PAGE_FILE	0x040	/* set:pagecache, unset:swap */
-+#define _PAGE_GLOBAL	0x100	/* Global TLB entry */
-+
-+#define _PAGE_PROTNONE	0x080	/* If not present */
-+#define _PAGE_NX        (1UL<<_PAGE_BIT_NX)
-+
-+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-+#define _KERNPG_TABLE	_PAGE_TABLE
-+
-+#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-+
-+#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_COPY PAGE_COPY_NOEXEC
-+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+#define __PAGE_KERNEL \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
-+#define __PAGE_KERNEL_EXEC \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_USER )
-+#define __PAGE_KERNEL_NOCACHE \
-+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
-+#define __PAGE_KERNEL_RO \
-+	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_USER )
-+#define __PAGE_KERNEL_VSYSCALL \
-+	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_USER )
-+#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
-+	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD | _PAGE_USER )
-+#define __PAGE_KERNEL_LARGE \
-+	(__PAGE_KERNEL | _PAGE_PSE | _PAGE_USER )
-+
-+
-+/*
-+ * We don't support GLOBAL page in xenolinux64
-+ */
-+#define MAKE_GLOBAL(x) __pgprot((x))
-+
-+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
-+#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
-+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
-+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-+#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
-+#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
-+#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
-+#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-+
-+/*         xwr */
-+#define __P000	PAGE_NONE
-+#define __P001	PAGE_READONLY
-+#define __P010	PAGE_COPY
-+#define __P011	PAGE_COPY
-+#define __P100	PAGE_READONLY_EXEC
-+#define __P101	PAGE_READONLY_EXEC
-+#define __P110	PAGE_COPY_EXEC
-+#define __P111	PAGE_COPY_EXEC
-+
-+#define __S000	PAGE_NONE
-+#define __S001	PAGE_READONLY
-+#define __S010	PAGE_SHARED
-+#define __S011	PAGE_SHARED
-+#define __S100	PAGE_READONLY_EXEC
-+#define __S101	PAGE_READONLY_EXEC
-+#define __S110	PAGE_SHARED_EXEC
-+#define __S111	PAGE_SHARED_EXEC
-+
-+static inline unsigned long pgd_bad(pgd_t pgd)
-+{
-+       unsigned long val = pgd_val(pgd);
-+       val &= ~PTE_MASK;
-+       val &= ~(_PAGE_USER | _PAGE_DIRTY);
-+       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
-+}
-+
-+static inline unsigned long pud_bad(pud_t pud) 
-+{ 
-+       unsigned long val = pud_val(pud);
-+       val &= ~PTE_MASK; 
-+       val &= ~(_PAGE_USER | _PAGE_DIRTY); 
-+       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);      
-+} 
-+
-+#define set_pte_at(_mm,addr,ptep,pteval) do {				\
-+	if (((_mm) != current->mm && (_mm) != &init_mm) ||		\
-+	    HYPERVISOR_update_va_mapping((addr), (pteval), 0))		\
-+		set_pte((ptep), (pteval));				\
-+} while (0)
-+
-+#define pte_none(x)	(!(x).pte)
-+#define pte_present(x)	((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
-+#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
-+
-+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-+
-+/*
-+ * We detect special mappings in one of two ways:
-+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
-+ *     to be outside our maximum possible pseudophys range.
-+ *  2. If the MFN belongs to a different domain then we will certainly
-+ *     not have MFN in our p2m table. Conversely, if the page is ours,
-+ *     then we'll have p2m(m2p(MFN))==MFN.
-+ * If we detect a special mapping then it doesn't have a 'struct page'.
-+ * We force !pfn_valid() by returning an out-of-range pointer.
-+ *
-+ * NB. These checks require that, for any MFN that is not in our reservation,
-+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
-+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
-+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
-+ * 
-+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
-+ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
-+ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
-+ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
-+ */
-+#define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
-+#define pte_pfn(_pte)							\
-+({									\
-+	unsigned long mfn = pte_mfn(_pte);                              \
-+	unsigned long pfn = mfn_to_pfn(mfn);                            \
-+	if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\
-+		pfn = max_mapnr; /* special: force !pfn_valid() */	\
-+	pfn;								\
-+})
-+
-+#define pte_page(x)	pfn_to_page(pte_pfn(x))
-+
-+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-+{
-+	pte_t pte;
-+        
-+	(pte).pte = (pfn_to_mfn(page_nr) << PAGE_SHIFT);
-+	(pte).pte |= pgprot_val(pgprot);
-+	(pte).pte &= __supported_pte_mask;
-+	return pte;
-+}
-+
-+#define pfn_pte_ma(pfn, prot)	__pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
-+/*
-+ * The following only work if pte_present() is true.
-+ * Undefined behaviour if not..
-+ */
-+#define __pte_val(x)	((x).pte)
-+
-+static inline int pte_user(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
-+extern inline int pte_read(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
-+extern inline int pte_exec(pte_t pte)		{ return __pte_val(pte) & _PAGE_USER; }
-+extern inline int pte_dirty(pte_t pte)		{ return __pte_val(pte) & _PAGE_DIRTY; }
-+extern inline int pte_young(pte_t pte)		{ return __pte_val(pte) & _PAGE_ACCESSED; }
-+extern inline int pte_write(pte_t pte)		{ return __pte_val(pte) & _PAGE_RW; }
-+static inline int pte_file(pte_t pte)		{ return __pte_val(pte) & _PAGE_FILE; }
-+
-+extern inline pte_t pte_rdprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_USER; return pte; }
-+extern inline pte_t pte_exprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_USER; return pte; }
-+extern inline pte_t pte_mkclean(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
-+extern inline pte_t pte_mkold(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-+extern inline pte_t pte_wrprotect(pte_t pte)	{ __pte_val(pte) &= ~_PAGE_RW; return pte; }
-+extern inline pte_t pte_mkread(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
-+extern inline pte_t pte_mkexec(pte_t pte)	{ __pte_val(pte) |= _PAGE_USER; return pte; }
-+extern inline pte_t pte_mkdirty(pte_t pte)	{ __pte_val(pte) |= _PAGE_DIRTY; return pte; }
-+extern inline pte_t pte_mkyoung(pte_t pte)	{ __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-+extern inline pte_t pte_mkwrite(pte_t pte)	{ __pte_val(pte) |= _PAGE_RW; return pte; }
-+
-+struct vm_area_struct;
-+
-+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-+{
-+	pte_t pte = *ptep;
-+	int ret = pte_dirty(pte);
-+	if (ret)
-+		set_pte(ptep, pte_mkclean(pte));
-+	return ret;
-+}
-+
-+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-+{
-+	pte_t pte = *ptep;
-+	int ret = pte_young(pte);
-+	if (ret)
-+		set_pte(ptep, pte_mkold(pte));
-+	return ret;
-+}
-+
-+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-+{
-+	pte_t pte = *ptep;
-+	if (pte_write(pte))
-+		set_pte(ptep, pte_wrprotect(pte));
-+}
-+
-+/*
-+ * Macro to mark a page protection value as "uncacheable".
-+ */
-+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-+
-+#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT) 
-+static inline int pmd_large(pmd_t pte) { 
-+	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE; 
-+} 	
-+
-+
-+/*
-+ * Conversion functions: convert a page and protection to a page entry,
-+ * and a page entry and page directory to the page they refer to.
-+ */
-+
-+#define page_pte(page) page_pte_prot(page, __pgprot(0))
-+
-+/*
-+ * Level 4 access.
-+ * Never use these in the common code.
-+ */
-+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
-+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-+#define pgd_offset_k(address) (pgd_t *)(init_level4_pgt + pgd_index(address))
-+#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
-+#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
-+
-+/* PUD - Level3 access */
-+/* to find an entry in a page-table-directory. */
-+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-+#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
-+static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
-+{ 
-+	return pud + pud_index(address);
-+} 
-+
-+/* Find correct pud via the hidden fourth level page level: */
-+
-+/* This accesses the reference page table of the boot cpu. 
-+   Other CPUs get synced lazily via the page fault handler. */
-+static inline pud_t *pud_offset_k(unsigned long address)
-+{
-+	return pud_offset(pgd_offset_k(address), address);
-+}
-+
-+/* PMD  - Level 2 access */
-+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
-+#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-+
-+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-+#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
-+                                  pmd_index(address))
-+#define pmd_none(x)	(!pmd_val(x))
-+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
-+   can temporarily clear it. */
-+#define pmd_present(x)	(pmd_val(x))
-+#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
-+#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
-+#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-+#define pmd_pfn(x)  ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
-+
-+#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-+#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
-+#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
-+
-+/* PTE - Level 1 access. */
-+
-+/* page, protection -> pte */
-+#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-+#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
-+ 
-+/* physical address -> PTE */
-+static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
-+{ 
-+	pte_t pte;
-+	(pte).pte = physpage | pgprot_val(pgprot); 
-+	return pte; 
-+}
-+ 
-+/* Change flags of a PTE */
-+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-+{ 
-+        (pte).pte &= _PAGE_CHG_MASK;
-+	(pte).pte |= pgprot_val(newprot);
-+	(pte).pte &= __supported_pte_mask;
-+       return pte; 
-+}
-+
-+#define pte_index(address) \
-+		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-+#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
-+			pte_index(address))
-+
-+/* x86-64 always has all page tables mapped. */
-+#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
-+#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
-+#define pte_unmap(pte) /* NOP */
-+#define pte_unmap_nested(pte) /* NOP */ 
-+
-+#define update_mmu_cache(vma,address,pte) do { } while (0)
-+
-+/* We only update the dirty/accessed state if we set
-+ * the dirty bit by hand in the kernel, since the hardware
-+ * will do the accessed bit for us, and we don't want to
-+ * race with other CPU's that might be updating the dirty
-+ * bit at the same time. */
-+#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-+#if 0
-+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-+	do {								  \
-+		if (__dirty) {						  \
-+			set_pte(__ptep, __entry);			  \
-+			flush_tlb_page(__vma, __address);		  \
-+		}							  \
-+	} while (0)
-+#endif
-+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-+	do {								  \
-+		if (__dirty) {						  \
-+		        if ( likely((__vma)->vm_mm == current->mm) ) {    \
-+			    BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
-+			} else {                                          \
-+                            xen_l1_entry_update((__ptep), (__entry)); \
-+			    flush_tlb_page((__vma), (__address));         \
-+			}                                                 \
-+		}							  \
-+	} while (0)
-+
-+/* Encode and de-code a swap entry */
-+#define __swp_type(x)			(((x).val >> 1) & 0x3f)
-+#define __swp_offset(x)			((x).val >> 8)
-+#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
-+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+extern int kern_addr_valid(unsigned long addr); 
-+
-+#define DOMID_LOCAL (0xFFFFU)
-+
-+int direct_remap_pfn_range(struct vm_area_struct *vma,
-+                            unsigned long address,
-+                            unsigned long mfn,
-+                            unsigned long size,
-+                            pgprot_t prot,
-+                            domid_t  domid);
-+
-+int direct_kernel_remap_pfn_range(unsigned long address, 
-+				  unsigned long mfn,
-+				  unsigned long size, 
-+				  pgprot_t prot,
-+				  domid_t  domid);
-+
-+int create_lookup_pte_addr(struct mm_struct *mm,
-+                           unsigned long address,
-+                           uint64_t *ptep);
-+
-+int touch_pte_range(struct mm_struct *mm,
-+                    unsigned long address,
-+                    unsigned long size);
-+
-+#define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
-+		direct_remap_pfn_range(vma,vaddr,(paddr)>>PAGE_SHIFT,size,prot,DOMID_IO)
-+
-+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
-+		direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
-+
-+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
-+#define GET_IOSPACE(pfn)		0
-+#define GET_PFN(pfn)			(pfn)
-+
-+#define HAVE_ARCH_UNMAPPED_AREA
-+
-+#define pgtable_cache_init()   do { } while (0)
-+#define check_pgt_cache()      do { } while (0)
-+
-+#define PAGE_AGP    PAGE_KERNEL_NOCACHE
-+#define HAVE_PAGE_AGP 1
-+
-+/* fs/proc/kcore.c */
-+#define	kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-+#define	kc_offset_to_vaddr(o) \
-+   (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
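-+
-+/*
-+ * kc_offset_to_vaddr() sign-extends bit (__VIRTUAL_MASK_SHIFT - 1) of the
-+ * stored offset, so canonical kernel addresses survive the round trip,
-+ * e.g. kc_offset_to_vaddr(kc_vaddr_to_offset(PAGE_OFFSET)) == PAGE_OFFSET.
-+ */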
-+
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-+#define __HAVE_ARCH_PTE_SAME
-+#include <asm-generic/pgtable.h>
-+
-+#endif /* _X86_64_PGTABLE_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/processor.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/processor.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/processor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/processor.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,486 @@
-+/*
-+ * include/asm-x86_64/processor.h
-+ *
-+ * Copyright (C) 1994 Linus Torvalds
-+ */
-+
-+#ifndef __ASM_X86_64_PROCESSOR_H
-+#define __ASM_X86_64_PROCESSOR_H
-+
-+#include <asm/segment.h>
-+#include <asm/page.h>
-+#include <asm/types.h>
-+#include <asm/sigcontext.h>
-+#include <asm/cpufeature.h>
-+#include <linux/config.h>
-+#include <linux/threads.h>
-+#include <asm/msr.h>
-+#include <asm/current.h>
-+#include <asm/system.h>
-+#include <asm/mmsegment.h>
-+#include <asm/percpu.h>
-+#include <linux/personality.h>
-+
-+#define TF_MASK		0x00000100
-+#define IF_MASK		0x00000200
-+#define IOPL_MASK	0x00003000
-+#define NT_MASK		0x00004000
-+#define VM_MASK		0x00020000
-+#define AC_MASK		0x00040000
-+#define VIF_MASK	0x00080000	/* virtual interrupt flag */
-+#define VIP_MASK	0x00100000	/* virtual interrupt pending */
-+#define ID_MASK		0x00200000
-+
-+#define desc_empty(desc) \
-+               (!((desc)->a + (desc)->b))
-+
-+#define desc_equal(desc1, desc2) \
-+               (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-+
-+/*
-+ * Default implementation of macro that returns current
-+ * instruction pointer ("program counter").
-+ */
-+#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
-+
-+/*
-+ *  CPU type and hardware bug flags. Kept separately for each CPU.
-+ */
-+
-+struct cpuinfo_x86 {
-+	__u8	x86;		/* CPU family */
-+	__u8	x86_vendor;	/* CPU vendor */
-+	__u8	x86_model;
-+	__u8	x86_mask;
-+	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
-+	__u32	x86_capability[NCAPINTS];
-+	char	x86_vendor_id[16];
-+	char	x86_model_id[64];
-+	int 	x86_cache_size;  /* in KB */
-+	int	x86_clflush_size;
-+	int	x86_cache_alignment;
-+	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
-+        __u8    x86_virt_bits, x86_phys_bits;
-+	__u8	x86_num_cores;
-+        __u32   x86_power; 	
-+	__u32   extended_cpuid_level;	/* Max extended CPUID function supported */
-+	unsigned long loops_per_jiffy;
-+} ____cacheline_aligned;
-+
-+#define X86_VENDOR_INTEL 0
-+#define X86_VENDOR_CYRIX 1
-+#define X86_VENDOR_AMD 2
-+#define X86_VENDOR_UMC 3
-+#define X86_VENDOR_NEXGEN 4
-+#define X86_VENDOR_CENTAUR 5
-+#define X86_VENDOR_RISE 6
-+#define X86_VENDOR_TRANSMETA 7
-+#define X86_VENDOR_NUM 8
-+#define X86_VENDOR_UNKNOWN 0xff
-+
-+#ifdef CONFIG_SMP
-+extern struct cpuinfo_x86 cpu_data[];
-+#define current_cpu_data cpu_data[smp_processor_id()]
-+#else
-+#define cpu_data (&boot_cpu_data)
-+#define current_cpu_data boot_cpu_data
-+#endif
-+
-+extern char ignore_irq13;
-+
-+extern void identify_cpu(struct cpuinfo_x86 *);
-+extern void print_cpu_info(struct cpuinfo_x86 *);
-+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-+
-+/*
-+ * EFLAGS bits
-+ */
-+#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
-+#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
-+#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
-+#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
-+#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
-+#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
-+#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
-+#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
-+#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
-+#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
-+#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
-+#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
-+#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
-+#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
-+#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
-+#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
-+#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
-+
-+/*
-+ * Intel CPU features in CR4
-+ */
-+#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
-+#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
-+#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
-+#define X86_CR4_DE		0x0008	/* enable debugging extensions */
-+#define X86_CR4_PSE		0x0010	/* enable page size extensions */
-+#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
-+#define X86_CR4_MCE		0x0040	/* Machine check enable */
-+#define X86_CR4_PGE		0x0080	/* enable global pages */
-+#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
-+#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
-+#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
-+
-+/*
-+ * Save the cr4 feature set we're using (ie
-+ * Pentium 4MB enable and PPro Global page
-+ * enable), so that any CPU's that boot up
-+ * after us can get the correct flags.
-+ */
-+extern unsigned long mmu_cr4_features;
-+
-+static inline void set_in_cr4 (unsigned long mask)
-+{
-+	mmu_cr4_features |= mask;
-+	switch (mask) {
-+	case X86_CR4_OSFXSR:
-+	case X86_CR4_OSXMMEXCPT:
-+		break;
-+	default:
-+		do {
-+			const char *msg = "Xen unsupported cr4 update\n";
-+			(void)HYPERVISOR_console_io(
-+				CONSOLEIO_write, __builtin_strlen(msg),
-+				(char *)msg);
-+			BUG();
-+		} while (0);
-+	}
-+}
-+
-+
-+static inline void clear_in_cr4 (unsigned long mask)
-+{
-+#ifndef CONFIG_XEN
-+	mmu_cr4_features &= ~mask;
-+	__asm__("movq %%cr4,%%rax\n\t"
-+		"andq %0,%%rax\n\t"
-+		"movq %%rax,%%cr4\n"
-+		: : "irg" (~mask)
-+		:"ax");
-+#endif
-+}
-+
-+
-+#define load_cr3(pgdir) do {				\
-+	xen_pt_switch(__pa(pgdir));			\
-+	per_cpu(cur_pgd, smp_processor_id()) = pgdir;	\
-+} while (/* CONSTCOND */0)
-+
-+/*
-+ * Bus types
-+ */
-+#define MCA_bus 0
-+#define MCA_bus__is_a_macro
-+
-+
-+/*
-+ * User space process size. 47bits minus one guard page.
-+ */
-+#define TASK_SIZE	(0x800000000000UL - 4096)
-+
-+/* This decides where the kernel will search for a free chunk of vm
-+ * space during mmap's.
-+ */
-+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
-+#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3)
-+#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3) 
-+#define TASK_UNMAPPED_BASE	\
-+	(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)  
-+
-+/*
-+ * Size of io_bitmap.
-+ */
-+#define IO_BITMAP_BITS  65536
-+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
-+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
-+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
-+#define INVALID_IO_BITMAP_OFFSET 0x8000
-+
-+struct i387_fxsave_struct {
-+	u16	cwd;
-+	u16	swd;
-+	u16	twd;
-+	u16	fop;
-+	u64	rip;
-+	u64	rdp; 
-+	u32	mxcsr;
-+	u32	mxcsr_mask;
-+	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
-+	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 128 bytes */
-+	u32	padding[24];
-+} __attribute__ ((aligned (16)));
-+
-+union i387_union {
-+	struct i387_fxsave_struct	fxsave;
-+};
-+
-+struct tss_struct {
-+	u32 reserved1;
-+	u64 rsp0;	
-+	u64 rsp1;
-+	u64 rsp2;
-+	u64 reserved2;
-+	u64 ist[7];
-+	u32 reserved3;
-+	u32 reserved4;
-+	u16 reserved5;
-+	u16 io_bitmap_base;
-+	/*
-+	 * The extra 1 is there because the CPU will access an
-+	 * additional byte beyond the end of the IO permission
-+	 * bitmap. The extra byte must be all 1 bits, and must
-+	 * be within the limit. Thus we have:
-+	 *
-+	 * 128 bytes, the bitmap itself, for ports 0..0x3ff
-+	 * 8 bytes, for an extra "long" of ~0UL
-+	 */
-+	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
-+} __attribute__((packed)) ____cacheline_aligned;
-+
-+extern struct cpuinfo_x86 boot_cpu_data;
-+DECLARE_PER_CPU(struct tss_struct,init_tss);
-+DECLARE_PER_CPU(pgd_t *, cur_pgd);
-+
-+#define ARCH_MIN_TASKALIGN	16
-+
-+struct thread_struct {
-+	unsigned long	rsp0;
-+	unsigned long	rsp;
-+	unsigned long 	userrsp;	/* Copy from PDA */ 
-+	unsigned long	fs;
-+	unsigned long	gs;
-+       	unsigned int	io_pl;
-+	unsigned short	es, ds, fsindex, gsindex;	
-+/* Hardware debugging registers */
-+	unsigned long	debugreg0;  
-+	unsigned long	debugreg1;  
-+	unsigned long	debugreg2;  
-+	unsigned long	debugreg3;  
-+	unsigned long	debugreg6;  
-+	unsigned long	debugreg7;  
-+/* fault info */
-+	unsigned long	cr2, trap_no, error_code;
-+/* floating point info */
-+	union i387_union	i387  __attribute__((aligned(16)));
-+/* IO permissions. The bitmap could be moved into the GDT; that would make
-+   switching faster for a limited number of ioperm-using tasks. -AK */
-+	int		ioperm;
-+	unsigned long	*io_bitmap_ptr;
-+	unsigned io_bitmap_max;
-+/* cached TLS descriptors. */
-+	u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
-+} __attribute__((aligned(16)));
-+
-+#define INIT_THREAD  {}
-+
-+#define INIT_MMAP \
-+{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-+
-+#define STACKFAULT_STACK 1
-+#define DOUBLEFAULT_STACK 2 
-+#define NMI_STACK 3 
-+#define DEBUG_STACK 4 
-+#define MCE_STACK 5
-+#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
-+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-+#define EXCEPTION_STACK_ORDER 0 
-+
-+#define start_thread(regs,new_rip,new_rsp) do { \
-+	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));	 \
-+	load_gs_index(0);							\
-+	(regs)->rip = (new_rip);						 \
-+	(regs)->rsp = (new_rsp);						 \
-+	write_pda(oldrsp, (new_rsp));						 \
-+	(regs)->cs = __USER_CS;							 \
-+	(regs)->ss = __USER_DS;							 \
-+	(regs)->eflags = 0x200;							 \
-+	set_fs(USER_DS);							 \
-+} while(0) 
-+
-+struct task_struct;
-+struct mm_struct;
-+
-+/* Free all resources held by a thread. */
-+extern void release_thread(struct task_struct *);
-+
-+/* Prepare to copy thread state - unlazy all lazy status */
-+extern void prepare_to_copy(struct task_struct *tsk);
-+
-+/*
-+ * create a kernel thread without removing it from tasklists
-+ */
-+extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-+
-+/*
-+ * Return saved PC of a blocked thread.
-+ * What is this good for? It will always be the scheduler or ret_from_fork.
-+ */
-+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
-+
-+extern unsigned long get_wchan(struct task_struct *p);
-+#define KSTK_EIP(tsk) \
-+	(((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
-+#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
-+
-+
-+struct microcode_header {
-+	unsigned int hdrver;
-+	unsigned int rev;
-+	unsigned int date;
-+	unsigned int sig;
-+	unsigned int cksum;
-+	unsigned int ldrver;
-+	unsigned int pf;
-+	unsigned int datasize;
-+	unsigned int totalsize;
-+	unsigned int reserved[3];
-+};
-+
-+struct microcode {
-+	struct microcode_header hdr;
-+	unsigned int bits[0];
-+};
-+
-+typedef struct microcode microcode_t;
-+typedef struct microcode_header microcode_header_t;
-+
-+/* microcode format is extended from prescott processors */
-+struct extended_signature {
-+	unsigned int sig;
-+	unsigned int pf;
-+	unsigned int cksum;
-+};
-+
-+struct extended_sigtable {
-+	unsigned int count;
-+	unsigned int cksum;
-+	unsigned int reserved[3];
-+	struct extended_signature sigs[0];
-+};
-+
-+/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-+#define MICROCODE_IOCFREE	_IO('6',0)
-+
-+
-+#define ASM_NOP1 K8_NOP1
-+#define ASM_NOP2 K8_NOP2
-+#define ASM_NOP3 K8_NOP3
-+#define ASM_NOP4 K8_NOP4
-+#define ASM_NOP5 K8_NOP5
-+#define ASM_NOP6 K8_NOP6
-+#define ASM_NOP7 K8_NOP7
-+#define ASM_NOP8 K8_NOP8
-+
-+/* Opteron nops */
-+#define K8_NOP1 ".byte 0x90\n"
-+#define K8_NOP2	".byte 0x66,0x90\n" 
-+#define K8_NOP3	".byte 0x66,0x66,0x90\n" 
-+#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n" 
-+#define K8_NOP5	K8_NOP3 K8_NOP2 
-+#define K8_NOP6	K8_NOP3 K8_NOP3
-+#define K8_NOP7	K8_NOP4 K8_NOP3
-+#define K8_NOP8	K8_NOP4 K8_NOP4
-+
-+#define ASM_NOP_MAX 8
-+
-+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-+extern inline void rep_nop(void)
-+{
-+	__asm__ __volatile__("rep;nop": : :"memory");
-+}
-+
-+/* Stop speculative execution */
-+extern inline void sync_core(void)
-+{ 
-+	int tmp;
-+	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-+} 
-+
-+#define cpu_has_fpu 1
-+
-+#define ARCH_HAS_PREFETCH
-+static inline void prefetch(void *x) 
-+{ 
-+	asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
-+} 
-+
-+#define ARCH_HAS_PREFETCHW 1
-+static inline void prefetchw(void *x) 
-+{ 
-+	alternative_input(ASM_NOP5,
-+			  "prefetchw (%1)",
-+			  X86_FEATURE_3DNOW,
-+			  "r" (x));
-+} 
-+
-+#define ARCH_HAS_SPINLOCK_PREFETCH 1
-+
-+#define spin_lock_prefetch(x)  prefetchw(x)
-+
-+#define cpu_relax()   rep_nop()
-+
-+/*
-+ *      NSC/Cyrix CPU configuration register indexes
-+ */
-+#define CX86_CCR0 0xc0
-+#define CX86_CCR1 0xc1
-+#define CX86_CCR2 0xc2
-+#define CX86_CCR3 0xc3
-+#define CX86_CCR4 0xe8
-+#define CX86_CCR5 0xe9
-+#define CX86_CCR6 0xea
-+#define CX86_CCR7 0xeb
-+#define CX86_DIR0 0xfe
-+#define CX86_DIR1 0xff
-+#define CX86_ARR_BASE 0xc4
-+#define CX86_RCR_BASE 0xdc
-+
-+/*
-+ *      NSC/Cyrix CPU indexed register access macros
-+ */
-+
-+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-+
-+#define setCx86(reg, data) do { \
-+	outb((reg), 0x22); \
-+	outb((data), 0x23); \
-+} while (0)
-+
-+static inline void __monitor(const void *eax, unsigned long ecx,
-+		unsigned long edx)
-+{
-+	/* "monitor %eax,%ecx,%edx;" */
-+	asm volatile(
-+		".byte 0x0f,0x01,0xc8;"
-+		: :"a" (eax), "c" (ecx), "d"(edx));
-+}
-+
-+static inline void __mwait(unsigned long eax, unsigned long ecx)
-+{
-+	/* "mwait %eax,%ecx;" */
-+	asm volatile(
-+		".byte 0x0f,0x01,0xc9;"
-+		: :"a" (eax), "c" (ecx));
-+}
-+
-+#define stack_current() \
-+({								\
-+	struct thread_info *ti;					\
-+	asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
-+	ti->task;					\
-+})
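-+
-+/*
-+ * stack_current(): masking %rsp with CURRENT_MASK yields the base of the
-+ * current kernel stack, where struct thread_info lives, so ti->task is the
-+ * currently running task.
-+ */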
-+
-+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-+
-+extern unsigned long boot_option_idle_override;
-+/* Boot loader type from the setup header */
-+extern int bootloader_type;
-+
-+#endif /* __ASM_X86_64_PROCESSOR_H */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/ptrace.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/ptrace.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/ptrace.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/ptrace.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,124 @@
-+#ifndef _X86_64_PTRACE_H
-+#define _X86_64_PTRACE_H
-+
-+#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS) 
-+#define R15 0
-+#define R14 8
-+#define R13 16
-+#define R12 24
-+#define RBP 32
-+#define RBX 40
-+/* arguments: interrupts/non-tracing syscalls only save up to here */
-+#define R11 48
-+#define R10 56	
-+#define R9 64
-+#define R8 72
-+#define RAX 80
-+#define RCX 88
-+#define RDX 96
-+#define RSI 104
-+#define RDI 112
-+#define ORIG_RAX 120       /* = ERROR */ 
-+/* end of arguments */ 	
-+/* cpu exception frame or undefined in case of fast syscall. */
-+#define RIP 128
-+#define CS 136
-+#define EFLAGS 144
-+#define RSP 152
-+#define SS 160
-+#define ARGOFFSET R11
-+#endif /* __ASSEMBLY__ */
-+
-+/* top of stack page */ 
-+#define FRAME_SIZE 168
-+
-+#define PTRACE_OLDSETOPTIONS         21
-+
-+#ifndef __ASSEMBLY__ 
-+
-+struct pt_regs {
-+	unsigned long r15;
-+	unsigned long r14;
-+	unsigned long r13;
-+	unsigned long r12;
-+	unsigned long rbp;
-+	unsigned long rbx;
-+/* arguments: non-interrupt/non-tracing syscalls only save up to here */
-+ 	unsigned long r11;
-+	unsigned long r10;	
-+	unsigned long r9;
-+	unsigned long r8;
-+	unsigned long rax;
-+	unsigned long rcx;
-+	unsigned long rdx;
-+	unsigned long rsi;
-+	unsigned long rdi;
-+	unsigned long orig_rax;
-+/* end of arguments */ 	
-+/* cpu exception frame or undefined */
-+	unsigned long rip;
-+	unsigned long cs;
-+	unsigned long eflags; 
-+	unsigned long rsp; 
-+	unsigned long ss;
-+/* top of stack page */ 
-+};
-+
-+#endif
-+
-+/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
-+#define PTRACE_GETREGS            12
-+#define PTRACE_SETREGS            13
-+#define PTRACE_GETFPREGS          14
-+#define PTRACE_SETFPREGS          15
-+#define PTRACE_GETFPXREGS         18
-+#define PTRACE_SETFPXREGS         19
-+
-+/* only useful for access 32bit programs */
-+#define PTRACE_GET_THREAD_AREA    25
-+#define PTRACE_SET_THREAD_AREA    26
-+
-+#define PTRACE_ARCH_PRCTL	  30	/* arch_prctl for child */
-+
-+#if defined(__KERNEL__) && !defined(__ASSEMBLY__) 
-+#define user_mode(regs) (!!((regs)->cs & 3))
-+#define instruction_pointer(regs) ((regs)->rip)
-+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
-+extern unsigned long profile_pc(struct pt_regs *regs);
-+#else
-+#define profile_pc(regs) instruction_pointer(regs)
-+#endif
-+
-+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
-+
-+struct task_struct;
-+
-+extern unsigned long
-+convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs);
-+
-+enum {
-+        EF_CF   = 0x00000001,
-+        EF_PF   = 0x00000004,
-+        EF_AF   = 0x00000010,
-+        EF_ZF   = 0x00000040,
-+        EF_SF   = 0x00000080,
-+        EF_TF   = 0x00000100,
-+        EF_IE   = 0x00000200,
-+        EF_DF   = 0x00000400,
-+        EF_OF   = 0x00000800,
-+        EF_IOPL = 0x00003000,
-+        EF_IOPL_RING0 = 0x00000000,
-+        EF_IOPL_RING1 = 0x00001000,
-+        EF_IOPL_RING2 = 0x00002000,
-+        EF_NT   = 0x00004000,   /* nested task */
-+        EF_RF   = 0x00010000,   /* resume */
-+        EF_VM   = 0x00020000,   /* virtual mode */
-+        EF_AC   = 0x00040000,   /* alignment */
-+        EF_VIF  = 0x00080000,   /* virtual interrupt */
-+        EF_VIP  = 0x00100000,   /* virtual interrupt pending */
-+        EF_ID   = 0x00200000,   /* id */
-+};
-+
-+#endif
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/segment.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/segment.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/segment.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/segment.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,46 @@
-+#ifndef _ASM_SEGMENT_H
-+#define _ASM_SEGMENT_H
-+
-+#include <asm/cache.h>
-+
-+#define __KERNEL_CS	0x10
-+#define __KERNEL_DS	0x1b
-+
-+#define __KERNEL32_CS   0x3b
-+
-+/* 
-+ * we cannot use the same code segment descriptor for user and kernel
-+ * -- not even in the long flat mode, because of different DPL /kkeil 
-+ * The segment offset needs to contain a RPL. Grr. -AK
-+ * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets) 
-+ */
-+
-+#define __USER32_CS   0x23   /* 4*8+3 */ 
-+#define __USER_DS     0x2b   /* 5*8+3 */ 
-+#define __USER_CS     0x33   /* 6*8+3 */ 
-+#define __USER32_DS	__USER_DS 
-+#define __KERNEL16_CS	(GDT_ENTRY_KERNELCS16 * 8)
-+#define __KERNEL_COMPAT32_CS   0x8
-+
-+#define GDT_ENTRY_TLS 1
-+#define GDT_ENTRY_TSS 8	/* needs two entries */
-+#define GDT_ENTRY_LDT 10 /* needs two entries */
-+#define GDT_ENTRY_TLS_MIN 12
-+#define GDT_ENTRY_TLS_MAX 14
-+#define GDT_ENTRY_KERNELCS16 15
-+
-+#define GDT_ENTRY_TLS_ENTRIES 3
-+
-+/* TLS indexes for 64bit - hardcoded in arch_prctl */
-+#define FS_TLS 0	
-+#define GS_TLS 1	
-+
-+#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
-+#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
-+
-+#define IDT_ENTRIES 256
-+#define GDT_ENTRIES 16
-+#define GDT_SIZE (GDT_ENTRIES * 8)
-+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) 
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/smp.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/smp.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/smp.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/smp.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,138 @@
-+#ifndef __ASM_SMP_H
-+#define __ASM_SMP_H
-+
-+/*
-+ * We need the APIC definitions automatically as part of 'smp.h'
-+ */
-+#ifndef __ASSEMBLY__
-+#include <linux/config.h>
-+#include <linux/threads.h>
-+#include <linux/cpumask.h>
-+#include <linux/bitops.h>
-+extern int disable_apic;
-+#endif
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+#ifndef __ASSEMBLY__
-+#include <asm/fixmap.h>
-+#include <asm/mpspec.h>
-+#ifdef CONFIG_X86_IO_APIC
-+#include <asm/io_apic.h>
-+#endif
-+#include <asm/apic.h>
-+#include <asm/thread_info.h>
-+#endif
-+#endif
-+
-+#ifdef CONFIG_SMP
-+#ifndef ASSEMBLY
-+
-+#include <asm/pda.h>
-+
-+struct pt_regs;
-+
-+extern cpumask_t cpu_present_mask;
-+extern cpumask_t cpu_possible_map;
-+extern cpumask_t cpu_online_map;
-+
-+/*
-+ * Private routines/data
-+ */
-+ 
-+extern void smp_alloc_memory(void);
-+extern volatile unsigned long smp_invalidate_needed;
-+extern int pic_mode;
-+extern int smp_num_siblings;
-+extern void smp_flush_tlb(void);
-+extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-+extern void smp_send_reschedule(int cpu);
-+extern void smp_invalidate_rcv(void);		/* Process an NMI */
-+extern void zap_low_mappings(void);
-+void smp_stop_cpu(void);
-+extern cpumask_t cpu_sibling_map[NR_CPUS];
-+extern cpumask_t cpu_core_map[NR_CPUS];
-+extern int phys_proc_id[NR_CPUS];
-+extern int cpu_core_id[NR_CPUS];
-+
-+#define SMP_TRAMPOLINE_BASE 0x6000
-+
-+/*
-+ * On x86 all CPUs are mapped 1:1 to the APIC space.
-+ * This simplifies scheduling and IPI sending and
-+ * compresses data structures.
-+ */
-+
-+static inline int num_booting_cpus(void)
-+{
-+	return cpus_weight(cpu_possible_map);
-+}
-+
-+#define __smp_processor_id() read_pda(cpunumber)
-+
-+#ifdef CONFIG_X86_LOCAL_APIC
-+extern __inline int hard_smp_processor_id(void)
-+{
-+	/* we don't want to mark this access volatile - bad code generation */
-+	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-+}
-+#endif
-+
-+extern int safe_smp_processor_id(void);
-+
-+#endif /* !ASSEMBLY */
-+
-+#define NO_PROC_ID		0xFF		/* No processor magic marker */
-+
-+#endif
-+
-+#ifndef ASSEMBLY
-+/*
-+ * Some lowlevel functions might want to know about
-+ * the real APIC ID <-> CPU # mapping.
-+ */
-+extern u8 x86_cpu_to_apicid[NR_CPUS];	/* physical ID */
-+extern u8 x86_cpu_to_log_apicid[NR_CPUS];
-+extern u8 bios_cpu_apicid[];
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-+{
-+	return cpus_addr(cpumask)[0];
-+}
-+
-+static inline int cpu_present_to_apicid(int mps_cpu)
-+{
-+	if (mps_cpu < NR_CPUS)
-+		return (int)bios_cpu_apicid[mps_cpu];
-+	else
-+		return BAD_APICID;
-+}
-+#endif
-+
-+#endif /* !ASSEMBLY */
-+
-+#ifndef CONFIG_SMP
-+#define stack_smp_processor_id() 0
-+#define safe_smp_processor_id() 0
-+#define cpu_logical_map(x) (x)
-+#else
-+#include <asm/thread_info.h>
-+#define stack_smp_processor_id() \
-+({ 								\
-+	struct thread_info *ti;					\
-+	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
-+	ti->cpu;						\
-+})
-+#endif
-+
-+#ifndef __ASSEMBLY__
-+#ifdef CONFIG_X86_LOCAL_APIC
-+static __inline int logical_smp_processor_id(void)
-+{
-+	/* we don't want to mark this access volatile - bad code generation */
-+	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-+}
-+#endif
-+#endif
-+
-+#endif
-+
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/synch_bitops.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/synch_bitops.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/synch_bitops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/synch_bitops.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,2 @@
-+
-+#include <asm-i386/synch_bitops.h>
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/system.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/system.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/system.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/system.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,414 @@
-+#ifndef __ASM_SYSTEM_H
-+#define __ASM_SYSTEM_H
-+
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <asm/segment.h>
-+#include <asm/synch_bitops.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xen-public/arch-x86_64.h>
-+
-+#ifdef __KERNEL__
-+
-+#ifdef CONFIG_SMP
-+#define LOCK_PREFIX "lock ; "
-+#else
-+#define LOCK_PREFIX ""
-+#endif
-+
-+#define __STR(x) #x
-+#define STR(x) __STR(x)
-+
-+#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
-+#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
-+
-+/* frame pointer must be last for get_wchan */
-+#define SAVE_CONTEXT    "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t" 
-+
-+#define __EXTRA_CLOBBER  \
-+	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
-+
-+#define switch_to(prev,next,last) \
-+	asm volatile(SAVE_CONTEXT						    \
-+		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
-+		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
-+		     "call __switch_to\n\t"					  \
-+		     ".globl thread_return\n"					\
-+		     "thread_return:\n\t"					    \
-+		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
-+		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
-+		     LOCK "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"		  \
-+		     "movq %%rax,%%rdi\n\t" 					  \
-+		     "jc   ret_from_fork\n\t"					  \
-+		     RESTORE_CONTEXT						    \
-+		     : "=a" (last)					  	  \
-+		     : [next] "S" (next), [prev] "D" (prev),			  \
-+		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
-+		       [ti_flags] "i" (offsetof(struct thread_info, flags)),\
-+		       [tif_fork] "i" (TIF_FORK),			  \
-+		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
-+		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
-+		     : "memory", "cc" __EXTRA_CLOBBER)
-+    
-+
-+extern void load_gs_index(unsigned);
-+
-+/*
-+ * Load a segment. Fall back on loading the zero
-+ * segment if something goes wrong..
-+ */
-+#define loadsegment(seg,value)	\
-+	asm volatile("\n"			\
-+		"1:\t"				\
-+		"movl %k0,%%" #seg "\n"		\
-+		"2:\n"				\
-+		".section .fixup,\"ax\"\n"	\
-+		"3:\t"				\
-+		"movl %1,%%" #seg "\n\t" 	\
-+		"jmp 2b\n"			\
-+		".previous\n"			\
-+		".section __ex_table,\"a\"\n\t"	\
-+		".align 8\n\t"			\
-+		".quad 1b,3b\n"			\
-+		".previous"			\
-+		: :"r" (value), "r" (0))
-+
-+#define set_debug(value,register) \
-+                __asm__("movq %0,%%db" #register  \
-+		: /* no output */ \
-+		:"r" ((unsigned long) value))
-+
-+
-+#ifdef __KERNEL__
-+struct alt_instr { 
-+	__u8 *instr; 		/* original instruction */
-+	__u8 *replacement;
-+	__u8  cpuid;		/* cpuid bit set for replacement */
-+	__u8  instrlen;		/* length of original instruction */
-+	__u8  replacementlen; 	/* length of new instruction, <= instrlen */ 
-+	__u8  pad[5];
-+}; 
-+#endif
-+
-+/*
-+ * Alternative instructions for different CPU types or capabilities.
-+ * 
-+ * This allows the use of optimized instructions even on generic binary
-+ * kernels.
-+ *
-+ * The length of oldinstr must be greater than or equal to the length of
-+ * newinstr; it can be padded with nops as needed.
-+ *
-+ * For non-barrier-like inlines, please define new variants without
-+ * volatile and the memory clobber.
-+ */
-+#define alternative(oldinstr, newinstr, feature) 	\
-+	asm volatile ("661:\n\t" oldinstr "\n662:\n" 		     \
-+		      ".section .altinstructions,\"a\"\n"     	     \
-+		      "  .align 8\n"				       \
-+		      "  .quad 661b\n"            /* label */          \
-+		      "  .quad 663f\n"		  /* new instruction */ \
-+		      "  .byte %c0\n"             /* feature bit */    \
-+		      "  .byte 662b-661b\n"       /* sourcelen */      \
-+		      "  .byte 664f-663f\n"       /* replacementlen */ \
-+		      ".previous\n"					\
-+		      ".section .altinstr_replacement,\"ax\"\n"		\
-+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
-+		      ".previous" :: "i" (feature) : "memory")  
-+
-+/*
-+ * Alternative inline assembly with input.
-+ * 
-+ * Peculiarities:
-+ * No memory clobber here.
-+ * Argument numbers start with 1.
-+ * It is best to use constraints that are fixed size (like (%1) ... "r").
-+ * If you use variable-sized constraints like "m" or "g" in the
-+ * replacement, make sure to pad to the worst-case length.
-+ */
-+#define alternative_input(oldinstr, newinstr, feature, input...)	\
-+	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
-+		      ".section .altinstructions,\"a\"\n"		\
-+		      "  .align 8\n"					\
-+		      "  .quad 661b\n"            /* label */		\
-+		      "  .quad 663f\n"		  /* new instruction */	\
-+		      "  .byte %c0\n"             /* feature bit */	\
-+		      "  .byte 662b-661b\n"       /* sourcelen */	\
-+		      "  .byte 664f-663f\n"       /* replacementlen */	\
-+		      ".previous\n"					\
-+		      ".section .altinstr_replacement,\"ax\"\n"		\
-+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
-+		      ".previous" :: "i" (feature), ##input)
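For illustration only (not part of the patch): a minimal sketch of how the alternative() macro above is typically used, patterned on the barrier definitions elsewhere in the tree. The example_mb name is made up, and X86_FEATURE_XMM2 is assumed to come from <asm/cpufeature.h>.

    #include <asm/cpufeature.h>   /* X86_FEATURE_XMM2 (assumed) */

    /*
     * Illustrative only: use a locked add as a memory barrier on old CPUs,
     * and let the kernel patch in the shorter MFENCE at boot when the CPU
     * advertises SSE2.  The replacement must not be longer than the original.
     */
    #define example_mb() \
        alternative("lock; addl $0,0(%%rsp)", "mfence", X86_FEATURE_XMM2)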
-+
-+/*
-+ * Clear and set 'TS' bit respectively
-+ */
-+#define clts() (HYPERVISOR_fpu_taskswitch(0))
-+
-+static inline unsigned long read_cr0(void)
-+{ 
-+	unsigned long cr0;
-+	asm volatile("movq %%cr0,%0" : "=r" (cr0));
-+	return cr0;
-+} 
-+
-+static inline void write_cr0(unsigned long val) 
-+{ 
-+	asm volatile("movq %0,%%cr0" :: "r" (val));
-+} 
-+
-+static inline unsigned long read_cr3(void)
-+{ 
-+	unsigned long cr3;
-+	asm("movq %%cr3,%0" : "=r" (cr3));
-+	return cr3;
-+} 
-+
-+static inline unsigned long read_cr4(void)
-+{ 
-+	unsigned long cr4;
-+	asm("movq %%cr4,%0" : "=r" (cr4));
-+	return cr4;
-+} 
-+
-+static inline void write_cr4(unsigned long val)
-+{ 
-+	asm volatile("movq %0,%%cr4" :: "r" (val));
-+} 
-+
-+#define stts() (HYPERVISOR_fpu_taskswitch(1))
-+
-+#define wbinvd() \
-+	__asm__ __volatile__ ("wbinvd": : :"memory");
-+
-+#endif	/* __KERNEL__ */
-+
-+#define nop() __asm__ __volatile__ ("nop")
-+
-+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-+
-+#define tas(ptr) (xchg((ptr),1))
-+
-+#define __xg(x) ((volatile long *)(x))
-+
-+extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-+{
-+	*ptr = val;
-+}
-+
-+#define _set_64bit set_64bit
-+
-+/*
-+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
-+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
-+ *	  but generally the primitive is invalid, *ptr is output argument. --ANK
-+ */
-+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-+{
-+	switch (size) {
-+		case 1:
-+			__asm__ __volatile__("xchgb %b0,%1"
-+				:"=q" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 2:
-+			__asm__ __volatile__("xchgw %w0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 4:
-+			__asm__ __volatile__("xchgl %k0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+		case 8:
-+			__asm__ __volatile__("xchgq %0,%1"
-+				:"=r" (x)
-+				:"m" (*__xg(ptr)), "0" (x)
-+				:"memory");
-+			break;
-+	}
-+	return x;
-+}
-+
-+/*
-+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
-+ * store NEW in MEM.  Return the initial value in MEM.  Success is
-+ * indicated by comparing RETURN with OLD.
-+ */
-+
-+#define __HAVE_ARCH_CMPXCHG 1
-+
-+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-+				      unsigned long new, int size)
-+{
-+	unsigned long prev;
-+	switch (size) {
-+	case 1:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 2:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 4:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	case 8:
-+		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
-+				     : "=a"(prev)
-+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-+				     : "memory");
-+		return prev;
-+	}
-+	return old;
-+}
-+
-+#define cmpxchg(ptr,o,n)\
-+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-+					(unsigned long)(n),sizeof(*(ptr))))
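As a usage illustration (not part of the patch), the classic compare-and-swap retry loop built on the cmpxchg() wrapper above; the function and variable names are made up for the example.

    /* Illustrative only: atomically add 'delta' to a shared counter. */
    static inline void example_atomic_add(volatile unsigned long *counter,
                                          unsigned long delta)
    {
        unsigned long old, new;

        do {
            old = *counter;           /* snapshot the current value */
            new = old + delta;
        } while (cmpxchg(counter, old, new) != old);  /* retry if it changed */
    }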
-+
-+#ifdef CONFIG_SMP
-+#define smp_mb()	mb()
-+#define smp_rmb()	rmb()
-+#define smp_wmb()	wmb()
-+#define smp_read_barrier_depends()	do {} while(0)
-+#else
-+#define smp_mb()	barrier()
-+#define smp_rmb()	barrier()
-+#define smp_wmb()	barrier()
-+#define smp_read_barrier_depends()	do {} while(0)
-+#endif
-+
-+    
-+/*
-+ * Force strict CPU ordering.
-+ * And yes, this is required on UP too when we're talking
-+ * to devices.
-+ */
-+#define mb() 	asm volatile("mfence":::"memory")
-+#define rmb()	asm volatile("lfence":::"memory")
-+
-+#ifdef CONFIG_UNORDERED_IO
-+#define wmb()	asm volatile("sfence" ::: "memory")
-+#else
-+#define wmb()	asm volatile("" ::: "memory")
-+#endif
-+#define read_barrier_depends()	do {} while(0)
-+#define set_mb(var, value) do { xchg(&var, value); } while (0)
-+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-+
-+#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
-+
-+
-+/* 
-+ * The use of 'barrier' in the following reflects their use as local-lock
-+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
-+ * critical operations are executed. All critical operations must complete
-+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
-+ * includes these barriers, for example.
-+ */
-+
-+#define __cli()								\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	_vcpu->evtchn_upcall_mask = 1;					\
-+	preempt_enable_no_resched();					\
-+	barrier();							\
-+} while (0)
-+
-+#define __sti()								\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	barrier();							\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	_vcpu->evtchn_upcall_mask = 0;					\
-+	barrier(); /* unmask then check (avoid races) */		\
-+	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
-+		force_evtchn_callback();				\
-+	preempt_enable();						\
-+} while (0)
-+
-+#define __save_flags(x)							\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	(x) = _vcpu->evtchn_upcall_mask;				\
-+	preempt_enable();						\
-+} while (0)
-+
-+#define __restore_flags(x)						\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	barrier();							\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
-+		barrier(); /* unmask then check (avoid races) */	\
-+		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
-+			force_evtchn_callback();			\
-+		preempt_enable();					\
-+	} else								\
-+		preempt_enable_no_resched();				\
-+} while (0)
-+
-+#define safe_halt()		((void)0)
-+
-+#define __save_and_cli(x)						\
-+do {									\
-+	vcpu_info_t *_vcpu;						\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	(x) = _vcpu->evtchn_upcall_mask;				\
-+	_vcpu->evtchn_upcall_mask = 1;					\
-+	preempt_enable_no_resched();					\
-+	barrier();							\
-+} while (0)
-+
-+void cpu_idle_wait(void);
-+
-+#define local_irq_save(x)	__save_and_cli(x)
-+#define local_irq_restore(x)	__restore_flags(x)
-+#define local_save_flags(x)	__save_flags(x)
-+#define local_irq_disable()	__cli()
-+#define local_irq_enable()	__sti()
-+
-+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
-+#define irqs_disabled()							\
-+({	int ___x;							\
-+	vcpu_info_t *_vcpu;						\
-+	preempt_disable();						\
-+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
-+	___x = (_vcpu->evtchn_upcall_mask != 0);			\
-+	preempt_enable_no_resched();					\
-+	___x; })
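A usage sketch (illustrative, not part of the patch): under Xen these wrappers manipulate the vcpu's event-channel mask rather than the hardware interrupt flag, but callers use them exactly like the native versions. The function name is made up.

    /* Illustrative only: protect a short critical section from event upcalls. */
    static void example_critical_section(void)
    {
        unsigned long flags;

        local_irq_save(flags);      /* mask event delivery, remember old state */
        /* ... touch interrupt-shared data here ... */
        local_irq_restore(flags);   /* put the previous mask state back */
    }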
-+
-+/*
-+ * disable hlt during certain critical i/o operations
-+ */
-+#define HAVE_DISABLE_HLT
-+void disable_hlt(void);
-+void enable_hlt(void);
-+
-+#define HAVE_EAT_KEY
-+void eat_key(void);
-+
-+extern unsigned long arch_align_stack(unsigned long sp);
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/timer.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/timer.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/timer.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/timer.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,64 @@
-+#ifndef _ASMi386_TIMER_H
-+#define _ASMi386_TIMER_H
-+#include <linux/init.h>
-+
-+/**
-+ * struct timer_opts - used to define a timer source
-+ *
-+ * @name: name of the timer.
-+ * @init: Probes and initializes the timer. Takes clock= override 
-+ *        string as an argument. Returns 0 on success, anything else
-+ *        on failure.
-+ * @mark_offset: called by the timer interrupt.
-+ * @get_offset:  called by gettimeofday(). Returns the number of microseconds
-+ *               since the last timer interrupt.
-+ * @monotonic_clock: returns the number of nanoseconds since the init of the
-+ *                   timer.
-+ * @delay: delays this many clock cycles.
-+ */
-+struct timer_opts {
-+	char* name;
-+	void (*mark_offset)(void);
-+	unsigned long (*get_offset)(void);
-+	unsigned long long (*monotonic_clock)(void);
-+	void (*delay)(unsigned long);
-+};
-+
-+struct init_timer_opts {
-+	int (*init)(char *override);
-+	struct timer_opts *opts;
-+};
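For illustration (not part of the patch): a skeleton showing how a timer source plugs into the two structures above; every name prefixed with example_ is hypothetical.

    /* Illustrative only: a do-nothing timer source registration. */
    static void example_mark_offset(void) { /* called from the timer interrupt */ }
    static unsigned long example_get_offset(void) { return 0; /* usecs since tick */ }
    static unsigned long long example_monotonic_clock(void) { return 0; /* ns */ }
    static void example_delay(unsigned long loops) { while (loops--) cpu_relax(); }

    static struct timer_opts example_timer = {
        .name            = "example",
        .mark_offset     = example_mark_offset,
        .get_offset      = example_get_offset,
        .monotonic_clock = example_monotonic_clock,
        .delay           = example_delay,
    };

    static int __init example_timer_init(char *override)
    {
        /* honour a "clock=" override that names a different timer */
        if (override[0] && strncmp(override, "example", 7) != 0)
            return -ENODEV;
        return 0;
    }

    struct init_timer_opts __initdata timer_example_init = {
        .init = example_timer_init,
        .opts = &example_timer,
    };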
-+
-+#define TICK_SIZE (tick_nsec / 1000)
-+
-+extern struct timer_opts* __init select_timer(void);
-+extern void clock_fallback(void);
-+void setup_pit_timer(void);
-+
-+/* Modifiers for buggy PIT handling */
-+
-+extern int pit_latch_buggy;
-+
-+extern struct timer_opts *cur_timer;
-+extern int timer_ack;
-+
-+/* list of externed timers */
-+extern struct timer_opts timer_none;
-+extern struct timer_opts timer_pit;
-+extern struct init_timer_opts timer_pit_init;
-+extern struct init_timer_opts timer_tsc_init;
-+#ifdef CONFIG_X86_CYCLONE_TIMER
-+extern struct init_timer_opts timer_cyclone_init;
-+#endif
-+
-+extern unsigned long calibrate_tsc(void);
-+extern void init_cpu_khz(void);
-+#ifdef CONFIG_HPET_TIMER
-+extern struct init_timer_opts timer_hpet_init;
-+extern unsigned long calibrate_tsc_hpet(unsigned long *tsc_hpet_quotient_ptr);
-+#endif
-+
-+#ifdef CONFIG_X86_PM_TIMER
-+extern struct init_timer_opts timer_pmtmr_init;
-+#endif
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/tlbflush.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/tlbflush.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/tlbflush.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/tlbflush.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,97 @@
-+#ifndef _X8664_TLBFLUSH_H
-+#define _X8664_TLBFLUSH_H
-+
-+#include <linux/config.h>
-+#include <linux/mm.h>
-+#include <asm/processor.h>
-+
-+#define __flush_tlb()	xen_tlb_flush()
-+
-+/*
-+ * Global pages have to be flushed a bit differently. Not a real
-+ * performance problem because this does not happen often.
-+ */
-+#define __flush_tlb_global()	xen_tlb_flush()
-+
-+
-+extern unsigned long pgkern_mask;
-+
-+#define __flush_tlb_all() __flush_tlb_global()
-+
-+#define __flush_tlb_one(addr)	xen_invlpg((unsigned long)addr)
-+
-+
-+/*
-+ * TLB flushing:
-+ *
-+ *  - flush_tlb() flushes the current mm struct TLBs
-+ *  - flush_tlb_all() flushes all processes TLBs
-+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
-+ *  - flush_tlb_page(vma, vmaddr) flushes one page
-+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
-+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
-+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
-+ *
-+ * ..but the x86_64 has somewhat limited tlb flushing capabilities,
-+ * and page-granular flushes are available only on i486 and up.
-+ */
-+
-+#ifndef CONFIG_SMP
-+
-+#define flush_tlb() __flush_tlb()
-+#define flush_tlb_all() __flush_tlb_all()
-+#define local_flush_tlb() __flush_tlb()
-+
-+static inline void flush_tlb_mm(struct mm_struct *mm)
-+{
-+	if (mm == current->active_mm)
-+		__flush_tlb();
-+}
-+
-+static inline void flush_tlb_page(struct vm_area_struct *vma,
-+	unsigned long addr)
-+{
-+	if (vma->vm_mm == current->active_mm)
-+		__flush_tlb_one(addr);
-+}
-+
-+static inline void flush_tlb_range(struct vm_area_struct *vma,
-+	unsigned long start, unsigned long end)
-+{
-+	if (vma->vm_mm == current->active_mm)
-+		__flush_tlb();
-+}
-+
-+#else
-+
-+#include <asm/smp.h>
-+
-+#define local_flush_tlb() \
-+	__flush_tlb()
-+
-+extern void flush_tlb_all(void);
-+extern void flush_tlb_current_task(void);
-+extern void flush_tlb_mm(struct mm_struct *);
-+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-+
-+#define flush_tlb()	flush_tlb_current_task()
-+
-+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-+{
-+	flush_tlb_mm(vma->vm_mm);
-+}
-+
-+#define TLBSTATE_OK	1
-+#define TLBSTATE_LAZY	2
-+
-+#endif
-+
-+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
-+
-+static inline void flush_tlb_pgtables(struct mm_struct *mm,
-+				      unsigned long start, unsigned long end)
-+{
-+	/* x86_64 does not keep any page table caches in TLB */
-+}
-+
-+#endif /* _X8664_TLBFLUSH_H */
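A small usage sketch (illustrative, not part of the patch): after a page-table update, flush only the affected mapping; under Xen the flush is forwarded to the hypervisor via xen_invlpg()/xen_tlb_flush(). The function name is made up.

    /* Illustrative only: drop the stale TLB entry for one page of 'vma'. */
    static void example_update_mapping(struct vm_area_struct *vma,
                                       unsigned long addr)
    {
        /* ... modify the page-table entry that maps 'addr' here ... */
        flush_tlb_page(vma, addr);
    }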
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/vga.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/vga.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/vga.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/vga.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,20 @@
-+/*
-+ *	Access to VGA videoram
-+ *
-+ *	(c) 1998 Martin Mares <mj at ucw.cz>
-+ */
-+
-+#ifndef _LINUX_ASM_VGA_H_
-+#define _LINUX_ASM_VGA_H_
-+
-+/*
-+ *	On the PC, we can just recalculate addresses and then
-+ *	access the videoram directly without any black magic.
-+ */
-+
-+#define VGA_MAP_MEM(x) (unsigned long)isa_bus_to_virt(x)
-+
-+#define vga_readb(x) (*(x))
-+#define vga_writeb(x,y) (*(y) = (x))
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/asm-x86_64/xor.h linux-2.6.12-xen/include/asm-xen/asm-x86_64/xor.h
---- pristine-linux-2.6.12/include/asm-xen/asm-x86_64/xor.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/asm-x86_64/xor.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,328 @@
-+/*
-+ * x86-64 changes / gcc fixes from Andi Kleen. 
-+ * Copyright 2002 Andi Kleen, SuSE Labs.
-+ *
-+ * This hasn't been optimized for the hammer yet, but there are likely
-+ * no advantages to be gotten from x86-64 here anyways.
-+ */
-+
-+typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
-+
-+/* Doesn't use gcc to save the XMM registers, because there is no easy way to 
-+   tell it to do a clts before the register saving. */
-+#define XMMS_SAVE do {				\
-+	preempt_disable();			\
-+	if (!(current_thread_info()->status & TS_USEDFPU))	\
-+		clts();				\
-+	__asm__ __volatile__ ( 			\
-+		"movups %%xmm0,(%1)	;\n\t"	\
-+		"movups %%xmm1,0x10(%1)	;\n\t"	\
-+		"movups %%xmm2,0x20(%1)	;\n\t"	\
-+		"movups %%xmm3,0x30(%1)	;\n\t"	\
-+		: "=&r" (cr0)			\
-+		: "r" (xmm_save) 		\
-+		: "memory");			\
-+} while(0)
-+
-+#define XMMS_RESTORE do {			\
-+	asm volatile (				\
-+		"sfence			;\n\t"	\
-+		"movups (%1),%%xmm0	;\n\t"	\
-+		"movups 0x10(%1),%%xmm1	;\n\t"	\
-+		"movups 0x20(%1),%%xmm2	;\n\t"	\
-+		"movups 0x30(%1),%%xmm3	;\n\t"	\
-+		:				\
-+		: "r" (cr0), "r" (xmm_save)	\
-+		: "memory");			\
-+	if (!(current_thread_info()->status & TS_USEDFPU))	\
-+		stts();				\
-+	preempt_enable();			\
-+} while(0)
-+
-+#define OFFS(x)		"16*("#x")"
-+#define PF_OFFS(x)	"256+16*("#x")"
-+#define	PF0(x)		"	prefetchnta "PF_OFFS(x)"(%[p1])		;\n"
-+#define LD(x,y)		"       movaps   "OFFS(x)"(%[p1]), %%xmm"#y"	;\n"
-+#define ST(x,y)		"       movaps %%xmm"#y",   "OFFS(x)"(%[p1])	;\n"
-+#define PF1(x)		"	prefetchnta "PF_OFFS(x)"(%[p2])		;\n"
-+#define PF2(x)		"	prefetchnta "PF_OFFS(x)"(%[p3])		;\n"
-+#define PF3(x)		"	prefetchnta "PF_OFFS(x)"(%[p4])		;\n"
-+#define PF4(x)		"	prefetchnta "PF_OFFS(x)"(%[p5])		;\n"
-+#define PF5(x)		"	prefetchnta "PF_OFFS(x)"(%[p6])		;\n"
-+#define XO1(x,y)	"       xorps   "OFFS(x)"(%[p2]), %%xmm"#y"	;\n"
-+#define XO2(x,y)	"       xorps   "OFFS(x)"(%[p3]), %%xmm"#y"	;\n"
-+#define XO3(x,y)	"       xorps   "OFFS(x)"(%[p4]), %%xmm"#y"	;\n"
-+#define XO4(x,y)	"       xorps   "OFFS(x)"(%[p5]), %%xmm"#y"	;\n"
-+#define XO5(x,y)	"       xorps   "OFFS(x)"(%[p6]), %%xmm"#y"	;\n"
-+
-+
-+static void
-+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-+{
-+        unsigned int lines = bytes >> 8;
-+	unsigned long cr0;
-+	xmm_store_t xmm_save[4];
-+
-+	XMMS_SAVE;
-+
-+        asm volatile (
-+#undef BLOCK
-+#define BLOCK(i) \
-+		LD(i,0)					\
-+			LD(i+1,1)			\
-+		PF1(i)					\
-+				PF1(i+2)		\
-+				LD(i+2,2)		\
-+					LD(i+3,3)	\
-+		PF0(i+4)				\
-+				PF0(i+6)		\
-+		XO1(i,0)				\
-+			XO1(i+1,1)			\
-+				XO1(i+2,2)		\
-+					XO1(i+3,3)	\
-+		ST(i,0)					\
-+			ST(i+1,1)			\
-+				ST(i+2,2)		\
-+					ST(i+3,3)	\
-+
-+
-+		PF0(0)
-+				PF0(2)
-+
-+	" .align 32			;\n"
-+        " 1:                            ;\n"
-+
-+		BLOCK(0)
-+		BLOCK(4)
-+		BLOCK(8)
-+		BLOCK(12)
-+
-+        "       addq %[inc], %[p1]           ;\n"
-+        "       addq %[inc], %[p2]           ;\n"
-+		"		decl %[cnt] ; jnz 1b"
-+	: [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
-+	: [inc] "r" (256UL) 
-+        : "memory");
-+
-+	XMMS_RESTORE;
-+}
-+
-+static void
-+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-+	  unsigned long *p3)
-+{
-+	unsigned int lines = bytes >> 8;
-+	xmm_store_t xmm_save[4];
-+	unsigned long cr0;
-+
-+	XMMS_SAVE;
-+
-+        __asm__ __volatile__ (
-+#undef BLOCK
-+#define BLOCK(i) \
-+		PF1(i)					\
-+				PF1(i+2)		\
-+		LD(i,0)					\
-+			LD(i+1,1)			\
-+				LD(i+2,2)		\
-+					LD(i+3,3)	\
-+		PF2(i)					\
-+				PF2(i+2)		\
-+		PF0(i+4)				\
-+				PF0(i+6)		\
-+		XO1(i,0)				\
-+			XO1(i+1,1)			\
-+				XO1(i+2,2)		\
-+					XO1(i+3,3)	\
-+		XO2(i,0)				\
-+			XO2(i+1,1)			\
-+				XO2(i+2,2)		\
-+					XO2(i+3,3)	\
-+		ST(i,0)					\
-+			ST(i+1,1)			\
-+				ST(i+2,2)		\
-+					ST(i+3,3)	\
-+
-+
-+		PF0(0)
-+				PF0(2)
-+
-+	" .align 32			;\n"
-+        " 1:                            ;\n"
-+
-+		BLOCK(0)
-+		BLOCK(4)
-+		BLOCK(8)
-+		BLOCK(12)
-+
-+        "       addq %[inc], %[p1]           ;\n"
-+        "       addq %[inc], %[p2]          ;\n"
-+        "       addq %[inc], %[p3]           ;\n"
-+		"		decl %[cnt] ; jnz 1b"
-+	: [cnt] "+r" (lines),
-+	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
-+	: [inc] "r" (256UL)
-+	: "memory"); 
-+	XMMS_RESTORE;
-+}
-+
-+static void
-+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-+	  unsigned long *p3, unsigned long *p4)
-+{
-+	unsigned int lines = bytes >> 8;
-+	xmm_store_t xmm_save[4]; 
-+	unsigned long cr0;
-+
-+	XMMS_SAVE;
-+
-+        __asm__ __volatile__ (
-+#undef BLOCK
-+#define BLOCK(i) \
-+		PF1(i)					\
-+				PF1(i+2)		\
-+		LD(i,0)					\
-+			LD(i+1,1)			\
-+				LD(i+2,2)		\
-+					LD(i+3,3)	\
-+		PF2(i)					\
-+				PF2(i+2)		\
-+		XO1(i,0)				\
-+			XO1(i+1,1)			\
-+				XO1(i+2,2)		\
-+					XO1(i+3,3)	\
-+		PF3(i)					\
-+				PF3(i+2)		\
-+		PF0(i+4)				\
-+				PF0(i+6)		\
-+		XO2(i,0)				\
-+			XO2(i+1,1)			\
-+				XO2(i+2,2)		\
-+					XO2(i+3,3)	\
-+		XO3(i,0)				\
-+			XO3(i+1,1)			\
-+				XO3(i+2,2)		\
-+					XO3(i+3,3)	\
-+		ST(i,0)					\
-+			ST(i+1,1)			\
-+				ST(i+2,2)		\
-+					ST(i+3,3)	\
-+
-+
-+		PF0(0)
-+				PF0(2)
-+
-+	" .align 32			;\n"
-+        " 1:                            ;\n"
-+
-+		BLOCK(0)
-+		BLOCK(4)
-+		BLOCK(8)
-+		BLOCK(12)
-+
-+        "       addq %[inc], %[p1]           ;\n"
-+        "       addq %[inc], %[p2]           ;\n"
-+        "       addq %[inc], %[p3]           ;\n"
-+        "       addq %[inc], %[p4]           ;\n"
-+	"	decl %[cnt] ; jnz 1b"
-+	: [cnt] "+c" (lines),
-+	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
-+	: [inc] "r" (256UL)
-+        : "memory" );
-+
-+	XMMS_RESTORE;
-+}
-+
-+static void
-+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-+	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
-+{
-+        unsigned int lines = bytes >> 8;
-+	xmm_store_t xmm_save[4];
-+	unsigned long cr0;
-+
-+	XMMS_SAVE;
-+
-+        __asm__ __volatile__ (
-+#undef BLOCK
-+#define BLOCK(i) \
-+		PF1(i)					\
-+				PF1(i+2)		\
-+		LD(i,0)					\
-+			LD(i+1,1)			\
-+				LD(i+2,2)		\
-+					LD(i+3,3)	\
-+		PF2(i)					\
-+				PF2(i+2)		\
-+		XO1(i,0)				\
-+			XO1(i+1,1)			\
-+				XO1(i+2,2)		\
-+					XO1(i+3,3)	\
-+		PF3(i)					\
-+				PF3(i+2)		\
-+		XO2(i,0)				\
-+			XO2(i+1,1)			\
-+				XO2(i+2,2)		\
-+					XO2(i+3,3)	\
-+		PF4(i)					\
-+				PF4(i+2)		\
-+		PF0(i+4)				\
-+				PF0(i+6)		\
-+		XO3(i,0)				\
-+			XO3(i+1,1)			\
-+				XO3(i+2,2)		\
-+					XO3(i+3,3)	\
-+		XO4(i,0)				\
-+			XO4(i+1,1)			\
-+				XO4(i+2,2)		\
-+					XO4(i+3,3)	\
-+		ST(i,0)					\
-+			ST(i+1,1)			\
-+				ST(i+2,2)		\
-+					ST(i+3,3)	\
-+
-+
-+		PF0(0)
-+				PF0(2)
-+
-+	" .align 32			;\n"
-+        " 1:                            ;\n"
-+
-+		BLOCK(0)
-+		BLOCK(4)
-+		BLOCK(8)
-+		BLOCK(12)
-+
-+        "       addq %[inc], %[p1]           ;\n"
-+        "       addq %[inc], %[p2]           ;\n"
-+        "       addq %[inc], %[p3]           ;\n"
-+        "       addq %[inc], %[p4]           ;\n"
-+        "       addq %[inc], %[p5]           ;\n"
-+	"	decl %[cnt] ; jnz 1b"
-+	: [cnt] "+c" (lines),
-+  	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4), 
-+	  [p5] "+r" (p5)
-+	: [inc] "r" (256UL)
-+	: "memory");
-+
-+	XMMS_RESTORE;
-+}
-+
-+static struct xor_block_template xor_block_sse = {
-+        .name = "generic_sse",
-+        .do_2 = xor_sse_2,
-+        .do_3 = xor_sse_3,
-+        .do_4 = xor_sse_4,
-+        .do_5 = xor_sse_5,
-+};
-+
-+#undef XOR_TRY_TEMPLATES
-+#define XOR_TRY_TEMPLATES				\
-+	do {						\
-+		xor_speed(&xor_block_sse);	\
-+	} while (0)
-+
-+/* We force the use of the SSE xor block because it can write around L2.
-+   We may also be able to load into the L1 only depending on how the cpu
-+   deals with a load to a line that is being prefetched.  */
-+#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/balloon.h linux-2.6.12-xen/include/asm-xen/balloon.h
---- pristine-linux-2.6.12/include/asm-xen/balloon.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/balloon.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,70 @@
-+/******************************************************************************
-+ * balloon.h
-+ *
-+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
-+ *
-+ * Copyright (c) 2003, B Dragovic
-+ * Copyright (c) 2003-2004, M Williamson, K Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __ASM_BALLOON_H__
-+#define __ASM_BALLOON_H__
-+
-+/*
-+ * Inform the balloon driver that it should allow some slop for device-driver
-+ * memory activities.
-+ */
-+extern void
-+balloon_update_driver_allowance(
-+	long delta);
-+
-+/* Allocate an empty low-memory page range. */
-+extern struct page *
-+balloon_alloc_empty_page_range(
-+	unsigned long nr_pages);
-+
-+/* Deallocate an empty page range, adding to the balloon. */
-+extern void
-+balloon_dealloc_empty_page_range(
-+	struct page *page, unsigned long nr_pages);
-+
-+/*
-+ * Prevent the balloon driver from changing the memory reservation during
-+ * a driver critical region.
-+ */
-+extern spinlock_t balloon_lock;
-+#define balloon_lock(__flags)   spin_lock_irqsave(&balloon_lock, __flags)
-+#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
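Illustrative only (not part of the patch): how a driver might use the allowance and locking helpers declared above while it temporarily claims guest memory; the function names are made up.

    /* Illustrative only: reserve some slop from the balloon while pages are held. */
    static void example_driver_hold_pages(unsigned long nr_pages)
    {
        unsigned long flags;

        /* keep the balloon from reclaiming memory we are about to use */
        balloon_update_driver_allowance(nr_pages);

        balloon_lock(flags);
        /* ... perform the memory-reservation work here ... */
        balloon_unlock(flags);
    }

    static void example_driver_release_pages(unsigned long nr_pages)
    {
        /* hand the slop back once the pages are returned */
        balloon_update_driver_allowance(-(long)nr_pages);
    }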
-+
-+#endif /* __ASM_BALLOON_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/driver_util.h linux-2.6.12-xen/include/asm-xen/driver_util.h
---- pristine-linux-2.6.12/include/asm-xen/driver_util.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/driver_util.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,26 @@
-+
-+#ifndef __ASM_XEN_DRIVER_UTIL_H__
-+#define __ASM_XEN_DRIVER_UTIL_H__
-+
-+#include <linux/config.h>
-+#include <linux/vmalloc.h>
-+
-+/* Allocate/destroy a 'vmalloc' VM area. */
-+extern struct vm_struct *alloc_vm_area(unsigned long size);
-+extern void free_vm_area(struct vm_struct *area);
-+
-+/* Lock an area so that PTEs are accessible in the current address space. */
-+extern void lock_vm_area(struct vm_struct *area);
-+extern void unlock_vm_area(struct vm_struct *area);
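A usage sketch (illustrative, not part of the patch): reserve an empty kernel virtual range, populate it, and hand back its address; the function name is made up.

    /* Illustrative only: reserve one page of kernel virtual address space. */
    static void *example_map_area(void)
    {
        struct vm_struct *area;

        area = alloc_vm_area(PAGE_SIZE);
        if (area == NULL)
            return NULL;

        lock_vm_area(area);
        /* ... install PTEs for area->addr here (e.g. a grant mapping) ... */
        unlock_vm_area(area);

        return area->addr;  /* later released with free_vm_area(area) */
    }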
-+
-+#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/evtchn.h linux-2.6.12-xen/include/asm-xen/evtchn.h
---- pristine-linux-2.6.12/include/asm-xen/evtchn.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/evtchn.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,123 @@
-+/******************************************************************************
-+ * evtchn.h
-+ * 
-+ * Communication via Xen event channels.
-+ * Also definitions for the device that demuxes notifications to userspace.
-+ * 
-+ * Copyright (c) 2004-2005, K A Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __ASM_EVTCHN_H__
-+#define __ASM_EVTCHN_H__
-+
-+#include <linux/config.h>
-+#include <linux/interrupt.h>
-+#include <asm/hypervisor.h>
-+#include <asm/ptrace.h>
-+#include <asm/synch_bitops.h>
-+#include <asm-xen/xen-public/event_channel.h>
-+#include <linux/smp.h>
-+
-+/*
-+ * LOW-LEVEL DEFINITIONS
-+ */
-+
-+/*
-+ * Dynamically bind an event source to an IRQ-like callback handler.
-+ * On some platforms this may not be implemented via the Linux IRQ subsystem.
-+ * The IRQ argument passed to the callback handler is the same as returned
-+ * from the bind call. It may not correspond to a Linux IRQ number.
-+ * Returns IRQ or negative errno.
-+ * UNBIND: Takes IRQ to unbind from; automatically closes the event channel.
-+ */
-+extern int bind_evtchn_to_irqhandler(
-+	unsigned int evtchn,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id);
-+extern int bind_virq_to_irqhandler(
-+	unsigned int virq,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id);
-+extern int bind_ipi_to_irqhandler(
-+	unsigned int ipi,
-+	unsigned int cpu,
-+	irqreturn_t (*handler)(int, void *, struct pt_regs *),
-+	unsigned long irqflags,
-+	const char *devname,
-+	void *dev_id);
-+
-+/*
-+ * Common unbind function for all event sources. Takes IRQ to unbind from.
-+ * Automatically closes the underlying event channel (even for bindings
-+ * made with bind_evtchn_to_irqhandler()).
-+ */
-+extern void unbind_from_irqhandler(unsigned int irq, void *dev_id);
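Illustrative only (not part of the patch): the usual frontend pattern for wiring an event channel to a handler and releasing it again. struct example_info and its fields are hypothetical; the handler signature matches the declarations above.

    struct example_info {
        unsigned int evtchn;    /* event channel number, e.g. read from xenstore */
        int irq;                /* value returned by bind_evtchn_to_irqhandler() */
    };

    static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
        /* acknowledge and process the event for 'dev_id' here */
        return IRQ_HANDLED;
    }

    static int example_connect(struct example_info *info)
    {
        int irq = bind_evtchn_to_irqhandler(info->evtchn, example_interrupt,
                                            0, "example", info);
        if (irq < 0)
            return irq;
        info->irq = irq;
        return 0;
    }

    static void example_disconnect(struct example_info *info)
    {
        unbind_from_irqhandler(info->irq, info);    /* also closes the channel */
    }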
-+
-+extern void irq_resume(void);
-+
-+/* Entry point for notifications into Linux subsystems. */
-+asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
-+
-+/* Entry point for notifications into the userland character device. */
-+extern void evtchn_device_upcall(int port);
-+
-+extern void mask_evtchn(int port);
-+extern void unmask_evtchn(int port);
-+
-+static inline void clear_evtchn(int port)
-+{
-+	shared_info_t *s = HYPERVISOR_shared_info;
-+	synch_clear_bit(port, &s->evtchn_pending[0]);
-+}
-+
-+static inline void notify_remote_via_evtchn(int port)
-+{
-+	evtchn_op_t op;
-+	op.cmd         = EVTCHNOP_send,
-+	op.u.send.port = port;
-+	(void)HYPERVISOR_event_channel_op(&op);
-+}
-+
-+/*
-+ * Unlike notify_remote_via_evtchn(), this is safe to use across
-+ * save/restore. Notifications on a broken connection are silently dropped.
-+ */
-+extern void notify_remote_via_irq(int irq);
-+
-+#endif /* __ASM_EVTCHN_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/features.h linux-2.6.12-xen/include/asm-xen/features.h
---- pristine-linux-2.6.12/include/asm-xen/features.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/features.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,20 @@
-+/******************************************************************************
-+ * features.h
-+ *
-+ * Query the features reported by Xen.
-+ *
-+ * Copyright (c) 2006, Ian Campbell
-+ */
-+
-+#ifndef __ASM_XEN_FEATURES_H__
-+#define __ASM_XEN_FEATURES_H__
-+
-+#include <asm-xen/xen-public/version.h>
-+
-+extern void setup_xen_features(void);
-+
-+extern unsigned long xen_features[XENFEAT_NR_SUBMAPS];
-+
-+#define xen_feature(flag)	(test_bit(flag, xen_features))
-+
-+#endif /* __ASM_XEN_FEATURES_H__ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/foreign_page.h linux-2.6.12-xen/include/asm-xen/foreign_page.h
---- pristine-linux-2.6.12/include/asm-xen/foreign_page.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/foreign_page.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,40 @@
-+/******************************************************************************
-+ * foreign_page.h
-+ * 
-+ * Provide a "foreign" page type that is owned by a foreign allocator and
-+ * not the normal buddy allocator in page_alloc.c
-+ * 
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __ASM_XEN_FOREIGN_PAGE_H__
-+#define __ASM_XEN_FOREIGN_PAGE_H__
-+
-+#define PG_foreign		PG_arch_1
-+
-+#define PageForeign(page)	test_bit(PG_foreign, &(page)->flags)
-+
-+#define SetPageForeign(page, dtor) do {		\
-+	set_bit(PG_foreign, &(page)->flags);	\
-+	(page)->mapping = (void *)dtor;		\
-+} while (0)
-+
-+#define ClearPageForeign(page) do {		\
-+	clear_bit(PG_foreign, &(page)->flags);	\
-+	(page)->mapping = NULL;			\
-+} while (0)
-+
-+#define PageForeignDestructor(page)	\
-+	( (void (*) (struct page *)) (page)->mapping )
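A short usage sketch (illustrative, not part of the patch): mark a page as foreign with a destructor, and show the check a release path makes; the names are made up.

    /* Illustrative only: hand a page over to a foreign allocator. */
    static void example_page_dtor(struct page *page)
    {
        /* return the page to its real owner here */
    }

    static void example_mark_foreign(struct page *page)
    {
        SetPageForeign(page, example_page_dtor);
    }

    static void example_release(struct page *page)
    {
        if (PageForeign(page))
            /* not a buddy-allocator page: invoke its destructor instead */
            (PageForeignDestructor(page))(page);
    }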
-+
-+#endif /* __ASM_XEN_FOREIGN_PAGE_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/gnttab.h linux-2.6.12-xen/include/asm-xen/gnttab.h
---- pristine-linux-2.6.12/include/asm-xen/gnttab.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/gnttab.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,98 @@
-+/******************************************************************************
-+ * gnttab.h
-+ * 
-+ * Two sets of functionality:
-+ * 1. Granting foreign access to our memory reservation.
-+ * 2. Accessing others' memory reservations via grant references.
-+ * (i.e., mechanisms for both sender and recipient of grant references)
-+ * 
-+ * Copyright (c) 2004-2005, K A Fraser
-+ * Copyright (c) 2005, Christopher Clark
-+ */
-+
-+#ifndef __ASM_GNTTAB_H__
-+#define __ASM_GNTTAB_H__
-+
-+#include <linux/config.h>
-+#include <asm/hypervisor.h>
-+#include <asm-xen/xen-public/grant_table.h>
-+
-+/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
-+#ifdef __ia64__
-+#define NR_GRANT_FRAMES 1
-+#else
-+#define NR_GRANT_FRAMES 4
-+#endif
-+
-+struct gnttab_free_callback {
-+	struct gnttab_free_callback *next;
-+	void (*fn)(void *);
-+	void *arg;
-+	u16 count;
-+};
-+
-+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
-+				int readonly);
-+
-+/*
-+ * End access through the given grant reference, iff the grant entry is no
-+ * longer in use.  Return 1 if the grant entry was freed, 0 if it is still in
-+ * use.
-+ */
-+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
-+
-+/*
-+ * Eventually end access through the given grant reference, and once that
-+ * access has been ended, free the given page too.  Access will be ended
-+ * immediately iff the grant entry is not in use, otherwise it will happen
-+ * some time later.  page may be 0, in which case no freeing will occur.
-+ */
-+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
-+			       unsigned long page);
-+
-+int gnttab_grant_foreign_transfer(domid_t domid);
-+
-+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
-+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
-+
-+int gnttab_query_foreign_access(grant_ref_t ref);
-+
-+/*
-+ * operations on reserved batches of grant references
-+ */
-+int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
-+
-+void gnttab_free_grant_reference(grant_ref_t ref);
-+
-+void gnttab_free_grant_references(grant_ref_t head);
-+
-+int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
-+
-+void gnttab_release_grant_reference(grant_ref_t *private_head,
-+				    grant_ref_t release);
-+
-+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
-+				  void (*fn)(void *), void *arg, u16 count);
-+
-+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
-+				     unsigned long frame, int readonly);
-+
-+void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid);
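For illustration (not part of the patch): the sender-side grant lifecycle using the declarations above. The helper names are made up, and a virt_to_mfn()-style translation to a machine frame number is assumed to be available from the arch headers.

    /* Illustrative only: grant another domain read-only access to one of our pages. */
    static int example_grant_page(domid_t otherend_id, struct page *page)
    {
        int ref;

        ref = gnttab_grant_foreign_access(otherend_id,
                                          virt_to_mfn(page_address(page)),
                                          1 /* readonly */);
        if (ref < 0)
            return ref;

        /* ... advertise 'ref' to the other end, e.g. via xenstore ... */
        return ref;
    }

    static void example_ungrant_page(int ref, unsigned long page)
    {
        /* ends access and frees 'page' once the remote side is done with it */
        gnttab_end_foreign_access(ref, 1 /* readonly */, page);
    }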
-+
-+#ifdef __ia64__
-+#define gnttab_map_vaddr(map) __va(map.dev_bus_addr)
-+#else
-+#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
-+#endif
-+
-+#endif /* __ASM_GNTTAB_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/linux-public/evtchn.h linux-2.6.12-xen/include/asm-xen/linux-public/evtchn.h
---- pristine-linux-2.6.12/include/asm-xen/linux-public/evtchn.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/linux-public/evtchn.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,98 @@
-+/******************************************************************************
-+ * evtchn.h
-+ * 
-+ * Interface to /dev/xen/evtchn.
-+ * 
-+ * Copyright (c) 2003-2005, K A Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __LINUX_PUBLIC_EVTCHN_H__
-+#define __LINUX_PUBLIC_EVTCHN_H__
-+
-+/* /dev/xen/evtchn resides at device number major=10, minor=201 */
-+#define EVTCHN_MINOR 201
-+
-+/*
-+ * Bind a fresh port to VIRQ @virq.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_VIRQ				\
-+	_IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
-+struct ioctl_evtchn_bind_virq {
-+	unsigned int virq;
-+};
-+
-+/*
-+ * Bind a fresh port to remote <@remote_domain, @remote_port>.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_INTERDOMAIN			\
-+	_IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
-+struct ioctl_evtchn_bind_interdomain {
-+	unsigned int remote_domain, remote_port;
-+};
-+
-+/*
-+ * Allocate a fresh port for binding to @remote_domain.
-+ * Return allocated port.
-+ */
-+#define IOCTL_EVTCHN_BIND_UNBOUND_PORT			\
-+	_IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
-+struct ioctl_evtchn_bind_unbound_port {
-+	unsigned int remote_domain;
-+};
-+
-+/*
-+ * Unbind previously allocated @port.
-+ */
-+#define IOCTL_EVTCHN_UNBIND				\
-+	_IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
-+struct ioctl_evtchn_unbind {
-+	unsigned int port;
-+};
-+
-+/*
-+ * Send an event notification to the previously bound @port.
-+ */
-+#define IOCTL_EVTCHN_NOTIFY				\
-+	_IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
-+struct ioctl_evtchn_notify {
-+	unsigned int port;
-+};
-+
-+/* Clear and reinitialise the event buffer. Clear error condition. */
-+#define IOCTL_EVTCHN_RESET				\
-+	_IOC(_IOC_NONE, 'E', 5, 0)
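Illustrative only (not part of the patch): a minimal userspace consumer of this device, binding a VIRQ and returning the open file descriptor; error handling is kept to a minimum and the function name is made up.

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    /* Illustrative only: bind VIRQ 'virq' through /dev/xen/evtchn. */
    static int example_bind_virq(unsigned int virq)
    {
        struct ioctl_evtchn_bind_virq bind = { .virq = virq };
        int fd, port;

        fd = open("/dev/xen/evtchn", O_RDWR);
        if (fd < 0)
            return -1;

        /* the ioctl returns the freshly allocated local port, or a negative error */
        port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
        if (port < 0) {
            close(fd);
            return -1;
        }

        /* reads on 'fd' now report pending ports; see the evtchn device driver */
        return fd;
    }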
-+
-+#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/linux-public/privcmd.h linux-2.6.12-xen/include/asm-xen/linux-public/privcmd.h
---- pristine-linux-2.6.12/include/asm-xen/linux-public/privcmd.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/linux-public/privcmd.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,91 @@
-+/******************************************************************************
-+ * privcmd.h
-+ * 
-+ * Interface to /proc/xen/privcmd.
-+ * 
-+ * Copyright (c) 2003-2005, K A Fraser
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef __LINUX_PUBLIC_PRIVCMD_H__
-+#define __LINUX_PUBLIC_PRIVCMD_H__
-+
-+#ifndef __user
-+#define __user
-+#endif
-+
-+typedef struct privcmd_hypercall
-+{
-+	unsigned long op;
-+	unsigned long arg[5];
-+} privcmd_hypercall_t;
-+
-+typedef struct privcmd_mmap_entry {
-+	unsigned long va;
-+	unsigned long mfn;
-+	unsigned long npages;
-+} privcmd_mmap_entry_t; 
-+
-+typedef struct privcmd_mmap {
-+	int num;
-+	domid_t dom; /* target domain */
-+	privcmd_mmap_entry_t __user *entry;
-+} privcmd_mmap_t; 
-+
-+typedef struct privcmd_mmapbatch {
-+	int num;     /* number of pages to populate */
-+	domid_t dom; /* target domain */
-+	unsigned long addr;  /* virtual address */
-+	unsigned long __user *arr; /* array of mfns - top nibble set on err */
-+} privcmd_mmapbatch_t; 
-+
-+typedef struct privcmd_blkmsg
-+{
-+	unsigned long op;
-+	void         *buf;
-+	int           buf_size;
-+} privcmd_blkmsg_t;
-+
-+/*
-+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
-+ * @arg: &privcmd_hypercall_t
-+ * Return: Value returned from execution of the specified hypercall.
-+ */
-+#define IOCTL_PRIVCMD_HYPERCALL					\
-+	_IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
-+#define IOCTL_PRIVCMD_MMAP					\
-+	_IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
-+#define IOCTL_PRIVCMD_MMAPBATCH					\
-+	_IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
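For illustration only (not part of the patch): issuing a hypercall from a privileged userspace tool through /proc/xen/privcmd. The hypercall and subcommand constants (__HYPERVISOR_xen_version, XENVER_version) are assumed to come from the xen-public headers; the function name is made up.

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    /* Illustrative only: ask Xen for its version number via privcmd. */
    static int example_xen_version(void)
    {
        privcmd_hypercall_t call = {
            .op  = __HYPERVISOR_xen_version,    /* assumed constant */
            .arg = { XENVER_version, 0 },       /* assumed constant */
        };
        int fd, ret;

        fd = open("/proc/xen/privcmd", O_RDWR);
        if (fd < 0)
            return -1;

        /* the ioctl return value is the hypercall's own return value */
        ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
        close(fd);
        return ret;
    }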
-+
-+#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/net_driver_util.h linux-2.6.12-xen/include/asm-xen/net_driver_util.h
---- pristine-linux-2.6.12/include/asm-xen/net_driver_util.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/net_driver_util.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,56 @@
-+/*****************************************************************************
-+ *
-+ * Utility functions for Xen network devices.
-+ *
-+ * Copyright (c) 2005 XenSource Ltd.
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following
-+ * license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject
-+ * to the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-+ * DEALINGS IN THE SOFTWARE.
-+ */
-+
-+#ifndef _ASM_XEN_NET_DRIVER_UTIL_H
-+#define _ASM_XEN_NET_DRIVER_UTIL_H
-+
-+
-+#include <asm-xen/xenbus.h>
-+
-+
-+/**
-+ * Read the 'mac' node at the given device's node in the store, and parse that
-+ * as colon-separated octets, placing the result in the given mac array.  mac must
-+ * be a preallocated array of length ETH_ALEN (as declared in linux/if_ether.h).
-+ * Return 0 on success, or -errno on error.
-+ */
-+int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]);
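A usage sketch (illustrative, not part of the patch): a netfront-style probe reading the MAC address published for its device node; the net_device handling is elided and the function name is made up.

    /* Illustrative only: fetch the MAC the toolstack wrote under this device's node. */
    static int example_read_mac(struct xenbus_device *dev)
    {
        u8 mac[ETH_ALEN];   /* ETH_ALEN from linux/if_ether.h */
        int err;

        err = xen_net_read_mac(dev, mac);
        if (err)
            return err;     /* 'mac' node missing or malformed */

        /* ... copy 'mac' into the net_device before registering it ... */
        return 0;
    }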
-+
-+
-+#endif /* _ASM_XEN_NET_DRIVER_UTIL_H */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/tpmfe.h linux-2.6.12-xen/include/asm-xen/tpmfe.h
---- pristine-linux-2.6.12/include/asm-xen/tpmfe.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/tpmfe.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,33 @@
-+#ifndef TPM_FE_H
-+#define TPM_FE_H
-+
-+struct tpmfe_device {
-+	/*
-+	 * Let upper layer receive data from front-end
-+	 */
-+	int (*receive)(const u8 *buffer, size_t count, const void *ptr);
-+	/*
-+	 * Indicate the status of the front-end to the upper
-+	 * layer.
-+	 */
-+	void (*status)(unsigned int flags);
-+
-+	/*
-+	 * This field indicates the maximum size the driver can
-+	 * transfer in one chunk. It is filled out by the front-end
-+	 * driver and should be propagated to the generic tpm driver
-+	 * for allocation of buffers.
-+	 */
-+	unsigned int max_tx_size;
-+};
-+
-+enum {
-+	TPMFE_STATUS_DISCONNECTED = 0x0,
-+	TPMFE_STATUS_CONNECTED = 0x1
-+};
-+
-+int tpm_fe_send(const u8 * buf, size_t count, void *ptr);
-+int tpm_fe_register_receiver(struct tpmfe_device *);
-+void tpm_fe_unregister_receiver(void);
-+
-+#endif
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xenbus.h linux-2.6.12-xen/include/asm-xen/xenbus.h
---- pristine-linux-2.6.12/include/asm-xen/xenbus.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xenbus.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,254 @@
-+/******************************************************************************
-+ * xenbus.h
-+ *
-+ * Talks to Xen Store to figure out what devices we have.
-+ *
-+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
-+ * Copyright (C) 2005 XenSource Ltd.
-+ * 
-+ * This file may be distributed separately from the Linux kernel, or
-+ * incorporated into other software packages, subject to the following license:
-+ * 
-+ * Permission is hereby granted, free of charge, to any person obtaining a copy
-+ * of this source file (the "Software"), to deal in the Software without
-+ * restriction, including without limitation the rights to use, copy, modify,
-+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
-+ * and to permit persons to whom the Software is furnished to do so, subject to
-+ * the following conditions:
-+ * 
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ * 
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+ * IN THE SOFTWARE.
-+ */
-+
-+#ifndef _ASM_XEN_XENBUS_H
-+#define _ASM_XEN_XENBUS_H
-+
-+#include <linux/device.h>
-+#include <linux/notifier.h>
-+#include <asm/semaphore.h>
-+#include <asm-xen/xen-public/io/xenbus.h>
-+#include <asm-xen/xen-public/io/xs_wire.h>
-+
-+#define XBT_NULL 0
-+
-+/* Register callback to watch this node. */
-+struct xenbus_watch
-+{
-+	struct list_head list;
-+
-+	/* Path being watched. */
-+	const char *node;
-+
-+	/* Callback (executed in a process context with no locks held). */
-+	void (*callback)(struct xenbus_watch *,
-+			 const char **vec, unsigned int len);
-+};
-+
-+
-+/* A xenbus device. */
-+struct xenbus_device {
-+	const char *devicetype;
-+	const char *nodename;
-+	const char *otherend;
-+	int otherend_id;
-+	struct xenbus_watch otherend_watch;
-+	struct device dev;
-+	int has_error;
-+	void *data;
-+};
-+
-+static inline struct xenbus_device *to_xenbus_device(struct device *dev)
-+{
-+	return container_of(dev, struct xenbus_device, dev);
-+}
-+
-+struct xenbus_device_id
-+{
-+	/* .../device/<device_type>/<identifier> */
-+	char devicetype[32]; 	/* General class of device. */
-+};
-+
-+/* A xenbus driver. */
-+struct xenbus_driver {
-+	char *name;
-+	struct module *owner;
-+	const struct xenbus_device_id *ids;
-+	int (*probe)(struct xenbus_device *dev,
-+		     const struct xenbus_device_id *id);
-+	void (*otherend_changed)(struct xenbus_device *dev,
-+				 XenbusState backend_state);
-+	int (*remove)(struct xenbus_device *dev);
-+	int (*suspend)(struct xenbus_device *dev);
-+	int (*resume)(struct xenbus_device *dev);
-+	int (*hotplug)(struct xenbus_device *, char **, int, char *, int);
-+	struct device_driver driver;
-+	int (*read_otherend_details)(struct xenbus_device *dev);
-+};
-+
-+static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
-+{
-+	return container_of(drv, struct xenbus_driver, driver);
-+}
-+
-+int xenbus_register_frontend(struct xenbus_driver *drv);
-+int xenbus_register_backend(struct xenbus_driver *drv);
-+void xenbus_unregister_driver(struct xenbus_driver *drv);
-+
-+typedef u32 xenbus_transaction_t;
-+
-+char **xenbus_directory(xenbus_transaction_t t,
-+			const char *dir, const char *node, unsigned int *num);
-+void *xenbus_read(xenbus_transaction_t t,
-+		  const char *dir, const char *node, unsigned int *len);
-+int xenbus_write(xenbus_transaction_t t,
-+		 const char *dir, const char *node, const char *string);
-+int xenbus_mkdir(xenbus_transaction_t t,
-+		 const char *dir, const char *node);
-+int xenbus_exists(xenbus_transaction_t t,
-+		  const char *dir, const char *node);
-+int xenbus_rm(xenbus_transaction_t t, const char *dir, const char *node);
-+int xenbus_transaction_start(xenbus_transaction_t *t);
-+int xenbus_transaction_end(xenbus_transaction_t t, int abort);
-+
-+/* Single read and scanf: returns -errno or num scanned if > 0. */
-+int xenbus_scanf(xenbus_transaction_t t,
-+		 const char *dir, const char *node, const char *fmt, ...)
-+	__attribute__((format(scanf, 4, 5)));
-+
-+/* Single printf and write: returns -errno or 0. */
-+int xenbus_printf(xenbus_transaction_t t,
-+		  const char *dir, const char *node, const char *fmt, ...)
-+	__attribute__((format(printf, 4, 5)));
-+
-+/* Generic read function: NULL-terminated triples of name,
-+ * sprintf-style type string, and pointer. Returns 0 or errno.*/
-+int xenbus_gather(xenbus_transaction_t t, const char *dir, ...);
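A minimal sketch of the transaction pattern these declarations support; the node names ("ring-ref", "event-channel") and the -EAGAIN retry on a conflicting commit are assumptions for illustration.

#include <linux/errno.h>
#include <asm-xen/xenbus.h>

/* Illustrative: read two nodes atomically, retrying if the commit fails. */
static int read_ring_info(const char *dir,
                          unsigned long *ring_ref, unsigned int *evtchn)
{
        xenbus_transaction_t xbt;
        int err;

again:
        err = xenbus_transaction_start(&xbt);
        if (err)
                return err;

        err = xenbus_gather(xbt, dir,
                            "ring-ref", "%lu", ring_ref,
                            "event-channel", "%u", evtchn,
                            NULL);
        if (err) {
                xenbus_transaction_end(xbt, 1);         /* abort */
                return err;
        }

        err = xenbus_transaction_end(xbt, 0);           /* commit */
        if (err == -EAGAIN)
                goto again;                             /* assumed conflict signal */
        return err;
}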
-+
-+/* notifier routines for when the xenstore comes up */
-+int register_xenstore_notifier(struct notifier_block *nb);
-+void unregister_xenstore_notifier(struct notifier_block *nb);
-+
-+int register_xenbus_watch(struct xenbus_watch *watch);
-+void unregister_xenbus_watch(struct xenbus_watch *watch);
-+void xs_suspend(void);
-+void xs_resume(void);
-+
-+/* Used by xenbus_dev to borrow kernel's store connection. */
-+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
-+
-+/* Called from xen core code. */
-+void xenbus_suspend(void);
-+void xenbus_resume(void);
-+
-+#define XENBUS_IS_ERR_READ(str) ({			\
-+	if (!IS_ERR(str) && strlen(str) == 0) {		\
-+		kfree(str);				\
-+		str = ERR_PTR(-ERANGE);			\
-+	}						\
-+	IS_ERR(str);					\
-+})
-+
-+#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
-+
-+
-+/**
-+ * Register a watch on the given path, using the given xenbus_watch structure
-+ * for storage, and the given callback function as the callback.  Return 0 on
-+ * success, or -errno on error.  On success, the given path will be saved as
-+ * watch->node, and remains the caller's to free.  On error, watch->node will
-+ * be NULL, the device will switch to XenbusStateClosing, and the error will
-+ * be saved in the store.
-+ */
-+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
-+		      struct xenbus_watch *watch, 
-+		      void (*callback)(struct xenbus_watch *,
-+				       const char **, unsigned int));
-+
-+
-+/**
-+ * Register a watch on the given path/path2, using the given xenbus_watch
-+ * structure for storage, and the given callback function as the callback.
-+ * Return 0 on success, or -errno on error.  On success, the watched path
-+ * (path/path2) will be saved as watch->node, and becomes the caller's to
-+ * kfree().  On error, watch->node will be NULL, so the caller has nothing to
-+ * free, the device will switch to XenbusStateClosing, and the error will be
-+ * saved in the store.
-+ */
-+int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
-+		       const char *path2, struct xenbus_watch *watch, 
-+		       void (*callback)(struct xenbus_watch *,
-+					const char **, unsigned int));
-+
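A minimal sketch of a watch registered with xenbus_watch_path2(); the callback body and the choice of watching the peer's "state" node are illustrative assumptions.

/* Runs in process context with no locks held, per the comment above. */
static void backend_changed(struct xenbus_watch *watch,
                            const char **vec, unsigned int len)
{
        /* typically: re-read the watched node and react to its new value */
}

static int watch_backend_state(struct xenbus_device *dev,
                               struct xenbus_watch *watch)
{
        /* On error the helper switches the device to XenbusStateClosing and
         * records the error in the store, so the caller can simply return. */
        return xenbus_watch_path2(dev, dev->otherend, "state",
                                  watch, backend_changed);
}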
-+
-+/**
-+ * Advertise in the store a change of the given driver to the given new_state.
-+ * Perform the change inside the given transaction xbt.  xbt may be NULL, in
-+ * which case this is performed inside its own transaction.  Return 0 on
-+ * success, or -errno on error.  On error, the device will switch to
-+ * XenbusStateClosing, and the error will be saved in the store.
-+ */
-+int xenbus_switch_state(struct xenbus_device *dev,
-+			xenbus_transaction_t xbt,
-+			XenbusState new_state);
-+
-+
-+/**
-+ * Grant access to the given ring_mfn to the peer of the given device.  Return
-+ * 0 on success, or -errno on error.  On error, the device will switch to
-+ * XenbusStateClosing, and the error will be saved in the store.
-+ */
-+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
-+
-+
-+/**
-+ * Allocate an event channel for the given xenbus_device, assigning the newly
-+ * created local port to *port.  Return 0 on success, or -errno on error.  On
-+ * error, the device will switch to XenbusStateClosing, and the error will be
-+ * saved in the store.
-+ */
-+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
-+
-+
-+/**
-+ * Return the state of the driver rooted at the given store path, or
-+ * XenbusStateClosed if no state can be read.
-+ */
-+XenbusState xenbus_read_driver_state(const char *path);
-+
-+
-+/***
-+ * Report the given negative errno into the store, along with the given
-+ * formatted message.
-+ */
-+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt,
-+		      ...);
-+
-+
-+/***
-+ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
-+ * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly
-+ * closedown of this driver and its peer.
-+ */
-+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt,
-+		      ...);
-+
-+
-+#endif /* _ASM_XEN_XENBUS_H */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
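A minimal sketch of a frontend connect sequence built from the helpers declared above; the node name, the XenbusStateInitialised value (from the included io/xenbus.h, not shown here) and the error-handling shape are assumptions for illustration.

/* Illustrative: grant the shared ring, allocate an event channel, publish
 * the channel in the store, then advertise the new state. */
static int connect_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
        int evtchn, err;

        err = xenbus_grant_ring(dev, ring_mfn);
        if (err < 0)
                return err;     /* helper has already reported the error */

        err = xenbus_alloc_evtchn(dev, &evtchn);
        if (err)
                return err;

        err = xenbus_printf(XBT_NULL, dev->nodename,
                            "event-channel", "%d", evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err, "writing event-channel");
                return err;
        }

        return xenbus_switch_state(dev, XBT_NULL, XenbusStateInitialised);
}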
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xencons.h linux-2.6.12-xen/include/asm-xen/xencons.h
---- pristine-linux-2.6.12/include/asm-xen/xencons.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xencons.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,14 @@
-+#ifndef __ASM_XENCONS_H__
-+#define __ASM_XENCONS_H__
-+
-+void xencons_force_flush(void);
-+void xencons_resume(void);
-+
-+/* Interrupt work hooks. Receive data, or kick data out. */
-+void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
-+void xencons_tx(void);
-+
-+int xencons_ring_init(void);
-+int xencons_ring_send(const char *data, unsigned len);
-+
-+#endif /* __ASM_XENCONS_H__ */
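A small usage sketch for the console ring; that xencons_ring_send() returns the number of bytes it accepted is an assumption, the header alone does not say so.

/* Illustrative: push a buffer through the ring, stopping if it fills up. */
static void console_puts(const char *s, unsigned len)
{
        while (len) {
                int n = xencons_ring_send(s, len);
                if (n <= 0)
                        break;          /* ring full (or error) in this sketch */
                s += n;
                len -= n;
        }
}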
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen_proc.h linux-2.6.12-xen/include/asm-xen/xen_proc.h
---- pristine-linux-2.6.12/include/asm-xen/xen_proc.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen_proc.h	2006-02-16 23:44:08.000000000 +0100
-@@ -0,0 +1,23 @@
-+
-+#ifndef __ASM_XEN_PROC_H__
-+#define __ASM_XEN_PROC_H__
-+
-+#include <linux/config.h>
-+#include <linux/proc_fs.h>
-+
-+extern struct proc_dir_entry *create_xen_proc_entry(
-+	const char *name, mode_t mode);
-+extern void remove_xen_proc_entry(
-+	const char *name);
-+
-+#endif /* __ASM_XEN_PROC_H__ */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
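A minimal sketch of creating and removing a /proc/xen entry with the two helpers above; the entry name "example" and its mode are illustrative.

#include <linux/init.h>
#include <linux/errno.h>
#include <asm-xen/xen_proc.h>

static struct proc_dir_entry *example_entry;

static int __init example_init(void)
{
        example_entry = create_xen_proc_entry("example", 0400);
        if (example_entry == NULL)
                return -ENOMEM;
        /* a real caller would hook up ->read_proc or ->proc_fops here */
        return 0;
}

static void __exit example_exit(void)
{
        remove_xen_proc_entry("example");
}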
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/acm.h linux-2.6.12-xen/include/asm-xen/xen-public/acm.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/acm.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/acm.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,181 @@
-+/*
-+ * acm.h: Xen access control module interface definitions
-+ *
-+ * Reiner Sailer <sailer at watson.ibm.com>
-+ * Copyright (c) 2005, International Business Machines Corporation.
-+ */
-+
-+#ifndef _XEN_PUBLIC_ACM_H
-+#define _XEN_PUBLIC_ACM_H
-+
-+#include "xen.h"
-+#include "sched_ctl.h"
-+
-+/* if ACM_DEBUG defined, all hooks should
-+ * print a short trace message (comment it out
-+ * when not in testing mode )
-+ */
-+/* #define ACM_DEBUG */
-+
-+#ifdef ACM_DEBUG
-+#  define printkd(fmt, args...) printk(fmt,## args)
-+#else
-+#  define printkd(fmt, args...)
-+#endif
-+
-+/* default ssid reference value if not supplied */
-+#define ACM_DEFAULT_SSID  0x0
-+#define ACM_DEFAULT_LOCAL_SSID  0x0
-+
-+/* Internal ACM ERROR types */
-+#define ACM_OK     0
-+#define ACM_UNDEF   -1
-+#define ACM_INIT_SSID_ERROR  -2
-+#define ACM_INIT_SOID_ERROR  -3
-+#define ACM_ERROR          -4
-+
-+/* External ACCESS DECISIONS */
-+#define ACM_ACCESS_PERMITTED        0
-+#define ACM_ACCESS_DENIED           -111
-+#define ACM_NULL_POINTER_ERROR      -200
-+
-+/* primary policy in lower 4 bits */
-+#define ACM_NULL_POLICY 0
-+#define ACM_CHINESE_WALL_POLICY 1
-+#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
-+#define ACM_POLICY_UNDEFINED 15
-+
-+/* combinations have secondary policy component in higher 4bit */
-+#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
-+    ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
-+
-+/* policy: */
-+#define ACM_POLICY_NAME(X) \
-+ ((X) == (ACM_NULL_POLICY)) ? "NULL policy" :                        \
-+    ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL policy" :        \
-+    ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT policy" : \
-+    ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT policy" : \
-+     "UNDEFINED policy"
-+
-+/* the following policy versions must be increased
-+ * whenever the interpretation of the related
-+ * policy's data structure changes
-+ */
-+#define ACM_POLICY_VERSION 1
-+#define ACM_CHWALL_VERSION 1
-+#define ACM_STE_VERSION  1
-+
-+/* defines a ssid reference used by xen */
-+typedef uint32_t ssidref_t;
-+
-+/* hooks that are known to domains */
-+enum acm_hook_type {NONE=0, SHARING};
-+
-+/* -------security policy relevant type definitions-------- */
-+
-+/* type identifier; compares to "equal" or "not equal" */
-+typedef uint16_t domaintype_t;
-+
-+/* CHINESE WALL POLICY DATA STRUCTURES
-+ *
-+ * current accumulated conflict type set:
-+ * When a domain is started and has a type that is in
-+ * a conflict set, the conflicting types are incremented in
-+ * the aggregate set. When a domain is destroyed, the 
-+ * conflicting types to its type are decremented.
-+ * If a domain has multiple types, this procedure works over
-+ * all those types.
-+ *
-+ * conflict_aggregate_set[i] holds the number of
-+ *   running domains that have a conflict with type i.
-+ *
-+ * running_types[i] holds the number of running domains
-+ *        that include type i in their ssidref-referenced type set
-+ *
-+ * conflict_sets[i][j] is "0" if type j has no conflict
-+ *    with type i and is "1" otherwise.
-+ */
-+/* high-16 = version, low-16 = check magic */
-+#define ACM_MAGIC  0x0001debc
-+
-+/* each offset in bytes from start of the struct they
-+ * are part of */
-+
-+/* each buffer consists of all policy information for
-+ * the respective policy given in the policy code
-+ *
-+ * acm_policy_buffer, acm_chwall_policy_buffer,
-+ * and acm_ste_policy_buffer need to stay 32-bit aligned
-+ * because we create binary policies also with external
-+ * tools that assume packed representations (e.g. the java tool)
-+ */
-+struct acm_policy_buffer {
-+    uint32_t policy_version; /* ACM_POLICY_VERSION */
-+    uint32_t magic;
-+    uint32_t len;
-+    uint32_t primary_policy_code;
-+    uint32_t primary_buffer_offset;
-+    uint32_t secondary_policy_code;
-+    uint32_t secondary_buffer_offset;
-+};
-+
-+struct acm_chwall_policy_buffer {
-+    uint32_t policy_version; /* ACM_CHWALL_VERSION */
-+    uint32_t policy_code;
-+    uint32_t chwall_max_types;
-+    uint32_t chwall_max_ssidrefs;
-+    uint32_t chwall_max_conflictsets;
-+    uint32_t chwall_ssid_offset;
-+    uint32_t chwall_conflict_sets_offset;
-+    uint32_t chwall_running_types_offset;
-+    uint32_t chwall_conflict_aggregate_offset;
-+};
-+
-+struct acm_ste_policy_buffer {
-+    uint32_t policy_version; /* ACM_STE_VERSION */
-+    uint32_t policy_code;
-+    uint32_t ste_max_types;
-+    uint32_t ste_max_ssidrefs;
-+    uint32_t ste_ssid_offset;
-+};
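A minimal sketch of locating a sub-policy inside a combined policy blob, following the comment above that offsets are counted from the start of the struct they are part of; the function name is illustrative and no endianness or bounds checking is shown.

/* Illustrative: find the Chinese Wall sub-buffer named by the leading header. */
static struct acm_chwall_policy_buffer *find_chwall(void *blob)
{
        struct acm_policy_buffer *pol = blob;

        if (pol->primary_policy_code != ACM_CHINESE_WALL_POLICY)
                return NULL;
        return (struct acm_chwall_policy_buffer *)
                ((char *)blob + pol->primary_buffer_offset);
}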
-+
-+struct acm_stats_buffer {
-+    uint32_t magic;
-+    uint32_t len;
-+    uint32_t primary_policy_code;
-+    uint32_t primary_stats_offset;
-+    uint32_t secondary_policy_code;
-+    uint32_t secondary_stats_offset;
-+};
-+
-+struct acm_ste_stats_buffer {
-+    uint32_t ec_eval_count;
-+    uint32_t gt_eval_count;
-+    uint32_t ec_denied_count;
-+    uint32_t gt_denied_count; 
-+    uint32_t ec_cachehit_count;
-+    uint32_t gt_cachehit_count;
-+};
-+
-+struct acm_ssid_buffer {
-+    uint32_t len;
-+    ssidref_t ssidref;
-+    uint32_t primary_policy_code;
-+    uint32_t primary_max_types;
-+    uint32_t primary_types_offset;
-+    uint32_t secondary_policy_code;
-+    uint32_t secondary_max_types;
-+    uint32_t secondary_types_offset;
-+};
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/acm_ops.h linux-2.6.12-xen/include/asm-xen/xen-public/acm_ops.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/acm_ops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/acm_ops.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,96 @@
-+/*
-+ * acm_ops.h: Xen access control module hypervisor commands
-+ *
-+ * Reiner Sailer <sailer at watson.ibm.com>
-+ * Copyright (c) 2005, International Business Machines Corporation.
-+ */
-+
-+#ifndef __XEN_PUBLIC_ACM_OPS_H__
-+#define __XEN_PUBLIC_ACM_OPS_H__
-+
-+#include "xen.h"
-+#include "sched_ctl.h"
-+
-+/*
-+ * Make sure you increment the interface version whenever you modify this file!
-+ * This makes sure that old versions of acm tools will stop working in a
-+ * well-defined way (rather than crashing the machine, for instance).
-+ */
-+#define ACM_INTERFACE_VERSION   0xAAAA0005
-+
-+/************************************************************************/
-+
-+#define ACM_SETPOLICY         4
-+struct acm_setpolicy {
-+    /* OUT variables */
-+    void *pushcache;
-+    uint32_t pushcache_size;
-+};
-+
-+
-+#define ACM_GETPOLICY         5
-+struct acm_getpolicy {
-+    /* OUT variables */
-+    void *pullcache;
-+    uint32_t pullcache_size;
-+};
-+
-+
-+#define ACM_DUMPSTATS         6
-+struct acm_dumpstats {
-+    void *pullcache;
-+    uint32_t pullcache_size;
-+};
-+
-+
-+#define ACM_GETSSID           7
-+enum get_type {UNSET=0, SSIDREF, DOMAINID};
-+struct acm_getssid {
-+    enum get_type get_ssid_by;
-+    union {
-+        domaintype_t domainid;
-+        ssidref_t    ssidref;
-+    } id;
-+    void *ssidbuf;
-+    uint32_t ssidbuf_size;
-+};
-+
-+#define ACM_GETDECISION        8
-+struct acm_getdecision {
-+    enum get_type get_decision_by1; /* in */
-+    enum get_type get_decision_by2;
-+    union {
-+        domaintype_t domainid;
-+        ssidref_t    ssidref;
-+    } id1;
-+    union {
-+        domaintype_t domainid;
-+        ssidref_t    ssidref;
-+    } id2;
-+    enum acm_hook_type hook;
-+    int acm_decision;           /* out */
-+};
-+
-+struct acm_op {
-+    uint32_t cmd;
-+    uint32_t interface_version;      /* ACM_INTERFACE_VERSION */
-+    union {
-+        struct acm_setpolicy setpolicy;
-+        struct acm_getpolicy getpolicy;
-+        struct acm_dumpstats dumpstats;
-+        struct acm_getssid getssid;
-+        struct acm_getdecision getdecision;
-+    } u;
-+};
-+
-+#endif                          /* __XEN_PUBLIC_ACM_OPS_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
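A minimal sketch of filling an acm_op for the ACM_GETSSID command using only the definitions above; how the op is then submitted to the hypervisor is outside this header, and the helper name is illustrative.

#include <asm-xen/xen-public/acm.h>
#include <asm-xen/xen-public/acm_ops.h>

/* Illustrative: request the ssid information of one domain. */
static void prepare_getssid(struct acm_op *op, domaintype_t domid,
                            void *buf, uint32_t buflen)
{
        op->cmd = ACM_GETSSID;
        op->interface_version = ACM_INTERFACE_VERSION;
        op->u.getssid.get_ssid_by = DOMAINID;
        op->u.getssid.id.domainid = domid;
        op->u.getssid.ssidbuf = buf;
        op->u.getssid.ssidbuf_size = buflen;
}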
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/arch-ia64.h linux-2.6.12-xen/include/asm-xen/xen-public/arch-ia64.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/arch-ia64.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/arch-ia64.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,314 @@
-+/******************************************************************************
-+ * arch-ia64/hypervisor-if.h
-+ * 
-+ * Guest OS interface to IA64 Xen.
-+ */
-+
-+#ifndef __HYPERVISOR_IF_IA64_H__
-+#define __HYPERVISOR_IF_IA64_H__
-+
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+/* WARNING: before changing this, check that shared_info fits on a page */
-+#define MAX_VIRT_CPUS 4
-+
-+#ifndef __ASSEMBLY__
-+
-+#define MAX_NR_SECTION  32  /* at most 32 memory holes */
-+typedef struct {
-+    unsigned long start;  /* start of memory hole */
-+    unsigned long end;    /* end of memory hole */
-+} mm_section_t;
-+
-+typedef struct {
-+    unsigned long mfn : 56;
-+    unsigned long type: 8;
-+} pmt_entry_t;
-+
-+#define GPFN_MEM          (0UL << 56) /* Guest pfn is normal mem */
-+#define GPFN_FRAME_BUFFER (1UL << 56) /* VGA framebuffer */
-+#define GPFN_LOW_MMIO     (2UL << 56) /* Low MMIO range */
-+#define GPFN_PIB          (3UL << 56) /* PIB base */
-+#define GPFN_IOSAPIC      (4UL << 56) /* IOSAPIC base */
-+#define GPFN_LEGACY_IO    (5UL << 56) /* Legacy I/O base */
-+#define GPFN_GFW          (6UL << 56) /* Guest Firmware */
-+#define GPFN_HIGH_MMIO    (7UL << 56) /* High MMIO range */
-+
-+#define GPFN_IO_MASK     (7UL << 56)  /* Guest pfn is I/O type */
-+#define GPFN_INV_MASK    (31UL << 59) /* Guest pfn is invalid */
-+
-+#define INVALID_MFN       (~0UL)
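A small sketch of decoding an entry that uses the GPFN_* encoding above (type in the top bits, mfn in the low 56 bits, matching pmt_entry_t); the helper names are illustrative.

static inline unsigned long gpfn_type(unsigned long entry)
{
        return entry & GPFN_IO_MASK;            /* one of the GPFN_* values */
}

static inline unsigned long gpfn_mfn(unsigned long entry)
{
        return entry & ((1UL << 56) - 1);       /* low 56 bits: the mfn */
}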
-+
-+#define MEM_G   (1UL << 30)	
-+#define MEM_M   (1UL << 20)	
-+
-+#define MMIO_START       (3 * MEM_G)
-+#define MMIO_SIZE        (512 * MEM_M)
-+
-+#define VGA_IO_START     0xA0000UL
-+#define VGA_IO_SIZE      0x20000
-+
-+#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
-+#define LEGACY_IO_SIZE   (64*MEM_M)  
-+
-+#define IO_PAGE_START (LEGACY_IO_START + LEGACY_IO_SIZE)
-+#define IO_PAGE_SIZE  PAGE_SIZE
-+
-+#define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
-+#define STORE_PAGE_SIZE	 PAGE_SIZE
-+
-+#define IO_SAPIC_START   0xfec00000UL
-+#define IO_SAPIC_SIZE    0x100000
-+
-+#define PIB_START 0xfee00000UL
-+#define PIB_SIZE 0x100000 
-+
-+#define GFW_START        (4*MEM_G -16*MEM_M)
-+#define GFW_SIZE         (16*MEM_M)
-+
-+/*
-+ * NB. This may become a 64-bit count with no shift. If this happens then the 
-+ * structure size will still be 8 bytes, so no other alignments will change.
-+ */
-+typedef struct {
-+    unsigned int  tsc_bits;      /* 0: 32 bits read from the CPU's TSC. */
-+    unsigned int  tsc_bitshift;  /* 4: 'tsc_bits' uses N:N+31 of TSC.   */
-+} tsc_timestamp_t; /* 8 bytes */
-+
-+struct pt_fpreg {
-+    union {
-+        unsigned long bits[2];
-+        long double __dummy;    /* force 16-byte alignment */
-+    } u;
-+};
-+
-+typedef struct cpu_user_regs{
-+    /* The following registers are saved by SAVE_MIN: */
-+    unsigned long b6;  /* scratch */
-+    unsigned long b7;  /* scratch */
-+
-+    unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
-+    unsigned long ar_ssd; /* reserved for future use (scratch) */
-+
-+    unsigned long r8;  /* scratch (return value register 0) */
-+    unsigned long r9;  /* scratch (return value register 1) */
-+    unsigned long r10; /* scratch (return value register 2) */
-+    unsigned long r11; /* scratch (return value register 3) */
-+
-+    unsigned long cr_ipsr; /* interrupted task's psr */
-+    unsigned long cr_iip;  /* interrupted task's instruction pointer */
-+    unsigned long cr_ifs;  /* interrupted task's function state */
-+
-+    unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
-+    unsigned long ar_pfs;  /* prev function state  */
-+    unsigned long ar_rsc;  /* RSE configuration */
-+    /* The following two are valid only if cr_ipsr.cpl > 0: */
-+    unsigned long ar_rnat;  /* RSE NaT */
-+    unsigned long ar_bspstore; /* RSE bspstore */
-+
-+    unsigned long pr;  /* 64 predicate registers (1 bit each) */
-+    unsigned long b0;  /* return pointer (bp) */
-+    unsigned long loadrs;  /* size of dirty partition << 16 */
-+
-+    unsigned long r1;  /* the gp pointer */
-+    unsigned long r12; /* interrupted task's memory stack pointer */
-+    unsigned long r13; /* thread pointer */
-+
-+    unsigned long ar_fpsr;  /* floating point status (preserved) */
-+    unsigned long r15;  /* scratch */
-+
-+ /* The remaining registers are NOT saved for system calls.  */
-+
-+    unsigned long r14;  /* scratch */
-+    unsigned long r2;  /* scratch */
-+    unsigned long r3;  /* scratch */
-+    unsigned long r16;  /* scratch */
-+    unsigned long r17;  /* scratch */
-+    unsigned long r18;  /* scratch */
-+    unsigned long r19;  /* scratch */
-+    unsigned long r20;  /* scratch */
-+    unsigned long r21;  /* scratch */
-+    unsigned long r22;  /* scratch */
-+    unsigned long r23;  /* scratch */
-+    unsigned long r24;  /* scratch */
-+    unsigned long r25;  /* scratch */
-+    unsigned long r26;  /* scratch */
-+    unsigned long r27;  /* scratch */
-+    unsigned long r28;  /* scratch */
-+    unsigned long r29;  /* scratch */
-+    unsigned long r30;  /* scratch */
-+    unsigned long r31;  /* scratch */
-+    unsigned long ar_ccv;  /* compare/exchange value (scratch) */
-+
-+    /*
-+     * Floating point registers that the kernel considers scratch:
-+     */
-+    struct pt_fpreg f6;  /* scratch */
-+    struct pt_fpreg f7;  /* scratch */
-+    struct pt_fpreg f8;  /* scratch */
-+    struct pt_fpreg f9;  /* scratch */
-+    struct pt_fpreg f10;  /* scratch */
-+    struct pt_fpreg f11;  /* scratch */
-+    unsigned long r4;  /* preserved */
-+    unsigned long r5;  /* preserved */
-+    unsigned long r6;  /* preserved */
-+    unsigned long r7;  /* preserved */
-+    unsigned long eml_unat;    /* used for emulating instruction */
-+    unsigned long rfi_pfs;     /* used for emulating rfi */
-+
-+}cpu_user_regs_t;
-+
-+typedef union {
-+    unsigned long value;
-+    struct {
-+        int a_int:1;
-+        int a_from_int_cr:1;
-+        int a_to_int_cr:1;
-+        int a_from_psr:1;
-+        int a_from_cpuid:1;
-+        int a_cover:1;
-+        int a_bsw:1;
-+        long reserved:57;
-+    };
-+} vac_t;
-+
-+typedef union {
-+    unsigned long value;
-+    struct {
-+        int d_vmsw:1;
-+        int d_extint:1;
-+        int d_ibr_dbr:1;
-+        int d_pmc:1;
-+        int d_to_pmd:1;
-+        int d_itm:1;
-+        long reserved:58;
-+    };
-+} vdc_t;
-+
-+typedef struct {
-+    vac_t   vac;
-+    vdc_t   vdc;
-+    unsigned long  virt_env_vaddr;
-+    unsigned long  reserved1[29];
-+    unsigned long  vhpi;
-+    unsigned long  reserved2[95];
-+    union {
-+        unsigned long  vgr[16];
-+        unsigned long bank1_regs[16]; // bank1 regs (r16-r31) when bank0 active
-+    };
-+    union {
-+        unsigned long  vbgr[16];
-+        unsigned long bank0_regs[16]; // bank0 regs (r16-r31) when bank1 active
-+    };
-+    unsigned long  vnat;
-+    unsigned long  vbnat;
-+    unsigned long  vcpuid[5];
-+    unsigned long  reserved3[11];
-+    unsigned long  vpsr;
-+    unsigned long  vpr;
-+    unsigned long  reserved4[76];
-+    union {
-+        unsigned long  vcr[128];
-+        struct {
-+            unsigned long dcr;  // CR0
-+            unsigned long itm;
-+            unsigned long iva;
-+            unsigned long rsv1[5];
-+            unsigned long pta;  // CR8
-+            unsigned long rsv2[7];
-+            unsigned long ipsr;  // CR16
-+            unsigned long isr;
-+            unsigned long rsv3;
-+            unsigned long iip;
-+            unsigned long ifa;
-+            unsigned long itir;
-+            unsigned long iipa;
-+            unsigned long ifs;
-+            unsigned long iim;  // CR24
-+            unsigned long iha;
-+            unsigned long rsv4[38];
-+            unsigned long lid;  // CR64
-+            unsigned long ivr;
-+            unsigned long tpr;
-+            unsigned long eoi;
-+            unsigned long irr[4];
-+            unsigned long itv;  // CR72
-+            unsigned long pmv;
-+            unsigned long cmcv;
-+            unsigned long rsv5[5];
-+            unsigned long lrr0;  // CR80
-+            unsigned long lrr1;
-+            unsigned long rsv6[46];
-+        };
-+    };
-+    union {
-+        unsigned long  reserved5[128];
-+        struct {
-+            unsigned long precover_ifs;
-+            unsigned long unat;  // not sure if this is needed until NaT arch is done
-+            int interrupt_collection_enabled; // virtual psr.ic
-+            int interrupt_delivery_enabled; // virtual psr.i
-+            int pending_interruption;
-+            int incomplete_regframe; // see SDM vol2 6.8
-+            unsigned long delivery_mask[4];
-+            int metaphysical_mode; // 1 = use metaphys mapping, 0 = use virtual
-+            int banknum; // 0 or 1, which virtual register bank is active
-+            unsigned long rrs[8]; // region registers
-+            unsigned long krs[8]; // kernel registers
-+            unsigned long pkrs[8]; // protection key registers
-+            unsigned long tmp[8]; // temp registers (e.g. for hyperprivops)
-+            // FIXME: tmp[8] temp'ly being used for virtual psr.pp
-+        };
-+    };
-+    unsigned long  reserved6[3456];
-+    unsigned long  vmm_avail[128];
-+    unsigned long  reserved7[4096];
-+} mapped_regs_t;
-+
-+typedef struct {
-+    mapped_regs_t *privregs;
-+    int evtchn_vector;
-+} arch_vcpu_info_t;
-+
-+typedef mapped_regs_t vpd_t;
-+
-+typedef struct {
-+    unsigned int flags;
-+    unsigned long start_info_pfn;
-+} arch_shared_info_t;
-+
-+typedef struct {
-+    unsigned long start;
-+    unsigned long size; 
-+} arch_initrd_info_t;
-+
-+#define IA64_COMMAND_LINE_SIZE 512
-+typedef struct vcpu_guest_context {
-+#define VGCF_FPU_VALID (1<<0)
-+#define VGCF_VMX_GUEST (1<<1)
-+#define VGCF_IN_KERNEL (1<<2)
-+    unsigned long flags;       /* VGCF_* flags */
-+    unsigned long pt_base;     /* PMT table base */
-+    unsigned long share_io_pg; /* Shared page for I/O emulation */
-+    unsigned long sys_pgnr;    /* System pages out of domain memory */
-+    unsigned long vm_assist;   /* VMASST_TYPE_* bitmap, now none on IPF */
-+
-+    cpu_user_regs_t regs;
-+    arch_vcpu_info_t vcpu;
-+    arch_shared_info_t shared;
-+    arch_initrd_info_t initrd;
-+    char cmdline[IA64_COMMAND_LINE_SIZE];
-+} vcpu_guest_context_t;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif /* __HYPERVISOR_IF_IA64_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/arch-x86_32.h linux-2.6.12-xen/include/asm-xen/xen-public/arch-x86_32.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/arch-x86_32.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/arch-x86_32.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,158 @@
-+/******************************************************************************
-+ * arch-x86_32.h
-+ * 
-+ * Guest OS interface to x86 32-bit Xen.
-+ * 
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
-+#define __XEN_PUBLIC_ARCH_X86_32_H__
-+
-+/*
-+ * SEGMENT DESCRIPTOR TABLES
-+ */
-+/*
-+ * A number of GDT entries are reserved by Xen. These are not situated at the
-+ * start of the GDT because some stupid OSes export hard-coded selector values
-+ * in their ABI. These hard-coded values are always near the start of the GDT,
-+ * so Xen places itself out of the way, at the far end of the GDT.
-+ */
-+#define FIRST_RESERVED_GDT_PAGE  14
-+#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
-+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
-+
-+/*
-+ * These flat segments are in the Xen-private section of every GDT. Since these
-+ * are also present in the initial GDT, many OSes will be able to avoid
-+ * installing their own GDT.
-+ */
-+#define FLAT_RING1_CS 0xe019    /* GDT index 259 */
-+#define FLAT_RING1_DS 0xe021    /* GDT index 260 */
-+#define FLAT_RING1_SS 0xe021    /* GDT index 260 */
-+#define FLAT_RING3_CS 0xe02b    /* GDT index 261 */
-+#define FLAT_RING3_DS 0xe033    /* GDT index 262 */
-+#define FLAT_RING3_SS 0xe033    /* GDT index 262 */
-+
-+#define FLAT_KERNEL_CS FLAT_RING1_CS
-+#define FLAT_KERNEL_DS FLAT_RING1_DS
-+#define FLAT_KERNEL_SS FLAT_RING1_SS
-+#define FLAT_USER_CS    FLAT_RING3_CS
-+#define FLAT_USER_DS    FLAT_RING3_DS
-+#define FLAT_USER_SS    FLAT_RING3_SS
-+
-+/* And the trap vector is... */
-+#define TRAP_INSTR "int $0x82"
-+
-+/*
-+ * Virtual addresses beyond this are not modifiable by guest OSes. The 
-+ * machine->physical mapping table starts at this address, read-only.
-+ */
-+#ifdef CONFIG_X86_PAE
-+#define __HYPERVISOR_VIRT_START 0xF5800000
-+#else
-+#define __HYPERVISOR_VIRT_START 0xFC000000
-+#endif
-+
-+#ifndef HYPERVISOR_VIRT_START
-+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-+#endif
-+
-+#ifndef machine_to_phys_mapping
-+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-+#endif
-+
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+#define MAX_VIRT_CPUS 32
-+
-+#ifndef __ASSEMBLY__
-+
-+/*
-+ * Send an array of these to HYPERVISOR_set_trap_table()
-+ */
-+#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
-+#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
-+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
-+#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
-+typedef struct trap_info {
-+    uint8_t       vector;  /* exception vector                              */
-+    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
-+    uint16_t      cs;      /* code selector                                 */
-+    unsigned long address; /* code offset                                   */
-+} trap_info_t;
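A minimal sketch of building one entry for HYPERVISOR_set_trap_table() with the macros above; the vector choice and handler argument are illustrative, and flags must start at zero because the TI_SET_* macros only OR bits in.

/* Illustrative: make vector 0x80 callable from ring 3. */
static void make_syscall_trap(trap_info_t *ti, unsigned long handler)
{
        ti->vector  = 0x80;
        ti->flags   = 0;
        ti->cs      = FLAT_KERNEL_CS;
        ti->address = handler;
        TI_SET_DPL(ti, 3);      /* ring 3 may raise it */
        TI_SET_IF(ti, 1);       /* clear event enable on entry */
}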
-+
-+typedef struct cpu_user_regs {
-+    uint32_t ebx;
-+    uint32_t ecx;
-+    uint32_t edx;
-+    uint32_t esi;
-+    uint32_t edi;
-+    uint32_t ebp;
-+    uint32_t eax;
-+    uint16_t error_code;    /* private */
-+    uint16_t entry_vector;  /* private */
-+    uint32_t eip;
-+    uint16_t cs;
-+    uint8_t  saved_upcall_mask;
-+    uint8_t  _pad0;
-+    uint32_t eflags;        /* eflags.IF == !saved_upcall_mask */
-+    uint32_t esp;
-+    uint16_t ss, _pad1;
-+    uint16_t es, _pad2;
-+    uint16_t ds, _pad3;
-+    uint16_t fs, _pad4;
-+    uint16_t gs, _pad5;
-+} cpu_user_regs_t;
-+
-+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
-+
-+/*
-+ * The following is all CPU context. Note that the fpu_ctxt block is filled 
-+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
-+ */
-+typedef struct vcpu_guest_context {
-+    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
-+    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
-+#define VGCF_I387_VALID (1<<0)
-+#define VGCF_VMX_GUEST  (1<<1)
-+#define VGCF_IN_KERNEL  (1<<2)
-+    unsigned long flags;                    /* VGCF_* flags                 */
-+    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
-+    struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
-+    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
-+    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
-+    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
-+    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
-+    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
-+    unsigned long event_callback_cs;        /* CS:EIP of event callback     */
-+    unsigned long event_callback_eip;
-+    unsigned long failsafe_callback_cs;     /* CS:EIP of failsafe callback  */
-+    unsigned long failsafe_callback_eip;
-+    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
-+} vcpu_guest_context_t;
-+
-+typedef struct arch_shared_info {
-+    unsigned long max_pfn;                  /* max pfn that appears in table */
-+    /* Frame containing list of mfns containing list of mfns containing p2m. */
-+    unsigned long pfn_to_mfn_frame_list_list; 
-+    unsigned long nmi_reason;
-+} arch_shared_info_t;
-+
-+typedef struct {
-+    unsigned long cr2;
-+    unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
-+} arch_vcpu_info_t;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/arch-x86_64.h linux-2.6.12-xen/include/asm-xen/xen-public/arch-x86_64.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/arch-x86_64.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/arch-x86_64.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,243 @@
-+/******************************************************************************
-+ * arch-x86_64.h
-+ * 
-+ * Guest OS interface to x86 64-bit Xen.
-+ * 
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_ARCH_X86_64_H__
-+#define __XEN_PUBLIC_ARCH_X86_64_H__
-+
-+/*
-+ * SEGMENT DESCRIPTOR TABLES
-+ */
-+/*
-+ * A number of GDT entries are reserved by Xen. These are not situated at the
-+ * start of the GDT because some stupid OSes export hard-coded selector values
-+ * in their ABI. These hard-coded values are always near the start of the GDT,
-+ * so Xen places itself out of the way, at the far end of the GDT.
-+ */
-+#define FIRST_RESERVED_GDT_PAGE  14
-+#define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
-+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
-+
-+/*
-+ * 64-bit segment selectors
-+ * These flat segments are in the Xen-private section of every GDT. Since these
-+ * are also present in the initial GDT, many OSes will be able to avoid
-+ * installing their own GDT.
-+ */
-+
-+#define FLAT_RING3_CS32 0xe023  /* GDT index 260 */
-+#define FLAT_RING3_CS64 0xe033  /* GDT index 261 */
-+#define FLAT_RING3_DS32 0xe02b  /* GDT index 262 */
-+#define FLAT_RING3_DS64 0x0000  /* NULL selector */
-+#define FLAT_RING3_SS32 0xe02b  /* GDT index 262 */
-+#define FLAT_RING3_SS64 0xe02b  /* GDT index 262 */
-+
-+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
-+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
-+#define FLAT_KERNEL_DS   FLAT_KERNEL_DS64
-+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
-+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
-+#define FLAT_KERNEL_CS   FLAT_KERNEL_CS64
-+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
-+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
-+#define FLAT_KERNEL_SS   FLAT_KERNEL_SS64
-+
-+#define FLAT_USER_DS64 FLAT_RING3_DS64
-+#define FLAT_USER_DS32 FLAT_RING3_DS32
-+#define FLAT_USER_DS   FLAT_USER_DS64
-+#define FLAT_USER_CS64 FLAT_RING3_CS64
-+#define FLAT_USER_CS32 FLAT_RING3_CS32
-+#define FLAT_USER_CS   FLAT_USER_CS64
-+#define FLAT_USER_SS64 FLAT_RING3_SS64
-+#define FLAT_USER_SS32 FLAT_RING3_SS32
-+#define FLAT_USER_SS   FLAT_USER_SS64
-+
-+/* And the trap vector is... */
-+#define TRAP_INSTR "syscall"
-+
-+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
-+#define __HYPERVISOR_VIRT_END   0xFFFF880000000000
-+
-+#ifndef HYPERVISOR_VIRT_START
-+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-+#define HYPERVISOR_VIRT_END   mk_unsigned_long(__HYPERVISOR_VIRT_END)
-+#endif
-+
-+/* Maximum number of virtual CPUs in multi-processor guests. */
-+#define MAX_VIRT_CPUS 32
-+
-+#ifndef __ASSEMBLY__
-+
-+/* The machine->physical mapping table starts at this address, read-only. */
-+#ifndef machine_to_phys_mapping
-+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-+#endif
-+
-+/*
-+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
-+ *  @which == SEGBASE_*  ;  @base == 64-bit base address
-+ * Returns 0 on success.
-+ */
-+#define SEGBASE_FS          0
-+#define SEGBASE_GS_USER     1
-+#define SEGBASE_GS_KERNEL   2
-+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
-+
-+/*
-+ * int HYPERVISOR_iret(void)
-+ * All arguments are on the kernel stack, in the following format.
-+ * Never returns if successful. Current kernel context is lost.
-+ * The saved CS is mapped as follows:
-+ *   RING0 -> RING3 kernel mode.
-+ *   RING1 -> RING3 kernel mode.
-+ *   RING2 -> RING3 kernel mode.
-+ *   RING3 -> RING3 user mode.
-+ * However RING0 indicates that the guest kernel should return to itself
-+ * directly with
-+ *      orb   $3,1*8(%rsp)
-+ *      iretq
-+ * If flags contains VGCF_IN_SYSCALL:
-+ *   Restore RAX, RIP, RFLAGS, RSP.
-+ *   Discard R11, RCX, CS, SS.
-+ * Otherwise:
-+ *   Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
-+ * All other registers are saved on hypercall entry and restored to user.
-+ */
-+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
-+#define VGCF_IN_SYSCALL (1<<8)
-+struct iret_context {
-+    /* Top of stack (%rsp at point of hypercall). */
-+    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+    /* Bottom of iret stack frame. */
-+};
-+/*
-+ * For compatibility with HYPERVISOR_switch_to_user which is the old
-+ * name for HYPERVISOR_iret.
-+ */
-+struct switch_to_user {
-+    /* Top of stack (%rsp at point of hypercall). */
-+    uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
-+    /* Bottom of iret stack frame. */
-+};
-+
-+/*
-+ * Send an array of these to HYPERVISOR_set_trap_table().
-+ * N.B. As in x86/32 mode, the privilege level specifies which modes may enter
-+ * a trap via a software interrupt. Since rings 1 and 2 are unavailable, we
-+ * allocate privilege levels as follows:
-+ *  Level == 0: No one may enter
-+ *  Level == 1: Kernel may enter
-+ *  Level == 2: Kernel may enter
-+ *  Level == 3: Everyone may enter
-+ */
-+#define TI_GET_DPL(_ti)      ((_ti)->flags & 3)
-+#define TI_GET_IF(_ti)       ((_ti)->flags & 4)
-+#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
-+#define TI_SET_IF(_ti,_if)   ((_ti)->flags |= ((!!(_if))<<2))
-+typedef struct trap_info {
-+    uint8_t       vector;  /* exception vector                              */
-+    uint8_t       flags;   /* 0-3: privilege level; 4: clear event enable?  */
-+    uint16_t      cs;      /* code selector                                 */
-+    unsigned long address; /* code offset                                   */
-+} trap_info_t;
-+
-+#ifdef __GNUC__
-+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
-+#define __DECL_REG(name) union { uint64_t r ## name, e ## name; }
-+#else
-+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
-+#define __DECL_REG(name) uint64_t r ## name
-+#endif
-+
-+typedef struct cpu_user_regs {
-+    uint64_t r15;
-+    uint64_t r14;
-+    uint64_t r13;
-+    uint64_t r12;
-+    __DECL_REG(bp);
-+    __DECL_REG(bx);
-+    uint64_t r11;
-+    uint64_t r10;
-+    uint64_t r9;
-+    uint64_t r8;
-+    __DECL_REG(ax);
-+    __DECL_REG(cx);
-+    __DECL_REG(dx);
-+    __DECL_REG(si);
-+    __DECL_REG(di);
-+    uint32_t error_code;    /* private */
-+    uint32_t entry_vector;  /* private */
-+    __DECL_REG(ip);
-+    uint16_t cs, _pad0[1];
-+    uint8_t  saved_upcall_mask;
-+    uint8_t  _pad1[3];
-+    __DECL_REG(flags);      /* rflags.IF == !saved_upcall_mask */
-+    __DECL_REG(sp);
-+    uint16_t ss, _pad2[3];
-+    uint16_t es, _pad3[3];
-+    uint16_t ds, _pad4[3];
-+    uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base.     */
-+    uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
-+} cpu_user_regs_t;
-+
-+#undef __DECL_REG
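A one-function sketch of the naming convention established by __DECL_REG above: under gcc each slot carries both the 64-bit and the legacy 32-bit name for the same storage.

static unsigned long get_ax(cpu_user_regs_t *regs)
{
#ifdef __GNUC__
        return regs->eax;       /* alias of regs->rax via the anonymous union */
#else
        return regs->rax;       /* non-gcc sources must use the 64-bit name */
#endif
}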
-+
-+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
-+
-+/*
-+ * The following is all CPU context. Note that the fpu_ctxt block is filled 
-+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
-+ */
-+typedef struct vcpu_guest_context {
-+    /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
-+    struct { char x[512]; } fpu_ctxt;       /* User-level FPU registers     */
-+#define VGCF_I387_VALID (1<<0)
-+#define VGCF_VMX_GUEST  (1<<1)
-+#define VGCF_IN_KERNEL  (1<<2)
-+    unsigned long flags;                    /* VGCF_* flags                 */
-+    cpu_user_regs_t user_regs;              /* User-level CPU registers     */
-+    struct trap_info trap_ctxt[256];        /* Virtual IDT                  */
-+    unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
-+    unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
-+    unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1)   */
-+    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers)  */
-+    unsigned long debugreg[8];              /* DB0-DB7 (debug registers)    */
-+    unsigned long event_callback_eip;
-+    unsigned long failsafe_callback_eip;
-+    unsigned long syscall_callback_eip;
-+    unsigned long vm_assist;                /* VMASST_TYPE_* bitmap */
-+    /* Segment base addresses. */
-+    uint64_t      fs_base;
-+    uint64_t      gs_base_kernel;
-+    uint64_t      gs_base_user;
-+} vcpu_guest_context_t;
-+
-+typedef struct arch_shared_info {
-+    unsigned long max_pfn;                  /* max pfn that appears in table */
-+    /* Frame containing list of mfns containing list of mfns containing p2m. */
-+    unsigned long pfn_to_mfn_frame_list_list; 
-+    unsigned long nmi_reason;
-+} arch_shared_info_t;
-+
-+typedef struct {
-+    unsigned long cr2;
-+    unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
-+} arch_vcpu_info_t;
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/COPYING linux-2.6.12-xen/include/asm-xen/xen-public/COPYING
---- pristine-linux-2.6.12/include/asm-xen/xen-public/COPYING	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/COPYING	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,28 @@
-+XEN NOTICE
-+==========
-+
-+This copyright applies to all files within this subdirectory. All
-+other files in the Xen source distribution are covered by version 2 of
-+the GNU General Public License.
-+
-+ -- Keir Fraser (on behalf of the Xen team)
-+
-+=====================================================================
-+
-+Permission is hereby granted, free of charge, to any person obtaining a copy
-+of this software and associated documentation files (the "Software"), to
-+deal in the Software without restriction, including without limitation the
-+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+sell copies of the Software, and to permit persons to whom the Software is
-+furnished to do so, subject to the following conditions:
-+
-+The above copyright notice and this permission notice shall be included in
-+all copies or substantial portions of the Software.
-+
-+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
-+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
-+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
-+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
-+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
-+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
-+DEALINGS IN THE SOFTWARE.
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/dom0_ops.h linux-2.6.12-xen/include/asm-xen/xen-public/dom0_ops.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/dom0_ops.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/dom0_ops.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,488 @@
-+/******************************************************************************
-+ * dom0_ops.h
-+ * 
-+ * Process command requests from domain-0 guest OS.
-+ * 
-+ * Copyright (c) 2002-2003, B Dragovic
-+ * Copyright (c) 2002-2004, K Fraser
-+ */
-+
-+
-+#ifndef __XEN_PUBLIC_DOM0_OPS_H__
-+#define __XEN_PUBLIC_DOM0_OPS_H__
-+
-+#include "xen.h"
-+#include "sched_ctl.h"
-+
-+/*
-+ * Make sure you increment the interface version whenever you modify this file!
-+ * This makes sure that old versions of dom0 tools will stop working in a
-+ * well-defined way (rather than crashing the machine, for instance).
-+ */
-+#define DOM0_INTERFACE_VERSION   0x03000000
-+
-+/************************************************************************/
-+
-+#define DOM0_GETMEMLIST        2
-+typedef struct dom0_getmemlist {
-+    /* IN variables. */
-+    domid_t       domain;
-+    unsigned long max_pfns;
-+    void         *buffer;
-+    /* OUT variables. */
-+    unsigned long num_pfns;
-+} dom0_getmemlist_t;
-+
-+#define DOM0_SCHEDCTL          6
-+ /* struct sched_ctl_cmd is from sched_ctl.h   */
-+typedef struct sched_ctl_cmd dom0_schedctl_t;
-+
-+#define DOM0_ADJUSTDOM         7
-+/* struct sched_adjdom_cmd is from sched_ctl.h */
-+typedef struct sched_adjdom_cmd dom0_adjustdom_t;
-+
-+#define DOM0_CREATEDOMAIN      8
-+typedef struct dom0_createdomain {
-+    /* IN parameters */
-+    uint32_t ssidref;
-+    xen_domain_handle_t handle;
-+    /* IN/OUT parameters. */
-+    /* Identifier for new domain (auto-allocate if zero is specified). */
-+    domid_t domain;
-+} dom0_createdomain_t;
-+
-+#define DOM0_DESTROYDOMAIN     9
-+typedef struct dom0_destroydomain {
-+    /* IN variables. */
-+    domid_t domain;
-+} dom0_destroydomain_t;
-+
-+#define DOM0_PAUSEDOMAIN      10
-+typedef struct dom0_pausedomain {
-+    /* IN parameters. */
-+    domid_t domain;
-+} dom0_pausedomain_t;
-+
-+#define DOM0_UNPAUSEDOMAIN    11
-+typedef struct dom0_unpausedomain {
-+    /* IN parameters. */
-+    domid_t domain;
-+} dom0_unpausedomain_t;
-+
-+#define DOM0_GETDOMAININFO    12
-+typedef struct dom0_getdomaininfo {
-+    /* IN variables. */
-+    domid_t  domain;                  /* NB. IN/OUT variable. */
-+    /* OUT variables. */
-+#define DOMFLAGS_DYING     (1<<0) /* Domain is scheduled to die.             */
-+#define DOMFLAGS_SHUTDOWN  (1<<2) /* The guest OS has shut down.             */
-+#define DOMFLAGS_PAUSED    (1<<3) /* Currently paused by control software.   */
-+#define DOMFLAGS_BLOCKED   (1<<4) /* Currently blocked pending an event.     */
-+#define DOMFLAGS_RUNNING   (1<<5) /* Domain is currently running.            */
-+#define DOMFLAGS_CPUMASK      255 /* CPU to which this domain is bound.      */
-+#define DOMFLAGS_CPUSHIFT       8
-+#define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code.  */
-+#define DOMFLAGS_SHUTDOWNSHIFT 16
-+    uint32_t flags;
-+    unsigned long tot_pages;
-+    unsigned long max_pages;
-+    unsigned long shared_info_frame;       /* MFN of shared_info struct */
-+    uint64_t cpu_time;
-+    uint32_t nr_online_vcpus;     /* Number of VCPUs currently online. */
-+    uint32_t max_vcpu_id;         /* Maximum VCPUID in use by this domain. */
-+    uint32_t ssidref;
-+    xen_domain_handle_t handle;
-+} dom0_getdomaininfo_t;
-+
-+#define DOM0_SETVCPUCONTEXT   13
-+typedef struct dom0_setvcpucontext {
-+    /* IN variables. */
-+    domid_t               domain;
-+    uint32_t              vcpu;
-+    /* IN/OUT parameters */
-+    vcpu_guest_context_t *ctxt;
-+} dom0_setvcpucontext_t;
-+
-+#define DOM0_MSR              15
-+typedef struct dom0_msr {
-+    /* IN variables. */
-+    uint32_t write;
-+    cpumap_t cpu_mask;
-+    uint32_t msr;
-+    uint32_t in1;
-+    uint32_t in2;
-+    /* OUT variables. */
-+    uint32_t out1;
-+    uint32_t out2;
-+} dom0_msr_t;
-+
-+/*
-+ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
-+ * 1 January, 1970 if the current system time was <system_time>.
-+ */
-+#define DOM0_SETTIME          17
-+typedef struct dom0_settime {
-+    /* IN variables. */
-+    uint32_t secs;
-+    uint32_t nsecs;
-+    uint64_t system_time;
-+} dom0_settime_t;
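A minimal sketch of filling the request per the comment above: the caller supplies the wall-clock value that should correspond to a sampled Xen system time; how the sample is taken and the op submitted is not shown, and the helper name is illustrative.

/* Illustrative: wall clock reads <wall_secs,wall_nsecs> when the system
 * time (ns since boot) equals now_system_time. */
static void fill_settime(dom0_settime_t *st, uint32_t wall_secs,
                         uint32_t wall_nsecs, uint64_t now_system_time)
{
        st->secs        = wall_secs;
        st->nsecs       = wall_nsecs;
        st->system_time = now_system_time;
}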
-+
-+#define DOM0_GETPAGEFRAMEINFO 18
-+#define NOTAB 0         /* normal page */
-+#define L1TAB (1<<28)
-+#define L2TAB (2<<28)
-+#define L3TAB (3<<28)
-+#define L4TAB (4<<28)
-+#define LPINTAB  (1<<31)
-+#define XTAB  (0xf<<28) /* invalid page */
-+#define LTAB_MASK XTAB
-+#define LTABTYPE_MASK (0x7<<28)
-+
-+typedef struct dom0_getpageframeinfo {
-+    /* IN variables. */
-+    unsigned long pfn;     /* Machine page frame number to query.       */
-+    domid_t domain;        /* To which domain does the frame belong?    */
-+    /* OUT variables. */
-+    /* Is the page PINNED to a type? */
-+    uint32_t type;              /* see above type defs */
-+} dom0_getpageframeinfo_t;
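A small sketch of decoding the returned type field with the masks defined above; the helper name is illustrative.

static const char *frame_type_name(uint32_t type)
{
        if ((type & LTAB_MASK) == XTAB)
                return "invalid";
        /* (type & LPINTAB) != 0 means the frame is pinned to its type. */
        switch (type & LTABTYPE_MASK) {
        case L1TAB: return "L1 page table";
        case L2TAB: return "L2 page table";
        case L3TAB: return "L3 page table";
        case L4TAB: return "L4 page table";
        default:    return "normal page";       /* NOTAB */
        }
}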
-+
-+/*
-+ * Read console content from Xen buffer ring.
-+ */
-+#define DOM0_READCONSOLE      19
-+typedef struct dom0_readconsole {
-+    /* IN variables. */
-+    uint32_t clear;        /* Non-zero -> clear after reading. */
-+    /* IN/OUT variables. */
-+    char    *buffer;       /* In: Buffer start; Out: Used buffer start */
-+    uint32_t count;        /* In: Buffer size;  Out: Used buffer size  */
-+} dom0_readconsole_t;
-+
-+/* 
-+ * Set which physical cpus a vcpu can execute on.
-+ */
-+#define DOM0_SETVCPUAFFINITY  20
-+typedef struct dom0_setvcpuaffinity {
-+    /* IN variables. */
-+    domid_t   domain;
-+    uint32_t  vcpu;
-+    cpumap_t  cpumap;
-+} dom0_setvcpuaffinity_t;
-+
-+/* Get trace buffers machine base address */
-+#define DOM0_TBUFCONTROL       21
-+typedef struct dom0_tbufcontrol {
-+    /* IN variables */
-+#define DOM0_TBUF_GET_INFO     0
-+#define DOM0_TBUF_SET_CPU_MASK 1
-+#define DOM0_TBUF_SET_EVT_MASK 2
-+#define DOM0_TBUF_SET_SIZE     3
-+#define DOM0_TBUF_ENABLE       4
-+#define DOM0_TBUF_DISABLE      5
-+    uint32_t      op;
-+    /* IN/OUT variables */
-+    cpumap_t      cpu_mask;
-+    uint32_t      evt_mask;
-+    /* OUT variables */
-+    unsigned long buffer_mfn;
-+    uint32_t size;
-+} dom0_tbufcontrol_t;
-+
-+/*
-+ * Get physical information about the host machine
-+ */
-+#define DOM0_PHYSINFO         22
-+typedef struct dom0_physinfo {
-+    uint32_t threads_per_core;
-+    uint32_t cores_per_socket;
-+    uint32_t sockets_per_node;
-+    uint32_t nr_nodes;
-+    uint32_t cpu_khz;
-+    unsigned long total_pages;
-+    unsigned long free_pages;
-+    uint32_t hw_cap[8];
-+} dom0_physinfo_t;
-+
-+/*
-+ * Get the ID of the current scheduler.
-+ */
-+#define DOM0_SCHED_ID        24
-+typedef struct dom0_sched_id {
-+    /* OUT variable */
-+    uint32_t sched_id;
-+} dom0_sched_id_t;
-+
-+/* 
-+ * Control shadow pagetables operation
-+ */
-+#define DOM0_SHADOW_CONTROL  25
-+
-+#define DOM0_SHADOW_CONTROL_OP_OFF         0
-+#define DOM0_SHADOW_CONTROL_OP_ENABLE_TEST 1
-+#define DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY 2
-+#define DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE 3
-+
-+#define DOM0_SHADOW_CONTROL_OP_FLUSH       10     /* table ops */
-+#define DOM0_SHADOW_CONTROL_OP_CLEAN       11
-+#define DOM0_SHADOW_CONTROL_OP_PEEK        12
-+
-+typedef struct dom0_shadow_control_stats {
-+    uint32_t fault_count;
-+    uint32_t dirty_count;
-+    uint32_t dirty_net_count;     
-+    uint32_t dirty_block_count;     
-+} dom0_shadow_control_stats_t;
-+
-+typedef struct dom0_shadow_control {
-+    /* IN variables. */
-+    domid_t        domain;
-+    uint32_t       op;
-+    unsigned long *dirty_bitmap; /* pointer to locked buffer */
-+    /* IN/OUT variables. */
-+    unsigned long  pages;        /* size of buffer, updated with actual size */
-+    /* OUT variables. */
-+    dom0_shadow_control_stats_t stats;
-+} dom0_shadow_control_t;
-+
-+#define DOM0_SETDOMAINMAXMEM   28
-+typedef struct dom0_setdomainmaxmem {
-+    /* IN variables. */
-+    domid_t       domain;
-+    unsigned long max_memkb;
-+} dom0_setdomainmaxmem_t;
-+
-+#define DOM0_GETPAGEFRAMEINFO2 29   /* batched interface */
-+typedef struct dom0_getpageframeinfo2 {
-+    /* IN variables. */
-+    domid_t        domain;
-+    unsigned long  num;
-+    /* IN/OUT variables. */
-+    unsigned long *array;
-+} dom0_getpageframeinfo2_t;
-+
-+/*
-+ * Request memory range (@pfn, @pfn+@nr_pfns-1) to have type @type.
-+ * On x86, @type is an architecture-defined MTRR memory type.
-+ * On success, returns the MTRR that was used (@reg) and a handle that can
-+ * be passed to DOM0_DEL_MEMTYPE to accurately tear down the new setting.
-+ * (x86-specific).
-+ */
-+#define DOM0_ADD_MEMTYPE         31
-+typedef struct dom0_add_memtype {
-+    /* IN variables. */
-+    unsigned long pfn;
-+    unsigned long nr_pfns;
-+    uint32_t      type;
-+    /* OUT variables. */
-+    uint32_t      handle;
-+    uint32_t      reg;
-+} dom0_add_memtype_t;
-+
-+/*
-+ * Tear down an existing memory-range type. If @handle is remembered then it
-+ * should be passed in to accurately tear down the correct setting (in case
-+ * of overlapping memory regions with differing types). If it is not known
-+ * then @handle should be set to zero. In all cases @reg must be set.
-+ * (x86-specific).
-+ */
-+#define DOM0_DEL_MEMTYPE         32
-+typedef struct dom0_del_memtype {
-+    /* IN variables. */
-+    uint32_t handle;
-+    uint32_t reg;
-+} dom0_del_memtype_t;
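A minimal sketch of the add/del pairing described above: keep the handle and reg that DOM0_ADD_MEMTYPE returns so DOM0_DEL_MEMTYPE can tear down exactly that setting; submission via the dom0_op hypercall is omitted and the function name is illustrative.

static void memtype_pair(unsigned long pfn, unsigned long nr_pfns,
                         uint32_t mtrr_type)
{
        dom0_add_memtype_t add = {
                .pfn     = pfn,
                .nr_pfns = nr_pfns,
                .type    = mtrr_type,   /* architecture-defined MTRR type */
        };
        dom0_del_memtype_t del;

        /* ... DOM0_ADD_MEMTYPE submitted here; on success Xen fills in
         *     add.handle and add.reg ... */

        del.handle = add.handle;        /* remembered handle: precise teardown */
        del.reg    = add.reg;           /* must always be set */
        /* ... DOM0_DEL_MEMTYPE submitted here ... */
}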
-+
-+/* Read current type of an MTRR (x86-specific). */
-+#define DOM0_READ_MEMTYPE        33
-+typedef struct dom0_read_memtype {
-+    /* IN variables. */
-+    uint32_t reg;
-+    /* OUT variables. */
-+    unsigned long pfn;
-+    unsigned long nr_pfns;
-+    uint32_t type;
-+} dom0_read_memtype_t;
-+
-+/* Interface for controlling Xen software performance counters. */
-+#define DOM0_PERFCCONTROL        34
-+/* Sub-operations: */
-+#define DOM0_PERFCCONTROL_OP_RESET 1   /* Reset all counters to zero. */
-+#define DOM0_PERFCCONTROL_OP_QUERY 2   /* Get perfctr information. */
-+typedef struct dom0_perfc_desc {
-+    uint8_t      name[80];             /* name of perf counter */
-+    uint32_t     nr_vals;              /* number of values for this counter */
-+    uint32_t     vals[64];             /* array of values */
-+} dom0_perfc_desc_t;
-+typedef struct dom0_perfccontrol {
-+    /* IN variables. */
-+    uint32_t       op;                /*  DOM0_PERFCCONTROL_OP_??? */
-+    /* OUT variables. */
-+    uint32_t       nr_counters;       /*  number of counters */
-+    dom0_perfc_desc_t *desc;          /*  counter information (or NULL) */
-+} dom0_perfccontrol_t;
-+
-+#define DOM0_MICROCODE           35
-+typedef struct dom0_microcode {
-+    /* IN variables. */
-+    void    *data;                    /* Pointer to microcode data */
-+    uint32_t length;                  /* Length of microcode data. */
-+} dom0_microcode_t;
-+
-+#define DOM0_IOPORT_PERMISSION   36
-+typedef struct dom0_ioport_permission {
-+    domid_t  domain;                  /* domain to be affected */
-+    uint32_t first_port;              /* first port in range */
-+    uint32_t nr_ports;                /* size of port range */
-+    uint8_t  allow_access;            /* allow or deny access to range? */
-+} dom0_ioport_permission_t;
-+
-+#define DOM0_GETVCPUCONTEXT      37
-+typedef struct dom0_getvcpucontext {
-+    /* IN variables. */
-+    domid_t  domain;                  /* domain to be affected */
-+    uint32_t vcpu;                    /* vcpu # */
-+    /* OUT variables. */
-+    vcpu_guest_context_t *ctxt;
-+} dom0_getvcpucontext_t;
-+
-+#define DOM0_GETVCPUINFO         43
-+typedef struct dom0_getvcpuinfo {
-+    /* IN variables. */
-+    domid_t  domain;                  /* domain to be affected */
-+    uint32_t vcpu;                    /* vcpu # */
-+    /* OUT variables. */
-+    uint8_t  online;                  /* currently online (not hotplugged)? */
-+    uint8_t  blocked;                 /* blocked waiting for an event? */
-+    uint8_t  running;                 /* currently scheduled on its CPU? */
-+    uint64_t cpu_time;                /* total cpu time consumed (ns) */
-+    uint32_t cpu;                     /* current mapping   */
-+    cpumap_t cpumap;                  /* allowable mapping */
-+} dom0_getvcpuinfo_t;
-+
-+#define DOM0_GETDOMAININFOLIST   38
-+typedef struct dom0_getdomaininfolist {
-+    /* IN variables. */
-+    domid_t               first_domain;
-+    uint32_t              max_domains;
-+    dom0_getdomaininfo_t *buffer;
-+    /* OUT variables. */
-+    uint32_t              num_domains;
-+} dom0_getdomaininfolist_t;
-+
-+#define DOM0_PLATFORM_QUIRK      39  
-+#define QUIRK_NOIRQBALANCING  1
-+typedef struct dom0_platform_quirk {
-+    /* IN variables. */
-+    uint32_t quirk_id;
-+} dom0_platform_quirk_t;
-+
-+#define DOM0_PHYSICAL_MEMORY_MAP 40
-+typedef struct dom0_physical_memory_map {
-+    /* IN variables. */
-+    uint32_t max_map_entries;
-+    /* OUT variables. */
-+    uint32_t nr_map_entries;
-+    struct dom0_memory_map_entry {
-+        uint64_t start, end;
-+        uint32_t flags; /* reserved */
-+        uint8_t  is_ram;
-+    } *memory_map;
-+} dom0_physical_memory_map_t;
-+
-+#define DOM0_MAX_VCPUS 41
-+typedef struct dom0_max_vcpus {
-+    domid_t  domain;        /* domain to be affected */
-+    uint32_t max;           /* maximum number of vcpus */
-+} dom0_max_vcpus_t;
-+
-+#define DOM0_SETDOMAINHANDLE 44
-+typedef struct dom0_setdomainhandle {
-+    domid_t domain;
-+    xen_domain_handle_t handle;
-+} dom0_setdomainhandle_t;
-+
-+#define DOM0_SETDEBUGGING 45
-+typedef struct dom0_setdebugging {
-+    domid_t domain;
-+    uint8_t enable;
-+} dom0_setdebugging_t;
-+
-+#define DOM0_IRQ_PERMISSION 46
-+typedef struct dom0_irq_permission {
-+    domid_t domain;          /* domain to be affected */
-+    uint8_t pirq;
-+    uint8_t allow_access;    /* flag to specify enable/disable of IRQ access */
-+} dom0_irq_permission_t;
-+
-+#define DOM0_IOMEM_PERMISSION 47
-+typedef struct dom0_iomem_permission {
-+    domid_t  domain;          /* domain to be affected */
-+    unsigned long first_pfn;  /* first page (physical page number) in range */
-+    unsigned long nr_pfns;    /* number of pages in range (>0) */
-+    uint8_t allow_access;     /* allow (!0) or deny (0) access to range? */
-+} dom0_iomem_permission_t;
-+ 
-+#define DOM0_HYPERCALL_INIT   48
-+typedef struct dom0_hypercall_init {
-+    domid_t  domain;          /* domain to be affected */
-+    unsigned long mfn;        /* machine frame to be initialised */
-+} dom0_hypercall_init_t;
-+ 
-+typedef struct dom0_op {
-+    uint32_t cmd;
-+    uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
-+    union {
-+        struct dom0_createdomain      createdomain;
-+        struct dom0_pausedomain       pausedomain;
-+        struct dom0_unpausedomain     unpausedomain;
-+        struct dom0_destroydomain     destroydomain;
-+        struct dom0_getmemlist        getmemlist;
-+        struct sched_ctl_cmd          schedctl;
-+        struct sched_adjdom_cmd       adjustdom;
-+        struct dom0_setvcpucontext    setvcpucontext;
-+        struct dom0_getdomaininfo     getdomaininfo;
-+        struct dom0_getpageframeinfo  getpageframeinfo;
-+        struct dom0_msr               msr;
-+        struct dom0_settime           settime;
-+        struct dom0_readconsole       readconsole;
-+        struct dom0_setvcpuaffinity   setvcpuaffinity;
-+        struct dom0_tbufcontrol       tbufcontrol;
-+        struct dom0_physinfo          physinfo;
-+        struct dom0_sched_id          sched_id;
-+        struct dom0_shadow_control    shadow_control;
-+        struct dom0_setdomainmaxmem   setdomainmaxmem;
-+        struct dom0_getpageframeinfo2 getpageframeinfo2;
-+        struct dom0_add_memtype       add_memtype;
-+        struct dom0_del_memtype       del_memtype;
-+        struct dom0_read_memtype      read_memtype;
-+        struct dom0_perfccontrol      perfccontrol;
-+        struct dom0_microcode         microcode;
-+        struct dom0_ioport_permission ioport_permission;
-+        struct dom0_getvcpucontext    getvcpucontext;
-+        struct dom0_getvcpuinfo       getvcpuinfo;
-+        struct dom0_getdomaininfolist getdomaininfolist;
-+        struct dom0_platform_quirk    platform_quirk;
-+        struct dom0_physical_memory_map physical_memory_map;
-+        struct dom0_max_vcpus         max_vcpus;
-+        struct dom0_setdomainhandle   setdomainhandle;        
-+        struct dom0_setdebugging      setdebugging;
-+        struct dom0_irq_permission    irq_permission;
-+        struct dom0_iomem_permission  iomem_permission;
-+        struct dom0_hypercall_init    hypercall_init;
-+        uint8_t                  pad[128];
-+    } u;
-+} dom0_op_t;
-+
-+#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
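For orientation, the dom0_op structures above are driven by filling in the
union member for the chosen command and issuing a single hypercall. A minimal
sketch (not part of the patch), assuming the single-argument
HYPERVISOR_dom0_op() wrapper provided by this kernel tree:

/* Query vcpu 0 of a domain; returns 0 on success (illustrative sketch). */
static int example_query_vcpu0(domid_t dom, uint64_t *cpu_time)
{
    dom0_op_t op = { .cmd = DOM0_GETVCPUINFO,
                     .interface_version = DOM0_INTERFACE_VERSION };
    int rc;

    op.u.getvcpuinfo.domain = dom;
    op.u.getvcpuinfo.vcpu   = 0;

    rc = HYPERVISOR_dom0_op(&op);
    if (rc == 0 && cpu_time != NULL)
        *cpu_time = op.u.getvcpuinfo.cpu_time;   /* total ns consumed */
    return rc;
}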
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/event_channel.h linux-2.6.12-xen/include/asm-xen/xen-public/event_channel.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/event_channel.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/event_channel.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,203 @@
-+/******************************************************************************
-+ * event_channel.h
-+ * 
-+ * Event channels between domains.
-+ * 
-+ * Copyright (c) 2003-2004, K A Fraser.
-+ */
-+
-+#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
-+#define __XEN_PUBLIC_EVENT_CHANNEL_H__
-+
-+typedef uint32_t evtchn_port_t;
-+
-+/*
-+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
-+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
-+ * is allocated in <dom> and returned as <port>.
-+ * NOTES:
-+ *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
-+ *  2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
-+ */
-+#define EVTCHNOP_alloc_unbound    6
-+typedef struct evtchn_alloc_unbound {
-+    /* IN parameters */
-+    domid_t dom, remote_dom;
-+    /* OUT parameters */
-+    evtchn_port_t port;
-+} evtchn_alloc_unbound_t;
-+
-+/*
-+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
-+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
-+ * a port that is unbound and marked as accepting bindings from the calling
-+ * domain. A fresh port is allocated in the calling domain and returned as
-+ * <local_port>.
-+ * NOTES:
-+ *  1. <remote_dom> may be DOMID_SELF, allowing loopback connections.
-+ */
-+#define EVTCHNOP_bind_interdomain 0
-+typedef struct evtchn_bind_interdomain {
-+    /* IN parameters. */
-+    domid_t remote_dom;
-+    evtchn_port_t remote_port;
-+    /* OUT parameters. */
-+    evtchn_port_t local_port;
-+} evtchn_bind_interdomain_t;
-+
-+/*
-+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
-+ * vcpu.
-+ * NOTES:
-+ *  1. A virtual IRQ may be bound to at most one event channel per vcpu.
-+ *  2. The allocated event channel is bound to the specified vcpu. The binding
-+ *     may not be changed.
-+ */
-+#define EVTCHNOP_bind_virq        1
-+typedef struct evtchn_bind_virq {
-+    /* IN parameters. */
-+    uint32_t virq;
-+    uint32_t vcpu;
-+    /* OUT parameters. */
-+    evtchn_port_t port;
-+} evtchn_bind_virq_t;
-+
-+/*
-+ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
-+ * NOTES:
-+ *  1. A physical IRQ may be bound to at most one event channel per domain.
-+ *  2. Only a sufficiently-privileged domain may bind to a physical IRQ.
-+ */
-+#define EVTCHNOP_bind_pirq        2
-+typedef struct evtchn_bind_pirq {
-+    /* IN parameters. */
-+    uint32_t pirq;
-+#define BIND_PIRQ__WILL_SHARE 1
-+    uint32_t flags; /* BIND_PIRQ__* */
-+    /* OUT parameters. */
-+    evtchn_port_t port;
-+} evtchn_bind_pirq_t;
-+
-+/*
-+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
-+ * NOTES:
-+ *  1. The allocated event channel is bound to the specified vcpu. The binding
-+ *     may not be changed.
-+ */
-+#define EVTCHNOP_bind_ipi         7
-+typedef struct evtchn_bind_ipi {
-+    uint32_t vcpu;
-+    /* OUT parameters. */
-+    evtchn_port_t port;
-+} evtchn_bind_ipi_t;
-+
-+/*
-+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
-+ * interdomain then the remote end is placed in the unbound state
-+ * (EVTCHNSTAT_unbound), awaiting a new connection.
-+ */
-+#define EVTCHNOP_close            3
-+typedef struct evtchn_close {
-+    /* IN parameters. */
-+    evtchn_port_t port;
-+} evtchn_close_t;
-+
-+/*
-+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
-+ * endpoint is <port>.
-+ */
-+#define EVTCHNOP_send             4
-+typedef struct evtchn_send {
-+    /* IN parameters. */
-+    evtchn_port_t port;
-+} evtchn_send_t;
-+
-+/*
-+ * EVTCHNOP_status: Get the current status of the communication channel which
-+ * has an endpoint at <dom, port>.
-+ * NOTES:
-+ *  1. <dom> may be specified as DOMID_SELF.
-+ *  2. Only a sufficiently-privileged domain may obtain the status of an event
-+ *     channel for which <dom> is not DOMID_SELF.
-+ */
-+#define EVTCHNOP_status           5
-+typedef struct evtchn_status {
-+    /* IN parameters */
-+    domid_t  dom;
-+    evtchn_port_t port;
-+    /* OUT parameters */
-+#define EVTCHNSTAT_closed       0  /* Channel is not in use.                 */
-+#define EVTCHNSTAT_unbound      1  /* Channel awaits an interdom connection. */
-+#define EVTCHNSTAT_interdomain  2  /* Channel is connected to remote domain. */
-+#define EVTCHNSTAT_pirq         3  /* Channel is bound to a phys IRQ line.   */
-+#define EVTCHNSTAT_virq         4  /* Channel is bound to a virtual IRQ line */
-+#define EVTCHNSTAT_ipi          5  /* Channel is bound to a virtual IPI line */
-+    uint32_t status;
-+    uint32_t vcpu;                 /* VCPU to which this channel is bound.   */
-+    union {
-+        struct {
-+            domid_t dom;
-+        } unbound; /* EVTCHNSTAT_unbound */
-+        struct {
-+            domid_t dom;
-+            evtchn_port_t port;
-+        } interdomain; /* EVTCHNSTAT_interdomain */
-+        uint32_t pirq;      /* EVTCHNSTAT_pirq        */
-+        uint32_t virq;      /* EVTCHNSTAT_virq        */
-+    } u;
-+} evtchn_status_t;
-+
-+/*
-+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
-+ * event is pending.
-+ * NOTES:
-+ *  1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
-+ *     the binding. This binding cannot be changed.
-+ *  2. All other channels notify vcpu0 by default. This default is set when
-+ *     the channel is allocated (a port that is freed and subsequently reused
-+ *     has its binding reset to vcpu0).
-+ */
-+#define EVTCHNOP_bind_vcpu        8
-+typedef struct evtchn_bind_vcpu {
-+    /* IN parameters. */
-+    evtchn_port_t port;
-+    uint32_t vcpu;
-+} evtchn_bind_vcpu_t;
-+
-+/*
-+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
-+ * a notification to the appropriate VCPU if an event is pending.
-+ */
-+#define EVTCHNOP_unmask           9
-+typedef struct evtchn_unmask {
-+    /* IN parameters. */
-+    evtchn_port_t port;
-+} evtchn_unmask_t;
-+
-+typedef struct evtchn_op {
-+    uint32_t cmd; /* EVTCHNOP_* */
-+    union {
-+        evtchn_alloc_unbound_t    alloc_unbound;
-+        evtchn_bind_interdomain_t bind_interdomain;
-+        evtchn_bind_virq_t        bind_virq;
-+        evtchn_bind_pirq_t        bind_pirq;
-+        evtchn_bind_ipi_t         bind_ipi;
-+        evtchn_close_t            close;
-+        evtchn_send_t             send;
-+        evtchn_status_t           status;
-+        evtchn_bind_vcpu_t        bind_vcpu;
-+        evtchn_unmask_t           unmask;
-+    } u;
-+} evtchn_op_t;
-+
-+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
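For orientation, allocating an unbound port that a peer can later attach to
with EVTCHNOP_bind_interdomain looks roughly like this. A minimal sketch (not
part of the patch), assuming the single-argument
HYPERVISOR_event_channel_op() wrapper from this tree:

/* Allocate an unbound port accepting bindings from 'remote_dom' (sketch). */
static int example_alloc_unbound(domid_t remote_dom, evtchn_port_t *port)
{
    evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
    int rc;

    op.u.alloc_unbound.dom        = DOMID_SELF;  /* unprivileged callers must
                                                    use DOMID_SELF */
    op.u.alloc_unbound.remote_dom = remote_dom;

    rc = HYPERVISOR_event_channel_op(&op);
    if (rc == 0)
        *port = op.u.alloc_unbound.port;
    return rc;
}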
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/grant_table.h linux-2.6.12-xen/include/asm-xen/xen-public/grant_table.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/grant_table.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/grant_table.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,306 @@
-+/******************************************************************************
-+ * grant_table.h
-+ * 
-+ * Interface for granting foreign access to page frames, and receiving
-+ * page-ownership transfers.
-+ * 
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
-+#define __XEN_PUBLIC_GRANT_TABLE_H__
-+
-+
-+/***********************************
-+ * GRANT TABLE REPRESENTATION
-+ */
-+
-+/* Some rough guidelines on accessing and updating grant-table entries
-+ * in a concurrency-safe manner. For more information, Linux contains a
-+ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
-+ * 
-+ * NB. WMB is a no-op on current-generation x86 processors. However, a
-+ *     compiler barrier will still be required.
-+ * 
-+ * Introducing a valid entry into the grant table:
-+ *  1. Write ent->domid.
-+ *  2. Write ent->frame:
-+ *      GTF_permit_access:   Frame to which access is permitted.
-+ *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
-+ *                           frame, or zero if none.
-+ *  3. Write memory barrier (WMB).
-+ *  4. Write ent->flags, inc. valid type.
-+ * 
-+ * Invalidating an unused GTF_permit_access entry:
-+ *  1. flags = ent->flags.
-+ *  2. Observe that !(flags & (GTF_reading|GTF_writing)).
-+ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
-+ *  NB. No need for WMB as reuse of entry is control-dependent on success of
-+ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
-+ *
-+ * Invalidating an in-use GTF_permit_access entry:
-+ *  This cannot be done directly. Request assistance from the domain controller
-+ *  which can set a timeout on the use of a grant entry and take necessary
-+ *  action. (NB. This is not yet implemented!).
-+ * 
-+ * Invalidating an unused GTF_accept_transfer entry:
-+ *  1. flags = ent->flags.
-+ *  2. Observe that !(flags & GTF_transfer_committed). [*]
-+ *  3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
-+ *  NB. No need for WMB as reuse of entry is control-dependent on success of
-+ *      step 3, and all architectures guarantee ordering of ctrl-dep writes.
-+ *  [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
-+ *      The guest must /not/ modify the grant entry until the address of the
-+ *      transferred frame is written. It is safe for the guest to spin waiting
-+ *      for this to occur (detect by observing GTF_transfer_completed in
-+ *      ent->flags).
-+ *
-+ * Invalidating a committed GTF_accept_transfer entry:
-+ *  1. Wait for (ent->flags & GTF_transfer_completed).
-+ *
-+ * Changing a GTF_permit_access from writable to read-only:
-+ *  Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
-+ * 
-+ * Changing a GTF_permit_access from read-only to writable:
-+ *  Use SMP-safe bit-setting instruction.
-+ */
-+
-+/*
-+ * A grant table comprises a packed array of grant entries in one or more
-+ * page frames shared between Xen and a guest.
-+ * [XEN]: This field is written by Xen and read by the sharing guest.
-+ * [GST]: This field is written by the guest and read by Xen.
-+ */
-+typedef struct grant_entry {
-+    /* GTF_xxx: various type and flag information.  [XEN,GST] */
-+    uint16_t flags;
-+    /* The domain being granted foreign privileges. [GST] */
-+    domid_t  domid;
-+    /*
-+     * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
-+     * GTF_accept_transfer: Frame whose ownership is transferred by @domid. [XEN]
-+     */
-+    uint32_t frame;
-+} grant_entry_t;
-+
-+/*
-+ * Type of grant entry.
-+ *  GTF_invalid: This grant entry grants no privileges.
-+ *  GTF_permit_access: Allow @domid to map/access @frame.
-+ *  GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
-+ *                       to this guest. Xen writes the page number to @frame.
-+ */
-+#define GTF_invalid         (0U<<0)
-+#define GTF_permit_access   (1U<<0)
-+#define GTF_accept_transfer (2U<<0)
-+#define GTF_type_mask       (3U<<0)
-+
-+/*
-+ * Subflags for GTF_permit_access.
-+ *  GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
-+ *  GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
-+ *  GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
-+ */
-+#define _GTF_readonly       (2)
-+#define GTF_readonly        (1U<<_GTF_readonly)
-+#define _GTF_reading        (3)
-+#define GTF_reading         (1U<<_GTF_reading)
-+#define _GTF_writing        (4)
-+#define GTF_writing         (1U<<_GTF_writing)
-+
-+/*
-+ * Subflags for GTF_accept_transfer:
-+ *  GTF_transfer_committed: Xen sets this flag to indicate that it is committed
-+ *      to transferring ownership of a page frame. When a guest sees this flag
-+ *      it must /not/ modify the grant entry until GTF_transfer_completed is
-+ *      set by Xen.
-+ *  GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
-+ *      after reading GTF_transfer_committed. Xen will always write the frame
-+ *      address, followed by ORing this flag, in a timely manner.
-+ */
-+#define _GTF_transfer_committed (2)
-+#define GTF_transfer_committed  (1U<<_GTF_transfer_committed)
-+#define _GTF_transfer_completed (3)
-+#define GTF_transfer_completed  (1U<<_GTF_transfer_completed)
-+
-+
-+/***********************************
-+ * GRANT TABLE QUERIES AND USES
-+ */
-+
-+/*
-+ * Reference to a grant entry in a specified domain's grant table.
-+ */
-+typedef uint32_t grant_ref_t;
-+
-+/*
-+ * Handle to track a mapping created via a grant reference.
-+ */
-+typedef uint32_t grant_handle_t;
-+
-+/*
-+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
-+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
-+ * that must be presented later to destroy the mapping(s). On error, <handle>
-+ * is a negative status code.
-+ * NOTES:
-+ *  1. If GNTPIN_map_for_dev is specified then <dev_bus_addr> is the address
-+ *     via which I/O devices may access the granted frame.
-+ *  2. If GNTPIN_map_for_host is specified then a mapping will be added at
-+ *     either a host virtual address in the current address space, or at
-+ *     a PTE at the specified machine address.  The type of mapping to
-+ *     perform is selected through the GNTMAP_contains_pte flag, and the 
-+ *     address is specified in <host_addr>.
-+ *  3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
-+ *     host mapping is destroyed by other means then it is *NOT* guaranteed
-+ *     to be accounted to the correct grant reference!
-+ */
-+#define GNTTABOP_map_grant_ref        0
-+typedef struct gnttab_map_grant_ref {
-+    /* IN parameters. */
-+    uint64_t host_addr;
-+    uint32_t flags;               /* GNTMAP_* */
-+    grant_ref_t ref;
-+    domid_t  dom;
-+    /* OUT parameters. */
-+    int16_t  status;              /* GNTST_* */
-+    grant_handle_t handle;
-+    uint64_t dev_bus_addr;
-+} gnttab_map_grant_ref_t;
-+
-+/*
-+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
-+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
-+ * field is ignored. If non-zero, they must refer to a device/host mapping
-+ * that is tracked by <handle>.
-+ * NOTES:
-+ *  1. The call may fail in an undefined manner if either mapping is not
-+ *     tracked by <handle>.
-+ *  2. After executing a batch of unmaps, it is guaranteed that no stale
-+ *     mappings will remain in the device or host TLBs.
-+ */
-+#define GNTTABOP_unmap_grant_ref      1
-+typedef struct gnttab_unmap_grant_ref {
-+    /* IN parameters. */
-+    uint64_t host_addr;
-+    uint64_t dev_bus_addr;
-+    grant_handle_t handle;
-+    /* OUT parameters. */
-+    int16_t  status;              /* GNTST_* */
-+} gnttab_unmap_grant_ref_t;
-+
-+/*
-+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
-+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
-+ * Only <nr_frames> addresses are written, even if the table is larger.
-+ * NOTES:
-+ *  1. <dom> may be specified as DOMID_SELF.
-+ *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
-+ *  3. Xen may not support more than a single grant-table page per domain.
-+ */
-+#define GNTTABOP_setup_table          2
-+typedef struct gnttab_setup_table {
-+    /* IN parameters. */
-+    domid_t  dom;
-+    uint32_t nr_frames;
-+    /* OUT parameters. */
-+    int16_t  status;              /* GNTST_* */
-+    unsigned long *frame_list;
-+} gnttab_setup_table_t;
-+
-+/*
-+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
-+ * xen console. Debugging use only.
-+ */
-+#define GNTTABOP_dump_table           3
-+typedef struct gnttab_dump_table {
-+    /* IN parameters. */
-+    domid_t dom;
-+    /* OUT parameters. */
-+    int16_t status;               /* GNTST_* */
-+} gnttab_dump_table_t;
-+
-+/*
-+ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The
-+ * foreign domain has previously registered its interest in the transfer via
-+ * <domid, ref>.
-+ * 
-+ * Note that, even if the transfer fails, the specified page no longer belongs
-+ * to the calling domain *unless* the error is GNTST_bad_page.
-+ */
-+#define GNTTABOP_transfer                4
-+typedef struct {
-+    /* IN parameters. */
-+    unsigned long mfn;
-+    domid_t       domid;
-+    grant_ref_t   ref;
-+    /* OUT parameters. */
-+    int16_t       status;
-+} gnttab_transfer_t;
-+
-+/*
-+ * Bitfield values for update_pin_status.flags.
-+ */
-+ /* Map the grant entry for access by I/O devices. */
-+#define _GNTMAP_device_map      (0)
-+#define GNTMAP_device_map       (1<<_GNTMAP_device_map)
-+ /* Map the grant entry for access by host CPUs. */
-+#define _GNTMAP_host_map        (1)
-+#define GNTMAP_host_map         (1<<_GNTMAP_host_map)
-+ /* Accesses to the granted frame will be restricted to read-only access. */
-+#define _GNTMAP_readonly        (2)
-+#define GNTMAP_readonly         (1<<_GNTMAP_readonly)
-+ /*
-+  * GNTMAP_host_map subflag:
-+  *  0 => The host mapping is usable only by the guest OS.
-+  *  1 => The host mapping is usable by guest OS + current application.
-+  */
-+#define _GNTMAP_application_map (3)
-+#define GNTMAP_application_map  (1<<_GNTMAP_application_map)
-+
-+ /*
-+  * GNTMAP_contains_pte subflag:
-+  *  0 => This map request contains a host virtual address.
-+  *  1 => This map request contains the machine address of the PTE to update.
-+  */ 
-+#define _GNTMAP_contains_pte    (4)
-+#define GNTMAP_contains_pte     (1<<_GNTMAP_contains_pte)
-+
-+/*
-+ * Values for error status returns. All errors are -ve.
-+ */
-+#define GNTST_okay             (0)  /* Normal return.                        */
-+#define GNTST_general_error    (-1) /* General undefined error.              */
-+#define GNTST_bad_domain       (-2) /* Unrecognised domain id.               */
-+#define GNTST_bad_gntref       (-3) /* Unrecognised or inappropriate gntref. */
-+#define GNTST_bad_handle       (-4) /* Unrecognised or inappropriate handle. */
-+#define GNTST_bad_virt_addr    (-5) /* Inappropriate virtual address to map. */
-+#define GNTST_bad_dev_addr     (-6) /* Inappropriate device address to unmap.*/
-+#define GNTST_no_device_space  (-7) /* Out of space in I/O MMU.              */
-+#define GNTST_permission_denied (-8) /* Not enough privilege for operation.  */
-+#define GNTST_bad_page         (-9) /* Specified page was invalid for op.    */
-+
-+#define GNTTABOP_error_msgs {                   \
-+    "okay",                                     \
-+    "undefined error",                          \
-+    "unrecognised domain id",                   \
-+    "invalid grant reference",                  \
-+    "invalid mapping handle",                   \
-+    "invalid virtual address",                  \
-+    "invalid device address",                   \
-+    "no spare translation slot in the I/O MMU", \
-+    "permission denied",                        \
-+    "bad page"                                  \
-+}
-+
-+#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
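For orientation, the update protocol described in the header comments maps
onto code roughly as follows. A minimal sketch (not part of the patch); wmb()
and cmpxchg() are assumed to be the usual Linux barrier/atomic primitives and
'gnttab' a mapping of the shared grant table:

/* Grant 'peer' access to one frame (introduce a valid entry). */
static void example_grant_access(grant_entry_t *gnttab, grant_ref_t ref,
                                 domid_t peer, uint32_t frame)
{
    gnttab[ref].domid = peer;
    gnttab[ref].frame = frame;
    wmb();                              /* domid/frame visible before flags */
    gnttab[ref].flags = GTF_permit_access;
}

/* Revoke an unused entry; returns 1 on success, 0 if still in use. */
static int example_end_access(grant_entry_t *gnttab, grant_ref_t ref)
{
    uint16_t flags = gnttab[ref].flags;

    if (flags & (GTF_reading | GTF_writing))
        return 0;                       /* peer still has it mapped */
    return cmpxchg(&gnttab[ref].flags, flags, 0) == flags;
}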
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/hvm_info_table.h linux-2.6.12-xen/include/asm-xen/xen-public/hvm/hvm_info_table.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/hvm_info_table.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/hvm/hvm_info_table.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,24 @@
-+/******************************************************************************
-+ * hvm/hvm_info_table.h
-+ * 
-+ * HVM parameter and information table, written into guest memory map.
-+ */
-+
-+#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
-+#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
-+
-+#define HVM_INFO_PFN         0x09F
-+#define HVM_INFO_OFFSET      0x800
-+#define HVM_INFO_PADDR       ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
-+
-+struct hvm_info_table {
-+    char        signature[8]; /* "HVM INFO" */
-+    uint32_t    length;
-+    uint8_t     checksum;
-+    uint8_t     acpi_enabled;
-+    uint8_t     apic_enabled;
-+    uint8_t     pad[1];
-+    uint32_t    nr_vcpus;
-+};
-+
-+#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/ioreq.h linux-2.6.12-xen/include/asm-xen/xen-public/hvm/ioreq.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/ioreq.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/hvm/ioreq.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,90 @@
-+/*
-+ * ioreq.h: I/O request definitions for device models
-+ * Copyright (c) 2004, Intel Corporation.
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms and conditions of the GNU General Public License,
-+ * version 2, as published by the Free Software Foundation.
-+ *
-+ * This program is distributed in the hope it will be useful, but WITHOUT
-+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-+ * more details.
-+ *
-+ * You should have received a copy of the GNU General Public License along with
-+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
-+ * Place - Suite 330, Boston, MA 02111-1307 USA.
-+ *
-+ */
-+
-+#ifndef _IOREQ_H_
-+#define _IOREQ_H_
-+
-+#define IOREQ_READ      1
-+#define IOREQ_WRITE     0
-+
-+#define STATE_INVALID           0
-+#define STATE_IOREQ_READY       1
-+#define STATE_IOREQ_INPROCESS   2
-+#define STATE_IORESP_READY      3
-+#define STATE_IORESP_HOOK       4
-+
-+#define IOREQ_TYPE_PIO          0 /* pio */
-+#define IOREQ_TYPE_COPY         1 /* mmio ops */
-+#define IOREQ_TYPE_AND          2
-+#define IOREQ_TYPE_OR           3
-+#define IOREQ_TYPE_XOR          4
-+
-+/*
-+ * VMExit dispatcher should cooperate with instruction decoder to
-+ * prepare this structure and notify service OS and DM by sending
-+ * virq
-+ */
-+typedef struct {
-+    uint64_t addr;          /*  physical address            */
-+    uint64_t size;          /*  size in bytes               */
-+    uint64_t count;         /*  for rep prefixes            */
-+    union {
-+        uint64_t data;      /*  data                        */
-+        void    *pdata;     /*  pointer to data             */
-+    } u;
-+    uint8_t state:4;
-+    uint8_t pdata_valid:1;  /* if 1, use pdata above        */
-+    uint8_t dir:1;          /*  1=read, 0=write             */
-+    uint8_t df:1;
-+    uint8_t type;           /* I/O type                     */
-+} ioreq_t;
-+
-+#define MAX_VECTOR      256
-+#define BITS_PER_BYTE   8
-+#define INTR_LEN        (MAX_VECTOR/(BITS_PER_BYTE * sizeof(uint64_t)))
-+#define INTR_LEN_32     (MAX_VECTOR/(BITS_PER_BYTE * sizeof(uint32_t)))
-+
-+typedef struct {
-+    uint16_t    pic_elcr;
-+    uint16_t    pic_irr;
-+    uint16_t    pic_last_irr;
-+    uint16_t    pic_clear_irr;
-+    int         eport; /* Event channel port */
-+} global_iodata_t;
-+
-+typedef struct {
-+    ioreq_t     vp_ioreq;
-+} vcpu_iodata_t;
-+
-+typedef struct {
-+    global_iodata_t sp_global;
-+    vcpu_iodata_t   vcpu_iodata[1];
-+} shared_iopage_t;
-+
-+#endif /* _IOREQ_H_ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/vmx_assist.h linux-2.6.12-xen/include/asm-xen/xen-public/hvm/vmx_assist.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/hvm/vmx_assist.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/hvm/vmx_assist.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,97 @@
-+/*
-+ * vmx_assist.h: Context definitions for the VMXASSIST world switch.
-+ *
-+ * Leendert van Doorn, leendert at watson.ibm.com
-+ * Copyright (c) 2005, International Business Machines Corporation.
-+ */
-+
-+#ifndef _VMX_ASSIST_H_
-+#define _VMX_ASSIST_H_
-+
-+#define VMXASSIST_BASE         0xD0000
-+#define VMXASSIST_MAGIC        0x17101966
-+#define VMXASSIST_MAGIC_OFFSET (VMXASSIST_BASE+8)
-+
-+#define VMXASSIST_NEW_CONTEXT (VMXASSIST_BASE + 12)
-+#define VMXASSIST_OLD_CONTEXT (VMXASSIST_NEW_CONTEXT + 4)
-+
-+#ifndef __ASSEMBLY__
-+
-+union vmcs_arbytes {
-+    struct arbyte_fields {
-+        unsigned int seg_type : 4,
-+            s         : 1,
-+            dpl       : 2,
-+            p         : 1, 
-+            reserved0 : 4,
-+            avl       : 1,
-+            reserved1 : 1,     
-+            default_ops_size: 1,
-+            g         : 1,
-+            null_bit  : 1, 
-+            reserved2 : 15;
-+    } fields;
-+    unsigned int bytes;
-+};
-+
-+/*
-+ * World switch state
-+ */
-+typedef struct vmx_assist_context {
-+    uint32_t  eip;        /* execution pointer */
-+    uint32_t  esp;        /* stack pointer */
-+    uint32_t  eflags;     /* flags register */
-+    uint32_t  cr0;
-+    uint32_t  cr3;        /* page table directory */
-+    uint32_t  cr4;
-+    uint32_t  idtr_limit; /* idt */
-+    uint32_t  idtr_base;
-+    uint32_t  gdtr_limit; /* gdt */
-+    uint32_t  gdtr_base;
-+    uint32_t  cs_sel;     /* cs selector */
-+    uint32_t  cs_limit;
-+    uint32_t  cs_base;
-+    union vmcs_arbytes cs_arbytes;
-+    uint32_t  ds_sel;     /* ds selector */
-+    uint32_t  ds_limit;
-+    uint32_t  ds_base;
-+    union vmcs_arbytes ds_arbytes;
-+    uint32_t  es_sel;     /* es selector */
-+    uint32_t  es_limit;
-+    uint32_t  es_base;
-+    union vmcs_arbytes es_arbytes;
-+    uint32_t  ss_sel;     /* ss selector */
-+    uint32_t  ss_limit;
-+    uint32_t  ss_base;
-+    union vmcs_arbytes ss_arbytes;
-+    uint32_t  fs_sel;     /* fs selector */
-+    uint32_t  fs_limit;
-+    uint32_t  fs_base;
-+    union vmcs_arbytes fs_arbytes;
-+    uint32_t  gs_sel;     /* gs selector */
-+    uint32_t  gs_limit;
-+    uint32_t  gs_base;
-+    union vmcs_arbytes gs_arbytes;
-+    uint32_t  tr_sel;     /* task selector */
-+    uint32_t  tr_limit;
-+    uint32_t  tr_base;
-+    union vmcs_arbytes tr_arbytes;
-+    uint32_t  ldtr_sel;   /* ldtr selector */
-+    uint32_t  ldtr_limit;
-+    uint32_t  ldtr_base;
-+    union vmcs_arbytes ldtr_arbytes;
-+} vmx_assist_context_t;
-+
-+#endif /* __ASSEMBLY__ */
-+
-+#endif /* _VMX_ASSIST_H_ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/blkif.h linux-2.6.12-xen/include/asm-xen/xen-public/io/blkif.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/io/blkif.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/io/blkif.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,85 @@
-+/******************************************************************************
-+ * blkif.h
-+ * 
-+ * Unified block-device I/O interface for Xen guest OSes.
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_BLKIF_H__
-+#define __XEN_PUBLIC_IO_BLKIF_H__
-+
-+#include "ring.h"
-+#include "../grant_table.h"
-+
-+/*
-+ * Front->back notifications: When enqueuing a new request, sending a
-+ * notification can be made conditional on req_event (i.e., the generic
-+ * hold-off mechanism provided by the ring macros). Backends must set
-+ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
-+ * 
-+ * Back->front notifications: When enqueuing a new response, sending a
-+ * notification can be made conditional on rsp_event (i.e., the generic
-+ * hold-off mechanism provided by the ring macros). Frontends must set
-+ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
-+ */
-+
-+#ifndef blkif_vdev_t
-+#define blkif_vdev_t   uint16_t
-+#endif
-+#define blkif_sector_t uint64_t
-+
-+#define BLKIF_OP_READ      0
-+#define BLKIF_OP_WRITE     1
-+
-+/*
-+ * Maximum scatter/gather segments per request.
-+ * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
-+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
-+ */
-+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
-+
-+typedef struct blkif_request {
-+    uint8_t        operation;    /* BLKIF_OP_???                         */
-+    uint8_t        nr_segments;  /* number of segments                   */
-+    blkif_vdev_t   handle;       /* only for read/write requests         */
-+    uint64_t       id;           /* private guest value, echoed in resp  */
-+    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-+    struct blkif_request_segment {
-+        grant_ref_t gref;        /* reference to I/O buffer frame        */
-+        /* @first_sect: first sector in frame to transfer (inclusive).   */
-+        /* @last_sect: last sector in frame to transfer (inclusive).     */
-+        uint8_t     first_sect, last_sect;
-+    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-+} blkif_request_t;
-+
-+typedef struct blkif_response {
-+    uint64_t        id;              /* copied from request */
-+    uint8_t         operation;       /* copied from request */
-+    int16_t         status;          /* BLKIF_RSP_???       */
-+} blkif_response_t;
-+
-+#define BLKIF_RSP_ERROR  -1 /* non-specific 'error' */
-+#define BLKIF_RSP_OKAY    0 /* non-specific 'okay'  */
-+
-+/*
-+ * Generate blkif ring structures and types.
-+ */
-+
-+DEFINE_RING_TYPES(blkif, blkif_request_t, blkif_response_t);
-+
-+#define VDISK_CDROM        0x1
-+#define VDISK_REMOVABLE    0x2
-+#define VDISK_READONLY     0x4
-+
-+#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
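For orientation, a front end queues a request by filling the next free slot
and advancing its private producer index. A minimal sketch (not part of the
patch), assuming 'ring' is a blkif_front_ring_t already set up with
FRONT_RING_INIT() and 'gref' a grant reference covering the I/O buffer frame:

/* Queue a one-segment read of a whole 4K frame (illustrative sketch). */
static void example_queue_read(blkif_front_ring_t *ring, blkif_vdev_t handle,
                               blkif_sector_t sector, grant_ref_t gref,
                               uint64_t id)
{
    blkif_request_t *req = RING_GET_REQUEST(ring, ring->req_prod_pvt);

    req->operation         = BLKIF_OP_READ;
    req->nr_segments       = 1;
    req->handle            = handle;
    req->id                = id;            /* echoed back in the response */
    req->sector_number     = sector;
    req->seg[0].gref       = gref;
    req->seg[0].first_sect = 0;
    req->seg[0].last_sect  = 7;             /* 8 sectors of 512 bytes */

    ring->req_prod_pvt++;
    /* then RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() and an event-channel send */
}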
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/console.h linux-2.6.12-xen/include/asm-xen/xen-public/io/console.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/io/console.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/io/console.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,33 @@
-+/******************************************************************************
-+ * console.h
-+ * 
-+ * Console I/O interface for Xen guest OSes.
-+ * 
-+ * Copyright (c) 2005, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
-+#define __XEN_PUBLIC_IO_CONSOLE_H__
-+
-+typedef uint32_t XENCONS_RING_IDX;
-+
-+#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
-+
-+struct xencons_interface {
-+    char in[1024];
-+    char out[2048];
-+    XENCONS_RING_IDX in_cons, in_prod;
-+    XENCONS_RING_IDX out_cons, out_prod;
-+};
-+
-+#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
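For orientation, console output is produced by copying bytes into the 'out'
ring and then advancing out_prod. A minimal sketch (not part of the patch);
wmb() is assumed to be the usual Linux write barrier:

/* Copy as much of 'data' as fits into the output ring (sketch). */
static int example_console_write(struct xencons_interface *intf,
                                 const char *data, int len)
{
    int sent = 0;
    XENCONS_RING_IDX cons = intf->out_cons, prod = intf->out_prod;

    while (sent < len && (prod - cons) < sizeof(intf->out))
        intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];

    wmb();                      /* bytes visible before the producer index */
    intf->out_prod = prod;
    return sent;                /* caller would then notify the backend */
}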
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/netif.h linux-2.6.12-xen/include/asm-xen/xen-public/io/netif.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/io/netif.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/io/netif.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,76 @@
-+/******************************************************************************
-+ * netif.h
-+ * 
-+ * Unified network-device I/O interface for Xen guest OSes.
-+ * 
-+ * Copyright (c) 2003-2004, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_NETIF_H__
-+#define __XEN_PUBLIC_IO_NETIF_H__
-+
-+#include "ring.h"
-+#include "../grant_table.h"
-+
-+/*
-+ * Note that there is *never* any need to notify the backend when enqueuing
-+ * receive requests (netif_rx_request_t). Notifications after enqueuing any
-+ * other type of message should be conditional on the appropriate req_event
-+ * or rsp_event field in the shared ring.
-+ */
-+
-+/* Protocol checksum field is blank in the packet (hardware offload)? */
-+#define _NETTXF_csum_blank (0)
-+#define  NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
-+
-+typedef struct netif_tx_request {
-+    grant_ref_t gref;      /* Reference to buffer page */
-+    uint16_t offset;       /* Offset within buffer page */
-+    uint16_t flags;        /* NETTXF_* */
-+    uint16_t id;           /* Echoed in response message. */
-+    uint16_t size;         /* Packet size in bytes.       */
-+} netif_tx_request_t;
-+
-+typedef struct netif_tx_response {
-+    uint16_t id;
-+    int16_t  status;       /* NETIF_RSP_* */
-+} netif_tx_response_t;
-+
-+typedef struct {
-+    uint16_t    id;        /* Echoed in response message.        */
-+    grant_ref_t gref;      /* Reference to incoming granted frame */
-+} netif_rx_request_t;
-+
-+/* Protocol checksum already validated (e.g., performed by hardware)? */
-+#define _NETRXF_csum_valid (0)
-+#define  NETRXF_csum_valid (1U<<_NETRXF_csum_valid)
-+
-+typedef struct {
-+    uint16_t id;
-+    uint16_t offset;       /* Offset in page of start of received packet  */
-+    uint16_t flags;        /* NETRXF_* */
-+    int16_t  status;       /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
-+} netif_rx_response_t;
-+
-+/*
-+ * Generate netif ring structures and types.
-+ */
-+
-+DEFINE_RING_TYPES(netif_tx, netif_tx_request_t, netif_tx_response_t);
-+DEFINE_RING_TYPES(netif_rx, netif_rx_request_t, netif_rx_response_t);
-+
-+#define NETIF_RSP_DROPPED         -2
-+#define NETIF_RSP_ERROR           -1
-+#define NETIF_RSP_OKAY             0
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
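For orientation, a transmit request is queued much like the blkif case. A
minimal sketch (not part of the patch), assuming 'ring' is a
netif_tx_front_ring_t initialised with FRONT_RING_INIT() and 'gref' a grant
reference covering the frame that holds the packet:

/* Queue one packet for transmission (illustrative sketch). */
static void example_queue_tx(netif_tx_front_ring_t *ring, grant_ref_t gref,
                             uint16_t offset, uint16_t size, uint16_t id)
{
    netif_tx_request_t *req = RING_GET_REQUEST(ring, ring->req_prod_pvt);

    req->gref   = gref;
    req->offset = offset;
    req->size   = size;
    req->id     = id;      /* echoed back in the matching tx response */
    req->flags  = 0;       /* or NETTXF_csum_blank for checksum offload */

    ring->req_prod_pvt++;
    /* then RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() and an event-channel send */
}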
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/ring.h linux-2.6.12-xen/include/asm-xen/xen-public/io/ring.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/io/ring.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/io/ring.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,270 @@
-+/******************************************************************************
-+ * ring.h
-+ * 
-+ * Shared producer-consumer ring macros.
-+ *
-+ * Tim Deegan and Andrew Warfield November 2004.
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_RING_H__
-+#define __XEN_PUBLIC_IO_RING_H__
-+
-+typedef unsigned int RING_IDX;
-+
-+/* Round a 32-bit unsigned constant down to the nearest power of two. */
-+#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
-+#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
-+#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
-+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
-+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
-+
-+/*
-+ * Calculate size of a shared ring, given the total available space for the
-+ * ring and indexes (_sz), and the name tag of the request/response structure.
-+ * A ring contains as many entries as will fit, rounded down to the nearest 
-+ * power of two (so we can mask with (size-1) to loop around).
-+ */
-+#define __RING_SIZE(_s, _sz) \
-+    (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
-+
-+/*
-+ * Macros to make the correct C datatypes for a new kind of ring.
-+ * 
-+ * To make a new ring datatype, you need to have two message structures,
-+ * let's say request_t, and response_t already defined.
-+ *
-+ * In a header where you want the ring datatype declared, you then do:
-+ *
-+ *     DEFINE_RING_TYPES(mytag, request_t, response_t);
-+ *
-+ * These expand out to give you a set of types, as you can see below.
-+ * The most important of these are:
-+ *  
-+ *     mytag_sring_t      - The shared ring.
-+ *     mytag_front_ring_t - The 'front' half of the ring.
-+ *     mytag_back_ring_t  - The 'back' half of the ring.
-+ *
-+ * To initialize a ring in your code you need to know the location and size
-+ * of the shared memory area (PAGE_SIZE, for instance). To initialise
-+ * the front half:
-+ *
-+ *     mytag_front_ring_t front_ring;
-+ *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
-+ *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
-+ *
-+ * Initializing the back follows similarly (note that only the front
-+ * initializes the shared ring):
-+ *
-+ *     mytag_back_ring_t back_ring;
-+ *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
-+ */
-+         
-+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
-+                                                                        \
-+/* Shared ring entry */                                                 \
-+union __name##_sring_entry {                                            \
-+    __req_t req;                                                        \
-+    __rsp_t rsp;                                                        \
-+};                                                                      \
-+                                                                        \
-+/* Shared ring page */                                                  \
-+struct __name##_sring {                                                 \
-+    RING_IDX req_prod, req_event;                                       \
-+    RING_IDX rsp_prod, rsp_event;                                       \
-+    uint8_t  pad[48];                                                   \
-+    union __name##_sring_entry ring[1]; /* variable-length */           \
-+};                                                                      \
-+                                                                        \
-+/* "Front" end's private variables */                                   \
-+struct __name##_front_ring {                                            \
-+    RING_IDX req_prod_pvt;                                              \
-+    RING_IDX rsp_cons;                                                  \
-+    unsigned int nr_ents;                                               \
-+    struct __name##_sring *sring;                                       \
-+};                                                                      \
-+                                                                        \
-+/* "Back" end's private variables */                                    \
-+struct __name##_back_ring {                                             \
-+    RING_IDX rsp_prod_pvt;                                              \
-+    RING_IDX req_cons;                                                  \
-+    unsigned int nr_ents;                                               \
-+    struct __name##_sring *sring;                                       \
-+};                                                                      \
-+                                                                        \
-+/* Syntactic sugar */                                                   \
-+typedef struct __name##_sring __name##_sring_t;                         \
-+typedef struct __name##_front_ring __name##_front_ring_t;               \
-+typedef struct __name##_back_ring __name##_back_ring_t
-+
-+/*
-+ * Macros for manipulating rings.  
-+ * 
-+ * FRONT_RING_whatever works on the "front end" of a ring: here 
-+ * requests are pushed on to the ring and responses taken off it.
-+ * 
-+ * BACK_RING_whatever works on the "back end" of a ring: here 
-+ * requests are taken off the ring and responses put on.
-+ * 
-+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.  
-+ * This is OK in 1-for-1 request-response situations where the 
-+ * requestor (front end) never has more than RING_SIZE()-1
-+ * outstanding requests.
-+ */
-+
-+/* Initialising empty rings */
-+#define SHARED_RING_INIT(_s) do {                                       \
-+    (_s)->req_prod  = (_s)->rsp_prod  = 0;                              \
-+    (_s)->req_event = (_s)->rsp_event = 1;                              \
-+    memset((_s)->pad, 0, sizeof((_s)->pad));                            \
-+} while(0)
-+
-+#define FRONT_RING_INIT(_r, _s, __size) do {                            \
-+    (_r)->req_prod_pvt = 0;                                             \
-+    (_r)->rsp_cons = 0;                                                 \
-+    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
-+    (_r)->sring = (_s);                                                 \
-+} while (0)
-+
-+#define BACK_RING_INIT(_r, _s, __size) do {                             \
-+    (_r)->rsp_prod_pvt = 0;                                             \
-+    (_r)->req_cons = 0;                                                 \
-+    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
-+    (_r)->sring = (_s);                                                 \
-+} while (0)
-+
-+/* Initialize to existing shared indexes -- for recovery */
-+#define FRONT_RING_ATTACH(_r, _s, __size) do {                          \
-+    (_r)->sring = (_s);                                                 \
-+    (_r)->req_prod_pvt = (_s)->req_prod;                                \
-+    (_r)->rsp_cons = (_s)->rsp_prod;                                    \
-+    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
-+} while (0)
-+
-+#define BACK_RING_ATTACH(_r, _s, __size) do {                           \
-+    (_r)->sring = (_s);                                                 \
-+    (_r)->rsp_prod_pvt = (_s)->rsp_prod;                                \
-+    (_r)->req_cons = (_s)->req_prod;                                    \
-+    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
-+} while (0)
-+
-+/* How big is this ring? */
-+#define RING_SIZE(_r)                                                   \
-+    ((_r)->nr_ents)
-+
-+/* Test if there is an empty slot available on the front ring. 
-+ * (This is only meaningful from the front. )
-+ */
-+#define RING_FULL(_r)                                                   \
-+    (((_r)->req_prod_pvt - (_r)->rsp_cons) == RING_SIZE(_r))
-+
-+/* Test if there are outstanding messages to be processed on a ring. */
-+#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
-+   ( (_r)->rsp_cons != (_r)->sring->rsp_prod )
-+   
-+#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
-+   ( ((_r)->req_cons != (_r)->sring->req_prod ) &&                      \
-+     (((_r)->req_cons - (_r)->rsp_prod_pvt) !=                          \
-+      RING_SIZE(_r)) )
-+      
-+/* Direct access to individual ring elements, by index. */
-+#define RING_GET_REQUEST(_r, _idx)                                      \
-+ (&((_r)->sring->ring[                                                  \
-+     ((_idx) & (RING_SIZE(_r) - 1))                                     \
-+     ].req))
-+
-+#define RING_GET_RESPONSE(_r, _idx)                                     \
-+ (&((_r)->sring->ring[                                                  \
-+     ((_idx) & (RING_SIZE(_r) - 1))                                     \
-+     ].rsp))   
-+    
-+/* Loop termination condition: Would the specified index overflow the ring? */
-+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
-+    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
-+
-+#define RING_PUSH_REQUESTS(_r) do {                                     \
-+    wmb(); /* back sees requests /before/ updated producer index */     \
-+    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
-+} while (0)
-+
-+#define RING_PUSH_RESPONSES(_r) do {                                    \
-+    wmb(); /* front sees responses /before/ updated producer index */   \
-+    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
-+} while (0)
-+
-+/*
-+ * Notification hold-off (req_event and rsp_event):
-+ * 
-+ * When queueing requests or responses on a shared ring, it may not always be
-+ * necessary to notify the remote end. For example, if requests are in flight
-+ * in a backend, the front may be able to queue further requests without
-+ * notifying the back (if the back checks for new requests when it queues
-+ * responses).
-+ * 
-+ * When enqueuing requests or responses:
-+ * 
-+ *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
-+ *  is a boolean return value. True indicates that the receiver requires an
-+ *  asynchronous notification.
-+ * 
-+ * After dequeuing requests or responses (before sleeping the connection):
-+ * 
-+ *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
-+ *  The second argument is a boolean return value. True indicates that there
-+ *  are pending messages on the ring (i.e., the connection should not be put
-+ *  to sleep).
-+ *  
-+ *  These macros will set the req_event/rsp_event field to trigger a
-+ *  notification on the very next message that is enqueued. If you want to
-+ *  create batches of work (i.e., only receive a notification after several
-+ *  messages have been enqueued) then you will need to create a customised
-+ *  version of the FINAL_CHECK macro in your own code, which sets the event
-+ *  field appropriately.
-+ */
-+
-+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
-+    RING_IDX __old = (_r)->sring->req_prod;                             \
-+    RING_IDX __new = (_r)->req_prod_pvt;                                \
-+    wmb(); /* back sees requests /before/ updated producer index */     \
-+    (_r)->sring->req_prod = __new;                                      \
-+    mb(); /* back sees new requests /before/ we check req_event */      \
-+    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
-+                 (RING_IDX)(__new - __old));                            \
-+} while (0)
-+
-+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
-+    RING_IDX __old = (_r)->sring->rsp_prod;                             \
-+    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
-+    wmb(); /* front sees responses /before/ updated producer index */   \
-+    (_r)->sring->rsp_prod = __new;                                      \
-+    mb(); /* front sees new responses /before/ we check rsp_event */    \
-+    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
-+                 (RING_IDX)(__new - __old));                            \
-+} while (0)
-+
-+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
-+    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
-+    if (_work_to_do) break;                                             \
-+    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
-+    mb();                                                               \
-+    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
-+} while (0)
-+
-+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
-+    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
-+    if (_work_to_do) break;                                             \
-+    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
-+    mb();                                                               \
-+    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
-+} while (0)
-+
-+#endif /* __XEN_PUBLIC_IO_RING_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
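For orientation, a back end typically consumes requests and pushes responses
using the hold-off macros described above. A minimal sketch (not part of the
patch), reusing the blkif types defined earlier in this patch; rmb() is
assumed to be the usual Linux read barrier and the event-channel notification
is left as a comment:

/* Drain pending requests and answer each one (illustrative sketch). */
static void example_backend_poll(blkif_back_ring_t *ring)
{
    int more_to_do = 1;

    while (more_to_do) {
        RING_IDX rp = ring->sring->req_prod;
        rmb();          /* see the requests before reading them */

        while (ring->req_cons != rp &&
               !RING_REQUEST_CONS_OVERFLOW(ring, ring->req_cons)) {
            blkif_request_t  *req = RING_GET_REQUEST(ring, ring->req_cons);
            blkif_response_t *rsp;
            int notify;

            ring->req_cons++;

            rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
            rsp->id        = req->id;
            rsp->operation = req->operation;
            rsp->status    = BLKIF_RSP_OKAY;  /* real work omitted here */
            ring->rsp_prod_pvt++;

            RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
            if (notify) {
                /* a real back end would notify the front end's evtchn here */
            }
        }

        RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
    }
}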
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/tpmif.h linux-2.6.12-xen/include/asm-xen/xen-public/io/tpmif.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/io/tpmif.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/io/tpmif.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,56 @@
-+/******************************************************************************
-+ * tpmif.h
-+ *
-+ * TPM I/O interface for Xen guest OSes.
-+ *
-+ * Copyright (c) 2005, IBM Corporation
-+ *
-+ * Author: Stefan Berger, stefanb at us.ibm.com
-+ * Grant table support: Mahadevan Gomathisankaran
-+ *
-+ * This code has been derived from tools/libxc/xen/io/netif.h
-+ *
-+ * Copyright (c) 2003-2004, Keir Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_IO_TPMIF_H__
-+#define __XEN_PUBLIC_IO_TPMIF_H__
-+
-+#include "../grant_table.h"
-+
-+typedef struct {
-+    unsigned long addr;   /* Machine address of packet.   */
-+    grant_ref_t ref;      /* grant table access reference */
-+    uint16_t id;          /* Echoed in response message.  */
-+    uint16_t size;        /* Packet size in bytes.        */
-+} tpmif_tx_request_t;
-+
-+/*
-+ * The TPMIF_TX_RING_SIZE defines the number of pages the
-+ * front-end and backend can exchange (= size of array).
-+ */
-+typedef uint32_t TPMIF_RING_IDX;
-+
-+#define TPMIF_TX_RING_SIZE 10
-+
-+/* This structure must fit in a memory page. */
-+
-+typedef struct {
-+    tpmif_tx_request_t req;
-+} tpmif_ring_t;
-+
-+typedef struct {
-+    tpmif_ring_t ring[TPMIF_TX_RING_SIZE];
-+} tpmif_tx_interface_t;
-+
-+#endif
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/xenbus.h linux-2.6.12-xen/include/asm-xen/xen-public/io/xenbus.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/io/xenbus.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/io/xenbus.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,44 @@
-+/*****************************************************************************
-+ * xenbus.h
-+ *
-+ * Xenbus protocol details.
-+ *
-+ * Copyright (C) 2005 XenSource Ltd.
-+ */
-+
-+#ifndef _XEN_XENBUS_H
-+#define _XEN_XENBUS_H
-+
-+
-+/* The state of either end of the Xenbus, i.e. the current communication
-+   status of initialisation across the bus.  States here imply nothing about
-+   the state of the connection between the driver and the kernel's device
-+   layers.  */
-+typedef enum
-+{
-+  XenbusStateUnknown      = 0,
-+  XenbusStateInitialising = 1,
-+  XenbusStateInitWait     = 2,  /* Finished early initialisation, but waiting
-+                                   for information from the peer or hotplug
-+				   scripts. */
-+  XenbusStateInitialised  = 3,  /* Initialised and waiting for a connection
-+				   from the peer. */
-+  XenbusStateConnected    = 4,
-+  XenbusStateClosing      = 5,  /* The device is being closed due to an error
-+				   or an unplug event. */
-+  XenbusStateClosed       = 6
-+
-+} XenbusState;
-+
-+
-+#endif /* _XEN_XENBUS_H */
-+
-+/*
-+ * Local variables:
-+ *  c-file-style: "linux"
-+ *  indent-tabs-mode: t
-+ *  c-indent-level: 8
-+ *  c-basic-offset: 8
-+ *  tab-width: 8
-+ * End:
-+ */
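
A minimal sketch of how a frontend driver might react to the peer's state using the enum above; frontend_connect() and frontend_close() are hypothetical helpers.

    static void otherend_changed(XenbusState backend_state)
    {
        switch (backend_state) {
        case XenbusStateInitWait:
        case XenbusStateInitialised:
        case XenbusStateConnected:
            frontend_connect();              /* hypothetical */
            break;
        case XenbusStateClosing:
        case XenbusStateClosed:
            frontend_close();                /* hypothetical */
            break;
        default:
            /* Unknown/Initialising: nothing to do yet. */
            break;
        }
    }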
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/io/xs_wire.h linux-2.6.12-xen/include/asm-xen/xen-public/io/xs_wire.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/io/xs_wire.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/io/xs_wire.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,97 @@
-+/*
-+ * Details of the "wire" protocol between Xen Store Daemon and client
-+ * library or guest kernel.
-+ * Copyright (C) 2005 Rusty Russell IBM Corporation
-+ */
-+
-+#ifndef _XS_WIRE_H
-+#define _XS_WIRE_H
-+
-+enum xsd_sockmsg_type
-+{
-+    XS_DEBUG,
-+    XS_DIRECTORY,
-+    XS_READ,
-+    XS_GET_PERMS,
-+    XS_WATCH,
-+    XS_UNWATCH,
-+    XS_TRANSACTION_START,
-+    XS_TRANSACTION_END,
-+    XS_INTRODUCE,
-+    XS_RELEASE,
-+    XS_GET_DOMAIN_PATH,
-+    XS_WRITE,
-+    XS_MKDIR,
-+    XS_RM,
-+    XS_SET_PERMS,
-+    XS_WATCH_EVENT,
-+    XS_ERROR,
-+    XS_IS_DOMAIN_INTRODUCED
-+};
-+
-+#define XS_WRITE_NONE "NONE"
-+#define XS_WRITE_CREATE "CREATE"
-+#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
-+
-+/* We hand errors back as strings, for portability. */
-+struct xsd_errors
-+{
-+    int errnum;
-+    const char *errstring;
-+};
-+#define XSD_ERROR(x) { x, #x }
-+static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
-+    XSD_ERROR(EINVAL),
-+    XSD_ERROR(EACCES),
-+    XSD_ERROR(EEXIST),
-+    XSD_ERROR(EISDIR),
-+    XSD_ERROR(ENOENT),
-+    XSD_ERROR(ENOMEM),
-+    XSD_ERROR(ENOSPC),
-+    XSD_ERROR(EIO),
-+    XSD_ERROR(ENOTEMPTY),
-+    XSD_ERROR(ENOSYS),
-+    XSD_ERROR(EROFS),
-+    XSD_ERROR(EBUSY),
-+    XSD_ERROR(EAGAIN),
-+    XSD_ERROR(EISCONN),
-+};
-+
-+struct xsd_sockmsg
-+{
-+    uint32_t type;  /* XS_??? */
-+    uint32_t req_id;/* Request identifier, echoed in daemon's response.  */
-+    uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
-+    uint32_t len;   /* Length of data following this. */
-+
-+    /* Generally followed by nul-terminated string(s). */
-+};
-+
-+enum xs_watch_type
-+{
-+    XS_WATCH_PATH = 0,
-+    XS_WATCH_TOKEN,
-+};
-+
-+/* Inter-domain shared memory communications. */
-+#define XENSTORE_RING_SIZE 1024
-+typedef uint32_t XENSTORE_RING_IDX;
-+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
-+struct xenstore_domain_interface {
-+    char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
-+    char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
-+    XENSTORE_RING_IDX req_cons, req_prod;
-+    XENSTORE_RING_IDX rsp_cons, rsp_prod;
-+};
-+
-+#endif /* _XS_WIRE_H */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
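
A sketch of the producer side of the request ring declared above: bytes are copied in at req_prod, wrapped with MASK_XENSTORE_IDX(), and the free-space test relies on prod - cons never exceeding XENSTORE_RING_SIZE. The barriers and event-channel notification a real client also needs are only hinted at.

    static unsigned int xs_ring_write(struct xenstore_domain_interface *intf,
                                      const char *data, unsigned int len)
    {
        XENSTORE_RING_IDX cons = intf->req_cons;
        XENSTORE_RING_IDX prod = intf->req_prod;
        unsigned int written = 0;

        while (written < len && (prod - cons) < XENSTORE_RING_SIZE) {
            intf->req[MASK_XENSTORE_IDX(prod)] = data[written++];
            prod++;
        }
        /* A write barrier belongs here before publishing the new producer
         * index, followed by a notification on the store event channel. */
        intf->req_prod = prod;
        return written;
    }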
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/memory.h linux-2.6.12-xen/include/asm-xen/xen-public/memory.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/memory.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/memory.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,127 @@
-+/******************************************************************************
-+ * memory.h
-+ * 
-+ * Memory reservation and information.
-+ * 
-+ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_MEMORY_H__
-+#define __XEN_PUBLIC_MEMORY_H__
-+
-+/*
-+ * Increase or decrease the specified domain's memory reservation. Returns a
-+ * -ve errcode on failure, or the # extents successfully allocated or freed.
-+ * arg == addr of struct xen_memory_reservation.
-+ */
-+#define XENMEM_increase_reservation 0
-+#define XENMEM_decrease_reservation 1
-+#define XENMEM_populate_physmap     6
-+typedef struct xen_memory_reservation {
-+
-+    /*
-+     * XENMEM_increase_reservation:
-+     *   OUT: MFN bases of extents that were allocated
-+     * XENMEM_decrease_reservation:
-+     *   IN:  MFN bases of extents to free
-+     * XENMEM_populate_physmap:
-+     *   IN:  PFN bases of extents to populate with memory
-+     *   OUT: MFN bases of extents that were allocated
-+     *   (NB. This command also updates the mach_to_phys translation table)
-+     */
-+    unsigned long *extent_start;
-+
-+    /* Number of extents, and size/alignment of each (2^extent_order pages). */
-+    unsigned long  nr_extents;
-+    unsigned int   extent_order;
-+
-+    /*
-+     * Maximum # bits addressable by the user of the allocated region (e.g., 
-+     * I/O devices often have a 32-bit limitation even in 64-bit systems). If 
-+     * zero then the user has no addressing restriction.
-+     * This field is not used by XENMEM_decrease_reservation.
-+     */
-+    unsigned int   address_bits;
-+
-+    /*
-+     * Domain whose reservation is being changed.
-+     * Unprivileged domains can specify only DOMID_SELF.
-+     */
-+    domid_t        domid;
-+
-+} xen_memory_reservation_t;
-+
-+/*
-+ * Returns the maximum machine frame number of mapped RAM in this system.
-+ * This command always succeeds (it never returns an error code).
-+ * arg == NULL.
-+ */
-+#define XENMEM_maximum_ram_page     2
-+
-+/*
-+ * Returns the current or maximum memory reservation, in pages, of the
-+ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
-+ * arg == addr of domid_t.
-+ */
-+#define XENMEM_current_reservation  3
-+#define XENMEM_maximum_reservation  4
-+
-+/*
-+ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
-+ * mapping table. Architectures which do not have a m2p table do not implement
-+ * this command.
-+ * arg == addr of xen_machphys_mfn_list_t.
-+ */
-+#define XENMEM_machphys_mfn_list    5
-+typedef struct xen_machphys_mfn_list {
-+    /*
-+     * Size of the 'extent_start' array. Fewer entries will be filled if the
-+     * machphys table is smaller than max_extents * 2MB.
-+     */
-+    unsigned int max_extents;
-+    
-+    /*
-+     * Pointer to buffer to fill with list of extent starts. If there are
-+     * any large discontiguities in the machine address space, 2MB gaps in
-+     * the machphys table will be represented by an MFN base of zero.
-+     */
-+    unsigned long *extent_start;
-+
-+    /*
-+     * Number of extents written to the above array. This will be smaller
-+     * than 'max_extents' if the machphys table is smaller than max_e * 2MB.
-+     */
-+    unsigned int nr_extents;
-+} xen_machphys_mfn_list_t;
-+
-+/*
-+ * Returns the base and size of the specified reserved 'RAM hole' in the
-+ * specified guest's pseudophysical address space.
-+ * arg == addr of xen_reserved_phys_area_t.
-+ */
-+#define XENMEM_reserved_phys_area   7
-+typedef struct xen_reserved_phys_area {
-+    /* Which domain to report about? */
-+    domid_t domid;
-+
-+    /*
-+     * Which reserved area to report? Out-of-range request reports
-+     * -ESRCH. Currently no architecture will have more than one reserved area.
-+     */
-+    unsigned int idx;
-+
-+    /* Base and size of the specified reserved area. */
-+    unsigned long first_pfn, nr_pfns;
-+} xen_reserved_phys_area_t;
-+
-+#endif /* __XEN_PUBLIC_MEMORY_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
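
A sketch of a reservation increase using the structure above. The HYPERVISOR_memory_op() stub signature is an assumption following the usual cmd-plus-pointer hypercall style in this tree; DOMID_SELF comes from xen.h.

    static long grow_reservation(unsigned long *mfns_out)
    {
        xen_memory_reservation_t reservation = {
            .extent_start = mfns_out,   /* OUT: MFNs of allocated extents */
            .nr_extents   = 16,
            .extent_order = 0,          /* 2^0 = single 4kB pages */
            .address_bits = 0,          /* no addressing restriction */
            .domid        = DOMID_SELF,
        };

        /* Returns the number of extents allocated, or a -ve errcode. */
        return HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
    }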
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/nmi.h linux-2.6.12-xen/include/asm-xen/xen-public/nmi.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/nmi.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/nmi.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,54 @@
-+/******************************************************************************
-+ * nmi.h
-+ * 
-+ * NMI callback registration and reason codes.
-+ * 
-+ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_NMI_H__
-+#define __XEN_PUBLIC_NMI_H__
-+
-+/*
-+ * NMI reason codes:
-+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
-+ */
-+ /* I/O-check error reported via ISA port 0x61, bit 6. */
-+#define _XEN_NMIREASON_io_error     0
-+#define XEN_NMIREASON_io_error      (1UL << _XEN_NMIREASON_io_error)
-+ /* Parity error reported via ISA port 0x61, bit 7. */
-+#define _XEN_NMIREASON_parity_error 1
-+#define XEN_NMIREASON_parity_error  (1UL << _XEN_NMIREASON_parity_error)
-+ /* Unknown hardware-generated NMI. */
-+#define _XEN_NMIREASON_unknown      2
-+#define XEN_NMIREASON_unknown       (1UL << _XEN_NMIREASON_unknown)
-+
-+/*
-+ * long nmi_op(unsigned int cmd, void *arg)
-+ * NB. All ops return zero on success, else a negative error code.
-+ */
-+
-+/*
-+ * Register NMI callback for this (calling) VCPU. Currently this only makes
-+ * sense for domain 0, vcpu 0. All other callers will receive EINVAL.
-+ * arg == address of callback function.
-+ */
-+#define XENNMI_register_callback   0
-+
-+/*
-+ * Deregister NMI callback for this (calling) VCPU.
-+ * arg == NULL.
-+ */
-+#define XENNMI_unregister_callback 1
-+
-+#endif /* __XEN_PUBLIC_NMI_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
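
A sketch of callback registration per the prototype documented above (long nmi_op(unsigned int cmd, void *arg)); nmi_entry() stands in for the guest's real low-level NMI entry point, and the stub name follows the usual HYPERVISOR_* convention.

    extern void nmi_entry(void);   /* hypothetical low-level handler */

    static long install_nmi_callback(void)
    {
        /* Only dom0/vcpu0 is accepted; other callers get EINVAL back. */
        return HYPERVISOR_nmi_op(XENNMI_register_callback, (void *)nmi_entry);
    }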
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/physdev.h linux-2.6.12-xen/include/asm-xen/xen-public/physdev.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/physdev.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/physdev.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,70 @@
-+
-+#ifndef __XEN_PUBLIC_PHYSDEV_H__
-+#define __XEN_PUBLIC_PHYSDEV_H__
-+
-+/* Commands to HYPERVISOR_physdev_op() */
-+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY     4
-+#define PHYSDEVOP_IRQ_STATUS_QUERY      5
-+#define PHYSDEVOP_SET_IOPL              6
-+#define PHYSDEVOP_SET_IOBITMAP          7
-+#define PHYSDEVOP_APIC_READ             8
-+#define PHYSDEVOP_APIC_WRITE            9
-+#define PHYSDEVOP_ASSIGN_VECTOR         10
-+
-+typedef struct physdevop_irq_status_query {
-+    /* IN */
-+    uint32_t irq;
-+    /* OUT */
-+/* Need to call PHYSDEVOP_IRQ_UNMASK_NOTIFY when the IRQ has been serviced? */
-+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY (1<<0)
-+    uint32_t flags;
-+} physdevop_irq_status_query_t;
-+
-+typedef struct physdevop_set_iopl {
-+    /* IN */
-+    uint32_t iopl;
-+} physdevop_set_iopl_t;
-+
-+typedef struct physdevop_set_iobitmap {
-+    /* IN */
-+    uint8_t *bitmap;
-+    uint32_t nr_ports;
-+} physdevop_set_iobitmap_t;
-+
-+typedef struct physdevop_apic {
-+    /* IN */
-+    uint32_t apic;
-+    uint32_t offset;
-+    /* IN or OUT */
-+    uint32_t value;
-+} physdevop_apic_t; 
-+
-+typedef struct physdevop_irq {
-+    /* IN */
-+    uint32_t irq;
-+    /* OUT */
-+    uint32_t vector;
-+} physdevop_irq_t; 
-+
-+typedef struct physdev_op {
-+    uint32_t cmd;
-+    union {
-+        physdevop_irq_status_query_t      irq_status_query;
-+        physdevop_set_iopl_t              set_iopl;
-+        physdevop_set_iobitmap_t          set_iobitmap;
-+        physdevop_apic_t                  apic_op;
-+        physdevop_irq_t                   irq_op;
-+    } u;
-+} physdev_op_t;
-+
-+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
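
A sketch that raises the caller's I/O privilege level using the command union above; the HYPERVISOR_physdev_op() stub is assumed to take a single physdev_op_t pointer.

    static int raise_iopl(unsigned int new_iopl)
    {
        physdev_op_t op;

        op.cmd             = PHYSDEVOP_SET_IOPL;
        op.u.set_iopl.iopl = new_iopl;
        return HYPERVISOR_physdev_op(&op);
    }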
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/sched_ctl.h linux-2.6.12-xen/include/asm-xen/xen-public/sched_ctl.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/sched_ctl.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/sched_ctl.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,68 @@
-+/******************************************************************************
-+ * Generic scheduler control interface.
-+ *
-+ * Mark Williamson, (C) 2004 Intel Research Cambridge
-+ */
-+
-+#ifndef __XEN_PUBLIC_SCHED_CTL_H__
-+#define __XEN_PUBLIC_SCHED_CTL_H__
-+
-+/* Scheduler types. */
-+#define SCHED_BVT      0
-+#define SCHED_SEDF     4
-+
-+/* Set or get info? */
-+#define SCHED_INFO_PUT 0
-+#define SCHED_INFO_GET 1
-+
-+/*
-+ * Generic scheduler control command - used to adjust system-wide scheduler
-+ * parameters
-+ */
-+struct sched_ctl_cmd {
-+    uint32_t sched_id;
-+    uint32_t direction;
-+    union {
-+        struct bvt_ctl {
-+            uint32_t ctx_allow;
-+        } bvt;
-+    } u;
-+};
-+
-+struct sched_adjdom_cmd {
-+    uint32_t sched_id;
-+    uint32_t direction;
-+    domid_t  domain;
-+    union {
-+        struct bvt_adjdom
-+        {
-+            uint32_t mcu_adv;      /* mcu advance: inverse of weight */
-+            uint32_t warpback;     /* warp? */
-+            int32_t  warpvalue;    /* warp value */
-+            int64_t  warpl;        /* warp limit */
-+            int64_t  warpu;        /* unwarp time requirement */
-+        } bvt;
-+        
-+        struct sedf_adjdom
-+        {
-+            uint64_t period;
-+            uint64_t slice;
-+            uint64_t latency;
-+            uint32_t extratime;
-+            uint32_t weight;
-+        } sedf;
-+
-+    } u;
-+};
-+
-+#endif /* __XEN_PUBLIC_SCHED_CTL_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
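
A sketch that fills in a per-domain SEDF adjustment using the structures above; the 100ms/10ms values are illustrative and the nanosecond units for period/slice are an assumption of this example. Delivering the command (via the dom0 control interface) is outside this header.

    static void fill_sedf_adjdom(struct sched_adjdom_cmd *cmd, domid_t dom)
    {
        cmd->sched_id         = SCHED_SEDF;
        cmd->direction        = SCHED_INFO_PUT;
        cmd->domain           = dom;
        cmd->u.sedf.period    = 100000000ULL;  /* assumed ns: 100ms period */
        cmd->u.sedf.slice     = 10000000ULL;   /* assumed ns: 10ms slice   */
        cmd->u.sedf.latency   = 0;
        cmd->u.sedf.extratime = 1;             /* allow extra-time scheduling */
        cmd->u.sedf.weight    = 0;
    }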
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/sched.h linux-2.6.12-xen/include/asm-xen/xen-public/sched.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/sched.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/sched.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,60 @@
-+/******************************************************************************
-+ * sched.h
-+ * 
-+ * Scheduler state interactions
-+ * 
-+ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_SCHED_H__
-+#define __XEN_PUBLIC_SCHED_H__
-+
-+/*
-+ * Prototype for this hypercall is:
-+ *  int sched_op(int cmd, unsigned long arg)
-+ * @cmd == SCHEDOP_??? (scheduler operation).
-+ * @arg == Operation-specific extra argument(s).
-+ */
-+
-+/*
-+ * Voluntarily yield the CPU.
-+ * @arg == 0.
-+ */
-+#define SCHEDOP_yield       0
-+
-+/*
-+ * Block execution of this VCPU until an event is received for processing.
-+ * If called with event upcalls masked, this operation will atomically
-+ * reenable event delivery and check for pending events before blocking the
-+ * VCPU. This avoids a "wakeup waiting" race.
-+ * @arg == 0.
-+ */
-+#define SCHEDOP_block       1
-+
-+/*
-+ * Halt execution of this domain (all VCPUs) and notify the system controller.
-+ * @arg == SHUTDOWN_??? (reason for shutdown).
-+ */
-+#define SCHEDOP_shutdown    2
-+
-+/*
-+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by controller
-+ * software to determine the appropriate action. For the most part, Xen does
-+ * not care about the shutdown code.
-+ */
-+#define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
-+#define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
-+#define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
-+#define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
-+
-+#endif /* __XEN_PUBLIC_SCHED_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
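
Two typical sched_op uses, following the prototype in the header comment (int sched_op(int cmd, unsigned long arg)); the HYPERVISOR_sched_op() stub name follows the usual convention.

    static void yield_cpu(void)
    {
        /* Give the CPU back to Xen; this VCPU is rescheduled later. */
        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
    }

    static void reboot_domain(void)
    {
        /* Ask the controller to clean up, kill and restart this domain. */
        HYPERVISOR_sched_op(SCHEDOP_shutdown, SHUTDOWN_reboot);
    }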
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/trace.h linux-2.6.12-xen/include/asm-xen/xen-public/trace.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/trace.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/trace.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,90 @@
-+/******************************************************************************
-+ * include/public/trace.h
-+ * 
-+ * Mark Williamson, (C) 2004 Intel Research Cambridge
-+ * Copyright (C) 2005 Bin Ren
-+ */
-+
-+#ifndef __XEN_PUBLIC_TRACE_H__
-+#define __XEN_PUBLIC_TRACE_H__
-+
-+/* Trace classes */
-+#define TRC_CLS_SHIFT 16
-+#define TRC_GEN     0x0001f000    /* General trace            */
-+#define TRC_SCHED   0x0002f000    /* Xen Scheduler trace      */
-+#define TRC_DOM0OP  0x0004f000    /* Xen DOM0 operation trace */
-+#define TRC_VMX     0x0008f000    /* Xen VMX trace            */
-+#define TRC_MEM     0x000af000    /* Xen memory trace         */
-+#define TRC_ALL     0xfffff000
-+
-+/* Trace subclasses */
-+#define TRC_SUBCLS_SHIFT 12
-+/* trace subclasses for VMX */
-+#define TRC_VMXEXIT  0x00081000   /* VMX exit trace            */
-+#define TRC_VMXTIMER 0x00082000   /* VMX timer trace           */
-+#define TRC_VMXINT   0x00084000   /* VMX interrupt trace       */
-+#define TRC_VMXIO    0x00088000   /* VMX io emulation trace  */
-+#define TRC_VMEXIT_HANDLER    0x00090000   /* VMX handler trace  */
-+
-+/* Trace events per class */
-+
-+#define TRC_SCHED_DOM_ADD       (TRC_SCHED +  1)
-+#define TRC_SCHED_DOM_REM       (TRC_SCHED +  2)
-+#define TRC_SCHED_SLEEP         (TRC_SCHED +  3)
-+#define TRC_SCHED_WAKE          (TRC_SCHED +  4)
-+#define TRC_SCHED_YIELD         (TRC_SCHED +  5)
-+#define TRC_SCHED_BLOCK         (TRC_SCHED +  6)
-+#define TRC_SCHED_SHUTDOWN      (TRC_SCHED +  7)
-+#define TRC_SCHED_CTL           (TRC_SCHED +  8)
-+#define TRC_SCHED_ADJDOM        (TRC_SCHED +  9)
-+#define TRC_SCHED_SWITCH        (TRC_SCHED + 10)
-+#define TRC_SCHED_S_TIMER_FN    (TRC_SCHED + 11)
-+#define TRC_SCHED_T_TIMER_FN    (TRC_SCHED + 12)
-+#define TRC_SCHED_DOM_TIMER_FN  (TRC_SCHED + 13)
-+#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED + 14)
-+#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED + 15)
-+
-+#define TRC_MEM_PAGE_GRANT_MAP      (TRC_MEM + 1)
-+#define TRC_MEM_PAGE_GRANT_UNMAP    (TRC_MEM + 2)
-+#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
-+
-+/* trace events per subclass */
-+#define TRC_VMX_VMEXIT          (TRC_VMXEXIT + 1)
-+#define TRC_VMX_VECTOR          (TRC_VMXEXIT + 2)
-+
-+#define TRC_VMX_TIMER_INTR      (TRC_VMXTIMER + 1)
-+
-+#define TRC_VMX_INT             (TRC_VMXINT + 1)
-+
-+#define TRC_VMEXIT              (TRC_VMEXIT_HANDLER + 1)
-+#define TRC_VMENTRY             (TRC_VMEXIT_HANDLER + 2)
-+
-+
-+/* This structure represents a single trace buffer record. */
-+struct t_rec {
-+    uint64_t cycles;          /* cycle counter timestamp */
-+    uint32_t event;           /* event ID                */
-+    unsigned long data[5];    /* event data items        */
-+};
-+
-+/*
-+ * This structure contains the metadata for a single trace buffer.  The cons
-+ * and prod fields index into an array of struct t_rec's.
-+ */
-+struct t_buf {
-+    uint32_t cons;      /* Next item to be consumed by control tools. */
-+    uint32_t prod;      /* Next item to be produced by Xen.           */
-+    /* 'nr_recs' records follow immediately after the meta-data header.    */
-+};
-+
-+#endif /* __XEN_PUBLIC_TRACE_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
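
A sketch of a consumer draining one trace buffer: per the layout comment above, the records follow immediately after the struct t_buf header. The nr_recs parameter and the emit() callback are assumptions of this example.

    static void drain_trace_buffer(struct t_buf *buf, uint32_t nr_recs,
                                   void (*emit)(struct t_rec *))
    {
        struct t_rec *recs = (struct t_rec *)(buf + 1); /* records follow header */

        while (buf->cons != buf->prod) {
            emit(&recs[buf->cons % nr_recs]);
            buf->cons++;   /* hand the slot back to Xen */
        }
    }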
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/vcpu.h linux-2.6.12-xen/include/asm-xen/xen-public/vcpu.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/vcpu.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/vcpu.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,64 @@
-+/******************************************************************************
-+ * vcpu.h
-+ * 
-+ * VCPU initialisation, query, and hotplug.
-+ * 
-+ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_VCPU_H__
-+#define __XEN_PUBLIC_VCPU_H__
-+
-+/*
-+ * Prototype for this hypercall is:
-+ *  int vcpu_op(int cmd, int vcpuid, void *extra_args)
-+ * @cmd        == VCPUOP_??? (VCPU operation).
-+ * @vcpuid     == VCPU to operate on.
-+ * @extra_args == Operation-specific extra arguments (NULL if none).
-+ */
-+
-+/*
-+ * Initialise a VCPU. Each VCPU can be initialised only once. A 
-+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
-+ * 
-+ * @extra_arg == pointer to vcpu_guest_context structure containing initial
-+ *               state for the VCPU.
-+ */
-+#define VCPUOP_initialise           0
-+
-+/*
-+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
-+ * if the VCPU has not been initialised (VCPUOP_initialise).
-+ */
-+#define VCPUOP_up                   1
-+
-+/*
-+ * Bring down a VCPU (i.e., make it non-runnable).
-+ * There are a few caveats that callers should observe:
-+ *  1. This operation may return, and VCPUOP_is_up may return false, before the
-+ *     VCPU stops running (i.e., the command is asynchronous). It is a good
-+ *     idea to ensure that the VCPU has entered a non-critical loop before
-+ *     bringing it down. Alternatively, this operation is guaranteed
-+ *     synchronous if invoked by the VCPU itself.
-+ *  2. After a VCPU is initialised, there is currently no way to drop all its
-+ *     references to domain memory. Even a VCPU that is down still holds
-+ *     memory references via its pagetable base pointer and GDT. It is good
-+ *     practice to move a VCPU onto an 'idle' or default page table, LDT and
-+ *     GDT before bringing it down.
-+ */
-+#define VCPUOP_down                 2
-+
-+/* Returns 1 if the given VCPU is up. */
-+#define VCPUOP_is_up                3
-+
-+#endif /* __XEN_PUBLIC_VCPU_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
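
A sketch of bringing a secondary VCPU online with the ops above, using the documented prototype int vcpu_op(int cmd, int vcpuid, void *extra_args); vcpu_guest_context_t comes from the architecture header (arch-x86_32.h / arch-x86_64.h).

    static int boot_vcpu(int vcpuid, vcpu_guest_context_t *ctxt)
    {
        int rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpuid, ctxt);
        if (rc != 0)
            return rc;                 /* a VCPU can be initialised only once */
        return HYPERVISOR_vcpu_op(VCPUOP_up, vcpuid, NULL);
    }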
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/version.h linux-2.6.12-xen/include/asm-xen/xen-public/version.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/version.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/version.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,63 @@
-+/******************************************************************************
-+ * version.h
-+ * 
-+ * Xen version, type, and compile information.
-+ * 
-+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh at gmail.com>
-+ * Copyright (c) 2005, Keir Fraser <keir at xensource.com>
-+ */
-+
-+#ifndef __XEN_PUBLIC_VERSION_H__
-+#define __XEN_PUBLIC_VERSION_H__
-+
-+/* NB. All ops return zero on success, except XENVER_version. */
-+
-+/* arg == NULL; returns major:minor (16:16). */
-+#define XENVER_version      0
-+
-+/* arg == xen_extraversion_t. */
-+#define XENVER_extraversion 1
-+typedef char xen_extraversion_t[16];
-+
-+/* arg == xen_compile_info_t. */
-+#define XENVER_compile_info 2
-+typedef struct xen_compile_info {
-+    char compiler[64];
-+    char compile_by[16];
-+    char compile_domain[32];
-+    char compile_date[32];
-+} xen_compile_info_t;
-+
-+#define XENVER_capabilities 3
-+typedef char xen_capabilities_info_t[1024];
-+
-+#define XENVER_changeset 4
-+typedef char xen_changeset_info_t[64];
-+
-+#define XENVER_platform_parameters 5
-+typedef struct xen_platform_parameters {
-+    unsigned long virt_start;
-+} xen_platform_parameters_t;
-+
-+#define XENVER_get_features 6
-+typedef struct xen_feature_info {
-+    unsigned int submap_idx;    /* IN: which 32-bit submap to return */
-+    uint32_t     submap;        /* OUT: 32-bit submap */
-+} xen_feature_info_t;
-+
-+#define XENFEAT_writable_page_tables       0
-+#define XENFEAT_writable_descriptor_tables 1
-+
-+#define XENFEAT_NR_SUBMAPS 1
-+
-+#endif /* __XEN_PUBLIC_VERSION_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
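
A sketch of the version and feature queries above, assuming a stub of the form int HYPERVISOR_xen_version(int cmd, void *arg); XENVER_version packs major:minor as 16:16 in its return value.

    static void report_xen_version(void)
    {
        int ver = HYPERVISOR_xen_version(XENVER_version, NULL);
        xen_feature_info_t fi = { .submap_idx = 0 };

        HYPERVISOR_xen_version(XENVER_get_features, &fi);
        printk("Running on Xen %d.%d, writable pagetables: %s\n",
               ver >> 16, ver & 0xffff,
               (fi.submap & (1U << XENFEAT_writable_page_tables)) ? "yes" : "no");
    }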
-diff -Nurp pristine-linux-2.6.12/include/asm-xen/xen-public/xen.h linux-2.6.12-xen/include/asm-xen/xen-public/xen.h
---- pristine-linux-2.6.12/include/asm-xen/xen-public/xen.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/asm-xen/xen-public/xen.h	2006-02-16 23:44:10.000000000 +0100
-@@ -0,0 +1,447 @@
-+/******************************************************************************
-+ * xen.h
-+ * 
-+ * Guest OS interface to Xen.
-+ * 
-+ * Copyright (c) 2004, K A Fraser
-+ */
-+
-+#ifndef __XEN_PUBLIC_XEN_H__
-+#define __XEN_PUBLIC_XEN_H__
-+
-+#if defined(__i386__)
-+#include "arch-x86_32.h"
-+#elif defined(__x86_64__)
-+#include "arch-x86_64.h"
-+#elif defined(__ia64__)
-+#include "arch-ia64.h"
-+#else
-+#error "Unsupported architecture"
-+#endif
-+
-+/*
-+ * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
-+ */
-+
-+/*
-+ * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
-+ *         EAX = return value
-+ *         (argument registers may be clobbered on return)
-+ * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6. 
-+ *         RAX = return value
-+ *         (argument registers not clobbered on return; RCX, R11 are)
-+ */
-+#define __HYPERVISOR_set_trap_table        0
-+#define __HYPERVISOR_mmu_update            1
-+#define __HYPERVISOR_set_gdt               2
-+#define __HYPERVISOR_stack_switch          3
-+#define __HYPERVISOR_set_callbacks         4
-+#define __HYPERVISOR_fpu_taskswitch        5
-+#define __HYPERVISOR_sched_op              6
-+#define __HYPERVISOR_dom0_op               7
-+#define __HYPERVISOR_set_debugreg          8
-+#define __HYPERVISOR_get_debugreg          9
-+#define __HYPERVISOR_update_descriptor    10
-+#define __HYPERVISOR_memory_op            12
-+#define __HYPERVISOR_multicall            13
-+#define __HYPERVISOR_update_va_mapping    14
-+#define __HYPERVISOR_set_timer_op         15
-+#define __HYPERVISOR_event_channel_op     16
-+#define __HYPERVISOR_xen_version          17
-+#define __HYPERVISOR_console_io           18
-+#define __HYPERVISOR_physdev_op           19
-+#define __HYPERVISOR_grant_table_op       20
-+#define __HYPERVISOR_vm_assist            21
-+#define __HYPERVISOR_update_va_mapping_otherdomain 22
-+#define __HYPERVISOR_iret                 23 /* x86 only */
-+#define __HYPERVISOR_switch_vm86          23 /* x86/32 only (obsolete name) */
-+#define __HYPERVISOR_switch_to_user       23 /* x86/64 only (obsolete name) */
-+#define __HYPERVISOR_vcpu_op              24
-+#define __HYPERVISOR_set_segment_base     25 /* x86/64 only */
-+#define __HYPERVISOR_mmuext_op            26
-+#define __HYPERVISOR_acm_op               27
-+#define __HYPERVISOR_nmi_op               28
-+
-+/* 
-+ * VIRTUAL INTERRUPTS
-+ * 
-+ * Virtual interrupts that a guest OS may receive from Xen.
-+ */
-+#define VIRQ_TIMER      0  /* Timebase update, and/or requested timeout.  */
-+#define VIRQ_DEBUG      1  /* Request guest to dump debug info.           */
-+#define VIRQ_CONSOLE    2  /* (DOM0) Bytes received on emergency console. */
-+#define VIRQ_DOM_EXC    3  /* (DOM0) Exceptional event for some domain.   */
-+#define VIRQ_DEBUGGER   6  /* (DOM0) A domain has paused for debugging.   */
-+#define NR_VIRQS        8
-+
-+/*
-+ * MMU-UPDATE REQUESTS
-+ * 
-+ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
-+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
-+ * Where the FD has some effect, it is described below.
-+ * ptr[1:0] specifies the appropriate MMU_* command.
-+ * 
-+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
-+ * Updates an entry in a page table. If updating an L1 table, and the new
-+ * table entry is valid/present, the mapped frame must belong to the FD, if
-+ * an FD has been specified. If attempting to map an I/O page then the
-+ * caller assumes the privilege of the FD.
-+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
-+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
-+ * ptr[:2]  -- Machine address of the page-table entry to modify.
-+ * val      -- Value to write.
-+ * 
-+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
-+ * Updates an entry in the machine->pseudo-physical mapping table.
-+ * ptr[:2]  -- Machine address within the frame whose mapping to modify.
-+ *             The frame must belong to the FD, if one is specified.
-+ * val      -- Value to write into the mapping entry.
-+ */
-+#define MMU_NORMAL_PT_UPDATE     0 /* checked '*ptr = val'. ptr is MA.       */
-+#define MMU_MACHPHYS_UPDATE      1 /* ptr = MA of frame to modify entry for  */
-+
-+/*
-+ * MMU EXTENDED OPERATIONS
-+ * 
-+ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
-+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
-+ * Where the FD has some effect, it is described below.
-+ * 
-+ * cmd: MMUEXT_(UN)PIN_*_TABLE
-+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
-+ *      The frame must belong to the FD, if one is specified.
-+ * 
-+ * cmd: MMUEXT_NEW_BASEPTR
-+ * mfn: Machine frame number of new page-table base to install in MMU.
-+ * 
-+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
-+ * mfn: Machine frame number of new page-table base to install in MMU
-+ *      when in user space.
-+ * 
-+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
-+ * No additional arguments. Flushes local TLB.
-+ * 
-+ * cmd: MMUEXT_INVLPG_LOCAL
-+ * linear_addr: Linear address to be flushed from the local TLB.
-+ * 
-+ * cmd: MMUEXT_TLB_FLUSH_MULTI
-+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
-+ * 
-+ * cmd: MMUEXT_INVLPG_MULTI
-+ * linear_addr: Linear address to be flushed.
-+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
-+ * 
-+ * cmd: MMUEXT_TLB_FLUSH_ALL
-+ * No additional arguments. Flushes all VCPUs' TLBs.
-+ * 
-+ * cmd: MMUEXT_INVLPG_ALL
-+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
-+ * 
-+ * cmd: MMUEXT_FLUSH_CACHE
-+ * No additional arguments. Writes back and flushes cache contents.
-+ * 
-+ * cmd: MMUEXT_SET_LDT
-+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
-+ * nr_ents: Number of entries in LDT.
-+ */
-+#define MMUEXT_PIN_L1_TABLE      0
-+#define MMUEXT_PIN_L2_TABLE      1
-+#define MMUEXT_PIN_L3_TABLE      2
-+#define MMUEXT_PIN_L4_TABLE      3
-+#define MMUEXT_UNPIN_TABLE       4
-+#define MMUEXT_NEW_BASEPTR       5
-+#define MMUEXT_TLB_FLUSH_LOCAL   6
-+#define MMUEXT_INVLPG_LOCAL      7
-+#define MMUEXT_TLB_FLUSH_MULTI   8
-+#define MMUEXT_INVLPG_MULTI      9
-+#define MMUEXT_TLB_FLUSH_ALL    10
-+#define MMUEXT_INVLPG_ALL       11
-+#define MMUEXT_FLUSH_CACHE      12
-+#define MMUEXT_SET_LDT          13
-+#define MMUEXT_NEW_USER_BASEPTR 15
-+
-+#ifndef __ASSEMBLY__
-+struct mmuext_op {
-+    unsigned int cmd;
-+    union {
-+        /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
-+        unsigned long mfn;
-+        /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
-+        unsigned long linear_addr;
-+    } arg1;
-+    union {
-+        /* SET_LDT */
-+        unsigned int nr_ents;
-+        /* TLB_FLUSH_MULTI, INVLPG_MULTI */
-+        void *vcpumask;
-+    } arg2;
-+};
-+#endif
-+
-+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
-+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap.   */
-+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer.         */
-+#define UVMF_NONE               (0UL<<0) /* No flushing at all.   */
-+#define UVMF_TLB_FLUSH          (1UL<<0) /* Flush entire TLB(s).  */
-+#define UVMF_INVLPG             (2UL<<0) /* Flush only one entry. */
-+#define UVMF_FLUSHTYPE_MASK     (3UL<<0)
-+#define UVMF_MULTI              (0UL<<2) /* Flush subset of TLBs. */
-+#define UVMF_LOCAL              (0UL<<2) /* Flush local TLB.      */
-+#define UVMF_ALL                (1UL<<2) /* Flush all TLBs.       */
-+
-+/*
-+ * Commands to HYPERVISOR_console_io().
-+ */
-+#define CONSOLEIO_write         0
-+#define CONSOLEIO_read          1
-+
-+/*
-+ * Commands to HYPERVISOR_vm_assist().
-+ */
-+#define VMASST_CMD_enable                0
-+#define VMASST_CMD_disable               1
-+#define VMASST_TYPE_4gb_segments         0
-+#define VMASST_TYPE_4gb_segments_notify  1
-+#define VMASST_TYPE_writable_pagetables  2
-+#define MAX_VMASST_TYPE 2
-+
-+#ifndef __ASSEMBLY__
-+
-+typedef uint16_t domid_t;
-+
-+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
-+#define DOMID_FIRST_RESERVED (0x7FF0U)
-+
-+/* DOMID_SELF is used in certain contexts to refer to oneself. */
-+#define DOMID_SELF (0x7FF0U)
-+
-+/*
-+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
-+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
-+ * is useful to ensure that no mappings to the OS's own heap are accidentally
-+ * installed. (e.g., in Linux this could cause havoc as reference counts
-+ * aren't adjusted on the I/O-mapping code path).
-+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
-+ * be specified by any calling domain.
-+ */
-+#define DOMID_IO   (0x7FF1U)
-+
-+/*
-+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
-+ * Xen's heap space (e.g., the machine_to_phys table).
-+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
-+ * the caller is privileged.
-+ */
-+#define DOMID_XEN  (0x7FF2U)
-+
-+/*
-+ * Send an array of these to HYPERVISOR_mmu_update().
-+ * NB. The fields are natural pointer/address size for this architecture.
-+ */
-+typedef struct mmu_update {
-+    uint64_t ptr;       /* Machine address of PTE. */
-+    uint64_t val;       /* New contents of PTE.    */
-+} mmu_update_t;
-+
-+/*
-+ * Send an array of these to HYPERVISOR_multicall().
-+ * NB. The fields are natural register size for this architecture.
-+ */
-+typedef struct multicall_entry {
-+    unsigned long op, result;
-+    unsigned long args[6];
-+} multicall_entry_t;
-+
-+/*
-+ * Event channel endpoints per domain:
-+ *  1024 if a long is 32 bits; 4096 if a long is 64 bits.
-+ */
-+#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
-+
-+typedef struct vcpu_time_info {
-+    /*
-+     * Updates to the following values are preceded and followed by an
-+     * increment of 'version'. The guest can therefore detect updates by
-+     * looking for changes to 'version'. If the least-significant bit of
-+     * the version number is set then an update is in progress and the guest
-+     * must wait to read a consistent set of values.
-+     * The correct way to interact with the version number is similar to
-+     * Linux's seqlock: see the implementations of read_seqbegin/read_seqretry.
-+     */
-+    uint32_t version;
-+    uint32_t pad0;
-+    uint64_t tsc_timestamp;   /* TSC at last update of time vals.  */
-+    uint64_t system_time;     /* Time, in nanosecs, since boot.    */
-+    /*
-+     * Current system time:
-+     *   system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
-+     * CPU frequency (Hz):
-+     *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
-+     */
-+    uint32_t tsc_to_system_mul;
-+    int8_t   tsc_shift;
-+    int8_t   pad1[3];
-+} vcpu_time_info_t; /* 32 bytes */
-+
-+typedef struct vcpu_info {
-+    /*
-+     * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
-+     * a pending notification for a particular VCPU. It is then cleared 
-+     * by the guest OS /before/ checking for pending work, thus avoiding
-+     * a set-and-check race. Note that the mask is only accessed by Xen
-+     * on the CPU that is currently hosting the VCPU. This means that the
-+     * pending and mask flags can be updated by the guest without special
-+     * synchronisation (i.e., no need for the x86 LOCK prefix).
-+     * This may seem suboptimal because if the pending flag is set by
-+     * a different CPU then an IPI may be scheduled even when the mask
-+     * is set. However, note:
-+     *  1. The task of 'interrupt holdoff' is covered by the per-event-
-+     *     channel mask bits. A 'noisy' event that is continually being
-+     *     triggered can be masked at source at this very precise
-+     *     granularity.
-+     *  2. The main purpose of the per-VCPU mask is therefore to restrict
-+     *     reentrant execution: whether for concurrency control, or to
-+     *     prevent unbounded stack usage. Whatever the purpose, we expect
-+     *     that the mask will be asserted only for short periods at a time,
-+     *     and so the likelihood of a 'spurious' IPI is suitably small.
-+     * The mask is read before making an event upcall to the guest: a
-+     * non-zero mask therefore guarantees that the VCPU will not receive
-+     * an upcall activation. The mask is cleared when the VCPU requests
-+     * to block: this avoids wakeup-waiting races.
-+     */
-+    uint8_t evtchn_upcall_pending;
-+    uint8_t evtchn_upcall_mask;
-+    unsigned long evtchn_pending_sel;
-+    arch_vcpu_info_t arch;
-+    vcpu_time_info_t time;
-+} vcpu_info_t; /* 64 bytes (x86) */
-+
-+/*
-+ * Xen/kernel shared data -- pointer provided in start_info.
-+ * NB. We expect that this struct is smaller than a page.
-+ */
-+typedef struct shared_info {
-+    vcpu_info_t vcpu_info[MAX_VIRT_CPUS];
-+
-+    /*
-+     * A domain can create "event channels" on which it can send and receive
-+     * asynchronous event notifications. There are three classes of event that
-+     * are delivered by this mechanism:
-+     *  1. Bi-directional inter- and intra-domain connections. Domains must
-+     *     arrange out-of-band to set up a connection (usually by allocating
-+     *     an unbound 'listener' port and advertising that via a storage service
-+     *     such as xenstore).
-+     *  2. Physical interrupts. A domain with suitable hardware-access
-+     *     privileges can bind an event-channel port to a physical interrupt
-+     *     source.
-+     *  3. Virtual interrupts ('events'). A domain can bind an event-channel
-+     *     port to a virtual interrupt source, such as the virtual-timer
-+     *     device or the emergency console.
-+     * 
-+     * Event channels are addressed by a "port index". Each channel is
-+     * associated with two bits of information:
-+     *  1. PENDING -- notifies the domain that there is a pending notification
-+     *     to be processed. This bit is cleared by the guest.
-+     *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
-+     *     will cause an asynchronous upcall to be scheduled. This bit is only
-+     *     updated by the guest. It is read-only within Xen. If a channel
-+     *     becomes pending while the channel is masked then the 'edge' is lost
-+     *     (i.e., when the channel is unmasked, the guest must manually handle
-+     *     pending notifications as no upcall will be scheduled by Xen).
-+     * 
-+     * To expedite scanning of pending notifications, any 0->1 pending
-+     * transition on an unmasked channel causes a corresponding bit in a
-+     * per-vcpu selector word to be set. Each bit in the selector covers a
-+     * 'C long' in the PENDING bitfield array.
-+     */
-+    unsigned long evtchn_pending[sizeof(unsigned long) * 8];
-+    unsigned long evtchn_mask[sizeof(unsigned long) * 8];
-+
-+    /*
-+     * Wallclock time: updated only by control software. Guests should base
-+     * their gettimeofday() syscall on this wallclock-base value.
-+     */
-+    uint32_t wc_version;      /* Version counter: see vcpu_time_info_t. */
-+    uint32_t wc_sec;          /* Secs  00:00:00 UTC, Jan 1, 1970.  */
-+    uint32_t wc_nsec;         /* Nsecs 00:00:00 UTC, Jan 1, 1970.  */
-+
-+    arch_shared_info_t arch;
-+
-+} shared_info_t;
-+
-+/*
-+ * Start-of-day memory layout for the initial domain (DOM0):
-+ *  1. The domain is started within contiguous virtual-memory region.
-+ *  2. The contiguous region begins and ends on an aligned 4MB boundary.
-+ *  3. The region start corresponds to the load address of the OS image.
-+ *     If the load address is not 4MB aligned then the address is rounded down.
-+ *  4. This is the order of bootstrap elements in the initial virtual region:
-+ *      a. relocated kernel image
-+ *      b. initial ram disk              [mod_start, mod_len]
-+ *      c. list of allocated page frames [mfn_list, nr_pages]
-+ *      d. bootstrap page tables         [pt_base, CR3 (x86)]
-+ *      e. start_info_t structure        [register ESI (x86)]
-+ *      f. bootstrap stack               [register ESP (x86)]
-+ *  5. Bootstrap elements are packed together, but each is 4kB-aligned.
-+ *  6. The initial ram disk may be omitted.
-+ *  7. The list of page frames forms a contiguous 'pseudo-physical' memory
-+ *     layout for the domain. In particular, the bootstrap virtual-memory
-+ *     region is a 1:1 mapping to the first section of the pseudo-physical map.
-+ *  8. All bootstrap elements are mapped read-writable for the guest OS. The
-+ *     only exception is the bootstrap page table, which is mapped read-only.
-+ *  9. There is guaranteed to be at least 512kB padding after the final
-+ *     bootstrap element. If necessary, the bootstrap virtual region is
-+ *     extended by an extra 4MB to ensure this.
-+ */
-+
-+#define MAX_GUEST_CMDLINE 1024
-+typedef struct start_info {
-+    /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
-+    char magic[32];             /* "xen-<version>-<platform>".            */
-+    unsigned long nr_pages;     /* Total pages allocated to this domain.  */
-+    unsigned long shared_info;  /* MACHINE address of shared info struct. */
-+    uint32_t flags;             /* SIF_xxx flags.                         */
-+    unsigned long store_mfn;    /* MACHINE page number of shared page.    */
-+    uint32_t store_evtchn;      /* Event channel for store communication. */
-+    unsigned long console_mfn;  /* MACHINE address of console page.       */
-+    uint32_t console_evtchn;    /* Event channel for console messages.    */
-+    /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
-+    unsigned long pt_base;      /* VIRTUAL address of page directory.     */
-+    unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames.       */
-+    unsigned long mfn_list;     /* VIRTUAL address of page-frame list.    */
-+    unsigned long mod_start;    /* VIRTUAL address of pre-loaded module.  */
-+    unsigned long mod_len;      /* Size (bytes) of pre-loaded module.     */
-+    int8_t cmd_line[MAX_GUEST_CMDLINE];
-+} start_info_t;
-+
-+/* These flags are passed in the 'flags' field of start_info_t. */
-+#define SIF_PRIVILEGED    (1<<0)  /* Is the domain privileged? */
-+#define SIF_INITDOMAIN    (1<<1)  /* Is this the initial control domain? */
-+
-+typedef uint64_t cpumap_t;
-+
-+typedef uint8_t xen_domain_handle_t[16];
-+
-+/* Turn a plain number into a C unsigned long constant. */
-+#define __mk_unsigned_long(x) x ## UL
-+#define mk_unsigned_long(x) __mk_unsigned_long(x)
-+
-+#else /* __ASSEMBLY__ */
-+
-+/* In assembly code we cannot use C numeric constant suffixes. */
-+#define mk_unsigned_long(x) x
-+
-+#endif /* !__ASSEMBLY__ */
-+
-+#endif /* __XEN_PUBLIC_XEN_H__ */
-+
-+/*
-+ * Local variables:
-+ * mode: C
-+ * c-set-style: "BSD"
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * indent-tabs-mode: nil
-+ * End:
-+ */
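
As a worked example of the vcpu_time_info_t protocol above: the version field is read seqlock-style, and because the header's CPU-frequency formula implies tsc_to_system_mul is a 32.32 fixed-point multiplier, the elapsed-TSC product is shifted right by 32 to obtain nanoseconds. rmb() is the kernel's read barrier, the TSC sample is passed in by the caller, and a full implementation would use a wider multiply to avoid overflow on large deltas.

    static uint64_t xen_system_time(shared_info_t *shared, int cpu, uint64_t tsc)
    {
        vcpu_time_info_t *t = &shared->vcpu_info[cpu].time;
        uint32_t version, mul;
        uint64_t base, stamp, delta;
        int8_t shift;

        do {
            version = t->version;
            rmb();                       /* snapshot fields after version read */
            base    = t->system_time;
            stamp   = t->tsc_timestamp;
            mul     = t->tsc_to_system_mul;
            shift   = t->tsc_shift;
            rmb();
            /* Odd version: update in progress; changed version: retry. */
        } while ((version & 1) || (version != t->version));

        delta = tsc - stamp;
        if (shift >= 0)
            delta <<= shift;
        else
            delta >>= -shift;
        /* ns = ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul,
         * with the multiplier treated as 32.32 fixed point. */
        return base + ((delta * (uint64_t)mul) >> 32);
    }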
-diff -Nurp pristine-linux-2.6.12/include/linux/autoconf.h linux-2.6.12-xen/include/linux/autoconf.h
---- pristine-linux-2.6.12/include/linux/autoconf.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/include/linux/autoconf.h	2006-02-25 00:12:57.401432383 +0100
-@@ -0,0 +1,2967 @@
-+/*
-+ * Automatically generated C config: don't edit
-+ * Linux kernel version: 2.6.12.6-xen
-+ * Sat Feb 25 00:12:55 2006
-+ */
-+#define AUTOCONF_INCLUDED
-+#define CONFIG_XEN 1
-+#define CONFIG_ARCH_XEN 1
-+#define CONFIG_NO_IDLE_HZ 1
-+
-+/*
-+ * XEN
-+ */
-+#define CONFIG_XEN_PRIVILEGED_GUEST 1
-+#define CONFIG_XEN_PHYSDEV_ACCESS 1
-+#define CONFIG_XEN_BLKDEV_BACKEND 1
-+#undef CONFIG_XEN_BLKDEV_TAP_BE
-+#define CONFIG_XEN_NETDEV_BACKEND 1
-+#undef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
-+#undef CONFIG_XEN_TPMDEV_FRONTEND
-+#undef CONFIG_XEN_TPMDEV_BACKEND
-+#define CONFIG_XEN_BLKDEV_FRONTEND 1
-+#define CONFIG_XEN_NETDEV_FRONTEND 1
-+#undef CONFIG_XEN_BLKDEV_TAP
-+#undef CONFIG_XEN_SHADOW_MODE
-+#define CONFIG_XEN_SCRUB_PAGES 1
-+#define CONFIG_XEN_X86 1
-+#undef CONFIG_XEN_X86_64
-+#define CONFIG_HAVE_ARCH_ALLOC_SKB 1
-+#define CONFIG_HAVE_ARCH_DEV_ALLOC_SKB 1
-+
-+/*
-+ * Code maturity level options
-+ */
-+#define CONFIG_EXPERIMENTAL 1
-+#undef CONFIG_CLEAN_COMPILE
-+#define CONFIG_BROKEN 1
-+#define CONFIG_BROKEN_ON_SMP 1
-+#define CONFIG_LOCK_KERNEL 1
-+#define CONFIG_INIT_ENV_ARG_LIMIT 32
-+
-+/*
-+ * General setup
-+ */
-+#define CONFIG_LOCALVERSION ""
-+#define CONFIG_SWAP 1
-+#define CONFIG_SYSVIPC 1
-+#define CONFIG_POSIX_MQUEUE 1
-+#define CONFIG_BSD_PROCESS_ACCT 1
-+#undef CONFIG_BSD_PROCESS_ACCT_V3
-+#define CONFIG_SYSCTL 1
-+#undef CONFIG_AUDIT
-+#define CONFIG_HOTPLUG 1
-+#define CONFIG_KOBJECT_UEVENT 1
-+#define CONFIG_IKCONFIG 1
-+#define CONFIG_IKCONFIG_PROC 1
-+#undef CONFIG_CPUSETS
-+#define CONFIG_EMBEDDED 1
-+#define CONFIG_KALLSYMS 1
-+#undef CONFIG_KALLSYMS_ALL
-+#undef CONFIG_KALLSYMS_EXTRA_PASS
-+#define CONFIG_PRINTK 1
-+#define CONFIG_BUG 1
-+#define CONFIG_BASE_FULL 1
-+#define CONFIG_FUTEX 1
-+#define CONFIG_EPOLL 1
-+#undef CONFIG_CC_OPTIMIZE_FOR_SIZE
-+#define CONFIG_SHMEM 1
-+#define CONFIG_CC_ALIGN_FUNCTIONS 0
-+#define CONFIG_CC_ALIGN_LABELS 0
-+#define CONFIG_CC_ALIGN_LOOPS 0
-+#define CONFIG_CC_ALIGN_JUMPS 0
-+#undef CONFIG_TINY_SHMEM
-+#define CONFIG_BASE_SMALL 0
-+
-+/*
-+ * Loadable module support
-+ */
-+#define CONFIG_MODULES 1
-+#define CONFIG_MODULE_UNLOAD 1
-+#define CONFIG_MODULE_FORCE_UNLOAD 1
-+#define CONFIG_OBSOLETE_MODPARM 1
-+#define CONFIG_MODVERSIONS 1
-+#undef CONFIG_MODULE_SRCVERSION_ALL
-+#define CONFIG_KMOD 1
-+#define CONFIG_STOP_MACHINE 1
-+
-+/*
-+ * X86 Processor Configuration
-+ */
-+#define CONFIG_XENARCH "i386"
-+#define CONFIG_X86 1
-+#define CONFIG_MMU 1
-+#define CONFIG_UID16 1
-+#define CONFIG_GENERIC_ISA_DMA 1
-+#define CONFIG_GENERIC_IOMAP 1
-+#undef CONFIG_M386
-+#undef CONFIG_M486
-+#undef CONFIG_M586
-+#undef CONFIG_M586TSC
-+#undef CONFIG_M586MMX
-+#define CONFIG_M686 1
-+#undef CONFIG_MPENTIUMII
-+#undef CONFIG_MPENTIUMIII
-+#undef CONFIG_MPENTIUMM
-+#undef CONFIG_MPENTIUM4
-+#undef CONFIG_MK6
-+#undef CONFIG_MK7
-+#undef CONFIG_MK8
-+#undef CONFIG_MCRUSOE
-+#undef CONFIG_MEFFICEON
-+#undef CONFIG_MWINCHIPC6
-+#undef CONFIG_MWINCHIP2
-+#undef CONFIG_MWINCHIP3D
-+#undef CONFIG_MGEODEGX1
-+#undef CONFIG_MCYRIXIII
-+#undef CONFIG_MVIAC3_2
-+#undef CONFIG_X86_GENERIC
-+#define CONFIG_X86_CMPXCHG 1
-+#define CONFIG_X86_XADD 1
-+#define CONFIG_X86_L1_CACHE_SHIFT 5
-+#define CONFIG_RWSEM_XCHGADD_ALGORITHM 1
-+#define CONFIG_GENERIC_CALIBRATE_DELAY 1
-+#define CONFIG_X86_PPRO_FENCE 1
-+#define CONFIG_X86_WP_WORKS_OK 1
-+#define CONFIG_X86_INVLPG 1
-+#define CONFIG_X86_BSWAP 1
-+#define CONFIG_X86_POPAD_OK 1
-+#define CONFIG_X86_GOOD_APIC 1
-+#define CONFIG_X86_USE_PPRO_CHECKSUM 1
-+#undef CONFIG_HPET_TIMER
-+#undef CONFIG_HPET_EMULATE_RTC
-+#define CONFIG_SMP 1
-+#define CONFIG_SMP_ALTERNATIVES 1
-+#define CONFIG_NR_CPUS 8
-+#undef CONFIG_SCHED_SMT
-+#undef CONFIG_X86_REBOOTFIXUPS
-+#define CONFIG_MICROCODE 1
-+#define CONFIG_X86_CPUID_MODULE 1
-+#define CONFIG_SWIOTLB 1
-+
-+/*
-+ * Firmware Drivers
-+ */
-+#define CONFIG_EDD_MODULE 1
-+#undef CONFIG_NOHIGHMEM
-+#define CONFIG_HIGHMEM4G 1
-+#undef CONFIG_HIGHMEM64G
-+#define CONFIG_HIGHMEM 1
-+#define CONFIG_MTRR 1
-+#define CONFIG_HAVE_DEC_LOCK 1
-+#undef CONFIG_REGPARM
-+#define CONFIG_X86_LOCAL_APIC 1
-+#define CONFIG_X86_IO_APIC 1
-+#define CONFIG_HOTPLUG_CPU 1
-+
-+/*
-+ * Bus options (PCI, PCMCIA, EISA, MCA, ISA)
-+ */
-+#define CONFIG_PCI 1
-+#undef CONFIG_PCI_GOMMCONFIG
-+#undef CONFIG_PCI_GODIRECT
-+#define CONFIG_PCI_GOANY 1
-+#define CONFIG_PCI_DIRECT 1
-+#define CONFIG_PCI_MMCONFIG 1
-+#undef CONFIG_PCIEPORTBUS
-+#undef CONFIG_PCI_MSI
-+#undef CONFIG_PCI_LEGACY_PROC
-+#define CONFIG_PCI_NAMES 1
-+#undef CONFIG_PCI_DEBUG
-+#define CONFIG_ISA_DMA_API 1
-+#define CONFIG_ISA 1
-+#undef CONFIG_EISA
-+#undef CONFIG_MCA
-+#define CONFIG_SCx200_MODULE 1
-+
-+/*
-+ * PCCARD (PCMCIA/CardBus) support
-+ */
-+#define CONFIG_PCCARD_MODULE 1
-+#undef CONFIG_PCMCIA_DEBUG
-+#define CONFIG_PCMCIA_MODULE 1
-+#define CONFIG_CARDBUS 1
-+
-+/*
-+ * PC-card bridges
-+ */
-+#define CONFIG_YENTA_MODULE 1
-+#define CONFIG_PD6729_MODULE 1
-+#define CONFIG_I82092_MODULE 1
-+#define CONFIG_I82365_MODULE 1
-+#define CONFIG_TCIC_MODULE 1
-+#define CONFIG_PCMCIA_PROBE 1
-+#define CONFIG_PCCARD_NONSTATIC_MODULE 1
-+
-+/*
-+ * PCI Hotplug Support
-+ */
-+#define CONFIG_HOTPLUG_PCI_MODULE 1
-+#define CONFIG_HOTPLUG_PCI_FAKE_MODULE 1
-+#undef CONFIG_HOTPLUG_PCI_ACPI
-+#define CONFIG_HOTPLUG_PCI_CPCI 1
-+#define CONFIG_HOTPLUG_PCI_CPCI_ZT5550_MODULE 1
-+#define CONFIG_HOTPLUG_PCI_CPCI_GENERIC_MODULE 1
-+#define CONFIG_HOTPLUG_PCI_SHPC_MODULE 1
-+#undef CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE
-+#define CONFIG_GENERIC_HARDIRQS 1
-+#define CONFIG_GENERIC_IRQ_PROBE 1
-+#define CONFIG_X86_SMP 1
-+#define CONFIG_X86_BIOS_REBOOT 1
-+#define CONFIG_X86_TRAMPOLINE 1
-+#define CONFIG_SECCOMP 1
-+#undef CONFIG_EARLY_PRINTK
-+
-+/*
-+ * Executable file formats
-+ */
-+#define CONFIG_BINFMT_ELF 1
-+#define CONFIG_BINFMT_AOUT_MODULE 1
-+#define CONFIG_BINFMT_MISC_MODULE 1
-+
-+/*
-+ * Device Drivers
-+ */
-+
-+/*
-+ * Generic Driver Options
-+ */
-+#define CONFIG_STANDALONE 1
-+#define CONFIG_PREVENT_FIRMWARE_BUILD 1
-+#define CONFIG_FW_LOADER_MODULE 1
-+#undef CONFIG_DEBUG_DRIVER
-+
-+/*
-+ * Memory Technology Devices (MTD)
-+ */
-+#define CONFIG_MTD_MODULE 1
-+#undef CONFIG_MTD_DEBUG
-+#define CONFIG_MTD_CONCAT_MODULE 1
-+#define CONFIG_MTD_PARTITIONS 1
-+#define CONFIG_MTD_REDBOOT_PARTS_MODULE 1
-+#define CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK -1
-+#undef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
-+#undef CONFIG_MTD_REDBOOT_PARTS_READONLY
-+#undef CONFIG_MTD_CMDLINE_PARTS
-+
-+/*
-+ * User Modules And Translation Layers
-+ */
-+#define CONFIG_MTD_CHAR_MODULE 1
-+#define CONFIG_MTD_BLOCK_MODULE 1
-+#define CONFIG_MTD_BLOCK_RO_MODULE 1
-+#define CONFIG_FTL_MODULE 1
-+#define CONFIG_NFTL_MODULE 1
-+#define CONFIG_NFTL_RW 1
-+#define CONFIG_INFTL_MODULE 1
-+
-+/*
-+ * RAM/ROM/Flash chip drivers
-+ */
-+#define CONFIG_MTD_CFI_MODULE 1
-+#define CONFIG_MTD_JEDECPROBE_MODULE 1
-+#define CONFIG_MTD_GEN_PROBE_MODULE 1
-+#undef CONFIG_MTD_CFI_ADV_OPTIONS
-+#define CONFIG_MTD_MAP_BANK_WIDTH_1 1
-+#define CONFIG_MTD_MAP_BANK_WIDTH_2 1
-+#define CONFIG_MTD_MAP_BANK_WIDTH_4 1
-+#undef CONFIG_MTD_MAP_BANK_WIDTH_8
-+#undef CONFIG_MTD_MAP_BANK_WIDTH_16
-+#undef CONFIG_MTD_MAP_BANK_WIDTH_32
-+#define CONFIG_MTD_CFI_I1 1
-+#define CONFIG_MTD_CFI_I2 1
-+#undef CONFIG_MTD_CFI_I4
-+#undef CONFIG_MTD_CFI_I8
-+#define CONFIG_MTD_CFI_INTELEXT_MODULE 1
-+#define CONFIG_MTD_CFI_AMDSTD_MODULE 1
-+#define CONFIG_MTD_CFI_AMDSTD_RETRY 0
-+#define CONFIG_MTD_CFI_STAA_MODULE 1
-+#define CONFIG_MTD_CFI_UTIL_MODULE 1
-+#define CONFIG_MTD_RAM_MODULE 1
-+#define CONFIG_MTD_ROM_MODULE 1
-+#define CONFIG_MTD_ABSENT_MODULE 1
-+#undef CONFIG_MTD_OBSOLETE_CHIPS
-+
-+/*
-+ * Mapping drivers for chip access
-+ */
-+#define CONFIG_MTD_COMPLEX_MAPPINGS 1
-+#define CONFIG_MTD_PHYSMAP_MODULE 1
-+#define CONFIG_MTD_PHYSMAP_START 0x8000000
-+#define CONFIG_MTD_PHYSMAP_LEN 0x4000000
-+#define CONFIG_MTD_PHYSMAP_BANKWIDTH 2
-+#define CONFIG_MTD_PNC2000_MODULE 1
-+#define CONFIG_MTD_SC520CDP_MODULE 1
-+#define CONFIG_MTD_NETSC520_MODULE 1
-+#define CONFIG_MTD_TS5500_MODULE 1
-+#define CONFIG_MTD_SBC_GXX_MODULE 1
-+#define CONFIG_MTD_ELAN_104NC_MODULE 1
-+#define CONFIG_MTD_SCx200_DOCFLASH_MODULE 1
-+#undef CONFIG_MTD_AMD76XROM
-+#undef CONFIG_MTD_ICHXROM
-+#undef CONFIG_MTD_SCB2_FLASH
-+#define CONFIG_MTD_NETtel_MODULE 1
-+#define CONFIG_MTD_DILNETPC_MODULE 1
-+#define CONFIG_MTD_DILNETPC_BOOTSIZE 0x80000
-+#undef CONFIG_MTD_L440GX
-+#define CONFIG_MTD_PCI_MODULE 1
-+#define CONFIG_MTD_PCMCIA_MODULE 1
-+
-+/*
-+ * Self-contained MTD device drivers
-+ */
-+#define CONFIG_MTD_PMC551_MODULE 1
-+#undef CONFIG_MTD_PMC551_BUGFIX
-+#undef CONFIG_MTD_PMC551_DEBUG
-+#define CONFIG_MTD_SLRAM_MODULE 1
-+#define CONFIG_MTD_PHRAM_MODULE 1
-+#define CONFIG_MTD_MTDRAM_MODULE 1
-+#define CONFIG_MTDRAM_TOTAL_SIZE 4096
-+#define CONFIG_MTDRAM_ERASE_SIZE 128
-+#define CONFIG_MTD_BLKMTD_MODULE 1
-+#undef CONFIG_MTD_BLOCK2MTD
-+
-+/*
-+ * Disk-On-Chip Device Drivers
-+ */
-+#define CONFIG_MTD_DOC2000_MODULE 1
-+#define CONFIG_MTD_DOC2001_MODULE 1
-+#define CONFIG_MTD_DOC2001PLUS_MODULE 1
-+#define CONFIG_MTD_DOCPROBE_MODULE 1
-+#define CONFIG_MTD_DOCECC_MODULE 1
-+#undef CONFIG_MTD_DOCPROBE_ADVANCED
-+#define CONFIG_MTD_DOCPROBE_ADDRESS 0x0
-+
-+/*
-+ * NAND Flash Device Drivers
-+ */
-+#define CONFIG_MTD_NAND_MODULE 1
-+#undef CONFIG_MTD_NAND_VERIFY_WRITE
-+#define CONFIG_MTD_NAND_IDS_MODULE 1
-+#define CONFIG_MTD_NAND_DISKONCHIP_MODULE 1
-+#undef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED
-+#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0x0
-+#undef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
-+#undef CONFIG_MTD_NAND_NANDSIM
-+
-+/*
-+ * Parallel port support
-+ */
-+#define CONFIG_PARPORT_MODULE 1
-+#define CONFIG_PARPORT_PC_MODULE 1
-+#define CONFIG_PARPORT_SERIAL_MODULE 1
-+#define CONFIG_PARPORT_PC_FIFO 1
-+#undef CONFIG_PARPORT_PC_SUPERIO
-+#define CONFIG_PARPORT_PC_PCMCIA_MODULE 1
-+#define CONFIG_PARPORT_NOT_PC 1
-+#undef CONFIG_PARPORT_GSC
-+#define CONFIG_PARPORT_1284 1
-+
-+/*
-+ * Plug and Play support
-+ */
-+#define CONFIG_PNP 1
-+#undef CONFIG_PNP_DEBUG
-+
-+/*
-+ * Protocols
-+ */
-+#define CONFIG_ISAPNP 1
-+#undef CONFIG_PNPBIOS
-+#undef CONFIG_PNPACPI
-+
-+/*
-+ * Block devices
-+ */
-+#define CONFIG_BLK_DEV_FD_MODULE 1
-+#define CONFIG_BLK_DEV_XD_MODULE 1
-+#define CONFIG_PARIDE_MODULE 1
-+#define CONFIG_PARIDE_PARPORT_MODULE 1
-+
-+/*
-+ * Parallel IDE high-level drivers
-+ */
-+#define CONFIG_PARIDE_PD_MODULE 1
-+#define CONFIG_PARIDE_PCD_MODULE 1
-+#define CONFIG_PARIDE_PF_MODULE 1
-+#define CONFIG_PARIDE_PT_MODULE 1
-+#define CONFIG_PARIDE_PG_MODULE 1
-+
-+/*
-+ * Parallel IDE protocol modules
-+ */
-+#define CONFIG_PARIDE_ATEN_MODULE 1
-+#define CONFIG_PARIDE_BPCK_MODULE 1
-+#define CONFIG_PARIDE_BPCK6_MODULE 1
-+#define CONFIG_PARIDE_COMM_MODULE 1
-+#define CONFIG_PARIDE_DSTR_MODULE 1
-+#define CONFIG_PARIDE_FIT2_MODULE 1
-+#define CONFIG_PARIDE_FIT3_MODULE 1
-+#define CONFIG_PARIDE_EPAT_MODULE 1
-+#undef CONFIG_PARIDE_EPATC8
-+#define CONFIG_PARIDE_EPIA_MODULE 1
-+#define CONFIG_PARIDE_FRIQ_MODULE 1
-+#define CONFIG_PARIDE_FRPW_MODULE 1
-+#define CONFIG_PARIDE_KBIC_MODULE 1
-+#define CONFIG_PARIDE_KTTI_MODULE 1
-+#define CONFIG_PARIDE_ON20_MODULE 1
-+#define CONFIG_PARIDE_ON26_MODULE 1
-+#define CONFIG_BLK_CPQ_DA_MODULE 1
-+#define CONFIG_BLK_CPQ_CISS_DA_MODULE 1
-+#define CONFIG_CISS_SCSI_TAPE 1
-+#define CONFIG_BLK_DEV_DAC960_MODULE 1
-+#define CONFIG_BLK_DEV_UMEM_MODULE 1
-+#undef CONFIG_BLK_DEV_COW_COMMON
-+#define CONFIG_BLK_DEV_LOOP_MODULE 1
-+#define CONFIG_BLK_DEV_CRYPTOLOOP_MODULE 1
-+#define CONFIG_BLK_DEV_NBD_MODULE 1
-+#define CONFIG_BLK_DEV_SX8_MODULE 1
-+#undef CONFIG_BLK_DEV_UB
-+#define CONFIG_BLK_DEV_RAM 1
-+#define CONFIG_BLK_DEV_RAM_COUNT 16
-+#define CONFIG_BLK_DEV_RAM_SIZE 16384
-+#define CONFIG_BLK_DEV_INITRD 1
-+#define CONFIG_INITRAMFS_SOURCE ""
-+#define CONFIG_LBD 1
-+#define CONFIG_CDROM_PKTCDVD_MODULE 1
-+#define CONFIG_CDROM_PKTCDVD_BUFFERS 8
-+#undef CONFIG_CDROM_PKTCDVD_WCACHE
-+
-+/*
-+ * IO Schedulers
-+ */
-+#define CONFIG_IOSCHED_NOOP 1
-+#define CONFIG_IOSCHED_AS 1
-+#define CONFIG_IOSCHED_DEADLINE 1
-+#define CONFIG_IOSCHED_CFQ 1
-+#define CONFIG_ATA_OVER_ETH_MODULE 1
-+
-+/*
-+ * ATA/ATAPI/MFM/RLL support
-+ */
-+#define CONFIG_IDE 1
-+#define CONFIG_BLK_DEV_IDE 1
-+
-+/*
-+ * Please see Documentation/ide.txt for help/info on IDE drives
-+ */
-+#undef CONFIG_BLK_DEV_IDE_SATA
-+#undef CONFIG_BLK_DEV_HD_IDE
-+#define CONFIG_BLK_DEV_IDEDISK 1
-+#define CONFIG_IDEDISK_MULTI_MODE 1
-+#define CONFIG_BLK_DEV_IDECS_MODULE 1
-+#define CONFIG_BLK_DEV_IDECD 1
-+#define CONFIG_BLK_DEV_IDETAPE_MODULE 1
-+#define CONFIG_BLK_DEV_IDEFLOPPY 1
-+#define CONFIG_BLK_DEV_IDESCSI_MODULE 1
-+#undef CONFIG_IDE_TASK_IOCTL
-+
-+/*
-+ * IDE chipset support/bugfixes
-+ */
-+#define CONFIG_IDE_GENERIC 1
-+#define CONFIG_BLK_DEV_CMD640 1
-+#define CONFIG_BLK_DEV_CMD640_ENHANCED 1
-+#define CONFIG_BLK_DEV_IDEPNP 1
-+#define CONFIG_BLK_DEV_IDEPCI 1
-+#define CONFIG_IDEPCI_SHARE_IRQ 1
-+#undef CONFIG_BLK_DEV_OFFBOARD
-+#define CONFIG_BLK_DEV_GENERIC 1
-+#define CONFIG_BLK_DEV_OPTI621_MODULE 1
-+#define CONFIG_BLK_DEV_RZ1000 1
-+#define CONFIG_BLK_DEV_IDEDMA_PCI 1
-+#undef CONFIG_BLK_DEV_IDEDMA_FORCED
-+#define CONFIG_IDEDMA_PCI_AUTO 1
-+#undef CONFIG_IDEDMA_ONLYDISK
-+#define CONFIG_BLK_DEV_AEC62XX 1
-+#define CONFIG_BLK_DEV_ALI15X3 1
-+#undef CONFIG_WDC_ALI15X3
-+#define CONFIG_BLK_DEV_AMD74XX 1
-+#define CONFIG_BLK_DEV_ATIIXP 1
-+#define CONFIG_BLK_DEV_CMD64X 1
-+#define CONFIG_BLK_DEV_TRIFLEX 1
-+#define CONFIG_BLK_DEV_CY82C693 1
-+#define CONFIG_BLK_DEV_CS5520 1
-+#define CONFIG_BLK_DEV_CS5530 1
-+#define CONFIG_BLK_DEV_HPT34X 1
-+#undef CONFIG_HPT34X_AUTODMA
-+#define CONFIG_BLK_DEV_HPT366 1
-+#define CONFIG_BLK_DEV_SC1200_MODULE 1
-+#define CONFIG_BLK_DEV_PIIX 1
-+#define CONFIG_BLK_DEV_NS87415_MODULE 1
-+#define CONFIG_BLK_DEV_PDC202XX_OLD 1
-+#define CONFIG_PDC202XX_BURST 1
-+#define CONFIG_BLK_DEV_PDC202XX_NEW 1
-+#define CONFIG_PDC202XX_FORCE 1
-+#define CONFIG_BLK_DEV_SVWKS 1
-+#define CONFIG_BLK_DEV_SIIMAGE 1
-+#define CONFIG_BLK_DEV_SIS5513 1
-+#define CONFIG_BLK_DEV_SLC90E66 1
-+#define CONFIG_BLK_DEV_TRM290_MODULE 1
-+#define CONFIG_BLK_DEV_VIA82CXXX 1
-+#undef CONFIG_IDE_ARM
-+#undef CONFIG_IDE_CHIPSETS
-+#define CONFIG_BLK_DEV_IDEDMA 1
-+#undef CONFIG_IDEDMA_IVB
-+#define CONFIG_IDEDMA_AUTO 1
-+#undef CONFIG_BLK_DEV_HD
-+
-+/*
-+ * SCSI device support
-+ */
-+#define CONFIG_SCSI_MODULE 1
-+#define CONFIG_SCSI_PROC_FS 1
-+
-+/*
-+ * SCSI support type (disk, tape, CD-ROM)
-+ */
-+#define CONFIG_BLK_DEV_SD_MODULE 1
-+#define CONFIG_CHR_DEV_ST_MODULE 1
-+#define CONFIG_CHR_DEV_OSST_MODULE 1
-+#define CONFIG_BLK_DEV_SR_MODULE 1
-+#undef CONFIG_BLK_DEV_SR_VENDOR
-+#define CONFIG_CHR_DEV_SG_MODULE 1
-+
-+/*
-+ * Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-+ */
-+#define CONFIG_SCSI_MULTI_LUN 1
-+#define CONFIG_SCSI_CONSTANTS 1
-+#define CONFIG_SCSI_LOGGING 1
-+
-+/*
-+ * SCSI Transport Attributes
-+ */
-+#define CONFIG_SCSI_SPI_ATTRS_MODULE 1
-+#define CONFIG_SCSI_FC_ATTRS_MODULE 1
-+#undef CONFIG_SCSI_ISCSI_ATTRS
-+
-+/*
-+ * SCSI low-level drivers
-+ */
-+#define CONFIG_BLK_DEV_3W_XXXX_RAID_MODULE 1
-+#define CONFIG_SCSI_3W_9XXX_MODULE 1
-+#undef CONFIG_SCSI_7000FASST
-+#define CONFIG_SCSI_ACARD_MODULE 1
-+#define CONFIG_SCSI_AHA152X_MODULE 1
-+#undef CONFIG_SCSI_AHA1542
-+#define CONFIG_SCSI_AACRAID_MODULE 1
-+#define CONFIG_SCSI_AIC7XXX_MODULE 1
-+#define CONFIG_AIC7XXX_CMDS_PER_DEVICE 8
-+#define CONFIG_AIC7XXX_RESET_DELAY_MS 15000
-+#define CONFIG_AIC7XXX_DEBUG_ENABLE 1
-+#define CONFIG_AIC7XXX_DEBUG_MASK 0
-+#define CONFIG_AIC7XXX_REG_PRETTY_PRINT 1
-+#define CONFIG_SCSI_AIC7XXX_OLD_MODULE 1
-+#define CONFIG_SCSI_AIC79XX_MODULE 1
-+#define CONFIG_AIC79XX_CMDS_PER_DEVICE 32
-+#define CONFIG_AIC79XX_RESET_DELAY_MS 15000
-+#define CONFIG_AIC79XX_ENABLE_RD_STRM 1
-+#define CONFIG_AIC79XX_DEBUG_ENABLE 1
-+#define CONFIG_AIC79XX_DEBUG_MASK 0
-+#define CONFIG_AIC79XX_REG_PRETTY_PRINT 1
-+#define CONFIG_SCSI_DPT_I2O_MODULE 1
-+#define CONFIG_SCSI_ADVANSYS_MODULE 1
-+#define CONFIG_SCSI_IN2000_MODULE 1
-+#define CONFIG_MEGARAID_NEWGEN 1
-+#define CONFIG_MEGARAID_MM_MODULE 1
-+#define CONFIG_MEGARAID_MAILBOX_MODULE 1
-+#define CONFIG_SCSI_SATA 1
-+#define CONFIG_SCSI_SATA_AHCI_MODULE 1
-+#define CONFIG_SCSI_SATA_SVW_MODULE 1
-+#define CONFIG_SCSI_ATA_PIIX_MODULE 1
-+#define CONFIG_SCSI_SATA_NV_MODULE 1
-+#define CONFIG_SCSI_SATA_PROMISE_MODULE 1
-+#undef CONFIG_SCSI_SATA_QSTOR
-+#define CONFIG_SCSI_SATA_SX4_MODULE 1
-+#define CONFIG_SCSI_SATA_SIL_MODULE 1
-+#define CONFIG_SCSI_SATA_SIS_MODULE 1
-+#define CONFIG_SCSI_SATA_ULI_MODULE 1
-+#define CONFIG_SCSI_SATA_VIA_MODULE 1
-+#define CONFIG_SCSI_SATA_VITESSE_MODULE 1
-+#define CONFIG_SCSI_BUSLOGIC_MODULE 1
-+#undef CONFIG_SCSI_OMIT_FLASHPOINT
-+#undef CONFIG_SCSI_CPQFCTS
-+#define CONFIG_SCSI_DMX3191D_MODULE 1
-+#define CONFIG_SCSI_DTC3280_MODULE 1
-+#define CONFIG_SCSI_EATA_MODULE 1
-+#define CONFIG_SCSI_EATA_TAGGED_QUEUE 1
-+#define CONFIG_SCSI_EATA_LINKED_COMMANDS 1
-+#define CONFIG_SCSI_EATA_MAX_TAGS 16
-+#define CONFIG_SCSI_EATA_PIO_MODULE 1
-+#define CONFIG_SCSI_FUTURE_DOMAIN_MODULE 1
-+#define CONFIG_SCSI_GDTH_MODULE 1
-+#define CONFIG_SCSI_GENERIC_NCR5380_MODULE 1
-+#define CONFIG_SCSI_GENERIC_NCR5380_MMIO_MODULE 1
-+#define CONFIG_SCSI_GENERIC_NCR53C400 1
-+#define CONFIG_SCSI_IPS_MODULE 1
-+#undef CONFIG_SCSI_INITIO
-+#undef CONFIG_SCSI_INIA100
-+#define CONFIG_SCSI_PPA_MODULE 1
-+#define CONFIG_SCSI_IMM_MODULE 1
-+#undef CONFIG_SCSI_IZIP_EPP16
-+#undef CONFIG_SCSI_IZIP_SLOW_CTR
-+#define CONFIG_SCSI_NCR53C406A_MODULE 1
-+#define CONFIG_SCSI_SYM53C8XX_2_MODULE 1
-+#define CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE 1
-+#define CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS 16
-+#define CONFIG_SCSI_SYM53C8XX_MAX_TAGS 64
-+#undef CONFIG_SCSI_SYM53C8XX_IOMAPPED
-+#define CONFIG_SCSI_IPR_MODULE 1
-+#undef CONFIG_SCSI_IPR_TRACE
-+#undef CONFIG_SCSI_IPR_DUMP
-+#define CONFIG_SCSI_PAS16_MODULE 1
-+#undef CONFIG_SCSI_PCI2000
-+#undef CONFIG_SCSI_PCI2220I
-+#define CONFIG_SCSI_PSI240I_MODULE 1
-+#define CONFIG_SCSI_QLOGIC_FAS_MODULE 1
-+#define CONFIG_SCSI_QLOGIC_ISP_MODULE 1
-+#define CONFIG_SCSI_QLOGIC_FC_MODULE 1
-+#define CONFIG_SCSI_QLOGIC_FC_FIRMWARE 1
-+#define CONFIG_SCSI_QLOGIC_1280_MODULE 1
-+#define CONFIG_SCSI_QLOGIC_1280_1040 1
-+#define CONFIG_SCSI_QLA2XXX_MODULE 1
-+#define CONFIG_SCSI_QLA21XX_MODULE 1
-+#define CONFIG_SCSI_QLA22XX_MODULE 1
-+#define CONFIG_SCSI_QLA2300_MODULE 1
-+#define CONFIG_SCSI_QLA2322_MODULE 1
-+#define CONFIG_SCSI_QLA6312_MODULE 1
-+#define CONFIG_SCSI_LPFC_MODULE 1
-+#undef CONFIG_SCSI_SEAGATE
-+#define CONFIG_SCSI_SYM53C416_MODULE 1
-+#define CONFIG_SCSI_DC395x_MODULE 1
-+#define CONFIG_SCSI_DC390T_MODULE 1
-+#define CONFIG_SCSI_T128_MODULE 1
-+#define CONFIG_SCSI_U14_34F_MODULE 1
-+#define CONFIG_SCSI_U14_34F_TAGGED_QUEUE 1
-+#define CONFIG_SCSI_U14_34F_LINKED_COMMANDS 1
-+#define CONFIG_SCSI_U14_34F_MAX_TAGS 8
-+#undef CONFIG_SCSI_ULTRASTOR
-+#define CONFIG_SCSI_NSP32_MODULE 1
-+#define CONFIG_SCSI_DEBUG_MODULE 1
-+
-+/*
-+ * PCMCIA SCSI adapter support
-+ */
-+#define CONFIG_PCMCIA_AHA152X_MODULE 1
-+#define CONFIG_PCMCIA_FDOMAIN_MODULE 1
-+#define CONFIG_PCMCIA_NINJA_SCSI_MODULE 1
-+#define CONFIG_PCMCIA_QLOGIC_MODULE 1
-+#define CONFIG_PCMCIA_SYM53C500_MODULE 1
-+
-+/*
-+ * Old CD-ROM drivers (not SCSI, not IDE)
-+ */
-+#define CONFIG_CD_NO_IDESCSI 1
-+#define CONFIG_AZTCD_MODULE 1
-+#define CONFIG_GSCD_MODULE 1
-+#undef CONFIG_SBPCD
-+#define CONFIG_MCDX_MODULE 1
-+#define CONFIG_OPTCD_MODULE 1
-+#undef CONFIG_CM206
-+#define CONFIG_SJCD_MODULE 1
-+#define CONFIG_ISP16_CDI_MODULE 1
-+#define CONFIG_CDU31A_MODULE 1
-+#define CONFIG_CDU535_MODULE 1
-+
-+/*
-+ * Multi-device support (RAID and LVM)
-+ */
-+#define CONFIG_MD 1
-+#define CONFIG_BLK_DEV_MD_MODULE 1
-+#define CONFIG_MD_LINEAR_MODULE 1
-+#define CONFIG_MD_RAID0_MODULE 1
-+#define CONFIG_MD_RAID1_MODULE 1
-+#define CONFIG_MD_RAID10_MODULE 1
-+#define CONFIG_MD_RAID5_MODULE 1
-+#define CONFIG_MD_RAID6_MODULE 1
-+#define CONFIG_MD_MULTIPATH_MODULE 1
-+#define CONFIG_MD_FAULTY_MODULE 1
-+#define CONFIG_BLK_DEV_DM_MODULE 1
-+#define CONFIG_DM_CRYPT_MODULE 1
-+#define CONFIG_DM_SNAPSHOT_MODULE 1
-+#define CONFIG_DM_MIRROR_MODULE 1
-+#define CONFIG_DM_ZERO_MODULE 1
-+#define CONFIG_DM_MULTIPATH_MODULE 1
-+#define CONFIG_DM_MULTIPATH_EMC_MODULE 1
-+
-+/*
-+ * Fusion MPT device support
-+ */
-+#define CONFIG_FUSION_MODULE 1
-+#define CONFIG_FUSION_MAX_SGE 40
-+#define CONFIG_FUSION_CTL_MODULE 1
-+#define CONFIG_FUSION_LAN_MODULE 1
-+
-+/*
-+ * IEEE 1394 (FireWire) support
-+ */
-+#define CONFIG_IEEE1394_MODULE 1
-+
-+/*
-+ * Subsystem Options
-+ */
-+#undef CONFIG_IEEE1394_VERBOSEDEBUG
-+#undef CONFIG_IEEE1394_OUI_DB
-+#define CONFIG_IEEE1394_EXTRA_CONFIG_ROMS 1
-+#define CONFIG_IEEE1394_CONFIG_ROM_IP1394 1
-+
-+/*
-+ * Device Drivers
-+ */
-+#define CONFIG_IEEE1394_PCILYNX_MODULE 1
-+#define CONFIG_IEEE1394_OHCI1394_MODULE 1
-+
-+/*
-+ * Protocol Drivers
-+ */
-+#define CONFIG_IEEE1394_VIDEO1394_MODULE 1
-+#define CONFIG_IEEE1394_SBP2_MODULE 1
-+#undef CONFIG_IEEE1394_SBP2_PHYS_DMA
-+#define CONFIG_IEEE1394_ETH1394_MODULE 1
-+#define CONFIG_IEEE1394_DV1394_MODULE 1
-+#define CONFIG_IEEE1394_RAWIO_MODULE 1
-+#define CONFIG_IEEE1394_CMP_MODULE 1
-+#define CONFIG_IEEE1394_AMDTP_MODULE 1
-+
-+/*
-+ * I2O device support
-+ */
-+#define CONFIG_I2O_MODULE 1
-+#define CONFIG_I2O_CONFIG_MODULE 1
-+#define CONFIG_I2O_BLOCK_MODULE 1
-+#define CONFIG_I2O_SCSI_MODULE 1
-+#define CONFIG_I2O_PROC_MODULE 1
-+
-+/*
-+ * Networking support
-+ */
-+#define CONFIG_NET 1
-+
-+/*
-+ * Networking options
-+ */
-+#define CONFIG_PACKET_MODULE 1
-+#define CONFIG_PACKET_MMAP 1
-+#define CONFIG_UNIX_MODULE 1
-+#define CONFIG_NET_KEY_MODULE 1
-+#define CONFIG_INET 1
-+#define CONFIG_IP_MULTICAST 1
-+#define CONFIG_IP_ADVANCED_ROUTER 1
-+#define CONFIG_IP_MULTIPLE_TABLES 1
-+#define CONFIG_IP_ROUTE_FWMARK 1
-+#define CONFIG_IP_ROUTE_MULTIPATH 1
-+#undef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-+#define CONFIG_IP_ROUTE_VERBOSE 1
-+#undef CONFIG_IP_PNP
-+#define CONFIG_NET_IPIP_MODULE 1
-+#define CONFIG_NET_IPGRE_MODULE 1
-+#define CONFIG_NET_IPGRE_BROADCAST 1
-+#define CONFIG_IP_MROUTE 1
-+#define CONFIG_IP_PIMSM_V1 1
-+#define CONFIG_IP_PIMSM_V2 1
-+#undef CONFIG_ARPD
-+#define CONFIG_SYN_COOKIES 1
-+#define CONFIG_INET_AH_MODULE 1
-+#define CONFIG_INET_ESP_MODULE 1
-+#define CONFIG_INET_IPCOMP_MODULE 1
-+#define CONFIG_INET_TUNNEL_MODULE 1
-+#define CONFIG_IP_TCPDIAG_MODULE 1
-+#define CONFIG_IP_TCPDIAG_IPV6 1
-+
-+/*
-+ * IP: Virtual Server Configuration
-+ */
-+#define CONFIG_IP_VS_MODULE 1
-+#undef CONFIG_IP_VS_DEBUG
-+#define CONFIG_IP_VS_TAB_BITS 12
-+
-+/*
-+ * IPVS transport protocol load balancing support
-+ */
-+#define CONFIG_IP_VS_PROTO_TCP 1
-+#define CONFIG_IP_VS_PROTO_UDP 1
-+#define CONFIG_IP_VS_PROTO_ESP 1
-+#define CONFIG_IP_VS_PROTO_AH 1
-+
-+/*
-+ * IPVS scheduler
-+ */
-+#define CONFIG_IP_VS_RR_MODULE 1
-+#define CONFIG_IP_VS_WRR_MODULE 1
-+#define CONFIG_IP_VS_LC_MODULE 1
-+#define CONFIG_IP_VS_WLC_MODULE 1
-+#define CONFIG_IP_VS_LBLC_MODULE 1
-+#define CONFIG_IP_VS_LBLCR_MODULE 1
-+#define CONFIG_IP_VS_DH_MODULE 1
-+#define CONFIG_IP_VS_SH_MODULE 1
-+#define CONFIG_IP_VS_SED_MODULE 1
-+#define CONFIG_IP_VS_NQ_MODULE 1
-+
-+/*
-+ * IPVS application helper
-+ */
-+#define CONFIG_IP_VS_FTP_MODULE 1
-+#define CONFIG_IPV6_MODULE 1
-+#define CONFIG_IPV6_PRIVACY 1
-+#define CONFIG_INET6_AH_MODULE 1
-+#define CONFIG_INET6_ESP_MODULE 1
-+#define CONFIG_INET6_IPCOMP_MODULE 1
-+#define CONFIG_INET6_TUNNEL_MODULE 1
-+#define CONFIG_IPV6_TUNNEL_MODULE 1
-+#define CONFIG_NETFILTER 1
-+#undef CONFIG_NETFILTER_DEBUG
-+#define CONFIG_BRIDGE_NETFILTER 1
-+
-+/*
-+ * IP: Netfilter Configuration
-+ */
-+#define CONFIG_IP_NF_CONNTRACK_MODULE 1
-+#define CONFIG_IP_NF_CT_ACCT 1
-+#define CONFIG_IP_NF_CONNTRACK_MARK 1
-+#define CONFIG_IP_NF_CT_PROTO_SCTP_MODULE 1
-+#define CONFIG_IP_NF_FTP_MODULE 1
-+#define CONFIG_IP_NF_IRC_MODULE 1
-+#define CONFIG_IP_NF_TFTP_MODULE 1
-+#define CONFIG_IP_NF_AMANDA_MODULE 1
-+#define CONFIG_IP_NF_QUEUE_MODULE 1
-+#define CONFIG_IP_NF_IPTABLES_MODULE 1
-+#define CONFIG_IP_NF_MATCH_LIMIT_MODULE 1
-+#define CONFIG_IP_NF_MATCH_IPRANGE_MODULE 1
-+#define CONFIG_IP_NF_MATCH_MAC_MODULE 1
-+#define CONFIG_IP_NF_MATCH_PKTTYPE_MODULE 1
-+#define CONFIG_IP_NF_MATCH_MARK_MODULE 1
-+#define CONFIG_IP_NF_MATCH_MULTIPORT_MODULE 1
-+#define CONFIG_IP_NF_MATCH_TOS_MODULE 1
-+#define CONFIG_IP_NF_MATCH_RECENT_MODULE 1
-+#define CONFIG_IP_NF_MATCH_ECN_MODULE 1
-+#define CONFIG_IP_NF_MATCH_DSCP_MODULE 1
-+#define CONFIG_IP_NF_MATCH_AH_ESP_MODULE 1
-+#define CONFIG_IP_NF_MATCH_LENGTH_MODULE 1
-+#define CONFIG_IP_NF_MATCH_TTL_MODULE 1
-+#define CONFIG_IP_NF_MATCH_TCPMSS_MODULE 1
-+#define CONFIG_IP_NF_MATCH_HELPER_MODULE 1
-+#define CONFIG_IP_NF_MATCH_STATE_MODULE 1
-+#define CONFIG_IP_NF_MATCH_CONNTRACK_MODULE 1
-+#define CONFIG_IP_NF_MATCH_OWNER_MODULE 1
-+#define CONFIG_IP_NF_MATCH_PHYSDEV_MODULE 1
-+#define CONFIG_IP_NF_MATCH_ADDRTYPE_MODULE 1
-+#define CONFIG_IP_NF_MATCH_REALM_MODULE 1
-+#define CONFIG_IP_NF_MATCH_SCTP_MODULE 1
-+#define CONFIG_IP_NF_MATCH_COMMENT_MODULE 1
-+#define CONFIG_IP_NF_MATCH_CONNMARK_MODULE 1
-+#define CONFIG_IP_NF_MATCH_HASHLIMIT_MODULE 1
-+#define CONFIG_IP_NF_FILTER_MODULE 1
-+#define CONFIG_IP_NF_TARGET_REJECT_MODULE 1
-+#define CONFIG_IP_NF_TARGET_LOG_MODULE 1
-+#define CONFIG_IP_NF_TARGET_ULOG_MODULE 1
-+#define CONFIG_IP_NF_TARGET_TCPMSS_MODULE 1
-+#define CONFIG_IP_NF_NAT_MODULE 1
-+#define CONFIG_IP_NF_NAT_NEEDED 1
-+#define CONFIG_IP_NF_TARGET_MASQUERADE_MODULE 1
-+#define CONFIG_IP_NF_TARGET_REDIRECT_MODULE 1
-+#define CONFIG_IP_NF_TARGET_NETMAP_MODULE 1
-+#define CONFIG_IP_NF_TARGET_SAME_MODULE 1
-+#define CONFIG_IP_NF_NAT_SNMP_BASIC_MODULE 1
-+#define CONFIG_IP_NF_NAT_IRC_MODULE 1
-+#define CONFIG_IP_NF_NAT_FTP_MODULE 1
-+#define CONFIG_IP_NF_NAT_TFTP_MODULE 1
-+#define CONFIG_IP_NF_NAT_AMANDA_MODULE 1
-+#define CONFIG_IP_NF_MANGLE_MODULE 1
-+#define CONFIG_IP_NF_TARGET_TOS_MODULE 1
-+#define CONFIG_IP_NF_TARGET_ECN_MODULE 1
-+#define CONFIG_IP_NF_TARGET_DSCP_MODULE 1
-+#define CONFIG_IP_NF_TARGET_MARK_MODULE 1
-+#define CONFIG_IP_NF_TARGET_CLASSIFY_MODULE 1
-+#define CONFIG_IP_NF_TARGET_CONNMARK_MODULE 1
-+#define CONFIG_IP_NF_TARGET_CLUSTERIP_MODULE 1
-+#define CONFIG_IP_NF_RAW_MODULE 1
-+#define CONFIG_IP_NF_TARGET_NOTRACK_MODULE 1
-+#define CONFIG_IP_NF_ARPTABLES_MODULE 1
-+#define CONFIG_IP_NF_ARPFILTER_MODULE 1
-+#define CONFIG_IP_NF_ARP_MANGLE_MODULE 1
-+
-+/*
-+ * IPv6: Netfilter Configuration (EXPERIMENTAL)
-+ */
-+#define CONFIG_IP6_NF_QUEUE_MODULE 1
-+#define CONFIG_IP6_NF_IPTABLES_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_LIMIT_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_MAC_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_RT_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_OPTS_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_FRAG_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_HL_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_MULTIPORT_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_OWNER_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_MARK_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_IPV6HEADER_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_AHESP_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_LENGTH_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_EUI64_MODULE 1
-+#define CONFIG_IP6_NF_MATCH_PHYSDEV_MODULE 1
-+#define CONFIG_IP6_NF_FILTER_MODULE 1
-+#define CONFIG_IP6_NF_TARGET_LOG_MODULE 1
-+#define CONFIG_IP6_NF_MANGLE_MODULE 1
-+#define CONFIG_IP6_NF_TARGET_MARK_MODULE 1
-+#define CONFIG_IP6_NF_RAW_MODULE 1
-+
-+/*
-+ * DECnet: Netfilter Configuration
-+ */
-+#define CONFIG_DECNET_NF_GRABULATOR_MODULE 1
-+
-+/*
-+ * Bridge: Netfilter Configuration
-+ */
-+#define CONFIG_BRIDGE_NF_EBTABLES_MODULE 1
-+#define CONFIG_BRIDGE_EBT_BROUTE_MODULE 1
-+#define CONFIG_BRIDGE_EBT_T_FILTER_MODULE 1
-+#define CONFIG_BRIDGE_EBT_T_NAT_MODULE 1
-+#define CONFIG_BRIDGE_EBT_802_3_MODULE 1
-+#define CONFIG_BRIDGE_EBT_AMONG_MODULE 1
-+#define CONFIG_BRIDGE_EBT_ARP_MODULE 1
-+#define CONFIG_BRIDGE_EBT_IP_MODULE 1
-+#define CONFIG_BRIDGE_EBT_LIMIT_MODULE 1
-+#define CONFIG_BRIDGE_EBT_MARK_MODULE 1
-+#define CONFIG_BRIDGE_EBT_PKTTYPE_MODULE 1
-+#define CONFIG_BRIDGE_EBT_STP_MODULE 1
-+#define CONFIG_BRIDGE_EBT_VLAN_MODULE 1
-+#define CONFIG_BRIDGE_EBT_ARPREPLY_MODULE 1
-+#define CONFIG_BRIDGE_EBT_DNAT_MODULE 1
-+#define CONFIG_BRIDGE_EBT_MARK_T_MODULE 1
-+#define CONFIG_BRIDGE_EBT_REDIRECT_MODULE 1
-+#define CONFIG_BRIDGE_EBT_SNAT_MODULE 1
-+#define CONFIG_BRIDGE_EBT_LOG_MODULE 1
-+#undef CONFIG_BRIDGE_EBT_ULOG
-+#define CONFIG_XFRM 1
-+#define CONFIG_XFRM_USER_MODULE 1
-+
-+/*
-+ * SCTP Configuration (EXPERIMENTAL)
-+ */
-+#define CONFIG_IP_SCTP_MODULE 1
-+#undef CONFIG_SCTP_DBG_MSG
-+#undef CONFIG_SCTP_DBG_OBJCNT
-+#undef CONFIG_SCTP_HMAC_NONE
-+#undef CONFIG_SCTP_HMAC_SHA1
-+#define CONFIG_SCTP_HMAC_MD5 1
-+#define CONFIG_ATM 1
-+#define CONFIG_ATM_CLIP 1
-+#undef CONFIG_ATM_CLIP_NO_ICMP
-+#define CONFIG_ATM_LANE_MODULE 1
-+#define CONFIG_ATM_MPOA_MODULE 1
-+#define CONFIG_ATM_BR2684_MODULE 1
-+#undef CONFIG_ATM_BR2684_IPFILTER
-+#define CONFIG_BRIDGE_MODULE 1
-+#define CONFIG_VLAN_8021Q_MODULE 1
-+#define CONFIG_DECNET_MODULE 1
-+#undef CONFIG_DECNET_ROUTER
-+#define CONFIG_LLC 1
-+#define CONFIG_LLC2_MODULE 1
-+#define CONFIG_IPX_MODULE 1
-+#undef CONFIG_IPX_INTERN
-+#define CONFIG_ATALK_MODULE 1
-+#define CONFIG_DEV_APPLETALK 1
-+#define CONFIG_LTPC_MODULE 1
-+#define CONFIG_COPS_MODULE 1
-+#define CONFIG_COPS_DAYNA 1
-+#define CONFIG_COPS_TANGENT 1
-+#define CONFIG_IPDDP_MODULE 1
-+#define CONFIG_IPDDP_ENCAP 1
-+#define CONFIG_IPDDP_DECAP 1
-+#define CONFIG_X25_MODULE 1
-+#define CONFIG_LAPB_MODULE 1
-+#undef CONFIG_NET_DIVERT
-+#define CONFIG_ECONET_MODULE 1
-+#define CONFIG_ECONET_AUNUDP 1
-+#define CONFIG_ECONET_NATIVE 1
-+#define CONFIG_WAN_ROUTER_MODULE 1
-+
-+/*
-+ * QoS and/or fair queueing
-+ */
-+#define CONFIG_NET_SCHED 1
-+#define CONFIG_NET_SCH_CLK_JIFFIES 1
-+#undef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
-+#undef CONFIG_NET_SCH_CLK_CPU
-+#define CONFIG_NET_SCH_CBQ_MODULE 1
-+#define CONFIG_NET_SCH_HTB_MODULE 1
-+#define CONFIG_NET_SCH_HFSC_MODULE 1
-+#define CONFIG_NET_SCH_ATM_MODULE 1
-+#define CONFIG_NET_SCH_PRIO_MODULE 1
-+#define CONFIG_NET_SCH_RED_MODULE 1
-+#define CONFIG_NET_SCH_SFQ_MODULE 1
-+#define CONFIG_NET_SCH_TEQL_MODULE 1
-+#define CONFIG_NET_SCH_TBF_MODULE 1
-+#define CONFIG_NET_SCH_GRED_MODULE 1
-+#define CONFIG_NET_SCH_DSMARK_MODULE 1
-+#define CONFIG_NET_SCH_NETEM_MODULE 1
-+#define CONFIG_NET_SCH_INGRESS_MODULE 1
-+#define CONFIG_NET_QOS 1
-+#define CONFIG_NET_ESTIMATOR 1
-+#define CONFIG_NET_CLS 1
-+#define CONFIG_NET_CLS_BASIC_MODULE 1
-+#define CONFIG_NET_CLS_TCINDEX_MODULE 1
-+#define CONFIG_NET_CLS_ROUTE4_MODULE 1
-+#define CONFIG_NET_CLS_ROUTE 1
-+#define CONFIG_NET_CLS_FW_MODULE 1
-+#define CONFIG_NET_CLS_U32_MODULE 1
-+#undef CONFIG_CLS_U32_PERF
-+#undef CONFIG_NET_CLS_IND
-+#undef CONFIG_CLS_U32_MARK
-+#define CONFIG_NET_CLS_RSVP_MODULE 1
-+#define CONFIG_NET_CLS_RSVP6_MODULE 1
-+#define CONFIG_NET_EMATCH 1
-+#define CONFIG_NET_EMATCH_STACK 32
-+#define CONFIG_NET_EMATCH_CMP_MODULE 1
-+#define CONFIG_NET_EMATCH_NBYTE_MODULE 1
-+#define CONFIG_NET_EMATCH_U32_MODULE 1
-+#define CONFIG_NET_EMATCH_META_MODULE 1
-+#undef CONFIG_NET_CLS_ACT
-+#define CONFIG_NET_CLS_POLICE 1
-+
-+/*
-+ * Network testing
-+ */
-+#define CONFIG_NET_PKTGEN_MODULE 1
-+#define CONFIG_NETPOLL 1
-+#undef CONFIG_NETPOLL_RX
-+#undef CONFIG_NETPOLL_TRAP
-+#define CONFIG_NET_POLL_CONTROLLER 1
-+#define CONFIG_HAMRADIO 1
-+
-+/*
-+ * Packet Radio protocols
-+ */
-+#define CONFIG_AX25_MODULE 1
-+#undef CONFIG_AX25_DAMA_SLAVE
-+#define CONFIG_NETROM_MODULE 1
-+#define CONFIG_ROSE_MODULE 1
-+
-+/*
-+ * AX.25 network device drivers
-+ */
-+#define CONFIG_MKISS_MODULE 1
-+#define CONFIG_6PACK_MODULE 1
-+#define CONFIG_BPQETHER_MODULE 1
-+#undef CONFIG_DMASCC
-+#define CONFIG_SCC_MODULE 1
-+#undef CONFIG_SCC_DELAY
-+#undef CONFIG_SCC_TRXECHO
-+#define CONFIG_BAYCOM_SER_FDX_MODULE 1
-+#define CONFIG_BAYCOM_SER_HDX_MODULE 1
-+#define CONFIG_BAYCOM_PAR_MODULE 1
-+#define CONFIG_BAYCOM_EPP_MODULE 1
-+#define CONFIG_YAM_MODULE 1
-+#define CONFIG_IRDA_MODULE 1
-+
-+/*
-+ * IrDA protocols
-+ */
-+#define CONFIG_IRLAN_MODULE 1
-+#define CONFIG_IRNET_MODULE 1
-+#define CONFIG_IRCOMM_MODULE 1
-+#undef CONFIG_IRDA_ULTRA
-+
-+/*
-+ * IrDA options
-+ */
-+#define CONFIG_IRDA_CACHE_LAST_LSAP 1
-+#define CONFIG_IRDA_FAST_RR 1
-+#define CONFIG_IRDA_DEBUG 1
-+
-+/*
-+ * Infrared-port device drivers
-+ */
-+
-+/*
-+ * SIR device drivers
-+ */
-+#define CONFIG_IRTTY_SIR_MODULE 1
-+
-+/*
-+ * Dongle support
-+ */
-+#define CONFIG_DONGLE 1
-+#define CONFIG_ESI_DONGLE_MODULE 1
-+#define CONFIG_ACTISYS_DONGLE_MODULE 1
-+#define CONFIG_TEKRAM_DONGLE_MODULE 1
-+#define CONFIG_LITELINK_DONGLE_MODULE 1
-+#define CONFIG_MA600_DONGLE_MODULE 1
-+#define CONFIG_GIRBIL_DONGLE_MODULE 1
-+#define CONFIG_MCP2120_DONGLE_MODULE 1
-+#define CONFIG_OLD_BELKIN_DONGLE_MODULE 1
-+#define CONFIG_ACT200L_DONGLE_MODULE 1
-+
-+/*
-+ * Old SIR device drivers
-+ */
-+#define CONFIG_IRPORT_SIR_MODULE 1
-+
-+/*
-+ * Old Serial dongle support
-+ */
-+#undef CONFIG_DONGLE_OLD
-+
-+/*
-+ * FIR device drivers
-+ */
-+#define CONFIG_USB_IRDA_MODULE 1
-+#define CONFIG_SIGMATEL_FIR_MODULE 1
-+#define CONFIG_NSC_FIR_MODULE 1
-+#define CONFIG_WINBOND_FIR_MODULE 1
-+#undef CONFIG_TOSHIBA_FIR
-+#define CONFIG_SMC_IRCC_FIR_MODULE 1
-+#define CONFIG_ALI_FIR_MODULE 1
-+#define CONFIG_VLSI_FIR_MODULE 1
-+#define CONFIG_VIA_FIR_MODULE 1
-+#define CONFIG_BT_MODULE 1
-+#define CONFIG_BT_L2CAP_MODULE 1
-+#define CONFIG_BT_SCO_MODULE 1
-+#define CONFIG_BT_RFCOMM_MODULE 1
-+#define CONFIG_BT_RFCOMM_TTY 1
-+#define CONFIG_BT_BNEP_MODULE 1
-+#define CONFIG_BT_BNEP_MC_FILTER 1
-+#define CONFIG_BT_BNEP_PROTO_FILTER 1
-+#define CONFIG_BT_CMTP_MODULE 1
-+#define CONFIG_BT_HIDP_MODULE 1
-+
-+/*
-+ * Bluetooth device drivers
-+ */
-+#define CONFIG_BT_HCIUSB_MODULE 1
-+#define CONFIG_BT_HCIUSB_SCO 1
-+#define CONFIG_BT_HCIUART_MODULE 1
-+#define CONFIG_BT_HCIUART_H4 1
-+#define CONFIG_BT_HCIUART_BCSP 1
-+#undef CONFIG_BT_HCIUART_BCSP_TXCRC
-+#define CONFIG_BT_HCIBCM203X_MODULE 1
-+#undef CONFIG_BT_HCIBPA10X
-+#define CONFIG_BT_HCIBFUSB_MODULE 1
-+#define CONFIG_BT_HCIDTL1_MODULE 1
-+#define CONFIG_BT_HCIBT3C_MODULE 1
-+#define CONFIG_BT_HCIBLUECARD_MODULE 1
-+#define CONFIG_BT_HCIBTUART_MODULE 1
-+#define CONFIG_BT_HCIVHCI_MODULE 1
-+#define CONFIG_NETDEVICES 1
-+#define CONFIG_DUMMY_MODULE 1
-+#define CONFIG_BONDING_MODULE 1
-+#define CONFIG_EQUALIZER_MODULE 1
-+#define CONFIG_TUN_MODULE 1
-+#define CONFIG_NET_SB1000_MODULE 1
-+
-+/*
-+ * ARCnet devices
-+ */
-+#define CONFIG_ARCNET_MODULE 1
-+#define CONFIG_ARCNET_1201_MODULE 1
-+#define CONFIG_ARCNET_1051_MODULE 1
-+#define CONFIG_ARCNET_RAW_MODULE 1
-+#undef CONFIG_ARCNET_CAP
-+#define CONFIG_ARCNET_COM90xx_MODULE 1
-+#define CONFIG_ARCNET_COM90xxIO_MODULE 1
-+#define CONFIG_ARCNET_RIM_I_MODULE 1
-+#define CONFIG_ARCNET_COM20020_MODULE 1
-+#define CONFIG_ARCNET_COM20020_ISA_MODULE 1
-+#define CONFIG_ARCNET_COM20020_PCI_MODULE 1
-+
-+/*
-+ * Ethernet (10 or 100Mbit)
-+ */
-+#define CONFIG_NET_ETHERNET 1
-+#define CONFIG_MII_MODULE 1
-+#define CONFIG_HAPPYMEAL_MODULE 1
-+#define CONFIG_SUNGEM_MODULE 1
-+#define CONFIG_NET_VENDOR_3COM 1
-+#define CONFIG_EL1_MODULE 1
-+#define CONFIG_EL2_MODULE 1
-+#undef CONFIG_ELPLUS
-+#define CONFIG_EL16_MODULE 1
-+#define CONFIG_EL3_MODULE 1
-+#undef CONFIG_3C515
-+#define CONFIG_VORTEX_MODULE 1
-+#define CONFIG_TYPHOON_MODULE 1
-+#undef CONFIG_LANCE
-+#define CONFIG_NET_VENDOR_SMC 1
-+#define CONFIG_WD80x3_MODULE 1
-+#define CONFIG_ULTRA_MODULE 1
-+#define CONFIG_SMC9194_MODULE 1
-+#define CONFIG_NET_VENDOR_RACAL 1
-+#define CONFIG_NI5010_MODULE 1
-+#define CONFIG_NI52_MODULE 1
-+#undef CONFIG_NI65
-+
-+/*
-+ * Tulip family network device support
-+ */
-+#define CONFIG_NET_TULIP 1
-+#define CONFIG_DE2104X_MODULE 1
-+#define CONFIG_TULIP_MODULE 1
-+#undef CONFIG_TULIP_MWI
-+#undef CONFIG_TULIP_MMIO
-+#undef CONFIG_TULIP_NAPI
-+#define CONFIG_DE4X5_MODULE 1
-+#define CONFIG_WINBOND_840_MODULE 1
-+#define CONFIG_DM9102_MODULE 1
-+#define CONFIG_PCMCIA_XIRCOM_MODULE 1
-+#undef CONFIG_PCMCIA_XIRTULIP
-+#define CONFIG_AT1700_MODULE 1
-+#define CONFIG_DEPCA_MODULE 1
-+#define CONFIG_HP100_MODULE 1
-+#define CONFIG_NET_ISA 1
-+#define CONFIG_E2100_MODULE 1
-+#define CONFIG_EWRK3_MODULE 1
-+#define CONFIG_EEXPRESS_MODULE 1
-+#define CONFIG_EEXPRESS_PRO_MODULE 1
-+#define CONFIG_HPLAN_PLUS_MODULE 1
-+#define CONFIG_HPLAN_MODULE 1
-+#define CONFIG_LP486E_MODULE 1
-+#define CONFIG_ETH16I_MODULE 1
-+#define CONFIG_NE2000_MODULE 1
-+#define CONFIG_ZNET_MODULE 1
-+#define CONFIG_SEEQ8005_MODULE 1
-+#define CONFIG_NET_PCI 1
-+#define CONFIG_PCNET32_MODULE 1
-+#define CONFIG_AMD8111_ETH_MODULE 1
-+#undef CONFIG_AMD8111E_NAPI
-+#define CONFIG_ADAPTEC_STARFIRE_MODULE 1
-+#undef CONFIG_ADAPTEC_STARFIRE_NAPI
-+#define CONFIG_AC3200_MODULE 1
-+#define CONFIG_APRICOT_MODULE 1
-+#define CONFIG_B44_MODULE 1
-+#define CONFIG_FORCEDETH_MODULE 1
-+#define CONFIG_CS89x0_MODULE 1
-+#undef CONFIG_DGRS
-+#define CONFIG_EEPRO100_MODULE 1
-+#define CONFIG_E100_MODULE 1
-+#define CONFIG_FEALNX_MODULE 1
-+#define CONFIG_NATSEMI_MODULE 1
-+#define CONFIG_NE2K_PCI_MODULE 1
-+#define CONFIG_8139CP_MODULE 1
-+#define CONFIG_8139TOO_MODULE 1
-+#define CONFIG_8139TOO_PIO 1
-+#define CONFIG_8139TOO_TUNE_TWISTER 1
-+#define CONFIG_8139TOO_8129 1
-+#undef CONFIG_8139_OLD_RX_RESET
-+#define CONFIG_SIS900_MODULE 1
-+#define CONFIG_EPIC100_MODULE 1
-+#define CONFIG_SUNDANCE_MODULE 1
-+#undef CONFIG_SUNDANCE_MMIO
-+#define CONFIG_TLAN_MODULE 1
-+#define CONFIG_VIA_RHINE_MODULE 1
-+#undef CONFIG_VIA_RHINE_MMIO
-+#define CONFIG_NET_POCKET 1
-+#define CONFIG_ATP_MODULE 1
-+#define CONFIG_DE600_MODULE 1
-+#define CONFIG_DE620_MODULE 1
-+
-+/*
-+ * Ethernet (1000 Mbit)
-+ */
-+#undef CONFIG_ACENIC
-+#define CONFIG_DL2K_MODULE 1
-+#define CONFIG_E1000_MODULE 1
-+#undef CONFIG_E1000_NAPI
-+#define CONFIG_NS83820_MODULE 1
-+#define CONFIG_HAMACHI_MODULE 1
-+#define CONFIG_YELLOWFIN_MODULE 1
-+#define CONFIG_R8169_MODULE 1
-+#undef CONFIG_R8169_NAPI
-+#undef CONFIG_R8169_VLAN
-+#define CONFIG_SK98LIN_MODULE 1
-+#define CONFIG_VIA_VELOCITY_MODULE 1
-+#define CONFIG_TIGON3_MODULE 1
-+#define CONFIG_BNX2_MODULE 1
-+
-+/*
-+ * Ethernet (10000 Mbit)
-+ */
-+#define CONFIG_IXGB_MODULE 1
-+#undef CONFIG_IXGB_NAPI
-+#define CONFIG_S2IO_MODULE 1
-+#undef CONFIG_S2IO_NAPI
-+#undef CONFIG_2BUFF_MODE
-+
-+/*
-+ * Token Ring devices
-+ */
-+#define CONFIG_TR 1
-+#define CONFIG_IBMTR_MODULE 1
-+#define CONFIG_IBMOL_MODULE 1
-+#define CONFIG_IBMLS_MODULE 1
-+#define CONFIG_3C359_MODULE 1
-+#define CONFIG_TMS380TR_MODULE 1
-+#define CONFIG_TMSPCI_MODULE 1
-+#define CONFIG_SKISA_MODULE 1
-+#define CONFIG_PROTEON_MODULE 1
-+#define CONFIG_ABYSS_MODULE 1
-+#undef CONFIG_SMCTR
-+
-+/*
-+ * Wireless LAN (non-hamradio)
-+ */
-+#define CONFIG_NET_RADIO 1
-+
-+/*
-+ * Obsolete Wireless cards support (pre-802.11)
-+ */
-+#define CONFIG_STRIP_MODULE 1
-+#define CONFIG_ARLAN_MODULE 1
-+#define CONFIG_WAVELAN_MODULE 1
-+#define CONFIG_PCMCIA_WAVELAN_MODULE 1
-+#define CONFIG_PCMCIA_NETWAVE_MODULE 1
-+
-+/*
-+ * Wireless 802.11 Frequency Hopping cards support
-+ */
-+#define CONFIG_PCMCIA_RAYCS_MODULE 1
-+
-+/*
-+ * Wireless 802.11b ISA/PCI cards support
-+ */
-+#define CONFIG_AIRO_MODULE 1
-+#define CONFIG_HERMES_MODULE 1
-+#define CONFIG_PLX_HERMES_MODULE 1
-+#define CONFIG_TMD_HERMES_MODULE 1
-+#define CONFIG_PCI_HERMES_MODULE 1
-+#define CONFIG_ATMEL_MODULE 1
-+#define CONFIG_PCI_ATMEL_MODULE 1
-+
-+/*
-+ * Wireless 802.11b Pcmcia/Cardbus cards support
-+ */
-+#define CONFIG_PCMCIA_HERMES_MODULE 1
-+#define CONFIG_AIRO_CS_MODULE 1
-+#define CONFIG_PCMCIA_ATMEL_MODULE 1
-+#define CONFIG_PCMCIA_WL3501_MODULE 1
-+
-+/*
-+ * Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
-+ */
-+#define CONFIG_PRISM54_MODULE 1
-+#define CONFIG_NET_WIRELESS 1
-+
-+/*
-+ * PCMCIA network device support
-+ */
-+#define CONFIG_NET_PCMCIA 1
-+#define CONFIG_PCMCIA_3C589_MODULE 1
-+#define CONFIG_PCMCIA_3C574_MODULE 1
-+#define CONFIG_PCMCIA_FMVJ18X_MODULE 1
-+#define CONFIG_PCMCIA_PCNET_MODULE 1
-+#define CONFIG_PCMCIA_NMCLAN_MODULE 1
-+#define CONFIG_PCMCIA_SMC91C92_MODULE 1
-+#define CONFIG_PCMCIA_XIRC2PS_MODULE 1
-+#define CONFIG_PCMCIA_AXNET_MODULE 1
-+#define CONFIG_ARCNET_COM20020_CS_MODULE 1
-+#define CONFIG_PCMCIA_IBMTR_MODULE 1
-+
-+/*
-+ * Wan interfaces
-+ */
-+#define CONFIG_WAN 1
-+#define CONFIG_HOSTESS_SV11_MODULE 1
-+#define CONFIG_COSA_MODULE 1
-+#define CONFIG_DSCC4_MODULE 1
-+#define CONFIG_DSCC4_PCISYNC 1
-+#define CONFIG_DSCC4_PCI_RST 1
-+#define CONFIG_LANMEDIA_MODULE 1
-+#define CONFIG_SEALEVEL_4021_MODULE 1
-+#define CONFIG_SYNCLINK_SYNCPPP_MODULE 1
-+#define CONFIG_HDLC_MODULE 1
-+#define CONFIG_HDLC_RAW 1
-+#define CONFIG_HDLC_RAW_ETH 1
-+#define CONFIG_HDLC_CISCO 1
-+#define CONFIG_HDLC_FR 1
-+#define CONFIG_HDLC_PPP 1
-+#define CONFIG_HDLC_X25 1
-+#define CONFIG_PCI200SYN_MODULE 1
-+#define CONFIG_WANXL_MODULE 1
-+#define CONFIG_PC300_MODULE 1
-+#define CONFIG_PC300_MLPPP 1
-+#define CONFIG_N2_MODULE 1
-+#define CONFIG_C101_MODULE 1
-+#define CONFIG_FARSYNC_MODULE 1
-+#define CONFIG_DLCI_MODULE 1
-+#define CONFIG_DLCI_COUNT 24
-+#define CONFIG_DLCI_MAX 8
-+#define CONFIG_SDLA_MODULE 1
-+#define CONFIG_WAN_ROUTER_DRIVERS 1
-+#undef CONFIG_VENDOR_SANGOMA
-+#define CONFIG_CYCLADES_SYNC_MODULE 1
-+#define CONFIG_CYCLOMX_X25 1
-+#define CONFIG_LAPBETHER_MODULE 1
-+#define CONFIG_X25_ASY_MODULE 1
-+#define CONFIG_SBNI_MODULE 1
-+#undef CONFIG_SBNI_MULTILINE
-+
-+/*
-+ * ATM drivers
-+ */
-+#define CONFIG_ATM_TCP_MODULE 1
-+#define CONFIG_ATM_LANAI_MODULE 1
-+#define CONFIG_ATM_ENI_MODULE 1
-+#undef CONFIG_ATM_ENI_DEBUG
-+#undef CONFIG_ATM_ENI_TUNE_BURST
-+#define CONFIG_ATM_FIRESTREAM_MODULE 1
-+#define CONFIG_ATM_ZATM_MODULE 1
-+#undef CONFIG_ATM_ZATM_DEBUG
-+#define CONFIG_ATM_NICSTAR_MODULE 1
-+#undef CONFIG_ATM_NICSTAR_USE_SUNI
-+#undef CONFIG_ATM_NICSTAR_USE_IDT77105
-+#define CONFIG_ATM_IDT77252_MODULE 1
-+#undef CONFIG_ATM_IDT77252_DEBUG
-+#undef CONFIG_ATM_IDT77252_RCV_ALL
-+#define CONFIG_ATM_IDT77252_USE_SUNI 1
-+#define CONFIG_ATM_AMBASSADOR_MODULE 1
-+#undef CONFIG_ATM_AMBASSADOR_DEBUG
-+#define CONFIG_ATM_HORIZON_MODULE 1
-+#undef CONFIG_ATM_HORIZON_DEBUG
-+#define CONFIG_ATM_IA_MODULE 1
-+#undef CONFIG_ATM_IA_DEBUG
-+#define CONFIG_ATM_FORE200E_MAYBE_MODULE 1
-+#define CONFIG_ATM_FORE200E_PCA 1
-+#define CONFIG_ATM_FORE200E_PCA_DEFAULT_FW 1
-+#undef CONFIG_ATM_FORE200E_USE_TASKLET
-+#define CONFIG_ATM_FORE200E_TX_RETRY 16
-+#define CONFIG_ATM_FORE200E_DEBUG 0
-+#define CONFIG_ATM_FORE200E_MODULE 1
-+#define CONFIG_ATM_HE_MODULE 1
-+#define CONFIG_ATM_HE_USE_SUNI 1
-+#define CONFIG_FDDI 1
-+#define CONFIG_DEFXX_MODULE 1
-+#define CONFIG_SKFP_MODULE 1
-+#define CONFIG_HIPPI 1
-+#define CONFIG_ROADRUNNER_MODULE 1
-+#undef CONFIG_ROADRUNNER_LARGE_RINGS
-+#define CONFIG_PLIP_MODULE 1
-+#define CONFIG_PPP_MODULE 1
-+#define CONFIG_PPP_MULTILINK 1
-+#define CONFIG_PPP_FILTER 1
-+#define CONFIG_PPP_ASYNC_MODULE 1
-+#define CONFIG_PPP_SYNC_TTY_MODULE 1
-+#define CONFIG_PPP_DEFLATE_MODULE 1
-+#define CONFIG_PPP_BSDCOMP_MODULE 1
-+#define CONFIG_PPPOE_MODULE 1
-+#define CONFIG_PPPOATM_MODULE 1
-+#define CONFIG_SLIP_MODULE 1
-+#define CONFIG_SLIP_COMPRESSED 1
-+#define CONFIG_SLIP_SMART 1
-+#define CONFIG_SLIP_MODE_SLIP6 1
-+#define CONFIG_NET_FC 1
-+#define CONFIG_SHAPER_MODULE 1
-+#define CONFIG_NETCONSOLE_MODULE 1
-+
-+/*
-+ * ISDN subsystem
-+ */
-+#define CONFIG_ISDN_MODULE 1
-+
-+/*
-+ * Old ISDN4Linux
-+ */
-+#define CONFIG_ISDN_I4L_MODULE 1
-+#define CONFIG_ISDN_PPP 1
-+#define CONFIG_ISDN_PPP_VJ 1
-+#define CONFIG_ISDN_MPP 1
-+#define CONFIG_IPPP_FILTER 1
-+#define CONFIG_ISDN_PPP_BSDCOMP_MODULE 1
-+#define CONFIG_ISDN_AUDIO 1
-+#define CONFIG_ISDN_TTY_FAX 1
-+#define CONFIG_ISDN_X25 1
-+
-+/*
-+ * ISDN feature submodules
-+ */
-+#undef CONFIG_ISDN_DRV_LOOP
-+#undef CONFIG_ISDN_DIVERSION
-+
-+/*
-+ * ISDN4Linux hardware drivers
-+ */
-+
-+/*
-+ * Passive cards
-+ */
-+#define CONFIG_ISDN_DRV_HISAX_MODULE 1
-+
-+/*
-+ * D-channel protocol features
-+ */
-+#define CONFIG_HISAX_EURO 1
-+#define CONFIG_DE_AOC 1
-+#undef CONFIG_HISAX_NO_SENDCOMPLETE
-+#undef CONFIG_HISAX_NO_LLC
-+#undef CONFIG_HISAX_NO_KEYPAD
-+#define CONFIG_HISAX_1TR6 1
-+#define CONFIG_HISAX_NI1 1
-+#define CONFIG_HISAX_MAX_CARDS 8
-+
-+/*
-+ * HiSax supported cards
-+ */
-+#define CONFIG_HISAX_16_0 1
-+#define CONFIG_HISAX_16_3 1
-+#define CONFIG_HISAX_TELESPCI 1
-+#define CONFIG_HISAX_S0BOX 1
-+#define CONFIG_HISAX_AVM_A1 1
-+#define CONFIG_HISAX_FRITZPCI 1
-+#define CONFIG_HISAX_AVM_A1_PCMCIA 1
-+#define CONFIG_HISAX_ELSA 1
-+#define CONFIG_HISAX_IX1MICROR2 1
-+#define CONFIG_HISAX_DIEHLDIVA 1
-+#define CONFIG_HISAX_ASUSCOM 1
-+#define CONFIG_HISAX_TELEINT 1
-+#define CONFIG_HISAX_HFCS 1
-+#define CONFIG_HISAX_SEDLBAUER 1
-+#define CONFIG_HISAX_SPORTSTER 1
-+#define CONFIG_HISAX_MIC 1
-+#define CONFIG_HISAX_NETJET 1
-+#define CONFIG_HISAX_NETJET_U 1
-+#define CONFIG_HISAX_NICCY 1
-+#define CONFIG_HISAX_ISURF 1
-+#define CONFIG_HISAX_HSTSAPHIR 1
-+#define CONFIG_HISAX_BKM_A4T 1
-+#define CONFIG_HISAX_SCT_QUADRO 1
-+#define CONFIG_HISAX_GAZEL 1
-+#define CONFIG_HISAX_HFC_PCI 1
-+#define CONFIG_HISAX_W6692 1
-+#define CONFIG_HISAX_HFC_SX 1
-+#define CONFIG_HISAX_ENTERNOW_PCI 1
-+#undef CONFIG_HISAX_DEBUG
-+
-+/*
-+ * HiSax PCMCIA card service modules
-+ */
-+#define CONFIG_HISAX_SEDLBAUER_CS_MODULE 1
-+#define CONFIG_HISAX_ELSA_CS_MODULE 1
-+#define CONFIG_HISAX_AVM_A1_CS_MODULE 1
-+#define CONFIG_HISAX_TELES_CS_MODULE 1
-+
-+/*
-+ * HiSax sub driver modules
-+ */
-+#define CONFIG_HISAX_ST5481_MODULE 1
-+#define CONFIG_HISAX_HFCUSB_MODULE 1
-+#define CONFIG_HISAX_HFC4S8S_MODULE 1
-+#define CONFIG_HISAX_FRITZ_PCIPNP_MODULE 1
-+#define CONFIG_HISAX_HDLC 1
-+
-+/*
-+ * Active cards
-+ */
-+#define CONFIG_ISDN_DRV_ICN_MODULE 1
-+#define CONFIG_ISDN_DRV_PCBIT_MODULE 1
-+#define CONFIG_ISDN_DRV_SC_MODULE 1
-+#define CONFIG_ISDN_DRV_ACT2000_MODULE 1
-+#undef CONFIG_HYSDN
-+
-+/*
-+ * CAPI subsystem
-+ */
-+#define CONFIG_ISDN_CAPI_MODULE 1
-+#define CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON 1
-+#define CONFIG_ISDN_CAPI_MIDDLEWARE 1
-+#define CONFIG_ISDN_CAPI_CAPI20_MODULE 1
-+#define CONFIG_ISDN_CAPI_CAPIFS_BOOL 1
-+#define CONFIG_ISDN_CAPI_CAPIFS_MODULE 1
-+#define CONFIG_ISDN_CAPI_CAPIDRV_MODULE 1
-+
-+/*
-+ * CAPI hardware drivers
-+ */
-+
-+/*
-+ * Active AVM cards
-+ */
-+#define CONFIG_CAPI_AVM 1
-+#define CONFIG_ISDN_DRV_AVMB1_B1ISA_MODULE 1
-+#define CONFIG_ISDN_DRV_AVMB1_B1PCI_MODULE 1
-+#define CONFIG_ISDN_DRV_AVMB1_B1PCIV4 1
-+#define CONFIG_ISDN_DRV_AVMB1_T1ISA_MODULE 1
-+#define CONFIG_ISDN_DRV_AVMB1_B1PCMCIA_MODULE 1
-+#define CONFIG_ISDN_DRV_AVMB1_AVM_CS_MODULE 1
-+#define CONFIG_ISDN_DRV_AVMB1_T1PCI_MODULE 1
-+#define CONFIG_ISDN_DRV_AVMB1_C4_MODULE 1
-+
-+/*
-+ * Active Eicon DIVA Server cards
-+ */
-+#define CONFIG_CAPI_EICON 1
-+#define CONFIG_ISDN_DIVAS_MODULE 1
-+#define CONFIG_ISDN_DIVAS_BRIPCI 1
-+#define CONFIG_ISDN_DIVAS_PRIPCI 1
-+#define CONFIG_ISDN_DIVAS_DIVACAPI_MODULE 1
-+#define CONFIG_ISDN_DIVAS_USERIDI_MODULE 1
-+#define CONFIG_ISDN_DIVAS_MAINT_MODULE 1
-+
-+/*
-+ * Telephony Support
-+ */
-+#define CONFIG_PHONE_MODULE 1
-+#define CONFIG_PHONE_IXJ_MODULE 1
-+#define CONFIG_PHONE_IXJ_PCMCIA_MODULE 1
-+
-+/*
-+ * Input device support
-+ */
-+#define CONFIG_INPUT 1
-+
-+/*
-+ * Userland interfaces
-+ */
-+#define CONFIG_INPUT_MOUSEDEV 1
-+#define CONFIG_INPUT_MOUSEDEV_PSAUX 1
-+#define CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024
-+#define CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768
-+#define CONFIG_INPUT_JOYDEV_MODULE 1
-+#define CONFIG_INPUT_TSDEV_MODULE 1
-+#define CONFIG_INPUT_TSDEV_SCREEN_X 240
-+#define CONFIG_INPUT_TSDEV_SCREEN_Y 320
-+#define CONFIG_INPUT_EVDEV_MODULE 1
-+#define CONFIG_INPUT_EVBUG_MODULE 1
-+
-+/*
-+ * Input Device Drivers
-+ */
-+#define CONFIG_INPUT_KEYBOARD 1
-+#define CONFIG_KEYBOARD_ATKBD 1
-+#define CONFIG_KEYBOARD_SUNKBD_MODULE 1
-+#define CONFIG_KEYBOARD_LKKBD_MODULE 1
-+#define CONFIG_KEYBOARD_XTKBD_MODULE 1
-+#define CONFIG_KEYBOARD_NEWTON_MODULE 1
-+#define CONFIG_INPUT_MOUSE 1
-+#define CONFIG_MOUSE_PS2 1
-+#define CONFIG_MOUSE_SERIAL_MODULE 1
-+#define CONFIG_MOUSE_INPORT_MODULE 1
-+#undef CONFIG_MOUSE_ATIXL
-+#define CONFIG_MOUSE_LOGIBM_MODULE 1
-+#define CONFIG_MOUSE_PC110PAD_MODULE 1
-+#define CONFIG_MOUSE_VSXXXAA_MODULE 1
-+#define CONFIG_INPUT_JOYSTICK 1
-+#define CONFIG_JOYSTICK_ANALOG_MODULE 1
-+#define CONFIG_JOYSTICK_A3D_MODULE 1
-+#define CONFIG_JOYSTICK_ADI_MODULE 1
-+#define CONFIG_JOYSTICK_COBRA_MODULE 1
-+#define CONFIG_JOYSTICK_GF2K_MODULE 1
-+#define CONFIG_JOYSTICK_GRIP_MODULE 1
-+#define CONFIG_JOYSTICK_GRIP_MP_MODULE 1
-+#define CONFIG_JOYSTICK_GUILLEMOT_MODULE 1
-+#define CONFIG_JOYSTICK_INTERACT_MODULE 1
-+#define CONFIG_JOYSTICK_SIDEWINDER_MODULE 1
-+#define CONFIG_JOYSTICK_TMDC_MODULE 1
-+#define CONFIG_JOYSTICK_IFORCE_MODULE 1
-+#define CONFIG_JOYSTICK_IFORCE_USB 1
-+#define CONFIG_JOYSTICK_IFORCE_232 1
-+#define CONFIG_JOYSTICK_WARRIOR_MODULE 1
-+#define CONFIG_JOYSTICK_MAGELLAN_MODULE 1
-+#define CONFIG_JOYSTICK_SPACEORB_MODULE 1
-+#define CONFIG_JOYSTICK_SPACEBALL_MODULE 1
-+#define CONFIG_JOYSTICK_STINGER_MODULE 1
-+#define CONFIG_JOYSTICK_TWIDJOY_MODULE 1
-+#define CONFIG_JOYSTICK_DB9_MODULE 1
-+#define CONFIG_JOYSTICK_GAMECON_MODULE 1
-+#define CONFIG_JOYSTICK_TURBOGRAFX_MODULE 1
-+#define CONFIG_JOYSTICK_JOYDUMP_MODULE 1
-+#define CONFIG_INPUT_TOUCHSCREEN 1
-+#define CONFIG_TOUCHSCREEN_GUNZE_MODULE 1
-+#define CONFIG_TOUCHSCREEN_ELO_MODULE 1
-+#define CONFIG_TOUCHSCREEN_MTOUCH_MODULE 1
-+#define CONFIG_TOUCHSCREEN_MK712_MODULE 1
-+#define CONFIG_INPUT_MISC 1
-+#define CONFIG_INPUT_PCSPKR_MODULE 1
-+#define CONFIG_INPUT_UINPUT_MODULE 1
-+
-+/*
-+ * Hardware I/O ports
-+ */
-+#define CONFIG_SERIO 1
-+#define CONFIG_SERIO_I8042 1
-+#define CONFIG_SERIO_SERPORT_MODULE 1
-+#define CONFIG_SERIO_CT82C710_MODULE 1
-+#define CONFIG_SERIO_PARKBD_MODULE 1
-+#define CONFIG_SERIO_PCIPS2_MODULE 1
-+#define CONFIG_SERIO_LIBPS2 1
-+#define CONFIG_SERIO_RAW_MODULE 1
-+#define CONFIG_GAMEPORT_MODULE 1
-+#define CONFIG_GAMEPORT_NS558_MODULE 1
-+#define CONFIG_GAMEPORT_L4_MODULE 1
-+#define CONFIG_GAMEPORT_EMU10K1_MODULE 1
-+#define CONFIG_GAMEPORT_VORTEX_MODULE 1
-+#define CONFIG_GAMEPORT_FM801_MODULE 1
-+#undef CONFIG_GAMEPORT_CS461X
-+
-+/*
-+ * Character devices
-+ */
-+#define CONFIG_VT 1
-+#define CONFIG_VT_CONSOLE 1
-+#define CONFIG_HW_CONSOLE 1
-+#undef CONFIG_SERIAL_NONSTANDARD
-+
-+/*
-+ * Serial drivers
-+ */
-+#define CONFIG_SERIAL_8250_MODULE 1
-+#undef CONFIG_SERIAL_8250_CS
-+#undef CONFIG_SERIAL_8250_ACPI
-+#define CONFIG_SERIAL_8250_NR_UARTS 4
-+#undef CONFIG_SERIAL_8250_EXTENDED
-+
-+/*
-+ * Non-8250 serial port support
-+ */
-+#define CONFIG_SERIAL_CORE_MODULE 1
-+#define CONFIG_SERIAL_JSM_MODULE 1
-+#define CONFIG_UNIX98_PTYS 1
-+#define CONFIG_LEGACY_PTYS 1
-+#define CONFIG_LEGACY_PTY_COUNT 256
-+#define CONFIG_PRINTER_MODULE 1
-+#undef CONFIG_LP_CONSOLE
-+#define CONFIG_PPDEV_MODULE 1
-+#define CONFIG_TIPAR_MODULE 1
-+
-+/*
-+ * IPMI
-+ */
-+#define CONFIG_IPMI_HANDLER_MODULE 1
-+#undef CONFIG_IPMI_PANIC_EVENT
-+#define CONFIG_IPMI_DEVICE_INTERFACE_MODULE 1
-+#define CONFIG_IPMI_SI_MODULE 1
-+#define CONFIG_IPMI_WATCHDOG_MODULE 1
-+#define CONFIG_IPMI_POWEROFF_MODULE 1
-+
-+/*
-+ * Watchdog Cards
-+ */
-+#define CONFIG_WATCHDOG 1
-+#undef CONFIG_WATCHDOG_NOWAYOUT
-+
-+/*
-+ * Watchdog Device Drivers
-+ */
-+#define CONFIG_SOFT_WATCHDOG_MODULE 1
-+#define CONFIG_ACQUIRE_WDT_MODULE 1
-+#define CONFIG_ADVANTECH_WDT_MODULE 1
-+#define CONFIG_ALIM1535_WDT_MODULE 1
-+#define CONFIG_ALIM7101_WDT_MODULE 1
-+#define CONFIG_SC520_WDT_MODULE 1
-+#define CONFIG_EUROTECH_WDT_MODULE 1
-+#define CONFIG_IB700_WDT_MODULE 1
-+#define CONFIG_WAFER_WDT_MODULE 1
-+#define CONFIG_I8XX_TCO_MODULE 1
-+#define CONFIG_SC1200_WDT_MODULE 1
-+#define CONFIG_SCx200_WDT_MODULE 1
-+#define CONFIG_60XX_WDT_MODULE 1
-+#define CONFIG_CPU5_WDT_MODULE 1
-+#define CONFIG_W83627HF_WDT_MODULE 1
-+#define CONFIG_W83877F_WDT_MODULE 1
-+#define CONFIG_MACHZ_WDT_MODULE 1
-+
-+/*
-+ * ISA-based Watchdog Cards
-+ */
-+#define CONFIG_PCWATCHDOG_MODULE 1
-+#define CONFIG_MIXCOMWD_MODULE 1
-+#define CONFIG_WDT_MODULE 1
-+#define CONFIG_WDT_501 1
-+
-+/*
-+ * PCI-based Watchdog Cards
-+ */
-+#define CONFIG_PCIPCWATCHDOG_MODULE 1
-+#define CONFIG_WDTPCI_MODULE 1
-+#define CONFIG_WDT_501_PCI 1
-+
-+/*
-+ * USB-based Watchdog Cards
-+ */
-+#define CONFIG_USBPCWATCHDOG_MODULE 1
-+#define CONFIG_HW_RANDOM_MODULE 1
-+#define CONFIG_NVRAM_MODULE 1
-+#define CONFIG_RTC_MODULE 1
-+#define CONFIG_GEN_RTC_MODULE 1
-+#define CONFIG_GEN_RTC_X 1
-+#define CONFIG_DTLK_MODULE 1
-+#define CONFIG_R3964_MODULE 1
-+#define CONFIG_APPLICOM_MODULE 1
-+#define CONFIG_SONYPI_MODULE 1
-+
-+/*
-+ * Ftape, the floppy tape device driver
-+ */
-+#undef CONFIG_FTAPE
-+#define CONFIG_AGP_MODULE 1
-+#define CONFIG_AGP_ALI_MODULE 1
-+#define CONFIG_AGP_ATI_MODULE 1
-+#define CONFIG_AGP_AMD_MODULE 1
-+#define CONFIG_AGP_AMD64_MODULE 1
-+#define CONFIG_AGP_INTEL_MODULE 1
-+#define CONFIG_AGP_NVIDIA_MODULE 1
-+#define CONFIG_AGP_SIS_MODULE 1
-+#define CONFIG_AGP_SWORKS_MODULE 1
-+#define CONFIG_AGP_VIA_MODULE 1
-+#define CONFIG_AGP_EFFICEON_MODULE 1
-+#define CONFIG_DRM_MODULE 1
-+#define CONFIG_DRM_TDFX_MODULE 1
-+#undef CONFIG_DRM_GAMMA
-+#define CONFIG_DRM_R128_MODULE 1
-+#define CONFIG_DRM_RADEON_MODULE 1
-+#define CONFIG_DRM_I810_MODULE 1
-+#define CONFIG_DRM_I830_MODULE 1
-+#define CONFIG_DRM_I915_MODULE 1
-+#define CONFIG_DRM_MGA_MODULE 1
-+#define CONFIG_DRM_SIS_MODULE 1
-+
-+/*
-+ * PCMCIA character devices
-+ */
-+#define CONFIG_SYNCLINK_CS_MODULE 1
-+#define CONFIG_MWAVE_MODULE 1
-+#define CONFIG_SCx200_GPIO_MODULE 1
-+#define CONFIG_RAW_DRIVER_MODULE 1
-+#undef CONFIG_HPET
-+#define CONFIG_MAX_RAW_DEVS 256
-+#define CONFIG_HANGCHECK_TIMER_MODULE 1
-+
-+/*
-+ * TPM devices
-+ */
-+#undef CONFIG_TCG_TPM
-+
-+/*
-+ * I2C support
-+ */
-+#define CONFIG_I2C_MODULE 1
-+#define CONFIG_I2C_CHARDEV_MODULE 1
-+
-+/*
-+ * I2C Algorithms
-+ */
-+#define CONFIG_I2C_ALGOBIT_MODULE 1
-+#define CONFIG_I2C_ALGOPCF_MODULE 1
-+#define CONFIG_I2C_ALGOPCA_MODULE 1
-+
-+/*
-+ * I2C Hardware Bus support
-+ */
-+#define CONFIG_I2C_ALI1535_MODULE 1
-+#define CONFIG_I2C_ALI1563_MODULE 1
-+#define CONFIG_I2C_ALI15X3_MODULE 1
-+#define CONFIG_I2C_AMD756_MODULE 1
-+#define CONFIG_I2C_AMD756_S4882_MODULE 1
-+#define CONFIG_I2C_AMD8111_MODULE 1
-+#define CONFIG_I2C_ELEKTOR_MODULE 1
-+#define CONFIG_I2C_I801_MODULE 1
-+#define CONFIG_I2C_I810_MODULE 1
-+#define CONFIG_I2C_PIIX4_MODULE 1
-+#define CONFIG_I2C_ISA_MODULE 1
-+#define CONFIG_I2C_NFORCE2_MODULE 1
-+#define CONFIG_I2C_PARPORT_MODULE 1
-+#define CONFIG_I2C_PARPORT_LIGHT_MODULE 1
-+#define CONFIG_I2C_PROSAVAGE_MODULE 1
-+#define CONFIG_I2C_SAVAGE4_MODULE 1
-+#define CONFIG_SCx200_I2C_MODULE 1
-+#define CONFIG_SCx200_I2C_SCL 12
-+#define CONFIG_SCx200_I2C_SDA 13
-+#define CONFIG_SCx200_ACB_MODULE 1
-+#define CONFIG_I2C_SIS5595_MODULE 1
-+#define CONFIG_I2C_SIS630_MODULE 1
-+#define CONFIG_I2C_SIS96X_MODULE 1
-+#define CONFIG_I2C_STUB_MODULE 1
-+#define CONFIG_I2C_VIA_MODULE 1
-+#define CONFIG_I2C_VIAPRO_MODULE 1
-+#define CONFIG_I2C_VOODOO3_MODULE 1
-+#define CONFIG_I2C_PCA_ISA_MODULE 1
-+
-+/*
-+ * Hardware Sensors Chip support
-+ */
-+#define CONFIG_I2C_SENSOR_MODULE 1
-+#define CONFIG_SENSORS_ADM1021_MODULE 1
-+#define CONFIG_SENSORS_ADM1025_MODULE 1
-+#define CONFIG_SENSORS_ADM1026_MODULE 1
-+#define CONFIG_SENSORS_ADM1031_MODULE 1
-+#define CONFIG_SENSORS_ASB100_MODULE 1
-+#define CONFIG_SENSORS_DS1621_MODULE 1
-+#define CONFIG_SENSORS_FSCHER_MODULE 1
-+#define CONFIG_SENSORS_FSCPOS_MODULE 1
-+#define CONFIG_SENSORS_GL518SM_MODULE 1
-+#define CONFIG_SENSORS_GL520SM_MODULE 1
-+#define CONFIG_SENSORS_IT87_MODULE 1
-+#define CONFIG_SENSORS_LM63_MODULE 1
-+#define CONFIG_SENSORS_LM75_MODULE 1
-+#define CONFIG_SENSORS_LM77_MODULE 1
-+#define CONFIG_SENSORS_LM78_MODULE 1
-+#define CONFIG_SENSORS_LM80_MODULE 1
-+#define CONFIG_SENSORS_LM83_MODULE 1
-+#define CONFIG_SENSORS_LM85_MODULE 1
-+#define CONFIG_SENSORS_LM87_MODULE 1
-+#define CONFIG_SENSORS_LM90_MODULE 1
-+#define CONFIG_SENSORS_LM92_MODULE 1
-+#define CONFIG_SENSORS_MAX1619_MODULE 1
-+#define CONFIG_SENSORS_PC87360_MODULE 1
-+#undef CONFIG_SENSORS_SMSC47B397
-+#define CONFIG_SENSORS_SIS5595_MODULE 1
-+#define CONFIG_SENSORS_SMSC47M1_MODULE 1
-+#define CONFIG_SENSORS_VIA686A_MODULE 1
-+#define CONFIG_SENSORS_W83781D_MODULE 1
-+#define CONFIG_SENSORS_W83L785TS_MODULE 1
-+#define CONFIG_SENSORS_W83627HF_MODULE 1
-+
-+/*
-+ * Other I2C Chip support
-+ */
-+#define CONFIG_SENSORS_DS1337_MODULE 1
-+#define CONFIG_SENSORS_EEPROM_MODULE 1
-+#define CONFIG_SENSORS_PCF8574_MODULE 1
-+#define CONFIG_SENSORS_PCF8591_MODULE 1
-+#define CONFIG_SENSORS_RTC8564_MODULE 1
-+#undef CONFIG_I2C_DEBUG_CORE
-+#undef CONFIG_I2C_DEBUG_ALGO
-+#undef CONFIG_I2C_DEBUG_BUS
-+#undef CONFIG_I2C_DEBUG_CHIP
-+
-+/*
-+ * Dallas's 1-wire bus
-+ */
-+#define CONFIG_W1_MODULE 1
-+#define CONFIG_W1_MATROX_MODULE 1
-+#define CONFIG_W1_DS9490_MODULE 1
-+#define CONFIG_W1_DS9490_BRIDGE_MODULE 1
-+#define CONFIG_W1_THERM_MODULE 1
-+#define CONFIG_W1_SMEM_MODULE 1
-+
-+/*
-+ * Misc devices
-+ */
-+#define CONFIG_IBM_ASM_MODULE 1
-+
-+/*
-+ * Multimedia devices
-+ */
-+#define CONFIG_VIDEO_DEV_MODULE 1
-+
-+/*
-+ * Video For Linux
-+ */
-+
-+/*
-+ * Video Adapters
-+ */
-+#define CONFIG_VIDEO_BT848_MODULE 1
-+#define CONFIG_VIDEO_PMS_MODULE 1
-+#define CONFIG_VIDEO_BWQCAM_MODULE 1
-+#define CONFIG_VIDEO_CQCAM_MODULE 1
-+#define CONFIG_VIDEO_W9966_MODULE 1
-+#define CONFIG_VIDEO_CPIA_MODULE 1
-+#define CONFIG_VIDEO_CPIA_PP_MODULE 1
-+#define CONFIG_VIDEO_CPIA_USB_MODULE 1
-+#define CONFIG_VIDEO_SAA5246A_MODULE 1
-+#define CONFIG_VIDEO_SAA5249_MODULE 1
-+#define CONFIG_TUNER_3036_MODULE 1
-+#define CONFIG_VIDEO_STRADIS_MODULE 1
-+#define CONFIG_VIDEO_ZORAN_MODULE 1
-+#define CONFIG_VIDEO_ZORAN_BUZ_MODULE 1
-+#define CONFIG_VIDEO_ZORAN_DC10_MODULE 1
-+#define CONFIG_VIDEO_ZORAN_DC30_MODULE 1
-+#define CONFIG_VIDEO_ZORAN_LML33_MODULE 1
-+#define CONFIG_VIDEO_ZORAN_LML33R10_MODULE 1
-+#undef CONFIG_VIDEO_ZR36120
-+#define CONFIG_VIDEO_MEYE_MODULE 1
-+#undef CONFIG_VIDEO_SAA7134
-+#define CONFIG_VIDEO_MXB_MODULE 1
-+#define CONFIG_VIDEO_DPC_MODULE 1
-+#define CONFIG_VIDEO_HEXIUM_ORION_MODULE 1
-+#define CONFIG_VIDEO_HEXIUM_GEMINI_MODULE 1
-+#define CONFIG_VIDEO_CX88_MODULE 1
-+#undef CONFIG_VIDEO_CX88_DVB
-+#define CONFIG_VIDEO_OVCAMCHIP_MODULE 1
-+
-+/*
-+ * Radio Adapters
-+ */
-+#define CONFIG_RADIO_CADET_MODULE 1
-+#define CONFIG_RADIO_RTRACK_MODULE 1
-+#define CONFIG_RADIO_RTRACK2_MODULE 1
-+#define CONFIG_RADIO_AZTECH_MODULE 1
-+#define CONFIG_RADIO_GEMTEK_MODULE 1
-+#define CONFIG_RADIO_GEMTEK_PCI_MODULE 1
-+#define CONFIG_RADIO_MAXIRADIO_MODULE 1
-+#define CONFIG_RADIO_MAESTRO_MODULE 1
-+#define CONFIG_RADIO_MIROPCM20_MODULE 1
-+#define CONFIG_RADIO_MIROPCM20_RDS_MODULE 1
-+#define CONFIG_RADIO_SF16FMI_MODULE 1
-+#define CONFIG_RADIO_SF16FMR2_MODULE 1
-+#define CONFIG_RADIO_TERRATEC_MODULE 1
-+#define CONFIG_RADIO_TRUST_MODULE 1
-+#define CONFIG_RADIO_TYPHOON_MODULE 1
-+#define CONFIG_RADIO_TYPHOON_PROC_FS 1
-+#define CONFIG_RADIO_ZOLTRIX_MODULE 1
-+
-+/*
-+ * Digital Video Broadcasting Devices
-+ */
-+#define CONFIG_DVB 1
-+#define CONFIG_DVB_CORE_MODULE 1
-+
-+/*
-+ * Supported SAA7146 based PCI Adapters
-+ */
-+#define CONFIG_DVB_AV7110_MODULE 1
-+#undef CONFIG_DVB_AV7110_OSD
-+#define CONFIG_DVB_BUDGET_MODULE 1
-+#define CONFIG_DVB_BUDGET_CI_MODULE 1
-+#define CONFIG_DVB_BUDGET_AV_MODULE 1
-+#define CONFIG_DVB_BUDGET_PATCH_MODULE 1
-+
-+/*
-+ * Supported USB Adapters
-+ */
-+#define CONFIG_DVB_TTUSB_BUDGET_MODULE 1
-+#define CONFIG_DVB_TTUSB_DEC_MODULE 1
-+#define CONFIG_DVB_DIBUSB_MODULE 1
-+#define CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES 1
-+#undef CONFIG_DVB_DIBCOM_DEBUG
-+#define CONFIG_DVB_CINERGYT2_MODULE 1
-+#undef CONFIG_DVB_CINERGYT2_TUNING
-+
-+/*
-+ * Supported FlexCopII (B2C2) Adapters
-+ */
-+#define CONFIG_DVB_B2C2_FLEXCOP_MODULE 1
-+#define CONFIG_DVB_B2C2_FLEXCOP_PCI_MODULE 1
-+#define CONFIG_DVB_B2C2_FLEXCOP_USB_MODULE 1
-+#undef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
-+#define CONFIG_DVB_B2C2_SKYSTAR_MODULE 1
-+
-+/*
-+ * Supported BT878 Adapters
-+ */
-+#define CONFIG_DVB_BT8XX_MODULE 1
-+
-+/*
-+ * Supported DVB Frontends
-+ */
-+
-+/*
-+ * Customise DVB Frontends
-+ */
-+
-+/*
-+ * DVB-S (satellite) frontends
-+ */
-+#define CONFIG_DVB_STV0299_MODULE 1
-+#define CONFIG_DVB_CX24110_MODULE 1
-+#define CONFIG_DVB_TDA8083_MODULE 1
-+#define CONFIG_DVB_TDA80XX_MODULE 1
-+#define CONFIG_DVB_MT312_MODULE 1
-+#define CONFIG_DVB_VES1X93_MODULE 1
-+
-+/*
-+ * DVB-T (terrestrial) frontends
-+ */
-+#define CONFIG_DVB_SP8870_MODULE 1
-+#define CONFIG_DVB_SP887X_MODULE 1
-+#define CONFIG_DVB_CX22700_MODULE 1
-+#define CONFIG_DVB_CX22702_MODULE 1
-+#define CONFIG_DVB_L64781_MODULE 1
-+#define CONFIG_DVB_TDA1004X_MODULE 1
-+#define CONFIG_DVB_NXT6000_MODULE 1
-+#define CONFIG_DVB_MT352_MODULE 1
-+#define CONFIG_DVB_DIB3000MB_MODULE 1
-+#define CONFIG_DVB_DIB3000MC_MODULE 1
-+
-+/*
-+ * DVB-C (cable) frontends
-+ */
-+#define CONFIG_DVB_ATMEL_AT76C651_MODULE 1
-+#define CONFIG_DVB_VES1820_MODULE 1
-+#define CONFIG_DVB_TDA10021_MODULE 1
-+#define CONFIG_DVB_STV0297_MODULE 1
-+
-+/*
-+ * ATSC (North American/Korean Terrestrial DTV) frontends
-+ */
-+#define CONFIG_DVB_NXT2002_MODULE 1
-+#define CONFIG_DVB_OR51211_MODULE 1
-+#define CONFIG_DVB_OR51132_MODULE 1
-+#define CONFIG_VIDEO_SAA7146_MODULE 1
-+#define CONFIG_VIDEO_SAA7146_VV_MODULE 1
-+#define CONFIG_VIDEO_VIDEOBUF_MODULE 1
-+#define CONFIG_VIDEO_TUNER_MODULE 1
-+#define CONFIG_VIDEO_BUF_MODULE 1
-+#define CONFIG_VIDEO_BTCX_MODULE 1
-+#define CONFIG_VIDEO_IR_MODULE 1
-+#define CONFIG_VIDEO_TVEEPROM_MODULE 1
-+
-+/*
-+ * Graphics support
-+ */
-+#define CONFIG_FB 1
-+#define CONFIG_FB_CFB_FILLRECT_MODULE 1
-+#define CONFIG_FB_CFB_COPYAREA_MODULE 1
-+#define CONFIG_FB_CFB_IMAGEBLIT_MODULE 1
-+#define CONFIG_FB_SOFT_CURSOR_MODULE 1
-+#undef CONFIG_FB_MACMODES
-+#define CONFIG_FB_MODE_HELPERS 1
-+#define CONFIG_FB_TILEBLITTING 1
-+#define CONFIG_FB_CIRRUS_MODULE 1
-+#define CONFIG_FB_PM2_MODULE 1
-+#define CONFIG_FB_PM2_FIFO_DISCONNECT 1
-+#define CONFIG_FB_CYBER2000_MODULE 1
-+#undef CONFIG_FB_ASILIANT
-+#undef CONFIG_FB_IMSTT
-+#define CONFIG_FB_VGA16_MODULE 1
-+#undef CONFIG_FB_VESA
-+#define CONFIG_VIDEO_SELECT 1
-+#define CONFIG_FB_HGA_MODULE 1
-+#undef CONFIG_FB_HGA_ACCEL
-+#define CONFIG_FB_NVIDIA_MODULE 1
-+#define CONFIG_FB_NVIDIA_I2C 1
-+#define CONFIG_FB_RIVA_MODULE 1
-+#define CONFIG_FB_RIVA_I2C 1
-+#define CONFIG_FB_RIVA_DEBUG 1
-+#define CONFIG_FB_I810_MODULE 1
-+#undef CONFIG_FB_I810_GTF
-+#define CONFIG_FB_INTEL_MODULE 1
-+#undef CONFIG_FB_INTEL_DEBUG
-+#define CONFIG_FB_MATROX_MODULE 1
-+#define CONFIG_FB_MATROX_MILLENIUM 1
-+#define CONFIG_FB_MATROX_MYSTIQUE 1
-+#undef CONFIG_FB_MATROX_G
-+#define CONFIG_FB_MATROX_I2C_MODULE 1
-+#define CONFIG_FB_MATROX_MULTIHEAD 1
-+#define CONFIG_FB_RADEON_OLD_MODULE 1
-+#define CONFIG_FB_RADEON_MODULE 1
-+#define CONFIG_FB_RADEON_I2C 1
-+#undef CONFIG_FB_RADEON_DEBUG
-+#define CONFIG_FB_ATY128_MODULE 1
-+#define CONFIG_FB_ATY_MODULE 1
-+#define CONFIG_FB_ATY_CT 1
-+#define CONFIG_FB_ATY_GENERIC_LCD 1
-+#define CONFIG_FB_ATY_XL_INIT 1
-+#define CONFIG_FB_ATY_GX 1
-+#define CONFIG_FB_SAVAGE_MODULE 1
-+#define CONFIG_FB_SAVAGE_I2C 1
-+#define CONFIG_FB_SAVAGE_ACCEL 1
-+#define CONFIG_FB_SIS_MODULE 1
-+#define CONFIG_FB_SIS_300 1
-+#define CONFIG_FB_SIS_315 1
-+#define CONFIG_FB_NEOMAGIC_MODULE 1
-+#define CONFIG_FB_KYRO_MODULE 1
-+#define CONFIG_FB_3DFX_MODULE 1
-+#undef CONFIG_FB_3DFX_ACCEL
-+#define CONFIG_FB_VOODOO1_MODULE 1
-+#define CONFIG_FB_TRIDENT_MODULE 1
-+#undef CONFIG_FB_TRIDENT_ACCEL
-+#undef CONFIG_FB_PM3
-+#define CONFIG_FB_GEODE 1
-+#define CONFIG_FB_GEODE_GX1_MODULE 1
-+#define CONFIG_FB_S1D13XXX_MODULE 1
-+#define CONFIG_FB_VIRTUAL_MODULE 1
-+
-+/*
-+ * Console display driver support
-+ */
-+#define CONFIG_VGA_CONSOLE 1
-+#define CONFIG_MDA_CONSOLE_MODULE 1
-+#define CONFIG_DUMMY_CONSOLE 1
-+#define CONFIG_FRAMEBUFFER_CONSOLE_MODULE 1
-+#undef CONFIG_FONTS
-+#define CONFIG_FONT_8x8 1
-+#define CONFIG_FONT_8x16 1
-+
-+/*
-+ * Logo configuration
-+ */
-+#undef CONFIG_LOGO
-+#undef CONFIG_BACKLIGHT_LCD_SUPPORT
-+
-+/*
-+ * Sound
-+ */
-+#define CONFIG_SOUND_MODULE 1
-+
-+/*
-+ * Advanced Linux Sound Architecture
-+ */
-+#define CONFIG_SND_MODULE 1
-+#define CONFIG_SND_TIMER_MODULE 1
-+#define CONFIG_SND_PCM_MODULE 1
-+#define CONFIG_SND_HWDEP_MODULE 1
-+#define CONFIG_SND_RAWMIDI_MODULE 1
-+#define CONFIG_SND_SEQUENCER_MODULE 1
-+#define CONFIG_SND_SEQ_DUMMY_MODULE 1
-+#define CONFIG_SND_OSSEMUL 1
-+#define CONFIG_SND_MIXER_OSS_MODULE 1
-+#define CONFIG_SND_PCM_OSS_MODULE 1
-+#define CONFIG_SND_SEQUENCER_OSS 1
-+#define CONFIG_SND_RTCTIMER_MODULE 1
-+#undef CONFIG_SND_VERBOSE_PRINTK
-+#undef CONFIG_SND_DEBUG
-+#define CONFIG_SND_GENERIC_PM 1
-+
-+/*
-+ * Generic devices
-+ */
-+#define CONFIG_SND_MPU401_UART_MODULE 1
-+#define CONFIG_SND_OPL3_LIB_MODULE 1
-+#define CONFIG_SND_OPL4_LIB_MODULE 1
-+#define CONFIG_SND_VX_LIB_MODULE 1
-+#define CONFIG_SND_DUMMY_MODULE 1
-+#define CONFIG_SND_VIRMIDI_MODULE 1
-+#define CONFIG_SND_MTPAV_MODULE 1
-+#define CONFIG_SND_SERIAL_U16550_MODULE 1
-+#define CONFIG_SND_MPU401_MODULE 1
-+
-+/*
-+ * ISA devices
-+ */
-+#define CONFIG_SND_AD1848_LIB_MODULE 1
-+#define CONFIG_SND_CS4231_LIB_MODULE 1
-+#define CONFIG_SND_AD1816A_MODULE 1
-+#define CONFIG_SND_AD1848_MODULE 1
-+#define CONFIG_SND_CS4231_MODULE 1
-+#define CONFIG_SND_CS4232_MODULE 1
-+#define CONFIG_SND_CS4236_MODULE 1
-+#define CONFIG_SND_ES968_MODULE 1
-+#define CONFIG_SND_ES1688_MODULE 1
-+#define CONFIG_SND_ES18XX_MODULE 1
-+#define CONFIG_SND_GUS_SYNTH_MODULE 1
-+#define CONFIG_SND_GUSCLASSIC_MODULE 1
-+#define CONFIG_SND_GUSEXTREME_MODULE 1
-+#define CONFIG_SND_GUSMAX_MODULE 1
-+#define CONFIG_SND_INTERWAVE_MODULE 1
-+#define CONFIG_SND_INTERWAVE_STB_MODULE 1
-+#define CONFIG_SND_OPTI92X_AD1848_MODULE 1
-+#define CONFIG_SND_OPTI92X_CS4231_MODULE 1
-+#define CONFIG_SND_OPTI93X_MODULE 1
-+#define CONFIG_SND_SB8_MODULE 1
-+#define CONFIG_SND_SB16_MODULE 1
-+#define CONFIG_SND_SBAWE_MODULE 1
-+#define CONFIG_SND_SB16_CSP 1
-+#define CONFIG_SND_WAVEFRONT_MODULE 1
-+#define CONFIG_SND_ALS100_MODULE 1
-+#define CONFIG_SND_AZT2320_MODULE 1
-+#define CONFIG_SND_CMI8330_MODULE 1
-+#define CONFIG_SND_DT019X_MODULE 1
-+#define CONFIG_SND_OPL3SA2_MODULE 1
-+#define CONFIG_SND_SGALAXY_MODULE 1
-+#define CONFIG_SND_SSCAPE_MODULE 1
-+
-+/*
-+ * PCI devices
-+ */
-+#define CONFIG_SND_AC97_CODEC_MODULE 1
-+#define CONFIG_SND_ALI5451_MODULE 1
-+#define CONFIG_SND_ATIIXP_MODULE 1
-+#define CONFIG_SND_ATIIXP_MODEM_MODULE 1
-+#define CONFIG_SND_AU8810_MODULE 1
-+#define CONFIG_SND_AU8820_MODULE 1
-+#define CONFIG_SND_AU8830_MODULE 1
-+#define CONFIG_SND_AZT3328_MODULE 1
-+#define CONFIG_SND_BT87X_MODULE 1
-+#undef CONFIG_SND_BT87X_OVERCLOCK
-+#define CONFIG_SND_CS46XX_MODULE 1
-+#define CONFIG_SND_CS46XX_NEW_DSP 1
-+#define CONFIG_SND_CS4281_MODULE 1
-+#define CONFIG_SND_EMU10K1_MODULE 1
-+#undef CONFIG_SND_EMU10K1X
-+#undef CONFIG_SND_CA0106
-+#define CONFIG_SND_KORG1212_MODULE 1
-+#define CONFIG_SND_MIXART_MODULE 1
-+#define CONFIG_SND_NM256_MODULE 1
-+#define CONFIG_SND_RME32_MODULE 1
-+#define CONFIG_SND_RME96_MODULE 1
-+#define CONFIG_SND_RME9652_MODULE 1
-+#define CONFIG_SND_HDSP_MODULE 1
-+#define CONFIG_SND_TRIDENT_MODULE 1
-+#define CONFIG_SND_YMFPCI_MODULE 1
-+#define CONFIG_SND_ALS4000_MODULE 1
-+#define CONFIG_SND_CMIPCI_MODULE 1
-+#define CONFIG_SND_ENS1370_MODULE 1
-+#define CONFIG_SND_ENS1371_MODULE 1
-+#define CONFIG_SND_ES1938_MODULE 1
-+#define CONFIG_SND_ES1968_MODULE 1
-+#define CONFIG_SND_MAESTRO3_MODULE 1
-+#define CONFIG_SND_FM801_MODULE 1
-+#define CONFIG_SND_FM801_TEA575X_MODULE 1
-+#define CONFIG_SND_ICE1712_MODULE 1
-+#define CONFIG_SND_ICE1724_MODULE 1
-+#define CONFIG_SND_INTEL8X0_MODULE 1
-+#define CONFIG_SND_INTEL8X0M_MODULE 1
-+#define CONFIG_SND_SONICVIBES_MODULE 1
-+#define CONFIG_SND_VIA82XX_MODULE 1
-+#undef CONFIG_SND_VIA82XX_MODEM
-+#define CONFIG_SND_VX222_MODULE 1
-+#define CONFIG_SND_HDA_INTEL_MODULE 1
-+
-+/*
-+ * USB devices
-+ */
-+#define CONFIG_SND_USB_AUDIO_MODULE 1
-+#define CONFIG_SND_USB_USX2Y_MODULE 1
-+
-+/*
-+ * PCMCIA devices
-+ */
-+#define CONFIG_SND_VXPOCKET_MODULE 1
-+#define CONFIG_SND_VXP440_MODULE 1
-+#define CONFIG_SND_PDAUDIOCF_MODULE 1
-+
-+/*
-+ * Open Sound System
-+ */
-+#define CONFIG_SOUND_PRIME_MODULE 1
-+#define CONFIG_SOUND_BT878_MODULE 1
-+#define CONFIG_SOUND_CMPCI_MODULE 1
-+#undef CONFIG_SOUND_CMPCI_FM
-+#undef CONFIG_SOUND_CMPCI_MIDI
-+#define CONFIG_SOUND_CMPCI_JOYSTICK 1
-+#define CONFIG_SOUND_EMU10K1_MODULE 1
-+#define CONFIG_MIDI_EMU10K1 1
-+#define CONFIG_SOUND_FUSION_MODULE 1
-+#define CONFIG_SOUND_CS4281_MODULE 1
-+#define CONFIG_SOUND_ES1370_MODULE 1
-+#define CONFIG_SOUND_ES1371_MODULE 1
-+#define CONFIG_SOUND_ESSSOLO1_MODULE 1
-+#define CONFIG_SOUND_MAESTRO_MODULE 1
-+#define CONFIG_SOUND_MAESTRO3_MODULE 1
-+#define CONFIG_SOUND_ICH_MODULE 1
-+#define CONFIG_SOUND_SONICVIBES_MODULE 1
-+#define CONFIG_SOUND_TRIDENT_MODULE 1
-+#undef CONFIG_SOUND_MSNDCLAS
-+#undef CONFIG_SOUND_MSNDPIN
-+#define CONFIG_SOUND_VIA82CXXX_MODULE 1
-+#define CONFIG_MIDI_VIA82CXXX 1
-+#define CONFIG_SOUND_OSS_MODULE 1
-+#undef CONFIG_SOUND_TRACEINIT
-+#undef CONFIG_SOUND_DMAP
-+#undef CONFIG_SOUND_AD1816
-+#define CONFIG_SOUND_AD1889_MODULE 1
-+#define CONFIG_SOUND_SGALAXY_MODULE 1
-+#define CONFIG_SOUND_ADLIB_MODULE 1
-+#define CONFIG_SOUND_ACI_MIXER_MODULE 1
-+#define CONFIG_SOUND_CS4232_MODULE 1
-+#define CONFIG_SOUND_SSCAPE_MODULE 1
-+#define CONFIG_SOUND_GUS_MODULE 1
-+#define CONFIG_SOUND_GUS16 1
-+#define CONFIG_SOUND_GUSMAX 1
-+#define CONFIG_SOUND_VMIDI_MODULE 1
-+#define CONFIG_SOUND_TRIX_MODULE 1
-+#define CONFIG_SOUND_MSS_MODULE 1
-+#define CONFIG_SOUND_MPU401_MODULE 1
-+#define CONFIG_SOUND_NM256_MODULE 1
-+#define CONFIG_SOUND_MAD16_MODULE 1
-+#define CONFIG_MAD16_OLDCARD 1
-+#define CONFIG_SOUND_PAS_MODULE 1
-+#define CONFIG_SOUND_PSS_MODULE 1
-+#define CONFIG_PSS_MIXER 1
-+#define CONFIG_SOUND_SB_MODULE 1
-+#undef CONFIG_SOUND_AWE32_SYNTH
-+#define CONFIG_SOUND_WAVEFRONT_MODULE 1
-+#define CONFIG_SOUND_MAUI_MODULE 1
-+#define CONFIG_SOUND_YM3812_MODULE 1
-+#define CONFIG_SOUND_OPL3SA1_MODULE 1
-+#define CONFIG_SOUND_OPL3SA2_MODULE 1
-+#define CONFIG_SOUND_YMFPCI_MODULE 1
-+#undef CONFIG_SOUND_YMFPCI_LEGACY
-+#define CONFIG_SOUND_UART6850_MODULE 1
-+#define CONFIG_SOUND_AEDSP16_MODULE 1
-+#define CONFIG_SC6600 1
-+#define CONFIG_SC6600_JOY 1
-+#define CONFIG_SC6600_CDROM 4
-+#define CONFIG_SC6600_CDROMBASE 0x0
-+#undef CONFIG_AEDSP16_MSS
-+#undef CONFIG_AEDSP16_SBPRO
-+#undef CONFIG_AEDSP16_MPU401
-+#define CONFIG_SOUND_TVMIXER_MODULE 1
-+#define CONFIG_SOUND_KAHLUA_MODULE 1
-+#define CONFIG_SOUND_ALI5455_MODULE 1
-+#define CONFIG_SOUND_FORTE_MODULE 1
-+#define CONFIG_SOUND_RME96XX_MODULE 1
-+#define CONFIG_SOUND_AD1980_MODULE 1
-+
-+/*
-+ * USB support
-+ */
-+#define CONFIG_USB_ARCH_HAS_HCD 1
-+#define CONFIG_USB_ARCH_HAS_OHCI 1
-+#define CONFIG_USB 1
-+#undef CONFIG_USB_DEBUG
-+
-+/*
-+ * Miscellaneous USB options
-+ */
-+#define CONFIG_USB_DEVICEFS 1
-+#define CONFIG_USB_BANDWIDTH 1
-+#undef CONFIG_USB_DYNAMIC_MINORS
-+#undef CONFIG_USB_OTG
-+
-+/*
-+ * USB Host Controller Drivers
-+ */
-+#define CONFIG_USB_EHCI_HCD 1
-+#define CONFIG_USB_EHCI_SPLIT_ISO 1
-+#define CONFIG_USB_EHCI_ROOT_HUB_TT 1
-+#define CONFIG_USB_OHCI_HCD_MODULE 1
-+#undef CONFIG_USB_OHCI_BIG_ENDIAN
-+#define CONFIG_USB_OHCI_LITTLE_ENDIAN 1
-+#define CONFIG_USB_UHCI_HCD_MODULE 1
-+#define CONFIG_USB_SL811_HCD_MODULE 1
-+#define CONFIG_USB_SL811_CS_MODULE 1
-+
-+/*
-+ * USB Device Class drivers
-+ */
-+#define CONFIG_USB_AUDIO_MODULE 1
-+
-+/*
-+ * USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
-+ */
-+#define CONFIG_USB_MIDI_MODULE 1
-+#define CONFIG_USB_ACM_MODULE 1
-+#define CONFIG_USB_PRINTER_MODULE 1
-+
-+/*
-+ * NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-+ */
-+#define CONFIG_USB_STORAGE_MODULE 1
-+#undef CONFIG_USB_STORAGE_DEBUG
-+#define CONFIG_USB_STORAGE_DATAFAB 1
-+#define CONFIG_USB_STORAGE_FREECOM 1
-+#define CONFIG_USB_STORAGE_ISD200 1
-+#define CONFIG_USB_STORAGE_DPCM 1
-+#define CONFIG_USB_STORAGE_USBAT 1
-+#define CONFIG_USB_STORAGE_SDDR09 1
-+#define CONFIG_USB_STORAGE_SDDR55 1
-+#define CONFIG_USB_STORAGE_JUMPSHOT 1
-+
-+/*
-+ * USB Input Devices
-+ */
-+#define CONFIG_USB_HID_MODULE 1
-+#define CONFIG_USB_HIDINPUT 1
-+#undef CONFIG_HID_FF
-+#define CONFIG_USB_HIDDEV 1
-+
-+/*
-+ * USB HID Boot Protocol drivers
-+ */
-+#define CONFIG_USB_KBD_MODULE 1
-+#define CONFIG_USB_MOUSE_MODULE 1
-+#define CONFIG_USB_AIPTEK_MODULE 1
-+#define CONFIG_USB_WACOM_MODULE 1
-+#define CONFIG_USB_KBTAB_MODULE 1
-+#define CONFIG_USB_POWERMATE_MODULE 1
-+#define CONFIG_USB_MTOUCH_MODULE 1
-+#define CONFIG_USB_EGALAX_MODULE 1
-+#define CONFIG_USB_XPAD_MODULE 1
-+#define CONFIG_USB_ATI_REMOTE_MODULE 1
-+
-+/*
-+ * USB Imaging devices
-+ */
-+#define CONFIG_USB_MDC800_MODULE 1
-+#define CONFIG_USB_MICROTEK_MODULE 1
-+
-+/*
-+ * USB Multimedia devices
-+ */
-+#undef CONFIG_USB_DABUSB
-+#define CONFIG_USB_VICAM_MODULE 1
-+#define CONFIG_USB_DSBR_MODULE 1
-+#define CONFIG_USB_IBMCAM_MODULE 1
-+#define CONFIG_USB_KONICAWC_MODULE 1
-+#define CONFIG_USB_OV511_MODULE 1
-+#define CONFIG_USB_SE401_MODULE 1
-+#define CONFIG_USB_SN9C102_MODULE 1
-+#define CONFIG_USB_STV680_MODULE 1
-+#define CONFIG_USB_W9968CF_MODULE 1
-+#define CONFIG_USB_PWC_MODULE 1
-+
-+/*
-+ * USB Network Adapters
-+ */
-+#define CONFIG_USB_CATC_MODULE 1
-+#define CONFIG_USB_KAWETH_MODULE 1
-+#define CONFIG_USB_PEGASUS_MODULE 1
-+#define CONFIG_USB_RTL8150_MODULE 1
-+#define CONFIG_USB_USBNET_MODULE 1
-+
-+/*
-+ * USB Host-to-Host Cables
-+ */
-+#define CONFIG_USB_ALI_M5632 1
-+#define CONFIG_USB_AN2720 1
-+#define CONFIG_USB_BELKIN 1
-+#define CONFIG_USB_GENESYS 1
-+#define CONFIG_USB_NET1080 1
-+#define CONFIG_USB_PL2301 1
-+#define CONFIG_USB_KC2190 1
-+
-+/*
-+ * Intelligent USB Devices/Gadgets
-+ */
-+#define CONFIG_USB_ARMLINUX 1
-+#define CONFIG_USB_EPSON2888 1
-+#define CONFIG_USB_ZAURUS 1
-+#define CONFIG_USB_CDCETHER 1
-+
-+/*
-+ * USB Network Adapters
-+ */
-+#define CONFIG_USB_AX8817X 1
-+#define CONFIG_USB_ZD1201_MODULE 1
-+#define CONFIG_USB_MON_MODULE 1
-+
-+/*
-+ * USB port drivers
-+ */
-+#define CONFIG_USB_USS720_MODULE 1
-+
-+/*
-+ * USB Serial Converter support
-+ */
-+#define CONFIG_USB_SERIAL_MODULE 1
-+#define CONFIG_USB_SERIAL_GENERIC 1
-+#define CONFIG_USB_SERIAL_AIRPRIME_MODULE 1
-+#define CONFIG_USB_SERIAL_BELKIN_MODULE 1
-+#define CONFIG_USB_SERIAL_WHITEHEAT_MODULE 1
-+#define CONFIG_USB_SERIAL_DIGI_ACCELEPORT_MODULE 1
-+#define CONFIG_USB_SERIAL_CP2101_MODULE 1
-+#define CONFIG_USB_SERIAL_CYPRESS_M8_MODULE 1
-+#define CONFIG_USB_SERIAL_EMPEG_MODULE 1
-+#define CONFIG_USB_SERIAL_FTDI_SIO_MODULE 1
-+#define CONFIG_USB_SERIAL_VISOR_MODULE 1
-+#define CONFIG_USB_SERIAL_IPAQ_MODULE 1
-+#define CONFIG_USB_SERIAL_IR_MODULE 1
-+#define CONFIG_USB_SERIAL_EDGEPORT_MODULE 1
-+#define CONFIG_USB_SERIAL_EDGEPORT_TI_MODULE 1
-+#undef CONFIG_USB_SERIAL_GARMIN
-+#define CONFIG_USB_SERIAL_IPW_MODULE 1
-+#define CONFIG_USB_SERIAL_KEYSPAN_PDA_MODULE 1
-+#define CONFIG_USB_SERIAL_KEYSPAN_MODULE 1
-+#undef CONFIG_USB_SERIAL_KEYSPAN_MPR
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA28
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA28X
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA28XA
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA28XB
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA19
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA18X
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA19W
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA19QW
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA19QI
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA49W
-+#undef CONFIG_USB_SERIAL_KEYSPAN_USA49WLC
-+#define CONFIG_USB_SERIAL_KLSI_MODULE 1
-+#define CONFIG_USB_SERIAL_KOBIL_SCT_MODULE 1
-+#define CONFIG_USB_SERIAL_MCT_U232_MODULE 1
-+#define CONFIG_USB_SERIAL_PL2303_MODULE 1
-+#define CONFIG_USB_SERIAL_HP4X_MODULE 1
-+#define CONFIG_USB_SERIAL_SAFE_MODULE 1
-+#undef CONFIG_USB_SERIAL_SAFE_PADDED
-+#undef CONFIG_USB_SERIAL_TI
-+#define CONFIG_USB_SERIAL_CYBERJACK_MODULE 1
-+#define CONFIG_USB_SERIAL_XIRCOM_MODULE 1
-+#define CONFIG_USB_SERIAL_OPTION_MODULE 1
-+#define CONFIG_USB_SERIAL_OMNINET_MODULE 1
-+#define CONFIG_USB_EZUSB 1
-+
-+/*
-+ * USB Miscellaneous drivers
-+ */
-+#undef CONFIG_USB_EMI62
-+#undef CONFIG_USB_EMI26
-+#define CONFIG_USB_AUERSWALD_MODULE 1
-+#define CONFIG_USB_RIO500_MODULE 1
-+#define CONFIG_USB_LEGOTOWER_MODULE 1
-+#define CONFIG_USB_LCD_MODULE 1
-+#define CONFIG_USB_LED_MODULE 1
-+#define CONFIG_USB_CYTHERM_MODULE 1
-+#define CONFIG_USB_PHIDGETKIT_MODULE 1
-+#define CONFIG_USB_PHIDGETSERVO_MODULE 1
-+#undef CONFIG_USB_IDMOUSE
-+#define CONFIG_USB_SISUSBVGA_MODULE 1
-+#define CONFIG_USB_TEST_MODULE 1
-+
-+/*
-+ * USB ATM/DSL drivers
-+ */
-+#define CONFIG_USB_ATM_MODULE 1
-+#define CONFIG_USB_SPEEDTOUCH_MODULE 1
-+
-+/*
-+ * USB Gadget Support
-+ */
-+#define CONFIG_USB_GADGET_MODULE 1
-+#undef CONFIG_USB_GADGET_DEBUG_FILES
-+#define CONFIG_USB_GADGET_NET2280 1
-+#define CONFIG_USB_NET2280_MODULE 1
-+#undef CONFIG_USB_GADGET_PXA2XX
-+#undef CONFIG_USB_GADGET_GOKU
-+#undef CONFIG_USB_GADGET_LH7A40X
-+#undef CONFIG_USB_GADGET_OMAP
-+#undef CONFIG_USB_GADGET_DUMMY_HCD
-+#define CONFIG_USB_GADGET_DUALSPEED 1
-+#define CONFIG_USB_ZERO_MODULE 1
-+#define CONFIG_USB_ETH_MODULE 1
-+#define CONFIG_USB_ETH_RNDIS 1
-+#define CONFIG_USB_GADGETFS_MODULE 1
-+#define CONFIG_USB_FILE_STORAGE_MODULE 1
-+#undef CONFIG_USB_FILE_STORAGE_TEST
-+#define CONFIG_USB_G_SERIAL_MODULE 1
-+
-+/*
-+ * MMC/SD Card support
-+ */
-+#undef CONFIG_MMC
-+
-+/*
-+ * InfiniBand support
-+ */
-+#undef CONFIG_INFINIBAND
-+
-+/*
-+ * Power management options
-+ */
-+
-+/*
-+ * ACPI (Advanced Configuration and Power Interface) Support
-+ */
-+#define CONFIG_ACPI 1
-+#define CONFIG_ACPI_BOOT 1
-+#define CONFIG_ACPI_INTERPRETER 1
-+#define CONFIG_ACPI_AC_MODULE 1
-+#define CONFIG_ACPI_BATTERY_MODULE 1
-+#define CONFIG_ACPI_BUTTON_MODULE 1
-+#define CONFIG_ACPI_VIDEO_MODULE 1
-+#define CONFIG_ACPI_FAN_MODULE 1
-+#define CONFIG_ACPI_PROCESSOR_MODULE 1
-+#undef CONFIG_ACPI_HOTPLUG_CPU
-+#define CONFIG_ACPI_THERMAL_MODULE 1
-+#define CONFIG_ACPI_ASUS_MODULE 1
-+#define CONFIG_ACPI_IBM_MODULE 1
-+#define CONFIG_ACPI_TOSHIBA_MODULE 1
-+#define CONFIG_ACPI_BLACKLIST_YEAR 0
-+#undef CONFIG_ACPI_DEBUG
-+#define CONFIG_ACPI_BUS 1
-+#define CONFIG_ACPI_EC 1
-+#define CONFIG_ACPI_POWER 1
-+#define CONFIG_ACPI_PCI 1
-+#define CONFIG_ACPI_SYSTEM 1
-+#undef CONFIG_X86_PM_TIMER
-+#undef CONFIG_ACPI_CONTAINER
-+
-+/*
-+ * File systems
-+ */
-+#define CONFIG_EXT2_FS 1
-+#define CONFIG_EXT2_FS_XATTR 1
-+#define CONFIG_EXT2_FS_POSIX_ACL 1
-+#define CONFIG_EXT2_FS_SECURITY 1
-+#define CONFIG_EXT3_FS_MODULE 1
-+#define CONFIG_EXT3_FS_XATTR 1
-+#define CONFIG_EXT3_FS_POSIX_ACL 1
-+#define CONFIG_EXT3_FS_SECURITY 1
-+#define CONFIG_JBD_MODULE 1
-+#undef CONFIG_JBD_DEBUG
-+#define CONFIG_FS_MBCACHE 1
-+#define CONFIG_REISERFS_FS_MODULE 1
-+#undef CONFIG_REISERFS_CHECK
-+#undef CONFIG_REISERFS_PROC_INFO
-+#undef CONFIG_REISERFS_FS_XATTR
-+#define CONFIG_JFS_FS_MODULE 1
-+#define CONFIG_JFS_POSIX_ACL 1
-+#undef CONFIG_JFS_SECURITY
-+#undef CONFIG_JFS_DEBUG
-+#define CONFIG_JFS_STATISTICS 1
-+#define CONFIG_FS_POSIX_ACL 1
-+
-+/*
-+ * XFS support
-+ */
-+#define CONFIG_XFS_FS_MODULE 1
-+#define CONFIG_XFS_EXPORT 1
-+#define CONFIG_XFS_RT 1
-+#define CONFIG_XFS_QUOTA 1
-+#define CONFIG_XFS_SECURITY 1
-+#define CONFIG_XFS_POSIX_ACL 1
-+#define CONFIG_MINIX_FS_MODULE 1
-+#define CONFIG_ROMFS_FS_MODULE 1
-+#define CONFIG_QUOTA 1
-+#define CONFIG_QFMT_V1_MODULE 1
-+#define CONFIG_QFMT_V2_MODULE 1
-+#define CONFIG_QUOTACTL 1
-+#define CONFIG_DNOTIFY 1
-+#define CONFIG_AUTOFS_FS_MODULE 1
-+#define CONFIG_AUTOFS4_FS_MODULE 1
-+
-+/*
-+ * CD-ROM/DVD Filesystems
-+ */
-+#define CONFIG_ISO9660_FS_MODULE 1
-+#define CONFIG_JOLIET 1
-+#define CONFIG_ZISOFS 1
-+#define CONFIG_ZISOFS_FS_MODULE 1
-+#define CONFIG_UDF_FS_MODULE 1
-+#define CONFIG_UDF_NLS 1
-+
-+/*
-+ * DOS/FAT/NT Filesystems
-+ */
-+#define CONFIG_FAT_FS_MODULE 1
-+#define CONFIG_MSDOS_FS_MODULE 1
-+#define CONFIG_VFAT_FS_MODULE 1
-+#define CONFIG_FAT_DEFAULT_CODEPAGE 437
-+#define CONFIG_FAT_DEFAULT_IOCHARSET "iso8859-1"
-+#define CONFIG_NTFS_FS_MODULE 1
-+#undef CONFIG_NTFS_DEBUG
-+#undef CONFIG_NTFS_RW
-+
-+/*
-+ * Pseudo filesystems
-+ */
-+#define CONFIG_PROC_FS 1
-+#define CONFIG_PROC_KCORE 1
-+#define CONFIG_SYSFS 1
-+#undef CONFIG_DEVFS_FS
-+#define CONFIG_DEVPTS_FS_XATTR 1
-+#define CONFIG_DEVPTS_FS_SECURITY 1
-+#define CONFIG_TMPFS 1
-+#define CONFIG_TMPFS_XATTR 1
-+#define CONFIG_TMPFS_SECURITY 1
-+#undef CONFIG_HUGETLBFS
-+#undef CONFIG_HUGETLB_PAGE
-+#define CONFIG_RAMFS 1
-+
-+/*
-+ * Miscellaneous filesystems
-+ */
-+#define CONFIG_ADFS_FS_MODULE 1
-+#undef CONFIG_ADFS_FS_RW
-+#define CONFIG_AFFS_FS_MODULE 1
-+#define CONFIG_HFS_FS_MODULE 1
-+#define CONFIG_HFSPLUS_FS_MODULE 1
-+#define CONFIG_BEFS_FS_MODULE 1
-+#undef CONFIG_BEFS_DEBUG
-+#define CONFIG_BFS_FS_MODULE 1
-+#define CONFIG_EFS_FS_MODULE 1
-+#define CONFIG_JFFS_FS_MODULE 1
-+#define CONFIG_JFFS_FS_VERBOSE 0
-+#define CONFIG_JFFS_PROC_FS 1
-+#define CONFIG_JFFS2_FS_MODULE 1
-+#define CONFIG_JFFS2_FS_DEBUG 0
-+#undef CONFIG_JFFS2_FS_NAND
-+#undef CONFIG_JFFS2_FS_NOR_ECC
-+#undef CONFIG_JFFS2_COMPRESSION_OPTIONS
-+#define CONFIG_JFFS2_ZLIB 1
-+#define CONFIG_JFFS2_RTIME 1
-+#undef CONFIG_JFFS2_RUBIN
-+#define CONFIG_CRAMFS 1
-+#define CONFIG_VXFS_FS_MODULE 1
-+#define CONFIG_HPFS_FS_MODULE 1
-+#define CONFIG_QNX4FS_FS_MODULE 1
-+#undef CONFIG_QNX4FS_RW
-+#define CONFIG_SYSV_FS_MODULE 1
-+#define CONFIG_UFS_FS_MODULE 1
-+#undef CONFIG_UFS_FS_WRITE
-+
-+/*
-+ * Network File Systems
-+ */
-+#define CONFIG_NFS_FS_MODULE 1
-+#define CONFIG_NFS_V3 1
-+#define CONFIG_NFS_V4 1
-+#define CONFIG_NFS_DIRECTIO 1
-+#define CONFIG_NFSD_MODULE 1
-+#define CONFIG_NFSD_V3 1
-+#define CONFIG_NFSD_V4 1
-+#define CONFIG_NFSD_TCP 1
-+#define CONFIG_LOCKD_MODULE 1
-+#define CONFIG_LOCKD_V4 1
-+#define CONFIG_EXPORTFS_MODULE 1
-+#define CONFIG_SUNRPC_MODULE 1
-+#define CONFIG_SUNRPC_GSS_MODULE 1
-+#define CONFIG_RPCSEC_GSS_KRB5_MODULE 1
-+#define CONFIG_RPCSEC_GSS_SPKM3_MODULE 1
-+#define CONFIG_SMB_FS_MODULE 1
-+#undef CONFIG_SMB_NLS_DEFAULT
-+#define CONFIG_CIFS_MODULE 1
-+#undef CONFIG_CIFS_STATS
-+#undef CONFIG_CIFS_XATTR
-+#undef CONFIG_CIFS_EXPERIMENTAL
-+#define CONFIG_NCP_FS_MODULE 1
-+#define CONFIG_NCPFS_PACKET_SIGNING 1
-+#define CONFIG_NCPFS_IOCTL_LOCKING 1
-+#define CONFIG_NCPFS_STRONG 1
-+#define CONFIG_NCPFS_NFS_NS 1
-+#define CONFIG_NCPFS_OS2_NS 1
-+#undef CONFIG_NCPFS_SMALLDOS
-+#define CONFIG_NCPFS_NLS 1
-+#define CONFIG_NCPFS_EXTRAS 1
-+#define CONFIG_CODA_FS_MODULE 1
-+#undef CONFIG_CODA_FS_OLD_API
-+#define CONFIG_AFS_FS_MODULE 1
-+#define CONFIG_RXRPC_MODULE 1
-+
-+/*
-+ * Partition Types
-+ */
-+#define CONFIG_PARTITION_ADVANCED 1
-+#define CONFIG_ACORN_PARTITION 1
-+#define CONFIG_ACORN_PARTITION_CUMANA 1
-+#undef CONFIG_ACORN_PARTITION_EESOX
-+#define CONFIG_ACORN_PARTITION_ICS 1
-+#undef CONFIG_ACORN_PARTITION_ADFS
-+#undef CONFIG_ACORN_PARTITION_POWERTEC
-+#define CONFIG_ACORN_PARTITION_RISCIX 1
-+#define CONFIG_OSF_PARTITION 1
-+#define CONFIG_AMIGA_PARTITION 1
-+#define CONFIG_ATARI_PARTITION 1
-+#define CONFIG_MAC_PARTITION 1
-+#define CONFIG_MSDOS_PARTITION 1
-+#define CONFIG_BSD_DISKLABEL 1
-+#define CONFIG_MINIX_SUBPARTITION 1
-+#define CONFIG_SOLARIS_X86_PARTITION 1
-+#define CONFIG_UNIXWARE_DISKLABEL 1
-+#define CONFIG_LDM_PARTITION 1
-+#undef CONFIG_LDM_DEBUG
-+#define CONFIG_SGI_PARTITION 1
-+#define CONFIG_ULTRIX_PARTITION 1
-+#define CONFIG_SUN_PARTITION 1
-+#define CONFIG_EFI_PARTITION 1
-+
-+/*
-+ * Native Language Support
-+ */
-+#define CONFIG_NLS 1
-+#define CONFIG_NLS_DEFAULT "cp437"
-+#define CONFIG_NLS_CODEPAGE_437_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_737_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_775_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_850_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_852_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_855_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_857_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_860_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_861_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_862_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_863_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_864_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_865_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_866_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_869_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_936_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_950_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_932_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_949_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_874_MODULE 1
-+#define CONFIG_NLS_ISO8859_8_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_1250_MODULE 1
-+#define CONFIG_NLS_CODEPAGE_1251_MODULE 1
-+#define CONFIG_NLS_ASCII_MODULE 1
-+#define CONFIG_NLS_ISO8859_1_MODULE 1
-+#define CONFIG_NLS_ISO8859_2_MODULE 1
-+#define CONFIG_NLS_ISO8859_3_MODULE 1
-+#define CONFIG_NLS_ISO8859_4_MODULE 1
-+#define CONFIG_NLS_ISO8859_5_MODULE 1
-+#define CONFIG_NLS_ISO8859_6_MODULE 1
-+#define CONFIG_NLS_ISO8859_7_MODULE 1
-+#define CONFIG_NLS_ISO8859_9_MODULE 1
-+#define CONFIG_NLS_ISO8859_13_MODULE 1
-+#define CONFIG_NLS_ISO8859_14_MODULE 1
-+#define CONFIG_NLS_ISO8859_15_MODULE 1
-+#define CONFIG_NLS_KOI8_R_MODULE 1
-+#define CONFIG_NLS_KOI8_U_MODULE 1
-+#define CONFIG_NLS_UTF8_MODULE 1
-+
-+/*
-+ * Security options
-+ */
-+#define CONFIG_KEYS 1
-+#undef CONFIG_KEYS_DEBUG_PROC_KEYS
-+#define CONFIG_SECURITY 1
-+#undef CONFIG_SECURITY_NETWORK
-+#define CONFIG_SECURITY_CAPABILITIES 1
-+#define CONFIG_SECURITY_ROOTPLUG_MODULE 1
-+#define CONFIG_SECURITY_SECLVL_MODULE 1
-+#define CONFIG_SECURITY_SELINUX 1
-+#define CONFIG_SECURITY_SELINUX_BOOTPARAM 1
-+#define CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE 0
-+#define CONFIG_SECURITY_SELINUX_DISABLE 1
-+#define CONFIG_SECURITY_SELINUX_DEVELOP 1
-+#define CONFIG_SECURITY_SELINUX_AVC_STATS 1
-+#define CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE 1
-+
-+/*
-+ * Cryptographic options
-+ */
-+#define CONFIG_CRYPTO 1
-+#define CONFIG_CRYPTO_HMAC 1
-+#define CONFIG_CRYPTO_NULL_MODULE 1
-+#define CONFIG_CRYPTO_MD4_MODULE 1
-+#define CONFIG_CRYPTO_MD5 1
-+#define CONFIG_CRYPTO_SHA1_MODULE 1
-+#define CONFIG_CRYPTO_SHA256_MODULE 1
-+#define CONFIG_CRYPTO_SHA512_MODULE 1
-+#define CONFIG_CRYPTO_WP512_MODULE 1
-+#define CONFIG_CRYPTO_TGR192_MODULE 1
-+#define CONFIG_CRYPTO_DES_MODULE 1
-+#define CONFIG_CRYPTO_BLOWFISH_MODULE 1
-+#define CONFIG_CRYPTO_TWOFISH_MODULE 1
-+#define CONFIG_CRYPTO_SERPENT_MODULE 1
-+#define CONFIG_CRYPTO_AES_586_MODULE 1
-+#define CONFIG_CRYPTO_CAST5_MODULE 1
-+#define CONFIG_CRYPTO_CAST6_MODULE 1
-+#define CONFIG_CRYPTO_TEA_MODULE 1
-+#define CONFIG_CRYPTO_ARC4_MODULE 1
-+#define CONFIG_CRYPTO_KHAZAD_MODULE 1
-+#define CONFIG_CRYPTO_ANUBIS_MODULE 1
-+#define CONFIG_CRYPTO_DEFLATE_MODULE 1
-+#define CONFIG_CRYPTO_MICHAEL_MIC_MODULE 1
-+#define CONFIG_CRYPTO_CRC32C_MODULE 1
-+#define CONFIG_CRYPTO_TEST_MODULE 1
-+
-+/*
-+ * Hardware crypto devices
-+ */
-+#undef CONFIG_CRYPTO_DEV_PADLOCK
-+
-+/*
-+ * Library routines
-+ */
-+#define CONFIG_CRC_CCITT_MODULE 1
-+#define CONFIG_CRC32 1
-+#define CONFIG_LIBCRC32C_MODULE 1
-+#define CONFIG_ZLIB_INFLATE 1
-+#define CONFIG_ZLIB_DEFLATE_MODULE 1
-+#define CONFIG_REED_SOLOMON_MODULE 1
-+#define CONFIG_REED_SOLOMON_DEC16 1
-+
-+/*
-+ * Kernel hacking
-+ */
-+#undef CONFIG_PRINTK_TIME
-+#define CONFIG_DEBUG_KERNEL 1
-+#define CONFIG_MAGIC_SYSRQ 1
-+#define CONFIG_LOG_BUF_SHIFT 14
-+#undef CONFIG_SCHEDSTATS
-+#undef CONFIG_DEBUG_SLAB
-+#undef CONFIG_DEBUG_SPINLOCK
-+#undef CONFIG_DEBUG_SPINLOCK_SLEEP
-+#undef CONFIG_DEBUG_KOBJECT
-+#undef CONFIG_DEBUG_HIGHMEM
-+#undef CONFIG_DEBUG_BUGVERBOSE
-+#undef CONFIG_DEBUG_INFO
-+#undef CONFIG_DEBUG_FS
-+#undef CONFIG_FRAME_POINTER
-+#undef CONFIG_DEBUG_STACKOVERFLOW
-+#undef CONFIG_KPROBES
-+#undef CONFIG_DEBUG_STACK_USAGE
-+#undef CONFIG_DEBUG_PAGEALLOC
-+#undef CONFIG_4KSTACKS
-+#define CONFIG_X86_FIND_SMP_CONFIG 1
-+#define CONFIG_X86_MPPARSE 1
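
The block of generated CONFIG_* defines above follows the usual kernel convention: an option built into the kernel becomes CONFIG_FOO, one built as a module becomes CONFIG_FOO_MODULE, so code that must work either way tests both spellings. A minimal sketch of that consumer-side pattern, using a made-up CONFIG_DEMO symbol rather than any option from the list above:

#include <stdio.h>

/* Pretend fragment of the generated header: the option was selected =m. */
#define CONFIG_DEMO_MODULE 1

/* Consumer-side test covering both the =y and =m spellings. */
#if defined(CONFIG_DEMO) || defined(CONFIG_DEMO_MODULE)
#define DEMO_AVAILABLE 1
#else
#define DEMO_AVAILABLE 0
#endif

int main(void)
{
    printf("demo support built in or modular: %d\n", DEMO_AVAILABLE);
    return 0;
}
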
-diff -Nurp pristine-linux-2.6.12/include/linux/gfp.h linux-2.6.12-xen/include/linux/gfp.h
---- pristine-linux-2.6.12/include/linux/gfp.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/linux/gfp.h	2006-02-16 23:44:08.000000000 +0100
-@@ -77,8 +77,12 @@ struct vm_area_struct;
-  * optimized to &contig_page_data at compile-time.
-  */
- 
-+/*
-+ * If arch_free_page returns non-zero then the generic free_page code can
-+ * immediately bail: the arch-specific function has done all the work.
-+ */
- #ifndef HAVE_ARCH_FREE_PAGE
--static inline void arch_free_page(struct page *page, int order) { }
-+#define arch_free_page(page, order) 0
- #endif
- 
- extern struct page *
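
The gfp.h hunk above turns arch_free_page() from an empty inline into a macro returning 0, and the mm/page_alloc.c hunks further down check that return value so an architecture hook can claim a page and make the generic free path bail out early. A minimal userspace sketch of the hook-with-early-return pattern; my_arch_free_page and generic_free_page are illustrative names, not the real kernel symbols:

#include <stdio.h>

/* Default stub, mirroring the !HAVE_ARCH_FREE_PAGE case: report that the
 * architecture did nothing, so the generic path must keep going. */
#ifndef HAVE_MY_ARCH_FREE_PAGE
#define my_arch_free_page(page, order) 0
#endif

static void generic_free_page(void *page, int order)
{
    if (my_arch_free_page(page, order))
        return;    /* the arch hook already handled the page */
    printf("generic free of %p (order %d)\n", page, order);
}

int main(void)
{
    int dummy;

    generic_free_page(&dummy, 0);
    return 0;
}
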
-diff -Nurp pristine-linux-2.6.12/include/linux/highmem.h linux-2.6.12-xen/include/linux/highmem.h
---- pristine-linux-2.6.12/include/linux/highmem.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/linux/highmem.h	2006-02-16 23:44:08.000000000 +0100
-@@ -13,10 +13,12 @@
- 
- /* declarations for linux/mm/highmem.c */
- unsigned int nr_free_highpages(void);
-+void kmap_flush_unused(void);
- 
- #else /* CONFIG_HIGHMEM */
- 
- static inline unsigned int nr_free_highpages(void) { return 0; }
-+static inline void kmap_flush_unused(void) { }
- 
- static inline void *kmap(struct page *page)
- {
-diff -Nurp pristine-linux-2.6.12/include/linux/if_shaper.h linux-2.6.12-xen/include/linux/if_shaper.h
---- pristine-linux-2.6.12/include/linux/if_shaper.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/linux/if_shaper.h	2006-02-25 00:12:33.767994850 +0100
-@@ -23,7 +23,7 @@ struct shaper
- 	__u32 shapeclock;
- 	unsigned long recovery;	/* Time we can next clock a packet out on
- 				   an empty queue */
--	struct semaphore sem;
-+	spinlock_t lock;
-         struct net_device_stats stats;
- 	struct net_device *dev;
- 	int  (*hard_start_xmit) (struct sk_buff *skb,
-diff -Nurp pristine-linux-2.6.12/include/linux/init.h linux-2.6.12-xen/include/linux/init.h
---- pristine-linux-2.6.12/include/linux/init.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/linux/init.h	2006-02-25 00:12:33.788991685 +0100
-@@ -229,6 +229,18 @@ void __init parse_early_param(void);
- #define __devexitdata __exitdata
- #endif
- 
-+#ifdef CONFIG_HOTPLUG_CPU
-+#define __cpuinit
-+#define __cpuinitdata
-+#define __cpuexit
-+#define __cpuexitdata
-+#else
-+#define __cpuinit	__init
-+#define __cpuinitdata __initdata
-+#define __cpuexit __exit
-+#define __cpuexitdata	__exitdata
-+#endif
-+
- /* Functions marked as __devexit may be discarded at kernel link time, depending
-    on config options.  Newer versions of binutils detect references from
-    retained sections to discarded sections and flag an error.  Pointers to
-diff -Nurp pristine-linux-2.6.12/include/linux/irq.h linux-2.6.12-xen/include/linux/irq.h
---- pristine-linux-2.6.12/include/linux/irq.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/linux/irq.h	2006-02-16 23:44:08.000000000 +0100
-@@ -74,6 +74,7 @@ extern irq_desc_t irq_desc [NR_IRQS];
- #include <asm/hw_irq.h> /* the arch dependent stuff */
- 
- extern int setup_irq(unsigned int irq, struct irqaction * new);
-+extern int teardown_irq(unsigned int irq, struct irqaction * old);
- 
- #ifdef CONFIG_GENERIC_HARDIRQS
- extern cpumask_t irq_affinity[NR_IRQS];
-diff -Nurp pristine-linux-2.6.12/include/linux/mm.h linux-2.6.12-xen/include/linux/mm.h
---- pristine-linux-2.6.12/include/linux/mm.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/linux/mm.h	2006-02-16 23:44:08.000000000 +0100
-@@ -161,6 +161,7 @@ extern unsigned int kobjsize(const void 
- #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
- #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
- #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-+#define VM_FOREIGN	0x02000000	/* Has pages belonging to another VM */
- 
- #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
- #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
-@@ -816,6 +817,12 @@ extern int check_user_page_readable(stru
- int remap_pfn_range(struct vm_area_struct *, unsigned long,
- 		unsigned long, unsigned long, pgprot_t);
- 
-+typedef int (*pte_fn_t)(pte_t *pte, struct page *pte_page, unsigned long addr, 
-+                        void *data);
-+extern int generic_page_range(struct mm_struct *mm, unsigned long address, 
-+                              unsigned long size, pte_fn_t fn, void *data);
-+
-+
- #ifdef CONFIG_PROC_FS
- void __vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
- #else
-diff -Nurp pristine-linux-2.6.12/include/linux/skbuff.h linux-2.6.12-xen/include/linux/skbuff.h
---- pristine-linux-2.6.12/include/linux/skbuff.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/linux/skbuff.h	2006-02-16 23:44:08.000000000 +0100
-@@ -177,6 +177,8 @@ struct skb_shared_info {
-  *	@local_df: allow local fragmentation
-  *	@cloned: Head may be cloned (check refcnt to be sure)
-  *	@nohdr: Payload reference only, must not modify header
-+ *	@proto_csum_valid: Protocol csum validated since arriving at localhost
-+ *	@proto_csum_blank: Protocol csum must be added before leaving localhost
-  *	@pkt_type: Packet class
-  *	@ip_summed: Driver fed us an IP checksum
-  *	@priority: Packet queueing priority
-@@ -252,6 +254,8 @@ struct sk_buff {
- 	unsigned char		local_df,
- 				cloned:1,
- 				nohdr:1,
-+				proto_csum_valid:1,
-+				proto_csum_blank:1,
- 				pkt_type,
- 				ip_summed;
- 	__u32			priority;
-diff -Nurp pristine-linux-2.6.12/include/linux/zlib.h linux-2.6.12-xen/include/linux/zlib.h
---- pristine-linux-2.6.12/include/linux/zlib.h	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/include/linux/zlib.h	2006-02-25 00:12:33.768994699 +0100
-@@ -506,6 +506,11 @@ extern int zlib_deflateReset (z_streamp 
-    stream state was inconsistent (such as zalloc or state being NULL).
- */
- 
-+static inline unsigned long deflateBound(unsigned long s)
-+{
-+	return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
-+}
-+
- extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
- /*
-      Dynamically update the compression level and compression strategy.  The
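
The deflateBound() helper added above bounds the worst-case deflate output at the input size plus roughly one eighth of it, plus roughly one sixty-fourth of it, plus 11 bytes of framing overhead; for example, with s = 4096 it evaluates to 4096 + 512 + 64 + 11 = 4683 bytes.
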
-diff -Nurp pristine-linux-2.6.12/kernel/cpu.c linux-2.6.12-xen/kernel/cpu.c
---- pristine-linux-2.6.12/kernel/cpu.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/kernel/cpu.c	2006-02-25 00:12:33.815987616 +0100
-@@ -63,19 +63,15 @@ static int take_cpu_down(void *unused)
- {
- 	int err;
- 
--	/* Take offline: makes arch_cpu_down somewhat easier. */
--	cpu_clear(smp_processor_id(), cpu_online_map);
--
- 	/* Ensure this CPU doesn't handle any more interrupts. */
- 	err = __cpu_disable();
- 	if (err < 0)
--		cpu_set(smp_processor_id(), cpu_online_map);
--	else
--		/* Force idle task to run as soon as we yield: it should
--		   immediately notice cpu is offline and die quickly. */
--		sched_idle_next();
-+		return err;
- 
--	return err;
-+	/* Force idle task to run as soon as we yield: it should
-+	   immediately notice cpu is offline and die quickly. */
-+	sched_idle_next();
-+	return 0;
- }
- 
- int cpu_down(unsigned int cpu)
-diff -Nurp pristine-linux-2.6.12/kernel/irq/manage.c linux-2.6.12-xen/kernel/irq/manage.c
---- pristine-linux-2.6.12/kernel/irq/manage.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/kernel/irq/manage.c	2006-02-16 23:44:08.000000000 +0100
-@@ -146,9 +146,14 @@ int can_request_irq(unsigned int irq, un
- 	return !action;
- }
- 
--/*
-- * Internal function to register an irqaction - typically used to
-- * allocate special interrupts that are part of the architecture.
-+/**
-+ *	setup_irq - register an irqaction structure
-+ *	@irq: Interrupt to register
-+ *	@irqaction: The irqaction structure to be registered
-+ *
-+ *	Normally called by request_irq, this function can be used
-+ *	directly to allocate special interrupts that are part of the
-+ *	architecture.
-  */
- int setup_irq(unsigned int irq, struct irqaction * new)
- {
-@@ -217,28 +222,27 @@ int setup_irq(unsigned int irq, struct i
- 	return 0;
- }
- 
--/**
-- *	free_irq - free an interrupt
-- *	@irq: Interrupt line to free
-- *	@dev_id: Device identity to free
-- *
-- *	Remove an interrupt handler. The handler is removed and if the
-- *	interrupt line is no longer in use by any driver it is disabled.
-- *	On a shared IRQ the caller must ensure the interrupt is disabled
-- *	on the card it drives before calling this function. The function
-- *	does not return until any executing interrupts for this IRQ
-- *	have completed.
-+/*
-+ *	teardown_irq - unregister an irqaction
-+ *	@irq: Interrupt line being freed
-+ *	@old: Pointer to the irqaction that is to be unregistered
-+ *
-+ *	This function is called by free_irq and does the actual
-+ *	business of unregistering the handler. It exists as a 
-+ *	separate function to enable handlers to be unregistered 
-+ *	for irqactions that have been allocated statically at 
-+ *	boot time.
-  *
-  *	This function must not be called from interrupt context.
-  */
--void free_irq(unsigned int irq, void *dev_id)
-+int teardown_irq(unsigned int irq, struct irqaction * old)
- {
- 	struct irq_desc *desc;
- 	struct irqaction **p;
- 	unsigned long flags;
- 
- 	if (irq >= NR_IRQS)
--		return;
-+		return -ENOENT;
- 
- 	desc = irq_desc + irq;
- 	spin_lock_irqsave(&desc->lock,flags);
-@@ -250,7 +254,7 @@ void free_irq(unsigned int irq, void *de
- 			struct irqaction **pp = p;
- 
- 			p = &action->next;
--			if (action->dev_id != dev_id)
-+			if (action != old)
- 				continue;
- 
- 			/* Found it - now remove it from the list of entries */
-@@ -267,13 +271,52 @@ void free_irq(unsigned int irq, void *de
- 
- 			/* Make sure it's not being used on another CPU */
- 			synchronize_irq(irq);
--			kfree(action);
--			return;
-+			return 0;
- 		}
--		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
-+		printk(KERN_ERR "Trying to teardown free IRQ%d\n",irq);
- 		spin_unlock_irqrestore(&desc->lock,flags);
-+		return -ENOENT;
-+	}
-+}
-+
-+/**
-+ *	free_irq - free an interrupt
-+ *	@irq: Interrupt line to free
-+ *	@dev_id: Device identity to free
-+ *
-+ *	Remove an interrupt handler. The handler is removed and if the
-+ *	interrupt line is no longer in use by any driver it is disabled.
-+ *	On a shared IRQ the caller must ensure the interrupt is disabled
-+ *	on the card it drives before calling this function. The function
-+ *	does not return until any executing interrupts for this IRQ
-+ *	have completed.
-+ *
-+ *	This function must not be called from interrupt context.
-+ */
-+void free_irq(unsigned int irq, void *dev_id)
-+{
-+	struct irq_desc *desc;
-+	struct irqaction *action;
-+	unsigned long flags;
-+
-+	if (irq >= NR_IRQS)
-+		return;
-+
-+	desc = irq_desc + irq;
-+	spin_lock_irqsave(&desc->lock,flags);
-+	for (action = desc->action; action != NULL; action = action->next) {
-+		if (action->dev_id != dev_id)
-+			continue;
-+
-+		spin_unlock_irqrestore(&desc->lock,flags);
-+
-+		if (teardown_irq(irq, action) == 0)
-+			kfree(action);
- 		return;
- 	}
-+	printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
-+	spin_unlock_irqrestore(&desc->lock,flags);
-+	return;
- }
- 
- EXPORT_SYMBOL(free_irq);
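
The manage.c changes above split the unregister path out of free_irq() into teardown_irq() so that an irqaction which was never kmalloc'd, such as one allocated statically at boot, can be removed without free_irq()'s final kfree(). A kernel-context sketch of how a caller might use the pair; this is not a standalone program, and the handler, IRQ number, flags and names are made up for illustration:

#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t demo_handler(int irq, void *dev_id, struct pt_regs *regs)
{
    return IRQ_HANDLED;
}

static struct irqaction demo_action = {
    .handler = demo_handler,
    .flags   = SA_INTERRUPT,
    .name    = "demo",
};

static int demo_attach(void)
{
    /* Register the statically allocated action; no kmalloc involved. */
    return setup_irq(7, &demo_action);
}

static void demo_detach(void)
{
    /* Unregister without freeing; free_irq() would kfree() the action. */
    teardown_irq(7, &demo_action);
}
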
-diff -Nurp pristine-linux-2.6.12/kernel/module.c linux-2.6.12-xen/kernel/module.c
---- pristine-linux-2.6.12/kernel/module.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/kernel/module.c	2006-02-25 00:12:33.769994548 +0100
-@@ -249,13 +249,18 @@ static inline unsigned int block_size(in
- /* Created by linker magic */
- extern char __per_cpu_start[], __per_cpu_end[];
- 
--static void *percpu_modalloc(unsigned long size, unsigned long align)
-+static void *percpu_modalloc(unsigned long size, unsigned long align,
-+			     const char *name)
- {
- 	unsigned long extra;
- 	unsigned int i;
- 	void *ptr;
- 
--	BUG_ON(align > SMP_CACHE_BYTES);
-+	if (align > SMP_CACHE_BYTES) {
-+		printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
-+		       name, align, SMP_CACHE_BYTES);
-+		align = SMP_CACHE_BYTES;
-+	}
- 
- 	ptr = __per_cpu_start;
- 	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-@@ -347,7 +352,8 @@ static int percpu_modinit(void)
- }	
- __initcall(percpu_modinit);
- #else /* ... !CONFIG_SMP */
--static inline void *percpu_modalloc(unsigned long size, unsigned long align)
-+static inline void *percpu_modalloc(unsigned long size, unsigned long align,
-+				    const char *name)
- {
- 	return NULL;
- }
-@@ -1554,7 +1560,8 @@ static struct module *load_module(void _
- 	if (pcpuindex) {
- 		/* We have a special allocation for this section. */
- 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
--					 sechdrs[pcpuindex].sh_addralign);
-+					 sechdrs[pcpuindex].sh_addralign,
-+					 mod->name);
- 		if (!percpu) {
- 			err = -ENOMEM;
- 			goto free_mod;
-diff -Nurp pristine-linux-2.6.12/kernel/rcupdate.c linux-2.6.12-xen/kernel/rcupdate.c
---- pristine-linux-2.6.12/kernel/rcupdate.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/kernel/rcupdate.c	2006-02-25 00:12:33.826985958 +0100
-@@ -202,8 +202,11 @@ static void rcu_start_batch(struct rcu_c
-  */
- static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
- {
-+	cpumask_t mask;
-+
- 	cpu_clear(cpu, rsp->cpumask);
--	if (cpus_empty(rsp->cpumask)) {
-+	cpus_andnot(mask, rsp->cpumask, nohz_cpu_mask);
-+	if (cpus_empty(mask)) {
- 		/* batch completed ! */
- 		rcp->completed = rcp->cur;
- 		rcu_start_batch(rcp, rsp, 0);
-diff -Nurp pristine-linux-2.6.12/kernel/signal.c linux-2.6.12-xen/kernel/signal.c
---- pristine-linux-2.6.12/kernel/signal.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/kernel/signal.c	2006-02-25 00:12:33.771994247 +0100
-@@ -686,7 +686,7 @@ static void handle_stop_signal(int sig, 
- {
- 	struct task_struct *t;
- 
--	if (p->flags & SIGNAL_GROUP_EXIT)
-+	if (p->signal->flags & SIGNAL_GROUP_EXIT)
- 		/*
- 		 * The process is in the middle of dying already.
- 		 */
-diff -Nurp pristine-linux-2.6.12/lib/inflate.c linux-2.6.12-xen/lib/inflate.c
---- pristine-linux-2.6.12/lib/inflate.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/lib/inflate.c	2006-02-25 00:12:33.772994096 +0100
-@@ -326,7 +326,7 @@ DEBG("huft1 ");
-   {
-     *t = (struct huft *)NULL;
-     *m = 0;
--    return 0;
-+    return 2;
-   }
- 
- DEBG("huft2 ");
-@@ -374,6 +374,7 @@ DEBG("huft5 ");
-     if ((j = *p++) != 0)
-       v[x[j]++] = i;
-   } while (++i < n);
-+  n = x[g];                   /* set n to length of v */
- 
- DEBG("h6 ");
- 
-@@ -410,12 +411,13 @@ DEBG1("1 ");
- DEBG1("2 ");
-           f -= a + 1;           /* deduct codes from patterns left */
-           xp = c + k;
--          while (++j < z)       /* try smaller tables up to z bits */
--          {
--            if ((f <<= 1) <= *++xp)
--              break;            /* enough codes to use up j bits */
--            f -= *xp;           /* else deduct codes from patterns */
--          }
-+          if (j < z)
-+            while (++j < z)       /* try smaller tables up to z bits */
-+            {
-+              if ((f <<= 1) <= *++xp)
-+                break;            /* enough codes to use up j bits */
-+              f -= *xp;           /* else deduct codes from patterns */
-+            }
-         }
- DEBG1("3 ");
-         z = 1 << j;             /* table entries for j-bit table */
-diff -Nurp pristine-linux-2.6.12/Makefile linux-2.6.12-xen/Makefile
---- pristine-linux-2.6.12/Makefile	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/Makefile	2006-02-25 00:12:57.503417009 +0100
-@@ -2,6 +2,7 @@ VERSION = 2
- PATCHLEVEL = 6
- SUBLEVEL = 12
- EXTRAVERSION =
-+XENGUEST = -xen
- NAME=Woozy Numbat
- 
- # *DOCUMENTATION*
-@@ -1149,7 +1150,7 @@ endif # KBUILD_EXTMOD
- #(which is the most common case IMHO) to avoid unneeded clutter in the big tags file.
- #Adding $(srctree) adds about 20M on i386 to the size of the output file!
- 
--ifeq ($(KBUILD_OUTPUT),)
-+ifeq ($(src),$(obj))
- __srctree =
- else
- __srctree = $(srctree)/
-diff -Nurp pristine-linux-2.6.12/mm/highmem.c linux-2.6.12-xen/mm/highmem.c
---- pristine-linux-2.6.12/mm/highmem.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/mm/highmem.c	2006-02-16 23:44:08.000000000 +0100
-@@ -148,6 +148,15 @@ start:
- 	return vaddr;
- }
- 
-+void kmap_flush_unused(void)
-+{
-+	spin_lock(&kmap_lock);
-+	flush_all_zero_pkmaps();
-+	spin_unlock(&kmap_lock);
-+}
-+
-+EXPORT_SYMBOL(kmap_flush_unused);
-+
- void fastcall *kmap_high(struct page *page)
- {
- 	unsigned long vaddr;
-diff -Nurp pristine-linux-2.6.12/mm/memory.c linux-2.6.12-xen/mm/memory.c
---- pristine-linux-2.6.12/mm/memory.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/mm/memory.c	2006-02-16 23:44:08.000000000 +0100
-@@ -940,6 +940,24 @@ int get_user_pages(struct task_struct *t
- 			continue;
- 		}
- 
-+                if (vma && (vma->vm_flags & VM_FOREIGN))
-+                {
-+                    struct page **map = vma->vm_private_data;
-+                    int offset = (start - vma->vm_start) >> PAGE_SHIFT;
-+
-+                    if (map[offset] != NULL) {
-+                        if (pages) {
-+                            pages[i] = map[offset];
-+                        } 
-+                        if (vmas) 
-+                            vmas[i] = vma;
-+                        i++;
-+                        start += PAGE_SIZE;
-+                        len--;
-+                        continue;
-+                    } 
-+                }
-+
- 		if (!vma || (vma->vm_flags & VM_IO)
- 				|| !(flags & vma->vm_flags))
- 			return i ? : -EFAULT;
-@@ -1195,6 +1213,104 @@ int remap_pfn_range(struct vm_area_struc
- }
- EXPORT_SYMBOL(remap_pfn_range);
- 
-+static inline int generic_pte_range(struct mm_struct *mm,
-+                                    pmd_t *pmd, 
-+                                    unsigned long addr, 
-+                                    unsigned long end,
-+                                    pte_fn_t fn, void *data)
-+{
-+	pte_t *pte;
-+        int err;
-+        struct page *pte_page;
-+
-+        pte = (mm == &init_mm) ? 
-+                pte_alloc_kernel(mm, pmd, addr) :
-+                pte_alloc_map(mm, pmd, addr);
-+        if (!pte)
-+                return -ENOMEM;
-+
-+        pte_page = pmd_page(*pmd);
-+
-+        do {
-+                err = fn(pte, pte_page, addr, data);
-+		if (err)
-+                        break;
-+        } while (pte++, addr += PAGE_SIZE, addr != end);
-+
-+        if (mm != &init_mm)
-+                pte_unmap(pte-1);
-+        return err;
-+
-+}
-+
-+static inline int generic_pmd_range(struct mm_struct *mm,
-+                                    pud_t *pud, 
-+                                    unsigned long addr, 
-+                                    unsigned long end,
-+                                    pte_fn_t fn, void *data)
-+{
-+	pmd_t *pmd;
-+	unsigned long next;
-+        int err;
-+
-+	pmd = pmd_alloc(mm, pud, addr);
-+	if (!pmd)
-+		return -ENOMEM;
-+	do {
-+		next = pmd_addr_end(addr, end);
-+                err = generic_pte_range(mm, pmd, addr, next, fn, data);
-+                if (err)
-+                    break;
-+	} while (pmd++, addr = next, addr != end);
-+	return err;
-+}
-+
-+static inline int generic_pud_range(struct mm_struct *mm, pgd_t *pgd, 
-+                                    unsigned long addr,
-+                                    unsigned long end,
-+                                    pte_fn_t fn, void *data)
-+{
-+	pud_t *pud;
-+	unsigned long next;
-+        int err;
-+
-+	pud = pud_alloc(mm, pgd, addr);
-+	if (!pud)
-+		return -ENOMEM;
-+	do {
-+		next = pud_addr_end(addr, end);
-+		err = generic_pmd_range(mm, pud, addr, next, fn, data);
-+                if (err)
-+			break;
-+	} while (pud++, addr = next, addr != end);
-+	return err;
-+}
-+
-+/*
-+ * Scan a region of virtual memory, filling in page tables as necessary
-+ * and calling a provided function on each leaf page table.
-+ */
-+int generic_page_range(struct mm_struct *mm, unsigned long addr, 
-+                  unsigned long size, pte_fn_t fn, void *data)
-+{
-+	pgd_t *pgd;
-+	unsigned long next;
-+	unsigned long end = addr + size;
-+	int err;
-+
-+	BUG_ON(addr >= end);
-+	pgd = pgd_offset(mm, addr);
-+	spin_lock(&mm->page_table_lock);
-+	do {
-+		next = pgd_addr_end(addr, end);
-+		err = generic_pud_range(mm, pgd, addr, next, fn, data);
-+		if (err)
-+			break;
-+	} while (pgd++, addr = next, addr != end);
-+	spin_unlock(&mm->page_table_lock);
-+	return err;
-+}
-+
- /*
-  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
-  * servicing faults for write access.  In the normal case, do always want
-@@ -1249,20 +1365,15 @@ static int do_wp_page(struct mm_struct *
- 	struct page *old_page, *new_page;
- 	unsigned long pfn = pte_pfn(pte);
- 	pte_t entry;
-+	struct page invalid_page;
- 
- 	if (unlikely(!pfn_valid(pfn))) {
--		/*
--		 * This should really halt the system so it can be debugged or
--		 * at least the kernel stops what it's doing before it corrupts
--		 * data, but for the moment just pretend this is OOM.
--		 */
--		pte_unmap(page_table);
--		printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
--				address);
--		spin_unlock(&mm->page_table_lock);
--		return VM_FAULT_OOM;
-+		/* This can happen with /dev/mem (PROT_WRITE, MAP_PRIVATE). */
-+		invalid_page.flags = (1<<PG_reserved) | (1<<PG_locked);
-+		old_page = &invalid_page;
-+	} else {
-+		old_page = pfn_to_page(pfn);
- 	}
--	old_page = pfn_to_page(pfn);
- 
- 	if (!TestSetPageLocked(old_page)) {
- 		int reuse = can_share_swap_page(old_page);
-@@ -1298,7 +1409,13 @@ static int do_wp_page(struct mm_struct *
- 		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
- 		if (!new_page)
- 			goto no_new_page;
--		copy_user_highpage(new_page, old_page, address);
-+		if (old_page == &invalid_page) {
-+			char *vto = kmap_atomic(new_page, KM_USER1);
-+			copy_page(vto, (void *)(address & PAGE_MASK));
-+			kunmap_atomic(vto, KM_USER1);
-+		} else {
-+			copy_user_highpage(new_page, old_page, address);
-+		}
- 	}
- 	/*
- 	 * Re-check the pte - we dropped the lock
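
The generic_page_range() function added above walks [addr, addr + size), allocating intermediate page-table levels as needed and calling the supplied pte_fn_t once per leaf PTE slot under mm->page_table_lock. A kernel-context sketch of a hypothetical caller (not part of this patch) that merely counts entries which are already present:

#include <linux/mm.h>

static int count_present(pte_t *pte, struct page *pte_page,
                         unsigned long addr, void *data)
{
    unsigned long *count = data;

    if (pte_present(*pte))
        (*count)++;
    return 0;    /* a non-zero return aborts the walk */
}

static unsigned long count_mapped(struct mm_struct *mm,
                                  unsigned long start, unsigned long len)
{
    unsigned long n = 0;

    generic_page_range(mm, start, len, count_present, &n);
    return n;
}
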
-diff -Nurp pristine-linux-2.6.12/mm/mempolicy.c linux-2.6.12-xen/mm/mempolicy.c
---- pristine-linux-2.6.12/mm/mempolicy.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/mm/mempolicy.c	2006-02-25 00:12:33.774993795 +0100
-@@ -409,7 +409,7 @@ asmlinkage long sys_set_mempolicy(int mo
- 	struct mempolicy *new;
- 	DECLARE_BITMAP(nodes, MAX_NUMNODES);
- 
--	if (mode > MPOL_MAX)
-+	if (mode < 0 || mode > MPOL_MAX)
- 		return -EINVAL;
- 	err = get_nodes(nodes, nmask, maxnode, mode);
- 	if (err)
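
The one-line mempolicy.c fix above matters because mode arrives from userspace as a signed int, so checking only the upper bound would let a negative value slip past the validation. A minimal userspace sketch of the difference; DEMO_MPOL_MAX is an illustrative stand-in for the real MPOL_MAX:

#include <stdio.h>

#define DEMO_MPOL_MAX 3    /* stand-in for the real constant */

static int validate_old(int mode)
{
    return (mode > DEMO_MPOL_MAX) ? -1 : 0;    /* misses negative modes */
}

static int validate_new(int mode)
{
    return (mode < 0 || mode > DEMO_MPOL_MAX) ? -1 : 0;
}

int main(void)
{
    int mode = -1;    /* hostile value from userspace */

    printf("old check accepts it: %s, new check accepts it: %s\n",
           validate_old(mode) == 0 ? "yes" : "no",
           validate_new(mode) == 0 ? "yes" : "no");
    return 0;
}
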
-diff -Nurp pristine-linux-2.6.12/mm/mmap.c linux-2.6.12-xen/mm/mmap.c
---- pristine-linux-2.6.12/mm/mmap.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/mm/mmap.c	2006-02-16 23:44:08.000000000 +0100
-@@ -1904,6 +1904,10 @@ void exit_mmap(struct mm_struct *mm)
- 	unsigned long nr_accounted = 0;
- 	unsigned long end;
- 
-+#ifdef arch_exit_mmap
-+	arch_exit_mmap(mm);
-+#endif
-+
- 	lru_add_drain();
- 
- 	spin_lock(&mm->page_table_lock);
-diff -Nurp pristine-linux-2.6.12/mm/page_alloc.c linux-2.6.12-xen/mm/page_alloc.c
---- pristine-linux-2.6.12/mm/page_alloc.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/mm/page_alloc.c	2006-02-16 23:44:08.000000000 +0100
-@@ -368,7 +368,8 @@ void __free_pages_ok(struct page *page, 
- 	LIST_HEAD(list);
- 	int i;
- 
--	arch_free_page(page, order);
-+	if (arch_free_page(page, order))
-+		return;
- 
- 	mod_page_state(pgfree, 1 << order);
- 
-@@ -608,7 +609,8 @@ static void fastcall free_hot_cold_page(
- 	struct per_cpu_pages *pcp;
- 	unsigned long flags;
- 
--	arch_free_page(page, 0);
-+	if (arch_free_page(page, 0))
-+		return;
- 
- 	kernel_map_pages(page, 1, 0);
- 	inc_page_state(pgfree);
-diff -Nurp pristine-linux-2.6.12/net/8021q/vlan.c linux-2.6.12-xen/net/8021q/vlan.c
---- pristine-linux-2.6.12/net/8021q/vlan.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/8021q/vlan.c	2006-02-25 00:12:33.775993644 +0100
-@@ -578,6 +578,14 @@ static int vlan_device_event(struct noti
- 			if (!vlandev)
- 				continue;
- 
-+			if (netif_carrier_ok(dev)) {
-+				if (!netif_carrier_ok(vlandev))
-+					netif_carrier_on(vlandev);
-+			} else {
-+				if (netif_carrier_ok(vlandev))
-+					netif_carrier_off(vlandev);
-+			}
-+
- 			if ((vlandev->state & VLAN_LINK_STATE_MASK) != flgs) {
- 				vlandev->state = (vlandev->state &~ VLAN_LINK_STATE_MASK) 
- 					| flgs;
-diff -Nurp pristine-linux-2.6.12/net/core/dev.c linux-2.6.12-xen/net/core/dev.c
---- pristine-linux-2.6.12/net/core/dev.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/core/dev.c	2006-02-16 23:44:08.000000000 +0100
-@@ -115,6 +115,11 @@
- #endif	/* CONFIG_NET_RADIO */
- #include <asm/current.h>
- 
-+#include <net/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+
-+
- /* This define, if set, will randomly drop a packet when congestion
-  * is more than moderate.  It helps fairness in the multi-interface
-  * case when one of them is a hog, but it kills performance for the
-@@ -1261,6 +1266,35 @@ int dev_queue_xmit(struct sk_buff *skb)
- 	    __skb_linearize(skb, GFP_ATOMIC))
- 		goto out_kfree_skb;
- 
-+	/* If a checksum-deferred packet is forwarded to a device that needs a
-+	 * checksum, correct the pointers and force checksumming.
-+	 */
-+	if (skb->proto_csum_blank) {
-+		if (skb->protocol != htons(ETH_P_IP))
-+			goto out_kfree_skb;
-+		skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
-+		if (skb->h.raw >= skb->tail)
-+			goto out_kfree_skb;
-+		switch (skb->nh.iph->protocol) {
-+		case IPPROTO_TCP:
-+			skb->csum = offsetof(struct tcphdr, check);
-+			break;
-+		case IPPROTO_UDP:
-+			skb->csum = offsetof(struct udphdr, check);
-+			break;
-+		default:
-+			if (net_ratelimit())
-+				printk(KERN_ERR "Attempting to checksum a non-"
-+				       "TCP/UDP packet, dropping a protocol"
-+				       " %d packet", skb->nh.iph->protocol);
-+			rc = -EPROTO;
-+			goto out_kfree_skb;
-+		}
-+		if ((skb->h.raw + skb->csum + 2) > skb->tail)
-+			goto out_kfree_skb;
-+		skb->ip_summed = CHECKSUM_HW;
-+	}
-+
- 	/* If packet is not checksummed and device does not support
- 	 * checksumming for this protocol, complete checksumming here.
- 	 */
-@@ -1680,6 +1714,17 @@ int netif_receive_skb(struct sk_buff *sk
- 	}
- #endif
- 
-+	switch (skb->ip_summed) {
-+	case CHECKSUM_UNNECESSARY:
-+		skb->proto_csum_valid = 1;
-+		break;
-+	case CHECKSUM_HW:
-+		/* XXX Implement me. */
-+	default:
-+		skb->proto_csum_valid = 0;
-+		break;
-+	}
-+
- 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
- 		if (!ptype->dev || ptype->dev == skb->dev) {
- 			if (pt_prev) 
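
The dev_queue_xmit() hunk above finishes checksum-deferred ("csum-blank") packets by pointing skb->h.raw at the transport header, 4*ihl bytes past the start of the IP header, recording the checksum field's position via offsetof(struct tcphdr, check) or offsetof(struct udphdr, check), and marking the skb CHECKSUM_HW so the existing code just below completes the sum when the device cannot. A small userspace sketch of the same offset arithmetic, assuming the Linux uapi headers are available; the sample values are made up:

#include <stddef.h>
#include <stdio.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

/* Byte offset of the L4 checksum field from the start of the IP header,
 * or -1 for protocols the fast path above does not handle. */
static long csum_field_offset(const struct iphdr *iph)
{
    long l4 = iph->ihl * 4;    /* transport header starts here */

    switch (iph->protocol) {
    case IPPROTO_TCP:
        return l4 + offsetof(struct tcphdr, check);
    case IPPROTO_UDP:
        return l4 + offsetof(struct udphdr, check);
    default:
        return -1;
    }
}

int main(void)
{
    struct iphdr iph = { .ihl = 5, .protocol = IPPROTO_TCP };

    printf("TCP checksum field at offset %ld\n", csum_field_offset(&iph));
    return 0;
}
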
-diff -Nurp pristine-linux-2.6.12/net/core/skbuff.c linux-2.6.12-xen/net/core/skbuff.c
---- pristine-linux-2.6.12/net/core/skbuff.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/core/skbuff.c	2006-02-16 23:44:08.000000000 +0100
-@@ -129,6 +129,7 @@ void skb_under_panic(struct sk_buff *skb
-  *	Buffers may only be allocated from interrupts using a @gfp_mask of
-  *	%GFP_ATOMIC.
-  */
-+#ifndef CONFIG_HAVE_ARCH_ALLOC_SKB
- struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
- {
- 	struct sk_buff *skb;
-@@ -166,6 +167,7 @@ nodata:
- 	skb = NULL;
- 	goto out;
- }
-+#endif /* !CONFIG_HAVE_ARCH_ALLOC_SKB */
- 
- /**
-  *	alloc_skb_from_cache	-	allocate a network buffer
-@@ -353,6 +355,8 @@ struct sk_buff *skb_clone(struct sk_buff
- 	C(local_df);
- 	n->cloned = 1;
- 	n->nohdr = 0;
-+	C(proto_csum_valid);
-+	C(proto_csum_blank);
- 	C(pkt_type);
- 	C(ip_summed);
- 	C(priority);
-diff -Nurp pristine-linux-2.6.12/net/ipv4/icmp.c linux-2.6.12-xen/net/ipv4/icmp.c
---- pristine-linux-2.6.12/net/ipv4/icmp.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv4/icmp.c	2006-02-25 00:12:33.776993493 +0100
-@@ -349,12 +349,12 @@ static void icmp_push_reply(struct icmp_
- {
- 	struct sk_buff *skb;
- 
--	ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
--		       icmp_param->data_len+icmp_param->head_len,
--		       icmp_param->head_len,
--		       ipc, rt, MSG_DONTWAIT);
--
--	if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
-+	if (ip_append_data(icmp_socket->sk, icmp_glue_bits, icmp_param,
-+		           icmp_param->data_len+icmp_param->head_len,
-+		           icmp_param->head_len,
-+		           ipc, rt, MSG_DONTWAIT) < 0)
-+		ip_flush_pending_frames(icmp_socket->sk);
-+	else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
- 		struct icmphdr *icmph = skb->h.icmph;
- 		unsigned int csum = 0;
- 		struct sk_buff *skb1;
-diff -Nurp pristine-linux-2.6.12/net/ipv4/ip_output.c linux-2.6.12-xen/net/ipv4/ip_output.c
---- pristine-linux-2.6.12/net/ipv4/ip_output.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv4/ip_output.c	2006-02-25 00:12:33.777993342 +0100
-@@ -111,7 +111,6 @@ static int ip_dev_loopback_xmit(struct s
- #ifdef CONFIG_NETFILTER_DEBUG
- 	nf_debug_ip_loopback_xmit(newskb);
- #endif
--	nf_reset(newskb);
- 	netif_rx(newskb);
- 	return 0;
- }
-@@ -196,8 +195,6 @@ static inline int ip_finish_output2(stru
- 	nf_debug_ip_finish_output2(skb);
- #endif /*CONFIG_NETFILTER_DEBUG*/
- 
--	nf_reset(skb);
--
- 	if (hh) {
- 		int hh_alen;
- 
-diff -Nurp pristine-linux-2.6.12/net/ipv4/ip_sockglue.c linux-2.6.12-xen/net/ipv4/ip_sockglue.c
---- pristine-linux-2.6.12/net/ipv4/ip_sockglue.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv4/ip_sockglue.c	2006-02-25 00:12:33.777993342 +0100
-@@ -848,6 +848,9 @@ mc_msf_out:
-  
- 		case IP_IPSEC_POLICY:
- 		case IP_XFRM_POLICY:
-+			err = -EPERM;
-+			if (!capable(CAP_NET_ADMIN))
-+				break;
- 			err = xfrm_user_policy(sk, optname, optval, optlen);
- 			break;
- 
-diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_core.c linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_core.c
---- pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_core.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_core.c	2006-02-25 00:12:33.778993192 +0100
-@@ -1124,6 +1124,9 @@ void ip_conntrack_cleanup(void)
- 		schedule();
- 		goto i_see_dead_people;
- 	}
-+	/* wait until all references to ip_conntrack_untracked are dropped */
-+	while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
-+		schedule();
- 
- 	kmem_cache_destroy(ip_conntrack_cachep);
- 	kmem_cache_destroy(ip_conntrack_expect_cachep);
-diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_proto_udp.c linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_proto_udp.c
---- pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_proto_udp.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_proto_udp.c	2006-02-25 00:12:33.820986862 +0100
-@@ -120,6 +120,7 @@ static int udp_error(struct sk_buff *skb
- 	 * and moreover root might send raw packets.
- 	 * FIXME: Source route IP option packets --RR */
- 	if (hooknum == NF_IP_PRE_ROUTING
-+	    && skb->ip_summed != CHECKSUM_UNNECESSARY
- 	    && csum_tcpudp_magic(iph->saddr, iph->daddr, udplen, IPPROTO_UDP,
- 			         skb->ip_summed == CHECKSUM_HW ? skb->csum
- 			      	 : skb_checksum(skb, iph->ihl*4, udplen, 0))) {
-diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_standalone.c linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_standalone.c
---- pristine-linux-2.6.12/net/ipv4/netfilter/ip_conntrack_standalone.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv4/netfilter/ip_conntrack_standalone.c	2006-02-25 00:12:33.779993041 +0100
-@@ -432,6 +432,13 @@ static unsigned int ip_conntrack_defrag(
- 				        const struct net_device *out,
- 				        int (*okfn)(struct sk_buff *))
- {
-+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
-+	/* Previously seen (loopback)?  Ignore.  Do this before
-+           fragment check. */
-+	if ((*pskb)->nfct)
-+		return NF_ACCEPT;
-+#endif
-+
- 	/* Gather fragments. */
- 	if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
- 		*pskb = ip_ct_gather_frags(*pskb,
-diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_tcp.c linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c
---- pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_tcp.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c	2006-02-25 00:12:33.822986561 +0100
-@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tup
- 		 enum ip_nat_manip_type maniptype,
- 		 const struct ip_conntrack *conntrack)
- {
--	static u_int16_t port, *portptr;
-+	static u_int16_t port;
-+	u_int16_t *portptr;
- 	unsigned int range_size, min, i;
- 
- 	if (maniptype == IP_NAT_MANIP_SRC)
-@@ -127,10 +128,16 @@ tcp_manip_pkt(struct sk_buff **pskb,
- 	if (hdrsize < sizeof(*hdr))
- 		return 1;
- 
--	hdr->check = ip_nat_cheat_check(~oldip, newip,
-+	if ((*pskb)->proto_csum_blank) {
-+		hdr->check = ip_nat_cheat_check(oldip, ~newip,
-+				ip_nat_cheat_check(oldport ^ 0xFFFF,
-+					newport, hdr->check));
-+	} else { 
-+		hdr->check = ip_nat_cheat_check(~oldip, newip,
- 					ip_nat_cheat_check(oldport ^ 0xFFFF,
- 							   newport,
- 							   hdr->check));
-+	}
- 	return 1;
- }
- 
-diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_tcp.c.orig linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c.orig
---- pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_tcp.c.orig	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_tcp.c.orig	2006-02-25 00:12:33.779993041 +0100
-@@ -0,0 +1,179 @@
-+/* (C) 1999-2001 Paul `Rusty' Russell
-+ * (C) 2002-2004 Netfilter Core Team <coreteam at netfilter.org>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/init.h>
-+#include <linux/netfilter.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/if.h>
-+#include <linux/netfilter_ipv4/ip_nat.h>
-+#include <linux/netfilter_ipv4/ip_nat_rule.h>
-+#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-+#include <linux/netfilter_ipv4/ip_nat_core.h>
-+
-+static int
-+tcp_in_range(const struct ip_conntrack_tuple *tuple,
-+	     enum ip_nat_manip_type maniptype,
-+	     const union ip_conntrack_manip_proto *min,
-+	     const union ip_conntrack_manip_proto *max)
-+{
-+	u_int16_t port;
-+
-+	if (maniptype == IP_NAT_MANIP_SRC)
-+		port = tuple->src.u.tcp.port;
-+	else
-+		port = tuple->dst.u.tcp.port;
-+
-+	return ntohs(port) >= ntohs(min->tcp.port)
-+		&& ntohs(port) <= ntohs(max->tcp.port);
-+}
-+
-+static int
-+tcp_unique_tuple(struct ip_conntrack_tuple *tuple,
-+		 const struct ip_nat_range *range,
-+		 enum ip_nat_manip_type maniptype,
-+		 const struct ip_conntrack *conntrack)
-+{
-+	static u_int16_t port;
-+	u_int16_t *portptr;
-+	unsigned int range_size, min, i;
-+
-+	if (maniptype == IP_NAT_MANIP_SRC)
-+		portptr = &tuple->src.u.tcp.port;
-+	else
-+		portptr = &tuple->dst.u.tcp.port;
-+
-+	/* If no range specified... */
-+	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
-+		/* If it's dst rewrite, can't change port */
-+		if (maniptype == IP_NAT_MANIP_DST)
-+			return 0;
-+
-+		/* Map privileged onto privileged. */
-+		if (ntohs(*portptr) < 1024) {
-+			/* Loose convention: >> 512 is credential passing */
-+			if (ntohs(*portptr)<512) {
-+				min = 1;
-+				range_size = 511 - min + 1;
-+			} else {
-+				min = 600;
-+				range_size = 1023 - min + 1;
-+			}
-+		} else {
-+			min = 1024;
-+			range_size = 65535 - 1024 + 1;
-+		}
-+	} else {
-+		min = ntohs(range->min.tcp.port);
-+		range_size = ntohs(range->max.tcp.port) - min + 1;
-+	}
-+
-+	for (i = 0; i < range_size; i++, port++) {
-+		*portptr = htons(min + port % range_size);
-+		if (!ip_nat_used_tuple(tuple, conntrack)) {
-+			return 1;
-+		}
-+	}
-+	return 0;
-+}
-+
-+static int
-+tcp_manip_pkt(struct sk_buff **pskb,
-+	      unsigned int iphdroff,
-+	      const struct ip_conntrack_tuple *tuple,
-+	      enum ip_nat_manip_type maniptype)
-+{
-+	struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
-+	struct tcphdr *hdr;
-+	unsigned int hdroff = iphdroff + iph->ihl*4;
-+	u32 oldip, newip;
-+	u16 *portptr, newport, oldport;
-+	int hdrsize = 8; /* TCP connection tracking guarantees this much */
-+
-+	/* this could be a inner header returned in icmp packet; in such
-+	   cases we cannot update the checksum field since it is outside of
-+	   the 8 bytes of transport layer headers we are guaranteed */
-+	if ((*pskb)->len >= hdroff + sizeof(struct tcphdr))
-+		hdrsize = sizeof(struct tcphdr);
-+
-+	if (!skb_ip_make_writable(pskb, hdroff + hdrsize))
-+		return 0;
-+
-+	iph = (struct iphdr *)((*pskb)->data + iphdroff);
-+	hdr = (struct tcphdr *)((*pskb)->data + hdroff);
-+
-+	if (maniptype == IP_NAT_MANIP_SRC) {
-+		/* Get rid of src ip and src pt */
-+		oldip = iph->saddr;
-+		newip = tuple->src.ip;
-+		newport = tuple->src.u.tcp.port;
-+		portptr = &hdr->source;
-+	} else {
-+		/* Get rid of dst ip and dst pt */
-+		oldip = iph->daddr;
-+		newip = tuple->dst.ip;
-+		newport = tuple->dst.u.tcp.port;
-+		portptr = &hdr->dest;
-+	}
-+
-+	oldport = *portptr;
-+	*portptr = newport;
-+
-+	if (hdrsize < sizeof(*hdr))
-+		return 1;
-+
-+	hdr->check = ip_nat_cheat_check(~oldip, newip,
-+					ip_nat_cheat_check(oldport ^ 0xFFFF,
-+							   newport,
-+							   hdr->check));
-+	return 1;
-+}
-+
-+static unsigned int
-+tcp_print(char *buffer,
-+	  const struct ip_conntrack_tuple *match,
-+	  const struct ip_conntrack_tuple *mask)
-+{
-+	unsigned int len = 0;
-+
-+	if (mask->src.u.tcp.port)
-+		len += sprintf(buffer + len, "srcpt=%u ",
-+			       ntohs(match->src.u.tcp.port));
-+
-+
-+	if (mask->dst.u.tcp.port)
-+		len += sprintf(buffer + len, "dstpt=%u ",
-+			       ntohs(match->dst.u.tcp.port));
-+
-+	return len;
-+}
-+
-+static unsigned int
-+tcp_print_range(char *buffer, const struct ip_nat_range *range)
-+{
-+	if (range->min.tcp.port != 0 || range->max.tcp.port != 0xFFFF) {
-+		if (range->min.tcp.port == range->max.tcp.port)
-+			return sprintf(buffer, "port %u ",
-+				       ntohs(range->min.tcp.port));
-+		else
-+			return sprintf(buffer, "ports %u-%u ",
-+				       ntohs(range->min.tcp.port),
-+				       ntohs(range->max.tcp.port));
-+	}
-+	else return 0;
-+}
-+
-+struct ip_nat_protocol ip_nat_protocol_tcp
-+= { "TCP", IPPROTO_TCP,
-+    tcp_manip_pkt,
-+    tcp_in_range,
-+    tcp_unique_tuple,
-+    tcp_print,
-+    tcp_print_range
-+};
-diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_udp.c linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_udp.c
---- pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_udp.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_udp.c	2006-02-25 00:12:33.821986711 +0100
-@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tup
- 		 enum ip_nat_manip_type maniptype,
- 		 const struct ip_conntrack *conntrack)
- {
--	static u_int16_t port, *portptr;
-+	static u_int16_t port;
-+	u_int16_t *portptr;
- 	unsigned int range_size, min, i;
- 
- 	if (maniptype == IP_NAT_MANIP_SRC)
-@@ -112,11 +113,19 @@ udp_manip_pkt(struct sk_buff **pskb,
- 		newport = tuple->dst.u.udp.port;
- 		portptr = &hdr->dest;
- 	}
--	if (hdr->check) /* 0 is a special case meaning no checksum */
--		hdr->check = ip_nat_cheat_check(~oldip, newip,
-+	
-+	if (hdr->check) { /* 0 is a special case meaning no checksum */
-+		if ((*pskb)->proto_csum_blank) {
-+			hdr->check = ip_nat_cheat_check(oldip, ~newip, 
-+					ip_nat_cheat_check(*portptr ^ 0xFFFF, 
-+						newport, hdr->check));
-+		} else {
-+			hdr->check = ip_nat_cheat_check(~oldip, newip,
- 					ip_nat_cheat_check(*portptr ^ 0xFFFF,
- 							   newport,
- 							   hdr->check));
-+		}
-+	}
- 	*portptr = newport;
- 	return 1;
- }
-diff -Nurp pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_udp.c.orig linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_udp.c.orig
---- pristine-linux-2.6.12/net/ipv4/netfilter/ip_nat_proto_udp.c.orig	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/net/ipv4/netfilter/ip_nat_proto_udp.c.orig	2006-02-25 00:12:33.780992890 +0100
-@@ -0,0 +1,166 @@
-+/* (C) 1999-2001 Paul `Rusty' Russell
-+ * (C) 2002-2004 Netfilter Core Team <coreteam at netfilter.org>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#include <linux/types.h>
-+#include <linux/init.h>
-+#include <linux/netfilter.h>
-+#include <linux/ip.h>
-+#include <linux/udp.h>
-+#include <linux/if.h>
-+
-+#include <linux/netfilter_ipv4/ip_nat.h>
-+#include <linux/netfilter_ipv4/ip_nat_core.h>
-+#include <linux/netfilter_ipv4/ip_nat_rule.h>
-+#include <linux/netfilter_ipv4/ip_nat_protocol.h>
-+
-+static int
-+udp_in_range(const struct ip_conntrack_tuple *tuple,
-+	     enum ip_nat_manip_type maniptype,
-+	     const union ip_conntrack_manip_proto *min,
-+	     const union ip_conntrack_manip_proto *max)
-+{
-+	u_int16_t port;
-+
-+	if (maniptype == IP_NAT_MANIP_SRC)
-+		port = tuple->src.u.udp.port;
-+	else
-+		port = tuple->dst.u.udp.port;
-+
-+	return ntohs(port) >= ntohs(min->udp.port)
-+		&& ntohs(port) <= ntohs(max->udp.port);
-+}
-+
-+static int
-+udp_unique_tuple(struct ip_conntrack_tuple *tuple,
-+		 const struct ip_nat_range *range,
-+		 enum ip_nat_manip_type maniptype,
-+		 const struct ip_conntrack *conntrack)
-+{
-+	static u_int16_t port;
-+	u_int16_t *portptr;
-+	unsigned int range_size, min, i;
-+
-+	if (maniptype == IP_NAT_MANIP_SRC)
-+		portptr = &tuple->src.u.udp.port;
-+	else
-+		portptr = &tuple->dst.u.udp.port;
-+
-+	/* If no range specified... */
-+	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
-+		/* If it's dst rewrite, can't change port */
-+		if (maniptype == IP_NAT_MANIP_DST)
-+			return 0;
-+
-+		if (ntohs(*portptr) < 1024) {
-+			/* Loose convention: >> 512 is credential passing */
-+			if (ntohs(*portptr)<512) {
-+				min = 1;
-+				range_size = 511 - min + 1;
-+			} else {
-+				min = 600;
-+				range_size = 1023 - min + 1;
-+			}
-+		} else {
-+			min = 1024;
-+			range_size = 65535 - 1024 + 1;
-+		}
-+	} else {
-+		min = ntohs(range->min.udp.port);
-+		range_size = ntohs(range->max.udp.port) - min + 1;
-+	}
-+
-+	for (i = 0; i < range_size; i++, port++) {
-+		*portptr = htons(min + port % range_size);
-+		if (!ip_nat_used_tuple(tuple, conntrack))
-+			return 1;
-+	}
-+	return 0;
-+}
-+
-+static int
-+udp_manip_pkt(struct sk_buff **pskb,
-+	      unsigned int iphdroff,
-+	      const struct ip_conntrack_tuple *tuple,
-+	      enum ip_nat_manip_type maniptype)
-+{
-+	struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
-+	struct udphdr *hdr;
-+	unsigned int hdroff = iphdroff + iph->ihl*4;
-+	u32 oldip, newip;
-+	u16 *portptr, newport;
-+
-+	if (!skb_ip_make_writable(pskb, hdroff + sizeof(*hdr)))
-+		return 0;
-+
-+	iph = (struct iphdr *)((*pskb)->data + iphdroff);
-+	hdr = (struct udphdr *)((*pskb)->data + hdroff);
-+
-+	if (maniptype == IP_NAT_MANIP_SRC) {
-+		/* Get rid of src ip and src pt */
-+		oldip = iph->saddr;
-+		newip = tuple->src.ip;
-+		newport = tuple->src.u.udp.port;
-+		portptr = &hdr->source;
-+	} else {
-+		/* Get rid of dst ip and dst pt */
-+		oldip = iph->daddr;
-+		newip = tuple->dst.ip;
-+		newport = tuple->dst.u.udp.port;
-+		portptr = &hdr->dest;
-+	}
-+	if (hdr->check) /* 0 is a special case meaning no checksum */
-+		hdr->check = ip_nat_cheat_check(~oldip, newip,
-+					ip_nat_cheat_check(*portptr ^ 0xFFFF,
-+							   newport,
-+							   hdr->check));
-+	*portptr = newport;
-+	return 1;
-+}
-+
-+static unsigned int
-+udp_print(char *buffer,
-+	  const struct ip_conntrack_tuple *match,
-+	  const struct ip_conntrack_tuple *mask)
-+{
-+	unsigned int len = 0;
-+
-+	if (mask->src.u.udp.port)
-+		len += sprintf(buffer + len, "srcpt=%u ",
-+			       ntohs(match->src.u.udp.port));
-+
-+
-+	if (mask->dst.u.udp.port)
-+		len += sprintf(buffer + len, "dstpt=%u ",
-+			       ntohs(match->dst.u.udp.port));
-+
-+	return len;
-+}
-+
-+static unsigned int
-+udp_print_range(char *buffer, const struct ip_nat_range *range)
-+{
-+	if (range->min.udp.port != 0 || range->max.udp.port != 0xFFFF) {
-+		if (range->min.udp.port == range->max.udp.port)
-+			return sprintf(buffer, "port %u ",
-+				       ntohs(range->min.udp.port));
-+		else
-+			return sprintf(buffer, "ports %u-%u ",
-+				       ntohs(range->min.udp.port),
-+				       ntohs(range->max.udp.port));
-+	}
-+	else return 0;
-+}
-+
-+struct ip_nat_protocol ip_nat_protocol_udp
-+= { "UDP", IPPROTO_UDP,
-+    udp_manip_pkt,
-+    udp_in_range,
-+    udp_unique_tuple,
-+    udp_print,
-+    udp_print_range
-+};
-diff -Nurp pristine-linux-2.6.12/net/ipv6/ip6_input.c linux-2.6.12-xen/net/ipv6/ip6_input.c
---- pristine-linux-2.6.12/net/ipv6/ip6_input.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv6/ip6_input.c	2006-02-25 00:12:33.780992890 +0100
-@@ -198,12 +198,13 @@ resubmit:
- 		if (!raw_sk) {
- 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- 				IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
--				icmpv6_param_prob(skb, ICMPV6_UNK_NEXTHDR, nhoff);
-+				icmpv6_send(skb, ICMPV6_PARAMPROB,
-+				            ICMPV6_UNK_NEXTHDR, nhoff,
-+				            skb->dev);
- 			}
--		} else {
-+		} else
- 			IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
--			kfree_skb(skb);
--		}
-+		kfree_skb(skb);
- 	}
- 	rcu_read_unlock();
- 	return 0;
-diff -Nurp pristine-linux-2.6.12/net/ipv6/ipv6_sockglue.c linux-2.6.12-xen/net/ipv6/ipv6_sockglue.c
---- pristine-linux-2.6.12/net/ipv6/ipv6_sockglue.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv6/ipv6_sockglue.c	2006-02-25 00:12:33.781992740 +0100
-@@ -503,6 +503,9 @@ done:
- 		break;
- 	case IPV6_IPSEC_POLICY:
- 	case IPV6_XFRM_POLICY:
-+		retv = -EPERM;
-+		if (!capable(CAP_NET_ADMIN))
-+			break;
- 		retv = xfrm_user_policy(sk, optname, optval, optlen);
- 		break;
- 
-diff -Nurp pristine-linux-2.6.12/net/ipv6/netfilter/ip6_queue.c linux-2.6.12-xen/net/ipv6/netfilter/ip6_queue.c
---- pristine-linux-2.6.12/net/ipv6/netfilter/ip6_queue.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/ipv6/netfilter/ip6_queue.c	2006-02-25 00:12:33.781992740 +0100
-@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
- static void
- ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
- {
-+	local_bh_disable();
- 	nf_reinject(entry->skb, entry->info, verdict);
-+	local_bh_enable();
- 	kfree(entry);
- }
- 
-diff -Nurp pristine-linux-2.6.12/net/netlink/af_netlink.c linux-2.6.12-xen/net/netlink/af_netlink.c
---- pristine-linux-2.6.12/net/netlink/af_netlink.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/netlink/af_netlink.c	2006-02-25 00:12:33.782992589 +0100
-@@ -315,8 +315,8 @@ err:
- static void netlink_remove(struct sock *sk)
- {
- 	netlink_table_grab();
--	nl_table[sk->sk_protocol].hash.entries--;
--	sk_del_node_init(sk);
-+	if (sk_del_node_init(sk))
-+		nl_table[sk->sk_protocol].hash.entries--;
- 	if (nlk_sk(sk)->groups)
- 		__sk_del_bind_node(sk);
- 	netlink_table_ungrab();
-@@ -429,7 +429,12 @@ retry:
- 	err = netlink_insert(sk, pid);
- 	if (err == -EADDRINUSE)
- 		goto retry;
--	return 0;
-+
-+	/* If 2 threads race to autobind, that is fine.  */
-+	if (err == -EBUSY)
-+		err = 0;
-+
-+	return err;
- }
- 
- static inline int netlink_capable(struct socket *sock, unsigned int flag) 
-diff -Nurp pristine-linux-2.6.12/net/packet/af_packet.c linux-2.6.12-xen/net/packet/af_packet.c
---- pristine-linux-2.6.12/net/packet/af_packet.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/packet/af_packet.c	2006-02-25 00:12:33.783992438 +0100
-@@ -274,6 +274,9 @@ static int packet_rcv_spkt(struct sk_buf
- 	dst_release(skb->dst);
- 	skb->dst = NULL;
- 
-+	/* drop conntrack reference */
-+	nf_reset(skb);
-+
- 	spkt = (struct sockaddr_pkt*)skb->cb;
- 
- 	skb_push(skb, skb->data-skb->mac.raw);
-@@ -517,6 +520,9 @@ static int packet_rcv(struct sk_buff *sk
- 	dst_release(skb->dst);
- 	skb->dst = NULL;
- 
-+	/* drop conntrack reference */
-+	nf_reset(skb);
-+
- 	spin_lock(&sk->sk_receive_queue.lock);
- 	po->stats.tp_packets++;
- 	__skb_queue_tail(&sk->sk_receive_queue, skb);
-diff -Nurp pristine-linux-2.6.12/net/xfrm/xfrm_user.c linux-2.6.12-xen/net/xfrm/xfrm_user.c
---- pristine-linux-2.6.12/net/xfrm/xfrm_user.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/net/xfrm/xfrm_user.c	2006-02-25 00:12:33.784992288 +0100
-@@ -1180,6 +1180,9 @@ static struct xfrm_policy *xfrm_compile_
- 	if (nr > XFRM_MAX_DEPTH)
- 		return NULL;
- 
-+	if (p->dir > XFRM_POLICY_OUT)
-+		return NULL;
-+
- 	xp = xfrm_policy_alloc(GFP_KERNEL);
- 	if (xp == NULL) {
- 		*dir = -ENOBUFS;
-Binary files pristine-linux-2.6.12/scripts/basic/docproc and linux-2.6.12-xen/scripts/basic/docproc differ.
-diff -Nurp pristine-linux-2.6.12/scripts/basic/.docproc.cmd linux-2.6.12-xen/scripts/basic/.docproc.cmd
---- pristine-linux-2.6.12/scripts/basic/.docproc.cmd	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/basic/.docproc.cmd	2006-02-25 00:12:50.113530992 +0100
-@@ -0,0 +1,68 @@
-+cmd_scripts/basic/docproc := gcc -Wp,-MD,scripts/basic/.docproc.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer        -o scripts/basic/docproc scripts/basic/docproc.c
-+
-+deps_scripts/basic/docproc := \
-+  scripts/basic/docproc.c \
-+  /usr/include/stdio.h \
-+  /usr/include/features.h \
-+  /usr/include/sys/cdefs.h \
-+  /usr/include/gnu/stubs.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
-+  /usr/include/bits/types.h \
-+  /usr/include/bits/wordsize.h \
-+  /usr/include/bits/typesizes.h \
-+  /usr/include/libio.h \
-+  /usr/include/_G_config.h \
-+  /usr/include/wchar.h \
-+  /usr/include/bits/wchar.h \
-+  /usr/include/gconv.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
-+  /usr/include/bits/stdio_lim.h \
-+  /usr/include/bits/sys_errlist.h \
-+  /usr/include/bits/stdio.h \
-+  /usr/include/stdlib.h \
-+  /usr/include/sys/types.h \
-+  /usr/include/time.h \
-+  /usr/include/endian.h \
-+  /usr/include/bits/endian.h \
-+  /usr/include/sys/select.h \
-+  /usr/include/bits/select.h \
-+  /usr/include/bits/sigset.h \
-+  /usr/include/bits/time.h \
-+  /usr/include/sys/sysmacros.h \
-+  /usr/include/bits/pthreadtypes.h \
-+  /usr/include/bits/sched.h \
-+  /usr/include/alloca.h \
-+  /usr/include/string.h \
-+  /usr/include/bits/string.h \
-+  /usr/include/bits/string2.h \
-+  /usr/include/ctype.h \
-+  /usr/include/unistd.h \
-+  /usr/include/bits/posix_opt.h \
-+  /usr/include/bits/confname.h \
-+  /usr/include/getopt.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/limits.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/syslimits.h \
-+  /usr/include/limits.h \
-+  /usr/include/bits/posix1_lim.h \
-+  /usr/include/bits/local_lim.h \
-+  /usr/include/linux/limits.h \
-+  /usr/include/bits/posix2_lim.h \
-+  /usr/include/sys/wait.h \
-+  /usr/include/signal.h \
-+  /usr/include/bits/signum.h \
-+  /usr/include/bits/siginfo.h \
-+  /usr/include/bits/sigaction.h \
-+  /usr/include/bits/sigcontext.h \
-+  /usr/include/asm/sigcontext.h \
-+  /usr/include/asm-i486/sigcontext.h \
-+  /usr/include/linux/compiler.h \
-+  /usr/include/bits/sigstack.h \
-+  /usr/include/bits/sigthread.h \
-+  /usr/include/sys/resource.h \
-+  /usr/include/bits/resource.h \
-+  /usr/include/bits/waitflags.h \
-+  /usr/include/bits/waitstatus.h \
-+
-+scripts/basic/docproc: $(deps_scripts/basic/docproc)
-+
-+$(deps_scripts/basic/docproc):
-Binary files pristine-linux-2.6.12/scripts/basic/fixdep and linux-2.6.12-xen/scripts/basic/fixdep differ.
-diff -Nurp pristine-linux-2.6.12/scripts/basic/.fixdep.cmd linux-2.6.12-xen/scripts/basic/.fixdep.cmd
---- pristine-linux-2.6.12/scripts/basic/.fixdep.cmd	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/basic/.fixdep.cmd	2006-02-25 00:12:49.405637701 +0100
-@@ -0,0 +1,78 @@
-+cmd_scripts/basic/fixdep := gcc -Wp,-MD,scripts/basic/.fixdep.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer        -o scripts/basic/fixdep scripts/basic/fixdep.c
-+
-+deps_scripts/basic/fixdep := \
-+  scripts/basic/fixdep.c \
-+    $(wildcard include/config/his/driver.h) \
-+    $(wildcard include/config/my/option.h) \
-+    $(wildcard include/config/.h) \
-+    $(wildcard include/config/foo.h) \
-+    $(wildcard include/config/boom.h) \
-+  /usr/include/sys/types.h \
-+  /usr/include/features.h \
-+  /usr/include/sys/cdefs.h \
-+  /usr/include/gnu/stubs.h \
-+  /usr/include/bits/types.h \
-+  /usr/include/bits/wordsize.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
-+  /usr/include/bits/typesizes.h \
-+  /usr/include/time.h \
-+  /usr/include/endian.h \
-+  /usr/include/bits/endian.h \
-+  /usr/include/sys/select.h \
-+  /usr/include/bits/select.h \
-+  /usr/include/bits/sigset.h \
-+  /usr/include/bits/time.h \
-+  /usr/include/sys/sysmacros.h \
-+  /usr/include/bits/pthreadtypes.h \
-+  /usr/include/bits/sched.h \
-+  /usr/include/sys/stat.h \
-+  /usr/include/bits/stat.h \
-+  /usr/include/sys/mman.h \
-+  /usr/include/bits/mman.h \
-+  /usr/include/unistd.h \
-+  /usr/include/bits/posix_opt.h \
-+  /usr/include/bits/confname.h \
-+  /usr/include/getopt.h \
-+  /usr/include/fcntl.h \
-+  /usr/include/bits/fcntl.h \
-+  /usr/include/string.h \
-+  /usr/include/bits/string.h \
-+  /usr/include/bits/string2.h \
-+  /usr/include/stdlib.h \
-+  /usr/include/alloca.h \
-+  /usr/include/stdio.h \
-+  /usr/include/libio.h \
-+  /usr/include/_G_config.h \
-+  /usr/include/wchar.h \
-+  /usr/include/bits/wchar.h \
-+  /usr/include/gconv.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
-+  /usr/include/bits/stdio_lim.h \
-+  /usr/include/bits/sys_errlist.h \
-+  /usr/include/bits/stdio.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/limits.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/syslimits.h \
-+  /usr/include/limits.h \
-+  /usr/include/bits/posix1_lim.h \
-+  /usr/include/bits/local_lim.h \
-+  /usr/include/linux/limits.h \
-+  /usr/include/bits/posix2_lim.h \
-+  /usr/include/ctype.h \
-+  /usr/include/arpa/inet.h \
-+  /usr/include/netinet/in.h \
-+  /usr/include/stdint.h \
-+  /usr/include/sys/socket.h \
-+  /usr/include/sys/uio.h \
-+  /usr/include/bits/uio.h \
-+  /usr/include/bits/socket.h \
-+  /usr/include/bits/sockaddr.h \
-+  /usr/include/asm/socket.h \
-+  /usr/include/asm-i486/socket.h \
-+  /usr/include/asm/sockios.h \
-+  /usr/include/asm-i486/sockios.h \
-+  /usr/include/bits/in.h \
-+  /usr/include/bits/byteswap.h \
-+
-+scripts/basic/fixdep: $(deps_scripts/basic/fixdep)
-+
-+$(deps_scripts/basic/fixdep):
-Binary files pristine-linux-2.6.12/scripts/basic/split-include and linux-2.6.12-xen/scripts/basic/split-include differ.
-diff -Nurp pristine-linux-2.6.12/scripts/basic/.split-include.cmd linux-2.6.12-xen/scripts/basic/.split-include.cmd
---- pristine-linux-2.6.12/scripts/basic/.split-include.cmd	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/basic/.split-include.cmd	2006-02-25 00:12:49.723589773 +0100
-@@ -0,0 +1,58 @@
-+cmd_scripts/basic/split-include := gcc -Wp,-MD,scripts/basic/.split-include.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer        -o scripts/basic/split-include scripts/basic/split-include.c
-+
-+deps_scripts/basic/split-include := \
-+  scripts/basic/split-include.c \
-+    $(wildcard include/config/.h) \
-+  /usr/include/sys/stat.h \
-+  /usr/include/features.h \
-+  /usr/include/sys/cdefs.h \
-+  /usr/include/gnu/stubs.h \
-+  /usr/include/bits/types.h \
-+  /usr/include/bits/wordsize.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
-+  /usr/include/bits/typesizes.h \
-+  /usr/include/time.h \
-+  /usr/include/bits/stat.h \
-+  /usr/include/sys/types.h \
-+  /usr/include/endian.h \
-+  /usr/include/bits/endian.h \
-+  /usr/include/sys/select.h \
-+  /usr/include/bits/select.h \
-+  /usr/include/bits/sigset.h \
-+  /usr/include/bits/time.h \
-+  /usr/include/sys/sysmacros.h \
-+  /usr/include/bits/pthreadtypes.h \
-+  /usr/include/bits/sched.h \
-+  /usr/include/ctype.h \
-+  /usr/include/errno.h \
-+  /usr/include/bits/errno.h \
-+  /usr/include/linux/errno.h \
-+  /usr/include/asm/errno.h \
-+  /usr/include/asm-i486/errno.h \
-+  /usr/include/asm-generic/errno.h \
-+  /usr/include/asm-generic/errno-base.h \
-+  /usr/include/fcntl.h \
-+  /usr/include/bits/fcntl.h \
-+  /usr/include/stdio.h \
-+  /usr/include/libio.h \
-+  /usr/include/_G_config.h \
-+  /usr/include/wchar.h \
-+  /usr/include/bits/wchar.h \
-+  /usr/include/gconv.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
-+  /usr/include/bits/stdio_lim.h \
-+  /usr/include/bits/sys_errlist.h \
-+  /usr/include/bits/stdio.h \
-+  /usr/include/stdlib.h \
-+  /usr/include/alloca.h \
-+  /usr/include/string.h \
-+  /usr/include/bits/string.h \
-+  /usr/include/bits/string2.h \
-+  /usr/include/unistd.h \
-+  /usr/include/bits/posix_opt.h \
-+  /usr/include/bits/confname.h \
-+  /usr/include/getopt.h \
-+
-+scripts/basic/split-include: $(deps_scripts/basic/split-include)
-+
-+$(deps_scripts/basic/split-include):
-Binary files pristine-linux-2.6.12/scripts/kconfig/conf and linux-2.6.12-xen/scripts/kconfig/conf differ.
-diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.conf.cmd linux-2.6.12-xen/scripts/kconfig/.conf.cmd
---- pristine-linux-2.6.12/scripts/kconfig/.conf.cmd	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/kconfig/.conf.cmd	2006-02-25 00:12:54.495870385 +0100
-@@ -0,0 +1 @@
-+cmd_scripts/kconfig/conf := gcc  -o scripts/kconfig/conf scripts/kconfig/conf.o scripts/kconfig/zconf.tab.o  
-Binary files pristine-linux-2.6.12/scripts/kconfig/conf.o and linux-2.6.12-xen/scripts/kconfig/conf.o differ.
-diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.conf.o.cmd linux-2.6.12-xen/scripts/kconfig/.conf.o.cmd
---- pristine-linux-2.6.12/scripts/kconfig/.conf.o.cmd	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/kconfig/.conf.o.cmd	2006-02-25 00:12:50.559463772 +0100
-@@ -0,0 +1,55 @@
-+cmd_scripts/kconfig/conf.o := gcc -Wp,-MD,scripts/kconfig/.conf.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer       -c -o scripts/kconfig/conf.o scripts/kconfig/conf.c
-+
-+deps_scripts/kconfig/conf.o := \
-+  scripts/kconfig/conf.c \
-+  /usr/include/ctype.h \
-+  /usr/include/features.h \
-+  /usr/include/sys/cdefs.h \
-+  /usr/include/gnu/stubs.h \
-+  /usr/include/bits/types.h \
-+  /usr/include/bits/wordsize.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
-+  /usr/include/bits/typesizes.h \
-+  /usr/include/endian.h \
-+  /usr/include/bits/endian.h \
-+  /usr/include/stdlib.h \
-+  /usr/include/sys/types.h \
-+  /usr/include/time.h \
-+  /usr/include/sys/select.h \
-+  /usr/include/bits/select.h \
-+  /usr/include/bits/sigset.h \
-+  /usr/include/bits/time.h \
-+  /usr/include/sys/sysmacros.h \
-+  /usr/include/bits/pthreadtypes.h \
-+  /usr/include/bits/sched.h \
-+  /usr/include/alloca.h \
-+  /usr/include/string.h \
-+  /usr/include/bits/string.h \
-+  /usr/include/bits/string2.h \
-+  /usr/include/unistd.h \
-+  /usr/include/bits/posix_opt.h \
-+  /usr/include/bits/confname.h \
-+  /usr/include/getopt.h \
-+  /usr/include/sys/stat.h \
-+  /usr/include/bits/stat.h \
-+  scripts/kconfig/lkc.h \
-+  scripts/kconfig/expr.h \
-+  /usr/include/stdio.h \
-+  /usr/include/libio.h \
-+  /usr/include/_G_config.h \
-+  /usr/include/wchar.h \
-+  /usr/include/bits/wchar.h \
-+  /usr/include/gconv.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
-+  /usr/include/bits/stdio_lim.h \
-+  /usr/include/bits/sys_errlist.h \
-+  /usr/include/bits/stdio.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdbool.h \
-+  /usr/include/libintl.h \
-+  /usr/include/locale.h \
-+  /usr/include/bits/locale.h \
-+  scripts/kconfig/lkc_proto.h \
-+
-+scripts/kconfig/conf.o: $(deps_scripts/kconfig/conf.o)
-+
-+$(deps_scripts/kconfig/conf.o):
-Binary files pristine-linux-2.6.12/scripts/kconfig/kxgettext.o and linux-2.6.12-xen/scripts/kconfig/kxgettext.o differ.
-diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.kxgettext.o.cmd linux-2.6.12-xen/scripts/kconfig/.kxgettext.o.cmd
---- pristine-linux-2.6.12/scripts/kconfig/.kxgettext.o.cmd	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/kconfig/.kxgettext.o.cmd	2006-02-25 00:12:51.373341086 +0100
-@@ -0,0 +1,48 @@
-+cmd_scripts/kconfig/kxgettext.o := gcc -Wp,-MD,scripts/kconfig/.kxgettext.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer       -c -o scripts/kconfig/kxgettext.o scripts/kconfig/kxgettext.c
-+
-+deps_scripts/kconfig/kxgettext.o := \
-+  scripts/kconfig/kxgettext.c \
-+  /usr/include/stdlib.h \
-+  /usr/include/features.h \
-+  /usr/include/sys/cdefs.h \
-+  /usr/include/gnu/stubs.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
-+  /usr/include/sys/types.h \
-+  /usr/include/bits/types.h \
-+  /usr/include/bits/wordsize.h \
-+  /usr/include/bits/typesizes.h \
-+  /usr/include/time.h \
-+  /usr/include/endian.h \
-+  /usr/include/bits/endian.h \
-+  /usr/include/sys/select.h \
-+  /usr/include/bits/select.h \
-+  /usr/include/bits/sigset.h \
-+  /usr/include/bits/time.h \
-+  /usr/include/sys/sysmacros.h \
-+  /usr/include/bits/pthreadtypes.h \
-+  /usr/include/bits/sched.h \
-+  /usr/include/alloca.h \
-+  /usr/include/string.h \
-+  /usr/include/bits/string.h \
-+  /usr/include/bits/string2.h \
-+  scripts/kconfig/lkc.h \
-+  scripts/kconfig/expr.h \
-+  /usr/include/stdio.h \
-+  /usr/include/libio.h \
-+  /usr/include/_G_config.h \
-+  /usr/include/wchar.h \
-+  /usr/include/bits/wchar.h \
-+  /usr/include/gconv.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
-+  /usr/include/bits/stdio_lim.h \
-+  /usr/include/bits/sys_errlist.h \
-+  /usr/include/bits/stdio.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdbool.h \
-+  /usr/include/libintl.h \
-+  /usr/include/locale.h \
-+  /usr/include/bits/locale.h \
-+  scripts/kconfig/lkc_proto.h \
-+
-+scripts/kconfig/kxgettext.o: $(deps_scripts/kconfig/kxgettext.o)
-+
-+$(deps_scripts/kconfig/kxgettext.o):
-diff -Nurp pristine-linux-2.6.12/scripts/kconfig/lex.zconf.c linux-2.6.12-xen/scripts/kconfig/lex.zconf.c
---- pristine-linux-2.6.12/scripts/kconfig/lex.zconf.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/kconfig/lex.zconf.c	2006-02-25 00:12:52.353193381 +0100
-@@ -0,0 +1,3688 @@
-+
-+#line 3 "lex.zconf.c"
-+
-+#define  YY_INT_ALIGNED short int
-+
-+/* A lexical scanner generated by flex */
-+
-+#define FLEX_SCANNER
-+#define YY_FLEX_MAJOR_VERSION 2
-+#define YY_FLEX_MINOR_VERSION 5
-+#define YY_FLEX_SUBMINOR_VERSION 31
-+#if YY_FLEX_SUBMINOR_VERSION > 0
-+#define FLEX_BETA
-+#endif
-+
-+/* First, we deal with  platform-specific or compiler-specific issues. */
-+
-+/* begin standard C headers. */
-+#include <stdio.h>
-+#include <string.h>
-+#include <errno.h>
-+#include <stdlib.h>
-+
-+/* end standard C headers. */
-+
-+/* flex integer type definitions */
-+
-+#ifndef FLEXINT_H
-+#define FLEXINT_H
-+
-+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
-+
-+#if defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L
-+#include <inttypes.h>
-+typedef int8_t flex_int8_t;
-+typedef uint8_t flex_uint8_t;
-+typedef int16_t flex_int16_t;
-+typedef uint16_t flex_uint16_t;
-+typedef int32_t flex_int32_t;
-+typedef uint32_t flex_uint32_t;
-+#else
-+typedef signed char flex_int8_t;
-+typedef short int flex_int16_t;
-+typedef int flex_int32_t;
-+typedef unsigned char flex_uint8_t; 
-+typedef unsigned short int flex_uint16_t;
-+typedef unsigned int flex_uint32_t;
-+#endif /* ! C99 */
-+
-+/* Limits of integral types. */
-+#ifndef INT8_MIN
-+#define INT8_MIN               (-128)
-+#endif
-+#ifndef INT16_MIN
-+#define INT16_MIN              (-32767-1)
-+#endif
-+#ifndef INT32_MIN
-+#define INT32_MIN              (-2147483647-1)
-+#endif
-+#ifndef INT8_MAX
-+#define INT8_MAX               (127)
-+#endif
-+#ifndef INT16_MAX
-+#define INT16_MAX              (32767)
-+#endif
-+#ifndef INT32_MAX
-+#define INT32_MAX              (2147483647)
-+#endif
-+#ifndef UINT8_MAX
-+#define UINT8_MAX              (255U)
-+#endif
-+#ifndef UINT16_MAX
-+#define UINT16_MAX             (65535U)
-+#endif
-+#ifndef UINT32_MAX
-+#define UINT32_MAX             (4294967295U)
-+#endif
-+
-+#endif /* ! FLEXINT_H */
-+
-+#ifdef __cplusplus
-+
-+/* The "const" storage-class-modifier is valid. */
-+#define YY_USE_CONST
-+
-+#else	/* ! __cplusplus */
-+
-+#if __STDC__
-+
-+#define YY_USE_CONST
-+
-+#endif	/* __STDC__ */
-+#endif	/* ! __cplusplus */
-+
-+#ifdef YY_USE_CONST
-+#define yyconst const
-+#else
-+#define yyconst
-+#endif
-+
-+/* Returned upon end-of-file. */
-+#define YY_NULL 0
-+
-+/* Promotes a possibly negative, possibly signed char to an unsigned
-+ * integer for use as an array index.  If the signed char is negative,
-+ * we want to instead treat it as an 8-bit unsigned char, hence the
-+ * double cast.
-+ */
-+#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c)
-+
-+/* Enter a start condition.  This macro really ought to take a parameter,
-+ * but we do it the disgusting crufty way forced on us by the ()-less
-+ * definition of BEGIN.
-+ */
-+#define BEGIN (yy_start) = 1 + 2 *
-+
-+/* Translate the current start state into a value that can be later handed
-+ * to BEGIN to return to the state.  The YYSTATE alias is for lex
-+ * compatibility.
-+ */
-+#define YY_START (((yy_start) - 1) / 2)
-+#define YYSTATE YY_START
-+
-+/* Action number for EOF rule of a given start state. */
-+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
-+
-+/* Special action meaning "start processing a new file". */
-+#define YY_NEW_FILE zconfrestart(zconfin  )
-+
-+#define YY_END_OF_BUFFER_CHAR 0
-+
-+/* Size of default input buffer. */
-+#ifndef YY_BUF_SIZE
-+#define YY_BUF_SIZE 16384
-+#endif
-+
-+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
-+#define YY_TYPEDEF_YY_BUFFER_STATE
-+typedef struct yy_buffer_state *YY_BUFFER_STATE;
-+#endif
-+
-+extern int zconfleng;
-+
-+extern FILE *zconfin, *zconfout;
-+
-+#define EOB_ACT_CONTINUE_SCAN 0
-+#define EOB_ACT_END_OF_FILE 1
-+#define EOB_ACT_LAST_MATCH 2
-+
-+    #define YY_LESS_LINENO(n)
-+    
-+/* Return all but the first "n" matched characters back to the input stream. */
-+#define yyless(n) \
-+	do \
-+		{ \
-+		/* Undo effects of setting up zconftext. */ \
-+        int yyless_macro_arg = (n); \
-+        YY_LESS_LINENO(yyless_macro_arg);\
-+		*yy_cp = (yy_hold_char); \
-+		YY_RESTORE_YY_MORE_OFFSET \
-+		(yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
-+		YY_DO_BEFORE_ACTION; /* set up zconftext again */ \
-+		} \
-+	while ( 0 )
-+
-+#define unput(c) yyunput( c, (yytext_ptr)  )
-+
-+/* The following is because we cannot portably get our hands on size_t
-+ * (without autoconf's help, which isn't available because we want
-+ * flex-generated scanners to compile on their own).
-+ */
-+
-+#ifndef YY_TYPEDEF_YY_SIZE_T
-+#define YY_TYPEDEF_YY_SIZE_T
-+typedef unsigned int yy_size_t;
-+#endif
-+
-+#ifndef YY_STRUCT_YY_BUFFER_STATE
-+#define YY_STRUCT_YY_BUFFER_STATE
-+struct yy_buffer_state
-+	{
-+	FILE *yy_input_file;
-+
-+	char *yy_ch_buf;		/* input buffer */
-+	char *yy_buf_pos;		/* current position in input buffer */
-+
-+	/* Size of input buffer in bytes, not including room for EOB
-+	 * characters.
-+	 */
-+	yy_size_t yy_buf_size;
-+
-+	/* Number of characters read into yy_ch_buf, not including EOB
-+	 * characters.
-+	 */
-+	int yy_n_chars;
-+
-+	/* Whether we "own" the buffer - i.e., we know we created it,
-+	 * and can realloc() it to grow it, and should free() it to
-+	 * delete it.
-+	 */
-+	int yy_is_our_buffer;
-+
-+	/* Whether this is an "interactive" input source; if so, and
-+	 * if we're using stdio for input, then we want to use getc()
-+	 * instead of fread(), to make sure we stop fetching input after
-+	 * each newline.
-+	 */
-+	int yy_is_interactive;
-+
-+	/* Whether we're considered to be at the beginning of a line.
-+	 * If so, '^' rules will be active on the next match, otherwise
-+	 * not.
-+	 */
-+	int yy_at_bol;
-+
-+    int yy_bs_lineno; /**< The line count. */
-+    int yy_bs_column; /**< The column count. */
-+    
-+	/* Whether to try to fill the input buffer when we reach the
-+	 * end of it.
-+	 */
-+	int yy_fill_buffer;
-+
-+	int yy_buffer_status;
-+
-+#define YY_BUFFER_NEW 0
-+#define YY_BUFFER_NORMAL 1
-+	/* When an EOF's been seen but there's still some text to process
-+	 * then we mark the buffer as YY_EOF_PENDING, to indicate that we
-+	 * shouldn't try reading from the input source any more.  We might
-+	 * still have a bunch of tokens to match, though, because of
-+	 * possible backing-up.
-+	 *
-+	 * When we actually see the EOF, we change the status to "new"
-+	 * (via zconfrestart()), so that the user can continue scanning by
-+	 * just pointing zconfin at a new input file.
-+	 */
-+#define YY_BUFFER_EOF_PENDING 2
-+
-+	};
-+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
-+
-+/* Stack of input buffers. */
-+static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */
-+static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */
-+static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */
-+
-+/* We provide macros for accessing buffer states in case in the
-+ * future we want to put the buffer states in a more general
-+ * "scanner state".
-+ *
-+ * Returns the top of the stack, or NULL.
-+ */
-+#define YY_CURRENT_BUFFER ( (yy_buffer_stack) \
-+                          ? (yy_buffer_stack)[(yy_buffer_stack_top)] \
-+                          : NULL)
-+
-+/* Same as previous macro, but useful when we know that the buffer stack is not
-+ * NULL or when we need an lvalue. For internal use only.
-+ */
-+#define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)]
-+
-+/* yy_hold_char holds the character lost when zconftext is formed. */
-+static char yy_hold_char;
-+static int yy_n_chars;		/* number of characters read into yy_ch_buf */
-+int zconfleng;
-+
-+/* Points to current character in buffer. */
-+static char *yy_c_buf_p = (char *) 0;
-+static int yy_init = 1;		/* whether we need to initialize */
-+static int yy_start = 0;	/* start state number */
-+
-+/* Flag which is used to allow zconfwrap()'s to do buffer switches
-+ * instead of setting up a fresh zconfin.  A bit of a hack ...
-+ */
-+static int yy_did_buffer_switch_on_eof;
-+
-+void zconfrestart (FILE *input_file  );
-+void zconf_switch_to_buffer (YY_BUFFER_STATE new_buffer  );
-+YY_BUFFER_STATE zconf_create_buffer (FILE *file,int size  );
-+void zconf_delete_buffer (YY_BUFFER_STATE b  );
-+void zconf_flush_buffer (YY_BUFFER_STATE b  );
-+void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer  );
-+void zconfpop_buffer_state (void );
-+
-+static void zconfensure_buffer_stack (void );
-+static void zconf_load_buffer_state (void );
-+static void zconf_init_buffer (YY_BUFFER_STATE b,FILE *file  );
-+
-+#define YY_FLUSH_BUFFER zconf_flush_buffer(YY_CURRENT_BUFFER )
-+
-+YY_BUFFER_STATE zconf_scan_buffer (char *base,yy_size_t size  );
-+YY_BUFFER_STATE zconf_scan_string (yyconst char *yy_str  );
-+YY_BUFFER_STATE zconf_scan_bytes (yyconst char *bytes,int len  );
-+
-+void *zconfalloc (yy_size_t  );
-+void *zconfrealloc (void *,yy_size_t  );
-+void zconffree (void *  );
-+
-+#define yy_new_buffer zconf_create_buffer
-+
-+#define yy_set_interactive(is_interactive) \
-+	{ \
-+	if ( ! YY_CURRENT_BUFFER ){ \
-+        zconfensure_buffer_stack (); \
-+		YY_CURRENT_BUFFER_LVALUE =    \
-+            zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
-+	} \
-+	YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
-+	}
-+
-+#define yy_set_bol(at_bol) \
-+	{ \
-+	if ( ! YY_CURRENT_BUFFER ){\
-+        zconfensure_buffer_stack (); \
-+		YY_CURRENT_BUFFER_LVALUE =    \
-+            zconf_create_buffer(zconfin,YY_BUF_SIZE ); \
-+	} \
-+	YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
-+	}
-+
-+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
-+
-+/* Begin user sect3 */
-+
-+#define zconfwrap(n) 1
-+#define YY_SKIP_YYWRAP
-+
-+typedef unsigned char YY_CHAR;
-+
-+FILE *zconfin = (FILE *) 0, *zconfout = (FILE *) 0;
-+
-+typedef int yy_state_type;
-+
-+extern int zconflineno;
-+
-+int zconflineno = 1;
-+
-+extern char *zconftext;
-+#define yytext_ptr zconftext
-+static yyconst flex_int16_t yy_nxt[][38] =
-+    {
-+    {
-+        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-+        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-+        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-+        0,    0,    0,    0,    0,    0,    0,    0
-+    },
-+
-+    {
-+       11,   12,   13,   14,   12,   12,   15,   12,   12,   12,
-+       12,   12,   12,   12,   12,   12,   12,   12,   12,   12,
-+       12,   12,   12,   12,   12,   12,   12,   12,   12,   12,
-+       12,   12,   12,   12,   12,   12,   12,   12
-+    },
-+
-+    {
-+       11,   12,   13,   14,   12,   12,   15,   12,   12,   12,
-+       12,   12,   12,   12,   12,   12,   12,   12,   12,   12,
-+
-+       12,   12,   12,   12,   12,   12,   12,   12,   12,   12,
-+       12,   12,   12,   12,   12,   12,   12,   12
-+    },
-+
-+    {
-+       11,   16,   16,   17,   16,   16,   16,   16,   16,   16,
-+       16,   16,   16,   18,   16,   16,   18,   18,   19,   20,
-+       21,   22,   18,   18,   23,   24,   18,   25,   18,   26,
-+       27,   18,   28,   29,   30,   18,   18,   16
-+    },
-+
-+    {
-+       11,   16,   16,   17,   16,   16,   16,   16,   16,   16,
-+       16,   16,   16,   18,   16,   16,   18,   18,   19,   20,
-+       21,   22,   18,   18,   23,   24,   18,   25,   18,   26,
-+       27,   18,   28,   29,   30,   18,   18,   16
-+
-+    },
-+
-+    {
-+       11,   31,   32,   33,   31,   31,   31,   31,   31,   31,
-+       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
-+       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
-+       31,   31,   31,   31,   31,   31,   31,   31
-+    },
-+
-+    {
-+       11,   31,   32,   33,   31,   31,   31,   31,   31,   31,
-+       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
-+       31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
-+       31,   31,   31,   31,   31,   31,   31,   31
-+    },
-+
-+    {
-+       11,   34,   34,   35,   34,   36,   34,   34,   36,   34,
-+       34,   34,   34,   34,   34,   37,   34,   34,   34,   34,
-+
-+       34,   34,   34,   34,   34,   34,   34,   34,   34,   34,
-+       34,   34,   34,   34,   34,   34,   34,   34
-+    },
-+
-+    {
-+       11,   34,   34,   35,   34,   36,   34,   34,   36,   34,
-+       34,   34,   34,   34,   34,   37,   34,   34,   34,   34,
-+       34,   34,   34,   34,   34,   34,   34,   34,   34,   34,
-+       34,   34,   34,   34,   34,   34,   34,   34
-+    },
-+
-+    {
-+       11,   38,   38,   39,   40,   41,   42,   43,   41,   44,
-+       45,   46,   47,   47,   48,   49,   47,   47,   47,   47,
-+       47,   47,   47,   47,   47,   50,   47,   47,   47,   51,
-+       47,   47,   47,   47,   47,   47,   47,   52
-+
-+    },
-+
-+    {
-+       11,   38,   38,   39,   40,   41,   42,   43,   41,   44,
-+       45,   46,   47,   47,   48,   49,   47,   47,   47,   47,
-+       47,   47,   47,   47,   47,   50,   47,   47,   47,   51,
-+       47,   47,   47,   47,   47,   47,   47,   52
-+    },
-+
-+    {
-+      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,
-+      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,
-+      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11,
-+      -11,  -11,  -11,  -11,  -11,  -11,  -11,  -11
-+    },
-+
-+    {
-+       11,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,
-+      -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,
-+
-+      -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12,
-+      -12,  -12,  -12,  -12,  -12,  -12,  -12,  -12
-+    },
-+
-+    {
-+       11,  -13,   53,   54,  -13,  -13,   55,  -13,  -13,  -13,
-+      -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,
-+      -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13,
-+      -13,  -13,  -13,  -13,  -13,  -13,  -13,  -13
-+    },
-+
-+    {
-+       11,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,
-+      -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,
-+      -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14,
-+      -14,  -14,  -14,  -14,  -14,  -14,  -14,  -14
-+
-+    },
-+
-+    {
-+       11,   56,   56,   57,   56,   56,   56,   56,   56,   56,
-+       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
-+       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
-+       56,   56,   56,   56,   56,   56,   56,   56
-+    },
-+
-+    {
-+       11,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,
-+      -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,
-+      -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16,
-+      -16,  -16,  -16,  -16,  -16,  -16,  -16,  -16
-+    },
-+
-+    {
-+       11,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,
-+      -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,
-+
-+      -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17,
-+      -17,  -17,  -17,  -17,  -17,  -17,  -17,  -17
-+    },
-+
-+    {
-+       11,  -18,  -18,  -18,  -18,  -18,  -18,  -18,  -18,  -18,
-+      -18,  -18,  -18,   58,  -18,  -18,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -18
-+    },
-+
-+    {
-+       11,  -19,  -19,  -19,  -19,  -19,  -19,  -19,  -19,  -19,
-+      -19,  -19,  -19,   58,  -19,  -19,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   59,
-+       58,   58,   58,   58,   58,   58,   58,  -19
-+
-+    },
-+
-+    {
-+       11,  -20,  -20,  -20,  -20,  -20,  -20,  -20,  -20,  -20,
-+      -20,  -20,  -20,   58,  -20,  -20,   58,   58,   58,   58,
-+       58,   58,   58,   58,   60,   58,   58,   58,   58,   61,
-+       58,   58,   58,   58,   58,   58,   58,  -20
-+    },
-+
-+    {
-+       11,  -21,  -21,  -21,  -21,  -21,  -21,  -21,  -21,  -21,
-+      -21,  -21,  -21,   58,  -21,  -21,   58,   58,   58,   58,
-+       58,   62,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -21
-+    },
-+
-+    {
-+       11,  -22,  -22,  -22,  -22,  -22,  -22,  -22,  -22,  -22,
-+      -22,  -22,  -22,   58,  -22,  -22,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   63,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -22
-+    },
-+
-+    {
-+       11,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,  -23,
-+      -23,  -23,  -23,   58,  -23,  -23,   58,   58,   58,   58,
-+       58,   64,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -23
-+    },
-+
-+    {
-+       11,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,  -24,
-+      -24,  -24,  -24,   58,  -24,  -24,   58,   58,   58,   58,
-+       58,   58,   65,   58,   58,   58,   58,   58,   66,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -24
-+
-+    },
-+
-+    {
-+       11,  -25,  -25,  -25,  -25,  -25,  -25,  -25,  -25,  -25,
-+      -25,  -25,  -25,   58,  -25,  -25,   58,   67,   58,   58,
-+       58,   68,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -25
-+    },
-+
-+    {
-+       11,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,  -26,
-+      -26,  -26,  -26,   58,  -26,  -26,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       69,   58,   58,   58,   58,   58,   58,  -26
-+    },
-+
-+    {
-+       11,  -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27,  -27,
-+      -27,  -27,  -27,   58,  -27,  -27,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   70,   58,   58,   58,   58,  -27
-+    },
-+
-+    {
-+       11,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,  -28,
-+      -28,  -28,  -28,   58,  -28,  -28,   58,   71,   58,   58,
-+       58,   72,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -28
-+    },
-+
-+    {
-+       11,  -29,  -29,  -29,  -29,  -29,  -29,  -29,  -29,  -29,
-+      -29,  -29,  -29,   58,  -29,  -29,   58,   58,   58,   58,
-+       58,   73,   58,   58,   58,   58,   58,   58,   58,   74,
-+       58,   58,   58,   58,   75,   58,   58,  -29
-+
-+    },
-+
-+    {
-+       11,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,  -30,
-+      -30,  -30,  -30,   58,  -30,  -30,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   76,   58,   58,   58,   58,  -30
-+    },
-+
-+    {
-+       11,   77,   77,  -31,   77,   77,   77,   77,   77,   77,
-+       77,   77,   77,   77,   77,   77,   77,   77,   77,   77,
-+       77,   77,   77,   77,   77,   77,   77,   77,   77,   77,
-+       77,   77,   77,   77,   77,   77,   77,   77
-+    },
-+
-+    {
-+       11,  -32,   78,   79,  -32,  -32,  -32,  -32,  -32,  -32,
-+      -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,
-+
-+      -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32,
-+      -32,  -32,  -32,  -32,  -32,  -32,  -32,  -32
-+    },
-+
-+    {
-+       11,   80,  -33,  -33,   80,   80,   80,   80,   80,   80,
-+       80,   80,   80,   80,   80,   80,   80,   80,   80,   80,
-+       80,   80,   80,   80,   80,   80,   80,   80,   80,   80,
-+       80,   80,   80,   80,   80,   80,   80,   80
-+    },
-+
-+    {
-+       11,   81,   81,   82,   81,  -34,   81,   81,  -34,   81,
-+       81,   81,   81,   81,   81,  -34,   81,   81,   81,   81,
-+       81,   81,   81,   81,   81,   81,   81,   81,   81,   81,
-+       81,   81,   81,   81,   81,   81,   81,   81
-+
-+    },
-+
-+    {
-+       11,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,
-+      -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,
-+      -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35,
-+      -35,  -35,  -35,  -35,  -35,  -35,  -35,  -35
-+    },
-+
-+    {
-+       11,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,
-+      -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,
-+      -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36,
-+      -36,  -36,  -36,  -36,  -36,  -36,  -36,  -36
-+    },
-+
-+    {
-+       11,   83,   83,   84,   83,   83,   83,   83,   83,   83,
-+       83,   83,   83,   83,   83,   83,   83,   83,   83,   83,
-+
-+       83,   83,   83,   83,   83,   83,   83,   83,   83,   83,
-+       83,   83,   83,   83,   83,   83,   83,   83
-+    },
-+
-+    {
-+       11,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,
-+      -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,
-+      -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38,
-+      -38,  -38,  -38,  -38,  -38,  -38,  -38,  -38
-+    },
-+
-+    {
-+       11,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,
-+      -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,
-+      -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39,
-+      -39,  -39,  -39,  -39,  -39,  -39,  -39,  -39
-+
-+    },
-+
-+    {
-+       11,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,
-+      -40,  -40,  -40,  -40,   85,  -40,  -40,  -40,  -40,  -40,
-+      -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40,
-+      -40,  -40,  -40,  -40,  -40,  -40,  -40,  -40
-+    },
-+
-+    {
-+       11,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,
-+      -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,
-+      -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41,
-+      -41,  -41,  -41,  -41,  -41,  -41,  -41,  -41
-+    },
-+
-+    {
-+       11,   86,   86,  -42,   86,   86,   86,   86,   86,   86,
-+       86,   86,   86,   86,   86,   86,   86,   86,   86,   86,
-+
-+       86,   86,   86,   86,   86,   86,   86,   86,   86,   86,
-+       86,   86,   86,   86,   86,   86,   86,   86
-+    },
-+
-+    {
-+       11,  -43,  -43,  -43,  -43,  -43,  -43,   87,  -43,  -43,
-+      -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,
-+      -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43,
-+      -43,  -43,  -43,  -43,  -43,  -43,  -43,  -43
-+    },
-+
-+    {
-+       11,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,
-+      -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,
-+      -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44,
-+      -44,  -44,  -44,  -44,  -44,  -44,  -44,  -44
-+
-+    },
-+
-+    {
-+       11,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,
-+      -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,
-+      -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45,
-+      -45,  -45,  -45,  -45,  -45,  -45,  -45,  -45
-+    },
-+
-+    {
-+       11,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,  -46,
-+      -46,   88,   89,   89,  -46,  -46,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,  -46
-+    },
-+
-+    {
-+       11,  -47,  -47,  -47,  -47,  -47,  -47,  -47,  -47,  -47,
-+      -47,   89,   89,   89,  -47,  -47,   89,   89,   89,   89,
-+
-+       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,  -47
-+    },
-+
-+    {
-+       11,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,
-+      -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,
-+      -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48,
-+      -48,  -48,  -48,  -48,  -48,  -48,  -48,  -48
-+    },
-+
-+    {
-+       11,  -49,  -49,   90,  -49,  -49,  -49,  -49,  -49,  -49,
-+      -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,
-+      -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49,
-+      -49,  -49,  -49,  -49,  -49,  -49,  -49,  -49
-+
-+    },
-+
-+    {
-+       11,  -50,  -50,  -50,  -50,  -50,  -50,  -50,  -50,  -50,
-+      -50,   89,   89,   89,  -50,  -50,   89,   89,   89,   89,
-+       89,   89,   91,   89,   89,   89,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,  -50
-+    },
-+
-+    {
-+       11,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,  -51,
-+      -51,   89,   89,   89,  -51,  -51,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,   89,   92,   89,
-+       89,   89,   89,   89,   89,   89,   89,  -51
-+    },
-+
-+    {
-+       11,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,
-+      -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,
-+
-+      -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,  -52,
-+      -52,  -52,  -52,  -52,  -52,  -52,  -52,   93
-+    },
-+
-+    {
-+       11,  -53,   53,   54,  -53,  -53,   55,  -53,  -53,  -53,
-+      -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,
-+      -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53,
-+      -53,  -53,  -53,  -53,  -53,  -53,  -53,  -53
-+    },
-+
-+    {
-+       11,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,
-+      -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,
-+      -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54,
-+      -54,  -54,  -54,  -54,  -54,  -54,  -54,  -54
-+
-+    },
-+
-+    {
-+       11,   56,   56,   57,   56,   56,   56,   56,   56,   56,
-+       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
-+       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
-+       56,   56,   56,   56,   56,   56,   56,   56
-+    },
-+
-+    {
-+       11,   56,   56,   57,   56,   56,   56,   56,   56,   56,
-+       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
-+       56,   56,   56,   56,   56,   56,   56,   56,   56,   56,
-+       56,   56,   56,   56,   56,   56,   56,   56
-+    },
-+
-+    {
-+       11,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,
-+      -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,
-+
-+      -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57,
-+      -57,  -57,  -57,  -57,  -57,  -57,  -57,  -57
-+    },
-+
-+    {
-+       11,  -58,  -58,  -58,  -58,  -58,  -58,  -58,  -58,  -58,
-+      -58,  -58,  -58,   58,  -58,  -58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -58
-+    },
-+
-+    {
-+       11,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,  -59,
-+      -59,  -59,  -59,   58,  -59,  -59,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   94,
-+       58,   58,   58,   58,   58,   58,   58,  -59
-+
-+    },
-+
-+    {
-+       11,  -60,  -60,  -60,  -60,  -60,  -60,  -60,  -60,  -60,
-+      -60,  -60,  -60,   58,  -60,  -60,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   95,
-+       58,   58,   58,   58,   58,   58,   58,  -60
-+    },
-+
-+    {
-+       11,  -61,  -61,  -61,  -61,  -61,  -61,  -61,  -61,  -61,
-+      -61,  -61,  -61,   58,  -61,  -61,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   96,   97,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -61
-+    },
-+
-+    {
-+       11,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,  -62,
-+      -62,  -62,  -62,   58,  -62,  -62,   58,   58,   58,   58,
-+
-+       58,   58,   98,   58,   58,   58,   58,   58,   58,   58,
-+       99,   58,   58,   58,   58,   58,   58,  -62
-+    },
-+
-+    {
-+       11,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,  -63,
-+      -63,  -63,  -63,   58,  -63,  -63,   58,  100,   58,   58,
-+      101,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -63
-+    },
-+
-+    {
-+       11,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,  -64,
-+      -64,  -64,  -64,   58,  -64,  -64,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,  102,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,  103,  -64
-+
-+    },
-+
-+    {
-+       11,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,  -65,
-+      -65,  -65,  -65,   58,  -65,  -65,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -65
-+    },
-+
-+    {
-+       11,  -66,  -66,  -66,  -66,  -66,  -66,  -66,  -66,  -66,
-+      -66,  -66,  -66,   58,  -66,  -66,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  104,   58,   58,  -66
-+    },
-+
-+    {
-+       11,  -67,  -67,  -67,  -67,  -67,  -67,  -67,  -67,  -67,
-+      -67,  -67,  -67,   58,  -67,  -67,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,  105,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -67
-+    },
-+
-+    {
-+       11,  -68,  -68,  -68,  -68,  -68,  -68,  -68,  -68,  -68,
-+      -68,  -68,  -68,   58,  -68,  -68,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,  106,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -68
-+    },
-+
-+    {
-+       11,  -69,  -69,  -69,  -69,  -69,  -69,  -69,  -69,  -69,
-+      -69,  -69,  -69,   58,  -69,  -69,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  107,   58,   58,  -69
-+
-+    },
-+
-+    {
-+       11,  -70,  -70,  -70,  -70,  -70,  -70,  -70,  -70,  -70,
-+      -70,  -70,  -70,   58,  -70,  -70,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,  108,
-+       58,   58,   58,   58,   58,   58,   58,  -70
-+    },
-+
-+    {
-+       11,  -71,  -71,  -71,  -71,  -71,  -71,  -71,  -71,  -71,
-+      -71,  -71,  -71,   58,  -71,  -71,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,  109,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -71
-+    },
-+
-+    {
-+       11,  -72,  -72,  -72,  -72,  -72,  -72,  -72,  -72,  -72,
-+      -72,  -72,  -72,   58,  -72,  -72,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,  110,   58,   58,   58,   58,   58,  -72
-+    },
-+
-+    {
-+       11,  -73,  -73,  -73,  -73,  -73,  -73,  -73,  -73,  -73,
-+      -73,  -73,  -73,   58,  -73,  -73,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,  111,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -73
-+    },
-+
-+    {
-+       11,  -74,  -74,  -74,  -74,  -74,  -74,  -74,  -74,  -74,
-+      -74,  -74,  -74,   58,  -74,  -74,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  112,   58,  -74
-+
-+    },
-+
-+    {
-+       11,  -75,  -75,  -75,  -75,  -75,  -75,  -75,  -75,  -75,
-+      -75,  -75,  -75,   58,  -75,  -75,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,  113,   58,   58,   58,   58,  -75
-+    },
-+
-+    {
-+       11,  -76,  -76,  -76,  -76,  -76,  -76,  -76,  -76,  -76,
-+      -76,  -76,  -76,   58,  -76,  -76,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  114,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -76
-+    },
-+
-+    {
-+       11,   77,   77,  -77,   77,   77,   77,   77,   77,   77,
-+       77,   77,   77,   77,   77,   77,   77,   77,   77,   77,
-+
-+       77,   77,   77,   77,   77,   77,   77,   77,   77,   77,
-+       77,   77,   77,   77,   77,   77,   77,   77
-+    },
-+
-+    {
-+       11,  -78,   78,   79,  -78,  -78,  -78,  -78,  -78,  -78,
-+      -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,
-+      -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78,
-+      -78,  -78,  -78,  -78,  -78,  -78,  -78,  -78
-+    },
-+
-+    {
-+       11,   80,  -79,  -79,   80,   80,   80,   80,   80,   80,
-+       80,   80,   80,   80,   80,   80,   80,   80,   80,   80,
-+       80,   80,   80,   80,   80,   80,   80,   80,   80,   80,
-+       80,   80,   80,   80,   80,   80,   80,   80
-+
-+    },
-+
-+    {
-+       11,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,
-+      -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,
-+      -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80,
-+      -80,  -80,  -80,  -80,  -80,  -80,  -80,  -80
-+    },
-+
-+    {
-+       11,   81,   81,   82,   81,  -81,   81,   81,  -81,   81,
-+       81,   81,   81,   81,   81,  -81,   81,   81,   81,   81,
-+       81,   81,   81,   81,   81,   81,   81,   81,   81,   81,
-+       81,   81,   81,   81,   81,   81,   81,   81
-+    },
-+
-+    {
-+       11,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,
-+      -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,
-+
-+      -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82,
-+      -82,  -82,  -82,  -82,  -82,  -82,  -82,  -82
-+    },
-+
-+    {
-+       11,  -83,  -83,   84,  -83,  -83,  -83,  -83,  -83,  -83,
-+      -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,
-+      -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83,
-+      -83,  -83,  -83,  -83,  -83,  -83,  -83,  -83
-+    },
-+
-+    {
-+       11,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,
-+      -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,
-+      -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84,
-+      -84,  -84,  -84,  -84,  -84,  -84,  -84,  -84
-+
-+    },
-+
-+    {
-+       11,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,
-+      -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,
-+      -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85,
-+      -85,  -85,  -85,  -85,  -85,  -85,  -85,  -85
-+    },
-+
-+    {
-+       11,   86,   86,  -86,   86,   86,   86,   86,   86,   86,
-+       86,   86,   86,   86,   86,   86,   86,   86,   86,   86,
-+       86,   86,   86,   86,   86,   86,   86,   86,   86,   86,
-+       86,   86,   86,   86,   86,   86,   86,   86
-+    },
-+
-+    {
-+       11,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,
-+      -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,
-+
-+      -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87,
-+      -87,  -87,  -87,  -87,  -87,  -87,  -87,  -87
-+    },
-+
-+    {
-+       11,  -88,  -88,  -88,  -88,  -88,  -88,  -88,  -88,  -88,
-+      -88,  115,   89,   89,  -88,  -88,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,  -88
-+    },
-+
-+    {
-+       11,  -89,  -89,  -89,  -89,  -89,  -89,  -89,  -89,  -89,
-+      -89,   89,   89,   89,  -89,  -89,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,  -89
-+
-+    },
-+
-+    {
-+       11,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,
-+      -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,
-+      -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90,
-+      -90,  -90,  -90,  -90,  -90,  -90,  -90,  -90
-+    },
-+
-+    {
-+       11,  -91,  -91,  -91,  -91,  -91,  -91,  -91,  -91,  -91,
-+      -91,   89,   89,   89,  -91,  -91,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,  -91
-+    },
-+
-+    {
-+       11,  -92,  -92,  -92,  -92,  -92,  -92,  -92,  -92,  -92,
-+      -92,   89,   89,   89,  -92,  -92,   89,   89,   89,   89,
-+
-+       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,  -92
-+    },
-+
-+    {
-+       11,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,
-+      -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,
-+      -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93,
-+      -93,  -93,  -93,  -93,  -93,  -93,  -93,  -93
-+    },
-+
-+    {
-+       11,  -94,  -94,  -94,  -94,  -94,  -94,  -94,  -94,  -94,
-+      -94,  -94,  -94,   58,  -94,  -94,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,  116,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -94
-+
-+    },
-+
-+    {
-+       11,  -95,  -95,  -95,  -95,  -95,  -95,  -95,  -95,  -95,
-+      -95,  -95,  -95,   58,  -95,  -95,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  117,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -95
-+    },
-+
-+    {
-+       11,  -96,  -96,  -96,  -96,  -96,  -96,  -96,  -96,  -96,
-+      -96,  -96,  -96,   58,  -96,  -96,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  118,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -96
-+    },
-+
-+    {
-+       11,  -97,  -97,  -97,  -97,  -97,  -97,  -97,  -97,  -97,
-+      -97,  -97,  -97,   58,  -97,  -97,   58,   58,   58,   58,
-+
-+       58,   58,  119,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -97
-+    },
-+
-+    {
-+       11,  -98,  -98,  -98,  -98,  -98,  -98,  -98,  -98,  -98,
-+      -98,  -98,  -98,   58,  -98,  -98,  120,  121,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -98
-+    },
-+
-+    {
-+       11,  -99,  -99,  -99,  -99,  -99,  -99,  -99,  -99,  -99,
-+      -99,  -99,  -99,   58,  -99,  -99,   58,   58,   58,   58,
-+       58,  122,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  -99
-+
-+    },
-+
-+    {
-+       11, -100, -100, -100, -100, -100, -100, -100, -100, -100,
-+     -100, -100, -100,   58, -100, -100,   58,   58,  123,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -100
-+    },
-+
-+    {
-+       11, -101, -101, -101, -101, -101, -101, -101, -101, -101,
-+     -101, -101, -101,   58, -101, -101,   58,   58,   58,  124,
-+       58,   58,   58,   58,   58,  125,   58,  126,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -101
-+    },
-+
-+    {
-+       11, -102, -102, -102, -102, -102, -102, -102, -102, -102,
-+     -102, -102, -102,   58, -102, -102,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+      127,   58,   58,   58,   58,   58,   58, -102
-+    },
-+
-+    {
-+       11, -103, -103, -103, -103, -103, -103, -103, -103, -103,
-+     -103, -103, -103,   58, -103, -103,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -103
-+    },
-+
-+    {
-+       11, -104, -104, -104, -104, -104, -104, -104, -104, -104,
-+     -104, -104, -104,   58, -104, -104,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -104
-+
-+    },
-+
-+    {
-+       11, -105, -105, -105, -105, -105, -105, -105, -105, -105,
-+     -105, -105, -105,   58, -105, -105,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,  128,   58,
-+       58,   58,   58,   58,   58,   58,   58, -105
-+    },
-+
-+    {
-+       11, -106, -106, -106, -106, -106, -106, -106, -106, -106,
-+     -106, -106, -106,   58, -106, -106,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  129,   58, -106
-+    },
-+
-+    {
-+       11, -107, -107, -107, -107, -107, -107, -107, -107, -107,
-+     -107, -107, -107,   58, -107, -107,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,  130,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -107
-+    },
-+
-+    {
-+       11, -108, -108, -108, -108, -108, -108, -108, -108, -108,
-+     -108, -108, -108,   58, -108, -108,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  131,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -108
-+    },
-+
-+    {
-+       11, -109, -109, -109, -109, -109, -109, -109, -109, -109,
-+     -109, -109, -109,   58, -109, -109,   58,   58,   58,   58,
-+       58,   58,   58,  132,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -109
-+
-+    },
-+
-+    {
-+       11, -110, -110, -110, -110, -110, -110, -110, -110, -110,
-+     -110, -110, -110,   58, -110, -110,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  133,   58, -110
-+    },
-+
-+    {
-+       11, -111, -111, -111, -111, -111, -111, -111, -111, -111,
-+     -111, -111, -111,   58, -111, -111,   58,   58,   58,   58,
-+       58,  134,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -111
-+    },
-+
-+    {
-+       11, -112, -112, -112, -112, -112, -112, -112, -112, -112,
-+     -112, -112, -112,   58, -112, -112,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,  135,   58,   58,   58,   58, -112
-+    },
-+
-+    {
-+       11, -113, -113, -113, -113, -113, -113, -113, -113, -113,
-+     -113, -113, -113,   58, -113, -113,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  136,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -113
-+    },
-+
-+    {
-+       11, -114, -114, -114, -114, -114, -114, -114, -114, -114,
-+     -114, -114, -114,   58, -114, -114,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,  137,   58,   58,   58, -114
-+
-+    },
-+
-+    {
-+       11, -115, -115, -115, -115, -115, -115, -115, -115, -115,
-+     -115,   89,   89,   89, -115, -115,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89,   89,   89,   89,
-+       89,   89,   89,   89,   89,   89,   89, -115
-+    },
-+
-+    {
-+       11, -116, -116, -116, -116, -116, -116, -116, -116, -116,
-+     -116, -116, -116,   58, -116, -116,   58,   58,   58,   58,
-+       58,  138,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -116
-+    },
-+
-+    {
-+       11, -117, -117, -117, -117, -117, -117, -117, -117, -117,
-+     -117, -117, -117,   58, -117, -117,   58,   58,   58,  139,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -117
-+    },
-+
-+    {
-+       11, -118, -118, -118, -118, -118, -118, -118, -118, -118,
-+     -118, -118, -118,   58, -118, -118,   58,   58,   58,   58,
-+       58,  140,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -118
-+    },
-+
-+    {
-+       11, -119, -119, -119, -119, -119, -119, -119, -119, -119,
-+     -119, -119, -119,   58, -119, -119,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  141,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -119
-+
-+    },
-+
-+    {
-+       11, -120, -120, -120, -120, -120, -120, -120, -120, -120,
-+     -120, -120, -120,   58, -120, -120,   58,   58,  142,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  143,   58,   58, -120
-+    },
-+
-+    {
-+       11, -121, -121, -121, -121, -121, -121, -121, -121, -121,
-+     -121, -121, -121,   58, -121, -121,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  144,   58, -121
-+    },
-+
-+    {
-+       11, -122, -122, -122, -122, -122, -122, -122, -122, -122,
-+     -122, -122, -122,   58, -122, -122,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,  145,   58,
-+       58,   58,   58,   58,   58,   58,   58, -122
-+    },
-+
-+    {
-+       11, -123, -123, -123, -123, -123, -123, -123, -123, -123,
-+     -123, -123, -123,   58, -123, -123,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,  146,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -123
-+    },
-+
-+    {
-+       11, -124, -124, -124, -124, -124, -124, -124, -124, -124,
-+     -124, -124, -124,   58, -124, -124,   58,   58,   58,   58,
-+       58,   58,   58,   58,  147,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -124
-+
-+    },
-+
-+    {
-+       11, -125, -125, -125, -125, -125, -125, -125, -125, -125,
-+     -125, -125, -125,   58, -125, -125,   58,   58,   58,   58,
-+       58,   58,  148,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -125
-+    },
-+
-+    {
-+       11, -126, -126, -126, -126, -126, -126, -126, -126, -126,
-+     -126, -126, -126,   58, -126, -126,   58,   58,   58,   58,
-+       58,  149,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -126
-+    },
-+
-+    {
-+       11, -127, -127, -127, -127, -127, -127, -127, -127, -127,
-+     -127, -127, -127,   58, -127, -127,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -127
-+    },
-+
-+    {
-+       11, -128, -128, -128, -128, -128, -128, -128, -128, -128,
-+     -128, -128, -128,   58, -128, -128,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,  150,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -128
-+    },
-+
-+    {
-+       11, -129, -129, -129, -129, -129, -129, -129, -129, -129,
-+     -129, -129, -129,   58, -129, -129,   58,   58,   58,  151,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -129
-+
-+    },
-+
-+    {
-+       11, -130, -130, -130, -130, -130, -130, -130, -130, -130,
-+     -130, -130, -130,   58, -130, -130,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,  152,
-+       58,   58,   58,   58,   58,   58,   58, -130
-+    },
-+
-+    {
-+       11, -131, -131, -131, -131, -131, -131, -131, -131, -131,
-+     -131, -131, -131,   58, -131, -131,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+      153,   58,   58,   58,   58,   58,   58, -131
-+    },
-+
-+    {
-+       11, -132, -132, -132, -132, -132, -132, -132, -132, -132,
-+     -132, -132, -132,   58, -132, -132,   58,   58,   58,   58,
-+
-+       58,  154,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -132
-+    },
-+
-+    {
-+       11, -133, -133, -133, -133, -133, -133, -133, -133, -133,
-+     -133, -133, -133,   58, -133, -133,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  155,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -133
-+    },
-+
-+    {
-+       11, -134, -134, -134, -134, -134, -134, -134, -134, -134,
-+     -134, -134, -134,   58, -134, -134,   58,   58,   58,  156,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -134
-+
-+    },
-+
-+    {
-+       11, -135, -135, -135, -135, -135, -135, -135, -135, -135,
-+     -135, -135, -135,   58, -135, -135,   58,   58,   58,  157,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -135
-+    },
-+
-+    {
-+       11, -136, -136, -136, -136, -136, -136, -136, -136, -136,
-+     -136, -136, -136,   58, -136, -136,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,  158,   58,
-+       58,   58,   58,   58,   58,   58,   58, -136
-+    },
-+
-+    {
-+       11, -137, -137, -137, -137, -137, -137, -137, -137, -137,
-+     -137, -137, -137,   58, -137, -137,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  159,   58,   58, -137
-+    },
-+
-+    {
-+       11, -138, -138, -138, -138, -138, -138, -138, -138, -138,
-+     -138, -138, -138,   58, -138, -138,   58,  160,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -138
-+    },
-+
-+    {
-+       11, -139, -139, -139, -139, -139, -139, -139, -139, -139,
-+     -139, -139, -139,   58, -139, -139,   58,   58,   58,   58,
-+       58,  161,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -139
-+
-+    },
-+
-+    {
-+       11, -140, -140, -140, -140, -140, -140, -140, -140, -140,
-+     -140, -140, -140,   58, -140, -140,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,  162,   58,
-+       58,   58,   58,   58,   58,   58,   58, -140
-+    },
-+
-+    {
-+       11, -141, -141, -141, -141, -141, -141, -141, -141, -141,
-+     -141, -141, -141,   58, -141, -141,   58,   58,   58,   58,
-+       58,   58,   58,  163,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -141
-+    },
-+
-+    {
-+       11, -142, -142, -142, -142, -142, -142, -142, -142, -142,
-+     -142, -142, -142,   58, -142, -142,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,  164,
-+       58,   58,   58,   58,   58,   58,   58, -142
-+    },
-+
-+    {
-+       11, -143, -143, -143, -143, -143, -143, -143, -143, -143,
-+     -143, -143, -143,   58, -143, -143,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,  165,   58,   58,   58,   58, -143
-+    },
-+
-+    {
-+       11, -144, -144, -144, -144, -144, -144, -144, -144, -144,
-+     -144, -144, -144,   58, -144, -144,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,  166,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -144
-+
-+    },
-+
-+    {
-+       11, -145, -145, -145, -145, -145, -145, -145, -145, -145,
-+     -145, -145, -145,   58, -145, -145,   58,   58,   58,   58,
-+      167,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -145
-+    },
-+
-+    {
-+       11, -146, -146, -146, -146, -146, -146, -146, -146, -146,
-+     -146, -146, -146,   58, -146, -146,   58,   58,   58,   58,
-+       58,  168,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -146
-+    },
-+
-+    {
-+       11, -147, -147, -147, -147, -147, -147, -147, -147, -147,
-+     -147, -147, -147,   58, -147, -147,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,  169,
-+       58,   58,   58,   58,   58,   58,   58, -147
-+    },
-+
-+    {
-+       11, -148, -148, -148, -148, -148, -148, -148, -148, -148,
-+     -148, -148, -148,   58, -148, -148,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -148
-+    },
-+
-+    {
-+       11, -149, -149, -149, -149, -149, -149, -149, -149, -149,
-+     -149, -149, -149,   58, -149, -149,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,  170,   58,
-+       58,   58,   58,   58,   58,   58,   58, -149
-+
-+    },
-+
-+    {
-+       11, -150, -150, -150, -150, -150, -150, -150, -150, -150,
-+     -150, -150, -150,   58, -150, -150,   58,   58,   58,   58,
-+       58,  171,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -150
-+    },
-+
-+    {
-+       11, -151, -151, -151, -151, -151, -151, -151, -151, -151,
-+     -151, -151, -151,   58, -151, -151,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,  172,
-+       58,   58,   58,   58,   58,   58,   58, -151
-+    },
-+
-+    {
-+       11, -152, -152, -152, -152, -152, -152, -152, -152, -152,
-+     -152, -152, -152,   58, -152, -152,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,  173,   58,
-+       58,   58,   58,   58,   58,   58,   58, -152
-+    },
-+
-+    {
-+       11, -153, -153, -153, -153, -153, -153, -153, -153, -153,
-+     -153, -153, -153,   58, -153, -153,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  174,   58,   58, -153
-+    },
-+
-+    {
-+       11, -154, -154, -154, -154, -154, -154, -154, -154, -154,
-+     -154, -154, -154,   58, -154, -154,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -154
-+
-+    },
-+
-+    {
-+       11, -155, -155, -155, -155, -155, -155, -155, -155, -155,
-+     -155, -155, -155,   58, -155, -155,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,  175,   58,   58,   58,   58, -155
-+    },
-+
-+    {
-+       11, -156, -156, -156, -156, -156, -156, -156, -156, -156,
-+     -156, -156, -156,   58, -156, -156,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  176,   58,   58, -156
-+    },
-+
-+    {
-+       11, -157, -157, -157, -157, -157, -157, -157, -157, -157,
-+     -157, -157, -157,   58, -157, -157,   58,   58,   58,   58,
-+
-+       58,  177,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -157
-+    },
-+
-+    {
-+       11, -158, -158, -158, -158, -158, -158, -158, -158, -158,
-+     -158, -158, -158,   58, -158, -158,   58,   58,   58,   58,
-+       58,   58,   58,  178,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -158
-+    },
-+
-+    {
-+       11, -159, -159, -159, -159, -159, -159, -159, -159, -159,
-+     -159, -159, -159,   58, -159, -159,   58,  179,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -159
-+
-+    },
-+
-+    {
-+       11, -160, -160, -160, -160, -160, -160, -160, -160, -160,
-+     -160, -160, -160,   58, -160, -160,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,  180,   58,
-+       58,   58,   58,   58,   58,   58,   58, -160
-+    },
-+
-+    {
-+       11, -161, -161, -161, -161, -161, -161, -161, -161, -161,
-+     -161, -161, -161,   58, -161, -161,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -161
-+    },
-+
-+    {
-+       11, -162, -162, -162, -162, -162, -162, -162, -162, -162,
-+     -162, -162, -162,   58, -162, -162,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  181,   58,   58, -162
-+    },
-+
-+    {
-+       11, -163, -163, -163, -163, -163, -163, -163, -163, -163,
-+     -163, -163, -163,   58, -163, -163,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -163
-+    },
-+
-+    {
-+       11, -164, -164, -164, -164, -164, -164, -164, -164, -164,
-+     -164, -164, -164,   58, -164, -164,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,  182,
-+       58,   58,   58,   58,   58,   58,   58, -164
-+
-+    },
-+
-+    {
-+       11, -165, -165, -165, -165, -165, -165, -165, -165, -165,
-+     -165, -165, -165,   58, -165, -165,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  183,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -165
-+    },
-+
-+    {
-+       11, -166, -166, -166, -166, -166, -166, -166, -166, -166,
-+     -166, -166, -166,   58, -166, -166,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  184,   58,   58, -166
-+    },
-+
-+    {
-+       11, -167, -167, -167, -167, -167, -167, -167, -167, -167,
-+     -167, -167, -167,   58, -167, -167,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,  185,   58,   58,   58, -167
-+    },
-+
-+    {
-+       11, -168, -168, -168, -168, -168, -168, -168, -168, -168,
-+     -168, -168, -168,   58, -168, -168,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -168
-+    },
-+
-+    {
-+       11, -169, -169, -169, -169, -169, -169, -169, -169, -169,
-+     -169, -169, -169,   58, -169, -169,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  186,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -169
-+
-+    },
-+
-+    {
-+       11, -170, -170, -170, -170, -170, -170, -170, -170, -170,
-+     -170, -170, -170,   58, -170, -170,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  187,   58, -170
-+    },
-+
-+    {
-+       11, -171, -171, -171, -171, -171, -171, -171, -171, -171,
-+     -171, -171, -171,   58, -171, -171,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,  188,   58,
-+       58,   58,   58,   58,   58,   58,   58, -171
-+    },
-+
-+    {
-+       11, -172, -172, -172, -172, -172, -172, -172, -172, -172,
-+     -172, -172, -172,   58, -172, -172,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,  189,   58,
-+       58,   58,   58,   58,   58,   58,   58, -172
-+    },
-+
-+    {
-+       11, -173, -173, -173, -173, -173, -173, -173, -173, -173,
-+     -173, -173, -173,   58, -173, -173,   58,  190,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -173
-+    },
-+
-+    {
-+       11, -174, -174, -174, -174, -174, -174, -174, -174, -174,
-+     -174, -174, -174,   58, -174, -174,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -174
-+
-+    },
-+
-+    {
-+       11, -175, -175, -175, -175, -175, -175, -175, -175, -175,
-+     -175, -175, -175,   58, -175, -175,   58,   58,   58,   58,
-+       58,  191,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -175
-+    },
-+
-+    {
-+       11, -176, -176, -176, -176, -176, -176, -176, -176, -176,
-+     -176, -176, -176,   58, -176, -176,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -176
-+    },
-+
-+    {
-+       11, -177, -177, -177, -177, -177, -177, -177, -177, -177,
-+     -177, -177, -177,   58, -177, -177,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -177
-+    },
-+
-+    {
-+       11, -178, -178, -178, -178, -178, -178, -178, -178, -178,
-+     -178, -178, -178,   58, -178, -178,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -178
-+    },
-+
-+    {
-+       11, -179, -179, -179, -179, -179, -179, -179, -179, -179,
-+     -179, -179, -179,   58, -179, -179,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  192,   58,   58, -179
-+
-+    },
-+
-+    {
-+       11, -180, -180, -180, -180, -180, -180, -180, -180, -180,
-+     -180, -180, -180,   58, -180, -180,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -180
-+    },
-+
-+    {
-+       11, -181, -181, -181, -181, -181, -181, -181, -181, -181,
-+     -181, -181, -181,   58, -181, -181,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -181
-+    },
-+
-+    {
-+       11, -182, -182, -182, -182, -182, -182, -182, -182, -182,
-+     -182, -182, -182,   58, -182, -182,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,  193,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -182
-+    },
-+
-+    {
-+       11, -183, -183, -183, -183, -183, -183, -183, -183, -183,
-+     -183, -183, -183,   58, -183, -183,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,  194,   58,   58,   58, -183
-+    },
-+
-+    {
-+       11, -184, -184, -184, -184, -184, -184, -184, -184, -184,
-+     -184, -184, -184,   58, -184, -184,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -184
-+
-+    },
-+
-+    {
-+       11, -185, -185, -185, -185, -185, -185, -185, -185, -185,
-+     -185, -185, -185,   58, -185, -185,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -185
-+    },
-+
-+    {
-+       11, -186, -186, -186, -186, -186, -186, -186, -186, -186,
-+     -186, -186, -186,   58, -186, -186,   58,   58,   58,  195,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -186
-+    },
-+
-+    {
-+       11, -187, -187, -187, -187, -187, -187, -187, -187, -187,
-+     -187, -187, -187,   58, -187, -187,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -187
-+    },
-+
-+    {
-+       11, -188, -188, -188, -188, -188, -188, -188, -188, -188,
-+     -188, -188, -188,   58, -188, -188,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,  196,   58, -188
-+    },
-+
-+    {
-+       11, -189, -189, -189, -189, -189, -189, -189, -189, -189,
-+     -189, -189, -189,   58, -189, -189,   58,   58,   58,   58,
-+       58,   58,  197,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -189
-+
-+    },
-+
-+    {
-+       11, -190, -190, -190, -190, -190, -190, -190, -190, -190,
-+     -190, -190, -190,   58, -190, -190,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,  198,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -190
-+    },
-+
-+    {
-+       11, -191, -191, -191, -191, -191, -191, -191, -191, -191,
-+     -191, -191, -191,   58, -191, -191,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,  199,   58,   58,   58, -191
-+    },
-+
-+    {
-+       11, -192, -192, -192, -192, -192, -192, -192, -192, -192,
-+     -192, -192, -192,   58, -192, -192,   58,   58,   58,   58,
-+
-+       58,  200,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -192
-+    },
-+
-+    {
-+       11, -193, -193, -193, -193, -193, -193, -193, -193, -193,
-+     -193, -193, -193,   58, -193, -193,   58,   58,   58,   58,
-+       58,  201,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -193
-+    },
-+
-+    {
-+       11, -194, -194, -194, -194, -194, -194, -194, -194, -194,
-+     -194, -194, -194,   58, -194, -194,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  202,   58,   58, -194
-+
-+    },
-+
-+    {
-+       11, -195, -195, -195, -195, -195, -195, -195, -195, -195,
-+     -195, -195, -195,   58, -195, -195,   58,   58,   58,   58,
-+       58,  203,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -195
-+    },
-+
-+    {
-+       11, -196, -196, -196, -196, -196, -196, -196, -196, -196,
-+     -196, -196, -196,   58, -196, -196,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -196
-+    },
-+
-+    {
-+       11, -197, -197, -197, -197, -197, -197, -197, -197, -197,
-+     -197, -197, -197,   58, -197, -197,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,  204,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -197
-+    },
-+
-+    {
-+       11, -198, -198, -198, -198, -198, -198, -198, -198, -198,
-+     -198, -198, -198,   58, -198, -198,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -198
-+    },
-+
-+    {
-+       11, -199, -199, -199, -199, -199, -199, -199, -199, -199,
-+     -199, -199, -199,   58, -199, -199,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -199
-+
-+    },
-+
-+    {
-+       11, -200, -200, -200, -200, -200, -200, -200, -200, -200,
-+     -200, -200, -200,   58, -200, -200,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -200
-+    },
-+
-+    {
-+       11, -201, -201, -201, -201, -201, -201, -201, -201, -201,
-+     -201, -201, -201,   58, -201, -201,   58,  205,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -201
-+    },
-+
-+    {
-+       11, -202, -202, -202, -202, -202, -202, -202, -202, -202,
-+     -202, -202, -202,   58, -202, -202,   58,  206,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -202
-+    },
-+
-+    {
-+       11, -203, -203, -203, -203, -203, -203, -203, -203, -203,
-+     -203, -203, -203,   58, -203, -203,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -203
-+    },
-+
-+    {
-+       11, -204, -204, -204, -204, -204, -204, -204, -204, -204,
-+     -204, -204, -204,   58, -204, -204,   58,   58,   58,   58,
-+       58,   58,   58,  207,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -204
-+
-+    },
-+
-+    {
-+       11, -205, -205, -205, -205, -205, -205, -205, -205, -205,
-+     -205, -205, -205,   58, -205, -205,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,  208,   58,
-+       58,   58,   58,   58,   58,   58,   58, -205
-+    },
-+
-+    {
-+       11, -206, -206, -206, -206, -206, -206, -206, -206, -206,
-+     -206, -206, -206,   58, -206, -206,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,  209,   58,   58, -206
-+    },
-+
-+    {
-+       11, -207, -207, -207, -207, -207, -207, -207, -207, -207,
-+     -207, -207, -207,   58, -207, -207,   58,   58,   58,   58,
-+
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -207
-+    },
-+
-+    {
-+       11, -208, -208, -208, -208, -208, -208, -208, -208, -208,
-+     -208, -208, -208,   58, -208, -208,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -208
-+    },
-+
-+    {
-+       11, -209, -209, -209, -209, -209, -209, -209, -209, -209,
-+     -209, -209, -209,   58, -209, -209,   58,   58,   58,   58,
-+       58,  210,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -209
-+
-+    },
-+
-+    {
-+       11, -210, -210, -210, -210, -210, -210, -210, -210, -210,
-+     -210, -210, -210,   58, -210, -210,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58,   58,   58,   58,
-+       58,   58,   58,   58,   58,   58,   58, -210
-+    },
-+
-+    } ;
-+
-+static yy_state_type yy_get_previous_state (void );
-+static yy_state_type yy_try_NUL_trans (yy_state_type current_state  );
-+static int yy_get_next_buffer (void );
-+static void yy_fatal_error (yyconst char msg[]  );
-+
-+/* Done after the current pattern has been matched and before the
-+ * corresponding action - sets up zconftext.
-+ */
-+#define YY_DO_BEFORE_ACTION \
-+	(yytext_ptr) = yy_bp; \
-+	zconfleng = (size_t) (yy_cp - yy_bp); \
-+	(yy_hold_char) = *yy_cp; \
-+	*yy_cp = '\0'; \
-+	(yy_c_buf_p) = yy_cp;
-+
-+#define YY_NUM_RULES 64
-+#define YY_END_OF_BUFFER 65
-+/* This struct is not used in this scanner,
-+   but its presence is necessary. */
-+struct yy_trans_info
-+	{
-+	flex_int32_t yy_verify;
-+	flex_int32_t yy_nxt;
-+	};
-+static yyconst flex_int16_t yy_accept[211] =
-+    {   0,
-+        0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
-+       65,    5,    4,    3,    2,   36,   37,   35,   35,   35,
-+       35,   35,   35,   35,   35,   35,   35,   35,   35,   35,
-+       63,   60,   62,   55,   59,   58,   57,   53,   48,   42,
-+       47,   51,   53,   40,   41,   50,   50,   43,   53,   50,
-+       50,   53,    4,    3,    2,    2,    1,   35,   35,   35,
-+       35,   35,   35,   35,   16,   35,   35,   35,   35,   35,
-+       35,   35,   35,   35,   35,   35,   63,   60,   62,   61,
-+       55,   54,   57,   56,   44,   51,   38,   50,   50,   52,
-+       45,   46,   39,   35,   35,   35,   35,   35,   35,   35,
-+
-+       35,   35,   30,   29,   35,   35,   35,   35,   35,   35,
-+       35,   35,   35,   35,   49,   25,   35,   35,   35,   35,
-+       35,   35,   35,   35,   35,   35,   15,   35,    7,   35,
-+       35,   35,   35,   35,   35,   35,   35,   35,   35,   35,
-+       35,   35,   35,   35,   35,   35,   35,   17,   35,   35,
-+       35,   35,   35,   34,   35,   35,   35,   35,   35,   35,
-+       10,   35,   13,   35,   35,   35,   35,   33,   35,   35,
-+       35,   35,   35,   22,   35,   32,    9,   31,   35,   26,
-+       12,   35,   35,   21,   18,   35,    8,   35,   35,   35,
-+       35,   35,   27,   35,   35,    6,   35,   20,   19,   23,
-+
-+       35,   35,   11,   35,   35,   35,   14,   28,   35,   24
-+    } ;
-+
-+static yyconst flex_int32_t yy_ec[256] =
-+    {   0,
-+        1,    1,    1,    1,    1,    1,    1,    1,    2,    3,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    2,    4,    5,    6,    1,    1,    7,    8,    9,
-+       10,    1,    1,    1,   11,   12,   12,   13,   13,   13,
-+       13,   13,   13,   13,   13,   13,   13,    1,    1,    1,
-+       14,    1,    1,    1,   13,   13,   13,   13,   13,   13,
-+       13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
-+       13,   13,   13,   13,   13,   13,   13,   13,   13,   13,
-+        1,   15,    1,    1,   16,    1,   17,   18,   19,   20,
-+
-+       21,   22,   23,   24,   25,   13,   13,   26,   27,   28,
-+       29,   30,   31,   32,   33,   34,   35,   13,   13,   36,
-+       13,   13,    1,   37,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-+        1,    1,    1,    1,    1
-+    } ;
-+
-+extern int zconf_flex_debug;
-+int zconf_flex_debug = 0;
-+
-+/* The intent behind this definition is that it'll catch
-+ * any uses of REJECT which flex missed.
-+ */
-+#define REJECT reject_used_but_not_detected
-+#define yymore() yymore_used_but_not_detected
-+#define YY_MORE_ADJ 0
-+#define YY_RESTORE_YY_MORE_OFFSET
-+char *zconftext;
-+
-+/*
-+ * Copyright (C) 2002 Roman Zippel <zippel at linux-m68k.org>
-+ * Released under the terms of the GNU GPL v2.0.
-+ */
-+
-+#include <limits.h>
-+#include <stdio.h>
-+#include <stdlib.h>
-+#include <string.h>
-+#include <unistd.h>
-+
-+#define LKC_DIRECT_LINK
-+#include "lkc.h"
-+
-+#define START_STRSIZE	16
-+
-+char *text;
-+static char *text_ptr;
-+static int text_size, text_asize;
-+
-+struct buffer {
-+        struct buffer *parent;
-+        YY_BUFFER_STATE state;
-+};
-+
-+struct buffer *current_buf;
-+
-+static int last_ts, first_ts;
-+
-+static void zconf_endhelp(void);
-+static struct buffer *zconf_endfile(void);
-+
-+void new_string(void)
-+{
-+	text = malloc(START_STRSIZE);
-+	text_asize = START_STRSIZE;
-+	text_ptr = text;
-+	text_size = 0;
-+	*text_ptr = 0;
-+}
-+
-+void append_string(const char *str, int size)
-+{
-+	int new_size = text_size + size + 1;
-+	if (new_size > text_asize) {
-+		text = realloc(text, new_size);
-+		text_asize = new_size;
-+		text_ptr = text + text_size;
-+	}
-+	memcpy(text_ptr, str, size);
-+	text_ptr += size;
-+	text_size += size;
-+	*text_ptr = 0;
-+}
-+
-+void alloc_string(const char *str, int size)
-+{
-+	text = malloc(size + 1);
-+	memcpy(text, str, size);
-+	text[size] = 0;
-+}
-+
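new_string(), append_string() and alloc_string() above form the growable buffer the scanner uses to collect quoted strings and help text into the global `text`. As a reading aid, here is a minimal self-contained sketch of the same grow-and-append pattern (buf_new, buf_append and the sample strings are invented for illustration, they are not part of the patch):

	/* Illustrative sketch only: a standalone restatement of the
	 * realloc-and-append behaviour of new_string()/append_string(). */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static char *buf;
	static int buf_size, buf_asize;

	static void buf_new(void)
	{
		buf_asize = 16;			/* like START_STRSIZE above */
		buf = malloc(buf_asize);
		buf_size = 0;
		buf[0] = 0;
	}

	static void buf_append(const char *str, int size)
	{
		if (buf_size + size + 1 > buf_asize) {	/* grow before appending */
			buf_asize = buf_size + size + 1;
			buf = realloc(buf, buf_asize);
		}
		memcpy(buf + buf_size, str, size);
		buf_size += size;
		buf[buf_size] = 0;		/* keep the buffer NUL-terminated */
	}

	int main(void)
	{
		buf_new();
		buf_append("this is a quoted ", 17);
		buf_append("word", 4);
		printf("%s\n", buf);		/* prints: this is a quoted word */
		free(buf);
		return 0;
	}

The real append_string() differs only in that it keeps text_ptr pointing at the terminating NUL, so repeated appends never re-scan the string.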
-+#define INITIAL 0
-+#define COMMAND 1
-+#define HELP 2
-+#define STRING 3
-+#define PARAM 4
-+
-+/* Special case for "unistd.h", since it is non-ANSI. We include it way
-+ * down here because we want the user's section 1 to have been scanned first.
-+ * The user has a chance to override it with an option.
-+ */
-+#include <unistd.h>
-+
-+#ifndef YY_EXTRA_TYPE
-+#define YY_EXTRA_TYPE void *
-+#endif
-+
-+/* Macros after this point can all be overridden by user definitions in
-+ * section 1.
-+ */
-+
-+#ifndef YY_SKIP_YYWRAP
-+#ifdef __cplusplus
-+extern "C" int zconfwrap (void );
-+#else
-+extern int zconfwrap (void );
-+#endif
-+#endif
-+
-+    static void yyunput (int c,char *buf_ptr  );
-+    
-+#ifndef yytext_ptr
-+static void yy_flex_strncpy (char *,yyconst char *,int );
-+#endif
-+
-+#ifdef YY_NEED_STRLEN
-+static int yy_flex_strlen (yyconst char * );
-+#endif
-+
-+#ifndef YY_NO_INPUT
-+
-+#ifdef __cplusplus
-+static int yyinput (void );
-+#else
-+static int input (void );
-+#endif
-+
-+#endif
-+
-+/* Amount of stuff to slurp up with each read. */
-+#ifndef YY_READ_BUF_SIZE
-+#define YY_READ_BUF_SIZE 8192
-+#endif
-+
-+/* Copy whatever the last rule matched to the standard output. */
-+#ifndef ECHO
-+/* This used to be an fputs(), but since the string might contain NUL's,
-+ * we now use fwrite().
-+ */
-+#define ECHO (void) fwrite( zconftext, zconfleng, 1, zconfout )
-+#endif
-+
-+/* Gets input and stuffs it into "buf".  number of characters read, or YY_NULL,
-+ * is returned in "result".
-+ */
-+#ifndef YY_INPUT
-+#define YY_INPUT(buf,result,max_size) \
-+	errno=0; \
-+	while ( (result = read( fileno(zconfin), (char *) buf, max_size )) < 0 ) \
-+	{ \
-+		if( errno != EINTR) \
-+		{ \
-+			YY_FATAL_ERROR( "input in flex scanner failed" ); \
-+			break; \
-+		} \
-+		errno=0; \
-+		clearerr(zconfin); \
-+	}\
-+\
-+
-+#endif
-+
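The YY_INPUT macro above reads raw bytes with read(2), retrying when the call is interrupted by a signal (EINTR) and treating any other failure as fatal. A minimal sketch of the same retry pattern written as an ordinary function (read_retry and the 256-byte buffer are illustrative only, not taken from the patch):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	static size_t read_retry(FILE *fp, char *buf, size_t max_size)
	{
		ssize_t n;

		errno = 0;
		while ((n = read(fileno(fp), buf, max_size)) < 0) {
			if (errno != EINTR) {		/* real error: give up */
				fprintf(stderr, "input in flex scanner failed\n");
				exit(2);
			}
			errno = 0;			/* interrupted: try again */
			clearerr(fp);
		}
		return (size_t)n;			/* 0 means end of file */
	}

	int main(void)
	{
		char buf[256];
		size_t got = read_retry(stdin, buf, sizeof(buf));
		printf("read %zu byte(s)\n", got);
		return 0;
	}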
-+/* No semi-colon after return; correct usage is to write "yyterminate();" -
-+ * we don't want an extra ';' after the "return" because that will cause
-+ * some compilers to complain about unreachable statements.
-+ */
-+#ifndef yyterminate
-+#define yyterminate() return YY_NULL
-+#endif
-+
-+/* Number of entries by which start-condition stack grows. */
-+#ifndef YY_START_STACK_INCR
-+#define YY_START_STACK_INCR 25
-+#endif
-+
-+/* Report a fatal error. */
-+#ifndef YY_FATAL_ERROR
-+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
-+#endif
-+
-+/* end tables serialization structures and prototypes */
-+
-+/* Default declaration of generated scanner - a define so the user can
-+ * easily add parameters.
-+ */
-+#ifndef YY_DECL
-+#define YY_DECL_IS_OURS 1
-+
-+extern int zconflex (void);
-+
-+#define YY_DECL int zconflex (void)
-+#endif /* !YY_DECL */
-+
-+/* Code executed at the beginning of each rule, after zconftext and zconfleng
-+ * have been set up.
-+ */
-+#ifndef YY_USER_ACTION
-+#define YY_USER_ACTION
-+#endif
-+
-+/* Code executed at the end of each rule. */
-+#ifndef YY_BREAK
-+#define YY_BREAK break;
-+#endif
-+
-+#define YY_RULE_SETUP \
-+	YY_USER_ACTION
-+
-+/** The main scanner function which does all the work.
-+ */
-+YY_DECL
-+{
-+	register yy_state_type yy_current_state;
-+	register char *yy_cp, *yy_bp;
-+	register int yy_act;
-+    
-+	int str = 0;
-+	int ts, i;
-+
-+	if ( (yy_init) )
-+		{
-+		(yy_init) = 0;
-+
-+#ifdef YY_USER_INIT
-+		YY_USER_INIT;
-+#endif
-+
-+		if ( ! (yy_start) )
-+			(yy_start) = 1;	/* first start state */
-+
-+		if ( ! zconfin )
-+			zconfin = stdin;
-+
-+		if ( ! zconfout )
-+			zconfout = stdout;
-+
-+		if ( ! YY_CURRENT_BUFFER ) {
-+			zconfensure_buffer_stack ();
-+			YY_CURRENT_BUFFER_LVALUE =
-+				zconf_create_buffer(zconfin,YY_BUF_SIZE );
-+		}
-+
-+		zconf_load_buffer_state( );
-+		}
-+
-+	while ( 1 )		/* loops until end-of-file is reached */
-+		{
-+		yy_cp = (yy_c_buf_p);
-+
-+		/* Support of zconftext. */
-+		*yy_cp = (yy_hold_char);
-+
-+		/* yy_bp points to the position in yy_ch_buf of the start of
-+		 * the current run.
-+		 */
-+		yy_bp = yy_cp;
-+
-+		yy_current_state = (yy_start);
-+yy_match:
-+		while ( (yy_current_state = yy_nxt[yy_current_state][ yy_ec[YY_SC_TO_UI(*yy_cp)]  ]) > 0 )
-+			++yy_cp;
-+
-+		yy_current_state = -yy_current_state;
-+
-+yy_find_action:
-+		yy_act = yy_accept[yy_current_state];
-+
-+		YY_DO_BEFORE_ACTION;
-+
-+do_action:	/* This label is used only to access EOF actions. */
-+
-+		switch ( yy_act )
-+	{ /* beginning of action switch */
-+case 1:
-+/* rule 1 can match eol */
-+YY_RULE_SETUP
-+current_file->lineno++;
-+	YY_BREAK
-+case 2:
-+YY_RULE_SETUP
-+
-+	YY_BREAK
-+case 3:
-+/* rule 3 can match eol */
-+YY_RULE_SETUP
-+current_file->lineno++; return T_EOL;
-+	YY_BREAK
-+case 4:
-+YY_RULE_SETUP
-+{
-+	BEGIN(COMMAND);
-+}
-+	YY_BREAK
-+case 5:
-+YY_RULE_SETUP
-+{
-+	unput(zconftext[0]);
-+	BEGIN(COMMAND);
-+}
-+	YY_BREAK
-+
-+case 6:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_MAINMENU;
-+	YY_BREAK
-+case 7:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_MENU;
-+	YY_BREAK
-+case 8:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_ENDMENU;
-+	YY_BREAK
-+case 9:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_SOURCE;
-+	YY_BREAK
-+case 10:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_CHOICE;
-+	YY_BREAK
-+case 11:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_ENDCHOICE;
-+	YY_BREAK
-+case 12:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_COMMENT;
-+	YY_BREAK
-+case 13:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_CONFIG;
-+	YY_BREAK
-+case 14:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_MENUCONFIG;
-+	YY_BREAK
-+case 15:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_HELP;
-+	YY_BREAK
-+case 16:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_IF;
-+	YY_BREAK
-+case 17:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_ENDIF;
-+	YY_BREAK
-+case 18:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_DEPENDS;
-+	YY_BREAK
-+case 19:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_REQUIRES;
-+	YY_BREAK
-+case 20:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_OPTIONAL;
-+	YY_BREAK
-+case 21:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_DEFAULT;
-+	YY_BREAK
-+case 22:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_PROMPT;
-+	YY_BREAK
-+case 23:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_TRISTATE;
-+	YY_BREAK
-+case 24:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_DEF_TRISTATE;
-+	YY_BREAK
-+case 25:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_BOOLEAN;
-+	YY_BREAK
-+case 26:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_BOOLEAN;
-+	YY_BREAK
-+case 27:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_DEF_BOOLEAN;
-+	YY_BREAK
-+case 28:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_DEF_BOOLEAN;
-+	YY_BREAK
-+case 29:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_INT;
-+	YY_BREAK
-+case 30:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_HEX;
-+	YY_BREAK
-+case 31:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_STRING;
-+	YY_BREAK
-+case 32:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_SELECT;
-+	YY_BREAK
-+case 33:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_SELECT;
-+	YY_BREAK
-+case 34:
-+YY_RULE_SETUP
-+BEGIN(PARAM); return T_RANGE;
-+	YY_BREAK
-+case 35:
-+YY_RULE_SETUP
-+{
-+		alloc_string(zconftext, zconfleng);
-+		zconflval.string = text;
-+		return T_WORD;
-+	}
-+	YY_BREAK
-+case 36:
-+YY_RULE_SETUP
-+
-+	YY_BREAK
-+case 37:
-+/* rule 37 can match eol */
-+YY_RULE_SETUP
-+current_file->lineno++; BEGIN(INITIAL);
-+	YY_BREAK
-+
-+case 38:
-+YY_RULE_SETUP
-+return T_AND;
-+	YY_BREAK
-+case 39:
-+YY_RULE_SETUP
-+return T_OR;
-+	YY_BREAK
-+case 40:
-+YY_RULE_SETUP
-+return T_OPEN_PAREN;
-+	YY_BREAK
-+case 41:
-+YY_RULE_SETUP
-+return T_CLOSE_PAREN;
-+	YY_BREAK
-+case 42:
-+YY_RULE_SETUP
-+return T_NOT;
-+	YY_BREAK
-+case 43:
-+YY_RULE_SETUP
-+return T_EQUAL;
-+	YY_BREAK
-+case 44:
-+YY_RULE_SETUP
-+return T_UNEQUAL;
-+	YY_BREAK
-+case 45:
-+YY_RULE_SETUP
-+return T_IF;
-+	YY_BREAK
-+case 46:
-+YY_RULE_SETUP
-+return T_ON;
-+	YY_BREAK
-+case 47:
-+YY_RULE_SETUP
-+{
-+		str = zconftext[0];
-+		new_string();
-+		BEGIN(STRING);
-+	}
-+	YY_BREAK
-+case 48:
-+/* rule 48 can match eol */
-+YY_RULE_SETUP
-+BEGIN(INITIAL); current_file->lineno++; return T_EOL;
-+	YY_BREAK
-+case 49:
-+YY_RULE_SETUP
-+/* ignore */
-+	YY_BREAK
-+case 50:
-+YY_RULE_SETUP
-+{
-+		alloc_string(zconftext, zconfleng);
-+		zconflval.string = text;
-+		return T_WORD;
-+	}
-+	YY_BREAK
-+case 51:
-+YY_RULE_SETUP
-+/* comment */
-+	YY_BREAK
-+case 52:
-+/* rule 52 can match eol */
-+YY_RULE_SETUP
-+current_file->lineno++;
-+	YY_BREAK
-+case 53:
-+YY_RULE_SETUP
-+
-+	YY_BREAK
-+case YY_STATE_EOF(PARAM):
-+{
-+		BEGIN(INITIAL);
-+	}
-+	YY_BREAK
-+
-+case 54:
-+/* rule 54 can match eol */
-+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
-+(yy_c_buf_p) = yy_cp -= 1;
-+YY_DO_BEFORE_ACTION; /* set up zconftext again */
-+YY_RULE_SETUP
-+{
-+		append_string(zconftext, zconfleng);
-+		zconflval.string = text;
-+		return T_WORD_QUOTE;
-+	}
-+	YY_BREAK
-+case 55:
-+YY_RULE_SETUP
-+{
-+		append_string(zconftext, zconfleng);
-+	}
-+	YY_BREAK
-+case 56:
-+/* rule 56 can match eol */
-+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
-+(yy_c_buf_p) = yy_cp -= 1;
-+YY_DO_BEFORE_ACTION; /* set up zconftext again */
-+YY_RULE_SETUP
-+{
-+		append_string(zconftext + 1, zconfleng - 1);
-+		zconflval.string = text;
-+		return T_WORD_QUOTE;
-+	}
-+	YY_BREAK
-+case 57:
-+YY_RULE_SETUP
-+{
-+		append_string(zconftext + 1, zconfleng - 1);
-+	}
-+	YY_BREAK
-+case 58:
-+YY_RULE_SETUP
-+{
-+		if (str == zconftext[0]) {
-+			BEGIN(PARAM);
-+			zconflval.string = text;
-+			return T_WORD_QUOTE;
-+		} else
-+			append_string(zconftext, 1);
-+	}
-+	YY_BREAK
-+case 59:
-+/* rule 59 can match eol */
-+YY_RULE_SETUP
-+{
-+		printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno());
-+		current_file->lineno++;
-+		BEGIN(INITIAL);
-+		return T_EOL;
-+	}
-+	YY_BREAK
-+case YY_STATE_EOF(STRING):
-+{
-+		BEGIN(INITIAL);
-+	}
-+	YY_BREAK
-+
-+case 60:
-+YY_RULE_SETUP
-+{
-+		ts = 0;
-+		for (i = 0; i < zconfleng; i++) {
-+			if (zconftext[i] == '\t')
-+				ts = (ts & ~7) + 8;
-+			else
-+				ts++;
-+		}
-+		last_ts = ts;
-+		if (first_ts) {
-+			if (ts < first_ts) {
-+				zconf_endhelp();
-+				return T_HELPTEXT;
-+			}
-+			ts -= first_ts;
-+			while (ts > 8) {
-+				append_string("        ", 8);
-+				ts -= 8;
-+			}
-+			append_string("        ", ts);
-+		}
-+	}
-+	YY_BREAK
-+case 61:
-+/* rule 61 can match eol */
-+*yy_cp = (yy_hold_char); /* undo effects of setting up zconftext */
-+(yy_c_buf_p) = yy_cp -= 1;
-+YY_DO_BEFORE_ACTION; /* set up zconftext again */
-+YY_RULE_SETUP
-+{
-+		current_file->lineno++;
-+		zconf_endhelp();
-+		return T_HELPTEXT;
-+	}
-+	YY_BREAK
-+case 62:
-+/* rule 62 can match eol */
-+YY_RULE_SETUP
-+{
-+		current_file->lineno++;
-+		append_string("\n", 1);
-+	}
-+	YY_BREAK
-+case 63:
-+YY_RULE_SETUP
-+{
-+		append_string(zconftext, zconfleng);
-+		if (!first_ts)
-+			first_ts = last_ts;
-+	}
-+	YY_BREAK
-+case YY_STATE_EOF(HELP):
-+{
-+		zconf_endhelp();
-+		return T_HELPTEXT;
-+	}
-+	YY_BREAK
-+
-+case YY_STATE_EOF(INITIAL):
-+case YY_STATE_EOF(COMMAND):
-+{
-+	if (current_buf) {
-+		zconf_endfile();
-+		return T_EOF;
-+	}
-+	fclose(zconfin);
-+	yyterminate();
-+}
-+	YY_BREAK
-+case 64:
-+YY_RULE_SETUP
-+YY_FATAL_ERROR( "flex scanner jammed" );
-+	YY_BREAK
-+
-+	case YY_END_OF_BUFFER:
-+		{
-+		/* Amount of text matched not including the EOB char. */
-+		int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;
-+
-+		/* Undo the effects of YY_DO_BEFORE_ACTION. */
-+		*yy_cp = (yy_hold_char);
-+		YY_RESTORE_YY_MORE_OFFSET
-+
-+		if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
-+			{
-+			/* We're scanning a new file or input source.  It's
-+			 * possible that this happened because the user
-+			 * just pointed zconfin at a new source and called
-+			 * zconflex().  If so, then we have to assure
-+			 * consistency between YY_CURRENT_BUFFER and our
-+			 * globals.  Here is the right place to do so, because
-+			 * this is the first action (other than possibly a
-+			 * back-up) that will match for the new input source.
-+			 */
-+			(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
-+			YY_CURRENT_BUFFER_LVALUE->yy_input_file = zconfin;
-+			YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
-+			}
-+
-+		/* Note that here we test for yy_c_buf_p "<=" to the position
-+		 * of the first EOB in the buffer, since yy_c_buf_p will
-+		 * already have been incremented past the NUL character
-+		 * (since all states make transitions on EOB to the
-+		 * end-of-buffer state).  Contrast this with the test
-+		 * in input().
-+		 */
-+		if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
-+			{ /* This was really a NUL. */
-+			yy_state_type yy_next_state;
-+
-+			(yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;
-+
-+			yy_current_state = yy_get_previous_state(  );
-+
-+			/* Okay, we're now positioned to make the NUL
-+			 * transition.  We couldn't have
-+			 * yy_get_previous_state() go ahead and do it
-+			 * for us because it doesn't know how to deal
-+			 * with the possibility of jamming (and we don't
-+			 * want to build jamming into it because then it
-+			 * will run more slowly).
-+			 */
-+
-+			yy_next_state = yy_try_NUL_trans( yy_current_state );
-+
-+			yy_bp = (yytext_ptr) + YY_MORE_ADJ;
-+
-+			if ( yy_next_state )
-+				{
-+				/* Consume the NUL. */
-+				yy_cp = ++(yy_c_buf_p);
-+				yy_current_state = yy_next_state;
-+				goto yy_match;
-+				}
-+
-+			else
-+				{
-+				yy_cp = (yy_c_buf_p);
-+				goto yy_find_action;
-+				}
-+			}
-+
-+		else switch ( yy_get_next_buffer(  ) )
-+			{
-+			case EOB_ACT_END_OF_FILE:
-+				{
-+				(yy_did_buffer_switch_on_eof) = 0;
-+
-+				if ( zconfwrap( ) )
-+					{
-+					/* Note: because we've taken care in
-+					 * yy_get_next_buffer() to have set up
-+					 * zconftext, we can now set up
-+					 * yy_c_buf_p so that if some total
-+					 * hoser (like flex itself) wants to
-+					 * call the scanner after we return the
-+					 * YY_NULL, it'll still work - another
-+					 * YY_NULL will get returned.
-+					 */
-+					(yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;
-+
-+					yy_act = YY_STATE_EOF(YY_START);
-+					goto do_action;
-+					}
-+
-+				else
-+					{
-+					if ( ! (yy_did_buffer_switch_on_eof) )
-+						YY_NEW_FILE;
-+					}
-+				break;
-+				}
-+
-+			case EOB_ACT_CONTINUE_SCAN:
-+				(yy_c_buf_p) =
-+					(yytext_ptr) + yy_amount_of_matched_text;
-+
-+				yy_current_state = yy_get_previous_state(  );
-+
-+				yy_cp = (yy_c_buf_p);
-+				yy_bp = (yytext_ptr) + YY_MORE_ADJ;
-+				goto yy_match;
-+
-+			case EOB_ACT_LAST_MATCH:
-+				(yy_c_buf_p) =
-+				&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];
-+
-+				yy_current_state = yy_get_previous_state(  );
-+
-+				yy_cp = (yy_c_buf_p);
-+				yy_bp = (yytext_ptr) + YY_MORE_ADJ;
-+				goto yy_find_action;
-+			}
-+		break;
-+		}
-+
-+	default:
-+		YY_FATAL_ERROR(
-+			"fatal flex scanner internal error--no action found" );
-+	} /* end of action switch */
-+		} /* end of scanning one token */
-+} /* end of zconflex */
-+
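zconflex() above matches tokens purely by table lookup: yy_ec[] maps each input byte to a character class, yy_nxt[state][class] gives the next state (a non-positive entry means stop, and its negation is the state to report), and yy_accept[state] names the rule whose action runs. A minimal self-contained sketch of that convention, using a made-up two-state table that matches runs of digits (toy_ec, toy_nxt, toy_accept and the input "4096kB" are all illustrative, not taken from the patch):

	#include <stdio.h>
	#include <ctype.h>

	/* class 0 = anything else, class 1 = digit (stand-in for yy_ec[]) */
	static int toy_ec(int c) { return isdigit(c) ? 1 : 0; }

	static const int toy_nxt[3][2] = {
		{  0,  0 },	/* state 0: unused                   */
		{ -1,  2 },	/* state 1: start                    */
		{ -2,  2 },	/* state 2: inside a run of digits   */
	};
	static const int toy_accept[3] = { 0, 0, 1 };	/* state 2 accepts rule 1 */

	int main(void)
	{
		const char *start = "4096kB", *p = start;
		int state = 1, next;

		/* Same shape as the yy_match loop in zconflex(): follow
		 * transitions while they stay positive, then negate the
		 * stopping entry to find the accepting state. */
		while ((next = toy_nxt[state][toy_ec((unsigned char)*p)]) > 0) {
			state = next;
			++p;
		}
		state = -next;

		printf("rule %d matched %d character(s)\n",
		       toy_accept[state], (int)(p - start));	/* rule 1, 4 chars */
		return 0;
	}

yy_get_previous_state() below walks the same yy_nxt/yy_ec tables to recover the state in effect when an end-of-buffer marker is reached.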
-+/* yy_get_next_buffer - try to read in a new buffer
-+ *
-+ * Returns a code representing an action:
-+ *	EOB_ACT_LAST_MATCH -
-+ *	EOB_ACT_CONTINUE_SCAN - continue scanning from current position
-+ *	EOB_ACT_END_OF_FILE - end of file
-+ */
-+static int yy_get_next_buffer (void)
-+{
-+    	register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
-+	register char *source = (yytext_ptr);
-+	register int number_to_move, i;
-+	int ret_val;
-+
-+	if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
-+		YY_FATAL_ERROR(
-+		"fatal flex scanner internal error--end of buffer missed" );
-+
-+	if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
-+		{ /* Don't try to fill the buffer, so this is an EOF. */
-+		if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
-+			{
-+			/* We matched a single character, the EOB, so
-+			 * treat this as a final EOF.
-+			 */
-+			return EOB_ACT_END_OF_FILE;
-+			}
-+
-+		else
-+			{
-+			/* We matched some text prior to the EOB, first
-+			 * process it.
-+			 */
-+			return EOB_ACT_LAST_MATCH;
-+			}
-+		}
-+
-+	/* Try to read more data. */
-+
-+	/* First move last chars to start of buffer. */
-+	number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1;
-+
-+	for ( i = 0; i < number_to_move; ++i )
-+		*(dest++) = *(source++);
-+
-+	if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
-+		/* don't do the read, it's not guaranteed to return an EOF,
-+		 * just force an EOF
-+		 */
-+		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;
-+
-+	else
-+		{
-+			size_t num_to_read =
-+			YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
-+
-+		while ( num_to_read <= 0 )
-+			{ /* Not enough room in the buffer - grow it. */
-+
-+			/* just a shorter name for the current buffer */
-+			YY_BUFFER_STATE b = YY_CURRENT_BUFFER;
-+
-+			int yy_c_buf_p_offset =
-+				(int) ((yy_c_buf_p) - b->yy_ch_buf);
-+
-+			if ( b->yy_is_our_buffer )
-+				{
-+				int new_size = b->yy_buf_size * 2;
-+
-+				if ( new_size <= 0 )
-+					b->yy_buf_size += b->yy_buf_size / 8;
-+				else
-+					b->yy_buf_size *= 2;
-+
-+				b->yy_ch_buf = (char *)
-+					/* Include room in for 2 EOB chars. */
-+					zconfrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2  );
-+				}
-+			else
-+				/* Can't grow it, we don't own it. */
-+				b->yy_ch_buf = 0;
-+
-+			if ( ! b->yy_ch_buf )
-+				YY_FATAL_ERROR(
-+				"fatal error - scanner input buffer overflow" );
-+
-+			(yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];
-+
-+			num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
-+						number_to_move - 1;
-+
-+			}
-+
-+		if ( num_to_read > YY_READ_BUF_SIZE )
-+			num_to_read = YY_READ_BUF_SIZE;
-+
-+		/* Read in more data. */
-+		YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
-+			(yy_n_chars), num_to_read );
-+
-+		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
-+		}
-+
-+	if ( (yy_n_chars) == 0 )
-+		{
-+		if ( number_to_move == YY_MORE_ADJ )
-+			{
-+			ret_val = EOB_ACT_END_OF_FILE;
-+			zconfrestart(zconfin  );
-+			}
-+
-+		else
-+			{
-+			ret_val = EOB_ACT_LAST_MATCH;
-+			YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
-+				YY_BUFFER_EOF_PENDING;
-+			}
-+		}
-+
-+	else
-+		ret_val = EOB_ACT_CONTINUE_SCAN;
-+
-+	(yy_n_chars) += number_to_move;
-+	YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
-+	YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;
-+
-+	(yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
-+
-+	return ret_val;
-+}
-+
-+/* yy_get_previous_state - get the state just before the EOB char was reached */
-+
-+    static yy_state_type yy_get_previous_state (void)
-+{
-+	register yy_state_type yy_current_state;
-+	register char *yy_cp;
-+    
-+	yy_current_state = (yy_start);
-+
-+	for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
-+		{
-+		yy_current_state = yy_nxt[yy_current_state][(*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1)];
-+		}
-+
-+	return yy_current_state;
-+}
-+
-+/* yy_try_NUL_trans - try to make a transition on the NUL character
-+ *
-+ * synopsis
-+ *	next_state = yy_try_NUL_trans( current_state );
-+ */
-+    static yy_state_type yy_try_NUL_trans  (yy_state_type yy_current_state )
-+{
-+	register int yy_is_jam;
-+    
-+	yy_current_state = yy_nxt[yy_current_state][1];
-+	yy_is_jam = (yy_current_state <= 0);
-+
-+	return yy_is_jam ? 0 : yy_current_state;
-+}
-+
-+    static void yyunput (int c, register char * yy_bp )
-+{
-+	register char *yy_cp;
-+    
-+    yy_cp = (yy_c_buf_p);
-+
-+	/* undo effects of setting up zconftext */
-+	*yy_cp = (yy_hold_char);
-+
-+	if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
-+		{ /* need to shift things up to make room */
-+		/* +2 for EOB chars. */
-+		register int number_to_move = (yy_n_chars) + 2;
-+		register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
-+					YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
-+		register char *source =
-+				&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];
-+
-+		while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
-+			*--dest = *--source;
-+
-+		yy_cp += (int) (dest - source);
-+		yy_bp += (int) (dest - source);
-+		YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
-+			(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_buf_size;
-+
-+		if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
-+			YY_FATAL_ERROR( "flex scanner push-back overflow" );
-+		}
-+
-+	*--yy_cp = (char) c;
-+
-+	(yytext_ptr) = yy_bp;
-+	(yy_hold_char) = *yy_cp;
-+	(yy_c_buf_p) = yy_cp;
-+}
-+
-+#ifndef YY_NO_INPUT
-+#ifdef __cplusplus
-+    static int yyinput (void)
-+#else
-+    static int input  (void)
-+#endif
-+
-+{
-+	int c;
-+    
-+	*(yy_c_buf_p) = (yy_hold_char);
-+
-+	if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
-+		{
-+		/* yy_c_buf_p now points to the character we want to return.
-+		 * If this occurs *before* the EOB characters, then it's a
-+		 * valid NUL; if not, then we've hit the end of the buffer.
-+		 */
-+		if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
-+			/* This was really a NUL. */
-+			*(yy_c_buf_p) = '\0';
-+
-+		else
-+			{ /* need more input */
-+			int offset = (yy_c_buf_p) - (yytext_ptr);
-+			++(yy_c_buf_p);
-+
-+			switch ( yy_get_next_buffer(  ) )
-+				{
-+				case EOB_ACT_LAST_MATCH:
-+					/* This happens because yy_g_n_b()
-+					 * sees that we've accumulated a
-+					 * token and flags that we need to
-+					 * try matching the token before
-+					 * proceeding.  But for input(),
-+					 * there's no matching to consider.
-+					 * So convert the EOB_ACT_LAST_MATCH
-+					 * to EOB_ACT_END_OF_FILE.
-+					 */
-+
-+					/* Reset buffer status. */
-+					zconfrestart(zconfin );
-+
-+					/*FALLTHROUGH*/
-+
-+				case EOB_ACT_END_OF_FILE:
-+					{
-+					if ( zconfwrap( ) )
-+						return EOF;
-+
-+					if ( ! (yy_did_buffer_switch_on_eof) )
-+						YY_NEW_FILE;
-+#ifdef __cplusplus
-+					return yyinput();
-+#else
-+					return input();
-+#endif
-+					}
-+
-+				case EOB_ACT_CONTINUE_SCAN:
-+					(yy_c_buf_p) = (yytext_ptr) + offset;
-+					break;
-+				}
-+			}
-+		}
-+
-+	c = *(unsigned char *) (yy_c_buf_p);	/* cast for 8-bit char's */
-+	*(yy_c_buf_p) = '\0';	/* preserve zconftext */
-+	(yy_hold_char) = *++(yy_c_buf_p);
-+
-+	return c;
-+}
-+#endif	/* ifndef YY_NO_INPUT */
-+
-+/** Immediately switch to a different input stream.
-+ * @param input_file A readable stream.
-+ * 
-+ * @note This function does not reset the start condition to @c INITIAL .
-+ */
-+    void zconfrestart  (FILE * input_file )
-+{
-+    
-+	if ( ! YY_CURRENT_BUFFER ){
-+        zconfensure_buffer_stack ();
-+		YY_CURRENT_BUFFER_LVALUE =
-+            zconf_create_buffer(zconfin,YY_BUF_SIZE );
-+	}
-+
-+	zconf_init_buffer(YY_CURRENT_BUFFER,input_file );
-+	zconf_load_buffer_state( );
-+}
-+
-+/** Switch to a different input buffer.
-+ * @param new_buffer The new input buffer.
-+ * 
-+ */
-+    void zconf_switch_to_buffer  (YY_BUFFER_STATE  new_buffer )
-+{
-+    
-+	/* TODO. We should be able to replace this entire function body
-+	 * with
-+	 *		zconfpop_buffer_state();
-+	 *		zconfpush_buffer_state(new_buffer);
-+     */
-+	zconfensure_buffer_stack ();
-+	if ( YY_CURRENT_BUFFER == new_buffer )
-+		return;
-+
-+	if ( YY_CURRENT_BUFFER )
-+		{
-+		/* Flush out information for old buffer. */
-+		*(yy_c_buf_p) = (yy_hold_char);
-+		YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
-+		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
-+		}
-+
-+	YY_CURRENT_BUFFER_LVALUE = new_buffer;
-+	zconf_load_buffer_state( );
-+
-+	/* We don't actually know whether we did this switch during
-+	 * EOF (zconfwrap()) processing, but the only time this flag
-+	 * is looked at is after zconfwrap() is called, so it's safe
-+	 * to go ahead and always set it.
-+	 */
-+	(yy_did_buffer_switch_on_eof) = 1;
-+}
-+
-+static void zconf_load_buffer_state  (void)
-+{
-+    	(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
-+	(yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
-+	zconfin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
-+	(yy_hold_char) = *(yy_c_buf_p);
-+}
-+
-+/** Allocate and initialize an input buffer state.
-+ * @param file A readable stream.
-+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
-+ * 
-+ * @return the allocated buffer state.
-+ */
-+    YY_BUFFER_STATE zconf_create_buffer  (FILE * file, int  size )
-+{
-+	YY_BUFFER_STATE b;
-+    
-+	b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state )  );
-+	if ( ! b )
-+		YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
-+
-+	b->yy_buf_size = size;
-+
-+	/* yy_ch_buf has to be 2 characters longer than the size given because
-+	 * we need to put in 2 end-of-buffer characters.
-+	 */
-+	b->yy_ch_buf = (char *) zconfalloc(b->yy_buf_size + 2  );
-+	if ( ! b->yy_ch_buf )
-+		YY_FATAL_ERROR( "out of dynamic memory in zconf_create_buffer()" );
-+
-+	b->yy_is_our_buffer = 1;
-+
-+	zconf_init_buffer(b,file );
-+
-+	return b;
-+}
-+
-+/** Destroy the buffer.
-+ * @param b a buffer created with zconf_create_buffer()
-+ * 
-+ */
-+    void zconf_delete_buffer (YY_BUFFER_STATE  b )
-+{
-+    
-+	if ( ! b )
-+		return;
-+
-+	if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
-+		YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
-+
-+	if ( b->yy_is_our_buffer )
-+		zconffree((void *) b->yy_ch_buf  );
-+
-+	zconffree((void *) b  );
-+}
-+
-+/* Initializes or reinitializes a buffer.
-+ * This function is sometimes called more than once on the same buffer,
-+ * such as during a zconfrestart() or at EOF.
-+ */
-+    static void zconf_init_buffer  (YY_BUFFER_STATE  b, FILE * file )
-+
-+{
-+	int oerrno = errno;
-+    
-+	zconf_flush_buffer(b );
-+
-+	b->yy_input_file = file;
-+	b->yy_fill_buffer = 1;
-+
-+    /* If b is the current buffer, then zconf_init_buffer was _probably_
-+     * called from zconfrestart() or through yy_get_next_buffer.
-+     * In that case, we don't want to reset the lineno or column.
-+     */
-+    if (b != YY_CURRENT_BUFFER){
-+        b->yy_bs_lineno = 1;
-+        b->yy_bs_column = 0;
-+    }
-+
-+        b->yy_is_interactive = 0;
-+    
-+	errno = oerrno;
-+}
-+
-+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
-+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
-+ * 
-+ */
-+    void zconf_flush_buffer (YY_BUFFER_STATE  b )
-+{
-+    	if ( ! b )
-+		return;
-+
-+	b->yy_n_chars = 0;
-+
-+	/* We always need two end-of-buffer characters.  The first causes
-+	 * a transition to the end-of-buffer state.  The second causes
-+	 * a jam in that state.
-+	 */
-+	b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
-+	b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
-+
-+	b->yy_buf_pos = &b->yy_ch_buf[0];
-+
-+	b->yy_at_bol = 1;
-+	b->yy_buffer_status = YY_BUFFER_NEW;
-+
-+	if ( b == YY_CURRENT_BUFFER )
-+		zconf_load_buffer_state( );
-+}
-+
-+/** Pushes the new state onto the stack. The new state becomes
-+ *  the current state. This function will allocate the stack
-+ *  if necessary.
-+ *  @param new_buffer The new state.
-+ *  
-+ */
-+void zconfpush_buffer_state (YY_BUFFER_STATE new_buffer )
-+{
-+    	if (new_buffer == NULL)
-+		return;
-+
-+	zconfensure_buffer_stack();
-+
-+	/* This block is copied from zconf_switch_to_buffer. */
-+	if ( YY_CURRENT_BUFFER )
-+		{
-+		/* Flush out information for old buffer. */
-+		*(yy_c_buf_p) = (yy_hold_char);
-+		YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
-+		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
-+		}
-+
-+	/* Only push if top exists. Otherwise, replace top. */
-+	if (YY_CURRENT_BUFFER)
-+		(yy_buffer_stack_top)++;
-+	YY_CURRENT_BUFFER_LVALUE = new_buffer;
-+
-+	/* copied from zconf_switch_to_buffer. */
-+	zconf_load_buffer_state( );
-+	(yy_did_buffer_switch_on_eof) = 1;
-+}
-+
-+/** Removes and deletes the top of the stack, if present.
-+ *  The next element becomes the new top.
-+ *  
-+ */
-+void zconfpop_buffer_state (void)
-+{
-+    	if (!YY_CURRENT_BUFFER)
-+		return;
-+
-+	zconf_delete_buffer(YY_CURRENT_BUFFER );
-+	YY_CURRENT_BUFFER_LVALUE = NULL;
-+	if ((yy_buffer_stack_top) > 0)
-+		--(yy_buffer_stack_top);
-+
-+	if (YY_CURRENT_BUFFER) {
-+		zconf_load_buffer_state( );
-+		(yy_did_buffer_switch_on_eof) = 1;
-+	}
-+}
-+
-+/* Allocates the stack if it does not exist.
-+ *  Guarantees space for at least one push.
-+ */
-+static void zconfensure_buffer_stack (void)
-+{
-+	int num_to_alloc;
-+    
-+	if (!(yy_buffer_stack)) {
-+
-+		/* First allocation is just for 2 elements, since we don't know if this
-+		 * scanner will even need a stack. We use 2 instead of 1 to avoid an
-+		 * immediate realloc on the next call.
-+         */
-+		num_to_alloc = 1;
-+		(yy_buffer_stack) = (struct yy_buffer_state**)zconfalloc
-+								(num_to_alloc * sizeof(struct yy_buffer_state*)
-+								);
-+		
-+		memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));
-+				
-+		(yy_buffer_stack_max) = num_to_alloc;
-+		(yy_buffer_stack_top) = 0;
-+		return;
-+	}
-+
-+	if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){
-+
-+		/* Increase the buffer to prepare for a possible push. */
-+		int grow_size = 8 /* arbitrary grow size */;
-+
-+		num_to_alloc = (yy_buffer_stack_max) + grow_size;
-+		(yy_buffer_stack) = (struct yy_buffer_state**)zconfrealloc
-+								((yy_buffer_stack),
-+								num_to_alloc * sizeof(struct yy_buffer_state*)
-+								);
-+
-+		/* zero only the new slots.*/
-+		memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
-+		(yy_buffer_stack_max) = num_to_alloc;
-+	}
-+}
-+
-+/** Setup the input buffer state to scan directly from a user-specified character buffer.
-+ * @param base the character buffer
-+ * @param size the size in bytes of the character buffer
-+ * 
-+ * @return the newly allocated buffer state object. 
-+ */
-+YY_BUFFER_STATE zconf_scan_buffer  (char * base, yy_size_t  size )
-+{
-+	YY_BUFFER_STATE b;
-+    
-+	if ( size < 2 ||
-+	     base[size-2] != YY_END_OF_BUFFER_CHAR ||
-+	     base[size-1] != YY_END_OF_BUFFER_CHAR )
-+		/* They forgot to leave room for the EOB's. */
-+		return 0;
-+
-+	b = (YY_BUFFER_STATE) zconfalloc(sizeof( struct yy_buffer_state )  );
-+	if ( ! b )
-+		YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_buffer()" );
-+
-+	b->yy_buf_size = size - 2;	/* "- 2" to take care of EOB's */
-+	b->yy_buf_pos = b->yy_ch_buf = base;
-+	b->yy_is_our_buffer = 0;
-+	b->yy_input_file = 0;
-+	b->yy_n_chars = b->yy_buf_size;
-+	b->yy_is_interactive = 0;
-+	b->yy_at_bol = 1;
-+	b->yy_fill_buffer = 0;
-+	b->yy_buffer_status = YY_BUFFER_NEW;
-+
-+	zconf_switch_to_buffer(b  );
-+
-+	return b;
-+}
-+
-+/** Setup the input buffer state to scan a string. The next call to zconflex() will
-+ * scan from a @e copy of @a str.
-+ * @param str a NUL-terminated string to scan
-+ * 
-+ * @return the newly allocated buffer state object.
-+ * @note If you want to scan bytes that may contain NUL values, then use
-+ *       zconf_scan_bytes() instead.
-+ */
-+YY_BUFFER_STATE zconf_scan_string (yyconst char * str )
-+{
-+    
-+	return zconf_scan_bytes(str,strlen(str) );
-+}
-+
-+/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will
-+ * scan from a @e copy of @a bytes.
-+ * @param bytes the byte buffer to scan
-+ * @param len the number of bytes in the buffer pointed to by @a bytes.
-+ * 
-+ * @return the newly allocated buffer state object.
-+ */
-+YY_BUFFER_STATE zconf_scan_bytes  (yyconst char * bytes, int  len )
-+{
-+	YY_BUFFER_STATE b;
-+	char *buf;
-+	yy_size_t n;
-+	int i;
-+    
-+	/* Get memory for full buffer, including space for trailing EOB's. */
-+	n = len + 2;
-+	buf = (char *) zconfalloc(n  );
-+	if ( ! buf )
-+		YY_FATAL_ERROR( "out of dynamic memory in zconf_scan_bytes()" );
-+
-+	for ( i = 0; i < len; ++i )
-+		buf[i] = bytes[i];
-+
-+	buf[len] = buf[len+1] = YY_END_OF_BUFFER_CHAR;
-+
-+	b = zconf_scan_buffer(buf,n );
-+	if ( ! b )
-+		YY_FATAL_ERROR( "bad buffer in zconf_scan_bytes()" );
-+
-+	/* It's okay to grow etc. this buffer, and we should throw it
-+	 * away when we're done.
-+	 */
-+	b->yy_is_our_buffer = 1;
-+
-+	return b;
-+}
-+
-+#ifndef YY_EXIT_FAILURE
-+#define YY_EXIT_FAILURE 2
-+#endif
-+
-+static void yy_fatal_error (yyconst char* msg )
-+{
-+    	(void) fprintf( stderr, "%s\n", msg );
-+	exit( YY_EXIT_FAILURE );
-+}
-+
-+/* Redefine yyless() so it works in section 3 code. */
-+
-+#undef yyless
-+#define yyless(n) \
-+	do \
-+		{ \
-+		/* Undo effects of setting up zconftext. */ \
-+        int yyless_macro_arg = (n); \
-+        YY_LESS_LINENO(yyless_macro_arg);\
-+		zconftext[zconfleng] = (yy_hold_char); \
-+		(yy_c_buf_p) = zconftext + yyless_macro_arg; \
-+		(yy_hold_char) = *(yy_c_buf_p); \
-+		*(yy_c_buf_p) = '\0'; \
-+		zconfleng = yyless_macro_arg; \
-+		} \
-+	while ( 0 )
-+
-+/* Accessor  methods (get/set functions) to struct members. */
-+
-+/** Get the current line number.
-+ * 
-+ */
-+int zconfget_lineno  (void)
-+{
-+        
-+    return zconflineno;
-+}
-+
-+/** Get the input stream.
-+ * 
-+ */
-+FILE *zconfget_in  (void)
-+{
-+        return zconfin;
-+}
-+
-+/** Get the output stream.
-+ * 
-+ */
-+FILE *zconfget_out  (void)
-+{
-+        return zconfout;
-+}
-+
-+/** Get the length of the current token.
-+ * 
-+ */
-+int zconfget_leng  (void)
-+{
-+        return zconfleng;
-+}
-+
-+/** Get the current token.
-+ * 
-+ */
-+
-+char *zconfget_text  (void)
-+{
-+        return zconftext;
-+}
-+
-+/** Set the current line number.
-+ * @param line_number
-+ * 
-+ */
-+void zconfset_lineno (int  line_number )
-+{
-+    
-+    zconflineno = line_number;
-+}
-+
-+/** Set the input stream. This does not discard the current
-+ * input buffer.
-+ * @param in_str A readable stream.
-+ * 
-+ * @see zconf_switch_to_buffer
-+ */
-+void zconfset_in (FILE *  in_str )
-+{
-+        zconfin = in_str ;
-+}
-+
-+void zconfset_out (FILE *  out_str )
-+{
-+        zconfout = out_str ;
-+}
-+
-+int zconfget_debug  (void)
-+{
-+        return zconf_flex_debug;
-+}
-+
-+void zconfset_debug (int  bdebug )
-+{
-+        zconf_flex_debug = bdebug ;
-+}
-+
-+/* zconflex_destroy is for both reentrant and non-reentrant scanners. */
-+int zconflex_destroy  (void)
-+{
-+    
-+    /* Pop the buffer stack, destroying each element. */
-+	while(YY_CURRENT_BUFFER){
-+		zconf_delete_buffer(YY_CURRENT_BUFFER  );
-+		YY_CURRENT_BUFFER_LVALUE = NULL;
-+		zconfpop_buffer_state();
-+	}
-+
-+	/* Destroy the stack itself. */
-+	zconffree((yy_buffer_stack) );
-+	(yy_buffer_stack) = NULL;
-+
-+    return 0;
-+}
-+
-+/*
-+ * Internal utility routines.
-+ */
-+
-+#ifndef yytext_ptr
-+static void yy_flex_strncpy (char* s1, yyconst char * s2, int n )
-+{
-+	register int i;
-+    	for ( i = 0; i < n; ++i )
-+		s1[i] = s2[i];
-+}
-+#endif
-+
-+#ifdef YY_NEED_STRLEN
-+static int yy_flex_strlen (yyconst char * s )
-+{
-+	register int n;
-+    	for ( n = 0; s[n]; ++n )
-+		;
-+
-+	return n;
-+}
-+#endif
-+
-+void *zconfalloc (yy_size_t  size )
-+{
-+	return (void *) malloc( size );
-+}
-+
-+void *zconfrealloc  (void * ptr, yy_size_t  size )
-+{
-+	/* The cast to (char *) in the following accommodates both
-+	 * implementations that use char* generic pointers, and those
-+	 * that use void* generic pointers.  It works with the latter
-+	 * because both ANSI C and C++ allow castless assignment from
-+	 * any pointer type to void*, and deal with argument conversions
-+	 * as though doing an assignment.
-+	 */
-+	return (void *) realloc( (char *) ptr, size );
-+}
-+
-+void zconffree (void * ptr )
-+{
-+	free( (char *) ptr );	/* see zconfrealloc() for (char *) cast */
-+}
-+
-+#define YYTABLES_NAME "yytables"
-+
-+#undef YY_NEW_FILE
-+#undef YY_FLUSH_BUFFER
-+#undef yy_set_bol
-+#undef yy_new_buffer
-+#undef yy_set_interactive
-+#undef yytext_ptr
-+#undef YY_DO_BEFORE_ACTION
-+
-+#ifdef YY_DECL_IS_OURS
-+#undef YY_DECL_IS_OURS
-+#undef YY_DECL
-+#endif
-+
-+void zconf_starthelp(void)
-+{
-+	new_string();
-+	last_ts = first_ts = 0;
-+	BEGIN(HELP);
-+}
-+
-+static void zconf_endhelp(void)
-+{
-+	zconflval.string = text;
-+	BEGIN(INITIAL);
-+}
-+
-+/*
-+ * Try to open specified file with following names:
-+ * ./name
-+ * $(srctree)/name
-+ * The latter is used when srctree is separate from objtree
-+ * when compiling the kernel.
-+ * Return NULL if file is not found.
-+ */
-+FILE *zconf_fopen(const char *name)
-+{
-+	char *env, fullname[PATH_MAX+1];
-+	FILE *f;
-+
-+	f = fopen(name, "r");
-+	if (!f && name[0] != '/') {
-+		env = getenv(SRCTREE);
-+		if (env) {
-+			sprintf(fullname, "%s/%s", env, name);
-+			f = fopen(fullname, "r");
-+		}
-+	}
-+	return f;
-+}
-+
-+void zconf_initscan(const char *name)
-+{
-+	zconfin = zconf_fopen(name);
-+	if (!zconfin) {
-+		printf("can't find file %s\n", name);
-+		exit(1);
-+	}
-+
-+	current_buf = malloc(sizeof(*current_buf));
-+	memset(current_buf, 0, sizeof(*current_buf));
-+
-+	current_file = file_lookup(name);
-+	current_file->lineno = 1;
-+	current_file->flags = FILE_BUSY;
-+}
-+
-+void zconf_nextfile(const char *name)
-+{
-+	struct file *file = file_lookup(name);
-+	struct buffer *buf = malloc(sizeof(*buf));
-+	memset(buf, 0, sizeof(*buf));
-+
-+	current_buf->state = YY_CURRENT_BUFFER;
-+	zconfin = zconf_fopen(name);
-+	if (!zconfin) {
-+		printf("%s:%d: can't open file \"%s\"\n", zconf_curname(), zconf_lineno(), name);
-+		exit(1);
-+	}
-+	zconf_switch_to_buffer(zconf_create_buffer(zconfin,YY_BUF_SIZE));
-+	buf->parent = current_buf;
-+	current_buf = buf;
-+
-+	if (file->flags & FILE_BUSY) {
-+		printf("recursive scan (%s)?\n", name);
-+		exit(1);
-+	}
-+	if (file->flags & FILE_SCANNED) {
-+		printf("file %s already scanned?\n", name);
-+		exit(1);
-+	}
-+	file->flags |= FILE_BUSY;
-+	file->lineno = 1;
-+	file->parent = current_file;
-+	current_file = file;
-+}
-+
-+static struct buffer *zconf_endfile(void)
-+{
-+	struct buffer *parent;
-+
-+	current_file->flags |= FILE_SCANNED;
-+	current_file->flags &= ~FILE_BUSY;
-+	current_file = current_file->parent;
-+
-+	parent = current_buf->parent;
-+	if (parent) {
-+		fclose(zconfin);
-+		zconf_delete_buffer(YY_CURRENT_BUFFER);
-+		zconf_switch_to_buffer(parent->state);
-+	}
-+	free(current_buf);
-+	current_buf = parent;
-+
-+	return parent;
-+}
-+
-+int zconf_lineno(void)
-+{
-+	if (current_buf)
-+		return current_file->lineno - 1;
-+	else
-+		return 0;
-+}
-+
-+char *zconf_curname(void)
-+{
-+	if (current_buf)
-+		return current_file->name;
-+	else
-+		return "<none>";
-+}
-+
-Binary files pristine-linux-2.6.12/scripts/kconfig/mconf.o and linux-2.6.12-xen/scripts/kconfig/mconf.o differ.
-diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.mconf.o.cmd linux-2.6.12-xen/scripts/kconfig/.mconf.o.cmd
---- pristine-linux-2.6.12/scripts/kconfig/.mconf.o.cmd	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/kconfig/.mconf.o.cmd	2006-02-25 00:12:52.335196094 +0100
-@@ -0,0 +1,96 @@
-+cmd_scripts/kconfig/mconf.o := gcc -Wp,-MD,scripts/kconfig/.mconf.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer       -c -o scripts/kconfig/mconf.o scripts/kconfig/mconf.c
-+
-+deps_scripts/kconfig/mconf.o := \
-+  scripts/kconfig/mconf.c \
-+    $(wildcard include/config/mode.h) \
-+    $(wildcard include/config/.h) \
-+  /usr/include/sys/ioctl.h \
-+  /usr/include/features.h \
-+  /usr/include/sys/cdefs.h \
-+  /usr/include/gnu/stubs.h \
-+  /usr/include/bits/ioctls.h \
-+  /usr/include/asm/ioctls.h \
-+  /usr/include/asm-i486/ioctls.h \
-+  /usr/include/asm/ioctl.h \
-+  /usr/include/asm-i486/ioctl.h \
-+  /usr/include/bits/ioctl-types.h \
-+  /usr/include/sys/ttydefaults.h \
-+  /usr/include/sys/wait.h \
-+  /usr/include/signal.h \
-+  /usr/include/bits/sigset.h \
-+  /usr/include/bits/types.h \
-+  /usr/include/bits/wordsize.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
-+  /usr/include/bits/typesizes.h \
-+  /usr/include/bits/signum.h \
-+  /usr/include/time.h \
-+  /usr/include/bits/siginfo.h \
-+  /usr/include/bits/sigaction.h \
-+  /usr/include/bits/sigcontext.h \
-+  /usr/include/asm/sigcontext.h \
-+  /usr/include/asm-i486/sigcontext.h \
-+  /usr/include/linux/compiler.h \
-+  /usr/include/bits/sigstack.h \
-+  /usr/include/bits/pthreadtypes.h \
-+  /usr/include/bits/sched.h \
-+  /usr/include/bits/sigthread.h \
-+  /usr/include/sys/resource.h \
-+  /usr/include/bits/resource.h \
-+  /usr/include/bits/time.h \
-+  /usr/include/bits/waitflags.h \
-+  /usr/include/bits/waitstatus.h \
-+  /usr/include/endian.h \
-+  /usr/include/bits/endian.h \
-+  /usr/include/ctype.h \
-+  /usr/include/errno.h \
-+  /usr/include/bits/errno.h \
-+  /usr/include/linux/errno.h \
-+  /usr/include/asm/errno.h \
-+  /usr/include/asm-i486/errno.h \
-+  /usr/include/asm-generic/errno.h \
-+  /usr/include/asm-generic/errno-base.h \
-+  /usr/include/fcntl.h \
-+  /usr/include/bits/fcntl.h \
-+  /usr/include/sys/types.h \
-+  /usr/include/sys/select.h \
-+  /usr/include/bits/select.h \
-+  /usr/include/sys/sysmacros.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/limits.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/syslimits.h \
-+  /usr/include/limits.h \
-+  /usr/include/bits/posix1_lim.h \
-+  /usr/include/bits/local_lim.h \
-+  /usr/include/linux/limits.h \
-+  /usr/include/bits/posix2_lim.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
-+  /usr/include/stdlib.h \
-+  /usr/include/alloca.h \
-+  /usr/include/string.h \
-+  /usr/include/bits/string.h \
-+  /usr/include/bits/string2.h \
-+  /usr/include/termios.h \
-+  /usr/include/bits/termios.h \
-+  /usr/include/unistd.h \
-+  /usr/include/bits/posix_opt.h \
-+  /usr/include/bits/confname.h \
-+  /usr/include/getopt.h \
-+  scripts/kconfig/lkc.h \
-+  scripts/kconfig/expr.h \
-+  /usr/include/stdio.h \
-+  /usr/include/libio.h \
-+  /usr/include/_G_config.h \
-+  /usr/include/wchar.h \
-+  /usr/include/bits/wchar.h \
-+  /usr/include/gconv.h \
-+  /usr/include/bits/stdio_lim.h \
-+  /usr/include/bits/sys_errlist.h \
-+  /usr/include/bits/stdio.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdbool.h \
-+  /usr/include/libintl.h \
-+  /usr/include/locale.h \
-+  /usr/include/bits/locale.h \
-+  scripts/kconfig/lkc_proto.h \
-+
-+scripts/kconfig/mconf.o: $(deps_scripts/kconfig/mconf.o)
-+
-+$(deps_scripts/kconfig/mconf.o):
-diff -Nurp pristine-linux-2.6.12/scripts/kconfig/zconf.tab.c linux-2.6.12-xen/scripts/kconfig/zconf.tab.c
---- pristine-linux-2.6.12/scripts/kconfig/zconf.tab.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/kconfig/zconf.tab.c	2006-02-25 00:12:52.345194586 +0100
-@@ -0,0 +1,2130 @@
-+/* A Bison parser, made by GNU Bison 1.875a.  */
-+
-+/* Skeleton parser for Yacc-like parsing with Bison,
-+   Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
-+
-+   This program is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 2, or (at your option)
-+   any later version.
-+
-+   This program is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; if not, write to the Free Software
-+   Foundation, Inc., 59 Temple Place - Suite 330,
-+   Boston, MA 02111-1307, USA.  */
-+
-+/* As a special exception, when this file is copied by Bison into a
-+   Bison output file, you may use that output file without restriction.
-+   This special exception was added by the Free Software Foundation
-+   in version 1.24 of Bison.  */
-+
-+/* Written by Richard Stallman by simplifying the original so called
-+   ``semantic'' parser.  */
-+
-+/* All symbols defined below should begin with yy or YY, to avoid
-+   infringing on user name space.  This should be done even for local
-+   variables, as they might otherwise be expanded by user macros.
-+   There are some unavoidable exceptions within include files to
-+   define necessary library symbols; they are noted "INFRINGES ON
-+   USER NAME SPACE" below.  */
-+
-+/* Identify Bison output.  */
-+#define YYBISON 1
-+
-+/* Skeleton name.  */
-+#define YYSKELETON_NAME "yacc.c"
-+
-+/* Pure parsers.  */
-+#define YYPURE 0
-+
-+/* Using locations.  */
-+#define YYLSP_NEEDED 0
-+
-+/* If NAME_PREFIX is specified substitute the variables and functions
-+   names.  */
-+#define yyparse zconfparse
-+#define yylex   zconflex
-+#define yyerror zconferror
-+#define yylval  zconflval
-+#define yychar  zconfchar
-+#define yydebug zconfdebug
-+#define yynerrs zconfnerrs
-+
-+
-+/* Tokens.  */
-+#ifndef YYTOKENTYPE
-+# define YYTOKENTYPE
-+   /* Put the tokens into the symbol table, so that GDB and other debuggers
-+      know about them.  */
-+   enum yytokentype {
-+     T_MAINMENU = 258,
-+     T_MENU = 259,
-+     T_ENDMENU = 260,
-+     T_SOURCE = 261,
-+     T_CHOICE = 262,
-+     T_ENDCHOICE = 263,
-+     T_COMMENT = 264,
-+     T_CONFIG = 265,
-+     T_MENUCONFIG = 266,
-+     T_HELP = 267,
-+     T_HELPTEXT = 268,
-+     T_IF = 269,
-+     T_ENDIF = 270,
-+     T_DEPENDS = 271,
-+     T_REQUIRES = 272,
-+     T_OPTIONAL = 273,
-+     T_PROMPT = 274,
-+     T_DEFAULT = 275,
-+     T_TRISTATE = 276,
-+     T_DEF_TRISTATE = 277,
-+     T_BOOLEAN = 278,
-+     T_DEF_BOOLEAN = 279,
-+     T_STRING = 280,
-+     T_INT = 281,
-+     T_HEX = 282,
-+     T_WORD = 283,
-+     T_WORD_QUOTE = 284,
-+     T_UNEQUAL = 285,
-+     T_EOF = 286,
-+     T_EOL = 287,
-+     T_CLOSE_PAREN = 288,
-+     T_OPEN_PAREN = 289,
-+     T_ON = 290,
-+     T_SELECT = 291,
-+     T_RANGE = 292,
-+     T_OR = 293,
-+     T_AND = 294,
-+     T_EQUAL = 295,
-+     T_NOT = 296
-+   };
-+#endif
-+#define T_MAINMENU 258
-+#define T_MENU 259
-+#define T_ENDMENU 260
-+#define T_SOURCE 261
-+#define T_CHOICE 262
-+#define T_ENDCHOICE 263
-+#define T_COMMENT 264
-+#define T_CONFIG 265
-+#define T_MENUCONFIG 266
-+#define T_HELP 267
-+#define T_HELPTEXT 268
-+#define T_IF 269
-+#define T_ENDIF 270
-+#define T_DEPENDS 271
-+#define T_REQUIRES 272
-+#define T_OPTIONAL 273
-+#define T_PROMPT 274
-+#define T_DEFAULT 275
-+#define T_TRISTATE 276
-+#define T_DEF_TRISTATE 277
-+#define T_BOOLEAN 278
-+#define T_DEF_BOOLEAN 279
-+#define T_STRING 280
-+#define T_INT 281
-+#define T_HEX 282
-+#define T_WORD 283
-+#define T_WORD_QUOTE 284
-+#define T_UNEQUAL 285
-+#define T_EOF 286
-+#define T_EOL 287
-+#define T_CLOSE_PAREN 288
-+#define T_OPEN_PAREN 289
-+#define T_ON 290
-+#define T_SELECT 291
-+#define T_RANGE 292
-+#define T_OR 293
-+#define T_AND 294
-+#define T_EQUAL 295
-+#define T_NOT 296
-+
-+
-+
-+
-+/* Copy the first part of user declarations.  */
-+
-+
-+/*
-+ * Copyright (C) 2002 Roman Zippel <zippel at linux-m68k.org>
-+ * Released under the terms of the GNU GPL v2.0.
-+ */
-+
-+#include <ctype.h>
-+#include <stdarg.h>
-+#include <stdio.h>
-+#include <stdlib.h>
-+#include <string.h>
-+#include <stdbool.h>
-+
-+#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt)
-+
-+#define PRINTD		0x0001
-+#define DEBUG_PARSE	0x0002
-+
-+int cdebug = PRINTD;
-+
-+extern int zconflex(void);
-+static void zconfprint(const char *err, ...);
-+static void zconferror(const char *err);
-+static bool zconf_endtoken(int token, int starttoken, int endtoken);
-+
-+struct symbol *symbol_hash[257];
-+
-+static struct menu *current_menu, *current_entry;
-+
-+#define YYERROR_VERBOSE
-+
-+
-+/* Enabling traces.  */
-+#ifndef YYDEBUG
-+# define YYDEBUG 0
-+#endif
-+
-+/* Enabling verbose error messages.  */
-+#ifdef YYERROR_VERBOSE
-+# undef YYERROR_VERBOSE
-+# define YYERROR_VERBOSE 1
-+#else
-+# define YYERROR_VERBOSE 0
-+#endif
-+
-+#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED)
-+
-+typedef union YYSTYPE {
-+	int token;
-+	char *string;
-+	struct symbol *symbol;
-+	struct expr *expr;
-+	struct menu *menu;
-+} YYSTYPE;
-+/* Line 191 of yacc.c.  */
-+
-+# define yystype YYSTYPE /* obsolescent; will be withdrawn */
-+# define YYSTYPE_IS_DECLARED 1
-+# define YYSTYPE_IS_TRIVIAL 1
-+#endif
-+
-+
-+
-+/* Copy the second part of user declarations.  */
-+
-+
-+#define LKC_DIRECT_LINK
-+#include "lkc.h"
-+
-+
-+/* Line 214 of yacc.c.  */
-+
-+
-+#if ! defined (yyoverflow) || YYERROR_VERBOSE
-+
-+/* The parser invokes alloca or malloc; define the necessary symbols.  */
-+
-+# if YYSTACK_USE_ALLOCA
-+#  define YYSTACK_ALLOC alloca
-+# else
-+#  ifndef YYSTACK_USE_ALLOCA
-+#   if defined (alloca) || defined (_ALLOCA_H)
-+#    define YYSTACK_ALLOC alloca
-+#   else
-+#    ifdef __GNUC__
-+#     define YYSTACK_ALLOC __builtin_alloca
-+#    endif
-+#   endif
-+#  endif
-+# endif
-+
-+# ifdef YYSTACK_ALLOC
-+   /* Pacify GCC's `empty if-body' warning. */
-+#  define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
-+# else
-+#  if defined (__STDC__) || defined (__cplusplus)
-+#   include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-+#   define YYSIZE_T size_t
-+#  endif
-+#  define YYSTACK_ALLOC malloc
-+#  define YYSTACK_FREE free
-+# endif
-+#endif /* ! defined (yyoverflow) || YYERROR_VERBOSE */
-+
-+
-+#if (! defined (yyoverflow) \
-+     && (! defined (__cplusplus) \
-+	 || (YYSTYPE_IS_TRIVIAL)))
-+
-+/* A type that is properly aligned for any stack member.  */
-+union yyalloc
-+{
-+  short yyss;
-+  YYSTYPE yyvs;
-+  };
-+
-+/* The size of the maximum gap between one aligned stack and the next.  */
-+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
-+
-+/* The size of an array large enough to hold all stacks, each with
-+   N elements.  */
-+# define YYSTACK_BYTES(N) \
-+     ((N) * (sizeof (short) + sizeof (YYSTYPE))				\
-+      + YYSTACK_GAP_MAXIMUM)
-+
-+/* Copy COUNT objects from FROM to TO.  The source and destination do
-+   not overlap.  */
-+# ifndef YYCOPY
-+#  if 1 < __GNUC__
-+#   define YYCOPY(To, From, Count) \
-+      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
-+#  else
-+#   define YYCOPY(To, From, Count)		\
-+      do					\
-+	{					\
-+	  register YYSIZE_T yyi;		\
-+	  for (yyi = 0; yyi < (Count); yyi++)	\
-+	    (To)[yyi] = (From)[yyi];		\
-+	}					\
-+      while (0)
-+#  endif
-+# endif
-+
-+/* Relocate STACK from its old location to the new one.  The
-+   local variables YYSIZE and YYSTACKSIZE give the old and new number of
-+   elements in the stack, and YYPTR gives the new location of the
-+   stack.  Advance YYPTR to a properly aligned location for the next
-+   stack.  */
-+# define YYSTACK_RELOCATE(Stack)					\
-+    do									\
-+      {									\
-+	YYSIZE_T yynewbytes;						\
-+	YYCOPY (&yyptr->Stack, Stack, yysize);				\
-+	Stack = &yyptr->Stack;						\
-+	yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
-+	yyptr += yynewbytes / sizeof (*yyptr);				\
-+      }									\
-+    while (0)
-+
-+#endif
-+
-+#if defined (__STDC__) || defined (__cplusplus)
-+   typedef signed char yysigned_char;
-+#else
-+   typedef short yysigned_char;
-+#endif
-+
-+/* YYFINAL -- State number of the termination state. */
-+#define YYFINAL  2
-+/* YYLAST -- Last index in YYTABLE.  */
-+#define YYLAST   201
-+
-+/* YYNTOKENS -- Number of terminals. */
-+#define YYNTOKENS  42
-+/* YYNNTS -- Number of nonterminals. */
-+#define YYNNTS  41
-+/* YYNRULES -- Number of rules. */
-+#define YYNRULES  104
-+/* YYNRULES -- Number of states. */
-+#define YYNSTATES  182
-+
-+/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
-+#define YYUNDEFTOK  2
-+#define YYMAXUTOK   296
-+
-+#define YYTRANSLATE(YYX) 						\
-+  ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
-+
-+/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX.  */
-+static const unsigned char yytranslate[] =
-+{
-+       0,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-+       2,     2,     2,     2,     2,     2,     1,     2,     3,     4,
-+       5,     6,     7,     8,     9,    10,    11,    12,    13,    14,
-+      15,    16,    17,    18,    19,    20,    21,    22,    23,    24,
-+      25,    26,    27,    28,    29,    30,    31,    32,    33,    34,
-+      35,    36,    37,    38,    39,    40,    41
-+};
-+
-+#if YYDEBUG
-+/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
-+   YYRHS.  */
-+static const unsigned short yyprhs[] =
-+{
-+       0,     0,     3,     4,     7,     9,    11,    13,    17,    19,
-+      21,    23,    26,    28,    30,    32,    34,    36,    38,    42,
-+      45,    49,    52,    53,    56,    59,    62,    65,    69,    74,
-+      78,    83,    87,    91,    95,   100,   105,   110,   116,   119,
-+     122,   124,   128,   131,   132,   135,   138,   141,   144,   149,
-+     153,   157,   160,   165,   166,   169,   173,   175,   179,   182,
-+     183,   186,   189,   192,   196,   199,   201,   205,   208,   209,
-+     212,   215,   218,   222,   226,   228,   232,   235,   238,   241,
-+     242,   245,   248,   253,   257,   261,   262,   265,   267,   269,
-+     272,   275,   278,   280,   282,   283,   286,   288,   292,   296,
-+     300,   303,   307,   311,   313
-+};
-+
-+/* YYRHS -- A `-1'-separated list of the rules' RHS. */
-+static const yysigned_char yyrhs[] =
-+{
-+      43,     0,    -1,    -1,    43,    44,    -1,    45,    -1,    55,
-+      -1,    66,    -1,     3,    77,    79,    -1,     5,    -1,    15,
-+      -1,     8,    -1,     1,    79,    -1,    61,    -1,    71,    -1,
-+      47,    -1,    49,    -1,    69,    -1,    79,    -1,    10,    28,
-+      32,    -1,    46,    50,    -1,    11,    28,    32,    -1,    48,
-+      50,    -1,    -1,    50,    51,    -1,    50,    75,    -1,    50,
-+      73,    -1,    50,    32,    -1,    21,    76,    32,    -1,    22,
-+      81,    80,    32,    -1,    23,    76,    32,    -1,    24,    81,
-+      80,    32,    -1,    26,    76,    32,    -1,    27,    76,    32,
-+      -1,    25,    76,    32,    -1,    19,    77,    80,    32,    -1,
-+      20,    81,    80,    32,    -1,    36,    28,    80,    32,    -1,
-+      37,    82,    82,    80,    32,    -1,     7,    32,    -1,    52,
-+      56,    -1,    78,    -1,    53,    58,    54,    -1,    53,    58,
-+      -1,    -1,    56,    57,    -1,    56,    75,    -1,    56,    73,
-+      -1,    56,    32,    -1,    19,    77,    80,    32,    -1,    21,
-+      76,    32,    -1,    23,    76,    32,    -1,    18,    32,    -1,
-+      20,    28,    80,    32,    -1,    -1,    58,    45,    -1,    14,
-+      81,    32,    -1,    78,    -1,    59,    62,    60,    -1,    59,
-+      62,    -1,    -1,    62,    45,    -1,    62,    66,    -1,    62,
-+      55,    -1,     4,    77,    32,    -1,    63,    74,    -1,    78,
-+      -1,    64,    67,    65,    -1,    64,    67,    -1,    -1,    67,
-+      45,    -1,    67,    66,    -1,    67,    55,    -1,    67,     1,
-+      32,    -1,     6,    77,    32,    -1,    68,    -1,     9,    77,
-+      32,    -1,    70,    74,    -1,    12,    32,    -1,    72,    13,
-+      -1,    -1,    74,    75,    -1,    74,    32,    -1,    16,    35,
-+      81,    32,    -1,    16,    81,    32,    -1,    17,    81,    32,
-+      -1,    -1,    77,    80,    -1,    28,    -1,    29,    -1,     5,
-+      79,    -1,     8,    79,    -1,    15,    79,    -1,    32,    -1,
-+      31,    -1,    -1,    14,    81,    -1,    82,    -1,    82,    40,
-+      82,    -1,    82,    30,    82,    -1,    34,    81,    33,    -1,
-+      41,    81,    -1,    81,    38,    81,    -1,    81,    39,    81,
-+      -1,    28,    -1,    29,    -1
-+};
-+
-+/* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
-+static const unsigned short yyrline[] =
-+{
-+       0,    94,    94,    95,    98,    99,   100,   101,   102,   103,
-+     104,   105,   109,   110,   111,   112,   113,   114,   120,   128,
-+     134,   142,   152,   154,   155,   156,   157,   160,   166,   173,
-+     179,   186,   192,   198,   204,   210,   216,   222,   230,   239,
-+     245,   254,   255,   261,   263,   264,   265,   266,   269,   275,
-+     281,   287,   293,   299,   301,   306,   315,   324,   325,   331,
-+     333,   334,   335,   340,   347,   353,   362,   363,   369,   371,
-+     372,   373,   374,   377,   383,   390,   397,   404,   410,   417,
-+     418,   419,   422,   427,   432,   440,   442,   447,   448,   451,
-+     452,   453,   457,   457,   459,   460,   463,   464,   465,   466,
-+     467,   468,   469,   472,   473
-+};
-+#endif
-+
-+#if YYDEBUG || YYERROR_VERBOSE
-+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
-+   First, the terminals, then, starting at YYNTOKENS, nonterminals. */
-+static const char *const yytname[] =
-+{
-+  "$end", "error", "$undefined", "T_MAINMENU", "T_MENU", "T_ENDMENU", 
-+  "T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG", 
-+  "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS", 
-+  "T_REQUIRES", "T_OPTIONAL", "T_PROMPT", "T_DEFAULT", "T_TRISTATE", 
-+  "T_DEF_TRISTATE", "T_BOOLEAN", "T_DEF_BOOLEAN", "T_STRING", "T_INT", 
-+  "T_HEX", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL", "T_EOF", "T_EOL", 
-+  "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_ON", "T_SELECT", "T_RANGE", "T_OR", 
-+  "T_AND", "T_EQUAL", "T_NOT", "$accept", "input", "block", 
-+  "common_block", "config_entry_start", "config_stmt", 
-+  "menuconfig_entry_start", "menuconfig_stmt", "config_option_list", 
-+  "config_option", "choice", "choice_entry", "choice_end", "choice_stmt", 
-+  "choice_option_list", "choice_option", "choice_block", "if", "if_end", 
-+  "if_stmt", "if_block", "menu", "menu_entry", "menu_end", "menu_stmt", 
-+  "menu_block", "source", "source_stmt", "comment", "comment_stmt", 
-+  "help_start", "help", "depends_list", "depends", "prompt_stmt_opt", 
-+  "prompt", "end", "nl_or_eof", "if_expr", "expr", "symbol", 0
-+};
-+#endif
-+
-+# ifdef YYPRINT
-+/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
-+   token YYLEX-NUM.  */
-+static const unsigned short yytoknum[] =
-+{
-+       0,   256,   257,   258,   259,   260,   261,   262,   263,   264,
-+     265,   266,   267,   268,   269,   270,   271,   272,   273,   274,
-+     275,   276,   277,   278,   279,   280,   281,   282,   283,   284,
-+     285,   286,   287,   288,   289,   290,   291,   292,   293,   294,
-+     295,   296
-+};
-+# endif
-+
-+/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */
-+static const unsigned char yyr1[] =
-+{
-+       0,    42,    43,    43,    44,    44,    44,    44,    44,    44,
-+      44,    44,    45,    45,    45,    45,    45,    45,    46,    47,
-+      48,    49,    50,    50,    50,    50,    50,    51,    51,    51,
-+      51,    51,    51,    51,    51,    51,    51,    51,    52,    53,
-+      54,    55,    55,    56,    56,    56,    56,    56,    57,    57,
-+      57,    57,    57,    58,    58,    59,    60,    61,    61,    62,
-+      62,    62,    62,    63,    64,    65,    66,    66,    67,    67,
-+      67,    67,    67,    68,    69,    70,    71,    72,    73,    74,
-+      74,    74,    75,    75,    75,    76,    76,    77,    77,    78,
-+      78,    78,    79,    79,    80,    80,    81,    81,    81,    81,
-+      81,    81,    81,    82,    82
-+};
-+
-+/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
-+static const unsigned char yyr2[] =
-+{
-+       0,     2,     0,     2,     1,     1,     1,     3,     1,     1,
-+       1,     2,     1,     1,     1,     1,     1,     1,     3,     2,
-+       3,     2,     0,     2,     2,     2,     2,     3,     4,     3,
-+       4,     3,     3,     3,     4,     4,     4,     5,     2,     2,
-+       1,     3,     2,     0,     2,     2,     2,     2,     4,     3,
-+       3,     2,     4,     0,     2,     3,     1,     3,     2,     0,
-+       2,     2,     2,     3,     2,     1,     3,     2,     0,     2,
-+       2,     2,     3,     3,     1,     3,     2,     2,     2,     0,
-+       2,     2,     4,     3,     3,     0,     2,     1,     1,     2,
-+       2,     2,     1,     1,     0,     2,     1,     3,     3,     3,
-+       2,     3,     3,     1,     1
-+};
-+
-+/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
-+   STATE-NUM when YYTABLE doesn't specify something else to do.  Zero
-+   means the default is an error.  */
-+static const unsigned char yydefact[] =
-+{
-+       2,     0,     1,     0,     0,     0,     8,     0,     0,    10,
-+       0,     0,     0,     0,     9,    93,    92,     3,     4,    22,
-+      14,    22,    15,    43,    53,     5,    59,    12,    79,    68,
-+       6,    74,    16,    79,    13,    17,    11,    87,    88,     0,
-+       0,     0,    38,     0,     0,     0,   103,   104,     0,     0,
-+       0,    96,    19,    21,    39,    42,    58,    64,     0,    76,
-+       7,    63,    73,    75,    18,    20,     0,   100,    55,     0,
-+       0,     0,     0,     0,     0,     0,     0,     0,    85,     0,
-+      85,     0,    85,    85,    85,    26,     0,     0,    23,     0,
-+      25,    24,     0,     0,     0,    85,    85,    47,    44,    46,
-+      45,     0,     0,     0,    54,    41,    40,    60,    62,    57,
-+      61,    56,    81,    80,     0,    69,    71,    66,    70,    65,
-+      99,   101,   102,    98,    97,    77,     0,     0,     0,    94,
-+      94,     0,    94,    94,     0,    94,     0,     0,     0,    94,
-+       0,    78,    51,    94,    94,     0,     0,    89,    90,    91,
-+      72,     0,    83,    84,     0,     0,     0,    27,    86,     0,
-+      29,     0,    33,    31,    32,     0,    94,     0,     0,    49,
-+      50,    82,    95,    34,    35,    28,    30,    36,     0,    48,
-+      52,    37
-+};
-+
-+/* YYDEFGOTO[NTERM-NUM]. */
-+static const short yydefgoto[] =
-+{
-+      -1,     1,    17,    18,    19,    20,    21,    22,    52,    88,
-+      23,    24,   105,    25,    54,    98,    55,    26,   109,    27,
-+      56,    28,    29,   117,    30,    58,    31,    32,    33,    34,
-+      89,    90,    57,    91,   131,   132,   106,    35,   155,    50,
-+      51
-+};
-+
-+/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
-+   STATE-NUM.  */
-+#define YYPACT_NINF -99
-+static const short yypact[] =
-+{
-+     -99,    48,   -99,    38,    46,    46,   -99,    46,   -29,   -99,
-+      46,   -17,    -3,   -11,   -99,   -99,   -99,   -99,   -99,   -99,
-+     -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,
-+     -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,   -99,    38,
-+      12,    15,   -99,    18,    51,    62,   -99,   -99,   -11,   -11,
-+       4,   -24,   138,   138,   160,   121,   110,    -4,    81,    -4,
-+     -99,   -99,   -99,   -99,   -99,   -99,   -19,   -99,   -99,   -11,
-+     -11,    70,    70,    73,    32,   -11,    46,   -11,    46,   -11,
-+      46,   -11,    46,    46,    46,   -99,    36,    70,   -99,    95,
-+     -99,   -99,    96,    46,   106,    46,    46,   -99,   -99,   -99,
-+     -99,    38,    38,    38,   -99,   -99,   -99,   -99,   -99,   -99,
-+     -99,   -99,   -99,   -99,   112,   -99,   -99,   -99,   -99,   -99,
-+     -99,   117,   -99,   -99,   -99,   -99,   -11,    33,    65,   131,
-+       1,   119,   131,     1,   136,     1,   153,   154,   155,   131,
-+      70,   -99,   -99,   131,   131,   156,   157,   -99,   -99,   -99,
-+     -99,   101,   -99,   -99,   -11,   158,   159,   -99,   -99,   161,
-+     -99,   162,   -99,   -99,   -99,   163,   131,   164,   165,   -99,
-+     -99,   -99,    99,   -99,   -99,   -99,   -99,   -99,   166,   -99,
-+     -99,   -99
-+};
-+
-+/* YYPGOTO[NTERM-NUM].  */
-+static const short yypgoto[] =
-+{
-+     -99,   -99,   -99,   111,   -99,   -99,   -99,   -99,   178,   -99,
-+     -99,   -99,   -99,    91,   -99,   -99,   -99,   -99,   -99,   -99,
-+     -99,   -99,   -99,   -99,   115,   -99,   -99,   -99,   -99,   -99,
-+     -99,   146,   168,    89,    27,     0,   126,    -1,   -98,   -48,
-+     -63
-+};
-+
-+/* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
-+   positive, shift that token.  If negative, reduce the rule which
-+   number is the opposite.  If zero, do what YYDEFACT says.
-+   If YYTABLE_NINF, syntax error.  */
-+#define YYTABLE_NINF -68
-+static const short yytable[] =
-+{
-+      66,    67,    36,    42,    39,    40,    71,    41,   123,   124,
-+      43,    44,    74,    75,   120,   154,    72,    46,    47,    69,
-+      70,   121,   122,    48,   140,    45,   127,   128,   112,   130,
-+      49,   133,   156,   135,   158,   159,    68,   161,    60,    69,
-+      70,   165,    69,    70,    61,   167,   168,    62,     2,     3,
-+      63,     4,     5,     6,     7,     8,     9,    10,    11,    12,
-+      46,    47,    13,    14,   139,   152,    48,   126,   178,    15,
-+      16,    69,    70,    49,    37,    38,   129,   166,   151,    15,
-+      16,   -67,   114,    64,   -67,     5,   101,     7,     8,   102,
-+      10,    11,    12,   143,    65,    13,   103,   153,    46,    47,
-+     147,   148,   149,    69,    70,   125,   172,   134,   141,   136,
-+     137,   138,    15,    16,     5,   101,     7,     8,   102,    10,
-+      11,    12,   145,   146,    13,   103,   101,     7,   142,   102,
-+      10,    11,    12,   171,   144,    13,   103,    69,    70,    69,
-+      70,    15,    16,   100,   150,   154,   113,   108,   113,   116,
-+      73,   157,    15,    16,    74,    75,    70,    76,    77,    78,
-+      79,    80,    81,    82,    83,    84,   104,   107,   160,   115,
-+      85,   110,    73,   118,    86,    87,    74,    75,    92,    93,
-+      94,    95,   111,    96,   119,   162,   163,   164,   169,   170,
-+     173,   174,    97,   175,   176,   177,   179,   180,   181,    53,
-+      99,    59
-+};
-+
-+static const unsigned char yycheck[] =
-+{
-+      48,    49,     3,    32,     4,     5,    30,     7,    71,    72,
-+      10,    28,    16,    17,    33,    14,    40,    28,    29,    38,
-+      39,    69,    70,    34,    87,    28,    74,    75,    32,    77,
-+      41,    79,   130,    81,   132,   133,    32,   135,    39,    38,
-+      39,   139,    38,    39,    32,   143,   144,    32,     0,     1,
-+      32,     3,     4,     5,     6,     7,     8,     9,    10,    11,
-+      28,    29,    14,    15,    28,    32,    34,    35,   166,    31,
-+      32,    38,    39,    41,    28,    29,    76,   140,   126,    31,
-+      32,     0,     1,    32,     3,     4,     5,     6,     7,     8,
-+       9,    10,    11,    93,    32,    14,    15,    32,    28,    29,
-+     101,   102,   103,    38,    39,    32,   154,    80,    13,    82,
-+      83,    84,    31,    32,     4,     5,     6,     7,     8,     9,
-+      10,    11,    95,    96,    14,    15,     5,     6,    32,     8,
-+       9,    10,    11,    32,    28,    14,    15,    38,    39,    38,
-+      39,    31,    32,    54,    32,    14,    57,    56,    59,    58,
-+      12,    32,    31,    32,    16,    17,    39,    19,    20,    21,
-+      22,    23,    24,    25,    26,    27,    55,    56,    32,    58,
-+      32,    56,    12,    58,    36,    37,    16,    17,    18,    19,
-+      20,    21,    56,    23,    58,    32,    32,    32,    32,    32,
-+      32,    32,    32,    32,    32,    32,    32,    32,    32,    21,
-+      54,    33
-+};
-+
-+/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
-+   symbol of state STATE-NUM.  */
-+static const unsigned char yystos[] =
-+{
-+       0,    43,     0,     1,     3,     4,     5,     6,     7,     8,
-+       9,    10,    11,    14,    15,    31,    32,    44,    45,    46,
-+      47,    48,    49,    52,    53,    55,    59,    61,    63,    64,
-+      66,    68,    69,    70,    71,    79,    79,    28,    29,    77,
-+      77,    77,    32,    77,    28,    28,    28,    29,    34,    41,
-+      81,    82,    50,    50,    56,    58,    62,    74,    67,    74,
-+      79,    32,    32,    32,    32,    32,    81,    81,    32,    38,
-+      39,    30,    40,    12,    16,    17,    19,    20,    21,    22,
-+      23,    24,    25,    26,    27,    32,    36,    37,    51,    72,
-+      73,    75,    18,    19,    20,    21,    23,    32,    57,    73,
-+      75,     5,     8,    15,    45,    54,    78,    45,    55,    60,
-+      66,    78,    32,    75,     1,    45,    55,    65,    66,    78,
-+      33,    81,    81,    82,    82,    32,    35,    81,    81,    77,
-+      81,    76,    77,    81,    76,    81,    76,    76,    76,    28,
-+      82,    13,    32,    77,    28,    76,    76,    79,    79,    79,
-+      32,    81,    32,    32,    14,    80,    80,    32,    80,    80,
-+      32,    80,    32,    32,    32,    80,    82,    80,    80,    32,
-+      32,    32,    81,    32,    32,    32,    32,    32,    80,    32,
-+      32,    32
-+};
-+
-+#if ! defined (YYSIZE_T) && defined (__SIZE_TYPE__)
-+# define YYSIZE_T __SIZE_TYPE__
-+#endif
-+#if ! defined (YYSIZE_T) && defined (size_t)
-+# define YYSIZE_T size_t
-+#endif
-+#if ! defined (YYSIZE_T)
-+# if defined (__STDC__) || defined (__cplusplus)
-+#  include <stddef.h> /* INFRINGES ON USER NAME SPACE */
-+#  define YYSIZE_T size_t
-+# endif
-+#endif
-+#if ! defined (YYSIZE_T)
-+# define YYSIZE_T unsigned int
-+#endif
-+
-+#define yyerrok		(yyerrstatus = 0)
-+#define yyclearin	(yychar = YYEMPTY)
-+#define YYEMPTY		(-2)
-+#define YYEOF		0
-+
-+#define YYACCEPT	goto yyacceptlab
-+#define YYABORT		goto yyabortlab
-+#define YYERROR		goto yyerrlab1
-+
-+
-+/* Like YYERROR except do call yyerror.  This remains here temporarily
-+   to ease the transition to the new meaning of YYERROR, for GCC.
-+   Once GCC version 2 has supplanted version 1, this can go.  */
-+
-+#define YYFAIL		goto yyerrlab
-+
-+#define YYRECOVERING()  (!!yyerrstatus)
-+
-+#define YYBACKUP(Token, Value)					\
-+do								\
-+  if (yychar == YYEMPTY && yylen == 1)				\
-+    {								\
-+      yychar = (Token);						\
-+      yylval = (Value);						\
-+      yytoken = YYTRANSLATE (yychar);				\
-+      YYPOPSTACK;						\
-+      goto yybackup;						\
-+    }								\
-+  else								\
-+    { 								\
-+      yyerror ("syntax error: cannot back up");\
-+      YYERROR;							\
-+    }								\
-+while (0)
-+
-+#define YYTERROR	1
-+#define YYERRCODE	256
-+
-+/* YYLLOC_DEFAULT -- Compute the default location (before the actions
-+   are run).  */
-+
-+#ifndef YYLLOC_DEFAULT
-+# define YYLLOC_DEFAULT(Current, Rhs, N)         \
-+  Current.first_line   = Rhs[1].first_line;      \
-+  Current.first_column = Rhs[1].first_column;    \
-+  Current.last_line    = Rhs[N].last_line;       \
-+  Current.last_column  = Rhs[N].last_column;
-+#endif
-+
-+/* YYLEX -- calling `yylex' with the right arguments.  */
-+
-+#ifdef YYLEX_PARAM
-+# define YYLEX yylex (YYLEX_PARAM)
-+#else
-+# define YYLEX yylex ()
-+#endif
-+
-+/* Enable debugging if requested.  */
-+#if YYDEBUG
-+
-+# ifndef YYFPRINTF
-+#  include <stdio.h> /* INFRINGES ON USER NAME SPACE */
-+#  define YYFPRINTF fprintf
-+# endif
-+
-+# define YYDPRINTF(Args)			\
-+do {						\
-+  if (yydebug)					\
-+    YYFPRINTF Args;				\
-+} while (0)
-+
-+# define YYDSYMPRINT(Args)			\
-+do {						\
-+  if (yydebug)					\
-+    yysymprint Args;				\
-+} while (0)
-+
-+# define YYDSYMPRINTF(Title, Token, Value, Location)		\
-+do {								\
-+  if (yydebug)							\
-+    {								\
-+      YYFPRINTF (stderr, "%s ", Title);				\
-+      yysymprint (stderr, 					\
-+                  Token, Value);	\
-+      YYFPRINTF (stderr, "\n");					\
-+    }								\
-+} while (0)
-+
-+/*------------------------------------------------------------------.
-+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
-+| TOP (included).                                                   |
-+`------------------------------------------------------------------*/
-+
-+#if defined (__STDC__) || defined (__cplusplus)
-+static void
-+yy_stack_print (short *bottom, short *top)
-+#else
-+static void
-+yy_stack_print (bottom, top)
-+    short *bottom;
-+    short *top;
-+#endif
-+{
-+  YYFPRINTF (stderr, "Stack now");
-+  for (/* Nothing. */; bottom <= top; ++bottom)
-+    YYFPRINTF (stderr, " %d", *bottom);
-+  YYFPRINTF (stderr, "\n");
-+}
-+
-+# define YY_STACK_PRINT(Bottom, Top)				\
-+do {								\
-+  if (yydebug)							\
-+    yy_stack_print ((Bottom), (Top));				\
-+} while (0)
-+
-+
-+/*------------------------------------------------.
-+| Report that the YYRULE is going to be reduced.  |
-+`------------------------------------------------*/
-+
-+#if defined (__STDC__) || defined (__cplusplus)
-+static void
-+yy_reduce_print (int yyrule)
-+#else
-+static void
-+yy_reduce_print (yyrule)
-+    int yyrule;
-+#endif
-+{
-+  int yyi;
-+  unsigned int yylineno = yyrline[yyrule];
-+  YYFPRINTF (stderr, "Reducing stack by rule %d (line %u), ",
-+             yyrule - 1, yylineno);
-+  /* Print the symbols being reduced, and their result.  */
-+  for (yyi = yyprhs[yyrule]; 0 <= yyrhs[yyi]; yyi++)
-+    YYFPRINTF (stderr, "%s ", yytname [yyrhs[yyi]]);
-+  YYFPRINTF (stderr, "-> %s\n", yytname [yyr1[yyrule]]);
-+}
-+
-+# define YY_REDUCE_PRINT(Rule)		\
-+do {					\
-+  if (yydebug)				\
-+    yy_reduce_print (Rule);		\
-+} while (0)
-+
-+/* Nonzero means print parse trace.  It is left uninitialized so that
-+   multiple parsers can coexist.  */
-+int yydebug;
-+#else /* !YYDEBUG */
-+# define YYDPRINTF(Args)
-+# define YYDSYMPRINT(Args)
-+# define YYDSYMPRINTF(Title, Token, Value, Location)
-+# define YY_STACK_PRINT(Bottom, Top)
-+# define YY_REDUCE_PRINT(Rule)
-+#endif /* !YYDEBUG */
-+
-+
-+/* YYINITDEPTH -- initial size of the parser's stacks.  */
-+#ifndef	YYINITDEPTH
-+# define YYINITDEPTH 200
-+#endif
-+
-+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
-+   if the built-in stack extension method is used).
-+
-+   Do not make this value too large; the results are undefined if
-+   SIZE_MAX < YYSTACK_BYTES (YYMAXDEPTH)
-+   evaluated with infinite-precision integer arithmetic.  */
-+
-+#if YYMAXDEPTH == 0
-+# undef YYMAXDEPTH
-+#endif
-+
-+#ifndef YYMAXDEPTH
-+# define YYMAXDEPTH 10000
-+#endif
-+
-+
-+
-+#if YYERROR_VERBOSE
-+
-+# ifndef yystrlen
-+#  if defined (__GLIBC__) && defined (_STRING_H)
-+#   define yystrlen strlen
-+#  else
-+/* Return the length of YYSTR.  */
-+static YYSIZE_T
-+#   if defined (__STDC__) || defined (__cplusplus)
-+yystrlen (const char *yystr)
-+#   else
-+yystrlen (yystr)
-+     const char *yystr;
-+#   endif
-+{
-+  register const char *yys = yystr;
-+
-+  while (*yys++ != '\0')
-+    continue;
-+
-+  return yys - yystr - 1;
-+}
-+#  endif
-+# endif
-+
-+# ifndef yystpcpy
-+#  if defined (__GLIBC__) && defined (_STRING_H) && defined (_GNU_SOURCE)
-+#   define yystpcpy stpcpy
-+#  else
-+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
-+   YYDEST.  */
-+static char *
-+#   if defined (__STDC__) || defined (__cplusplus)
-+yystpcpy (char *yydest, const char *yysrc)
-+#   else
-+yystpcpy (yydest, yysrc)
-+     char *yydest;
-+     const char *yysrc;
-+#   endif
-+{
-+  register char *yyd = yydest;
-+  register const char *yys = yysrc;
-+
-+  while ((*yyd++ = *yys++) != '\0')
-+    continue;
-+
-+  return yyd - 1;
-+}
-+#  endif
-+# endif
-+
-+#endif /* !YYERROR_VERBOSE */
-+
-+
-+
-+#if YYDEBUG
-+/*--------------------------------.
-+| Print this symbol on YYOUTPUT.  |
-+`--------------------------------*/
-+
-+#if defined (__STDC__) || defined (__cplusplus)
-+static void
-+yysymprint (FILE *yyoutput, int yytype, YYSTYPE *yyvaluep)
-+#else
-+static void
-+yysymprint (yyoutput, yytype, yyvaluep)
-+    FILE *yyoutput;
-+    int yytype;
-+    YYSTYPE *yyvaluep;
-+#endif
-+{
-+  /* Pacify ``unused variable'' warnings.  */
-+  (void) yyvaluep;
-+
-+  if (yytype < YYNTOKENS)
-+    {
-+      YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
-+# ifdef YYPRINT
-+      YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
-+# endif
-+    }
-+  else
-+    YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
-+
-+  switch (yytype)
-+    {
-+      default:
-+        break;
-+    }
-+  YYFPRINTF (yyoutput, ")");
-+}
-+
-+#endif /* ! YYDEBUG */
-+/*-----------------------------------------------.
-+| Release the memory associated to this symbol.  |
-+`-----------------------------------------------*/
-+
-+#if defined (__STDC__) || defined (__cplusplus)
-+static void
-+yydestruct (int yytype, YYSTYPE *yyvaluep)
-+#else
-+static void
-+yydestruct (yytype, yyvaluep)
-+    int yytype;
-+    YYSTYPE *yyvaluep;
-+#endif
-+{
-+  /* Pacify ``unused variable'' warnings.  */
-+  (void) yyvaluep;
-+
-+  switch (yytype)
-+    {
-+
-+      default:
-+        break;
-+    }
-+}
-+
-+
-+/* Prevent warnings from -Wmissing-prototypes.  */
-+
-+#ifdef YYPARSE_PARAM
-+# if defined (__STDC__) || defined (__cplusplus)
-+int yyparse (void *YYPARSE_PARAM);
-+# else
-+int yyparse ();
-+# endif
-+#else /* ! YYPARSE_PARAM */
-+#if defined (__STDC__) || defined (__cplusplus)
-+int yyparse (void);
-+#else
-+int yyparse ();
-+#endif
-+#endif /* ! YYPARSE_PARAM */
-+
-+
-+
-+/* The lookahead symbol.  */
-+int yychar;
-+
-+/* The semantic value of the lookahead symbol.  */
-+YYSTYPE yylval;
-+
-+/* Number of syntax errors so far.  */
-+int yynerrs;
-+
-+
-+
-+/*----------.
-+| yyparse.  |
-+`----------*/
-+
-+#ifdef YYPARSE_PARAM
-+# if defined (__STDC__) || defined (__cplusplus)
-+int yyparse (void *YYPARSE_PARAM)
-+# else
-+int yyparse (YYPARSE_PARAM)
-+  void *YYPARSE_PARAM;
-+# endif
-+#else /* ! YYPARSE_PARAM */
-+#if defined (__STDC__) || defined (__cplusplus)
-+int
-+yyparse (void)
-+#else
-+int
-+yyparse ()
-+
-+#endif
-+#endif
-+{
-+  
-+  register int yystate;
-+  register int yyn;
-+  int yyresult;
-+  /* Number of tokens to shift before error messages enabled.  */
-+  int yyerrstatus;
-+  /* Lookahead token as an internal (translated) token number.  */
-+  int yytoken = 0;
-+
-+  /* Three stacks and their tools:
-+     `yyss': related to states,
-+     `yyvs': related to semantic values,
-+     `yyls': related to locations.
-+
-+     Refer to the stacks thru separate pointers, to allow yyoverflow
-+     to reallocate them elsewhere.  */
-+
-+  /* The state stack.  */
-+  short	yyssa[YYINITDEPTH];
-+  short *yyss = yyssa;
-+  register short *yyssp;
-+
-+  /* The semantic value stack.  */
-+  YYSTYPE yyvsa[YYINITDEPTH];
-+  YYSTYPE *yyvs = yyvsa;
-+  register YYSTYPE *yyvsp;
-+
-+
-+
-+#define YYPOPSTACK   (yyvsp--, yyssp--)
-+
-+  YYSIZE_T yystacksize = YYINITDEPTH;
-+
-+  /* The variables used to return semantic value and location from the
-+     action routines.  */
-+  YYSTYPE yyval;
-+
-+
-+  /* When reducing, the number of symbols on the RHS of the reduced
-+     rule.  */
-+  int yylen;
-+
-+  YYDPRINTF ((stderr, "Starting parse\n"));
-+
-+  yystate = 0;
-+  yyerrstatus = 0;
-+  yynerrs = 0;
-+  yychar = YYEMPTY;		/* Cause a token to be read.  */
-+
-+  /* Initialize stack pointers.
-+     Waste one element of value and location stack
-+     so that they stay on the same level as the state stack.
-+     The wasted elements are never initialized.  */
-+
-+  yyssp = yyss;
-+  yyvsp = yyvs;
-+
-+  goto yysetstate;
-+
-+/*------------------------------------------------------------.
-+| yynewstate -- Push a new state, which is found in yystate.  |
-+`------------------------------------------------------------*/
-+ yynewstate:
-+  /* In all cases, when you get here, the value and location stacks
-+     have just been pushed. so pushing a state here evens the stacks.
-+     */
-+  yyssp++;
-+
-+ yysetstate:
-+  *yyssp = yystate;
-+
-+  if (yyss + yystacksize - 1 <= yyssp)
-+    {
-+      /* Get the current used size of the three stacks, in elements.  */
-+      YYSIZE_T yysize = yyssp - yyss + 1;
-+
-+#ifdef yyoverflow
-+      {
-+	/* Give user a chance to reallocate the stack. Use copies of
-+	   these so that the &'s don't force the real ones into
-+	   memory.  */
-+	YYSTYPE *yyvs1 = yyvs;
-+	short *yyss1 = yyss;
-+
-+
-+	/* Each stack pointer address is followed by the size of the
-+	   data in use in that stack, in bytes.  This used to be a
-+	   conditional around just the two extra args, but that might
-+	   be undefined if yyoverflow is a macro.  */
-+	yyoverflow ("parser stack overflow",
-+		    &yyss1, yysize * sizeof (*yyssp),
-+		    &yyvs1, yysize * sizeof (*yyvsp),
-+
-+		    &yystacksize);
-+
-+	yyss = yyss1;
-+	yyvs = yyvs1;
-+      }
-+#else /* no yyoverflow */
-+# ifndef YYSTACK_RELOCATE
-+      goto yyoverflowlab;
-+# else
-+      /* Extend the stack our own way.  */
-+      if (YYMAXDEPTH <= yystacksize)
-+	goto yyoverflowlab;
-+      yystacksize *= 2;
-+      if (YYMAXDEPTH < yystacksize)
-+	yystacksize = YYMAXDEPTH;
-+
-+      {
-+	short *yyss1 = yyss;
-+	union yyalloc *yyptr =
-+	  (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
-+	if (! yyptr)
-+	  goto yyoverflowlab;
-+	YYSTACK_RELOCATE (yyss);
-+	YYSTACK_RELOCATE (yyvs);
-+
-+#  undef YYSTACK_RELOCATE
-+	if (yyss1 != yyssa)
-+	  YYSTACK_FREE (yyss1);
-+      }
-+# endif
-+#endif /* no yyoverflow */
-+
-+      yyssp = yyss + yysize - 1;
-+      yyvsp = yyvs + yysize - 1;
-+
-+
-+      YYDPRINTF ((stderr, "Stack size increased to %lu\n",
-+		  (unsigned long int) yystacksize));
-+
-+      if (yyss + yystacksize - 1 <= yyssp)
-+	YYABORT;
-+    }
-+
-+  YYDPRINTF ((stderr, "Entering state %d\n", yystate));
-+
-+  goto yybackup;
-+
-+/*-----------.
-+| yybackup.  |
-+`-----------*/
-+yybackup:
-+
-+/* Do appropriate processing given the current state.  */
-+/* Read a lookahead token if we need one and don't already have one.  */
-+/* yyresume: */
-+
-+  /* First try to decide what to do without reference to lookahead token.  */
-+
-+  yyn = yypact[yystate];
-+  if (yyn == YYPACT_NINF)
-+    goto yydefault;
-+
-+  /* Not known => get a lookahead token if don't already have one.  */
-+
-+  /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol.  */
-+  if (yychar == YYEMPTY)
-+    {
-+      YYDPRINTF ((stderr, "Reading a token: "));
-+      yychar = YYLEX;
-+    }
-+
-+  if (yychar <= YYEOF)
-+    {
-+      yychar = yytoken = YYEOF;
-+      YYDPRINTF ((stderr, "Now at end of input.\n"));
-+    }
-+  else
-+    {
-+      yytoken = YYTRANSLATE (yychar);
-+      YYDSYMPRINTF ("Next token is", yytoken, &yylval, &yylloc);
-+    }
-+
-+  /* If the proper action on seeing token YYTOKEN is to reduce or to
-+     detect an error, take that action.  */
-+  yyn += yytoken;
-+  if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
-+    goto yydefault;
-+  yyn = yytable[yyn];
-+  if (yyn <= 0)
-+    {
-+      if (yyn == 0 || yyn == YYTABLE_NINF)
-+	goto yyerrlab;
-+      yyn = -yyn;
-+      goto yyreduce;
-+    }
-+
-+  if (yyn == YYFINAL)
-+    YYACCEPT;
-+
-+  /* Shift the lookahead token.  */
-+  YYDPRINTF ((stderr, "Shifting token %s, ", yytname[yytoken]));
-+
-+  /* Discard the token being shifted unless it is eof.  */
-+  if (yychar != YYEOF)
-+    yychar = YYEMPTY;
-+
-+  *++yyvsp = yylval;
-+
-+
-+  /* Count tokens shifted since error; after three, turn off error
-+     status.  */
-+  if (yyerrstatus)
-+    yyerrstatus--;
-+
-+  yystate = yyn;
-+  goto yynewstate;
-+
-+
-+/*-----------------------------------------------------------.
-+| yydefault -- do the default action for the current state.  |
-+`-----------------------------------------------------------*/
-+yydefault:
-+  yyn = yydefact[yystate];
-+  if (yyn == 0)
-+    goto yyerrlab;
-+  goto yyreduce;
-+
-+
-+/*-----------------------------.
-+| yyreduce -- Do a reduction.  |
-+`-----------------------------*/
-+yyreduce:
-+  /* yyn is the number of a rule to reduce with.  */
-+  yylen = yyr2[yyn];
-+
-+  /* If YYLEN is nonzero, implement the default value of the action:
-+     `$$ = $1'.
-+
-+     Otherwise, the following line sets YYVAL to garbage.
-+     This behavior is undocumented and Bison
-+     users should not rely upon it.  Assigning to YYVAL
-+     unconditionally makes the parser a bit smaller, and it avoids a
-+     GCC warning that YYVAL may be used uninitialized.  */
-+  yyval = yyvsp[1-yylen];
-+
-+
-+  YY_REDUCE_PRINT (yyn);
-+  switch (yyn)
-+    {
-+        case 8:
-+
-+    { zconfprint("unexpected 'endmenu' statement"); ;}
-+    break;
-+
-+  case 9:
-+
-+    { zconfprint("unexpected 'endif' statement"); ;}
-+    break;
-+
-+  case 10:
-+
-+    { zconfprint("unexpected 'endchoice' statement"); ;}
-+    break;
-+
-+  case 11:
-+
-+    { zconfprint("syntax error"); yyerrok; ;}
-+    break;
-+
-+  case 18:
-+
-+    {
-+	struct symbol *sym = sym_lookup(yyvsp[-1].string, 0);
-+	sym->flags |= SYMBOL_OPTIONAL;
-+	menu_add_entry(sym);
-+	printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), yyvsp[-1].string);
-+;}
-+    break;
-+
-+  case 19:
-+
-+    {
-+	menu_end_entry();
-+	printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 20:
-+
-+    {
-+	struct symbol *sym = sym_lookup(yyvsp[-1].string, 0);
-+	sym->flags |= SYMBOL_OPTIONAL;
-+	menu_add_entry(sym);
-+	printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), yyvsp[-1].string);
-+;}
-+    break;
-+
-+  case 21:
-+
-+    {
-+	if (current_entry->prompt)
-+		current_entry->prompt->type = P_MENU;
-+	else
-+		zconfprint("warning: menuconfig statement without prompt");
-+	menu_end_entry();
-+	printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 27:
-+
-+    {
-+	menu_set_type(S_TRISTATE);
-+	printd(DEBUG_PARSE, "%s:%d:tristate\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 28:
-+
-+    {
-+	menu_add_expr(P_DEFAULT, yyvsp[-2].expr, yyvsp[-1].expr);
-+	menu_set_type(S_TRISTATE);
-+	printd(DEBUG_PARSE, "%s:%d:def_boolean\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 29:
-+
-+    {
-+	menu_set_type(S_BOOLEAN);
-+	printd(DEBUG_PARSE, "%s:%d:boolean\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 30:
-+
-+    {
-+	menu_add_expr(P_DEFAULT, yyvsp[-2].expr, yyvsp[-1].expr);
-+	menu_set_type(S_BOOLEAN);
-+	printd(DEBUG_PARSE, "%s:%d:def_boolean\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 31:
-+
-+    {
-+	menu_set_type(S_INT);
-+	printd(DEBUG_PARSE, "%s:%d:int\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 32:
-+
-+    {
-+	menu_set_type(S_HEX);
-+	printd(DEBUG_PARSE, "%s:%d:hex\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 33:
-+
-+    {
-+	menu_set_type(S_STRING);
-+	printd(DEBUG_PARSE, "%s:%d:string\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 34:
-+
-+    {
-+	menu_add_prompt(P_PROMPT, yyvsp[-2].string, yyvsp[-1].expr);
-+	printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 35:
-+
-+    {
-+	menu_add_expr(P_DEFAULT, yyvsp[-2].expr, yyvsp[-1].expr);
-+	printd(DEBUG_PARSE, "%s:%d:default\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 36:
-+
-+    {
-+	menu_add_symbol(P_SELECT, sym_lookup(yyvsp[-2].string, 0), yyvsp[-1].expr);
-+	printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 37:
-+
-+    {
-+	menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,yyvsp[-3].symbol, yyvsp[-2].symbol), yyvsp[-1].expr);
-+	printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 38:
-+
-+    {
-+	struct symbol *sym = sym_lookup(NULL, 0);
-+	sym->flags |= SYMBOL_CHOICE;
-+	menu_add_entry(sym);
-+	menu_add_expr(P_CHOICE, NULL, NULL);
-+	printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 39:
-+
-+    {
-+	menu_end_entry();
-+	menu_add_menu();
-+;}
-+    break;
-+
-+  case 40:
-+
-+    {
-+	if (zconf_endtoken(yyvsp[0].token, T_CHOICE, T_ENDCHOICE)) {
-+		menu_end_menu();
-+		printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno());
-+	}
-+;}
-+    break;
-+
-+  case 42:
-+
-+    {
-+	printf("%s:%d: missing 'endchoice' for this 'choice' statement\n", current_menu->file->name, current_menu->lineno);
-+	zconfnerrs++;
-+;}
-+    break;
-+
-+  case 48:
-+
-+    {
-+	menu_add_prompt(P_PROMPT, yyvsp[-2].string, yyvsp[-1].expr);
-+	printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 49:
-+
-+    {
-+	menu_set_type(S_TRISTATE);
-+	printd(DEBUG_PARSE, "%s:%d:tristate\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 50:
-+
-+    {
-+	menu_set_type(S_BOOLEAN);
-+	printd(DEBUG_PARSE, "%s:%d:boolean\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 51:
-+
-+    {
-+	current_entry->sym->flags |= SYMBOL_OPTIONAL;
-+	printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 52:
-+
-+    {
-+	menu_add_symbol(P_DEFAULT, sym_lookup(yyvsp[-2].string, 0), yyvsp[-1].expr);
-+	printd(DEBUG_PARSE, "%s:%d:default\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 55:
-+
-+    {
-+	printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
-+	menu_add_entry(NULL);
-+	menu_add_dep(yyvsp[-1].expr);
-+	menu_end_entry();
-+	menu_add_menu();
-+;}
-+    break;
-+
-+  case 56:
-+
-+    {
-+	if (zconf_endtoken(yyvsp[0].token, T_IF, T_ENDIF)) {
-+		menu_end_menu();
-+		printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno());
-+	}
-+;}
-+    break;
-+
-+  case 58:
-+
-+    {
-+	printf("%s:%d: missing 'endif' for this 'if' statement\n", current_menu->file->name, current_menu->lineno);
-+	zconfnerrs++;
-+;}
-+    break;
-+
-+  case 63:
-+
-+    {
-+	menu_add_entry(NULL);
-+	menu_add_prop(P_MENU, yyvsp[-1].string, NULL, NULL);
-+	printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 64:
-+
-+    {
-+	menu_end_entry();
-+	menu_add_menu();
-+;}
-+    break;
-+
-+  case 65:
-+
-+    {
-+	if (zconf_endtoken(yyvsp[0].token, T_MENU, T_ENDMENU)) {
-+		menu_end_menu();
-+		printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno());
-+	}
-+;}
-+    break;
-+
-+  case 67:
-+
-+    {
-+	printf("%s:%d: missing 'endmenu' for this 'menu' statement\n", current_menu->file->name, current_menu->lineno);
-+	zconfnerrs++;
-+;}
-+    break;
-+
-+  case 72:
-+
-+    { zconfprint("invalid menu option"); yyerrok; ;}
-+    break;
-+
-+  case 73:
-+
-+    {
-+	yyval.string = yyvsp[-1].string;
-+	printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), yyvsp[-1].string);
-+;}
-+    break;
-+
-+  case 74:
-+
-+    {
-+	zconf_nextfile(yyvsp[0].string);
-+;}
-+    break;
-+
-+  case 75:
-+
-+    {
-+	menu_add_entry(NULL);
-+	menu_add_prop(P_COMMENT, yyvsp[-1].string, NULL, NULL);
-+	printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 76:
-+
-+    {
-+	menu_end_entry();
-+;}
-+    break;
-+
-+  case 77:
-+
-+    {
-+	printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
-+	zconf_starthelp();
-+;}
-+    break;
-+
-+  case 78:
-+
-+    {
-+	current_entry->sym->help = yyvsp[0].string;
-+;}
-+    break;
-+
-+  case 82:
-+
-+    {
-+	menu_add_dep(yyvsp[-1].expr);
-+	printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 83:
-+
-+    {
-+	menu_add_dep(yyvsp[-1].expr);
-+	printd(DEBUG_PARSE, "%s:%d:depends\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 84:
-+
-+    {
-+	menu_add_dep(yyvsp[-1].expr);
-+	printd(DEBUG_PARSE, "%s:%d:requires\n", zconf_curname(), zconf_lineno());
-+;}
-+    break;
-+
-+  case 86:
-+
-+    {
-+	menu_add_prop(P_PROMPT, yyvsp[-1].string, NULL, yyvsp[0].expr);
-+;}
-+    break;
-+
-+  case 89:
-+
-+    { yyval.token = T_ENDMENU; ;}
-+    break;
-+
-+  case 90:
-+
-+    { yyval.token = T_ENDCHOICE; ;}
-+    break;
-+
-+  case 91:
-+
-+    { yyval.token = T_ENDIF; ;}
-+    break;
-+
-+  case 94:
-+
-+    { yyval.expr = NULL; ;}
-+    break;
-+
-+  case 95:
-+
-+    { yyval.expr = yyvsp[0].expr; ;}
-+    break;
-+
-+  case 96:
-+
-+    { yyval.expr = expr_alloc_symbol(yyvsp[0].symbol); ;}
-+    break;
-+
-+  case 97:
-+
-+    { yyval.expr = expr_alloc_comp(E_EQUAL, yyvsp[-2].symbol, yyvsp[0].symbol); ;}
-+    break;
-+
-+  case 98:
-+
-+    { yyval.expr = expr_alloc_comp(E_UNEQUAL, yyvsp[-2].symbol, yyvsp[0].symbol); ;}
-+    break;
-+
-+  case 99:
-+
-+    { yyval.expr = yyvsp[-1].expr; ;}
-+    break;
-+
-+  case 100:
-+
-+    { yyval.expr = expr_alloc_one(E_NOT, yyvsp[0].expr); ;}
-+    break;
-+
-+  case 101:
-+
-+    { yyval.expr = expr_alloc_two(E_OR, yyvsp[-2].expr, yyvsp[0].expr); ;}
-+    break;
-+
-+  case 102:
-+
-+    { yyval.expr = expr_alloc_two(E_AND, yyvsp[-2].expr, yyvsp[0].expr); ;}
-+    break;
-+
-+  case 103:
-+
-+    { yyval.symbol = sym_lookup(yyvsp[0].string, 0); free(yyvsp[0].string); ;}
-+    break;
-+
-+  case 104:
-+
-+    { yyval.symbol = sym_lookup(yyvsp[0].string, 1); free(yyvsp[0].string); ;}
-+    break;
-+
-+
-+    }
-+
-+/* Line 999 of yacc.c.  */
-+
-+
-+  yyvsp -= yylen;
-+  yyssp -= yylen;
-+
-+
-+  YY_STACK_PRINT (yyss, yyssp);
-+
-+  *++yyvsp = yyval;
-+
-+
-+  /* Now `shift' the result of the reduction.  Determine what state
-+     that goes to, based on the state we popped back to and the rule
-+     number reduced by.  */
-+
-+  yyn = yyr1[yyn];
-+
-+  yystate = yypgoto[yyn - YYNTOKENS] + *yyssp;
-+  if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp)
-+    yystate = yytable[yystate];
-+  else
-+    yystate = yydefgoto[yyn - YYNTOKENS];
-+
-+  goto yynewstate;
-+
-+
-+/*------------------------------------.
-+| yyerrlab -- here on detecting error |
-+`------------------------------------*/
-+yyerrlab:
-+  /* If not already recovering from an error, report this error.  */
-+  if (!yyerrstatus)
-+    {
-+      ++yynerrs;
-+#if YYERROR_VERBOSE
-+      yyn = yypact[yystate];
-+
-+      if (YYPACT_NINF < yyn && yyn < YYLAST)
-+	{
-+	  YYSIZE_T yysize = 0;
-+	  int yytype = YYTRANSLATE (yychar);
-+	  char *yymsg;
-+	  int yyx, yycount;
-+
-+	  yycount = 0;
-+	  /* Start YYX at -YYN if negative to avoid negative indexes in
-+	     YYCHECK.  */
-+	  for (yyx = yyn < 0 ? -yyn : 0;
-+	       yyx < (int) (sizeof (yytname) / sizeof (char *)); yyx++)
-+	    if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
-+	      yysize += yystrlen (yytname[yyx]) + 15, yycount++;
-+	  yysize += yystrlen ("syntax error, unexpected ") + 1;
-+	  yysize += yystrlen (yytname[yytype]);
-+	  yymsg = (char *) YYSTACK_ALLOC (yysize);
-+	  if (yymsg != 0)
-+	    {
-+	      char *yyp = yystpcpy (yymsg, "syntax error, unexpected ");
-+	      yyp = yystpcpy (yyp, yytname[yytype]);
-+
-+	      if (yycount < 5)
-+		{
-+		  yycount = 0;
-+		  for (yyx = yyn < 0 ? -yyn : 0;
-+		       yyx < (int) (sizeof (yytname) / sizeof (char *));
-+		       yyx++)
-+		    if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
-+		      {
-+			const char *yyq = ! yycount ? ", expecting " : " or ";
-+			yyp = yystpcpy (yyp, yyq);
-+			yyp = yystpcpy (yyp, yytname[yyx]);
-+			yycount++;
-+		      }
-+		}
-+	      yyerror (yymsg);
-+	      YYSTACK_FREE (yymsg);
-+	    }
-+	  else
-+	    yyerror ("syntax error; also virtual memory exhausted");
-+	}
-+      else
-+#endif /* YYERROR_VERBOSE */
-+	yyerror ("syntax error");
-+    }
-+
-+
-+
-+  if (yyerrstatus == 3)
-+    {
-+      /* If just tried and failed to reuse lookahead token after an
-+	 error, discard it.  */
-+
-+      /* Return failure if at end of input.  */
-+      if (yychar == YYEOF)
-+        {
-+	  /* Pop the error token.  */
-+          YYPOPSTACK;
-+	  /* Pop the rest of the stack.  */
-+	  while (yyss < yyssp)
-+	    {
-+	      YYDSYMPRINTF ("Error: popping", yystos[*yyssp], yyvsp, yylsp);
-+	      yydestruct (yystos[*yyssp], yyvsp);
-+	      YYPOPSTACK;
-+	    }
-+	  YYABORT;
-+        }
-+
-+      YYDSYMPRINTF ("Error: discarding", yytoken, &yylval, &yylloc);
-+      yydestruct (yytoken, &yylval);
-+      yychar = YYEMPTY;
-+
-+    }
-+
-+  /* Else will try to reuse lookahead token after shifting the error
-+     token.  */
-+  goto yyerrlab1;
-+
-+
-+/*----------------------------------------------------.
-+| yyerrlab1 -- error raised explicitly by an action.  |
-+`----------------------------------------------------*/
-+yyerrlab1:
-+  yyerrstatus = 3;	/* Each real token shifted decrements this.  */
-+
-+  for (;;)
-+    {
-+      yyn = yypact[yystate];
-+      if (yyn != YYPACT_NINF)
-+	{
-+	  yyn += YYTERROR;
-+	  if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
-+	    {
-+	      yyn = yytable[yyn];
-+	      if (0 < yyn)
-+		break;
-+	    }
-+	}
-+
-+      /* Pop the current state because it cannot handle the error token.  */
-+      if (yyssp == yyss)
-+	YYABORT;
-+
-+      YYDSYMPRINTF ("Error: popping", yystos[*yyssp], yyvsp, yylsp);
-+      yydestruct (yystos[yystate], yyvsp);
-+      yyvsp--;
-+      yystate = *--yyssp;
-+
-+      YY_STACK_PRINT (yyss, yyssp);
-+    }
-+
-+  if (yyn == YYFINAL)
-+    YYACCEPT;
-+
-+  YYDPRINTF ((stderr, "Shifting error token, "));
-+
-+  *++yyvsp = yylval;
-+
-+
-+  yystate = yyn;
-+  goto yynewstate;
-+
-+
-+/*-------------------------------------.
-+| yyacceptlab -- YYACCEPT comes here.  |
-+`-------------------------------------*/
-+yyacceptlab:
-+  yyresult = 0;
-+  goto yyreturn;
-+
-+/*-----------------------------------.
-+| yyabortlab -- YYABORT comes here.  |
-+`-----------------------------------*/
-+yyabortlab:
-+  yyresult = 1;
-+  goto yyreturn;
-+
-+#ifndef yyoverflow
-+/*----------------------------------------------.
-+| yyoverflowlab -- parser overflow comes here.  |
-+`----------------------------------------------*/
-+yyoverflowlab:
-+  yyerror ("parser stack overflow");
-+  yyresult = 2;
-+  /* Fall through.  */
-+#endif
-+
-+yyreturn:
-+#ifndef yyoverflow
-+  if (yyss != yyssa)
-+    YYSTACK_FREE (yyss);
-+#endif
-+  return yyresult;
-+}
-+
-+
-+
-+
-+
-+void conf_parse(const char *name)
-+{
-+	struct symbol *sym;
-+	int i;
-+
-+	zconf_initscan(name);
-+
-+	sym_init();
-+	menu_init();
-+	modules_sym = sym_lookup("MODULES", 0);
-+	rootmenu.prompt = menu_add_prop(P_MENU, "Linux Kernel Configuration", NULL, NULL);
-+
-+	//zconfdebug = 1;
-+	zconfparse();
-+	if (zconfnerrs)
-+		exit(1);
-+	menu_finalize(&rootmenu);
-+	for_all_symbols(i, sym) {
-+                if (!(sym->flags & SYMBOL_CHECKED) && sym_check_deps(sym))
-+                        printf("\n");
-+		else
-+			sym->flags |= SYMBOL_CHECK_DONE;
-+        }
-+
-+	sym_change_count = 1;
-+}
-+
-+const char *zconf_tokenname(int token)
-+{
-+	switch (token) {
-+	case T_MENU:		return "menu";
-+	case T_ENDMENU:		return "endmenu";
-+	case T_CHOICE:		return "choice";
-+	case T_ENDCHOICE:	return "endchoice";
-+	case T_IF:		return "if";
-+	case T_ENDIF:		return "endif";
-+	}
-+	return "<token>";
-+}
-+
-+static bool zconf_endtoken(int token, int starttoken, int endtoken)
-+{
-+	if (token != endtoken) {
-+		zconfprint("unexpected '%s' within %s block", zconf_tokenname(token), zconf_tokenname(starttoken));
-+		zconfnerrs++;
-+		return false;
-+	}
-+	if (current_menu->file != current_file) {
-+		zconfprint("'%s' in different file than '%s'", zconf_tokenname(token), zconf_tokenname(starttoken));
-+		zconfprint("location of the '%s'", zconf_tokenname(starttoken));
-+		zconfnerrs++;
-+		return false;
-+	}
-+	return true;
-+}
-+
-+static void zconfprint(const char *err, ...)
-+{
-+	va_list ap;
-+
-+	fprintf(stderr, "%s:%d: ", zconf_curname(), zconf_lineno() + 1);
-+	va_start(ap, err);
-+	vfprintf(stderr, err, ap);
-+	va_end(ap);
-+	fprintf(stderr, "\n");
-+}
-+
-+static void zconferror(const char *err)
-+{
-+	fprintf(stderr, "%s:%d: %s\n", zconf_curname(), zconf_lineno() + 1, err);
-+}
-+
-+void print_quoted_string(FILE *out, const char *str)
-+{
-+	const char *p;
-+	int len;
-+
-+	putc('"', out);
-+	while ((p = strchr(str, '"'))) {
-+		len = p - str;
-+		if (len)
-+			fprintf(out, "%.*s", len, str);
-+		fputs("\\\"", out);
-+		str = p + 1;
-+	}
-+	fputs(str, out);
-+	putc('"', out);
-+}
-+
-+void print_symbol(FILE *out, struct menu *menu)
-+{
-+	struct symbol *sym = menu->sym;
-+	struct property *prop;
-+
-+	if (sym_is_choice(sym))
-+		fprintf(out, "choice\n");
-+	else
-+		fprintf(out, "config %s\n", sym->name);
-+	switch (sym->type) {
-+	case S_BOOLEAN:
-+		fputs("  boolean\n", out);
-+		break;
-+	case S_TRISTATE:
-+		fputs("  tristate\n", out);
-+		break;
-+	case S_STRING:
-+		fputs("  string\n", out);
-+		break;
-+	case S_INT:
-+		fputs("  integer\n", out);
-+		break;
-+	case S_HEX:
-+		fputs("  hex\n", out);
-+		break;
-+	default:
-+		fputs("  ???\n", out);
-+		break;
-+	}
-+	for (prop = sym->prop; prop; prop = prop->next) {
-+		if (prop->menu != menu)
-+			continue;
-+		switch (prop->type) {
-+		case P_PROMPT:
-+			fputs("  prompt ", out);
-+			print_quoted_string(out, prop->text);
-+			if (!expr_is_yes(prop->visible.expr)) {
-+				fputs(" if ", out);
-+				expr_fprint(prop->visible.expr, out);
-+			}
-+			fputc('\n', out);
-+			break;
-+		case P_DEFAULT:
-+			fputs( "  default ", out);
-+			expr_fprint(prop->expr, out);
-+			if (!expr_is_yes(prop->visible.expr)) {
-+				fputs(" if ", out);
-+				expr_fprint(prop->visible.expr, out);
-+			}
-+			fputc('\n', out);
-+			break;
-+		case P_CHOICE:
-+			fputs("  #choice value\n", out);
-+			break;
-+		default:
-+			fprintf(out, "  unknown prop %d!\n", prop->type);
-+			break;
-+		}
-+	}
-+	if (sym->help) {
-+		int len = strlen(sym->help);
-+		while (sym->help[--len] == '\n')
-+			sym->help[len] = 0;
-+		fprintf(out, "  help\n%s\n", sym->help);
-+	}
-+	fputc('\n', out);
-+}
-+
-+void zconfdump(FILE *out)
-+{
-+	struct property *prop;
-+	struct symbol *sym;
-+	struct menu *menu;
-+
-+	menu = rootmenu.list;
-+	while (menu) {
-+		if ((sym = menu->sym))
-+			print_symbol(out, menu);
-+		else if ((prop = menu->prompt)) {
-+			switch (prop->type) {
-+			case P_COMMENT:
-+				fputs("\ncomment ", out);
-+				print_quoted_string(out, prop->text);
-+				fputs("\n", out);
-+				break;
-+			case P_MENU:
-+				fputs("\nmenu ", out);
-+				print_quoted_string(out, prop->text);
-+				fputs("\n", out);
-+				break;
-+			default:
-+				;
-+			}
-+			if (!expr_is_yes(prop->visible.expr)) {
-+				fputs("  depends ", out);
-+				expr_fprint(prop->visible.expr, out);
-+				fputc('\n', out);
-+			}
-+			fputs("\n", out);
-+		}
-+
-+		if (menu->list)
-+			menu = menu->list;
-+		else if (menu->next)
-+			menu = menu->next;
-+		else while ((menu = menu->parent)) {
-+			if (menu->prompt && menu->prompt->type == P_MENU)
-+				fputs("\nendmenu\n", out);
-+			if (menu->next) {
-+				menu = menu->next;
-+				break;
-+			}
-+		}
-+	}
-+}
-+
-+#include "lex.zconf.c"
-+#include "util.c"
-+#include "confdata.c"
-+#include "expr.c"
-+#include "symbol.c"
-+#include "menu.c"
-+
-+
-diff -Nurp pristine-linux-2.6.12/scripts/kconfig/zconf.tab.h linux-2.6.12-xen/scripts/kconfig/zconf.tab.h
---- pristine-linux-2.6.12/scripts/kconfig/zconf.tab.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/kconfig/zconf.tab.h	2006-02-25 00:12:50.150525416 +0100
-@@ -0,0 +1,125 @@
-+/* A Bison parser, made from zconf.y, by GNU bison 1.75.  */
-+
-+/* Skeleton parser for Yacc-like parsing with Bison,
-+   Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002 Free Software Foundation, Inc.
-+
-+   This program is free software; you can redistribute it and/or modify
-+   it under the terms of the GNU General Public License as published by
-+   the Free Software Foundation; either version 2, or (at your option)
-+   any later version.
-+
-+   This program is distributed in the hope that it will be useful,
-+   but WITHOUT ANY WARRANTY; without even the implied warranty of
-+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+   GNU General Public License for more details.
-+
-+   You should have received a copy of the GNU General Public License
-+   along with this program; if not, write to the Free Software
-+   Foundation, Inc., 59 Temple Place - Suite 330,
-+   Boston, MA 02111-1307, USA.  */
-+
-+/* As a special exception, when this file is copied by Bison into a
-+   Bison output file, you may use that output file without restriction.
-+   This special exception was added by the Free Software Foundation
-+   in version 1.24 of Bison.  */
-+
-+#ifndef BISON_ZCONF_TAB_H
-+# define BISON_ZCONF_TAB_H
-+
-+/* Tokens.  */
-+#ifndef YYTOKENTYPE
-+# define YYTOKENTYPE
-+   /* Put the tokens into the symbol table, so that GDB and other debuggers
-+      know about them.  */
-+   enum yytokentype {
-+     T_MAINMENU = 258,
-+     T_MENU = 259,
-+     T_ENDMENU = 260,
-+     T_SOURCE = 261,
-+     T_CHOICE = 262,
-+     T_ENDCHOICE = 263,
-+     T_COMMENT = 264,
-+     T_CONFIG = 265,
-+     T_HELP = 266,
-+     T_HELPTEXT = 267,
-+     T_IF = 268,
-+     T_ENDIF = 269,
-+     T_DEPENDS = 270,
-+     T_REQUIRES = 271,
-+     T_OPTIONAL = 272,
-+     T_PROMPT = 273,
-+     T_DEFAULT = 274,
-+     T_TRISTATE = 275,
-+     T_BOOLEAN = 276,
-+     T_INT = 277,
-+     T_HEX = 278,
-+     T_WORD = 279,
-+     T_STRING = 280,
-+     T_UNEQUAL = 281,
-+     T_EOF = 282,
-+     T_EOL = 283,
-+     T_CLOSE_PAREN = 284,
-+     T_OPEN_PAREN = 285,
-+     T_ON = 286,
-+     T_OR = 287,
-+     T_AND = 288,
-+     T_EQUAL = 289,
-+     T_NOT = 290
-+   };
-+#endif
-+#define T_MAINMENU 258
-+#define T_MENU 259
-+#define T_ENDMENU 260
-+#define T_SOURCE 261
-+#define T_CHOICE 262
-+#define T_ENDCHOICE 263
-+#define T_COMMENT 264
-+#define T_CONFIG 265
-+#define T_HELP 266
-+#define T_HELPTEXT 267
-+#define T_IF 268
-+#define T_ENDIF 269
-+#define T_DEPENDS 270
-+#define T_REQUIRES 271
-+#define T_OPTIONAL 272
-+#define T_PROMPT 273
-+#define T_DEFAULT 274
-+#define T_TRISTATE 275
-+#define T_BOOLEAN 276
-+#define T_INT 277
-+#define T_HEX 278
-+#define T_WORD 279
-+#define T_STRING 280
-+#define T_UNEQUAL 281
-+#define T_EOF 282
-+#define T_EOL 283
-+#define T_CLOSE_PAREN 284
-+#define T_OPEN_PAREN 285
-+#define T_ON 286
-+#define T_OR 287
-+#define T_AND 288
-+#define T_EQUAL 289
-+#define T_NOT 290
-+
-+
-+
-+
-+#ifndef YYSTYPE
-+#line 33 "zconf.y"
-+typedef union {
-+	int token;
-+	char *string;
-+	struct symbol *symbol;
-+	struct expr *expr;
-+	struct menu *menu;
-+} yystype;
-+/* Line 1281 of /usr/share/bison/yacc.c.  */
-+#line 118 "zconf.tab.h"
-+# define YYSTYPE yystype
-+#endif
-+
-+extern YYSTYPE zconflval;
-+
-+
-+#endif /* not BISON_ZCONF_TAB_H */
-+
-Binary files pristine-linux-2.6.12/scripts/kconfig/zconf.tab.o and linux-2.6.12-xen/scripts/kconfig/zconf.tab.o differ.
-diff -Nurp pristine-linux-2.6.12/scripts/kconfig/.zconf.tab.o.cmd linux-2.6.12-xen/scripts/kconfig/.zconf.tab.o.cmd
---- pristine-linux-2.6.12/scripts/kconfig/.zconf.tab.o.cmd	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.12-xen/scripts/kconfig/.zconf.tab.o.cmd	2006-02-25 00:12:54.450877167 +0100
-@@ -0,0 +1,80 @@
-+cmd_scripts/kconfig/zconf.tab.o := gcc -Wp,-MD,scripts/kconfig/.zconf.tab.o.d -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer      -Iscripts/kconfig -c -o scripts/kconfig/zconf.tab.o scripts/kconfig/zconf.tab.c
-+
-+deps_scripts/kconfig/zconf.tab.o := \
-+  scripts/kconfig/zconf.tab.c \
-+  /usr/include/ctype.h \
-+  /usr/include/features.h \
-+  /usr/include/sys/cdefs.h \
-+  /usr/include/gnu/stubs.h \
-+  /usr/include/bits/types.h \
-+  /usr/include/bits/wordsize.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stddef.h \
-+  /usr/include/bits/typesizes.h \
-+  /usr/include/endian.h \
-+  /usr/include/bits/endian.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdarg.h \
-+  /usr/include/stdio.h \
-+  /usr/include/libio.h \
-+  /usr/include/_G_config.h \
-+  /usr/include/wchar.h \
-+  /usr/include/bits/wchar.h \
-+  /usr/include/gconv.h \
-+  /usr/include/bits/stdio_lim.h \
-+  /usr/include/bits/sys_errlist.h \
-+  /usr/include/bits/stdio.h \
-+  /usr/include/stdlib.h \
-+  /usr/include/sys/types.h \
-+  /usr/include/time.h \
-+  /usr/include/sys/select.h \
-+  /usr/include/bits/select.h \
-+  /usr/include/bits/sigset.h \
-+  /usr/include/bits/time.h \
-+  /usr/include/sys/sysmacros.h \
-+  /usr/include/bits/pthreadtypes.h \
-+  /usr/include/bits/sched.h \
-+  /usr/include/alloca.h \
-+  /usr/include/string.h \
-+  /usr/include/bits/string.h \
-+  /usr/include/bits/string2.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/stdbool.h \
-+  scripts/kconfig/lkc.h \
-+  scripts/kconfig/expr.h \
-+  /usr/include/libintl.h \
-+  /usr/include/locale.h \
-+  /usr/include/bits/locale.h \
-+  scripts/kconfig/lkc_proto.h \
-+  scripts/kconfig/lex.zconf.c \
-+  /usr/include/errno.h \
-+  /usr/include/bits/errno.h \
-+  /usr/include/linux/errno.h \
-+  /usr/include/asm/errno.h \
-+  /usr/include/asm-i486/errno.h \
-+  /usr/include/asm-generic/errno.h \
-+  /usr/include/asm-generic/errno-base.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/limits.h \
-+  /usr/lib/gcc/i486-linux-gnu/4.0.3/include/syslimits.h \
-+  /usr/include/limits.h \
-+  /usr/include/bits/posix1_lim.h \
-+  /usr/include/bits/local_lim.h \
-+  /usr/include/linux/limits.h \
-+  /usr/include/bits/posix2_lim.h \
-+  /usr/include/unistd.h \
-+  /usr/include/bits/posix_opt.h \
-+  /usr/include/bits/confname.h \
-+  /usr/include/getopt.h \
-+  scripts/kconfig/util.c \
-+  scripts/kconfig/confdata.c \
-+    $(wildcard include/config/.h) \
-+    $(wildcard include/config/notimestamp.h) \
-+  /usr/include/sys/stat.h \
-+  /usr/include/bits/stat.h \
-+  scripts/kconfig/expr.c \
-+  scripts/kconfig/symbol.c \
-+  /usr/include/regex.h \
-+  /usr/include/sys/utsname.h \
-+  /usr/include/bits/utsname.h \
-+  scripts/kconfig/menu.c \
-+
-+scripts/kconfig/zconf.tab.o: $(deps_scripts/kconfig/zconf.tab.o)
-+
-+$(deps_scripts/kconfig/zconf.tab.o):
-diff -Nurp pristine-linux-2.6.12/security/keys/keyring.c linux-2.6.12-xen/security/keys/keyring.c
---- pristine-linux-2.6.12/security/keys/keyring.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/security/keys/keyring.c	2006-02-25 00:12:33.785992137 +0100
-@@ -188,7 +188,11 @@ static void keyring_destroy(struct key *
- 
- 	if (keyring->description) {
- 		write_lock(&keyring_name_lock);
--		list_del(&keyring->type_data.link);
-+
-+		if (keyring->type_data.link.next != NULL &&
-+		    !list_empty(&keyring->type_data.link))
-+			list_del(&keyring->type_data.link);
-+
- 		write_unlock(&keyring_name_lock);
- 	}
- 
-diff -Nurp pristine-linux-2.6.12/security/keys/process_keys.c linux-2.6.12-xen/security/keys/process_keys.c
---- pristine-linux-2.6.12/security/keys/process_keys.c	2005-06-17 21:48:29.000000000 +0200
-+++ linux-2.6.12-xen/security/keys/process_keys.c	2006-02-25 00:12:33.786991986 +0100
-@@ -641,7 +641,7 @@ long join_session_keyring(const char *na
- 		keyring = keyring_alloc(name, tsk->uid, tsk->gid, 0, NULL);
- 		if (IS_ERR(keyring)) {
- 			ret = PTR_ERR(keyring);
--			goto error;
-+			goto error2;
- 		}
- 	}
- 	else if (IS_ERR(keyring)) {



