[kernel] r18294 - in dists/trunk/linux-2.6/debian: . config config/amd64 patches/features/all/rt patches/series

Uwe Kleine-König <ukleinek-guest@alioth.debian.org>
Thu Nov 17 07:50:34 UTC 2011


Author: ukleinek-guest
Date: Thu Nov 17 07:50:32 2011
New Revision: 18294

Log:
[amd64] reenable rt featureset with 3.2-rc1-52e4c2a05-rt2

Added:
   dists/trunk/linux-2.6/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05-rt2.patch
   dists/trunk/linux-2.6/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05.patch
   dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra
Modified:
   dists/trunk/linux-2.6/debian/changelog
   dists/trunk/linux-2.6/debian/config/amd64/defines
   dists/trunk/linux-2.6/debian/config/defines

Modified: dists/trunk/linux-2.6/debian/changelog
==============================================================================
--- dists/trunk/linux-2.6/debian/changelog	Thu Nov 17 07:50:22 2011	(r18293)
+++ dists/trunk/linux-2.6/debian/changelog	Thu Nov 17 07:50:32 2011	(r18294)
@@ -1,7 +1,11 @@
 linux-2.6 (3.2~rc1-1~experimental.1) UNRELEASED; urgency=low
 
+  [ Ben Hutchings ]
   * New upstream release candidate
 
+  [ Uwe Kleine-König ]
+  * [amd64] reenable rt featureset with 3.2-rc1-52e4c2a05-rt2
+
  -- Ben Hutchings <ben at decadent.org.uk>  Mon, 14 Nov 2011 15:21:10 +0000
 
 linux-2.6 (3.1.1-1) unstable; urgency=high

Modified: dists/trunk/linux-2.6/debian/config/amd64/defines
==============================================================================
--- dists/trunk/linux-2.6/debian/config/amd64/defines	Thu Nov 17 07:50:22 2011	(r18293)
+++ dists/trunk/linux-2.6/debian/config/amd64/defines	Thu Nov 17 07:50:32 2011	(r18294)
@@ -1,6 +1,6 @@
 [base]
-#featuresets:
-# rt
+featuresets:
+ rt
 flavours:
  amd64
 kernel-arch: x86

Modified: dists/trunk/linux-2.6/debian/config/defines
==============================================================================
--- dists/trunk/linux-2.6/debian/config/defines	Thu Nov 17 07:50:22 2011	(r18293)
+++ dists/trunk/linux-2.6/debian/config/defines	Thu Nov 17 07:50:32 2011	(r18294)
@@ -51,4 +51,4 @@
 s390-tools: s390-tools (>= 1.8.3-2~)
 
 [featureset-rt]
-enabled: false
+enabled: true

Added: dists/trunk/linux-2.6/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05-rt2.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux-2.6/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05-rt2.patch	Thu Nov 17 07:50:32 2011	(r18294)
@@ -0,0 +1,19675 @@
+Index: linux-3.2/arch/x86/kernel/apic/apic.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/apic/apic.c
++++ linux-3.2/arch/x86/kernel/apic/apic.c
+@@ -857,8 +857,8 @@ void __irq_entry smp_apic_timer_interrup
+ 	 * Besides, if we don't timer interrupts ignore the global
+ 	 * interrupt lock, which is the WrongThing (tm) to do.
+ 	 */
+-	exit_idle();
+ 	irq_enter();
++	exit_idle();
+ 	local_apic_timer_interrupt();
+ 	irq_exit();
+ 
+@@ -1790,8 +1790,8 @@ void smp_spurious_interrupt(struct pt_re
+ {
+ 	u32 v;
+ 
+-	exit_idle();
+ 	irq_enter();
++	exit_idle();
+ 	/*
+ 	 * Check if this really is a spurious interrupt and ACK it
+ 	 * if it is a vectored one.  Just in case...
+@@ -1827,8 +1827,8 @@ void smp_error_interrupt(struct pt_regs 
+ 		"Illegal register address",	/* APIC Error Bit 7 */
+ 	};
+ 
+-	exit_idle();
+ 	irq_enter();
++	exit_idle();
+ 	/* First tickle the hardware, only then report what went on. -- REW */
+ 	v0 = apic_read(APIC_ESR);
+ 	apic_write(APIC_ESR, 0);
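
The apic.c hunks above (and the matching ones in io_apic.c, therm_throt.c, threshold.c
and irq.c below) all make the same change: irq_enter() is called before exit_idle(), so
the idle-exit hooks run only after interrupt context has been entered. A minimal sketch
of the resulting entry ordering; the handler name and body are made up, only the
irq_enter()/exit_idle()/irq_exit() helpers are the kernel's own:

	/* Sketch only -- illustrative handler, not part of the patch. */
	void smp_example_interrupt(struct pt_regs *regs)
	{
		irq_enter();	/* enter interrupt context first */
		exit_idle();	/* then run the idle-exit notifiers */
		/* handler-specific work goes here */
		irq_exit();
	}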
+Index: linux-3.2/arch/x86/kernel/apic/io_apic.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/apic/io_apic.c
++++ linux-3.2/arch/x86/kernel/apic/io_apic.c
+@@ -2418,8 +2418,8 @@ asmlinkage void smp_irq_move_cleanup_int
+ 	unsigned vector, me;
+ 
+ 	ack_APIC_irq();
+-	exit_idle();
+ 	irq_enter();
++	exit_idle();
+ 
+ 	me = smp_processor_id();
+ 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+@@ -2518,7 +2518,8 @@ static void ack_apic_level(struct irq_da
+ 	irq_complete_move(cfg);
+ #ifdef CONFIG_GENERIC_PENDING_IRQ
+ 	/* If we are moving the irq we need to mask it */
+-	if (unlikely(irqd_is_setaffinity_pending(data))) {
++	if (unlikely(irqd_is_setaffinity_pending(data) &&
++		     !irqd_irq_inprogress(data))) {
+ 		do_unmask_irq = 1;
+ 		mask_ioapic(cfg);
+ 	}
+Index: linux-3.2/arch/x86/kernel/cpu/mcheck/therm_throt.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ linux-3.2/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -397,8 +397,8 @@ static void (*smp_thermal_vector)(void) 
+ 
+ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+ {
+-	exit_idle();
+ 	irq_enter();
++	exit_idle();
+ 	inc_irq_stat(irq_thermal_count);
+ 	smp_thermal_vector();
+ 	irq_exit();
+Index: linux-3.2/arch/x86/kernel/cpu/mcheck/threshold.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/cpu/mcheck/threshold.c
++++ linux-3.2/arch/x86/kernel/cpu/mcheck/threshold.c
+@@ -19,8 +19,8 @@ void (*mce_threshold_vector)(void) = def
+ 
+ asmlinkage void smp_threshold_interrupt(void)
+ {
+-	exit_idle();
+ 	irq_enter();
++	exit_idle();
+ 	inc_irq_stat(irq_threshold_count);
+ 	mce_threshold_vector();
+ 	irq_exit();
+Index: linux-3.2/arch/x86/kernel/irq.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/irq.c
++++ linux-3.2/arch/x86/kernel/irq.c
+@@ -181,8 +181,8 @@ unsigned int __irq_entry do_IRQ(struct p
+ 	unsigned vector = ~regs->orig_ax;
+ 	unsigned irq;
+ 
+-	exit_idle();
+ 	irq_enter();
++	exit_idle();
+ 
+ 	irq = __this_cpu_read(vector_irq[vector]);
+ 
+@@ -209,10 +209,10 @@ void smp_x86_platform_ipi(struct pt_regs
+ 
+ 	ack_APIC_irq();
+ 
+-	exit_idle();
+-
+ 	irq_enter();
+ 
++	exit_idle();
++
+ 	inc_irq_stat(x86_platform_ipis);
+ 
+ 	if (x86_platform_ipi_callback)
+Index: linux-3.2/drivers/gpu/drm/drm_irq.c
+===================================================================
+--- linux-3.2.orig/drivers/gpu/drm/drm_irq.c
++++ linux-3.2/drivers/gpu/drm/drm_irq.c
+@@ -110,10 +110,7 @@ static void vblank_disable_and_save(stru
+ 	/* Prevent vblank irq processing while disabling vblank irqs,
+ 	 * so no updates of timestamps or count can happen after we've
+ 	 * disabled. Needed to prevent races in case of delayed irq's.
+-	 * Disable preemption, so vblank_time_lock is held as short as
+-	 * possible, even under a kernel with PREEMPT_RT patches.
+ 	 */
+-	preempt_disable();
+ 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+ 
+ 	dev->driver->disable_vblank(dev, crtc);
+@@ -164,7 +161,6 @@ static void vblank_disable_and_save(stru
+ 	clear_vblank_timestamps(dev, crtc);
+ 
+ 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+-	preempt_enable();
+ }
+ 
+ static void vblank_disable_fn(unsigned long arg)
+@@ -889,10 +885,6 @@ int drm_vblank_get(struct drm_device *de
+ 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ 	/* Going from 0->1 means we have to enable interrupts again */
+ 	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+-		/* Disable preemption while holding vblank_time_lock. Do
+-		 * it explicitely to guard against PREEMPT_RT kernel.
+-		 */
+-		preempt_disable();
+ 		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
+ 		if (!dev->vblank_enabled[crtc]) {
+ 			/* Enable vblank irqs under vblank_time_lock protection.
+@@ -912,7 +904,6 @@ int drm_vblank_get(struct drm_device *de
+ 			}
+ 		}
+ 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+-		preempt_enable();
+ 	} else {
+ 		if (!dev->vblank_enabled[crtc]) {
+ 			atomic_dec(&dev->vblank_refcount[crtc]);
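
The drm_irq.c hunks drop the explicit preempt_disable()/preempt_enable() pairs around
dev->vblank_time_lock. The presumable motivation: on a PREEMPT_RT kernel spinlock_t is a
sleeping lock, so taking it inside an explicitly preempt-disabled region would mean
sleeping in atomic context, while spin_lock_irqsave()/spin_unlock_irqrestore() on its own
already gives the needed protection on both RT and non-RT kernels. Reduced to its
essentials, the pattern after the patch looks like this (sketch, field names taken from
the hunks above):

	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
	/* ... disable vblank irqs, update timestamps and counts ... */
	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);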
+Index: linux-3.2/arch/x86/kernel/kprobes.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/kprobes.c
++++ linux-3.2/arch/x86/kernel/kprobes.c
+@@ -478,7 +478,6 @@ static void __kprobes setup_singlestep(s
+ 		 * stepping.
+ 		 */
+ 		regs->ip = (unsigned long)p->ainsn.insn;
+-		preempt_enable_no_resched();
+ 		return;
+ 	}
+ #endif
+Index: linux-3.2/arch/x86/kernel/hpet.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/hpet.c
++++ linux-3.2/arch/x86/kernel/hpet.c
+@@ -9,6 +9,7 @@
+ #include <linux/slab.h>
+ #include <linux/hpet.h>
+ #include <linux/init.h>
++#include <linux/dmi.h>
+ #include <linux/cpu.h>
+ #include <linux/pm.h>
+ #include <linux/io.h>
+@@ -568,6 +569,30 @@ static void init_one_hpet_msi_clockevent
+ #define RESERVE_TIMERS 0
+ #endif
+ 
++static int __init dmi_disable_hpet_msi(const struct dmi_system_id *d)
++{
++	hpet_msi_disable = 1;
++	return 0;
++}
++
++static struct dmi_system_id __initdata dmi_hpet_table[] = {
++	/*
++	 * MSI based per cpu timers lose interrupts when intel_idle()
++	 * is enabled - independent of the c-state. With idle=poll the
++	 * problem cannot be observed. We have no idea yet, whether
++	 * this is a W510 specific issue or a general chipset oddity.
++	 */
++	{
++	 .callback = dmi_disable_hpet_msi,
++	 .ident = "Lenovo W510",
++	 .matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
++		     },
++	 },
++	{}
++};
++
+ static void hpet_msi_capability_lookup(unsigned int start_timer)
+ {
+ 	unsigned int id;
+@@ -575,6 +600,8 @@ static void hpet_msi_capability_lookup(u
+ 	unsigned int num_timers_used = 0;
+ 	int i;
+ 
++	dmi_check_system(dmi_hpet_table);
++
+ 	if (hpet_msi_disable)
+ 		return;
+ 
+Index: linux-3.2/block/blk-core.c
+===================================================================
+--- linux-3.2.orig/block/blk-core.c
++++ linux-3.2/block/blk-core.c
+@@ -235,7 +235,7 @@ EXPORT_SYMBOL(blk_delay_queue);
+  **/
+ void blk_start_queue(struct request_queue *q)
+ {
+-	WARN_ON(!irqs_disabled());
++	WARN_ON_NONRT(!irqs_disabled());
+ 
+ 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ 	__blk_run_queue(q);
+@@ -300,7 +300,11 @@ void __blk_run_queue(struct request_queu
+ {
+ 	if (unlikely(blk_queue_stopped(q)))
+ 		return;
+-
++	/*
++	 * q->request_fn() can drop q->queue_lock and reenable
++	 * interrupts, but must return with q->queue_lock held and
++	 * interrupts disabled.
++	 */
+ 	q->request_fn(q);
+ }
+ EXPORT_SYMBOL(__blk_run_queue);
+@@ -2742,11 +2746,11 @@ static void queue_unplugged(struct reque
+ 	 * this lock).
+ 	 */
+ 	if (from_schedule) {
+-		spin_unlock(q->queue_lock);
++		spin_unlock_irq(q->queue_lock);
+ 		blk_run_queue_async(q);
+ 	} else {
+ 		__blk_run_queue(q);
+-		spin_unlock(q->queue_lock);
++		spin_unlock_irq(q->queue_lock);
+ 	}
+ 
+ }
+@@ -2772,7 +2776,6 @@ static void flush_plug_callbacks(struct 
+ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ {
+ 	struct request_queue *q;
+-	unsigned long flags;
+ 	struct request *rq;
+ 	LIST_HEAD(list);
+ 	unsigned int depth;
+@@ -2793,11 +2796,6 @@ void blk_flush_plug_list(struct blk_plug
+ 	q = NULL;
+ 	depth = 0;
+ 
+-	/*
+-	 * Save and disable interrupts here, to avoid doing it for every
+-	 * queue lock we have to take.
+-	 */
+-	local_irq_save(flags);
+ 	while (!list_empty(&list)) {
+ 		rq = list_entry_rq(list.next);
+ 		list_del_init(&rq->queuelist);
+@@ -2810,7 +2808,7 @@ void blk_flush_plug_list(struct blk_plug
+ 				queue_unplugged(q, depth, from_schedule);
+ 			q = rq->q;
+ 			depth = 0;
+-			spin_lock(q->queue_lock);
++			spin_lock_irq(q->queue_lock);
+ 		}
+ 		/*
+ 		 * rq is already accounted, so use raw insert
+@@ -2828,8 +2826,6 @@ void blk_flush_plug_list(struct blk_plug
+ 	 */
+ 	if (q)
+ 		queue_unplugged(q, depth, from_schedule);
+-
+-	local_irq_restore(flags);
+ }
+ 
+ void blk_finish_plug(struct blk_plug *plug)
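
The blk_flush_plug_list() change follows the same idea: instead of one local_irq_save()
around the whole flush loop plus a plain spin_lock() per queue, interrupts are now
disabled only together with each q->queue_lock via spin_lock_irq()/spin_unlock_irq(). On
-rt the queue lock is a sleeping lock and must not be acquired with hard interrupts
disabled; on mainline the behaviour is equivalent. The locking skeleton, reduced from the
hunk above (a sketch, not the full function):

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		if (rq->q != q) {
			if (q)
				queue_unplugged(q, depth, from_schedule);	/* drops q->queue_lock, irqs on again */
			q = rq->q;
			depth = 0;
			spin_lock_irq(q->queue_lock);	/* irqs off only while this queue's lock is held */
		}
		/* ... insert rq ... */
	}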
+Index: linux-3.2/kernel/sched.c
+===================================================================
+--- linux-3.2.orig/kernel/sched.c
++++ linux-3.2/kernel/sched.c
+@@ -71,6 +71,7 @@
+ #include <linux/ctype.h>
+ #include <linux/ftrace.h>
+ #include <linux/slab.h>
++#include <linux/init_task.h>
+ 
+ #include <asm/tlb.h>
+ #include <asm/irq_regs.h>
+@@ -188,6 +189,7 @@ void init_rt_bandwidth(struct rt_bandwid
+ 
+ 	hrtimer_init(&rt_b->rt_period_timer,
+ 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++	rt_b->rt_period_timer.irqsafe = 1;
+ 	rt_b->rt_period_timer.function = sched_rt_period_timer;
+ }
+ 
+@@ -940,7 +942,11 @@ late_initcall(sched_init_debug);
+  * Number of tasks to iterate in a single balance run.
+  * Limited because this is done with IRQs disabled.
+  */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ const_debug unsigned int sysctl_sched_nr_migrate = 32;
++#else
++const_debug unsigned int sysctl_sched_nr_migrate = 8;
++#endif
+ 
+ /*
+  * period over which we average the RT time consumption, measured
+@@ -1276,6 +1282,7 @@ static void init_rq_hrtick(struct rq *rq
+ 
+ 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ 	rq->hrtick_timer.function = hrtick;
++	rq->hrtick_timer.irqsafe = 1;
+ }
+ #else	/* CONFIG_SCHED_HRTICK */
+ static inline void hrtick_clear(struct rq *rq)
+@@ -2563,7 +2570,12 @@ static int select_fallback_rq(int cpu, s
+ 		printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
+ 				task_pid_nr(p), p->comm, cpu);
+ 	}
+-
++	/*
++	 * Clear PF_THREAD_BOUND, otherwise we wreckage
++	 * migrate_disable/enable. See optimization for
++	 * PF_THREAD_BOUND tasks there.
++	 */
++	p->flags &= ~PF_THREAD_BOUND;
+ 	return dest_cpu;
+ }
+ 
+@@ -2643,10 +2655,6 @@ static void ttwu_activate(struct rq *rq,
+ {
+ 	activate_task(rq, p, en_flags);
+ 	p->on_rq = 1;
+-
+-	/* if a worker is waking up, notify workqueue */
+-	if (p->flags & PF_WQ_WORKER)
+-		wq_worker_waking_up(p, cpu_of(rq));
+ }
+ 
+ /*
+@@ -2824,8 +2832,25 @@ try_to_wake_up(struct task_struct *p, un
+ 
+ 	smp_wmb();
+ 	raw_spin_lock_irqsave(&p->pi_lock, flags);
+-	if (!(p->state & state))
++	if (!(p->state & state)) {
++		/*
++		 * The task might be running due to a spinlock sleeper
++		 * wakeup. Check the saved state and set it to running
++		 * if the wakeup condition is true.
++		 */
++		if (!(wake_flags & WF_LOCK_SLEEPER)) {
++			if (p->saved_state & state)
++				p->saved_state = TASK_RUNNING;
++		}
+ 		goto out;
++	}
++
++	/*
++	 * If this is a regular wakeup, then we can unconditionally
++	 * clear the saved state of a "lock sleeper".
++	 */
++	if (!(wake_flags & WF_LOCK_SLEEPER))
++		p->saved_state = TASK_RUNNING;
+ 
+ 	success = 1; /* we're going to change ->state */
+ 	cpu = task_cpu(p);
+@@ -2881,40 +2906,6 @@ out:
+ }
+ 
+ /**
+- * try_to_wake_up_local - try to wake up a local task with rq lock held
+- * @p: the thread to be awakened
+- *
+- * Put @p on the run-queue if it's not already there. The caller must
+- * ensure that this_rq() is locked, @p is bound to this_rq() and not
+- * the current task.
+- */
+-static void try_to_wake_up_local(struct task_struct *p)
+-{
+-	struct rq *rq = task_rq(p);
+-
+-	BUG_ON(rq != this_rq());
+-	BUG_ON(p == current);
+-	lockdep_assert_held(&rq->lock);
+-
+-	if (!raw_spin_trylock(&p->pi_lock)) {
+-		raw_spin_unlock(&rq->lock);
+-		raw_spin_lock(&p->pi_lock);
+-		raw_spin_lock(&rq->lock);
+-	}
+-
+-	if (!(p->state & TASK_NORMAL))
+-		goto out;
+-
+-	if (!p->on_rq)
+-		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+-
+-	ttwu_do_wakeup(rq, p, 0);
+-	ttwu_stat(p, smp_processor_id(), 0);
+-out:
+-	raw_spin_unlock(&p->pi_lock);
+-}
+-
+-/**
+  * wake_up_process - Wake up a specific process
+  * @p: The process to be woken up.
+  *
+@@ -2931,6 +2922,18 @@ int wake_up_process(struct task_struct *
+ }
+ EXPORT_SYMBOL(wake_up_process);
+ 
++/**
++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
++ * @p: The process to be woken up.
++ *
++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
++ * the nature of the wakeup.
++ */
++int wake_up_lock_sleeper(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
++}
++
+ int wake_up_state(struct task_struct *p, unsigned int state)
+ {
+ 	return try_to_wake_up(p, state, 0);
+@@ -3205,8 +3208,12 @@ static void finish_task_switch(struct rq
+ 	finish_lock_switch(rq, prev);
+ 
+ 	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * We use mmdrop_delayed() here so we don't have to do the
++	 * full __mmdrop() when we are the last user.
++	 */
+ 	if (mm)
+-		mmdrop(mm);
++		mmdrop_delayed(mm);
+ 	if (unlikely(prev_state == TASK_DEAD)) {
+ 		/*
+ 		 * Remove function-return probe instances associated with this
+@@ -4353,6 +4360,126 @@ static inline void schedule_debug(struct
+ 	schedstat_inc(this_rq(), sched_count);
+ }
+ 
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
++#define MIGRATE_DISABLE_SET_AFFIN	(1<<30) /* Can't make a negative */
++#define migrate_disabled_updated(p)	((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
++#define migrate_disable_count(p)	((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
++
++static inline void update_migrate_disable(struct task_struct *p)
++{
++	const struct cpumask *mask;
++
++	if (likely(!p->migrate_disable))
++		return;
++
++	/* Did we already update affinity? */
++	if (unlikely(migrate_disabled_updated(p)))
++		return;
++
++	/*
++	 * Since this is always current we can get away with only locking
++	 * rq->lock, the ->cpus_allowed value can normally only be changed
++	 * while holding both p->pi_lock and rq->lock, but seeing that this
++	 * is current, we cannot actually be waking up, so all code that
++	 * relies on serialization against p->pi_lock is out of scope.
++	 *
++	 * Having rq->lock serializes us against things like
++	 * set_cpus_allowed_ptr() that can still happen concurrently.
++	 */
++	mask = tsk_cpus_allowed(p);
++
++	if (p->sched_class->set_cpus_allowed)
++		p->sched_class->set_cpus_allowed(p, mask);
++	p->rt.nr_cpus_allowed = cpumask_weight(mask);
++
++	/* Let migrate_enable know to fix things back up */
++	p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
++}
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++
++	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
++#ifdef CONFIG_SCHED_DEBUG
++		p->migrate_disable_atomic++;
++#endif
++		return;
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++	preempt_disable();
++	if (p->migrate_disable) {
++		p->migrate_disable++;
++		preempt_enable();
++		return;
++	}
++
++	pin_current_cpu();
++	p->migrate_disable = 1;
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++	const struct cpumask *mask;
++	unsigned long flags;
++	struct rq *rq;
++
++	if (in_atomic() || p->flags & PF_THREAD_BOUND) {
++#ifdef CONFIG_SCHED_DEBUG
++		p->migrate_disable_atomic--;
++#endif
++		return;
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++	WARN_ON_ONCE(p->migrate_disable <= 0);
++
++	preempt_disable();
++	if (migrate_disable_count(p) > 1) {
++		p->migrate_disable--;
++		preempt_enable();
++		return;
++	}
++
++	if (unlikely(migrate_disabled_updated(p))) {
++		/*
++		 * Undo whatever update_migrate_disable() did, also see there
++		 * about locking.
++		 */
++		rq = this_rq();
++		raw_spin_lock_irqsave(&rq->lock, flags);
++
++		/*
++		 * Clearing migrate_disable causes tsk_cpus_allowed to
++		 * show the tasks original cpu affinity.
++		 */
++		p->migrate_disable = 0;
++		mask = tsk_cpus_allowed(p);
++		if (p->sched_class->set_cpus_allowed)
++			p->sched_class->set_cpus_allowed(p, mask);
++		p->rt.nr_cpus_allowed = cpumask_weight(mask);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	} else
++		p->migrate_disable = 0;
++
++	unpin_current_cpu();
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
++#else
++static inline void update_migrate_disable(struct task_struct *p) { }
++#define migrate_disabled_updated(p)		0
++#endif
++
+ static void put_prev_task(struct rq *rq, struct task_struct *prev)
+ {
+ 	if (prev->on_rq || rq->skip_clock_update < 0)
+@@ -4412,6 +4539,8 @@ need_resched:
+ 
+ 	raw_spin_lock_irq(&rq->lock);
+ 
++	update_migrate_disable(prev);
++
+ 	switch_count = &prev->nivcsw;
+ 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ 		if (unlikely(signal_pending_state(prev->state, prev))) {
+@@ -4419,19 +4548,6 @@ need_resched:
+ 		} else {
+ 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+ 			prev->on_rq = 0;
+-
+-			/*
+-			 * If a worker went to sleep, notify and ask workqueue
+-			 * whether it wants to wake up a task to maintain
+-			 * concurrency.
+-			 */
+-			if (prev->flags & PF_WQ_WORKER) {
+-				struct task_struct *to_wakeup;
+-
+-				to_wakeup = wq_worker_sleeping(prev, cpu);
+-				if (to_wakeup)
+-					try_to_wake_up_local(to_wakeup);
+-			}
+ 		}
+ 		switch_count = &prev->nvcsw;
+ 	}
+@@ -4465,15 +4581,23 @@ need_resched:
+ 
+ 	post_schedule(rq);
+ 
+-	preempt_enable_no_resched();
++	__preempt_enable_no_resched();
+ 	if (need_resched())
+ 		goto need_resched;
+ }
+ 
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+-	if (!tsk->state)
++	if (!tsk->state || tsk_is_pi_blocked(tsk))
+ 		return;
++
++	/*
++	 * If a worker went to sleep, notify and ask workqueue whether
++	 * it wants to wake up a task to maintain concurrency.
++	 */
++	if (tsk->flags & PF_WQ_WORKER)
++		wq_worker_sleeping(tsk);
++
+ 	/*
+ 	 * If we are going to sleep and we have plugged IO queued,
+ 	 * make sure to submit it to avoid deadlocks.
+@@ -4482,15 +4606,37 @@ static inline void sched_submit_work(str
+ 		blk_schedule_flush_plug(tsk);
+ }
+ 
++static inline void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk_is_pi_blocked(tsk))
++		return;
++
++	if (tsk->flags & PF_WQ_WORKER)
++		wq_worker_running(tsk);
++}
++
+ asmlinkage void __sched schedule(void)
+ {
+ 	struct task_struct *tsk = current;
+ 
+ 	sched_submit_work(tsk);
+ 	__schedule();
++	sched_update_worker(tsk);
+ }
+ EXPORT_SYMBOL(schedule);
+ 
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	__preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
+ #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ 
+ static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+@@ -4555,7 +4701,16 @@ asmlinkage void __sched notrace preempt_
+ 
+ 	do {
+ 		add_preempt_count_notrace(PREEMPT_ACTIVE);
++		/*
++		 * The add/subtract must not be traced by the function
++		 * tracer. But we still want to account for the
++		 * preempt off latency tracer. Since the _notrace versions
++		 * of add/subtract skip the accounting for latency tracer
++		 * we must force it manually.
++		 */
++		start_critical_timings();
+ 		__schedule();
++		stop_critical_timings();
+ 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
+ 
+ 		/*
+@@ -4999,6 +5154,24 @@ void rt_mutex_setprio(struct task_struct
+ 
+ 	rq = __task_rq_lock(p);
+ 
++	/*
++	 * Idle task boosting is a nono in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ is active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
+ 	trace_sched_pi_setprio(p, prio);
+ 	oldprio = p->prio;
+ 	prev_class = p->sched_class;
+@@ -5022,11 +5195,10 @@ void rt_mutex_setprio(struct task_struct
+ 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
+ 
+ 	check_class_changed(rq, p, prev_class, oldprio);
++out_unlock:
+ 	__task_rq_unlock(rq);
+ }
+-
+ #endif
+-
+ void set_user_nice(struct task_struct *p, long nice)
+ {
+ 	int old_prio, delta, on_rq;
+@@ -5706,7 +5878,7 @@ SYSCALL_DEFINE0(sched_yield)
+ 	__release(rq->lock);
+ 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+ 	do_raw_spin_unlock(&rq->lock);
+-	preempt_enable_no_resched();
++	__preempt_enable_no_resched();
+ 
+ 	schedule();
+ 
+@@ -5720,9 +5892,17 @@ static inline int should_resched(void)
+ 
+ static void __cond_resched(void)
+ {
+-	add_preempt_count(PREEMPT_ACTIVE);
+-	__schedule();
+-	sub_preempt_count(PREEMPT_ACTIVE);
++	do {
++		add_preempt_count(PREEMPT_ACTIVE);
++		__schedule();
++		sub_preempt_count(PREEMPT_ACTIVE);
++		/*
++		 * Check again in case we missed a preemption
++		 * opportunity between schedule and now.
++		 */
++		barrier();
++
++	} while (need_resched());
+ }
+ 
+ int __sched _cond_resched(void)
+@@ -5763,6 +5943,7 @@ int __cond_resched_lock(spinlock_t *lock
+ }
+ EXPORT_SYMBOL(__cond_resched_lock);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int __sched __cond_resched_softirq(void)
+ {
+ 	BUG_ON(!in_softirq());
+@@ -5776,6 +5957,7 @@ int __sched __cond_resched_softirq(void)
+ 	return 0;
+ }
+ EXPORT_SYMBOL(__cond_resched_softirq);
++#endif
+ 
+ /**
+  * yield - yield the current processor to other threads.
+@@ -6099,6 +6281,9 @@ void __cpuinit init_idle(struct task_str
+ 	 */
+ 	idle->sched_class = &idle_sched_class;
+ 	ftrace_graph_init_idle_task(idle, cpu);
++#if defined(CONFIG_SMP)
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
+ }
+ 
+ /*
+@@ -6151,11 +6336,12 @@ static inline void sched_init_granularit
+ #ifdef CONFIG_SMP
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+-	if (p->sched_class && p->sched_class->set_cpus_allowed)
+-		p->sched_class->set_cpus_allowed(p, new_mask);
+-
++	if (!migrate_disabled_updated(p)) {
++		if (p->sched_class && p->sched_class->set_cpus_allowed)
++			p->sched_class->set_cpus_allowed(p, new_mask);
++		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
++	}
+ 	cpumask_copy(&p->cpus_allowed, new_mask);
+-	p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+ 
+ /*
+@@ -6206,7 +6392,7 @@ int set_cpus_allowed_ptr(struct task_str
+ 	do_set_cpus_allowed(p, new_mask);
+ 
+ 	/* Can the task run on the task's current CPU? If so, we're done */
+-	if (cpumask_test_cpu(task_cpu(p), new_mask))
++	if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ 		goto out;
+ 
+ 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+@@ -6295,6 +6481,8 @@ static int migration_cpu_stop(void *data
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ 
++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
++
+ /*
+  * Ensures that the idle task is using init_mm right before its cpu goes
+  * offline.
+@@ -6307,7 +6495,12 @@ void idle_task_exit(void)
+ 
+ 	if (mm != &init_mm)
+ 		switch_mm(mm, &init_mm, current);
+-	mmdrop(mm);
++
++	/*
++	 * Defer the cleanup to an alive cpu. On RT we can neither
++	 * call mmdrop() nor mmdrop_delayed() from here.
++	 */
++	per_cpu(idle_last_mm, smp_processor_id()) = mm;
+ }
+ 
+ /*
+@@ -6652,6 +6845,12 @@ migration_call(struct notifier_block *nf
+ 		migrate_nr_uninterruptible(rq);
+ 		calc_global_load_remove(rq);
+ 		break;
++	case CPU_DEAD:
++		if (per_cpu(idle_last_mm, cpu)) {
++			mmdrop(per_cpu(idle_last_mm, cpu));
++			per_cpu(idle_last_mm, cpu) = NULL;
++		}
++		break;
+ #endif
+ 	}
+ 
+@@ -8358,7 +8557,8 @@ void __init sched_init(void)
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ static inline int preempt_count_equals(int preempt_offset)
+ {
+-	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
++	int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
++		sched_rcu_preempt_depth();
+ 
+ 	return (nested == preempt_offset);
+ }
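
The largest sched.c addition is the migrate_disable()/migrate_enable() pair
(CONFIG_PREEMPT_RT_FULL + SMP only). It pins the current task to its CPU without
disabling preemption: the task may still be preempted and may even sleep, but it will not
be migrated until the matching migrate_enable(). A hedged usage sketch -- the per-CPU
data and the mutex are made up, only migrate_disable()/migrate_enable() come from the
hunks above:

	/* assumes: DEFINE_PER_CPU(struct example_state, example_state); */
	static void example_update(void)
	{
		struct example_state *state;

		migrate_disable();
		state = this_cpu_ptr(&example_state);	/* stays valid: no migration */
		mutex_lock(&state->lock);		/* sleeping here is fine */
		state->counter++;
		mutex_unlock(&state->lock);
		migrate_enable();
	}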
+Index: linux-3.2/kernel/workqueue.c
+===================================================================
+--- linux-3.2.orig/kernel/workqueue.c
++++ linux-3.2/kernel/workqueue.c
+@@ -41,6 +41,7 @@
+ #include <linux/debug_locks.h>
+ #include <linux/lockdep.h>
+ #include <linux/idr.h>
++#include <linux/delay.h>
+ 
+ #include "workqueue_sched.h"
+ 
+@@ -57,20 +58,10 @@ enum {
+ 	WORKER_DIE		= 1 << 1,	/* die die die */
+ 	WORKER_IDLE		= 1 << 2,	/* is idle */
+ 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
+-	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
+-	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
+-	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
+-	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */
+-
+-	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
+-				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
+-
+-	/* gcwq->trustee_state */
+-	TRUSTEE_START		= 0,		/* start */
+-	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
+-	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
+-	TRUSTEE_RELEASE		= 3,		/* release workers */
+-	TRUSTEE_DONE		= 4,		/* trustee is done */
++	WORKER_CPU_INTENSIVE	= 1 << 4,	/* cpu intensive */
++	WORKER_UNBOUND		= 1 << 5,	/* worker is unbound */
++
++	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
+ 
+ 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
+ 	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
+@@ -84,7 +75,6 @@ enum {
+ 						   (min two ticks) */
+ 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
+ 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
+-	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
+ 
+ 	/*
+ 	 * Rescue workers are used only on emergencies and shared by
+@@ -136,7 +126,7 @@ struct worker {
+ 	unsigned long		last_active;	/* L: last active timestamp */
+ 	unsigned int		flags;		/* X: flags */
+ 	int			id;		/* I: worker id */
+-	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
++	int			sleeping;	/* None */
+ };
+ 
+ /*
+@@ -163,10 +153,8 @@ struct global_cwq {
+ 
+ 	struct ida		worker_ida;	/* L: for worker IDs */
+ 
+-	struct task_struct	*trustee;	/* L: for gcwq shutdown */
+-	unsigned int		trustee_state;	/* L: trustee state */
+-	wait_queue_head_t	trustee_wait;	/* trustee wait */
+ 	struct worker		*first_idle;	/* L: first idle worker */
++	wait_queue_head_t	idle_wait;
+ } ____cacheline_aligned_in_smp;
+ 
+ /*
+@@ -658,66 +646,58 @@ static void wake_up_worker(struct global
+ }
+ 
+ /**
+- * wq_worker_waking_up - a worker is waking up
+- * @task: task waking up
+- * @cpu: CPU @task is waking up to
+- *
+- * This function is called during try_to_wake_up() when a worker is
+- * being awoken.
++ * wq_worker_running - a worker is running again
++ * @task: task returning from sleep
+  *
+- * CONTEXT:
+- * spin_lock_irq(rq->lock)
++ * This function is called when a worker returns from schedule()
+  */
+-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
++void wq_worker_running(struct task_struct *task)
+ {
+ 	struct worker *worker = kthread_data(task);
+ 
++	if (!worker->sleeping)
++		return;
+ 	if (!(worker->flags & WORKER_NOT_RUNNING))
+-		atomic_inc(get_gcwq_nr_running(cpu));
++		atomic_inc(get_gcwq_nr_running(smp_processor_id()));
++	worker->sleeping = 0;
+ }
+ 
+ /**
+  * wq_worker_sleeping - a worker is going to sleep
+  * @task: task going to sleep
+- * @cpu: CPU in question, must be the current CPU number
+- *
+- * This function is called during schedule() when a busy worker is
+- * going to sleep.  Worker on the same cpu can be woken up by
+- * returning pointer to its task.
+  *
+- * CONTEXT:
+- * spin_lock_irq(rq->lock)
+- *
+- * RETURNS:
+- * Worker task on @cpu to wake up, %NULL if none.
++ * This function is called from schedule() when a busy worker is
++ * going to sleep.
+  */
+-struct task_struct *wq_worker_sleeping(struct task_struct *task,
+-				       unsigned int cpu)
++void wq_worker_sleeping(struct task_struct *task)
+ {
+-	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
+-	struct global_cwq *gcwq = get_gcwq(cpu);
+-	atomic_t *nr_running = get_gcwq_nr_running(cpu);
++	struct worker *worker = kthread_data(task);
++	struct global_cwq *gcwq;
++	int cpu;
+ 
+ 	if (worker->flags & WORKER_NOT_RUNNING)
+-		return NULL;
++		return;
+ 
+-	/* this can only happen on the local cpu */
+-	BUG_ON(cpu != raw_smp_processor_id());
++	if (WARN_ON_ONCE(worker->sleeping))
++		return;
++
++	worker->sleeping = 1;
+ 
++	cpu = smp_processor_id();
++	gcwq = get_gcwq(cpu);
++	spin_lock_irq(&gcwq->lock);
+ 	/*
+ 	 * The counterpart of the following dec_and_test, implied mb,
+ 	 * worklist not empty test sequence is in insert_work().
+ 	 * Please read comment there.
+-	 *
+-	 * NOT_RUNNING is clear.  This means that trustee is not in
+-	 * charge and we're running on the local cpu w/ rq lock held
+-	 * and preemption disabled, which in turn means that none else
+-	 * could be manipulating idle_list, so dereferencing idle_list
+-	 * without gcwq lock is safe.
+-	 */
+-	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
+-		to_wakeup = first_worker(gcwq);
+-	return to_wakeup ? to_wakeup->task : NULL;
++	 */
++	if (atomic_dec_and_test(get_gcwq_nr_running(cpu)) &&
++	    !list_empty(&gcwq->worklist)) {
++		worker = first_worker(gcwq);
++		if (worker)
++			wake_up_process(worker->task);
++	}
++	spin_unlock_irq(&gcwq->lock);
+ }
+ 
+ /**
+@@ -979,13 +959,38 @@ static bool is_chained_work(struct workq
+ 	return false;
+ }
+ 
+-static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
+-			 struct work_struct *work)
++static void ___queue_work(struct workqueue_struct *wq, struct global_cwq *gcwq,
++			  struct work_struct *work)
+ {
+-	struct global_cwq *gcwq;
+ 	struct cpu_workqueue_struct *cwq;
+ 	struct list_head *worklist;
+ 	unsigned int work_flags;
++
++	/* gcwq determined, get cwq and queue */
++	cwq = get_cwq(gcwq->cpu, wq);
++	trace_workqueue_queue_work(gcwq->cpu, cwq, work);
++
++	BUG_ON(!list_empty(&work->entry));
++
++	cwq->nr_in_flight[cwq->work_color]++;
++	work_flags = work_color_to_flags(cwq->work_color);
++
++	if (likely(cwq->nr_active < cwq->max_active)) {
++		trace_workqueue_activate_work(work);
++		cwq->nr_active++;
++		worklist = gcwq_determine_ins_pos(gcwq, cwq);
++	} else {
++		work_flags |= WORK_STRUCT_DELAYED;
++		worklist = &cwq->delayed_works;
++	}
++
++	insert_work(cwq, work, worklist, work_flags);
++}
++
++static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
++			 struct work_struct *work)
++{
++	struct global_cwq *gcwq;
+ 	unsigned long flags;
+ 
+ 	debug_work_activate(work);
+@@ -1031,27 +1036,32 @@ static void __queue_work(unsigned int cp
+ 		spin_lock_irqsave(&gcwq->lock, flags);
+ 	}
+ 
+-	/* gcwq determined, get cwq and queue */
+-	cwq = get_cwq(gcwq->cpu, wq);
+-	trace_workqueue_queue_work(cpu, cwq, work);
++	___queue_work(wq, gcwq, work);
+ 
+-	BUG_ON(!list_empty(&work->entry));
++	spin_unlock_irqrestore(&gcwq->lock, flags);
++}
+ 
+-	cwq->nr_in_flight[cwq->work_color]++;
+-	work_flags = work_color_to_flags(cwq->work_color);
++/**
++ * queue_work_on - queue work on specific cpu
++ * @cpu: CPU number to execute work on
++ * @wq: workqueue to use
++ * @work: work to queue
++ *
++ * Returns 0 if @work was already on a queue, non-zero otherwise.
++ *
++ * We queue the work to a specific CPU, the caller must ensure it
++ * can't go away.
++ */
++static int
++__queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
++{
++	int ret = 0;
+ 
+-	if (likely(cwq->nr_active < cwq->max_active)) {
+-		trace_workqueue_activate_work(work);
+-		cwq->nr_active++;
+-		worklist = gcwq_determine_ins_pos(gcwq, cwq);
+-	} else {
+-		work_flags |= WORK_STRUCT_DELAYED;
+-		worklist = &cwq->delayed_works;
++	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
++		__queue_work(cpu, wq, work);
++		ret = 1;
+ 	}
+-
+-	insert_work(cwq, work, worklist, work_flags);
+-
+-	spin_unlock_irqrestore(&gcwq->lock, flags);
++	return ret;
+ }
+ 
+ /**
+@@ -1068,34 +1078,19 @@ int queue_work(struct workqueue_struct *
+ {
+ 	int ret;
+ 
+-	ret = queue_work_on(get_cpu(), wq, work);
+-	put_cpu();
++	ret = __queue_work_on(get_cpu_light(), wq, work);
++	put_cpu_light();
+ 
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(queue_work);
+ 
+-/**
+- * queue_work_on - queue work on specific cpu
+- * @cpu: CPU number to execute work on
+- * @wq: workqueue to use
+- * @work: work to queue
+- *
+- * Returns 0 if @work was already on a queue, non-zero otherwise.
+- *
+- * We queue the work to a specific CPU, the caller must ensure it
+- * can't go away.
+- */
+ int
+ queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+ {
+-	int ret = 0;
++	WARN_ON(wq->flags & WQ_NON_AFFINE);
+ 
+-	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+-		__queue_work(cpu, wq, work);
+-		ret = 1;
+-	}
+-	return ret;
++	return __queue_work_on(cpu, wq, work);
+ }
+ EXPORT_SYMBOL_GPL(queue_work_on);
+ 
+@@ -1141,6 +1136,8 @@ int queue_delayed_work_on(int cpu, struc
+ 	struct timer_list *timer = &dwork->timer;
+ 	struct work_struct *work = &dwork->work;
+ 
++	WARN_ON((wq->flags & WQ_NON_AFFINE) && cpu != -1);
++
+ 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ 		unsigned int lcpu;
+ 
+@@ -1206,12 +1203,13 @@ static void worker_enter_idle(struct wor
+ 	/* idle_list is LIFO */
+ 	list_add(&worker->entry, &gcwq->idle_list);
+ 
+-	if (likely(!(worker->flags & WORKER_ROGUE))) {
+-		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
+-			mod_timer(&gcwq->idle_timer,
+-				  jiffies + IDLE_WORKER_TIMEOUT);
+-	} else
+-		wake_up_all(&gcwq->trustee_wait);
++	if (gcwq->nr_idle == gcwq->nr_workers)
++		wake_up_all(&gcwq->idle_wait);
++
++	if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer)) {
++		mod_timer(&gcwq->idle_timer,
++				jiffies + IDLE_WORKER_TIMEOUT);
++	}
+ 
+ 	/* sanity check nr_running */
+ 	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
+@@ -1288,8 +1286,14 @@ __acquires(&gcwq->lock)
+ 			return false;
+ 		if (task_cpu(task) == gcwq->cpu &&
+ 		    cpumask_equal(&current->cpus_allowed,
+-				  get_cpu_mask(gcwq->cpu)))
++				  get_cpu_mask(gcwq->cpu))) {
++			/*
++			 * Since we're binding to a particular cpu and need to
++			 * stay there for correctness, mark us PF_THREAD_BOUND.
++			 */
++			task->flags |= PF_THREAD_BOUND;
+ 			return true;
++		}
+ 		spin_unlock_irq(&gcwq->lock);
+ 
+ 		/*
+@@ -1303,20 +1307,15 @@ __acquires(&gcwq->lock)
+ 	}
+ }
+ 
+-/*
+- * Function for worker->rebind_work used to rebind rogue busy workers
+- * to the associated cpu which is coming back online.  This is
+- * scheduled by cpu up but can race with other cpu hotplug operations
+- * and may be executed twice without intervening cpu down.
+- */
+-static void worker_rebind_fn(struct work_struct *work)
++static void worker_unbind_and_unlock(struct worker *worker)
+ {
+-	struct worker *worker = container_of(work, struct worker, rebind_work);
+ 	struct global_cwq *gcwq = worker->gcwq;
++	struct task_struct *task = worker->task;
+ 
+-	if (worker_maybe_bind_and_lock(worker))
+-		worker_clr_flags(worker, WORKER_REBIND);
+-
++	/*
++	 * Its no longer required we're PF_THREAD_BOUND, the work is done.
++	 */
++	task->flags &= ~PF_THREAD_BOUND;
+ 	spin_unlock_irq(&gcwq->lock);
+ }
+ 
+@@ -1328,7 +1327,6 @@ static struct worker *alloc_worker(void)
+ 	if (worker) {
+ 		INIT_LIST_HEAD(&worker->entry);
+ 		INIT_LIST_HEAD(&worker->scheduled);
+-		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
+ 		/* on creation a worker is in !idle && prep state */
+ 		worker->flags = WORKER_PREP;
+ 	}
+@@ -1383,15 +1381,9 @@ static struct worker *create_worker(stru
+ 	if (IS_ERR(worker->task))
+ 		goto fail;
+ 
+-	/*
+-	 * A rogue worker will become a regular one if CPU comes
+-	 * online later on.  Make sure every worker has
+-	 * PF_THREAD_BOUND set.
+-	 */
+ 	if (bind && !on_unbound_cpu)
+ 		kthread_bind(worker->task, gcwq->cpu);
+ 	else {
+-		worker->task->flags |= PF_THREAD_BOUND;
+ 		if (on_unbound_cpu)
+ 			worker->flags |= WORKER_UNBOUND;
+ 	}
+@@ -1668,13 +1660,6 @@ static bool manage_workers(struct worker
+ 
+ 	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
+ 
+-	/*
+-	 * The trustee might be waiting to take over the manager
+-	 * position, tell it we're done.
+-	 */
+-	if (unlikely(gcwq->trustee))
+-		wake_up_all(&gcwq->trustee_wait);
+-
+ 	return ret;
+ }
+ 
+@@ -2075,7 +2060,7 @@ repeat:
+ 		if (keep_working(gcwq))
+ 			wake_up_worker(gcwq);
+ 
+-		spin_unlock_irq(&gcwq->lock);
++		worker_unbind_and_unlock(rescuer);
+ 	}
+ 
+ 	schedule();
+@@ -3024,7 +3009,6 @@ struct workqueue_struct *__alloc_workque
+ 		if (IS_ERR(rescuer->task))
+ 			goto err;
+ 
+-		rescuer->task->flags |= PF_THREAD_BOUND;
+ 		wake_up_process(rescuer->task);
+ 	}
+ 
+@@ -3214,171 +3198,76 @@ EXPORT_SYMBOL_GPL(work_busy);
+  * gcwqs serve mix of short, long and very long running works making
+  * blocked draining impractical.
+  *
+- * This is solved by allowing a gcwq to be detached from CPU, running
+- * it with unbound (rogue) workers and allowing it to be reattached
+- * later if the cpu comes back online.  A separate thread is created
+- * to govern a gcwq in such state and is called the trustee of the
+- * gcwq.
+- *
+- * Trustee states and their descriptions.
+- *
+- * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
+- *		new trustee is started with this state.
+- *
+- * IN_CHARGE	Once started, trustee will enter this state after
+- *		assuming the manager role and making all existing
+- *		workers rogue.  DOWN_PREPARE waits for trustee to
+- *		enter this state.  After reaching IN_CHARGE, trustee
+- *		tries to execute the pending worklist until it's empty
+- *		and the state is set to BUTCHER, or the state is set
+- *		to RELEASE.
+- *
+- * BUTCHER	Command state which is set by the cpu callback after
+- *		the cpu has went down.  Once this state is set trustee
+- *		knows that there will be no new works on the worklist
+- *		and once the worklist is empty it can proceed to
+- *		killing idle workers.
+- *
+- * RELEASE	Command state which is set by the cpu callback if the
+- *		cpu down has been canceled or it has come online
+- *		again.  After recognizing this state, trustee stops
+- *		trying to drain or butcher and clears ROGUE, rebinds
+- *		all remaining workers back to the cpu and releases
+- *		manager role.
+- *
+- * DONE		Trustee will enter this state after BUTCHER or RELEASE
+- *		is complete.
+- *
+- *          trustee                 CPU                draining
+- *         took over                down               complete
+- * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
+- *                        |                     |                  ^
+- *                        | CPU is back online  v   return workers |
+- *                         ----------------> RELEASE --------------
+  */
+ 
+-/**
+- * trustee_wait_event_timeout - timed event wait for trustee
+- * @cond: condition to wait for
+- * @timeout: timeout in jiffies
+- *
+- * wait_event_timeout() for trustee to use.  Handles locking and
+- * checks for RELEASE request.
+- *
+- * CONTEXT:
+- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+- * multiple times.  To be used by trustee.
+- *
+- * RETURNS:
+- * Positive indicating left time if @cond is satisfied, 0 if timed
+- * out, -1 if canceled.
+- */
+-#define trustee_wait_event_timeout(cond, timeout) ({			\
+-	long __ret = (timeout);						\
+-	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
+-	       __ret) {							\
+-		spin_unlock_irq(&gcwq->lock);				\
+-		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
+-			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
+-			__ret);						\
+-		spin_lock_irq(&gcwq->lock);				\
+-	}								\
+-	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
+-})
++static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
++						unsigned long action,
++						void *hcpu)
++{
++	unsigned int cpu = (unsigned long)hcpu;
++	struct global_cwq *gcwq = get_gcwq(cpu);
++	struct worker *uninitialized_var(new_worker);
++	unsigned long flags;
+ 
+-/**
+- * trustee_wait_event - event wait for trustee
+- * @cond: condition to wait for
+- *
+- * wait_event() for trustee to use.  Automatically handles locking and
+- * checks for CANCEL request.
+- *
+- * CONTEXT:
+- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+- * multiple times.  To be used by trustee.
+- *
+- * RETURNS:
+- * 0 if @cond is satisfied, -1 if canceled.
+- */
+-#define trustee_wait_event(cond) ({					\
+-	long __ret1;							\
+-	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
+-	__ret1 < 0 ? -1 : 0;						\
+-})
++	action &= ~CPU_TASKS_FROZEN;
+ 
+-static int __cpuinit trustee_thread(void *__gcwq)
+-{
+-	struct global_cwq *gcwq = __gcwq;
+-	struct worker *worker;
+-	struct work_struct *work;
+-	struct hlist_node *pos;
+-	long rc;
+-	int i;
++	switch (action) {
++	case CPU_UP_PREPARE:
++		BUG_ON(gcwq->first_idle);
++		new_worker = create_worker(gcwq, false);
++		if (!new_worker)
++			return NOTIFY_BAD;
++	case CPU_UP_CANCELED:
++	case CPU_ONLINE:
++		break;
++	default:
++		return notifier_from_errno(0);
++	}
+ 
+-	BUG_ON(gcwq->cpu != smp_processor_id());
++	/* some are called w/ irq disabled, don't disturb irq status */
++	spin_lock_irqsave(&gcwq->lock, flags);
+ 
+-	spin_lock_irq(&gcwq->lock);
+-	/*
+-	 * Claim the manager position and make all workers rogue.
+-	 * Trustee must be bound to the target cpu and can't be
+-	 * cancelled.
+-	 */
+-	BUG_ON(gcwq->cpu != smp_processor_id());
+-	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
+-	BUG_ON(rc < 0);
++	switch (action) {
++	case CPU_UP_PREPARE:
++		BUG_ON(gcwq->first_idle);
++		gcwq->first_idle = new_worker;
++		break;
+ 
+-	gcwq->flags |= GCWQ_MANAGING_WORKERS;
++	case CPU_UP_CANCELED:
++		destroy_worker(gcwq->first_idle);
++		gcwq->first_idle = NULL;
++		break;
+ 
+-	list_for_each_entry(worker, &gcwq->idle_list, entry)
+-		worker->flags |= WORKER_ROGUE;
++	case CPU_ONLINE:
++		spin_unlock_irq(&gcwq->lock);
++		kthread_bind(gcwq->first_idle->task, cpu);
++		spin_lock_irq(&gcwq->lock);
++		gcwq->flags |= GCWQ_MANAGE_WORKERS;
++		start_worker(gcwq->first_idle);
++		gcwq->first_idle = NULL;
++		break;
++	}
+ 
+-	for_each_busy_worker(worker, i, pos, gcwq)
+-		worker->flags |= WORKER_ROGUE;
++	spin_unlock_irqrestore(&gcwq->lock, flags);
+ 
+-	/*
+-	 * Call schedule() so that we cross rq->lock and thus can
+-	 * guarantee sched callbacks see the rogue flag.  This is
+-	 * necessary as scheduler callbacks may be invoked from other
+-	 * cpus.
+-	 */
+-	spin_unlock_irq(&gcwq->lock);
+-	schedule();
+-	spin_lock_irq(&gcwq->lock);
++	return notifier_from_errno(0);
++}
+ 
+-	/*
+-	 * Sched callbacks are disabled now.  Zap nr_running.  After
+-	 * this, nr_running stays zero and need_more_worker() and
+-	 * keep_working() are always true as long as the worklist is
+-	 * not empty.
+-	 */
+-	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
++static void flush_gcwq(struct global_cwq *gcwq)
++{
++	struct work_struct *work, *nw;
++	struct worker *worker, *n;
++	LIST_HEAD(non_affine_works);
+ 
+-	spin_unlock_irq(&gcwq->lock);
+-	del_timer_sync(&gcwq->idle_timer);
+ 	spin_lock_irq(&gcwq->lock);
++	list_for_each_entry_safe(work, nw, &gcwq->worklist, entry) {
++		struct workqueue_struct *wq = get_work_cwq(work)->wq;
+ 
+-	/*
+-	 * We're now in charge.  Notify and proceed to drain.  We need
+-	 * to keep the gcwq running during the whole CPU down
+-	 * procedure as other cpu hotunplug callbacks may need to
+-	 * flush currently running tasks.
+-	 */
+-	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
+-	wake_up_all(&gcwq->trustee_wait);
+-
+-	/*
+-	 * The original cpu is in the process of dying and may go away
+-	 * anytime now.  When that happens, we and all workers would
+-	 * be migrated to other cpus.  Try draining any left work.  We
+-	 * want to get it over with ASAP - spam rescuers, wake up as
+-	 * many idlers as necessary and create new ones till the
+-	 * worklist is empty.  Note that if the gcwq is frozen, there
+-	 * may be frozen works in freezable cwqs.  Don't declare
+-	 * completion while frozen.
+-	 */
+-	while (gcwq->nr_workers != gcwq->nr_idle ||
+-	       gcwq->flags & GCWQ_FREEZING ||
+-	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
++		if (wq->flags & WQ_NON_AFFINE)
++			list_move(&work->entry, &non_affine_works);
++	}
++
++	while (!list_empty(&gcwq->worklist)) {
+ 		int nr_works = 0;
+ 
+ 		list_for_each_entry(work, &gcwq->worklist, entry) {
+@@ -3392,189 +3281,54 @@ static int __cpuinit trustee_thread(void
+ 			wake_up_process(worker->task);
+ 		}
+ 
++		spin_unlock_irq(&gcwq->lock);
++
+ 		if (need_to_create_worker(gcwq)) {
+-			spin_unlock_irq(&gcwq->lock);
+-			worker = create_worker(gcwq, false);
+-			spin_lock_irq(&gcwq->lock);
+-			if (worker) {
+-				worker->flags |= WORKER_ROGUE;
++			worker = create_worker(gcwq, true);
++			if (worker)
+ 				start_worker(worker);
+-			}
+ 		}
+ 
+-		/* give a breather */
+-		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
+-			break;
+-	}
+-
+-	/*
+-	 * Either all works have been scheduled and cpu is down, or
+-	 * cpu down has already been canceled.  Wait for and butcher
+-	 * all workers till we're canceled.
+-	 */
+-	do {
+-		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
+-		while (!list_empty(&gcwq->idle_list))
+-			destroy_worker(list_first_entry(&gcwq->idle_list,
+-							struct worker, entry));
+-	} while (gcwq->nr_workers && rc >= 0);
+-
+-	/*
+-	 * At this point, either draining has completed and no worker
+-	 * is left, or cpu down has been canceled or the cpu is being
+-	 * brought back up.  There shouldn't be any idle one left.
+-	 * Tell the remaining busy ones to rebind once it finishes the
+-	 * currently scheduled works by scheduling the rebind_work.
+-	 */
+-	WARN_ON(!list_empty(&gcwq->idle_list));
+-
+-	for_each_busy_worker(worker, i, pos, gcwq) {
+-		struct work_struct *rebind_work = &worker->rebind_work;
++		wait_event_timeout(gcwq->idle_wait,
++				gcwq->nr_idle == gcwq->nr_workers, HZ/10);
+ 
+-		/*
+-		 * Rebind_work may race with future cpu hotplug
+-		 * operations.  Use a separate flag to mark that
+-		 * rebinding is scheduled.
+-		 */
+-		worker->flags |= WORKER_REBIND;
+-		worker->flags &= ~WORKER_ROGUE;
++		spin_lock_irq(&gcwq->lock);
++	}
+ 
+-		/* queue rebind_work, wq doesn't matter, use the default one */
+-		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
+-				     work_data_bits(rebind_work)))
+-			continue;
++	WARN_ON(gcwq->nr_workers != gcwq->nr_idle);
+ 
+-		debug_work_activate(rebind_work);
+-		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
+-			    worker->scheduled.next,
+-			    work_color_to_flags(WORK_NO_COLOR));
+-	}
++	list_for_each_entry_safe(worker, n, &gcwq->idle_list, entry)
++		destroy_worker(worker);
+ 
+-	/* relinquish manager role */
+-	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
++	WARN_ON(gcwq->nr_workers || gcwq->nr_idle);
+ 
+-	/* notify completion */
+-	gcwq->trustee = NULL;
+-	gcwq->trustee_state = TRUSTEE_DONE;
+-	wake_up_all(&gcwq->trustee_wait);
+ 	spin_unlock_irq(&gcwq->lock);
+-	return 0;
+-}
+ 
+-/**
+- * wait_trustee_state - wait for trustee to enter the specified state
+- * @gcwq: gcwq the trustee of interest belongs to
+- * @state: target state to wait for
+- *
+- * Wait for the trustee to reach @state.  DONE is already matched.
+- *
+- * CONTEXT:
+- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+- * multiple times.  To be used by cpu_callback.
+- */
+-static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+-__releases(&gcwq->lock)
+-__acquires(&gcwq->lock)
+-{
+-	if (!(gcwq->trustee_state == state ||
+-	      gcwq->trustee_state == TRUSTEE_DONE)) {
+-		spin_unlock_irq(&gcwq->lock);
+-		__wait_event(gcwq->trustee_wait,
+-			     gcwq->trustee_state == state ||
+-			     gcwq->trustee_state == TRUSTEE_DONE);
+-		spin_lock_irq(&gcwq->lock);
++	gcwq = get_gcwq(get_cpu_light());
++	spin_lock_irq(&gcwq->lock);
++	list_for_each_entry_safe(work, nw, &non_affine_works, entry) {
++		list_del_init(&work->entry);
++		___queue_work(get_work_cwq(work)->wq, gcwq, work);
+ 	}
++	spin_unlock_irq(&gcwq->lock);
++	put_cpu_light();
+ }
+ 
+-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
++static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+ 						unsigned long action,
+ 						void *hcpu)
+ {
+ 	unsigned int cpu = (unsigned long)hcpu;
+ 	struct global_cwq *gcwq = get_gcwq(cpu);
+-	struct task_struct *new_trustee = NULL;
+-	struct worker *uninitialized_var(new_worker);
+-	unsigned long flags;
+ 
+ 	action &= ~CPU_TASKS_FROZEN;
+ 
+-	switch (action) {
+-	case CPU_DOWN_PREPARE:
+-		new_trustee = kthread_create(trustee_thread, gcwq,
+-					     "workqueue_trustee/%d\n", cpu);
+-		if (IS_ERR(new_trustee))
+-			return notifier_from_errno(PTR_ERR(new_trustee));
+-		kthread_bind(new_trustee, cpu);
+-		/* fall through */
+-	case CPU_UP_PREPARE:
+-		BUG_ON(gcwq->first_idle);
+-		new_worker = create_worker(gcwq, false);
+-		if (!new_worker) {
+-			if (new_trustee)
+-				kthread_stop(new_trustee);
+-			return NOTIFY_BAD;
+-		}
+-	}
++        switch (action) {
++        case CPU_DOWN_PREPARE:
++                flush_gcwq(gcwq);
++                break;
++        }
+ 
+-	/* some are called w/ irq disabled, don't disturb irq status */
+-	spin_lock_irqsave(&gcwq->lock, flags);
+-
+-	switch (action) {
+-	case CPU_DOWN_PREPARE:
+-		/* initialize trustee and tell it to acquire the gcwq */
+-		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
+-		gcwq->trustee = new_trustee;
+-		gcwq->trustee_state = TRUSTEE_START;
+-		wake_up_process(gcwq->trustee);
+-		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
+-		/* fall through */
+-	case CPU_UP_PREPARE:
+-		BUG_ON(gcwq->first_idle);
+-		gcwq->first_idle = new_worker;
+-		break;
+-
+-	case CPU_DYING:
+-		/*
+-		 * Before this, the trustee and all workers except for
+-		 * the ones which are still executing works from
+-		 * before the last CPU down must be on the cpu.  After
+-		 * this, they'll all be diasporas.
+-		 */
+-		gcwq->flags |= GCWQ_DISASSOCIATED;
+-		break;
+-
+-	case CPU_POST_DEAD:
+-		gcwq->trustee_state = TRUSTEE_BUTCHER;
+-		/* fall through */
+-	case CPU_UP_CANCELED:
+-		destroy_worker(gcwq->first_idle);
+-		gcwq->first_idle = NULL;
+-		break;
+-
+-	case CPU_DOWN_FAILED:
+-	case CPU_ONLINE:
+-		gcwq->flags &= ~GCWQ_DISASSOCIATED;
+-		if (gcwq->trustee_state != TRUSTEE_DONE) {
+-			gcwq->trustee_state = TRUSTEE_RELEASE;
+-			wake_up_process(gcwq->trustee);
+-			wait_trustee_state(gcwq, TRUSTEE_DONE);
+-		}
+-
+-		/*
+-		 * Trustee is done and there might be no worker left.
+-		 * Put the first_idle in and request a real manager to
+-		 * take a look.
+-		 */
+-		spin_unlock_irq(&gcwq->lock);
+-		kthread_bind(gcwq->first_idle->task, cpu);
+-		spin_lock_irq(&gcwq->lock);
+-		gcwq->flags |= GCWQ_MANAGE_WORKERS;
+-		start_worker(gcwq->first_idle);
+-		gcwq->first_idle = NULL;
+-		break;
+-	}
+-
+-	spin_unlock_irqrestore(&gcwq->lock, flags);
+ 
+ 	return notifier_from_errno(0);
+ }
+@@ -3772,7 +3526,8 @@ static int __init init_workqueues(void)
+ 	unsigned int cpu;
+ 	int i;
+ 
+-	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
++	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_ACTIVE);
++ 	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_INACTIVE);
+ 
+ 	/* initialize gcwqs */
+ 	for_each_gcwq_cpu(cpu) {
+@@ -3795,9 +3550,7 @@ static int __init init_workqueues(void)
+ 			    (unsigned long)gcwq);
+ 
+ 		ida_init(&gcwq->worker_ida);
+-
+-		gcwq->trustee_state = TRUSTEE_DONE;
+-		init_waitqueue_head(&gcwq->trustee_wait);
++		init_waitqueue_head(&gcwq->idle_wait);
+ 	}
+ 
+ 	/* create the initial worker */
+Index: linux-3.2/kernel/workqueue_sched.h
+===================================================================
+--- linux-3.2.orig/kernel/workqueue_sched.h
++++ linux-3.2/kernel/workqueue_sched.h
+@@ -4,6 +4,5 @@
+  * Scheduler hooks for concurrency managed workqueue.  Only to be
+  * included from sched.c and workqueue.c.
+  */
+-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
+-struct task_struct *wq_worker_sleeping(struct task_struct *task,
+-				       unsigned int cpu);
++void wq_worker_running(struct task_struct *task);
++void wq_worker_sleeping(struct task_struct *task);
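+
Taken together with the sched.c hunks earlier in the patch, these prototype changes
replace the old scheduler callbacks (wq_worker_waking_up()/wq_worker_sleeping() invoked
under rq->lock from try_to_wake_up()/__schedule()) with hooks that schedule() calls
before and after __schedule(), outside of rq->lock, so they are free to take gcwq->lock
themselves. The call flow, as it appears in the patched kernel/sched.c above:

	asmlinkage void __sched schedule(void)
	{
		struct task_struct *tsk = current;

		sched_submit_work(tsk);		/* PF_WQ_WORKER: wq_worker_sleeping(tsk) */
		__schedule();
		sched_update_worker(tsk);	/* PF_WQ_WORKER: wq_worker_running(tsk) */
	}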
+Index: linux-3.2/arch/mips/kernel/signal.c
+===================================================================
+--- linux-3.2.orig/arch/mips/kernel/signal.c
++++ linux-3.2/arch/mips/kernel/signal.c
+@@ -604,6 +604,9 @@ static void do_signal(struct pt_regs *re
+ 	if (!user_mode(regs))
+ 		return;
+ 
++	local_irq_enable();
++	preempt_check_resched();
++
+ 	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ 		oldset = &current->saved_sigmask;
+ 	else
+Index: linux-3.2/arch/mips/cavium-octeon/smp.c
+===================================================================
+--- linux-3.2.orig/arch/mips/cavium-octeon/smp.c
++++ linux-3.2/arch/mips/cavium-octeon/smp.c
+@@ -207,8 +207,9 @@ void octeon_prepare_cpus(unsigned int ma
+ 	 * the other bits alone.
+ 	 */
+ 	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
+-	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED,
+-			"SMP-IPI", mailbox_interrupt)) {
++	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
++			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
++			mailbox_interrupt)) {
+ 		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
+ 	}
+ }
+Index: linux-3.2/arch/arm/kernel/signal.c
+===================================================================
+--- linux-3.2.orig/arch/arm/kernel/signal.c
++++ linux-3.2/arch/arm/kernel/signal.c
+@@ -673,6 +673,9 @@ static void do_signal(struct pt_regs *re
+ 	if (!user_mode(regs))
+ 		return;
+ 
++	local_irq_enable();
++	preempt_check_resched();
++
+ 	/*
+ 	 * If we were from a system call, check for system call restarting...
+ 	 */
+Index: linux-3.2/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/platforms/85xx/mpc85xx_cds.c
++++ linux-3.2/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+@@ -177,7 +177,7 @@ static irqreturn_t mpc85xx_8259_cascade_
+ 
+ static struct irqaction mpc85xxcds_8259_irqaction = {
+ 	.handler = mpc85xx_8259_cascade_action,
+-	.flags = IRQF_SHARED,
++	.flags = IRQF_SHARED | IRQF_NO_THREAD,
+ 	.name = "8259 cascade",
+ };
+ #endif /* PPC_I8259 */
+Index: linux-3.2/arch/powerpc/platforms/wsp/opb_pic.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/platforms/wsp/opb_pic.c
++++ linux-3.2/arch/powerpc/platforms/wsp/opb_pic.c
+@@ -320,7 +320,8 @@ void __init opb_pic_init(void)
+ 		}
+ 
+ 		/* Attach opb interrupt handler to new virtual IRQ */
+-		rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb);
++		rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
++				 "OPB LS Cascade", opb);
+ 		if (rc) {
+ 			printk("opb: request_irq failed: %d\n", rc);
+ 			continue;
+Index: linux-3.2/arch/powerpc/kernel/smp.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/kernel/smp.c
++++ linux-3.2/arch/powerpc/kernel/smp.c
+@@ -187,8 +187,8 @@ int smp_request_message_ipi(int virq, in
+ 		return 1;
+ 	}
+ #endif
+-	err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU,
+-			  smp_ipi_name[msg], 0);
++	err = request_irq(virq, smp_ipi_action[msg],
++			  IRQF_PERCPU | IRQF_NO_THREAD, smp_ipi_name[msg], 0);
+ 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
+ 		virq, smp_ipi_name[msg], err);
+ 
+Index: linux-3.2/arch/powerpc/platforms/powermac/smp.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/platforms/powermac/smp.c
++++ linux-3.2/arch/powerpc/platforms/powermac/smp.c
+@@ -200,7 +200,7 @@ static int psurge_secondary_ipi_init(voi
+ 
+ 	if (psurge_secondary_virq)
+ 		rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
+-			IRQF_PERCPU, "IPI", NULL);
++				 IRQF_NO_THREAD | IRQF_PERCPU, "IPI", NULL);
+ 
+ 	if (rc)
+ 		pr_err("Failed to setup secondary cpu IPI\n");
+@@ -408,7 +408,7 @@ static int __init smp_psurge_kick_cpu(in
+ 
+ static struct irqaction psurge_irqaction = {
+ 	.handler = psurge_ipi_intr,
+-	.flags = IRQF_PERCPU,
++	.flags = IRQF_PERCPU | IRQF_NO_THREAD,
+ 	.name = "primary IPI",
+ };
+ 
+Index: linux-3.2/arch/powerpc/sysdev/xics/xics-common.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/sysdev/xics/xics-common.c
++++ linux-3.2/arch/powerpc/sysdev/xics/xics-common.c
+@@ -134,10 +134,11 @@ static void xics_request_ipi(void)
+ 	BUG_ON(ipi == NO_IRQ);
+ 
+ 	/*
+-	 * IPIs are marked IRQF_PERCPU. The handler was set in map.
++	 * IPIs are marked PERCPU and also IRQF_NO_THREAD as they must
++	 * run in hard interrupt context. The handler was set in map.
+ 	 */
+ 	BUG_ON(request_irq(ipi, icp_ops->ipi_action,
+-			   IRQF_PERCPU, "IPI", NULL));
++			   IRQF_NO_THREAD|IRQF_PERCPU, "IPI", NULL));
+ }
+ 
+ int __init xics_smp_probe(void)
+Index: linux-3.2/arch/powerpc/Kconfig
+===================================================================
+--- linux-3.2.orig/arch/powerpc/Kconfig
++++ linux-3.2/arch/powerpc/Kconfig
+@@ -69,10 +69,11 @@ config LOCKDEP_SUPPORT
+ 
+ config RWSEM_GENERIC_SPINLOCK
+ 	bool
++	default y if PREEMPT_RT_FULL
+ 
+ config RWSEM_XCHGADD_ALGORITHM
+ 	bool
+-	default y
++	default y if !PREEMPT_RT_FULL
+ 
+ config GENERIC_LOCKBREAK
+ 	bool
+@@ -132,6 +133,7 @@ config PPC
+ 	select IRQ_PER_CPU
+ 	select GENERIC_IRQ_SHOW
+ 	select GENERIC_IRQ_SHOW_LEVEL
++	select IRQ_FORCED_THREADING
+ 	select HAVE_RCU_TABLE_FREE if SMP
+ 	select HAVE_SYSCALL_TRACEPOINTS
+ 	select HAVE_BPF_JIT if (PPC64 && NET)
+@@ -274,7 +276,7 @@ menu "Kernel options"
+ 
+ config HIGHMEM
+ 	bool "High memory support"
+-	depends on PPC32
++	depends on PPC32 && !PREEMPT_RT_FULL
+ 
+ source kernel/time/Kconfig
+ source kernel/Kconfig.hz
+Index: linux-3.2/kernel/sched_rt.c
+===================================================================
+--- linux-3.2.orig/kernel/sched_rt.c
++++ linux-3.2/kernel/sched_rt.c
+@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq 
+ {
+ 	int more = 0;
+ 
++	if (!sched_feat(RT_RUNTIME_SHARE))
++		return more;
++
+ 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
+ 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ 		more = do_balance_runtime(rt_rq);
+@@ -577,12 +580,9 @@ static inline int balance_runtime(struct
+ 
+ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+ {
+-	int i, idle = 1;
++	int i, idle = 1, throttled = 0;
+ 	const struct cpumask *span;
+ 
+-	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
+-		return 1;
+-
+ 	span = sched_rt_period_mask();
+ 	for_each_cpu(i, span) {
+ 		int enqueue = 0;
+@@ -617,12 +617,17 @@ static int do_sched_rt_period_timer(stru
+ 			if (!rt_rq_throttled(rt_rq))
+ 				enqueue = 1;
+ 		}
++		if (rt_rq->rt_throttled)
++			throttled = 1;
+ 
+ 		if (enqueue)
+ 			sched_rt_rq_enqueue(rt_rq);
+ 		raw_spin_unlock(&rq->lock);
+ 	}
+ 
++	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
++		return 1;
++
+ 	return idle;
+ }
+ 
+@@ -654,8 +659,24 @@ static int sched_rt_runtime_exceeded(str
+ 		return 0;
+ 
+ 	if (rt_rq->rt_time > runtime) {
+-		rt_rq->rt_throttled = 1;
+-		printk_once(KERN_WARNING "sched: RT throttling activated\n");
++		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
++
++		/*
++		 * Don't actually throttle groups that have no runtime assigned
++		 * but accrue some time due to boosting.
++		 */
++		if (likely(rt_b->rt_runtime)) {
++			rt_rq->rt_throttled = 1;
++			printk_once(KERN_WARNING "sched: RT throttling activated\n");
++		} else {
++			/*
++			 * In case we did anyway, make it go away,
++			 * replenishment is a joke, since it will replenish us
++			 * with exactly 0 ns.
++			 */
++			rt_rq->rt_time = 0;
++		}
++
+ 		if (rt_rq_throttled(rt_rq)) {
+ 			sched_rt_rq_dequeue(rt_rq);
+ 			return 1;
+@@ -683,7 +704,8 @@ static void update_curr_rt(struct rq *rq
+ 	if (unlikely((s64)delta_exec < 0))
+ 		delta_exec = 0;
+ 
+-	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
++	schedstat_set(curr->se.statistics.exec_max,
++		      max(curr->se.statistics.exec_max, delta_exec));
+ 
+ 	curr->se.sum_exec_runtime += delta_exec;
+ 	account_group_exec_runtime(curr, delta_exec);
+Index: linux-3.2/include/linux/init_task.h
+===================================================================
+--- linux-3.2.orig/include/linux/init_task.h
++++ linux-3.2/include/linux/init_task.h
+@@ -126,6 +126,14 @@ extern struct cred init_cred;
+ # define INIT_PERF_EVENTS(tsk)
+ #endif
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define INIT_TIMER_LIST		.posix_timer_list = NULL,
++#else
++# define INIT_TIMER_LIST
++#endif
++
++#define INIT_TASK_COMM "swapper"
++
+ /*
+  *  INIT_TASK is used to set up the first task table, touch at
+  * your own risk!. Base=0, limit=0x1fffff (=2MB)
+@@ -162,7 +170,7 @@ extern struct cred init_cred;
+ 	.group_leader	= &tsk,						\
+ 	RCU_INIT_POINTER(.real_cred, &init_cred),			\
+ 	RCU_INIT_POINTER(.cred, &init_cred),				\
+-	.comm		= "swapper",					\
++	.comm		= INIT_TASK_COMM,				\
+ 	.thread		= INIT_THREAD,					\
+ 	.fs		= &init_fs,					\
+ 	.files		= &init_files,					\
+@@ -178,6 +186,7 @@ extern struct cred init_cred;
+ 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
+ 	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),	\
+ 	.timer_slack_ns = 50000, /* 50 usec default slack */		\
++	INIT_TIMER_LIST							\
+ 	.pids = {							\
+ 		[PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),		\
+ 		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
+Index: linux-3.2/kernel/signal.c
+===================================================================
+--- linux-3.2.orig/kernel/signal.c
++++ linux-3.2/kernel/signal.c
+@@ -344,13 +344,45 @@ static bool task_participate_group_stop(
+ 	return false;
+ }
+ 
++#ifdef __HAVE_ARCH_CMPXCHG
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++	struct sigqueue *q = t->sigqueue_cache;
++
++	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
++		return NULL;
++	return q;
++}
++
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
++		return 0;
++	return 1;
++}
++
++#else
++
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++	return NULL;
++}
++
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++	return 1;
++}
++
++#endif
++
+ /*
+  * allocate a new signal queue record
+  * - this may be called without locks if and only if t == current, otherwise an
+  *   appropriate lock must be held to stop the target task from exiting
+  */
+ static struct sigqueue *
+-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
++		    int override_rlimit, int fromslab)
+ {
+ 	struct sigqueue *q = NULL;
+ 	struct user_struct *user;
+@@ -367,7 +399,10 @@ __sigqueue_alloc(int sig, struct task_st
+ 	if (override_rlimit ||
+ 	    atomic_read(&user->sigpending) <=
+ 			task_rlimit(t, RLIMIT_SIGPENDING)) {
+-		q = kmem_cache_alloc(sigqueue_cachep, flags);
++		if (!fromslab)
++			q = get_task_cache(t);
++		if (!q)
++			q = kmem_cache_alloc(sigqueue_cachep, flags);
+ 	} else {
+ 		print_dropped_signal(sig);
+ 	}
+@@ -384,6 +419,13 @@ __sigqueue_alloc(int sig, struct task_st
+ 	return q;
+ }
+ 
++static struct sigqueue *
++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
++		 int override_rlimit)
++{
++	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
++}
++
+ static void __sigqueue_free(struct sigqueue *q)
+ {
+ 	if (q->flags & SIGQUEUE_PREALLOC)
+@@ -393,6 +435,21 @@ static void __sigqueue_free(struct sigqu
+ 	kmem_cache_free(sigqueue_cachep, q);
+ }
+ 
++static void sigqueue_free_current(struct sigqueue *q)
++{
++	struct user_struct *up;
++
++	if (q->flags & SIGQUEUE_PREALLOC)
++		return;
++
++	up = q->user;
++	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
++		atomic_dec(&up->sigpending);
++		free_uid(up);
++	} else
++		  __sigqueue_free(q);
++}
++
+ void flush_sigqueue(struct sigpending *queue)
+ {
+ 	struct sigqueue *q;
+@@ -406,6 +463,21 @@ void flush_sigqueue(struct sigpending *q
+ }
+ 
+ /*
++ * Called from __exit_signal. Flush tsk->pending and
++ * tsk->sigqueue_cache
++ */
++void flush_task_sigqueue(struct task_struct *tsk)
++{
++	struct sigqueue *q;
++
++	flush_sigqueue(&tsk->pending);
++
++	q = get_task_cache(tsk);
++	if (q)
++		kmem_cache_free(sigqueue_cachep, q);
++}
++
++/*
+  * Flush all pending signals for a task.
+  */
+ void __flush_signals(struct task_struct *t)
+@@ -554,7 +626,7 @@ static void collect_signal(int sig, stru
+ still_pending:
+ 		list_del_init(&first->list);
+ 		copy_siginfo(info, &first->info);
+-		__sigqueue_free(first);
++		sigqueue_free_current(first);
+ 	} else {
+ 		/*
+ 		 * Ok, it wasn't in the queue.  This must be
+@@ -600,6 +672,8 @@ int dequeue_signal(struct task_struct *t
+ {
+ 	int signr;
+ 
++	WARN_ON_ONCE(tsk != current);
++
+ 	/* We only dequeue private signals from ourselves, we don't let
+ 	 * signalfd steal them
+ 	 */
+@@ -682,6 +756,9 @@ void signal_wake_up(struct task_struct *
+ 
+ 	set_tsk_thread_flag(t, TIF_SIGPENDING);
+ 
++	if (unlikely(t == current))
++		return;
++
+ 	/*
+ 	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+ 	 * case. We don't check t->state here because there is a race with it
+@@ -1252,12 +1329,12 @@ struct sighand_struct *__lock_task_sigha
+ 	struct sighand_struct *sighand;
+ 
+ 	for (;;) {
+-		local_irq_save(*flags);
++		local_irq_save_nort(*flags);
+ 		rcu_read_lock();
+ 		sighand = rcu_dereference(tsk->sighand);
+ 		if (unlikely(sighand == NULL)) {
+ 			rcu_read_unlock();
+-			local_irq_restore(*flags);
++			local_irq_restore_nort(*flags);
+ 			break;
+ 		}
+ 
+@@ -1268,7 +1345,7 @@ struct sighand_struct *__lock_task_sigha
+ 		}
+ 		spin_unlock(&sighand->siglock);
+ 		rcu_read_unlock();
+-		local_irq_restore(*flags);
++		local_irq_restore_nort(*flags);
+ 	}
+ 
+ 	return sighand;
+@@ -1515,7 +1592,8 @@ EXPORT_SYMBOL(kill_pid);
+  */
+ struct sigqueue *sigqueue_alloc(void)
+ {
+-	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
++	/* Preallocated sigqueue objects always come from the slabcache! */
++	struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
+ 
+ 	if (q)
+ 		q->flags |= SIGQUEUE_PREALLOC;
+@@ -1857,15 +1935,7 @@ static void ptrace_stop(int exit_code, i
+ 		if (gstop_done && ptrace_reparented(current))
+ 			do_notify_parent_cldstop(current, false, why);
+ 
+-		/*
+-		 * Don't want to allow preemption here, because
+-		 * sys_ptrace() needs this task to be inactive.
+-		 *
+-		 * XXX: implement read_unlock_no_resched().
+-		 */
+-		preempt_disable();
+ 		read_unlock(&tasklist_lock);
+-		preempt_enable_no_resched();
+ 		schedule();
+ 	} else {
+ 		/*
+Index: linux-3.2/arch/arm/kernel/perf_event.c
+===================================================================
+--- linux-3.2.orig/arch/arm/kernel/perf_event.c
++++ linux-3.2/arch/arm/kernel/perf_event.c
+@@ -432,7 +432,7 @@ armpmu_reserve_hardware(struct arm_pmu *
+ 		}
+ 
+ 		err = request_irq(irq, handle_irq,
+-				  IRQF_DISABLED | IRQF_NOBALANCING,
++				  IRQF_NOBALANCING | IRQF_NO_THREAD,
+ 				  "arm-pmu", armpmu);
+ 		if (err) {
+ 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+Index: linux-3.2/arch/arm/Kconfig
+===================================================================
+--- linux-3.2.orig/arch/arm/Kconfig
++++ linux-3.2/arch/arm/Kconfig
+@@ -29,6 +29,7 @@ config ARM
+ 	select HAVE_GENERIC_HARDIRQS
+ 	select HAVE_SPARSE_IRQ
+ 	select GENERIC_IRQ_SHOW
++	select IRQ_FORCED_THREADING
+ 	select CPU_PM if (SUSPEND || CPU_IDLE)
+ 	help
+ 	  The ARM series is a line of low-power-consumption RISC chip designs
+@@ -1654,7 +1655,7 @@ config HAVE_ARCH_PFN_VALID
+ 
+ config HIGHMEM
+ 	bool "High Memory Support"
+-	depends on MMU
++	depends on MMU && !PREEMPT_RT_FULL
+ 	help
+ 	  The address space of ARM processors is only 4 Gigabytes large
+ 	  and it has to accommodate user address space, kernel address
+Index: linux-3.2/arch/arm/mach-exynos/platsmp.c
+===================================================================
+--- linux-3.2.orig/arch/arm/mach-exynos/platsmp.c
++++ linux-3.2/arch/arm/mach-exynos/platsmp.c
+@@ -63,7 +63,7 @@ static void __iomem *scu_base_addr(void)
+ 	return (void __iomem *)(S5P_VA_SCU);
+ }
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ static void __cpuinit exynos4_gic_secondary_init(void)
+ {
+@@ -108,8 +108,8 @@ void __cpuinit platform_secondary_init(u
+ 	/*
+ 	 * Synchronise with the boot thread.
+ 	 */
+-	spin_lock(&boot_lock);
+-	spin_unlock(&boot_lock);
++	raw_spin_lock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ }
+ 
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -120,7 +120,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * Set synchronisation state between this boot processor
+ 	 * and the secondary one
+ 	 */
+-	spin_lock(&boot_lock);
++	raw_spin_lock(&boot_lock);
+ 
+ 	/*
+ 	 * The secondary processor is waiting to be released from
+@@ -149,7 +149,7 @@ int __cpuinit boot_secondary(unsigned in
+ 
+ 		if (timeout == 0) {
+ 			printk(KERN_ERR "cpu1 power enable failed");
+-			spin_unlock(&boot_lock);
++			raw_spin_unlock(&boot_lock);
+ 			return -ETIMEDOUT;
+ 		}
+ 	}
+@@ -177,7 +177,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * now the secondary core is starting up let it run its
+ 	 * calibrations, then wait for it to finish
+ 	 */
+-	spin_unlock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ 
+ 	return pen_release != -1 ? -ENOSYS : 0;
+ }
+Index: linux-3.2/arch/arm/mach-msm/platsmp.c
+===================================================================
+--- linux-3.2.orig/arch/arm/mach-msm/platsmp.c
++++ linux-3.2/arch/arm/mach-msm/platsmp.c
+@@ -39,7 +39,7 @@ extern void msm_secondary_startup(void);
+  */
+ volatile int pen_release = -1;
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ static inline int get_core_count(void)
+ {
+@@ -69,8 +69,8 @@ void __cpuinit platform_secondary_init(u
+ 	/*
+ 	 * Synchronise with the boot thread.
+ 	 */
+-	spin_lock(&boot_lock);
+-	spin_unlock(&boot_lock);
++	raw_spin_lock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ }
+ 
+ static __cpuinit void prepare_cold_cpu(unsigned int cpu)
+@@ -107,7 +107,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * set synchronisation state between this boot processor
+ 	 * and the secondary one
+ 	 */
+-	spin_lock(&boot_lock);
++	raw_spin_lock(&boot_lock);
+ 
+ 	/*
+ 	 * The secondary processor is waiting to be released from
+@@ -141,7 +141,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * now the secondary core is starting up let it run its
+ 	 * calibrations, then wait for it to finish
+ 	 */
+-	spin_unlock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ 
+ 	return pen_release != -1 ? -ENOSYS : 0;
+ }
+Index: linux-3.2/arch/arm/mach-omap2/omap-smp.c
+===================================================================
+--- linux-3.2.orig/arch/arm/mach-omap2/omap-smp.c
++++ linux-3.2/arch/arm/mach-omap2/omap-smp.c
+@@ -29,7 +29,7 @@
+ /* SCU base address */
+ static void __iomem *scu_base;
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -43,8 +43,8 @@ void __cpuinit platform_secondary_init(u
+ 	/*
+ 	 * Synchronise with the boot thread.
+ 	 */
+-	spin_lock(&boot_lock);
+-	spin_unlock(&boot_lock);
++	raw_spin_lock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ }
+ 
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -53,7 +53,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * Set synchronisation state between this boot processor
+ 	 * and the secondary one
+ 	 */
+-	spin_lock(&boot_lock);
++	raw_spin_lock(&boot_lock);
+ 
+ 	/*
+ 	 * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -70,7 +70,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * Now the secondary core is starting up let it run its
+ 	 * calibrations, then wait for it to finish
+ 	 */
+-	spin_unlock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ 
+ 	return 0;
+ }
+Index: linux-3.2/arch/arm/mach-tegra/platsmp.c
+===================================================================
+--- linux-3.2.orig/arch/arm/mach-tegra/platsmp.c
++++ linux-3.2/arch/arm/mach-tegra/platsmp.c
+@@ -28,7 +28,7 @@
+ 
+ extern void tegra_secondary_startup(void);
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
+ 
+ #define EVP_CPU_RESET_VECTOR \
+@@ -50,8 +50,8 @@ void __cpuinit platform_secondary_init(u
+ 	/*
+ 	 * Synchronise with the boot thread.
+ 	 */
+-	spin_lock(&boot_lock);
+-	spin_unlock(&boot_lock);
++	raw_spin_lock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ }
+ 
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -65,7 +65,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * set synchronisation state between this boot processor
+ 	 * and the secondary one
+ 	 */
+-	spin_lock(&boot_lock);
++	raw_spin_lock(&boot_lock);
+ 
+ 
+ 	/* set the reset vector to point to the secondary_startup routine */
+@@ -101,7 +101,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * now the secondary core is starting up let it run its
+ 	 * calibrations, then wait for it to finish
+ 	 */
+-	spin_unlock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ 
+ 	return 0;
+ }
+Index: linux-3.2/arch/arm/mach-ux500/platsmp.c
+===================================================================
+--- linux-3.2.orig/arch/arm/mach-ux500/platsmp.c
++++ linux-3.2/arch/arm/mach-ux500/platsmp.c
+@@ -57,7 +57,7 @@ static void __iomem *scu_base_addr(void)
+ 	return NULL;
+ }
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -77,8 +77,8 @@ void __cpuinit platform_secondary_init(u
+ 	/*
+ 	 * Synchronise with the boot thread.
+ 	 */
+-	spin_lock(&boot_lock);
+-	spin_unlock(&boot_lock);
++	raw_spin_lock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ }
+ 
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -89,7 +89,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * set synchronisation state between this boot processor
+ 	 * and the secondary one
+ 	 */
+-	spin_lock(&boot_lock);
++	raw_spin_lock(&boot_lock);
+ 
+ 	/*
+ 	 * The secondary processor is waiting to be released from
+@@ -110,7 +110,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * now the secondary core is starting up let it run its
+ 	 * calibrations, then wait for it to finish
+ 	 */
+-	spin_unlock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ 
+ 	return pen_release != -1 ? -ENOSYS : 0;
+ }
+Index: linux-3.2/arch/arm/plat-versatile/platsmp.c
+===================================================================
+--- linux-3.2.orig/arch/arm/plat-versatile/platsmp.c
++++ linux-3.2/arch/arm/plat-versatile/platsmp.c
+@@ -37,7 +37,7 @@ static void __cpuinit write_pen_release(
+ 	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+ }
+ 
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ 
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -57,8 +57,8 @@ void __cpuinit platform_secondary_init(u
+ 	/*
+ 	 * Synchronise with the boot thread.
+ 	 */
+-	spin_lock(&boot_lock);
+-	spin_unlock(&boot_lock);
++	raw_spin_lock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ }
+ 
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -69,7 +69,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * Set synchronisation state between this boot processor
+ 	 * and the secondary one
+ 	 */
+-	spin_lock(&boot_lock);
++	raw_spin_lock(&boot_lock);
+ 
+ 	/*
+ 	 * This is really belt and braces; we hold unintended secondary
+@@ -99,7 +99,7 @@ int __cpuinit boot_secondary(unsigned in
+ 	 * now the secondary core is starting up let it run its
+ 	 * calibrations, then wait for it to finish
+ 	 */
+-	spin_unlock(&boot_lock);
++	raw_spin_unlock(&boot_lock);
+ 
+ 	return pen_release != -1 ? -ENOSYS : 0;
+ }
+Index: linux-3.2/include/linux/sched.h
+===================================================================
+--- linux-3.2.orig/include/linux/sched.h
++++ linux-3.2/include/linux/sched.h
+@@ -63,6 +63,7 @@ struct sched_param {
+ #include <linux/nodemask.h>
+ #include <linux/mm_types.h>
+ 
++#include <asm/kmap_types.h>
+ #include <asm/system.h>
+ #include <asm/page.h>
+ #include <asm/ptrace.h>
+@@ -91,6 +92,7 @@ struct sched_param {
+ #include <linux/latencytop.h>
+ #include <linux/cred.h>
+ #include <linux/llist.h>
++#include <linux/hardirq.h>
+ 
+ #include <asm/processor.h>
+ 
+@@ -359,6 +361,7 @@ extern signed long schedule_timeout_inte
+ extern signed long schedule_timeout_killable(signed long timeout);
+ extern signed long schedule_timeout_uninterruptible(signed long timeout);
+ asmlinkage void schedule(void);
++extern void schedule_preempt_disabled(void);
+ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
+ 
+ struct nsproxy;
+@@ -1070,6 +1073,7 @@ struct sched_domain;
+ #define WF_SYNC		0x01		/* waker goes to sleep after wakup */
+ #define WF_FORK		0x02		/* child wakeup after fork */
+ #define WF_MIGRATED	0x04		/* internal use, task got migrated */
++#define WF_LOCK_SLEEPER	0x08		/* wakeup spinlock "sleeper" */
+ 
+ #define ENQUEUE_WAKEUP		1
+ #define ENQUEUE_HEAD		2
+@@ -1219,6 +1223,7 @@ enum perf_event_task_context {
+ 
+ struct task_struct {
+ 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
++	volatile long saved_state;	/* saved state for "spinlock sleepers" */
+ 	void *stack;
+ 	atomic_t usage;
+ 	unsigned int flags;	/* per process flags, defined below */
+@@ -1255,6 +1260,12 @@ struct task_struct {
+ #endif
+ 
+ 	unsigned int policy;
++#ifdef CONFIG_PREEMPT_RT_FULL
++	int migrate_disable;
++#ifdef CONFIG_SCHED_DEBUG
++	int migrate_disable_atomic;
++#endif
++#endif
+ 	cpumask_t cpus_allowed;
+ 
+ #ifdef CONFIG_PREEMPT_RCU
+@@ -1353,6 +1364,9 @@ struct task_struct {
+ 
+ 	struct task_cputime cputime_expires;
+ 	struct list_head cpu_timers[3];
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct task_struct *posix_timer_list;
++#endif
+ 
+ /* process credentials */
+ 	const struct cred __rcu *real_cred; /* objective and real subjective task
+@@ -1386,6 +1400,7 @@ struct task_struct {
+ /* signal handlers */
+ 	struct signal_struct *signal;
+ 	struct sighand_struct *sighand;
++	struct sigqueue *sigqueue_cache;
+ 
+ 	sigset_t blocked, real_blocked;
+ 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
+@@ -1429,6 +1444,9 @@ struct task_struct {
+ 	/* mutex deadlock detection */
+ 	struct mutex_waiter *blocked_on;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++	int pagefault_disabled;
++#endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ 	unsigned int irq_events;
+ 	unsigned long hardirq_enable_ip;
+@@ -1561,6 +1579,12 @@ struct task_struct {
+ 	unsigned long trace;
+ 	/* bitmask and counter of trace recursion */
+ 	unsigned long trace_recursion;
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++	u64 preempt_timestamp_hist;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++	unsigned long timer_offset;
++#endif
++#endif
+ #endif /* CONFIG_TRACING */
+ #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+ 	struct memcg_batch_info {
+@@ -1573,10 +1597,26 @@ struct task_struct {
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+ 	atomic_t ptrace_bp_refcnt;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct rcu_head put_rcu;
++	int softirq_nestcnt;
++#endif
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++	int kmap_idx;
++	pte_t kmap_pte[KM_TYPE_NR];
++#endif
+ };
+ 
+-/* Future-safe accessor for struct task_struct's cpus_allowed. */
+-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
++#else
++static inline bool cur_pf_disabled(void) { return false; }
++#endif
++
++static inline bool pagefault_disabled(void)
++{
++	return in_atomic() || cur_pf_disabled();
++}
+ 
+ /*
+  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+@@ -1746,6 +1786,15 @@ extern struct pid *cad_pid;
+ extern void free_task(struct task_struct *tsk);
+ #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __put_task_struct_cb(struct rcu_head *rhp);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++	if (atomic_dec_and_test(&t->usage))
++		call_rcu(&t->put_rcu, __put_task_struct_cb);
++}
++#else
+ extern void __put_task_struct(struct task_struct *t);
+ 
+ static inline void put_task_struct(struct task_struct *t)
+@@ -1753,6 +1802,7 @@ static inline void put_task_struct(struc
+ 	if (atomic_dec_and_test(&t->usage))
+ 		__put_task_struct(t);
+ }
++#endif
+ 
+ extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+@@ -1778,6 +1828,7 @@ extern void thread_group_times(struct ta
+ #define PF_FROZEN	0x00010000	/* frozen for system suspend */
+ #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
+ #define PF_KSWAPD	0x00040000	/* I am kswapd */
++#define PF_STOMPER	0x00080000	/* I am a stomp machine thread */
+ #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
+ #define PF_KTHREAD	0x00200000	/* I am a kernel thread */
+ #define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
+@@ -2051,12 +2102,20 @@ extern unsigned int sysctl_sched_cfs_ban
+ extern int rt_mutex_getprio(struct task_struct *p);
+ extern void rt_mutex_setprio(struct task_struct *p, int prio);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++{
++	return tsk->pi_blocked_on != NULL;
++}
+ #else
+ static inline int rt_mutex_getprio(struct task_struct *p)
+ {
+ 	return p->normal_prio;
+ }
+ # define rt_mutex_adjust_pi(p)		do { } while (0)
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++{
++	return false;
++}
+ #endif
+ 
+ extern bool yield_to(struct task_struct *p, bool preempt);
+@@ -2136,6 +2195,7 @@ extern void xtime_update(unsigned long t
+ 
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct * tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+ #ifdef CONFIG_SMP
+  extern void kick_process(struct task_struct *tsk);
+@@ -2226,12 +2286,24 @@ extern struct mm_struct * mm_alloc(void)
+ 
+ /* mmdrop drops the mm and the page tables */
+ extern void __mmdrop(struct mm_struct *);
++
+ static inline void mmdrop(struct mm_struct * mm)
+ {
+ 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ 		__mmdrop(mm);
+ }
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
++{
++	if (atomic_dec_and_test(&mm->mm_count))
++		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
++}
++#else
++# define mmdrop_delayed(mm)	mmdrop(mm)
++#endif
++
+ /* mmput gets rid of the mappings and all user-space */
+ extern void mmput(struct mm_struct *);
+ /* Grab a reference to a task's mm, if it is not already going away */
+@@ -2534,7 +2606,7 @@ extern int _cond_resched(void);
+ 
+ extern int __cond_resched_lock(spinlock_t *lock);
+ 
+-#ifdef CONFIG_PREEMPT_COUNT
++#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
+ #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
+ #else
+ #define PREEMPT_LOCK_OFFSET	0
+@@ -2545,12 +2617,16 @@ extern int __cond_resched_lock(spinlock_
+ 	__cond_resched_lock(lock);				\
+ })
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern int __cond_resched_softirq(void);
+ 
+ #define cond_resched_softirq() ({					\
+ 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
+ 	__cond_resched_softirq();					\
+ })
++#else
++# define cond_resched_softirq()		cond_resched()
++#endif
+ 
+ /*
+  * Does a critical section need to be broken due to another
+@@ -2613,6 +2689,26 @@ static inline void set_task_cpu(struct t
+ 
+ #endif /* CONFIG_SMP */
+ 
++static inline int __migrate_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++	return p->migrate_disable;
++#else
++	return 0;
++#endif
++}
++
++/* Future-safe accessor for struct task_struct's cpus_allowed. */
++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++	if (p->migrate_disable)
++		return cpumask_of(task_cpu(p));
++#endif
++
++	return &p->cpus_allowed;
++}
++
+ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+ 
+Index: linux-3.2/arch/arm/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/arm/kernel/process.c
++++ linux-3.2/arch/arm/kernel/process.c
+@@ -211,9 +211,7 @@ void cpu_idle(void)
+ 		}
+ 		leds_event(led_idle_end);
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+@@ -488,6 +486,31 @@ unsigned long arch_randomize_brk(struct 
+ }
+ 
+ #ifdef CONFIG_MMU
++
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock.  If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++	struct page *page;
++	unsigned long addr = 0xffff0000;
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++
++	pgd = pgd_offset_k(addr);
++	pud = pud_offset(pgd, addr);
++	pmd = pmd_offset(pud, addr);
++	page = pmd_page(*(pmd));
++
++	pgtable_page_ctor(page);
++
++	return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ /*
+  * The vectors page is always readable from user space for the
+  * atomic helpers and the signal restart code.  Let's declare a mapping
+Index: linux-3.2/arch/avr32/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/avr32/kernel/process.c
++++ linux-3.2/arch/avr32/kernel/process.c
+@@ -38,9 +38,7 @@ void cpu_idle(void)
+ 		while (!need_resched())
+ 			cpu_idle_sleep();
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/blackfin/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/blackfin/kernel/process.c
++++ linux-3.2/arch/blackfin/kernel/process.c
+@@ -92,9 +92,7 @@ void cpu_idle(void)
+ 		while (!need_resched())
+ 			idle();
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/cris/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/cris/kernel/process.c
++++ linux-3.2/arch/cris/kernel/process.c
+@@ -115,9 +115,7 @@ void cpu_idle (void)
+ 				idle = default_idle;
+ 			idle();
+ 		}
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/frv/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/frv/kernel/process.c
++++ linux-3.2/arch/frv/kernel/process.c
+@@ -92,9 +92,7 @@ void cpu_idle(void)
+ 				idle();
+ 		}
+ 
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/h8300/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/h8300/kernel/process.c
++++ linux-3.2/arch/h8300/kernel/process.c
+@@ -81,9 +81,7 @@ void cpu_idle(void)
+ 	while (1) {
+ 		while (!need_resched())
+ 			idle();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/ia64/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/ia64/kernel/process.c
++++ linux-3.2/arch/ia64/kernel/process.c
+@@ -330,9 +330,7 @@ cpu_idle (void)
+ 			normal_xtp();
+ #endif
+ 		}
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 		check_pgt_cache();
+ 		if (cpu_is_offline(cpu))
+ 			play_dead();
+Index: linux-3.2/arch/m32r/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/m32r/kernel/process.c
++++ linux-3.2/arch/m32r/kernel/process.c
+@@ -90,9 +90,7 @@ void cpu_idle (void)
+ 
+ 			idle();
+ 		}
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/m68k/kernel/process_mm.c
+===================================================================
+--- linux-3.2.orig/arch/m68k/kernel/process_mm.c
++++ linux-3.2/arch/m68k/kernel/process_mm.c
+@@ -94,9 +94,7 @@ void cpu_idle(void)
+ 	while (1) {
+ 		while (!need_resched())
+ 			idle();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/m68k/kernel/process_no.c
+===================================================================
+--- linux-3.2.orig/arch/m68k/kernel/process_no.c
++++ linux-3.2/arch/m68k/kernel/process_no.c
+@@ -73,9 +73,7 @@ void cpu_idle(void)
+ 	/* endless idle loop with no priority at all */
+ 	while (1) {
+ 		idle();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/microblaze/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/microblaze/kernel/process.c
++++ linux-3.2/arch/microblaze/kernel/process.c
+@@ -108,9 +108,7 @@ void cpu_idle(void)
+ 			idle();
+ 		tick_nohz_restart_sched_tick();
+ 
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 		check_pgt_cache();
+ 	}
+ }
+Index: linux-3.2/arch/mips/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/mips/kernel/process.c
++++ linux-3.2/arch/mips/kernel/process.c
+@@ -78,9 +78,7 @@ void __noreturn cpu_idle(void)
+ 			play_dead();
+ #endif
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/mn10300/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/mn10300/kernel/process.c
++++ linux-3.2/arch/mn10300/kernel/process.c
+@@ -123,9 +123,7 @@ void cpu_idle(void)
+ 			idle();
+ 		}
+ 
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/parisc/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/parisc/kernel/process.c
++++ linux-3.2/arch/parisc/kernel/process.c
+@@ -71,9 +71,7 @@ void cpu_idle(void)
+ 	while (1) {
+ 		while (!need_resched())
+ 			barrier();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 		check_pgt_cache();
+ 	}
+ }
+Index: linux-3.2/arch/powerpc/kernel/idle.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/kernel/idle.c
++++ linux-3.2/arch/powerpc/kernel/idle.c
+@@ -94,11 +94,11 @@ void cpu_idle(void)
+ 		HMT_medium();
+ 		ppc64_runlatch_on();
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		if (cpu_should_die())
++		if (cpu_should_die()) {
++			__preempt_enable_no_resched();
+ 			cpu_die();
+-		schedule();
+-		preempt_disable();
++		}
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/powerpc/platforms/iseries/setup.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/platforms/iseries/setup.c
++++ linux-3.2/arch/powerpc/platforms/iseries/setup.c
+@@ -582,9 +582,7 @@ static void iseries_shared_idle(void)
+ 		if (hvlpevent_is_pending())
+ 			process_iSeries_events();
+ 
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+@@ -611,9 +609,7 @@ static void iseries_dedicated_idle(void)
+ 
+ 		ppc64_runlatch_on();
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/s390/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/s390/kernel/process.c
++++ linux-3.2/arch/s390/kernel/process.c
+@@ -95,9 +95,7 @@ void cpu_idle(void)
+ 		while (!need_resched())
+ 			default_idle();
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/score/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/score/kernel/process.c
++++ linux-3.2/arch/score/kernel/process.c
+@@ -53,9 +53,7 @@ void __noreturn cpu_idle(void)
+ 		while (!need_resched())
+ 			barrier();
+ 
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/sh/kernel/idle.c
+===================================================================
+--- linux-3.2.orig/arch/sh/kernel/idle.c
++++ linux-3.2/arch/sh/kernel/idle.c
+@@ -112,9 +112,7 @@ void cpu_idle(void)
+ 		}
+ 
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/sparc/kernel/process_32.c
+===================================================================
+--- linux-3.2.orig/arch/sparc/kernel/process_32.c
++++ linux-3.2/arch/sparc/kernel/process_32.c
+@@ -113,9 +113,7 @@ void cpu_idle(void)
+ 			while (!need_resched())
+ 				cpu_relax();
+ 		}
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 		check_pgt_cache();
+ 	}
+ }
+@@ -138,9 +136,7 @@ void cpu_idle(void)
+ 			while (!need_resched())
+ 				cpu_relax();
+ 		}
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 		check_pgt_cache();
+ 	}
+ }
+Index: linux-3.2/arch/sparc/kernel/process_64.c
+===================================================================
+--- linux-3.2.orig/arch/sparc/kernel/process_64.c
++++ linux-3.2/arch/sparc/kernel/process_64.c
+@@ -102,15 +102,13 @@ void cpu_idle(void)
+ 
+ 		tick_nohz_restart_sched_tick();
+ 
+-		preempt_enable_no_resched();
+-
+ #ifdef CONFIG_HOTPLUG_CPU
+-		if (cpu_is_offline(cpu))
++		if (cpu_is_offline(cpu)) {
++			__preempt_enable_no_resched();
+ 			cpu_play_dead();
++		}
+ #endif
+-
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/tile/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/tile/kernel/process.c
++++ linux-3.2/arch/tile/kernel/process.c
+@@ -106,9 +106,7 @@ void cpu_idle(void)
+ 			current_thread_info()->status |= TS_POLLING;
+ 		}
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/x86/kernel/process_32.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/process_32.c
++++ linux-3.2/arch/x86/kernel/process_32.c
+@@ -39,6 +39,7 @@
+ #include <linux/io.h>
+ #include <linux/kdebug.h>
+ #include <linux/cpuidle.h>
++#include <linux/highmem.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+@@ -117,9 +118,7 @@ void cpu_idle(void)
+ 			start_critical_timings();
+ 		}
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+@@ -352,6 +351,41 @@ __switch_to(struct task_struct *prev_p, 
+ 		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ 		__switch_to_xtra(prev_p, next_p, tss);
+ 
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++	/*
++	 * Save @prev's kmap_atomic stack
++	 */
++	prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx);
++	if (unlikely(prev_p->kmap_idx)) {
++		int i;
++
++		for (i = 0; i < prev_p->kmap_idx; i++) {
++			int idx = i + KM_TYPE_NR * smp_processor_id();
++
++			pte_t *ptep = kmap_pte - idx;
++			prev_p->kmap_pte[i] = *ptep;
++			kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++		}
++
++		__this_cpu_write(__kmap_atomic_idx, 0);
++	}
++
++	/*
++	 * Restore @next_p's kmap_atomic stack
++	 */
++	if (unlikely(next_p->kmap_idx)) {
++		int i;
++
++		__this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx);
++
++		for (i = 0; i < next_p->kmap_idx; i++) {
++			int idx = i + KM_TYPE_NR * smp_processor_id();
++
++			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++		}
++	}
++#endif
++
+ 	/* If we're going to preload the fpu context, make sure clts
+ 	   is run while we're batching the cpu state updates. */
+ 	if (preload_fpu)
+Index: linux-3.2/arch/x86/kernel/process_64.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/process_64.c
++++ linux-3.2/arch/x86/kernel/process_64.c
+@@ -150,9 +150,7 @@ void cpu_idle(void)
+ 		}
+ 
+ 		tick_nohz_restart_sched_tick();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/arch/xtensa/kernel/process.c
+===================================================================
+--- linux-3.2.orig/arch/xtensa/kernel/process.c
++++ linux-3.2/arch/xtensa/kernel/process.c
+@@ -113,9 +113,7 @@ void cpu_idle(void)
+ 	while (1) {
+ 		while (!need_resched())
+ 			platform_idle();
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 	}
+ }
+ 
+Index: linux-3.2/init/main.c
+===================================================================
+--- linux-3.2.orig/init/main.c
++++ linux-3.2/init/main.c
+@@ -68,6 +68,7 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/posix-timers.h>
+ 
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+@@ -378,11 +379,8 @@ static noinline void __init_refok rest_i
+ 	 * at least once to get things moving:
+ 	 */
+ 	init_idle_bootup_task(current);
+-	preempt_enable_no_resched();
+-	schedule();
+-
++	schedule_preempt_disabled();
+ 	/* Call into cpu_idle with preempt disabled */
+-	preempt_disable();
+ 	cpu_idle();
+ }
+ 
+@@ -492,6 +490,7 @@ asmlinkage void __init start_kernel(void
+  * Interrupts are still disabled. Do necessary setups, then
+  * enable them
+  */
++	softirq_early_init();
+ 	tick_init();
+ 	boot_cpu_init();
+ 	page_address_init();
+Index: linux-3.2/kernel/mutex.c
+===================================================================
+--- linux-3.2.orig/kernel/mutex.c
++++ linux-3.2/kernel/mutex.c
+@@ -240,9 +240,7 @@ __mutex_lock_common(struct mutex *lock, 
+ 
+ 		/* didn't get the lock, go to sleep: */
+ 		spin_unlock_mutex(&lock->wait_lock, flags);
+-		preempt_enable_no_resched();
+-		schedule();
+-		preempt_disable();
++		schedule_preempt_disabled();
+ 		spin_lock_mutex(&lock->wait_lock, flags);
+ 	}
+ 
+Index: linux-3.2/kernel/softirq.c
+===================================================================
+--- linux-3.2.orig/kernel/softirq.c
++++ linux-3.2/kernel/softirq.c
+@@ -24,6 +24,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/tick.h>
++#include <linux/locallock.h>
+ 
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -61,6 +62,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
+ 	"TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+ 
++#ifdef CONFIG_NO_HZ
++# ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * On preempt-rt a softirq might be blocked on a lock. There might be
++ * no other runnable task on this CPU because the lock owner runs on
++ * some other CPU. So we have to go into idle with the pending bit
++ * set. Therefore we need to check this, otherwise we warn about false
++ * positives which confuse users and defeat the whole purpose of
++ * this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++	static int rate_limit;
++	u32 warnpending = 0, pending = local_softirq_pending();
++
++	if (rate_limit >= 10)
++		return;
++
++	if (pending) {
++		struct task_struct *tsk;
++
++		tsk = __get_cpu_var(ksoftirqd);
++		/*
++		 * The wakeup code in rtmutex.c wakes up the task
++		 * _before_ it sets pi_blocked_on to NULL under
++		 * tsk->pi_lock. So we need to check for both: state
++		 * and pi_blocked_on.
++		 */
++		raw_spin_lock(&tsk->pi_lock);
++
++		if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
++			warnpending = 1;
++
++		raw_spin_unlock(&tsk->pi_lock);
++	}
++
++	if (warnpending) {
++		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++		       pending);
++		rate_limit++;
++	}
++}
++# else
++/*
++ * On !PREEMPT_RT we just printk rate limited:
++ */
++void softirq_check_pending_idle(void)
++{
++	static int rate_limit;
++
++	if (rate_limit < 10) {
++		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++		       local_softirq_pending());
++		rate_limit++;
++	}
++}
++# endif
++#endif
++
+ /*
+  * we cannot loop indefinitely here to avoid userspace starvation,
+  * but we also don't want to introduce a worst case 1/HZ latency
+@@ -76,6 +138,36 @@ static void wakeup_softirqd(void)
+ 		wake_up_process(tsk);
+ }
+ 
++static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
++{
++	struct softirq_action *h = softirq_vec;
++	unsigned int prev_count = preempt_count();
++
++	local_irq_enable();
++	for ( ; pending; h++, pending >>= 1) {
++		unsigned int vec_nr = h - softirq_vec;
++
++		if (!(pending & 1))
++			continue;
++
++		kstat_incr_softirqs_this_cpu(vec_nr);
++		trace_softirq_entry(vec_nr);
++		h->action(h);
++		trace_softirq_exit(vec_nr);
++		if (unlikely(prev_count != preempt_count())) {
++			printk(KERN_ERR
++ "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n",
++			       vec_nr, softirq_to_name[vec_nr], h->action,
++			       prev_count, (unsigned int) preempt_count());
++			preempt_count() = prev_count;
++		}
++		if (need_rcu_bh_qs)
++			rcu_bh_qs(cpu);
++	}
++	local_irq_disable();
++}
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+  * preempt_count and SOFTIRQ_OFFSET usage:
+  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -206,7 +298,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
+ 
+ asmlinkage void __do_softirq(void)
+ {
+-	struct softirq_action *h;
+ 	__u32 pending;
+ 	int max_restart = MAX_SOFTIRQ_RESTART;
+ 	int cpu;
+@@ -215,7 +306,7 @@ asmlinkage void __do_softirq(void)
+ 	account_system_vtime(current);
+ 
+ 	__local_bh_disable((unsigned long)__builtin_return_address(0),
+-				SOFTIRQ_OFFSET);
++			   SOFTIRQ_OFFSET);
+ 	lockdep_softirq_enter();
+ 
+ 	cpu = smp_processor_id();
+@@ -223,36 +314,7 @@ restart:
+ 	/* Reset the pending bitmask before enabling irqs */
+ 	set_softirq_pending(0);
+ 
+-	local_irq_enable();
+-
+-	h = softirq_vec;
+-
+-	do {
+-		if (pending & 1) {
+-			unsigned int vec_nr = h - softirq_vec;
+-			int prev_count = preempt_count();
+-
+-			kstat_incr_softirqs_this_cpu(vec_nr);
+-
+-			trace_softirq_entry(vec_nr);
+-			h->action(h);
+-			trace_softirq_exit(vec_nr);
+-			if (unlikely(prev_count != preempt_count())) {
+-				printk(KERN_ERR "huh, entered softirq %u %s %p"
+-				       "with preempt_count %08x,"
+-				       " exited with %08x?\n", vec_nr,
+-				       softirq_to_name[vec_nr], h->action,
+-				       prev_count, preempt_count());
+-				preempt_count() = prev_count;
+-			}
+-
+-			rcu_bh_qs(cpu);
+-		}
+-		h++;
+-		pending >>= 1;
+-	} while (pending);
+-
+-	local_irq_disable();
++	handle_pending_softirqs(pending, cpu, 1);
+ 
+ 	pending = local_softirq_pending();
+ 	if (pending && --max_restart)
+@@ -267,6 +329,26 @@ restart:
+ 	__local_bh_enable(SOFTIRQ_OFFSET);
+ }
+ 
++/*
++ * Called with preemption disabled from run_ksoftirqd()
++ */
++static int ksoftirqd_do_softirq(int cpu)
++{
++	/*
++	 * Preempt disable stops cpu going offline.
++	 * If already offline, we'll be on wrong CPU:
++	 * don't process.
++	 */
++	if (cpu_is_offline(cpu))
++		return -1;
++
++	local_irq_disable();
++	if (local_softirq_pending())
++		__do_softirq();
++	local_irq_enable();
++	return 0;
++}
++
+ #ifndef __ARCH_HAS_DO_SOFTIRQ
+ 
+ asmlinkage void do_softirq(void)
+@@ -289,6 +371,184 @@ asmlinkage void do_softirq(void)
+ 
+ #endif
+ 
++static inline void local_bh_disable_nort(void) { local_bh_disable(); }
++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++static inline void ksoftirqd_set_sched_params(void) { }
++static inline void ksoftirqd_clr_sched_params(void) { }
++
++#else /* !PREEMPT_RT_FULL */
++
++/*
++ * On RT we serialize softirq execution with a cpu local lock
++ */
++static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
++static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
++
++static void __do_softirq_common(int need_rcu_bh_qs);
++
++void __do_softirq(void)
++{
++	__do_softirq_common(0);
++}
++
++void __init softirq_early_init(void)
++{
++	local_irq_lock_init(local_softirq_lock);
++}
++
++void local_bh_disable(void)
++{
++	migrate_disable();
++	current->softirq_nestcnt++;
++}
++EXPORT_SYMBOL(local_bh_disable);
++
++void local_bh_enable(void)
++{
++	if (WARN_ON(current->softirq_nestcnt == 0))
++		return;
++
++	if ((current->softirq_nestcnt == 1) &&
++	    local_softirq_pending() &&
++	    local_trylock(local_softirq_lock)) {
++
++		local_irq_disable();
++		if (local_softirq_pending())
++			__do_softirq();
++		local_irq_enable();
++		local_unlock(local_softirq_lock);
++		WARN_ON(current->softirq_nestcnt != 1);
++	}
++	current->softirq_nestcnt--;
++	migrate_enable();
++}
++EXPORT_SYMBOL(local_bh_enable);
++
++void local_bh_enable_ip(unsigned long ip)
++{
++	local_bh_enable();
++}
++EXPORT_SYMBOL(local_bh_enable_ip);
++
++/* For tracing */
++int notrace __in_softirq(void)
++{
++	if (__get_cpu_var(local_softirq_lock).owner == current)
++		return __get_cpu_var(local_softirq_lock).nestcnt;
++	return 0;
++}
++
++int in_serving_softirq(void)
++{
++	int res;
++
++	preempt_disable();
++	res = __get_cpu_var(local_softirq_runner) == current;
++	preempt_enable();
++	return res;
++}
++EXPORT_SYMBOL(in_serving_softirq);
++
++/*
++ * Called with bh and local interrupts disabled. For full RT cpu must
++ * be pinned.
++ */
++static void __do_softirq_common(int need_rcu_bh_qs)
++{
++	u32 pending = local_softirq_pending();
++	int cpu = smp_processor_id();
++
++	current->softirq_nestcnt++;
++
++	/* Reset the pending bitmask before enabling irqs */
++	set_softirq_pending(0);
++
++	__get_cpu_var(local_softirq_runner) = current;
++
++	lockdep_softirq_enter();
++
++	handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
++
++	pending = local_softirq_pending();
++	if (pending)
++		wakeup_softirqd();
++
++	lockdep_softirq_exit();
++	__get_cpu_var(local_softirq_runner) = NULL;
++
++	current->softirq_nestcnt--;
++}
++
++static int __thread_do_softirq(int cpu)
++{
++	/*
++	 * Prevent the current cpu from going offline.
++	 * pin_current_cpu() can reenable preemption and block on the
++	 * hotplug mutex. When it returns, the current cpu is
++	 * pinned. It might be the wrong one, but the offline check
++	 * below catches that.
++	 */
++	pin_current_cpu();
++	/*
++	 * If called from ksoftirqd (cpu >= 0) we need to check
++	 * whether we are on the wrong cpu due to cpu offlining. If
++	 * called via thread_do_softirq() no action required.
++	 */
++	if (cpu >= 0 && cpu_is_offline(cpu)) {
++		unpin_current_cpu();
++		return -1;
++	}
++	preempt_enable();
++	local_lock(local_softirq_lock);
++	local_irq_disable();
++	/*
++	 * We cannot switch stacks on RT as we want to be able to
++	 * schedule!
++	 */
++	if (local_softirq_pending())
++		__do_softirq_common(cpu >= 0);
++	local_unlock(local_softirq_lock);
++	unpin_current_cpu();
++	preempt_disable();
++	local_irq_enable();
++	return 0;
++}
++
++/*
++ * Called from netif_rx_ni(). Preemption enabled.
++ */
++void thread_do_softirq(void)
++{
++	if (!in_serving_softirq()) {
++		preempt_disable();
++		__thread_do_softirq(-1);
++		preempt_enable();
++	}
++}
++
++static int ksoftirqd_do_softirq(int cpu)
++{
++	return __thread_do_softirq(cpu);
++}
++
++static inline void local_bh_disable_nort(void) { }
++static inline void _local_bh_enable_nort(void) { }
++
++static inline void ksoftirqd_set_sched_params(void)
++{
++	struct sched_param param = { .sched_priority = 1 };
++
++	sched_setscheduler(current, SCHED_FIFO, &param);
++}
++
++static inline void ksoftirqd_clr_sched_params(void)
++{
++	struct sched_param param = { .sched_priority = 0 };
++
++	sched_setscheduler(current, SCHED_NORMAL, &param);
++}
++
++#endif /* PREEMPT_RT_FULL */
+ /*
+  * Enter an interrupt context.
+  */
+@@ -302,9 +562,9 @@ void irq_enter(void)
+ 		 * Prevent raise_softirq from needlessly waking up ksoftirqd
+ 		 * here, as softirq will be serviced on return from interrupt.
+ 		 */
+-		local_bh_disable();
++		local_bh_disable_nort();
+ 		tick_check_idle(cpu);
+-		_local_bh_enable();
++		_local_bh_enable_nort();
+ 	}
+ 
+ 	__irq_enter();
+@@ -313,6 +573,7 @@ void irq_enter(void)
+ #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	if (!force_irqthreads)
+ 		__do_softirq();
+ 	else {
+@@ -321,10 +582,14 @@ static inline void invoke_softirq(void)
+ 		wakeup_softirqd();
+ 		__local_bh_enable(SOFTIRQ_OFFSET);
+ 	}
++#else
++	wakeup_softirqd();
++#endif
+ }
+ #else
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	if (!force_irqthreads)
+ 		do_softirq();
+ 	else {
+@@ -333,6 +598,9 @@ static inline void invoke_softirq(void)
+ 		wakeup_softirqd();
+ 		__local_bh_enable(SOFTIRQ_OFFSET);
+ 	}
++#else
++	wakeup_softirqd();
++#endif
+ }
+ #endif
+ 
+@@ -353,7 +621,7 @@ void irq_exit(void)
+ 	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
+ 		tick_nohz_stop_sched_tick(0);
+ #endif
+-	preempt_enable_no_resched();
++	__preempt_enable_no_resched();
+ }
+ 
+ /*
+@@ -739,29 +1007,21 @@ void __init softirq_init(void)
+ 
+ static int run_ksoftirqd(void * __bind_cpu)
+ {
++	ksoftirqd_set_sched_params();
++
+ 	set_current_state(TASK_INTERRUPTIBLE);
+ 
+ 	while (!kthread_should_stop()) {
+ 		preempt_disable();
+-		if (!local_softirq_pending()) {
+-			preempt_enable_no_resched();
+-			schedule();
+-			preempt_disable();
+-		}
++		if (!local_softirq_pending())
++			schedule_preempt_disabled();
+ 
+ 		__set_current_state(TASK_RUNNING);
+ 
+ 		while (local_softirq_pending()) {
+-			/* Preempt disable stops cpu going offline.
+-			   If already offline, we'll be on wrong CPU:
+-			   don't process */
+-			if (cpu_is_offline((long)__bind_cpu))
++			if (ksoftirqd_do_softirq((long) __bind_cpu))
+ 				goto wait_to_die;
+-			local_irq_disable();
+-			if (local_softirq_pending())
+-				__do_softirq();
+-			local_irq_enable();
+-			preempt_enable_no_resched();
++			__preempt_enable_no_resched();
+ 			cond_resched();
+ 			preempt_disable();
+ 			rcu_note_context_switch((long)__bind_cpu);
+@@ -774,6 +1034,7 @@ static int run_ksoftirqd(void * __bind_c
+ 
+ wait_to_die:
+ 	preempt_enable();
++	ksoftirqd_clr_sched_params();
+ 	/* Wait for kthread_stop */
+ 	set_current_state(TASK_INTERRUPTIBLE);
+ 	while (!kthread_should_stop()) {
+@@ -850,9 +1111,8 @@ static int __cpuinit cpu_callback(struct
+ 	int hotcpu = (unsigned long)hcpu;
+ 	struct task_struct *p;
+ 
+-	switch (action) {
++	switch (action & ~CPU_TASKS_FROZEN) {
+ 	case CPU_UP_PREPARE:
+-	case CPU_UP_PREPARE_FROZEN:
+ 		p = kthread_create_on_node(run_ksoftirqd,
+ 					   hcpu,
+ 					   cpu_to_node(hotcpu),
+@@ -865,19 +1125,16 @@ static int __cpuinit cpu_callback(struct
+   		per_cpu(ksoftirqd, hotcpu) = p;
+  		break;
+ 	case CPU_ONLINE:
+-	case CPU_ONLINE_FROZEN:
+ 		wake_up_process(per_cpu(ksoftirqd, hotcpu));
+ 		break;
+ #ifdef CONFIG_HOTPLUG_CPU
+ 	case CPU_UP_CANCELED:
+-	case CPU_UP_CANCELED_FROZEN:
+ 		if (!per_cpu(ksoftirqd, hotcpu))
+ 			break;
+ 		/* Unbind so it can run.  Fall thru. */
+ 		kthread_bind(per_cpu(ksoftirqd, hotcpu),
+ 			     cpumask_any(cpu_online_mask));
+-	case CPU_DEAD:
+-	case CPU_DEAD_FROZEN: {
++	case CPU_POST_DEAD: {
+ 		static const struct sched_param param = {
+ 			.sched_priority = MAX_RT_PRIO-1
+ 		};
+Index: linux-3.2/kernel/posix-timers.c
+===================================================================
+--- linux-3.2.orig/kernel/posix-timers.c
++++ linux-3.2/kernel/posix-timers.c
+@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_
+ static struct pid *good_sigevent(sigevent_t * event)
+ {
+ 	struct task_struct *rtn = current->group_leader;
++	int sig = event->sigev_signo;
+ 
+ 	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
+ 		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
+@@ -447,7 +448,8 @@ static struct pid *good_sigevent(sigeven
+ 		return NULL;
+ 
+ 	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
+-	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
++	    (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
++	     sig_kernel_coredump(sig)))
+ 		return NULL;
+ 
+ 	return task_pid(rtn);
+@@ -764,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
+ 	return overrun;
+ }
+ 
++/*
++ * Protected by RCU!
++ */
++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++	if (kc->timer_set == common_timer_set)
++		hrtimer_wait_for_timer(&timr->it.real.timer);
++	else
++		/* FIXME: Whacky hack for posix-cpu-timers */
++		schedule_timeout(1);
++#endif
++}
++
+ /* Set a POSIX.1b interval timer. */
+ /* timr->it_lock is taken. */
+ static int
+@@ -841,6 +857,7 @@ retry:
+ 	if (!timr)
+ 		return -EINVAL;
+ 
++	rcu_read_lock();
+ 	kc = clockid_to_kclock(timr->it_clock);
+ 	if (WARN_ON_ONCE(!kc || !kc->timer_set))
+ 		error = -EINVAL;
+@@ -849,9 +866,12 @@ retry:
+ 
+ 	unlock_timer(timr, flag);
+ 	if (error == TIMER_RETRY) {
++		timer_wait_for_callback(kc, timr);
+ 		rtn = NULL;	// We already got the old time...
++		rcu_read_unlock();
+ 		goto retry;
+ 	}
++	rcu_read_unlock();
+ 
+ 	if (old_setting && !error &&
+ 	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
+@@ -889,10 +909,15 @@ retry_delete:
+ 	if (!timer)
+ 		return -EINVAL;
+ 
++	rcu_read_lock();
+ 	if (timer_delete_hook(timer) == TIMER_RETRY) {
+ 		unlock_timer(timer, flags);
++		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++					timer);
++		rcu_read_unlock();
+ 		goto retry_delete;
+ 	}
++	rcu_read_unlock();
+ 
+ 	spin_lock(&current->sighand->siglock);
+ 	list_del(&timer->list);
+@@ -918,8 +943,18 @@ static void itimer_delete(struct k_itime
+ retry_delete:
+ 	spin_lock_irqsave(&timer->it_lock, flags);
+ 
++	/* On RT we can race with a deletion */
++	if (!timer->it_signal) {
++		unlock_timer(timer, flags);
++		return;
++	}
++
+ 	if (timer_delete_hook(timer) == TIMER_RETRY) {
++		rcu_read_lock();
+ 		unlock_timer(timer, flags);
++		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++					timer);
++		rcu_read_unlock();
+ 		goto retry_delete;
+ 	}
+ 	list_del(&timer->list);
+Index: linux-3.2/include/linux/signal.h
+===================================================================
+--- linux-3.2.orig/include/linux/signal.h
++++ linux-3.2/include/linux/signal.h
+@@ -229,6 +229,7 @@ static inline void init_sigpending(struc
+ }
+ 
+ extern void flush_sigqueue(struct sigpending *queue);
++extern void flush_task_sigqueue(struct task_struct *tsk);
+ 
+ /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
+ static inline int valid_signal(unsigned long sig)
+Index: linux-3.2/kernel/exit.c
+===================================================================
+--- linux-3.2.orig/kernel/exit.c
++++ linux-3.2/kernel/exit.c
+@@ -141,7 +141,7 @@ static void __exit_signal(struct task_st
+ 	 * Do this under ->siglock, we can race with another thread
+ 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+ 	 */
+-	flush_sigqueue(&tsk->pending);
++	flush_task_sigqueue(tsk);
+ 	tsk->sighand = NULL;
+ 	spin_unlock(&sighand->siglock);
+ 
+Index: linux-3.2/kernel/fork.c
+===================================================================
+--- linux-3.2.orig/kernel/fork.c
++++ linux-3.2/kernel/fork.c
+@@ -86,7 +86,7 @@ int max_threads;		/* tunable limit on nr
+ 
+ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
+ 
+-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
++DEFINE_RWLOCK(tasklist_lock);  /* outer */
+ 
+ #ifdef CONFIG_PROVE_RCU
+ int lockdep_tasklist_lock_is_held(void)
+@@ -197,7 +197,18 @@ void __put_task_struct(struct task_struc
+ 	if (!profile_handoff_task(tsk))
+ 		free_task(tsk);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ EXPORT_SYMBOL_GPL(__put_task_struct);
++#else
++void __put_task_struct_cb(struct rcu_head *rhp)
++{
++	struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
++
++	__put_task_struct(tsk);
++
++}
++EXPORT_SYMBOL_GPL(__put_task_struct_cb);
++#endif
+ 
+ /*
+  * macro override instead of weak attribute alias, to workaround
+@@ -546,6 +557,19 @@ void __mmdrop(struct mm_struct *mm)
+ }
+ EXPORT_SYMBOL_GPL(__mmdrop);
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++/*
++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
++ * want another facility to make this work.
++ */
++void __mmdrop_delayed(struct rcu_head *rhp)
++{
++	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
++
++	__mmdrop(mm);
++}
++#endif
++
+ /*
+  * Decrement the use count and release all resources for an mm.
+  */
+@@ -1028,6 +1052,9 @@ void mm_init_owner(struct mm_struct *mm,
+  */
+ static void posix_cpu_timers_init(struct task_struct *tsk)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	tsk->posix_timer_list = NULL;
++#endif
+ 	tsk->cputime_expires.prof_exp = cputime_zero;
+ 	tsk->cputime_expires.virt_exp = cputime_zero;
+ 	tsk->cputime_expires.sched_exp = 0;
+@@ -1136,6 +1163,7 @@ static struct task_struct *copy_process(
+ 	spin_lock_init(&p->alloc_lock);
+ 
+ 	init_sigpending(&p->pending);
++	p->sigqueue_cache = NULL;
+ 
+ 	p->utime = cputime_zero;
+ 	p->stime = cputime_zero;
+@@ -1197,6 +1225,9 @@ static struct task_struct *copy_process(
+ 	p->hardirq_context = 0;
+ 	p->softirq_context = 0;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++	p->pagefault_disabled = 0;
++#endif
+ #ifdef CONFIG_LOCKDEP
+ 	p->lockdep_depth = 0; /* no locks held yet */
+ 	p->curr_chain_key = 0;
+Index: linux-3.2/kernel/sched_features.h
+===================================================================
+--- linux-3.2.orig/kernel/sched_features.h
++++ linux-3.2/kernel/sched_features.h
+@@ -60,10 +60,15 @@ SCHED_FEAT(OWNER_SPIN, 1)
+  */
+ SCHED_FEAT(NONTASK_POWER, 1)
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+  * Queue remote wakeups on the target CPU and process them
+  * using the scheduler IPI. Reduces rq->lock contention/bounces.
+  */
+ SCHED_FEAT(TTWU_QUEUE, 1)
++#else
++SCHED_FEAT(TTWU_QUEUE, 0)
++#endif
+ 
+ SCHED_FEAT(FORCE_SD_OVERLAP, 0)
++SCHED_FEAT(RT_RUNTIME_SHARE, 1)
+Index: linux-3.2/include/asm-generic/cmpxchg-local.h
+===================================================================
+--- linux-3.2.orig/include/asm-generic/cmpxchg-local.h
++++ linux-3.2/include/asm-generic/cmpxchg-local.h
+@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_lo
+ 	if (size == 8 && sizeof(unsigned long) != 8)
+ 		wrong_size_cmpxchg(ptr);
+ 
+-	local_irq_save(flags);
++	raw_local_irq_save(flags);
+ 	switch (size) {
+ 	case 1: prev = *(u8 *)ptr;
+ 		if (prev == old)
+@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_lo
+ 	default:
+ 		wrong_size_cmpxchg(ptr);
+ 	}
+-	local_irq_restore(flags);
++	raw_local_irq_restore(flags);
+ 	return prev;
+ }
+ 
+@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_gene
+ 	u64 prev;
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
++	raw_local_irq_save(flags);
+ 	prev = *(u64 *)ptr;
+ 	if (prev == old)
+ 		*(u64 *)ptr = new;
+-	local_irq_restore(flags);
++	raw_local_irq_restore(flags);
+ 	return prev;
+ }
+ 
+Index: linux-3.2/drivers/char/random.c
+===================================================================
+--- linux-3.2.orig/drivers/char/random.c
++++ linux-3.2/drivers/char/random.c
+@@ -433,7 +433,7 @@ static struct entropy_store input_pool =
+ 	.poolinfo = &poolinfo_table[0],
+ 	.name = "input",
+ 	.limit = 1,
+-	.lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
++	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+ 	.pool = input_pool_data
+ };
+ 
+@@ -442,7 +442,7 @@ static struct entropy_store blocking_poo
+ 	.name = "blocking",
+ 	.limit = 1,
+ 	.pull = &input_pool,
+-	.lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
++	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
+ 	.pool = blocking_pool_data
+ };
+ 
+@@ -450,7 +450,7 @@ static struct entropy_store nonblocking_
+ 	.poolinfo = &poolinfo_table[1],
+ 	.name = "nonblocking",
+ 	.pull = &input_pool,
+-	.lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
++	.lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
+ 	.pool = nonblocking_pool_data
+ };
+ 
+@@ -633,8 +633,11 @@ static void add_timer_randomness(struct 
+ 	preempt_disable();
+ 	/* if over the trickle threshold, use only 1 in 4096 samples */
+ 	if (input_pool.entropy_count > trickle_thresh &&
+-	    ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
+-		goto out;
++	    ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) {
++		preempt_enable();
++		return;
++	}
++	preempt_enable();
+ 
+ 	sample.jiffies = jiffies;
+ 	sample.cycles = get_cycles();
+@@ -676,8 +679,6 @@ static void add_timer_randomness(struct 
+ 		credit_entropy_bits(&input_pool,
+ 				    min_t(int, fls(delta>>1), 11));
+ 	}
+-out:
+-	preempt_enable();
+ }
+ 
+ void add_input_randomness(unsigned int type, unsigned int code,
+Index: linux-3.2/arch/arm/mach-at91/at91rm9200_time.c
+===================================================================
+--- linux-3.2.orig/arch/arm/mach-at91/at91rm9200_time.c
++++ linux-3.2/arch/arm/mach-at91/at91rm9200_time.c
+@@ -114,6 +114,7 @@ clkevt32k_mode(enum clock_event_mode mod
+ 	last_crtr = read_CRTR();
+ 	switch (mode) {
+ 	case CLOCK_EVT_MODE_PERIODIC:
++		setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ 		/* PIT for periodic irqs; fixed rate of 1/HZ */
+ 		irqmask = AT91_ST_PITS;
+ 		at91_sys_write(AT91_ST_PIMR, LATCH);
+@@ -127,6 +128,7 @@ clkevt32k_mode(enum clock_event_mode mod
+ 		break;
+ 	case CLOCK_EVT_MODE_SHUTDOWN:
+ 	case CLOCK_EVT_MODE_UNUSED:
++		remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ 	case CLOCK_EVT_MODE_RESUME:
+ 		irqmask = 0;
+ 		break;
+Index: linux-3.2/arch/arm/mach-at91/at91sam926x_time.c
+===================================================================
+--- linux-3.2.orig/arch/arm/mach-at91/at91sam926x_time.c
++++ linux-3.2/arch/arm/mach-at91/at91sam926x_time.c
+@@ -54,7 +54,7 @@ static struct clocksource pit_clk = {
+ 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+ 
+-
++static struct irqaction at91sam926x_pit_irq;
+ /*
+  * Clockevent device:  interrupts every 1/HZ (== pit_cycles * MCK/16)
+  */
+@@ -63,6 +63,9 @@ pit_clkevt_mode(enum clock_event_mode mo
+ {
+ 	switch (mode) {
+ 	case CLOCK_EVT_MODE_PERIODIC:
++		/* Set up irq handler */
++		setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
++
+ 		/* update clocksource counter */
+ 		pit_cnt += pit_cycle * PIT_PICNT(at91_sys_read(AT91_PIT_PIVR));
+ 		at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
+@@ -75,6 +78,7 @@ pit_clkevt_mode(enum clock_event_mode mo
+ 	case CLOCK_EVT_MODE_UNUSED:
+ 		/* disable irq, leaving the clocksource active */
+ 		at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
++		remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
+ 		break;
+ 	case CLOCK_EVT_MODE_RESUME:
+ 		break;
+Index: linux-3.2/drivers/clocksource/tcb_clksrc.c
+===================================================================
+--- linux-3.2.orig/drivers/clocksource/tcb_clksrc.c
++++ linux-3.2/drivers/clocksource/tcb_clksrc.c
+@@ -21,8 +21,7 @@
+  *     resolution better than 200 nsec).
+  *
+  *   - The third channel may be used to provide a 16-bit clockevent
+- *     source, used in either periodic or oneshot mode.  This runs
+- *     at 32 KiHZ, and can handle delays of up to two seconds.
++ *     source, used in either periodic or oneshot mode.
+  *
+  * A boot clocksource and clockevent source are also currently needed,
+  * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
+@@ -68,6 +67,7 @@ static struct clocksource clksrc = {
+ struct tc_clkevt_device {
+ 	struct clock_event_device	clkevt;
+ 	struct clk			*clk;
++	u32				freq;
+ 	void __iomem			*regs;
+ };
+ 
+@@ -76,13 +76,6 @@ static struct tc_clkevt_device *to_tc_cl
+ 	return container_of(clkevt, struct tc_clkevt_device, clkevt);
+ }
+ 
+-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+- * because using one of the divided clocks would usually mean the
+- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+- *
+- * A divided clock could be good for high resolution timers, since
+- * 30.5 usec resolution can seem "low".
+- */
+ static u32 timer_clock;
+ 
+ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
+@@ -105,11 +98,12 @@ static void tc_mode(enum clock_event_mod
+ 	case CLOCK_EVT_MODE_PERIODIC:
+ 		clk_enable(tcd->clk);
+ 
+-		/* slow clock, count up to RC, then irq and restart */
++		/* count up to RC, then irq and restart */
+ 		__raw_writel(timer_clock
+ 				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ 				regs + ATMEL_TC_REG(2, CMR));
+-		__raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
++		__raw_writel((tcd->freq + HZ/2)/HZ,
++			     tcaddr + ATMEL_TC_REG(2, RC));
+ 
+ 		/* Enable clock and interrupts on RC compare */
+ 		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+@@ -122,7 +116,7 @@ static void tc_mode(enum clock_event_mod
+ 	case CLOCK_EVT_MODE_ONESHOT:
+ 		clk_enable(tcd->clk);
+ 
+-		/* slow clock, count up to RC, then irq and stop */
++		/* count up to RC, then irq and stop */
+ 		__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
+ 				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ 				regs + ATMEL_TC_REG(2, CMR));
+@@ -152,8 +146,12 @@ static struct tc_clkevt_device clkevt = 
+ 		.features	= CLOCK_EVT_FEAT_PERIODIC
+ 					| CLOCK_EVT_FEAT_ONESHOT,
+ 		.shift		= 32,
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ 		/* Should be lower than at91rm9200's system timer */
+ 		.rating		= 125,
++#else
++		.rating		= 200,
++#endif
+ 		.set_next_event	= tc_next_event,
+ 		.set_mode	= tc_mode,
+ 	},
+@@ -179,8 +177,9 @@ static struct irqaction tc_irqaction = {
+ 	.handler	= ch2_irq,
+ };
+ 
+-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
++static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
+ {
++	unsigned divisor = atmel_tc_divisors[divisor_idx];
+ 	struct clk *t2_clk = tc->clk[2];
+ 	int irq = tc->irq[2];
+ 
+@@ -188,11 +187,17 @@ static void __init setup_clkevents(struc
+ 	clkevt.clk = t2_clk;
+ 	tc_irqaction.dev_id = &clkevt;
+ 
+-	timer_clock = clk32k_divisor_idx;
++	timer_clock = divisor_idx;
+ 
+-	clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
+-	clkevt.clkevt.max_delta_ns
+-		= clockevent_delta2ns(0xffff, &clkevt.clkevt);
++	if (!divisor)
++		clkevt.freq = 32768;
++	else
++		clkevt.freq = clk_get_rate(t2_clk)/divisor;
++
++	clkevt.clkevt.mult = div_sc(clkevt.freq, NSEC_PER_SEC,
++				    clkevt.clkevt.shift);
++	clkevt.clkevt.max_delta_ns =
++		clockevent_delta2ns(0xffff, &clkevt.clkevt);
+ 	clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
+ 	clkevt.clkevt.cpumask = cpumask_of(0);
+ 
+@@ -295,8 +300,11 @@ static int __init tcb_clksrc_init(void)
+ 	clocksource_register(&clksrc);
+ 
+ 	/* channel 2:  periodic and oneshot timer support */
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ 	setup_clkevents(tc, clk32k_divisor_idx);
+-
++#else
++	setup_clkevents(tc, best_divisor_idx);
++#endif
+ 	return 0;
+ }
+ arch_initcall(tcb_clksrc_init);
+Index: linux-3.2/drivers/misc/Kconfig
+===================================================================
+--- linux-3.2.orig/drivers/misc/Kconfig
++++ linux-3.2/drivers/misc/Kconfig
+@@ -82,6 +82,7 @@ config AB8500_PWM
+ config ATMEL_TCLIB
+ 	bool "Atmel AT32/AT91 Timer/Counter Library"
+ 	depends on (AVR32 || ARCH_AT91)
++	default y if PREEMPT_RT_FULL
+ 	help
+ 	  Select this if you want a library to allocate the Timer/Counter
+ 	  blocks found on many Atmel processors.  This facilitates using
+@@ -97,8 +98,7 @@ config ATMEL_TCB_CLKSRC
+ 	  are combined to make a single 32-bit timer.
+ 
+ 	  When GENERIC_CLOCKEVENTS is defined, the third timer channel
+-	  may be used as a clock event device supporting oneshot mode
+-	  (delays of up to two seconds) based on the 32 KiHz clock.
++	  may be used as a clock event device supporting oneshot mode.
+ 
+ config ATMEL_TCB_CLKSRC_BLOCK
+ 	int
+@@ -112,6 +112,14 @@ config ATMEL_TCB_CLKSRC_BLOCK
+ 	  TC can be used for other purposes, such as PWM generation and
+ 	  interval timing.
+ 
++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
++	bool "TC Block use 32 KiHz clock"
++	depends on ATMEL_TCB_CLKSRC
++	default y if !PREEMPT_RT_FULL
++	help
++	  Select this to use the 32 KiHz base clock rate as the TC block
++	  clock source for clock events.
++
+ config IBM_ASM
+ 	tristate "Device driver for IBM RSA service processor"
+ 	depends on X86 && PCI && INPUT && EXPERIMENTAL
+@@ -133,6 +141,35 @@ config IBM_ASM
+ 	  for information on the specific driver level and support statement
+ 	  for your IBM server.
+ 
++config HWLAT_DETECTOR
++	tristate "Testing module to detect hardware-induced latencies"
++	depends on DEBUG_FS
++	depends on RING_BUFFER
++	default m
++	---help---
++	  A simple hardware latency detector. Use this module to detect
++	  large latencies introduced by the behavior of the underlying
++	  system firmware external to Linux. We do this using periodic
++	  use of stop_machine to grab all available CPUs and measure
++	  for unexplainable gaps in the CPU timestamp counter(s). By
++	  default, the module is not enabled until the "enable" file
++	  within the "hwlat_detector" debugfs directory is toggled.
++
++	  This module is often used to detect SMI (System Management
++	  Interrupts) on x86 systems, though it is not x86 specific. To
++	  this end, we default to using a sample window of 1 second,
++	  during which we will sample for 0.5 seconds. If an SMI or
++	  similar event occurs during that time, it is recorded
++	  into an 8K-sample global ring buffer until retrieved.
++
++	  WARNING: This software should never be enabled (it can be built
++	  but should not be turned on after it is loaded) in a production
++	  environment where high latencies are a concern since the
++	  sampling mechanism actually introduces latencies for
++	  regular tasks while the CPU(s) are being held.
++
++	  If unsure, say N.
++
+ config PHANTOM
+ 	tristate "Sensable PHANToM (PCI)"
+ 	depends on PCI
+Index: linux-3.2/drivers/net/ethernet/dec/tulip/tulip_core.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/dec/tulip/tulip_core.c
++++ linux-3.2/drivers/net/ethernet/dec/tulip/tulip_core.c
+@@ -1949,6 +1949,7 @@ static void __devexit tulip_remove_one (
+ 	pci_iounmap(pdev, tp->base_addr);
+ 	free_netdev (dev);
+ 	pci_release_regions (pdev);
++	pci_disable_device (pdev);
+ 	pci_set_drvdata (pdev, NULL);
+ 
+ 	/* pci_power_off (pdev, -1); */
+Index: linux-3.2/drivers/net/ethernet/realtek/8139too.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/realtek/8139too.c
++++ linux-3.2/drivers/net/ethernet/realtek/8139too.c
+@@ -2174,7 +2174,7 @@ static irqreturn_t rtl8139_interrupt (in
+  */
+ static void rtl8139_poll_controller(struct net_device *dev)
+ {
+-	disable_irq(dev->irq);
++	disable_irq_nosync(dev->irq);
+ 	rtl8139_interrupt(dev->irq, dev);
+ 	enable_irq(dev->irq);
+ }
+Index: linux-3.2/drivers/net/ethernet/ibm/ehea/ehea_main.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ linux-3.2/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -1303,7 +1303,7 @@ static int ehea_reg_interrupts(struct ne
+ 			 "%s-queue%d", dev->name, i);
+ 		ret = ibmebus_request_irq(pr->eq->attr.ist1,
+ 					  ehea_recv_irq_handler,
+-					  IRQF_DISABLED, pr->int_send_name,
++					  IRQF_NO_THREAD, pr->int_send_name,
+ 					  pr);
+ 		if (ret) {
+ 			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
+Index: linux-3.2/drivers/net/ethernet/cadence/at91_ether.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/cadence/at91_ether.c
++++ linux-3.2/drivers/net/ethernet/cadence/at91_ether.c
+@@ -200,7 +200,9 @@ static irqreturn_t at91ether_phy_interru
+ 	struct net_device *dev = (struct net_device *) dev_id;
+ 	struct at91_private *lp = netdev_priv(dev);
+ 	unsigned int phy;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&lp->lock, flags);
+ 	/*
+ 	 * This hander is triggered on both edges, but the PHY chips expect
+ 	 * level-triggering.  We therefore have to check if the PHY actually has
+@@ -242,6 +244,7 @@ static irqreturn_t at91ether_phy_interru
+ 
+ done:
+ 	disable_mdi();
++	spin_unlock_irqrestore(&lp->lock, flags);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -398,9 +401,11 @@ static void at91ether_check_link(unsigne
+ 	struct net_device *dev = (struct net_device *) dev_id;
+ 	struct at91_private *lp = netdev_priv(dev);
+ 
++	spin_lock_irq(&lp->lock);
+ 	enable_mdi();
+ 	update_linkspeed(dev, 1);
+ 	disable_mdi();
++	spin_unlock_irq(&lp->lock);
+ 
+ 	mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
+ }
+Index: linux-3.2/include/linux/preempt.h
+===================================================================
+--- linux-3.2.orig/include/linux/preempt.h
++++ linux-3.2/include/linux/preempt.h
+@@ -48,15 +48,21 @@ do { \
+ 	barrier(); \
+ } while (0)
+ 
+-#define preempt_enable_no_resched() \
++#define __preempt_enable_no_resched() \
+ do { \
+ 	barrier(); \
+ 	dec_preempt_count(); \
+ } while (0)
+ 
++#ifndef CONFIG_PREEMPT_RT_BASE
++# define preempt_enable_no_resched()	__preempt_enable_no_resched()
++#else
++# define preempt_enable_no_resched()	preempt_enable()
++#endif
++
+ #define preempt_enable() \
+ do { \
+-	preempt_enable_no_resched(); \
++	__preempt_enable_no_resched(); \
+ 	barrier(); \
+ 	preempt_check_resched(); \
+ } while (0)
+@@ -92,6 +98,7 @@ do { \
+ #else /* !CONFIG_PREEMPT_COUNT */
+ 
+ #define preempt_disable()		do { } while (0)
++#define __preempt_enable_no_resched()	do { } while (0)
+ #define preempt_enable_no_resched()	do { } while (0)
+ #define preempt_enable()		do { } while (0)
+ 
+@@ -101,6 +108,27 @@ do { \
+ 
+ #endif /* CONFIG_PREEMPT_COUNT */
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define preempt_disable_rt()		preempt_disable()
++# define preempt_enable_rt()		preempt_enable()
++# define preempt_disable_nort()		do { } while (0)
++# define preempt_enable_nort()		do { } while (0)
++# ifdef CONFIG_SMP
++   extern void migrate_disable(void);
++   extern void migrate_enable(void);
++# else /* CONFIG_SMP */
++#  define migrate_disable()		do { } while (0)
++#  define migrate_enable()		do { } while (0)
++# endif /* CONFIG_SMP */
++#else
++# define preempt_disable_rt()		do { } while (0)
++# define preempt_enable_rt()		do { } while (0)
++# define preempt_disable_nort()		preempt_disable()
++# define preempt_enable_nort()		preempt_enable()
++# define migrate_disable()		preempt_disable()
++# define migrate_enable()		preempt_enable()
++#endif
++
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+ 
+ struct preempt_notifier;
+Index: linux-3.2/include/linux/uaccess.h
+===================================================================
+--- linux-3.2.orig/include/linux/uaccess.h
++++ linux-3.2/include/linux/uaccess.h
+@@ -6,38 +6,37 @@
+ 
+ /*
+  * These routines enable/disable the pagefault handler in that
+- * it will not take any locks and go straight to the fixup table.
+- *
+- * They have great resemblance to the preempt_disable/enable calls
+- * and in fact they are identical; this is because currently there is
+- * no other way to make the pagefault handlers do this. So we do
+- * disable preemption but we don't necessarily care about that.
++ * it will not take any MM locks and go straight to the fixup table.
+  */
+-static inline void pagefault_disable(void)
++static inline void raw_pagefault_disable(void)
+ {
+ 	inc_preempt_count();
+-	/*
+-	 * make sure to have issued the store before a pagefault
+-	 * can hit.
+-	 */
+ 	barrier();
+ }
+ 
+-static inline void pagefault_enable(void)
++static inline void raw_pagefault_enable(void)
+ {
+-	/*
+-	 * make sure to issue those last loads/stores before enabling
+-	 * the pagefault handler again.
+-	 */
+ 	barrier();
+ 	dec_preempt_count();
+-	/*
+-	 * make sure we do..
+-	 */
+ 	barrier();
+ 	preempt_check_resched();
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline void pagefault_disable(void)
++{
++	raw_pagefault_disable();
++}
++
++static inline void pagefault_enable(void)
++{
++	raw_pagefault_enable();
++}
++#else
++extern void pagefault_disable(void);
++extern void pagefault_enable(void);
++#endif
++
+ #ifndef ARCH_HAS_NOCACHE_UACCESS
+ 
+ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+@@ -77,9 +76,9 @@ static inline unsigned long __copy_from_
+ 		mm_segment_t old_fs = get_fs();		\
+ 							\
+ 		set_fs(KERNEL_DS);			\
+-		pagefault_disable();			\
++		raw_pagefault_disable();		\
+ 		ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));		\
+-		pagefault_enable();			\
++		raw_pagefault_enable();			\
+ 		set_fs(old_fs);				\
+ 		ret;					\
+ 	})
+Index: linux-3.2/mm/memory.c
+===================================================================
+--- linux-3.2.orig/mm/memory.c
++++ linux-3.2/mm/memory.c
+@@ -3436,6 +3436,32 @@ unlock:
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++void pagefault_disable(void)
++{
++	migrate_disable();
++	current->pagefault_disabled++;
++	/*
++	 * make sure to have issued the store before a pagefault
++	 * can hit.
++	 */
++	barrier();
++}
++EXPORT_SYMBOL_GPL(pagefault_disable);
++
++void pagefault_enable(void)
++{
++	/*
++	 * make sure to issue those last loads/stores before enabling
++	 * the pagefault handler again.
++	 */
++	barrier();
++	current->pagefault_disabled--;
++	migrate_enable();
++}
++EXPORT_SYMBOL_GPL(pagefault_enable);
++#endif
++
+ /*
+  * By the time we get here, we already hold the mm semaphore
+  */
+@@ -3984,3 +4010,35 @@ void copy_user_huge_page(struct page *ds
+ 	}
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
++
++#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0)
++/*
++ * Heinous hack, relies on the caller doing something like:
++ *
++ *   pte = alloc_pages(PGALLOC_GFP, 0);
++ *   if (pte)
++ *     pgtable_page_ctor(pte);
++ *   return pte;
++ *
++ * This ensures we release the page and return NULL when the
++ * lock allocation fails.
++ */
++struct page *pte_lock_init(struct page *page)
++{
++	page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
++	if (page->ptl) {
++		spin_lock_init(__pte_lockptr(page));
++	} else {
++		__free_page(page);
++		page = NULL;
++	}
++	return page;
++}
++
++void pte_lock_deinit(struct page *page)
++{
++	kfree(page->ptl);
++	page->mapping = NULL;
++}
++
++#endif
+Index: linux-3.2/arch/alpha/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/alpha/mm/fault.c
++++ linux-3.2/arch/alpha/mm/fault.c
+@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns
+ 
+ 	/* If we're in an interrupt context, or have no user context,
+ 	   we must not take the fault.  */
+-	if (!mm || in_atomic())
++	if (!mm || pagefault_disabled())
+ 		goto no_context;
+ 
+ #ifdef CONFIG_ALPHA_LARGE_VMALLOC
+Index: linux-3.2/arch/arm/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/arm/mm/fault.c
++++ linux-3.2/arch/arm/mm/fault.c
+@@ -294,7 +294,7 @@ do_page_fault(unsigned long addr, unsign
+ 	 * If we're in an interrupt or have no user
+ 	 * context, we must not take the fault..
+ 	 */
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto no_context;
+ 
+ 	/*
+Index: linux-3.2/arch/avr32/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/avr32/mm/fault.c
++++ linux-3.2/arch/avr32/mm/fault.c
+@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned l
+ 	 * If we're in an interrupt or have no user context, we must
+ 	 * not take the fault...
+ 	 */
+-	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
++	if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
+ 		goto no_context;
+ 
+ 	local_irq_enable();
+Index: linux-3.2/arch/cris/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/cris/mm/fault.c
++++ linux-3.2/arch/cris/mm/fault.c
+@@ -111,7 +111,7 @@ do_page_fault(unsigned long address, str
+ 	 * user context, we must not take the fault.
+ 	 */
+ 
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto no_context;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/frv/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/frv/mm/fault.c
++++ linux-3.2/arch/frv/mm/fault.c
+@@ -79,7 +79,7 @@ asmlinkage void do_page_fault(int datamm
+ 	 * If we're in an interrupt or have no user
+ 	 * context, we must not take the fault..
+ 	 */
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto no_context;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/ia64/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/ia64/mm/fault.c
++++ linux-3.2/arch/ia64/mm/fault.c
+@@ -89,7 +89,7 @@ ia64_do_page_fault (unsigned long addres
+ 	/*
+ 	 * If we're in an interrupt or have no user context, we must not take the fault..
+ 	 */
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto no_context;
+ 
+ #ifdef CONFIG_VIRTUAL_MEM_MAP
+Index: linux-3.2/arch/m32r/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/m32r/mm/fault.c
++++ linux-3.2/arch/m32r/mm/fault.c
+@@ -115,7 +115,7 @@ asmlinkage void do_page_fault(struct pt_
+ 	 * If we're in an interrupt or have no user context or are running in an
+ 	 * atomic region then we must not take the fault..
+ 	 */
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto bad_area_nosemaphore;
+ 
+ 	/* When running in the kernel we expect faults to occur only to
+Index: linux-3.2/arch/m68k/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/m68k/mm/fault.c
++++ linux-3.2/arch/m68k/mm/fault.c
+@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs, 
+ 	 * If we're in an interrupt or have no user
+ 	 * context, we must not take the fault..
+ 	 */
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto no_context;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/microblaze/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/microblaze/mm/fault.c
++++ linux-3.2/arch/microblaze/mm/fault.c
+@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs,
+ 	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
+ 		is_write = 0;
+ 
+-	if (unlikely(in_atomic() || !mm)) {
++	if (unlikely(!mm || pagefault_disabled())) {
+ 		if (kernel_mode(regs))
+ 			goto bad_area_nosemaphore;
+ 
+Index: linux-3.2/arch/mips/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/mips/mm/fault.c
++++ linux-3.2/arch/mips/mm/fault.c
+@@ -88,7 +88,7 @@ asmlinkage void __kprobes do_page_fault(
+ 	 * If we're in an interrupt or have no user
+ 	 * context, we must not take the fault..
+ 	 */
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto bad_area_nosemaphore;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/mn10300/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/mn10300/mm/fault.c
++++ linux-3.2/arch/mn10300/mm/fault.c
+@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_
+ 	 * If we're in an interrupt or have no user
+ 	 * context, we must not take the fault..
+ 	 */
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto no_context;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/parisc/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/parisc/mm/fault.c
++++ linux-3.2/arch/parisc/mm/fault.c
+@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs,
+ 	unsigned long acc_type;
+ 	int fault;
+ 
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto no_context;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/powerpc/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/mm/fault.c
++++ linux-3.2/arch/powerpc/mm/fault.c
+@@ -162,7 +162,7 @@ int __kprobes do_page_fault(struct pt_re
+ 	}
+ #endif
+ 
+-	if (in_atomic() || mm == NULL) {
++	if (!mm || pagefault_disabled()) {
+ 		if (!user_mode(regs))
+ 			return SIGSEGV;
+ 		/* in_atomic() in user mode is really bad,
+Index: linux-3.2/arch/s390/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/s390/mm/fault.c
++++ linux-3.2/arch/s390/mm/fault.c
+@@ -295,7 +295,8 @@ static inline int do_exception(struct pt
+ 	 * user context.
+ 	 */
+ 	fault = VM_FAULT_BADCONTEXT;
+-	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
++	if (unlikely(!user_space_fault(trans_exc_code) ||
++		     !mm || pagefault_disabled()))
+ 		goto out;
+ 
+ 	address = trans_exc_code & __FAIL_ADDR_MASK;
+@@ -426,7 +427,8 @@ void __kprobes do_asce_exception(struct 
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
+ 
+-	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
++	if (unlikely(!user_space_fault(trans_exc_code) ||
++		     !mm || pagefault_disabled()))
+ 		goto no_context;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/score/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/score/mm/fault.c
++++ linux-3.2/arch/score/mm/fault.c
+@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_
+ 	* If we're in an interrupt or have no user
+ 	* context, we must not take the fault..
+ 	*/
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto bad_area_nosemaphore;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/sh/mm/fault_32.c
+===================================================================
+--- linux-3.2.orig/arch/sh/mm/fault_32.c
++++ linux-3.2/arch/sh/mm/fault_32.c
+@@ -166,7 +166,7 @@ asmlinkage void __kprobes do_page_fault(
+ 	 * If we're in an interrupt, have no user context or are running
+ 	 * in an atomic region then we must not take the fault:
+ 	 */
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto no_context;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/sparc/mm/fault_32.c
+===================================================================
+--- linux-3.2.orig/arch/sparc/mm/fault_32.c
++++ linux-3.2/arch/sparc/mm/fault_32.c
+@@ -247,8 +247,8 @@ asmlinkage void do_sparc_fault(struct pt
+ 	 * If we're in an interrupt or have no user
+ 	 * context, we must not take the fault..
+ 	 */
+-        if (in_atomic() || !mm)
+-                goto no_context;
++	if (!mm || pagefault_disabled())
++		goto no_context;
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ 
+Index: linux-3.2/arch/sparc/mm/fault_64.c
+===================================================================
+--- linux-3.2.orig/arch/sparc/mm/fault_64.c
++++ linux-3.2/arch/sparc/mm/fault_64.c
+@@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fau
+ 	 * If we're in an interrupt or have no user
+ 	 * context, we must not take the fault..
+ 	 */
+-	if (in_atomic() || !mm)
++	if (!mm || pagefault_disabled())
+ 		goto intr_or_no_mm;
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+Index: linux-3.2/arch/tile/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/tile/mm/fault.c
++++ linux-3.2/arch/tile/mm/fault.c
+@@ -346,7 +346,7 @@ static int handle_page_fault(struct pt_r
+ 	 * If we're in an interrupt, have no user context or are running in an
+ 	 * atomic region then we must not take the fault.
+ 	 */
+-	if (in_atomic() || !mm) {
++	if (!mm || pagefault_disabled()) {
+ 		vma = NULL;  /* happy compiler */
+ 		goto bad_area_nosemaphore;
+ 	}
+Index: linux-3.2/arch/um/kernel/trap.c
+===================================================================
+--- linux-3.2.orig/arch/um/kernel/trap.c
++++ linux-3.2/arch/um/kernel/trap.c
+@@ -37,7 +37,7 @@ int handle_page_fault(unsigned long addr
+ 	 * If the fault was during atomic operation, don't take the fault, just
+ 	 * fail.
+ 	 */
+-	if (in_atomic())
++	if (!mm || pagefault_disabled())
+ 		goto out_nosemaphore;
+ 
+ 	down_read(&mm->mmap_sem);
+Index: linux-3.2/arch/x86/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/x86/mm/fault.c
++++ linux-3.2/arch/x86/mm/fault.c
+@@ -1084,7 +1084,7 @@ do_page_fault(struct pt_regs *regs, unsi
+ 	 * If we're in an interrupt, have no user context or are running
+ 	 * in an atomic region then we must not take the fault:
+ 	 */
+-	if (unlikely(in_atomic() || !mm)) {
++	if (unlikely(!mm || pagefault_disabled())) {
+ 		bad_area_nosemaphore(regs, error_code, address);
+ 		return;
+ 	}
+Index: linux-3.2/arch/xtensa/mm/fault.c
+===================================================================
+--- linux-3.2.orig/arch/xtensa/mm/fault.c
++++ linux-3.2/arch/xtensa/mm/fault.c
+@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
+ 	/* If we're in an interrupt or have no user
+ 	 * context, we must not take the fault..
+ 	 */
+-	if (in_atomic() || !mm) {
++	if (!mm || pagefault_disabled()) {
+ 		bad_page_fault(regs, address, SIGSEGV);
+ 		return;
+ 	}
+Index: linux-3.2/mm/filemap.c
+===================================================================
+--- linux-3.2.orig/mm/filemap.c
++++ linux-3.2/mm/filemap.c
+@@ -2061,7 +2061,7 @@ size_t iov_iter_copy_from_user_atomic(st
+ 	char *kaddr;
+ 	size_t copied;
+ 
+-	BUG_ON(!in_atomic());
++	BUG_ON(!pagefault_disabled());
+ 	kaddr = kmap_atomic(page, KM_USER0);
+ 	if (likely(i->nr_segs == 1)) {
+ 		int left;
+Index: linux-3.2/arch/x86/mm/highmem_32.c
+===================================================================
+--- linux-3.2.orig/arch/x86/mm/highmem_32.c
++++ linux-3.2/arch/x86/mm/highmem_32.c
+@@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page
+ 	type = kmap_atomic_idx_push();
+ 	idx = type + KM_TYPE_NR*smp_processor_id();
+ 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-	BUG_ON(!pte_none(*(kmap_pte-idx)));
++	WARN_ON(!pte_none(*(kmap_pte-idx)));
+ 	set_pte(kmap_pte-idx, mk_pte(page, prot));
+ 
+ 	return (void *)vaddr;
+Index: linux-3.2/include/linux/kernel.h
+===================================================================
+--- linux-3.2.orig/include/linux/kernel.h
++++ linux-3.2/include/linux/kernel.h
+@@ -356,7 +356,7 @@ extern enum system_states {
+ 	SYSTEM_HALT,
+ 	SYSTEM_POWER_OFF,
+ 	SYSTEM_RESTART,
+-	SYSTEM_SUSPEND_DISK,
++	SYSTEM_SUSPEND,
+ } system_state;
+ 
+ #define TAINT_PROPRIETARY_MODULE	0
+Index: linux-3.2/kernel/power/hibernate.c
+===================================================================
+--- linux-3.2.orig/kernel/power/hibernate.c
++++ linux-3.2/kernel/power/hibernate.c
+@@ -282,6 +282,8 @@ static int create_image(int platform_mod
+ 
+ 	local_irq_disable();
+ 
++	system_state = SYSTEM_SUSPEND;
++
+ 	error = syscore_suspend();
+ 	if (error) {
+ 		printk(KERN_ERR "PM: Some system devices failed to power down, "
+@@ -309,6 +311,7 @@ static int create_image(int platform_mod
+ 	syscore_resume();
+ 
+  Enable_irqs:
++	system_state = SYSTEM_RUNNING;
+ 	local_irq_enable();
+ 
+  Enable_cpus:
+@@ -420,6 +423,7 @@ static int resume_target_kernel(bool pla
+ 		goto Enable_cpus;
+ 
+ 	local_irq_disable();
++	system_state = SYSTEM_SUSPEND;
+ 
+ 	error = syscore_suspend();
+ 	if (error)
+@@ -453,6 +457,7 @@ static int resume_target_kernel(bool pla
+ 	syscore_resume();
+ 
+  Enable_irqs:
++	system_state = SYSTEM_RUNNING;
+ 	local_irq_enable();
+ 
+  Enable_cpus:
+@@ -532,6 +537,7 @@ int hibernation_platform_enter(void)
+ 		goto Platform_finish;
+ 
+ 	local_irq_disable();
++	system_state = SYSTEM_SUSPEND;
+ 	syscore_suspend();
+ 	if (pm_wakeup_pending()) {
+ 		error = -EAGAIN;
+@@ -544,6 +550,7 @@ int hibernation_platform_enter(void)
+ 
+  Power_up:
+ 	syscore_resume();
++	system_state = SYSTEM_RUNNING;
+ 	local_irq_enable();
+ 	enable_nonboot_cpus();
+ 
+Index: linux-3.2/kernel/power/suspend.c
+===================================================================
+--- linux-3.2.orig/kernel/power/suspend.c
++++ linux-3.2/kernel/power/suspend.c
+@@ -171,6 +171,8 @@ static int suspend_enter(suspend_state_t
+ 	arch_suspend_disable_irqs();
+ 	BUG_ON(!irqs_disabled());
+ 
++	system_state = SYSTEM_SUSPEND;
++
+ 	error = syscore_suspend();
+ 	if (!error) {
+ 		*wakeup = pm_wakeup_pending();
+@@ -181,6 +183,8 @@ static int suspend_enter(suspend_state_t
+ 		syscore_resume();
+ 	}
+ 
++	system_state = SYSTEM_RUNNING;
++
+ 	arch_suspend_enable_irqs();
+ 	BUG_ON(irqs_disabled());
+ 
+Index: linux-3.2/drivers/of/base.c
+===================================================================
+--- linux-3.2.orig/drivers/of/base.c
++++ linux-3.2/drivers/of/base.c
+@@ -54,7 +54,7 @@ static DEFINE_MUTEX(of_aliases_mutex);
+ /* use when traversing tree through the allnext, child, sibling,
+  * or parent members of struct device_node.
+  */
+-DEFINE_RWLOCK(devtree_lock);
++DEFINE_RAW_SPINLOCK(devtree_lock);
+ 
+ int of_n_addr_cells(struct device_node *np)
+ {
+@@ -163,16 +163,14 @@ void of_node_put(struct device_node *nod
+ EXPORT_SYMBOL(of_node_put);
+ #endif /* !CONFIG_SPARC */
+ 
+-struct property *of_find_property(const struct device_node *np,
+-				  const char *name,
+-				  int *lenp)
++static struct property *__of_find_property(const struct device_node *np,
++					   const char *name, int *lenp)
+ {
+ 	struct property *pp;
+ 
+ 	if (!np)
+ 		return NULL;
+ 
+-	read_lock(&devtree_lock);
+ 	for (pp = np->properties; pp != 0; pp = pp->next) {
+ 		if (of_prop_cmp(pp->name, name) == 0) {
+ 			if (lenp != 0)
+@@ -180,7 +178,20 @@ struct property *of_find_property(const 
+ 			break;
+ 		}
+ 	}
+-	read_unlock(&devtree_lock);
++
++	return pp;
++}
++
++struct property *of_find_property(const struct device_node *np,
++				  const char *name,
++				  int *lenp)
++{
++	struct property *pp;
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&devtree_lock, flags);
++	pp = __of_find_property(np, name, lenp);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 
+ 	return pp;
+ }
+@@ -198,13 +209,13 @@ struct device_node *of_find_all_nodes(st
+ {
+ 	struct device_node *np;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock(&devtree_lock);
+ 	np = prev ? prev->allnext : allnodes;
+ 	for (; np != NULL; np = np->allnext)
+ 		if (of_node_get(np))
+ 			break;
+ 	of_node_put(prev);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock(&devtree_lock);
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_find_all_nodes);
+@@ -213,8 +224,20 @@ EXPORT_SYMBOL(of_find_all_nodes);
+  * Find a property with a given name for a given node
+  * and return the value.
+  */
++static const void *__of_get_property(const struct device_node *np,
++				     const char *name, int *lenp)
++{
++	struct property *pp = __of_find_property(np, name, lenp);
++
++	return pp ? pp->value : NULL;
++}
++
++/*
++ * Find a property with a given name for a given node
++ * and return the value.
++ */
+ const void *of_get_property(const struct device_node *np, const char *name,
+-			 int *lenp)
++			    int *lenp)
+ {
+ 	struct property *pp = of_find_property(np, name, lenp);
+ 
+@@ -225,13 +248,13 @@ EXPORT_SYMBOL(of_get_property);
+ /** Checks if the given "compat" string matches one of the strings in
+  * the device's "compatible" property
+  */
+-int of_device_is_compatible(const struct device_node *device,
+-		const char *compat)
++static int __of_device_is_compatible(const struct device_node *device,
++				     const char *compat)
+ {
+ 	const char* cp;
+-	int cplen, l;
++	int uninitialized_var(cplen), l;
+ 
+-	cp = of_get_property(device, "compatible", &cplen);
++	cp = __of_get_property(device, "compatible", &cplen);
+ 	if (cp == NULL)
+ 		return 0;
+ 	while (cplen > 0) {
+@@ -244,6 +267,21 @@ int of_device_is_compatible(const struct
+ 
+ 	return 0;
+ }
++
++/** Checks if the given "compat" string matches one of the strings in
++ * the device's "compatible" property
++ */
++int of_device_is_compatible(const struct device_node *device,
++		const char *compat)
++{
++	unsigned long flags;
++	int res;
++
++	raw_spin_lock_irqsave(&devtree_lock, flags);
++	res = __of_device_is_compatible(device, compat);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
++	return res;
++}
+ EXPORT_SYMBOL(of_device_is_compatible);
+ 
+ /**
+@@ -303,13 +341,14 @@ EXPORT_SYMBOL(of_device_is_available);
+ struct device_node *of_get_parent(const struct device_node *node)
+ {
+ 	struct device_node *np;
++	unsigned long flags;
+ 
+ 	if (!node)
+ 		return NULL;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	np = of_node_get(node->parent);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_get_parent);
+@@ -328,14 +367,15 @@ EXPORT_SYMBOL(of_get_parent);
+ struct device_node *of_get_next_parent(struct device_node *node)
+ {
+ 	struct device_node *parent;
++	unsigned long flags;
+ 
+ 	if (!node)
+ 		return NULL;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	parent = of_node_get(node->parent);
+ 	of_node_put(node);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return parent;
+ }
+ 
+@@ -351,14 +391,15 @@ struct device_node *of_get_next_child(co
+ 	struct device_node *prev)
+ {
+ 	struct device_node *next;
++	unsigned long flags;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	next = prev ? prev->sibling : node->child;
+ 	for (; next; next = next->sibling)
+ 		if (of_node_get(next))
+ 			break;
+ 	of_node_put(prev);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return next;
+ }
+ EXPORT_SYMBOL(of_get_next_child);
+@@ -373,14 +414,15 @@ EXPORT_SYMBOL(of_get_next_child);
+ struct device_node *of_find_node_by_path(const char *path)
+ {
+ 	struct device_node *np = allnodes;
++	unsigned long flags;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	for (; np; np = np->allnext) {
+ 		if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
+ 		    && of_node_get(np))
+ 			break;
+ 	}
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_path);
+@@ -400,15 +442,16 @@ struct device_node *of_find_node_by_name
+ 	const char *name)
+ {
+ 	struct device_node *np;
++	unsigned long flags;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	np = from ? from->allnext : allnodes;
+ 	for (; np; np = np->allnext)
+ 		if (np->name && (of_node_cmp(np->name, name) == 0)
+ 		    && of_node_get(np))
+ 			break;
+ 	of_node_put(from);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_name);
+@@ -429,15 +472,16 @@ struct device_node *of_find_node_by_type
+ 	const char *type)
+ {
+ 	struct device_node *np;
++	unsigned long flags;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	np = from ? from->allnext : allnodes;
+ 	for (; np; np = np->allnext)
+ 		if (np->type && (of_node_cmp(np->type, type) == 0)
+ 		    && of_node_get(np))
+ 			break;
+ 	of_node_put(from);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_type);
+@@ -460,18 +504,20 @@ struct device_node *of_find_compatible_n
+ 	const char *type, const char *compatible)
+ {
+ 	struct device_node *np;
++	unsigned long flags;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	np = from ? from->allnext : allnodes;
+ 	for (; np; np = np->allnext) {
+ 		if (type
+ 		    && !(np->type && (of_node_cmp(np->type, type) == 0)))
+ 			continue;
+-		if (of_device_is_compatible(np, compatible) && of_node_get(np))
++		if (__of_device_is_compatible(np, compatible) &&
++		    of_node_get(np))
+ 			break;
+ 	}
+ 	of_node_put(from);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_find_compatible_node);
+@@ -493,8 +539,9 @@ struct device_node *of_find_node_with_pr
+ {
+ 	struct device_node *np;
+ 	struct property *pp;
++	unsigned long flags;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	np = from ? from->allnext : allnodes;
+ 	for (; np; np = np->allnext) {
+ 		for (pp = np->properties; pp != 0; pp = pp->next) {
+@@ -506,20 +553,14 @@ struct device_node *of_find_node_with_pr
+ 	}
+ out:
+ 	of_node_put(from);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_find_node_with_property);
+ 
+-/**
+- * of_match_node - Tell if an device_node has a matching of_match structure
+- *	@matches:	array of of device match structures to search in
+- *	@node:		the of device structure to match against
+- *
+- *	Low level utility function used by device matching.
+- */
+-const struct of_device_id *of_match_node(const struct of_device_id *matches,
+-					 const struct device_node *node)
++static
++const struct of_device_id *__of_match_node(const struct of_device_id *matches,
++					   const struct device_node *node)
+ {
+ 	if (!matches)
+ 		return NULL;
+@@ -533,14 +574,33 @@ const struct of_device_id *of_match_node
+ 			match &= node->type
+ 				&& !strcmp(matches->type, node->type);
+ 		if (matches->compatible[0])
+-			match &= of_device_is_compatible(node,
+-						matches->compatible);
++			match &= __of_device_is_compatible(node,
++							   matches->compatible);
+ 		if (match)
+ 			return matches;
+ 		matches++;
+ 	}
+ 	return NULL;
+ }
++
++/**
++ * of_match_node - Tell if a device_node has a matching of_match structure
++ *	@matches:	array of of device match structures to search in
++ *	@node:		the of device structure to match against
++ *
++ *	Low level utility function used by device matching.
++ */
++const struct of_device_id *of_match_node(const struct of_device_id *matches,
++					 const struct device_node *node)
++{
++	const struct of_device_id *match;
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&devtree_lock, flags);
++	match = __of_match_node(matches, node);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
++	return match;
++}
+ EXPORT_SYMBOL(of_match_node);
+ 
+ /**
+@@ -559,15 +619,16 @@ struct device_node *of_find_matching_nod
+ 					  const struct of_device_id *matches)
+ {
+ 	struct device_node *np;
++	unsigned long flags;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	np = from ? from->allnext : allnodes;
+ 	for (; np; np = np->allnext) {
+-		if (of_match_node(matches, np) && of_node_get(np))
++		if (__of_match_node(matches, np) && of_node_get(np))
+ 			break;
+ 	}
+ 	of_node_put(from);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_find_matching_node);
+@@ -610,12 +671,12 @@ struct device_node *of_find_node_by_phan
+ {
+ 	struct device_node *np;
+ 
+-	read_lock(&devtree_lock);
++	raw_spin_lock(&devtree_lock);
+ 	for (np = allnodes; np; np = np->allnext)
+ 		if (np->phandle == handle)
+ 			break;
+ 	of_node_get(np);
+-	read_unlock(&devtree_lock);
++	raw_spin_unlock(&devtree_lock);
+ 	return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_phandle);
+@@ -949,18 +1010,18 @@ int prom_add_property(struct device_node
+ 	unsigned long flags;
+ 
+ 	prop->next = NULL;
+-	write_lock_irqsave(&devtree_lock, flags);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	next = &np->properties;
+ 	while (*next) {
+ 		if (strcmp(prop->name, (*next)->name) == 0) {
+ 			/* duplicate ! don't insert it */
+-			write_unlock_irqrestore(&devtree_lock, flags);
++			raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 			return -1;
+ 		}
+ 		next = &(*next)->next;
+ 	}
+ 	*next = prop;
+-	write_unlock_irqrestore(&devtree_lock, flags);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 
+ #ifdef CONFIG_PROC_DEVICETREE
+ 	/* try to add to proc as well if it was initialized */
+@@ -985,7 +1046,7 @@ int prom_remove_property(struct device_n
+ 	unsigned long flags;
+ 	int found = 0;
+ 
+-	write_lock_irqsave(&devtree_lock, flags);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	next = &np->properties;
+ 	while (*next) {
+ 		if (*next == prop) {
+@@ -998,7 +1059,7 @@ int prom_remove_property(struct device_n
+ 		}
+ 		next = &(*next)->next;
+ 	}
+-	write_unlock_irqrestore(&devtree_lock, flags);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 
+ 	if (!found)
+ 		return -ENODEV;
+@@ -1028,7 +1089,7 @@ int prom_update_property(struct device_n
+ 	unsigned long flags;
+ 	int found = 0;
+ 
+-	write_lock_irqsave(&devtree_lock, flags);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	next = &np->properties;
+ 	while (*next) {
+ 		if (*next == oldprop) {
+@@ -1042,7 +1103,7 @@ int prom_update_property(struct device_n
+ 		}
+ 		next = &(*next)->next;
+ 	}
+-	write_unlock_irqrestore(&devtree_lock, flags);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ 
+ 	if (!found)
+ 		return -ENODEV;
+@@ -1072,12 +1133,12 @@ void of_attach_node(struct device_node *
+ {
+ 	unsigned long flags;
+ 
+-	write_lock_irqsave(&devtree_lock, flags);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 	np->sibling = np->parent->child;
+ 	np->allnext = allnodes;
+ 	np->parent->child = np;
+ 	allnodes = np;
+-	write_unlock_irqrestore(&devtree_lock, flags);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ }
+ 
+ /**
+@@ -1091,7 +1152,7 @@ void of_detach_node(struct device_node *
+ 	struct device_node *parent;
+ 	unsigned long flags;
+ 
+-	write_lock_irqsave(&devtree_lock, flags);
++	raw_spin_lock_irqsave(&devtree_lock, flags);
+ 
+ 	parent = np->parent;
+ 	if (!parent)
+@@ -1122,7 +1183,7 @@ void of_detach_node(struct device_node *
+ 	of_node_set_flag(np, OF_DETACHED);
+ 
+ out_unlock:
+-	write_unlock_irqrestore(&devtree_lock, flags);
++	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ }
+ #endif /* defined(CONFIG_OF_DYNAMIC) */
+ 
+Index: linux-3.2/arch/sparc/kernel/prom_common.c
+===================================================================
+--- linux-3.2.orig/arch/sparc/kernel/prom_common.c
++++ linux-3.2/arch/sparc/kernel/prom_common.c
+@@ -67,7 +67,7 @@ int of_set_property(struct device_node *
+ 	err = -ENODEV;
+ 
+ 	mutex_lock(&of_set_property_mutex);
+-	write_lock(&devtree_lock);
++	raw_spin_lock(&devtree_lock);
+ 	prevp = &dp->properties;
+ 	while (*prevp) {
+ 		struct property *prop = *prevp;
+@@ -94,7 +94,7 @@ int of_set_property(struct device_node *
+ 		}
+ 		prevp = &(*prevp)->next;
+ 	}
+-	write_unlock(&devtree_lock);
++	raw_spin_unlock(&devtree_lock);
+ 	mutex_unlock(&of_set_property_mutex);
+ 
+ 	/* XXX Update procfs if necessary... */
+Index: linux-3.2/include/linux/of.h
+===================================================================
+--- linux-3.2.orig/include/linux/of.h
++++ linux-3.2/include/linux/of.h
+@@ -71,7 +71,7 @@ struct device_node {
+ extern struct device_node *allnodes;
+ extern struct device_node *of_chosen;
+ extern struct device_node *of_aliases;
+-extern rwlock_t devtree_lock;
++extern raw_spinlock_t devtree_lock;
+ 
+ static inline bool of_have_populated_dt(void)
+ {
+Index: linux-3.2/include/linux/list.h
+===================================================================
+--- linux-3.2.orig/include/linux/list.h
++++ linux-3.2/include/linux/list.h
+@@ -362,6 +362,17 @@ static inline void list_splice_tail_init
+ 	list_entry((ptr)->next, type, member)
+ 
+ /**
++ * list_last_entry - get the last element from a list
++ * @ptr:	the list head to take the element from.
++ * @type:	the type of the struct this is embedded in.
++ * @member:	the name of the list_struct within the struct.
++ *
++ * Note that the list is expected to be non-empty.
++ */
++#define list_last_entry(ptr, type, member) \
++	list_entry((ptr)->prev, type, member)
++
++/**
+  * list_for_each	-	iterate over a list
+  * @pos:	the &struct list_head to use as a loop cursor.
+  * @head:	the head for your list.
+Index: linux-3.2/mm/page_alloc.c
+===================================================================
+--- linux-3.2.orig/mm/page_alloc.c
++++ linux-3.2/mm/page_alloc.c
+@@ -57,6 +57,7 @@
+ #include <linux/ftrace_event.h>
+ #include <linux/memcontrol.h>
+ #include <linux/prefetch.h>
++#include <linux/locallock.h>
+ 
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+@@ -222,6 +223,18 @@ EXPORT_SYMBOL(nr_node_ids);
+ EXPORT_SYMBOL(nr_online_nodes);
+ #endif
+ 
++static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define cpu_lock_irqsave(cpu, flags)		\
++	spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
++# define cpu_unlock_irqrestore(cpu, flags)		\
++	spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
++#else
++# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
++# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
++#endif
++
+ int page_group_by_mobility_disabled __read_mostly;
+ 
+ static void set_pageblock_migratetype(struct page *page, int migratetype)
+@@ -581,7 +594,7 @@ static inline int free_pages_check(struc
+ }
+ 
+ /*
+- * Frees a number of pages from the PCP lists
++ * Frees a number of pages which have been collected from the pcp lists.
+  * Assumes all pages on list are in same zone, and of same order.
+  * count is the number of pages to free.
+  *
+@@ -592,16 +605,42 @@ static inline int free_pages_check(struc
+  * pinned" detection logic.
+  */
+ static void free_pcppages_bulk(struct zone *zone, int count,
+-					struct per_cpu_pages *pcp)
++			       struct list_head *list)
+ {
+-	int migratetype = 0;
+-	int batch_free = 0;
+ 	int to_free = count;
++	unsigned long flags;
+ 
+-	spin_lock(&zone->lock);
++	spin_lock_irqsave(&zone->lock, flags);
+ 	zone->all_unreclaimable = 0;
+ 	zone->pages_scanned = 0;
+ 
++	while (!list_empty(list)) {
++		struct page *page = list_first_entry(list, struct page, lru);
++
++		/* must delete as __free_one_page list manipulates */
++		list_del(&page->lru);
++		/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
++		__free_one_page(page, zone, 0, page_private(page));
++		trace_mm_page_pcpu_drain(page, 0, page_private(page));
++		to_free--;
++	}
++	WARN_ON(to_free != 0);
++	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
++	spin_unlock_irqrestore(&zone->lock, flags);
++}
++
++/*
++ * Moves a number of pages from the PCP lists to free list which
++ * is freed outside of the locked region.
++ *
++ * Assumes all pages on list are in same zone, and of same order.
++ * count is the number of pages to free.
++ */
++static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
++			      struct list_head *dst)
++{
++	int migratetype = 0, batch_free = 0;
++
+ 	while (to_free) {
+ 		struct page *page;
+ 		struct list_head *list;
+@@ -617,7 +656,7 @@ static void free_pcppages_bulk(struct zo
+ 			batch_free++;
+ 			if (++migratetype == MIGRATE_PCPTYPES)
+ 				migratetype = 0;
+-			list = &pcp->lists[migratetype];
++			list = &src->lists[migratetype];
+ 		} while (list_empty(list));
+ 
+ 		/* This is the only non-empty list. Free them all. */
+@@ -625,28 +664,25 @@ static void free_pcppages_bulk(struct zo
+ 			batch_free = to_free;
+ 
+ 		do {
+-			page = list_entry(list->prev, struct page, lru);
+-			/* must delete as __free_one_page list manipulates */
++			page = list_last_entry(list, struct page, lru);
+ 			list_del(&page->lru);
+-			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+-			__free_one_page(page, zone, 0, page_private(page));
+-			trace_mm_page_pcpu_drain(page, 0, page_private(page));
++			list_add(&page->lru, dst);
+ 		} while (--to_free && --batch_free && !list_empty(list));
+ 	}
+-	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
+-	spin_unlock(&zone->lock);
+ }
+ 
+ static void free_one_page(struct zone *zone, struct page *page, int order,
+ 				int migratetype)
+ {
+-	spin_lock(&zone->lock);
++	unsigned long flags;
++
++	spin_lock_irqsave(&zone->lock, flags);
+ 	zone->all_unreclaimable = 0;
+ 	zone->pages_scanned = 0;
+ 
+ 	__free_one_page(page, zone, order, migratetype);
+ 	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+-	spin_unlock(&zone->lock);
++	spin_unlock_irqrestore(&zone->lock, flags);
+ }
+ 
+ static bool free_pages_prepare(struct page *page, unsigned int order)
+@@ -683,13 +719,13 @@ static void __free_pages_ok(struct page 
+ 	if (!free_pages_prepare(page, order))
+ 		return;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	if (unlikely(wasMlocked))
+ 		free_page_mlock(page);
+ 	__count_vm_events(PGFREE, 1 << order);
+ 	free_one_page(page_zone(page), page, order,
+ 					get_pageblock_migratetype(page));
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ }
+ 
+ /*
+@@ -1065,16 +1101,18 @@ static int rmqueue_bulk(struct zone *zon
+ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ {
+ 	unsigned long flags;
++	LIST_HEAD(dst);
+ 	int to_drain;
+ 
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	if (pcp->count >= pcp->batch)
+ 		to_drain = pcp->batch;
+ 	else
+ 		to_drain = pcp->count;
+-	free_pcppages_bulk(zone, to_drain, pcp);
++	isolate_pcp_pages(to_drain, pcp, &dst);
+ 	pcp->count -= to_drain;
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
++	free_pcppages_bulk(zone, to_drain, &dst);
+ }
+ #endif
+ 
+@@ -1093,16 +1131,21 @@ static void drain_pages(unsigned int cpu
+ 	for_each_populated_zone(zone) {
+ 		struct per_cpu_pageset *pset;
+ 		struct per_cpu_pages *pcp;
++		LIST_HEAD(dst);
++		int count;
+ 
+-		local_irq_save(flags);
++		cpu_lock_irqsave(cpu, flags);
+ 		pset = per_cpu_ptr(zone->pageset, cpu);
+ 
+ 		pcp = &pset->pcp;
+-		if (pcp->count) {
+-			free_pcppages_bulk(zone, pcp->count, pcp);
++		count = pcp->count;
++		if (count) {
++			isolate_pcp_pages(count, pcp, &dst);
+ 			pcp->count = 0;
+ 		}
+-		local_irq_restore(flags);
++		cpu_unlock_irqrestore(cpu, flags);
++		if (count)
++			free_pcppages_bulk(zone, count, &dst);
+ 	}
+ }
+ 
+@@ -1119,7 +1162,14 @@ void drain_local_pages(void *arg)
+  */
+ void drain_all_pages(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	on_each_cpu(drain_local_pages, NULL, 1);
++#else
++	int i;
++
++	for_each_online_cpu(i)
++		drain_pages(i);
++#endif
+ }
+ 
+ #ifdef CONFIG_HIBERNATION
+@@ -1175,7 +1225,7 @@ void free_hot_cold_page(struct page *pag
+ 
+ 	migratetype = get_pageblock_migratetype(page);
+ 	set_page_private(page, migratetype);
+-	local_irq_save(flags);
++	local_lock_irqsave(pa_lock, flags);
+ 	if (unlikely(wasMlocked))
+ 		free_page_mlock(page);
+ 	__count_vm_event(PGFREE);
+@@ -1202,12 +1252,19 @@ void free_hot_cold_page(struct page *pag
+ 		list_add(&page->lru, &pcp->lists[migratetype]);
+ 	pcp->count++;
+ 	if (pcp->count >= pcp->high) {
+-		free_pcppages_bulk(zone, pcp->batch, pcp);
++		LIST_HEAD(dst);
++		int count;
++
++		isolate_pcp_pages(pcp->batch, pcp, &dst);
+ 		pcp->count -= pcp->batch;
++		count = pcp->batch;
++		local_unlock_irqrestore(pa_lock, flags);
++		free_pcppages_bulk(zone, count, &dst);
++		return;
+ 	}
+ 
+ out:
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ }
+ 
+ /*
+@@ -1302,7 +1359,7 @@ again:
+ 		struct per_cpu_pages *pcp;
+ 		struct list_head *list;
+ 
+-		local_irq_save(flags);
++		local_lock_irqsave(pa_lock, flags);
+ 		pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ 		list = &pcp->lists[migratetype];
+ 		if (list_empty(list)) {
+@@ -1334,17 +1391,19 @@ again:
+ 			 */
+ 			WARN_ON_ONCE(order > 1);
+ 		}
+-		spin_lock_irqsave(&zone->lock, flags);
++		local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+ 		page = __rmqueue(zone, order, migratetype);
+-		spin_unlock(&zone->lock);
+-		if (!page)
++		if (!page) {
++			spin_unlock(&zone->lock);
+ 			goto failed;
++		}
+ 		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
++		spin_unlock(&zone->lock);
+ 	}
+ 
+ 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
+ 	zone_statistics(preferred_zone, zone, gfp_flags);
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ 
+ 	VM_BUG_ON(bad_range(zone, page));
+ 	if (prep_new_page(page, order, gfp_flags))
+@@ -1352,7 +1411,7 @@ again:
+ 	return page;
+ 
+ failed:
+-	local_irq_restore(flags);
++	local_unlock_irqrestore(pa_lock, flags);
+ 	return NULL;
+ }
+ 
+@@ -1901,8 +1960,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m
+ 	if (*did_some_progress != COMPACT_SKIPPED) {
+ 
+ 		/* Page migration frees to the PCP lists but we want merging */
+-		drain_pages(get_cpu());
+-		put_cpu();
++		drain_pages(get_cpu_light());
++		put_cpu_light();
+ 
+ 		page = get_page_from_freelist(gfp_mask, nodemask,
+ 				order, zonelist, high_zoneidx,
+@@ -3674,14 +3733,16 @@ static int __zone_pcp_update(void *data)
+ 	for_each_possible_cpu(cpu) {
+ 		struct per_cpu_pageset *pset;
+ 		struct per_cpu_pages *pcp;
++		LIST_HEAD(dst);
+ 
+ 		pset = per_cpu_ptr(zone->pageset, cpu);
+ 		pcp = &pset->pcp;
+ 
+-		local_irq_save(flags);
+-		free_pcppages_bulk(zone, pcp->count, pcp);
++		cpu_lock_irqsave(cpu, flags);
++		isolate_pcp_pages(pcp->count, pcp, &dst);
++		free_pcppages_bulk(zone, pcp->count, &dst);
+ 		setup_pageset(pset, batch);
+-		local_irq_restore(flags);
++		cpu_unlock_irqrestore(cpu, flags);
+ 	}
+ 	return 0;
+ }
+@@ -5047,6 +5108,7 @@ static int page_alloc_cpu_notify(struct 
+ void __init page_alloc_init(void)
+ {
+ 	hotcpu_notifier(page_alloc_cpu_notify, 0);
++	local_irq_lock_init(pa_lock);
+ }
+ 
+ /*
+Index: linux-3.2/mm/slab.c
+===================================================================
+--- linux-3.2.orig/mm/slab.c
++++ linux-3.2/mm/slab.c
+@@ -116,6 +116,7 @@
+ #include	<linux/kmemcheck.h>
+ #include	<linux/memory.h>
+ #include	<linux/prefetch.h>
++#include	<linux/locallock.h>
+ 
+ #include	<asm/cacheflush.h>
+ #include	<asm/tlbflush.h>
+@@ -718,12 +719,66 @@ static DEFINE_MUTEX(cache_chain_mutex);
+ static struct list_head cache_chain;
+ 
+ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
++static DEFINE_PER_CPU(struct list_head, slab_free_list);
++static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++# define slab_on_each_cpu(func, cp)	on_each_cpu(func, cp, 1)
++#else
++/*
++ * execute func() for all CPUs. On PREEMPT_RT we don't actually have
++ * to run on the remote CPUs - we only have to take their CPU-locks.
++ * (This is a rare operation, so cacheline bouncing is not an issue.)
++ */
++static void
++slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
++{
++	unsigned int i;
++
++	for_each_online_cpu(i)
++		func(arg, i);
++}
++#endif
++
++static void free_delayed(struct list_head *h)
++{
++	while(!list_empty(h)) {
++		struct page *page = list_first_entry(h, struct page, lru);
++
++		list_del(&page->lru);
++		__free_pages(page, page->index);
++	}
++}
++
++static void unlock_l3_and_free_delayed(spinlock_t *list_lock)
++{
++	LIST_HEAD(tmp);
++
++	list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
++	local_spin_unlock_irq(slab_lock, list_lock);
++	free_delayed(&tmp);
++}
++
++static void unlock_slab_and_free_delayed(unsigned long flags)
++{
++	LIST_HEAD(tmp);
++
++	list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
++	local_unlock_irqrestore(slab_lock, flags);
++	free_delayed(&tmp);
++}
+ 
+ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
+ {
+ 	return cachep->array[smp_processor_id()];
+ }
+ 
++static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep,
++						       int cpu)
++{
++	return cachep->array[cpu];
++}
++
+ static inline struct kmem_cache *__find_general_cachep(size_t size,
+ 							gfp_t gfpflags)
+ {
+@@ -1061,9 +1116,10 @@ static void reap_alien(struct kmem_cache
+ 	if (l3->alien) {
+ 		struct array_cache *ac = l3->alien[node];
+ 
+-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
++		if (ac && ac->avail &&
++		    local_spin_trylock_irq(slab_lock, &ac->lock)) {
+ 			__drain_alien_cache(cachep, ac, node);
+-			spin_unlock_irq(&ac->lock);
++			local_spin_unlock_irq(slab_lock, &ac->lock);
+ 		}
+ 	}
+ }
+@@ -1078,9 +1134,9 @@ static void drain_alien_cache(struct kme
+ 	for_each_online_node(i) {
+ 		ac = alien[i];
+ 		if (ac) {
+-			spin_lock_irqsave(&ac->lock, flags);
++			local_spin_lock_irqsave(slab_lock, &ac->lock, flags);
+ 			__drain_alien_cache(cachep, ac, i);
+-			spin_unlock_irqrestore(&ac->lock, flags);
++			local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags);
+ 		}
+ 	}
+ }
+@@ -1159,11 +1215,11 @@ static int init_cache_nodelists_node(int
+ 			cachep->nodelists[node] = l3;
+ 		}
+ 
+-		spin_lock_irq(&cachep->nodelists[node]->list_lock);
++		local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
+ 		cachep->nodelists[node]->free_limit =
+ 			(1 + nr_cpus_node(node)) *
+ 			cachep->batchcount + cachep->num;
+-		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
++		local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
+ 	}
+ 	return 0;
+ }
+@@ -1188,7 +1244,7 @@ static void __cpuinit cpuup_canceled(lon
+ 		if (!l3)
+ 			goto free_array_cache;
+ 
+-		spin_lock_irq(&l3->list_lock);
++		local_spin_lock_irq(slab_lock, &l3->list_lock);
+ 
+ 		/* Free limit for this kmem_list3 */
+ 		l3->free_limit -= cachep->batchcount;
+@@ -1196,7 +1252,7 @@ static void __cpuinit cpuup_canceled(lon
+ 			free_block(cachep, nc->entry, nc->avail, node);
+ 
+ 		if (!cpumask_empty(mask)) {
+-			spin_unlock_irq(&l3->list_lock);
++			unlock_l3_and_free_delayed(&l3->list_lock);
+ 			goto free_array_cache;
+ 		}
+ 
+@@ -1210,7 +1266,7 @@ static void __cpuinit cpuup_canceled(lon
+ 		alien = l3->alien;
+ 		l3->alien = NULL;
+ 
+-		spin_unlock_irq(&l3->list_lock);
++		unlock_l3_and_free_delayed(&l3->list_lock);
+ 
+ 		kfree(shared);
+ 		if (alien) {
+@@ -1284,7 +1340,7 @@ static int __cpuinit cpuup_prepare(long 
+ 		l3 = cachep->nodelists[node];
+ 		BUG_ON(!l3);
+ 
+-		spin_lock_irq(&l3->list_lock);
++		local_spin_lock_irq(slab_lock, &l3->list_lock);
+ 		if (!l3->shared) {
+ 			/*
+ 			 * We are serialised from CPU_DEAD or
+@@ -1299,7 +1355,7 @@ static int __cpuinit cpuup_prepare(long 
+ 			alien = NULL;
+ 		}
+ #endif
+-		spin_unlock_irq(&l3->list_lock);
++		local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ 		kfree(shared);
+ 		free_alien_cache(alien);
+ 		if (cachep->flags & SLAB_DEBUG_OBJECTS)
+@@ -1490,6 +1546,10 @@ void __init kmem_cache_init(void)
+ 	if (num_possible_nodes() == 1)
+ 		use_alien_caches = 0;
+ 
++	local_irq_lock_init(slab_lock);
++	for_each_possible_cpu(i)
++		INIT_LIST_HEAD(&per_cpu(slab_free_list, i));
++
+ 	for (i = 0; i < NUM_INIT_LISTS; i++) {
+ 		kmem_list3_init(&initkmem_list3[i]);
+ 		if (i < MAX_NUMNODES)
+@@ -1766,12 +1826,14 @@ static void *kmem_getpages(struct kmem_c
+ /*
+  * Interface to system's page release.
+  */
+-static void kmem_freepages(struct kmem_cache *cachep, void *addr)
++static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed)
+ {
+ 	unsigned long i = (1 << cachep->gfporder);
+-	struct page *page = virt_to_page(addr);
++	struct page *page, *basepage = virt_to_page(addr);
+ 	const unsigned long nr_freed = i;
+ 
++	page = basepage;
++
+ 	kmemcheck_free_shadow(page, cachep->gfporder);
+ 
+ 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+@@ -1787,7 +1849,13 @@ static void kmem_freepages(struct kmem_c
+ 	}
+ 	if (current->reclaim_state)
+ 		current->reclaim_state->reclaimed_slab += nr_freed;
+-	free_pages((unsigned long)addr, cachep->gfporder);
++
++	if (!delayed) {
++		free_pages((unsigned long)addr, cachep->gfporder);
++	} else {
++		basepage->index = cachep->gfporder;
++		list_add(&basepage->lru, &__get_cpu_var(slab_free_list));
++	}
+ }
+ 
+ static void kmem_rcu_free(struct rcu_head *head)
+@@ -1795,7 +1863,7 @@ static void kmem_rcu_free(struct rcu_hea
+ 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
+ 	struct kmem_cache *cachep = slab_rcu->cachep;
+ 
+-	kmem_freepages(cachep, slab_rcu->addr);
++	kmem_freepages(cachep, slab_rcu->addr, false);
+ 	if (OFF_SLAB(cachep))
+ 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
+ }
+@@ -2014,7 +2082,8 @@ static void slab_destroy_debugcheck(stru
+  * Before calling the slab must have been unlinked from the cache.  The
+  * cache-lock is not held/needed.
+  */
+-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
++static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp,
++			 bool delayed)
+ {
+ 	void *addr = slabp->s_mem - slabp->colouroff;
+ 
+@@ -2027,7 +2096,7 @@ static void slab_destroy(struct kmem_cac
+ 		slab_rcu->addr = addr;
+ 		call_rcu(&slab_rcu->head, kmem_rcu_free);
+ 	} else {
+-		kmem_freepages(cachep, addr);
++		kmem_freepages(cachep, addr, delayed);
+ 		if (OFF_SLAB(cachep))
+ 			kmem_cache_free(cachep->slabp_cache, slabp);
+ 	}
+@@ -2493,7 +2562,7 @@ EXPORT_SYMBOL(kmem_cache_create);
+ #if DEBUG
+ static void check_irq_off(void)
+ {
+-	BUG_ON(!irqs_disabled());
++	BUG_ON_NONRT(!irqs_disabled());
+ }
+ 
+ static void check_irq_on(void)
+@@ -2528,26 +2597,43 @@ static void drain_array(struct kmem_cach
+ 			struct array_cache *ac,
+ 			int force, int node);
+ 
+-static void do_drain(void *arg)
++static void __do_drain(void *arg, unsigned int cpu)
+ {
+ 	struct kmem_cache *cachep = arg;
+ 	struct array_cache *ac;
+-	int node = numa_mem_id();
++	int node = cpu_to_mem(cpu);
+ 
+-	check_irq_off();
+-	ac = cpu_cache_get(cachep);
++	ac = cpu_cache_get_on_cpu(cachep, cpu);
+ 	spin_lock(&cachep->nodelists[node]->list_lock);
+ 	free_block(cachep, ac->entry, ac->avail, node);
+ 	spin_unlock(&cachep->nodelists[node]->list_lock);
+ 	ac->avail = 0;
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_BASE
++static void do_drain(void *arg)
++{
++	__do_drain(arg, smp_processor_id());
++}
++#else
++static void do_drain(void *arg, int cpu)
++{
++	LIST_HEAD(tmp);
++
++	spin_lock_irq(&per_cpu(slab_lock, cpu).lock);
++	__do_drain(arg, cpu);
++	list_splice_init(&per_cpu(slab_free_list, cpu), &tmp);
++	spin_unlock_irq(&per_cpu(slab_lock, cpu).lock);
++	free_delayed(&tmp);
++}
++#endif
++
+ static void drain_cpu_caches(struct kmem_cache *cachep)
+ {
+ 	struct kmem_list3 *l3;
+ 	int node;
+ 
+-	on_each_cpu(do_drain, cachep, 1);
++	slab_on_each_cpu(do_drain, cachep);
+ 	check_irq_on();
+ 	for_each_online_node(node) {
+ 		l3 = cachep->nodelists[node];
+@@ -2578,10 +2664,10 @@ static int drain_freelist(struct kmem_ca
+ 	nr_freed = 0;
+ 	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
+ 
+-		spin_lock_irq(&l3->list_lock);
++		local_spin_lock_irq(slab_lock, &l3->list_lock);
+ 		p = l3->slabs_free.prev;
+ 		if (p == &l3->slabs_free) {
+-			spin_unlock_irq(&l3->list_lock);
++			local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ 			goto out;
+ 		}
+ 
+@@ -2595,8 +2681,8 @@ static int drain_freelist(struct kmem_ca
+ 		 * to the cache.
+ 		 */
+ 		l3->free_objects -= cache->num;
+-		spin_unlock_irq(&l3->list_lock);
+-		slab_destroy(cache, slabp);
++		local_spin_unlock_irq(slab_lock, &l3->list_lock);
++		slab_destroy(cache, slabp, false);
+ 		nr_freed++;
+ 	}
+ out:
+@@ -2890,7 +2976,7 @@ static int cache_grow(struct kmem_cache 
+ 	offset *= cachep->colour_off;
+ 
+ 	if (local_flags & __GFP_WAIT)
+-		local_irq_enable();
++		local_unlock_irq(slab_lock);
+ 
+ 	/*
+ 	 * The test for missing atomic flag is performed here, rather than
+@@ -2920,7 +3006,7 @@ static int cache_grow(struct kmem_cache 
+ 	cache_init_objs(cachep, slabp);
+ 
+ 	if (local_flags & __GFP_WAIT)
+-		local_irq_disable();
++		local_lock_irq(slab_lock);
+ 	check_irq_off();
+ 	spin_lock(&l3->list_lock);
+ 
+@@ -2931,10 +3017,10 @@ static int cache_grow(struct kmem_cache 
+ 	spin_unlock(&l3->list_lock);
+ 	return 1;
+ opps1:
+-	kmem_freepages(cachep, objp);
++	kmem_freepages(cachep, objp, false);
+ failed:
+ 	if (local_flags & __GFP_WAIT)
+-		local_irq_disable();
++		local_lock_irq(slab_lock);
+ 	return 0;
+ }
+ 
+@@ -3326,11 +3412,11 @@ retry:
+ 		 * set and go into memory reserves if necessary.
+ 		 */
+ 		if (local_flags & __GFP_WAIT)
+-			local_irq_enable();
++			local_unlock_irq(slab_lock);
+ 		kmem_flagcheck(cache, flags);
+ 		obj = kmem_getpages(cache, local_flags, numa_mem_id());
+ 		if (local_flags & __GFP_WAIT)
+-			local_irq_disable();
++			local_lock_irq(slab_lock);
+ 		if (obj) {
+ 			/*
+ 			 * Insert into the appropriate per node queues
+@@ -3446,7 +3532,7 @@ __cache_alloc_node(struct kmem_cache *ca
+ 		return NULL;
+ 
+ 	cache_alloc_debugcheck_before(cachep, flags);
+-	local_irq_save(save_flags);
++	local_lock_irqsave(slab_lock, save_flags);
+ 
+ 	if (nodeid == NUMA_NO_NODE)
+ 		nodeid = slab_node;
+@@ -3471,7 +3557,7 @@ __cache_alloc_node(struct kmem_cache *ca
+ 	/* ___cache_alloc_node can fall back to other nodes */
+ 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
+   out:
+-	local_irq_restore(save_flags);
++	local_unlock_irqrestore(slab_lock, save_flags);
+ 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+ 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+ 				 flags);
+@@ -3531,9 +3617,9 @@ __cache_alloc(struct kmem_cache *cachep,
+ 		return NULL;
+ 
+ 	cache_alloc_debugcheck_before(cachep, flags);
+-	local_irq_save(save_flags);
++	local_lock_irqsave(slab_lock, save_flags);
+ 	objp = __do_cache_alloc(cachep, flags);
+-	local_irq_restore(save_flags);
++	local_unlock_irqrestore(slab_lock, save_flags);
+ 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ 	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+ 				 flags);
+@@ -3581,7 +3667,7 @@ static void free_block(struct kmem_cache
+ 				 * a different cache, refer to comments before
+ 				 * alloc_slabmgmt.
+ 				 */
+-				slab_destroy(cachep, slabp);
++				slab_destroy(cachep, slabp, true);
+ 			} else {
+ 				list_add(&slabp->list, &l3->slabs_free);
+ 			}
+@@ -3844,12 +3930,12 @@ void kmem_cache_free(struct kmem_cache *
+ {
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
+ 	debug_check_no_locks_freed(objp, obj_size(cachep));
+ 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+ 		debug_check_no_obj_freed(objp, obj_size(cachep));
++	local_lock_irqsave(slab_lock, flags);
+ 	__cache_free(cachep, objp, __builtin_return_address(0));
+-	local_irq_restore(flags);
++	unlock_slab_and_free_delayed(flags);
+ 
+ 	trace_kmem_cache_free(_RET_IP_, objp);
+ }
+@@ -3873,13 +3959,13 @@ void kfree(const void *objp)
+ 
+ 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
+ 		return;
+-	local_irq_save(flags);
+ 	kfree_debugcheck(objp);
+ 	c = virt_to_cache(objp);
+ 	debug_check_no_locks_freed(objp, obj_size(c));
+ 	debug_check_no_obj_freed(objp, obj_size(c));
++	local_lock_irqsave(slab_lock, flags);
+ 	__cache_free(c, (void *)objp, __builtin_return_address(0));
+-	local_irq_restore(flags);
++	unlock_slab_and_free_delayed(flags);
+ }
+ EXPORT_SYMBOL(kfree);
+ 
+@@ -3922,7 +4008,7 @@ static int alloc_kmemlist(struct kmem_ca
+ 		if (l3) {
+ 			struct array_cache *shared = l3->shared;
+ 
+-			spin_lock_irq(&l3->list_lock);
++			local_spin_lock_irq(slab_lock, &l3->list_lock);
+ 
+ 			if (shared)
+ 				free_block(cachep, shared->entry,
+@@ -3935,7 +4021,8 @@ static int alloc_kmemlist(struct kmem_ca
+ 			}
+ 			l3->free_limit = (1 + nr_cpus_node(node)) *
+ 					cachep->batchcount + cachep->num;
+-			spin_unlock_irq(&l3->list_lock);
++			unlock_l3_and_free_delayed(&l3->list_lock);
++
+ 			kfree(shared);
+ 			free_alien_cache(new_alien);
+ 			continue;
+@@ -3982,17 +4069,30 @@ struct ccupdate_struct {
+ 	struct array_cache *new[0];
+ };
+ 
+-static void do_ccupdate_local(void *info)
++static void __do_ccupdate_local(void *info, int cpu)
+ {
+ 	struct ccupdate_struct *new = info;
+ 	struct array_cache *old;
+ 
+-	check_irq_off();
+-	old = cpu_cache_get(new->cachep);
++	old = cpu_cache_get_on_cpu(new->cachep, cpu);
++
++	new->cachep->array[cpu] = new->new[cpu];
++	new->new[cpu] = old;
++}
+ 
+-	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
+-	new->new[smp_processor_id()] = old;
++#ifndef CONFIG_PREEMPT_RT_BASE
++static void do_ccupdate_local(void *info)
++{
++	__do_ccupdate_local(info, smp_processor_id());
+ }
++#else
++static void do_ccupdate_local(void *info, int cpu)
++{
++	spin_lock_irq(&per_cpu(slab_lock, cpu).lock);
++	__do_ccupdate_local(info, cpu);
++	spin_unlock_irq(&per_cpu(slab_lock, cpu).lock);
++}
++#endif
+ 
+ /* Always called with the cache_chain_mutex held */
+ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+@@ -4018,7 +4118,7 @@ static int do_tune_cpucache(struct kmem_
+ 	}
+ 	new->cachep = cachep;
+ 
+-	on_each_cpu(do_ccupdate_local, (void *)new, 1);
++	slab_on_each_cpu(do_ccupdate_local, (void *)new);
+ 
+ 	check_irq_on();
+ 	cachep->batchcount = batchcount;
+@@ -4029,9 +4129,11 @@ static int do_tune_cpucache(struct kmem_
+ 		struct array_cache *ccold = new->new[i];
+ 		if (!ccold)
+ 			continue;
+-		spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
++		local_spin_lock_irq(slab_lock,
++				    &cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
+-		spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
++
++		unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ 		kfree(ccold);
+ 	}
+ 	kfree(new);
+@@ -4107,7 +4209,7 @@ static void drain_array(struct kmem_cach
+ 	if (ac->touched && !force) {
+ 		ac->touched = 0;
+ 	} else {
+-		spin_lock_irq(&l3->list_lock);
++		local_spin_lock_irq(slab_lock, &l3->list_lock);
+ 		if (ac->avail) {
+ 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
+ 			if (tofree > ac->avail)
+@@ -4117,7 +4219,7 @@ static void drain_array(struct kmem_cach
+ 			memmove(ac->entry, &(ac->entry[tofree]),
+ 				sizeof(void *) * ac->avail);
+ 		}
+-		spin_unlock_irq(&l3->list_lock);
++		local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ 	}
+ }
+ 
+@@ -4256,7 +4358,7 @@ static int s_show(struct seq_file *m, vo
+ 			continue;
+ 
+ 		check_irq_on();
+-		spin_lock_irq(&l3->list_lock);
++		local_spin_lock_irq(slab_lock, &l3->list_lock);
+ 
+ 		list_for_each_entry(slabp, &l3->slabs_full, list) {
+ 			if (slabp->inuse != cachep->num && !error)
+@@ -4281,7 +4383,7 @@ static int s_show(struct seq_file *m, vo
+ 		if (l3->shared)
+ 			shared_avail += l3->shared->avail;
+ 
+-		spin_unlock_irq(&l3->list_lock);
++		local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ 	}
+ 	num_slabs += active_slabs;
+ 	num_objs = num_slabs * cachep->num;
+@@ -4510,13 +4612,13 @@ static int leaks_show(struct seq_file *m
+ 			continue;
+ 
+ 		check_irq_on();
+-		spin_lock_irq(&l3->list_lock);
++		local_spin_lock_irq(slab_lock, &l3->list_lock);
+ 
+ 		list_for_each_entry(slabp, &l3->slabs_full, list)
+ 			handle_slab(n, cachep, slabp);
+ 		list_for_each_entry(slabp, &l3->slabs_partial, list)
+ 			handle_slab(n, cachep, slabp);
+-		spin_unlock_irq(&l3->list_lock);
++		local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ 	}
+ 	name = cachep->name;
+ 	if (n[0] == n[1]) {
+Index: linux-3.2/include/linux/pid.h
+===================================================================
+--- linux-3.2.orig/include/linux/pid.h
++++ linux-3.2/include/linux/pid.h
+@@ -2,6 +2,7 @@
+ #define _LINUX_PID_H
+ 
+ #include <linux/rcupdate.h>
++#include <linux/atomic.h>
+ 
+ enum pid_type
+ {
+Index: linux-3.2/include/linux/sysctl.h
+===================================================================
+--- linux-3.2.orig/include/linux/sysctl.h
++++ linux-3.2/include/linux/sysctl.h
+@@ -932,6 +932,7 @@ enum
+ #include <linux/list.h>
+ #include <linux/rcupdate.h>
+ #include <linux/wait.h>
++#include <linux/atomic.h>
+ 
+ /* For the /proc/sys support */
+ struct ctl_table;
+Index: linux-3.2/net/core/sock.c
+===================================================================
+--- linux-3.2.orig/net/core/sock.c
++++ linux-3.2/net/core/sock.c
+@@ -2035,12 +2035,11 @@ void lock_sock_nested(struct sock *sk, i
+ 	if (sk->sk_lock.owned)
+ 		__lock_sock(sk);
+ 	sk->sk_lock.owned = 1;
+-	spin_unlock(&sk->sk_lock.slock);
++	spin_unlock_bh(&sk->sk_lock.slock);
+ 	/*
+ 	 * The sk_lock has mutex_lock() semantics here:
+ 	 */
+ 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+-	local_bh_enable();
+ }
+ EXPORT_SYMBOL(lock_sock_nested);
+ 
+Index: linux-3.2/include/linux/interrupt.h
+===================================================================
+--- linux-3.2.orig/include/linux/interrupt.h
++++ linux-3.2/include/linux/interrupt.h
+@@ -219,7 +219,7 @@ extern void devm_free_irq(struct device 
+ #ifdef CONFIG_LOCKDEP
+ # define local_irq_enable_in_hardirq()	do { } while (0)
+ #else
+-# define local_irq_enable_in_hardirq()	local_irq_enable()
++# define local_irq_enable_in_hardirq()	local_irq_enable_nort()
+ #endif
+ 
+ extern void disable_irq_nosync(unsigned int irq);
+@@ -396,9 +396,13 @@ static inline int disable_irq_wake(unsig
+ 
+ 
+ #ifdef CONFIG_IRQ_FORCED_THREADING
+-extern bool force_irqthreads;
++# ifndef CONFIG_PREEMPT_RT_BASE
++   extern bool force_irqthreads;
++# else
++#  define force_irqthreads	(true)
++# endif
+ #else
+-#define force_irqthreads	(0)
++#define force_irqthreads	(false)
+ #endif
+ 
+ #ifndef __ARCH_SET_SOFTIRQ_PENDING
+@@ -452,8 +456,14 @@ struct softirq_action
+ 	void	(*action)(struct softirq_action *);
+ };
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
++static inline void thread_do_softirq(void) { do_softirq(); }
++#else
++extern void thread_do_softirq(void);
++#endif
++
+ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+ extern void softirq_init(void);
+ static inline void __raise_softirq_irqoff(unsigned int nr)
+@@ -465,6 +475,8 @@ static inline void __raise_softirq_irqof
+ extern void raise_softirq_irqoff(unsigned int nr);
+ extern void raise_softirq(unsigned int nr);
+ 
++extern void softirq_check_pending_idle(void);
++
+ /* This is the worklist that queues up per-cpu softirq work.
+  *
+  * send_remote_sendirq() adds work to these lists, and
+@@ -642,6 +654,12 @@ void tasklet_hrtimer_cancel(struct taskl
+ 	tasklet_kill(&ttimer->tasklet);
+ }
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void softirq_early_init(void);
++#else
++static inline void softirq_early_init(void) { }
++#endif
++
+ /*
+  * Autoprobing for irqs:
+  *
+Index: linux-3.2/net/core/dev.c
+===================================================================
+--- linux-3.2.orig/net/core/dev.c
++++ linux-3.2/net/core/dev.c
+@@ -222,14 +222,14 @@ static inline struct hlist_head *dev_ind
+ static inline void rps_lock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+-	spin_lock(&sd->input_pkt_queue.lock);
++	raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+ 
+ static inline void rps_unlock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+-	spin_unlock(&sd->input_pkt_queue.lock);
++	raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+ 
+@@ -3033,16 +3033,46 @@ int netif_rx_ni(struct sk_buff *skb)
+ {
+ 	int err;
+ 
+-	preempt_disable();
++	migrate_disable();
+ 	err = netif_rx(skb);
+ 	if (local_softirq_pending())
+-		do_softirq();
+-	preempt_enable();
++		thread_do_softirq();
++	migrate_enable();
+ 
+ 	return err;
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * RT runs ksoftirqd as a real time thread and the root_lock is a
++ * "sleeping spinlock". If the trylock fails then we can go into an
++ * infinite loop when ksoftirqd preempted the task which actually
++ * holds the lock, because we requeue q and raise NET_TX softirq
++ * causing ksoftirqd to loop forever.
++ *
++ * It's safe to use spin_lock on RT here as softirqs run in thread
++ * context and cannot deadlock against the thread which is holding
++ * root_lock.
++ *
++ * On !RT the trylock might fail, but there we bail out from the
++ * softirq loop after 10 attempts which we can't do on RT. And the
++ * task holding root_lock cannot be preempted, so the only downside of
++ * that trylock is that we need 10 loops to decide that we should have
++ * given up in the first one :)
++ */
++static inline int take_root_lock(spinlock_t *lock)
++{
++	spin_lock(lock);
++	return 1;
++}
++#else
++static inline int take_root_lock(spinlock_t *lock)
++{
++	return spin_trylock(lock);
++}
++#endif
++
+ static void net_tx_action(struct softirq_action *h)
+ {
+ 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+@@ -3081,7 +3111,7 @@ static void net_tx_action(struct softirq
+ 			head = head->next_sched;
+ 
+ 			root_lock = qdisc_lock(q);
+-			if (spin_trylock(root_lock)) {
++			if (take_root_lock(root_lock)) {
+ 				smp_mb__before_clear_bit();
+ 				clear_bit(__QDISC_STATE_SCHED,
+ 					  &q->state);
+@@ -3406,7 +3436,7 @@ static void flush_backlog(void *arg)
+ 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ 		if (skb->dev == dev) {
+ 			__skb_unlink(skb, &sd->input_pkt_queue);
+-			kfree_skb(skb);
++			__skb_queue_tail(&sd->tofree_queue, skb);
+ 			input_queue_head_incr(sd);
+ 		}
+ 	}
+@@ -3415,10 +3445,13 @@ static void flush_backlog(void *arg)
+ 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ 		if (skb->dev == dev) {
+ 			__skb_unlink(skb, &sd->process_queue);
+-			kfree_skb(skb);
++			__skb_queue_tail(&sd->tofree_queue, skb);
+ 			input_queue_head_incr(sd);
+ 		}
+ 	}
++
++	if (!skb_queue_empty(&sd->tofree_queue))
++		raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ }
+ 
+ static int napi_gro_complete(struct sk_buff *skb)
+@@ -3895,10 +3928,17 @@ static void net_rx_action(struct softirq
+ 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ 	unsigned long time_limit = jiffies + 2;
+ 	int budget = netdev_budget;
++	struct sk_buff *skb;
+ 	void *have;
+ 
+ 	local_irq_disable();
+ 
++	while ((skb = __skb_dequeue(&sd->tofree_queue))) {
++		local_irq_enable();
++		kfree_skb(skb);
++		local_irq_disable();
++	}
++
+ 	while (!list_empty(&sd->poll_list)) {
+ 		struct napi_struct *n;
+ 		int work, weight;
+@@ -6358,6 +6398,9 @@ static int dev_cpu_callback(struct notif
+ 		netif_rx(skb);
+ 		input_queue_head_incr(oldsd);
+ 	}
++	while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
++		kfree_skb(skb);
++	}
+ 
+ 	return NOTIFY_OK;
+ }
+@@ -6624,8 +6667,9 @@ static int __init net_dev_init(void)
+ 		struct softnet_data *sd = &per_cpu(softnet_data, i);
+ 
+ 		memset(sd, 0, sizeof(*sd));
+-		skb_queue_head_init(&sd->input_pkt_queue);
+-		skb_queue_head_init(&sd->process_queue);
++		skb_queue_head_init_raw(&sd->input_pkt_queue);
++		skb_queue_head_init_raw(&sd->process_queue);
++		skb_queue_head_init_raw(&sd->tofree_queue);
+ 		sd->completion_queue = NULL;
+ 		INIT_LIST_HEAD(&sd->poll_list);
+ 		sd->output_queue = NULL;
+Index: linux-3.2/arch/x86/kernel/entry_32.S
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/entry_32.S
++++ linux-3.2/arch/x86/kernel/entry_32.S
+@@ -626,7 +626,11 @@ work_notifysig:				# deal with pending s
+ 	jne work_notifysig_v86		# returning to kernel-space or
+ 					# vm86-space
+ 	xorl %edx, %edx
++	TRACE_IRQS_ON
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	call do_notify_resume
++	DISABLE_INTERRUPTS(CLBR_ANY)
++	TRACE_IRQS_OFF
+ 	jmp resume_userspace_sig
+ 
+ 	ALIGN
+@@ -639,7 +643,11 @@ work_notifysig_v86:
+ 	movl %esp, %eax
+ #endif
+ 	xorl %edx, %edx
++	TRACE_IRQS_ON
++	ENABLE_INTERRUPTS(CLBR_NONE)
+ 	call do_notify_resume
++	DISABLE_INTERRUPTS(CLBR_ANY)
++	TRACE_IRQS_OFF
+ 	jmp resume_userspace_sig
+ END(work_pending)
+ 
+Index: linux-3.2/kernel/rcutree.c
+===================================================================
+--- linux-3.2.orig/kernel/rcutree.c
++++ linux-3.2/kernel/rcutree.c
+@@ -170,6 +170,12 @@ void rcu_sched_qs(int cpu)
+ 	rdp->passed_quiesce = 1;
+ }
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++void rcu_bh_qs(int cpu)
++{
++	rcu_preempt_qs(cpu);
++}
++#else
+ void rcu_bh_qs(int cpu)
+ {
+ 	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+@@ -180,6 +186,7 @@ void rcu_bh_qs(int cpu)
+ 		trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
+ 	rdp->passed_quiesce = 1;
+ }
++#endif
+ 
+ /*
+  * Note a context switch.  This is a quiescent state for RCU-sched,
+@@ -225,6 +232,7 @@ long rcu_batches_completed_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+  * Return the number of RCU BH batches processed thus far for debug & stats.
+  */
+@@ -242,6 +250,7 @@ void rcu_bh_force_quiescent_state(void)
+ 	force_quiescent_state(&rcu_bh_state, 0);
+ }
+ EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
++#endif
+ 
+ /*
+  * Record the number of times rcutorture tests have been initiated and
+@@ -1221,7 +1230,7 @@ static void __rcu_offline_cpu(int cpu, s
+ 	else
+ 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ 	if (need_report & RCU_OFL_TASKS_EXP_GP)
+-		rcu_report_exp_rnp(rsp, rnp);
++		rcu_report_exp_rnp(rsp, rnp, true);
+ 	rcu_node_kthread_setaffinity(rnp, -1);
+ }
+ 
+@@ -1667,6 +1676,7 @@ void call_rcu_sched(struct rcu_head *hea
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+  * Queue an RCU for invocation after a quicker grace period.
+  */
+@@ -1675,6 +1685,7 @@ void call_rcu_bh(struct rcu_head *head, 
+ 	__call_rcu(head, func, &rcu_bh_state);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
+ 
+ /**
+  * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+@@ -1707,6 +1718,7 @@ void synchronize_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(synchronize_sched);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+  *
+@@ -1723,6 +1735,7 @@ void synchronize_rcu_bh(void)
+ 	wait_rcu_gp(call_rcu_bh);
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
++#endif
+ 
+ /*
+  * Check to see if there is any immediate RCU-related work to be done
+@@ -1877,6 +1890,7 @@ static void _rcu_barrier(struct rcu_stat
+ 	mutex_unlock(&rcu_barrier_mutex);
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+  */
+@@ -1885,6 +1899,7 @@ void rcu_barrier_bh(void)
+ 	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
+ }
+ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
++#endif
+ 
+ /**
+  * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+Index: linux-3.2/kernel/rcutree.h
+===================================================================
+--- linux-3.2.orig/kernel/rcutree.h
++++ linux-3.2/kernel/rcutree.h
+@@ -430,6 +430,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
+ /* Forward declarations for rcutree_plugin.h */
+ static void rcu_bootup_announce(void);
+ long rcu_batches_completed(void);
++static void rcu_preempt_qs(int cpu);
+ static void rcu_preempt_note_context_switch(int cpu);
+ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -451,7 +452,8 @@ static void rcu_preempt_check_callbacks(
+ static void rcu_preempt_process_callbacks(void);
+ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+ #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
+-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
++static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
++			       bool wake);
+ #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+ static int rcu_preempt_pending(int cpu);
+ static int rcu_preempt_needs_cpu(int cpu);
+Index: linux-3.2/kernel/rcutree_plugin.h
+===================================================================
+--- linux-3.2.orig/kernel/rcutree_plugin.h
++++ linux-3.2/kernel/rcutree_plugin.h
+@@ -336,7 +336,7 @@ static noinline void rcu_read_unlock_spe
+ 	}
+ 
+ 	/* Hardware IRQ handlers cannot block. */
+-	if (in_irq() || in_serving_softirq()) {
++	if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
+ 		local_irq_restore(flags);
+ 		return;
+ 	}
+@@ -407,7 +407,7 @@ static noinline void rcu_read_unlock_spe
+ 		 * then we need to report up the rcu_node hierarchy.
+ 		 */
+ 		if (!empty_exp && !rcu_preempted_readers_exp(rnp))
+-			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
++			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
+ 	} else {
+ 		local_irq_restore(flags);
+ 	}
+@@ -731,7 +731,8 @@ static int sync_rcu_preempt_exp_done(str
+  *
+  * Caller must hold sync_rcu_preempt_exp_mutex.
+  */
+-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
++static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
++			       bool wake)
+ {
+ 	unsigned long flags;
+ 	unsigned long mask;
+@@ -744,7 +745,8 @@ static void rcu_report_exp_rnp(struct rc
+ 		}
+ 		if (rnp->parent == NULL) {
+ 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+-			wake_up(&sync_rcu_preempt_exp_wq);
++			if (wake)
++				wake_up(&sync_rcu_preempt_exp_wq);
+ 			break;
+ 		}
+ 		mask = rnp->grpmask;
+@@ -777,7 +779,7 @@ sync_rcu_preempt_exp_init(struct rcu_sta
+ 		must_wait = 1;
+ 	}
+ 	if (!must_wait)
+-		rcu_report_exp_rnp(rsp, rnp);
++		rcu_report_exp_rnp(rsp, rnp, false);
+ }
+ 
+ /*
+@@ -1069,9 +1071,9 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedi
+  * report on tasks preempted in RCU read-side critical sections during
+  * expedited RCU grace periods.
+  */
+-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
++static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
++			       bool wake)
+ {
+-	return;
+ }
+ 
+ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
+@@ -1931,7 +1933,7 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
+ 
+ #endif /* #else #ifndef CONFIG_SMP */
+ 
+-#if !defined(CONFIG_RCU_FAST_NO_HZ)
++#if 1 /* !defined(CONFIG_RCU_FAST_NO_HZ) */
+ 
+ /*
+  * Check to see if any future RCU-related work will need to be done
+Index: linux-3.2/drivers/usb/gadget/ci13xxx_udc.c
+===================================================================
+--- linux-3.2.orig/drivers/usb/gadget/ci13xxx_udc.c
++++ linux-3.2/drivers/usb/gadget/ci13xxx_udc.c
+@@ -816,7 +816,7 @@ static struct {
+ } dbg_data = {
+ 	.idx = 0,
+ 	.tty = 0,
+-	.lck = __RW_LOCK_UNLOCKED(lck)
++	.lck = __RW_LOCK_UNLOCKED(dbg_data.lck)
+ };
+ 
+ /**
+Index: linux-3.2/fs/file.c
+===================================================================
+--- linux-3.2.orig/fs/file.c
++++ linux-3.2/fs/file.c
+@@ -105,14 +105,14 @@ void free_fdtable_rcu(struct rcu_head *r
+ 		kfree(fdt->open_fds);
+ 		kfree(fdt);
+ 	} else {
+-		fddef = &get_cpu_var(fdtable_defer_list);
++		fddef = &per_cpu(fdtable_defer_list, get_cpu_light());
+ 		spin_lock(&fddef->lock);
+ 		fdt->next = fddef->next;
+ 		fddef->next = fdt;
+ 		/* vmallocs are handled from the workqueue context */
+ 		schedule_work(&fddef->wq);
+ 		spin_unlock(&fddef->lock);
+-		put_cpu_var(fdtable_defer_list);
++		put_cpu_light();
+ 	}
+ }
+ 
+@@ -422,7 +422,7 @@ struct files_struct init_files = {
+ 		.close_on_exec	= (fd_set *)&init_files.close_on_exec_init,
+ 		.open_fds	= (fd_set *)&init_files.open_fds_init,
+ 	},
+-	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock),
++	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
+ };
+ 
+ /*
+Index: linux-3.2/include/linux/idr.h
+===================================================================
+--- linux-3.2.orig/include/linux/idr.h
++++ linux-3.2/include/linux/idr.h
+@@ -136,7 +136,7 @@ struct ida {
+ 	struct ida_bitmap	*free_bitmap;
+ };
+ 
+-#define IDA_INIT(name)		{ .idr = IDR_INIT(name), .free_bitmap = NULL, }
++#define IDA_INIT(name)		{ .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
+ #define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)
+ 
+ int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
+Index: linux-3.2/kernel/cred.c
+===================================================================
+--- linux-3.2.orig/kernel/cred.c
++++ linux-3.2/kernel/cred.c
+@@ -35,7 +35,7 @@ static struct kmem_cache *cred_jar;
+ static struct thread_group_cred init_tgcred = {
+ 	.usage	= ATOMIC_INIT(2),
+ 	.tgid	= 0,
+-	.lock	= __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock),
++	.lock	= __SPIN_LOCK_UNLOCKED(init_tgcred.lock),
+ };
+ #endif
+ 
+Index: linux-3.2/include/linux/seqlock.h
+===================================================================
+--- linux-3.2.orig/include/linux/seqlock.h
++++ linux-3.2/include/linux/seqlock.h
+@@ -30,92 +30,12 @@
+ #include <linux/preempt.h>
+ #include <asm/processor.h>
+ 
+-typedef struct {
+-	unsigned sequence;
+-	spinlock_t lock;
+-} seqlock_t;
+-
+-/*
+- * These macros triggered gcc-3.x compile-time problems.  We think these are
+- * OK now.  Be cautious.
+- */
+-#define __SEQLOCK_UNLOCKED(lockname) \
+-		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
+-
+-#define seqlock_init(x)					\
+-	do {						\
+-		(x)->sequence = 0;			\
+-		spin_lock_init(&(x)->lock);		\
+-	} while (0)
+-
+-#define DEFINE_SEQLOCK(x) \
+-		seqlock_t x = __SEQLOCK_UNLOCKED(x)
+-
+-/* Lock out other writers and update the count.
+- * Acts like a normal spin_lock/unlock.
+- * Don't need preempt_disable() because that is in the spin_lock already.
+- */
+-static inline void write_seqlock(seqlock_t *sl)
+-{
+-	spin_lock(&sl->lock);
+-	++sl->sequence;
+-	smp_wmb();
+-}
+-
+-static inline void write_sequnlock(seqlock_t *sl)
+-{
+-	smp_wmb();
+-	sl->sequence++;
+-	spin_unlock(&sl->lock);
+-}
+-
+-static inline int write_tryseqlock(seqlock_t *sl)
+-{
+-	int ret = spin_trylock(&sl->lock);
+-
+-	if (ret) {
+-		++sl->sequence;
+-		smp_wmb();
+-	}
+-	return ret;
+-}
+-
+-/* Start of read calculation -- fetch last complete writer token */
+-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
+-{
+-	unsigned ret;
+-
+-repeat:
+-	ret = ACCESS_ONCE(sl->sequence);
+-	if (unlikely(ret & 1)) {
+-		cpu_relax();
+-		goto repeat;
+-	}
+-	smp_rmb();
+-
+-	return ret;
+-}
+-
+-/*
+- * Test if reader processed invalid data.
+- *
+- * If sequence value changed then writer changed data while in section.
+- */
+-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
+-{
+-	smp_rmb();
+-
+-	return unlikely(sl->sequence != start);
+-}
+-
+-
+ /*
+  * Version using sequence counter only.
+  * This can be used when code has its own mutex protecting the
+  * updating starting before the write_seqcount_begin() and ending
+  * after the write_seqcount_end().
+  */
+-
+ typedef struct seqcount {
+ 	unsigned sequence;
+ } seqcount_t;
+@@ -197,7 +117,6 @@ static inline int __read_seqcount_retry(
+ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+ {
+ 	smp_rmb();
+-
+ 	return __read_seqcount_retry(s, start);
+ }
+ 
+@@ -231,31 +150,154 @@ static inline void write_seqcount_barrie
+ 	s->sequence+=2;
+ }
+ 
++typedef struct {
++	struct seqcount seqcount;
++	raw_spinlock_t lock;
++} raw_seqlock_t;
++
++typedef struct {
++	struct seqcount seqcount;
++	spinlock_t lock;
++} seqlock_t;
++
++/*
++ * These macros triggered gcc-3.x compile-time problems.  We think these are
++ * OK now.  Be cautious.
++ */
++#define __RAW_SEQLOCK_UNLOCKED(lockname)			\
++	{							\
++		.seqcount = SEQCNT_ZERO,			\
++		.lock =	__RAW_SPIN_LOCK_UNLOCKED(lockname)	\
++	}
++
++#define raw_seqlock_init(x)					\
++	do {							\
++		seqcount_init(&(x)->seqcount);			\
++		raw_spin_lock_init(&(x)->lock);			\
++	} while (0)
++
++#define DEFINE_RAW_SEQLOCK(x) \
++		raw_seqlock_t x = __RAW_SEQLOCK_UNLOCKED(x)
++
++#define __SEQLOCK_UNLOCKED(lockname)			\
++	{						\
++		.seqcount = SEQCNT_ZERO,		\
++		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
++	}
++
++#define seqlock_init(x)					\
++	do {						\
++		seqcount_init(&(x)->seqcount);		\
++		spin_lock_init(&(x)->lock);		\
++	} while (0)
++
++#define DEFINE_SEQLOCK(x) \
++		seqlock_t x = __SEQLOCK_UNLOCKED(x)
++
++#define read_seqbegin(sl)		read_seqcount_begin(&(sl)->seqcount)
++#define read_seqretry(sl, start)	read_seqcount_retry(&(sl)->seqcount, start)
++
+ /*
+- * Possible sw/hw IRQ protected versions of the interfaces.
++ * Lock out other writers and update the count.
++ * Acts like a normal spin_lock/unlock.
++ * Don't need preempt_disable() because that is in the spin_lock already.
+  */
++static inline void raw_write_seqlock(raw_seqlock_t *sl)
++{
++	raw_spin_lock(&sl->lock);
++	write_seqcount_begin(&sl->seqcount);
++}
++
++static inline void raw_write_sequnlock(raw_seqlock_t *sl)
++{
++	write_seqcount_end(&sl->seqcount);
++	raw_spin_unlock(&sl->lock);
++}
++
++static inline void raw_write_seqlock_irq(raw_seqlock_t *sl)
++{
++	raw_spin_lock_irq(&sl->lock);
++	write_seqcount_begin(&sl->seqcount);
++}
++
++static inline void raw_write_sequnlock_irq(raw_seqlock_t *sl)
++{
++	write_seqcount_end(&sl->seqcount);
++	raw_spin_unlock_irq(&sl->lock);
++}
++
++static inline unsigned long __raw_write_seqlock_irqsave(raw_seqlock_t *sl)
++{
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&sl->lock, flags);
++	write_seqcount_begin(&sl->seqcount);
++	return flags;
++}
++
++#define raw_write_seqlock_irqsave(lock, flags)				\
++	do { flags = __raw_write_seqlock_irqsave(lock); } while (0)
++
++static inline void
++raw_write_sequnlock_irqrestore(raw_seqlock_t *sl, unsigned long flags)
++{
++	write_seqcount_end(&sl->seqcount);
++	raw_spin_unlock_irqrestore(&sl->lock, flags);
++}
++
++static inline void write_seqlock(seqlock_t *sl)
++{
++	spin_lock(&sl->lock);
++	write_seqcount_begin(&sl->seqcount);
++}
++
++static inline void write_sequnlock(seqlock_t *sl)
++{
++	write_seqcount_end(&sl->seqcount);
++	spin_unlock(&sl->lock);
++}
++
++static inline void write_seqlock_bh(seqlock_t *sl)
++{
++	spin_lock_bh(&sl->lock);
++	write_seqcount_begin(&sl->seqcount);
++}
++
++static inline void write_sequnlock_bh(seqlock_t *sl)
++{
++	write_seqcount_end(&sl->seqcount);
++	spin_unlock_bh(&sl->lock);
++}
++
++static inline void write_seqlock_irq(seqlock_t *sl)
++{
++	spin_lock_irq(&sl->lock);
++	write_seqcount_begin(&sl->seqcount);
++}
++
++static inline void write_sequnlock_irq(seqlock_t *sl)
++{
++	write_seqcount_end(&sl->seqcount);
++	spin_unlock_irq(&sl->lock);
++}
++
++static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&sl->lock, flags);
++	write_seqcount_begin(&sl->seqcount);
++	return flags;
++}
++
+ #define write_seqlock_irqsave(lock, flags)				\
+-	do { local_irq_save(flags); write_seqlock(lock); } while (0)
+-#define write_seqlock_irq(lock)						\
+-	do { local_irq_disable();   write_seqlock(lock); } while (0)
+-#define write_seqlock_bh(lock)						\
+-        do { local_bh_disable();    write_seqlock(lock); } while (0)
+-
+-#define write_sequnlock_irqrestore(lock, flags)				\
+-	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
+-#define write_sequnlock_irq(lock)					\
+-	do { write_sequnlock(lock); local_irq_enable(); } while(0)
+-#define write_sequnlock_bh(lock)					\
+-	do { write_sequnlock(lock); local_bh_enable(); } while(0)
+-
+-#define read_seqbegin_irqsave(lock, flags)				\
+-	({ local_irq_save(flags);   read_seqbegin(lock); })
+-
+-#define read_seqretry_irqrestore(lock, iv, flags)			\
+-	({								\
+-		int ret = read_seqretry(lock, iv);			\
+-		local_irq_restore(flags);				\
+-		ret;							\
+-	})
++	do { flags = __write_seqlock_irqsave(lock); } while (0)
++
++static inline void
++write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
++{
++	write_seqcount_end(&sl->seqcount);
++	spin_unlock_irqrestore(&sl->lock, flags);
++}
+ 
+ #endif /* __LINUX_SEQLOCK_H */
+Index: linux-3.2/arch/ia64/kernel/time.c
+===================================================================
+--- linux-3.2.orig/arch/ia64/kernel/time.c
++++ linux-3.2/arch/ia64/kernel/time.c
+@@ -36,7 +36,7 @@
+ static cycle_t itc_get_cycles(struct clocksource *cs);
+ 
+ struct fsyscall_gtod_data_t fsyscall_gtod_data = {
+-	.lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
++	.lock = __RAW_SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
+ };
+ 
+ struct itc_jitter_data_t itc_jitter_data;
+@@ -462,7 +462,7 @@ void update_vsyscall(struct timespec *wa
+ {
+         unsigned long flags;
+ 
+-        write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
++	raw_write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+ 
+         /* copy fsyscall clock data */
+         fsyscall_gtod_data.clk_mask = c->mask;
+@@ -485,6 +485,6 @@ void update_vsyscall(struct timespec *wa
+ 		fsyscall_gtod_data.monotonic_time.tv_sec++;
+ 	}
+ 
+-        write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
++	raw_write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+ }
+ 
+Index: linux-3.2/arch/x86/include/asm/vgtod.h
+===================================================================
+--- linux-3.2.orig/arch/x86/include/asm/vgtod.h
++++ linux-3.2/arch/x86/include/asm/vgtod.h
+@@ -5,7 +5,7 @@
+ #include <linux/clocksource.h>
+ 
+ struct vsyscall_gtod_data {
+-	seqlock_t	lock;
++	raw_seqlock_t	lock;
+ 
+ 	/* open coded 'struct timespec' */
+ 	time_t		wall_time_sec;
+Index: linux-3.2/arch/x86/kernel/vsyscall_64.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/vsyscall_64.c
++++ linux-3.2/arch/x86/kernel/vsyscall_64.c
+@@ -54,7 +54,7 @@
+ DEFINE_VVAR(int, vgetcpu_mode);
+ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
+ {
+-	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
++	.lock = __RAW_SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
+ };
+ 
+ static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
+@@ -82,10 +82,10 @@ void update_vsyscall_tz(void)
+ {
+ 	unsigned long flags;
+ 
+-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
++	raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ 	/* sys_tz has changed */
+ 	vsyscall_gtod_data.sys_tz = sys_tz;
+-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
++	raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ }
+ 
+ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
+@@ -93,7 +93,7 @@ void update_vsyscall(struct timespec *wa
+ {
+ 	unsigned long flags;
+ 
+-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
++	raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+ 
+ 	/* copy vsyscall data */
+ 	vsyscall_gtod_data.clock.vclock_mode	= clock->archdata.vclock_mode;
+@@ -106,7 +106,7 @@ void update_vsyscall(struct timespec *wa
+ 	vsyscall_gtod_data.wall_to_monotonic	= *wtm;
+ 	vsyscall_gtod_data.wall_time_coarse	= __current_kernel_time();
+ 
+-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
++	raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ }
+ 
+ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+Index: linux-3.2/kernel/time/ntp.c
+===================================================================
+--- linux-3.2.orig/kernel/time/ntp.c
++++ linux-3.2/kernel/time/ntp.c
+@@ -358,7 +358,7 @@ static enum hrtimer_restart ntp_leap_sec
+ {
+ 	enum hrtimer_restart res = HRTIMER_NORESTART;
+ 
+-	write_seqlock(&xtime_lock);
++	raw_write_seqlock(&xtime_lock);
+ 
+ 	switch (time_state) {
+ 	case TIME_OK:
+@@ -388,7 +388,7 @@ static enum hrtimer_restart ntp_leap_sec
+ 		break;
+ 	}
+ 
+-	write_sequnlock(&xtime_lock);
++	raw_write_sequnlock(&xtime_lock);
+ 
+ 	return res;
+ }
+@@ -663,7 +663,7 @@ int do_adjtimex(struct timex *txc)
+ 
+ 	getnstimeofday(&ts);
+ 
+-	write_seqlock_irq(&xtime_lock);
++	raw_write_seqlock_irq(&xtime_lock);
+ 
+ 	if (txc->modes & ADJ_ADJTIME) {
+ 		long save_adjust = time_adjust;
+@@ -705,7 +705,7 @@ int do_adjtimex(struct timex *txc)
+ 	/* fill PPS status fields */
+ 	pps_fill_timex(txc);
+ 
+-	write_sequnlock_irq(&xtime_lock);
++	raw_write_sequnlock_irq(&xtime_lock);
+ 
+ 	txc->time.tv_sec = ts.tv_sec;
+ 	txc->time.tv_usec = ts.tv_nsec;
+@@ -903,7 +903,7 @@ void hardpps(const struct timespec *phas
+ 
+ 	pts_norm = pps_normalize_ts(*phase_ts);
+ 
+-	write_seqlock_irqsave(&xtime_lock, flags);
++	raw_write_seqlock_irqsave(&xtime_lock, flags);
+ 
+ 	/* clear the error bits, they will be set again if needed */
+ 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
+@@ -916,7 +916,7 @@ void hardpps(const struct timespec *phas
+ 	 * just start the frequency interval */
+ 	if (unlikely(pps_fbase.tv_sec == 0)) {
+ 		pps_fbase = *raw_ts;
+-		write_sequnlock_irqrestore(&xtime_lock, flags);
++		raw_write_sequnlock_irqrestore(&xtime_lock, flags);
+ 		return;
+ 	}
+ 
+@@ -931,7 +931,7 @@ void hardpps(const struct timespec *phas
+ 		time_status |= STA_PPSJITTER;
+ 		/* restart the frequency calibration interval */
+ 		pps_fbase = *raw_ts;
+-		write_sequnlock_irqrestore(&xtime_lock, flags);
++		raw_write_sequnlock_irqrestore(&xtime_lock, flags);
+ 		pr_err("hardpps: PPSJITTER: bad pulse\n");
+ 		return;
+ 	}
+@@ -948,7 +948,7 @@ void hardpps(const struct timespec *phas
+ 
+ 	hardpps_update_phase(pts_norm.nsec);
+ 
+-	write_sequnlock_irqrestore(&xtime_lock, flags);
++	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
+ }
+ EXPORT_SYMBOL(hardpps);
+ 
+Index: linux-3.2/kernel/time/tick-common.c
+===================================================================
+--- linux-3.2.orig/kernel/time/tick-common.c
++++ linux-3.2/kernel/time/tick-common.c
+@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
+ static void tick_periodic(int cpu)
+ {
+ 	if (tick_do_timer_cpu == cpu) {
+-		write_seqlock(&xtime_lock);
++		raw_write_seqlock(&xtime_lock);
+ 
+ 		/* Keep track of the next tick event */
+ 		tick_next_period = ktime_add(tick_next_period, tick_period);
+ 
+ 		do_timer(1);
+-		write_sequnlock(&xtime_lock);
++		raw_write_sequnlock(&xtime_lock);
+ 	}
+ 
+ 	update_process_times(user_mode(get_irq_regs()));
+Index: linux-3.2/kernel/time/tick-internal.h
+===================================================================
+--- linux-3.2.orig/kernel/time/tick-internal.h
++++ linux-3.2/kernel/time/tick-internal.h
+@@ -141,4 +141,4 @@ static inline int tick_device_is_functio
+ #endif
+ 
+ extern void do_timer(unsigned long ticks);
+-extern seqlock_t xtime_lock;
++extern raw_seqlock_t xtime_lock;
+Index: linux-3.2/kernel/time/tick-sched.c
+===================================================================
+--- linux-3.2.orig/kernel/time/tick-sched.c
++++ linux-3.2/kernel/time/tick-sched.c
+@@ -56,7 +56,7 @@ static void tick_do_update_jiffies64(kti
+ 		return;
+ 
+ 	/* Reevalute with xtime_lock held */
+-	write_seqlock(&xtime_lock);
++	raw_write_seqlock(&xtime_lock);
+ 
+ 	delta = ktime_sub(now, last_jiffies_update);
+ 	if (delta.tv64 >= tick_period.tv64) {
+@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(kti
+ 		/* Keep the tick_next_period variable up to date */
+ 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
+ 	}
+-	write_sequnlock(&xtime_lock);
++	raw_write_sequnlock(&xtime_lock);
+ }
+ 
+ /*
+@@ -89,12 +89,12 @@ static ktime_t tick_init_jiffy_update(vo
+ {
+ 	ktime_t period;
+ 
+-	write_seqlock(&xtime_lock);
++	raw_write_seqlock(&xtime_lock);
+ 	/* Did we start the jiffies update yet ? */
+ 	if (last_jiffies_update.tv64 == 0)
+ 		last_jiffies_update = tick_next_period;
+ 	period = last_jiffies_update;
+-	write_sequnlock(&xtime_lock);
++	raw_write_sequnlock(&xtime_lock);
+ 	return period;
+ }
+ 
+@@ -332,13 +332,7 @@ void tick_nohz_stop_sched_tick(int inidl
+ 		goto end;
+ 
+ 	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+-		static int ratelimit;
+-
+-		if (ratelimit < 10) {
+-			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+-			       (unsigned int) local_softirq_pending());
+-			ratelimit++;
+-		}
++		softirq_check_pending_idle();
+ 		goto end;
+ 	}
+ 
+@@ -798,6 +792,7 @@ void tick_setup_sched_timer(void)
+ 	 * Emulate tick processing via per-CPU hrtimers:
+ 	 */
+ 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++	ts->sched_timer.irqsafe = 1;
+ 	ts->sched_timer.function = tick_sched_timer;
+ 
+ 	/* Get the next period (per cpu) */
+Index: linux-3.2/kernel/time/timekeeping.c
+===================================================================
+--- linux-3.2.orig/kernel/time/timekeeping.c
++++ linux-3.2/kernel/time/timekeeping.c
+@@ -139,8 +139,7 @@ static inline s64 timekeeping_get_ns_raw
+  * This read-write spinlock protects us from races in SMP while
+  * playing with xtime.
+  */
+-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+-
++__cacheline_aligned_in_smp DEFINE_RAW_SEQLOCK(xtime_lock);
+ 
+ /*
+  * The current time
+@@ -361,7 +360,7 @@ int do_settimeofday(const struct timespe
+ 	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ 		return -EINVAL;
+ 
+-	write_seqlock_irqsave(&xtime_lock, flags);
++	raw_write_seqlock_irqsave(&xtime_lock, flags);
+ 
+ 	timekeeping_forward_now();
+ 
+@@ -377,7 +376,7 @@ int do_settimeofday(const struct timespe
+ 	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ 				timekeeper.mult);
+ 
+-	write_sequnlock_irqrestore(&xtime_lock, flags);
++	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
+ 
+ 	/* signal hrtimers about time change */
+ 	clock_was_set();
+@@ -401,7 +400,7 @@ int timekeeping_inject_offset(struct tim
+ 	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ 		return -EINVAL;
+ 
+-	write_seqlock_irqsave(&xtime_lock, flags);
++	raw_write_seqlock_irqsave(&xtime_lock, flags);
+ 
+ 	timekeeping_forward_now();
+ 
+@@ -414,7 +413,7 @@ int timekeeping_inject_offset(struct tim
+ 	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ 				timekeeper.mult);
+ 
+-	write_sequnlock_irqrestore(&xtime_lock, flags);
++	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
+ 
+ 	/* signal hrtimers about time change */
+ 	clock_was_set();
+@@ -568,7 +567,7 @@ void __init timekeeping_init(void)
+ 	read_persistent_clock(&now);
+ 	read_boot_clock(&boot);
+ 
+-	write_seqlock_irqsave(&xtime_lock, flags);
++	raw_write_seqlock_irqsave(&xtime_lock, flags);
+ 
+ 	ntp_init();
+ 
+@@ -589,7 +588,7 @@ void __init timekeeping_init(void)
+ 				-boot.tv_sec, -boot.tv_nsec);
+ 	total_sleep_time.tv_sec = 0;
+ 	total_sleep_time.tv_nsec = 0;
+-	write_sequnlock_irqrestore(&xtime_lock, flags);
++	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
+ }
+ 
+ /* time in seconds when suspend began */
+@@ -636,7 +635,7 @@ void timekeeping_inject_sleeptime(struct
+ 	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
+ 		return;
+ 
+-	write_seqlock_irqsave(&xtime_lock, flags);
++	raw_write_seqlock_irqsave(&xtime_lock, flags);
+ 	timekeeping_forward_now();
+ 
+ 	__timekeeping_inject_sleeptime(delta);
+@@ -646,7 +645,7 @@ void timekeeping_inject_sleeptime(struct
+ 	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ 				timekeeper.mult);
+ 
+-	write_sequnlock_irqrestore(&xtime_lock, flags);
++	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
+ 
+ 	/* signal hrtimers about time change */
+ 	clock_was_set();
+@@ -669,7 +668,7 @@ static void timekeeping_resume(void)
+ 
+ 	clocksource_resume();
+ 
+-	write_seqlock_irqsave(&xtime_lock, flags);
++	raw_write_seqlock_irqsave(&xtime_lock, flags);
+ 
+ 	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
+ 		ts = timespec_sub(ts, timekeeping_suspend_time);
+@@ -679,7 +678,7 @@ static void timekeeping_resume(void)
+ 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+ 	timekeeper.ntp_error = 0;
+ 	timekeeping_suspended = 0;
+-	write_sequnlock_irqrestore(&xtime_lock, flags);
++	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
+ 
+ 	touch_softlockup_watchdog();
+ 
+@@ -697,7 +696,7 @@ static int timekeeping_suspend(void)
+ 
+ 	read_persistent_clock(&timekeeping_suspend_time);
+ 
+-	write_seqlock_irqsave(&xtime_lock, flags);
++	raw_write_seqlock_irqsave(&xtime_lock, flags);
+ 	timekeeping_forward_now();
+ 	timekeeping_suspended = 1;
+ 
+@@ -720,7 +719,7 @@ static int timekeeping_suspend(void)
+ 		timekeeping_suspend_time =
+ 			timespec_add(timekeeping_suspend_time, delta_delta);
+ 	}
+-	write_sequnlock_irqrestore(&xtime_lock, flags);
++	raw_write_sequnlock_irqrestore(&xtime_lock, flags);
+ 
+ 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
+ 	clocksource_suspend();
+@@ -1149,7 +1148,7 @@ ktime_t ktime_get_monotonic_offset(void)
+  */
+ void xtime_update(unsigned long ticks)
+ {
+-	write_seqlock(&xtime_lock);
++	raw_write_seqlock(&xtime_lock);
+ 	do_timer(ticks);
+-	write_sequnlock(&xtime_lock);
++	raw_write_sequnlock(&xtime_lock);
+ }
+Index: linux-3.2/Documentation/trace/histograms.txt
+===================================================================
+--- /dev/null
++++ linux-3.2/Documentation/trace/histograms.txt
+@@ -0,0 +1,186 @@
++		Using the Linux Kernel Latency Histograms
++
++
++This document gives a short explanation how to enable, configure and use
++latency histograms. Latency histograms are primarily relevant in the
++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
++and are used in the quality management of the Linux real-time
++capabilities.
++
++
++* Purpose of latency histograms
++
++A latency histogram continuously accumulates the frequencies of latency
++data. There are two types of histograms:
++- potential sources of latencies
++- effective latencies
++
++
++* Potential sources of latencies
++
++Potential sources of latencies are code segments where interrupts,
++preemption or both are disabled (aka critical sections). To create
++histograms of potential sources of latency, the kernel stores the time
++stamp at the start of a critical section, determines the time elapsed
++when the end of the section is reached, and increments the frequency
++counter of that latency value - irrespective of whether any concurrently
++running process is affected by latency or not.
++- Configuration items (in the Kernel hacking/Tracers submenu)
++  CONFIG_INTERRUPT_OFF_HIST
++  CONFIG_PREEMPT_OFF_HIST
++
++
++* Effective latencies
++
++Effective latencies are those that actually occur during wakeup of a process. To
++determine effective latencies, the kernel stores the time stamp when a
++process is scheduled to be woken up, and determines the duration of the
++wakeup time shortly before control is passed over to this process. Note
++that the apparent latency in user space may be somewhat longer, since the
++process may be interrupted after control is passed over to it but before
++the execution in user space takes place. Simply measuring the interval
++between enqueuing and wakeup may also not be appropriate in cases when a
++process is scheduled as a result of a timer expiration. The timer may have
++missed its deadline, e.g. due to disabled interrupts, but this latency
++would not be registered. Therefore, the offsets of missed timers are
++recorded in a separate histogram. If both wakeup latency and missed timer
++offsets are configured and enabled, a third histogram may be enabled that
++records the overall latency as a sum of the timer latency, if any, and the
++wakeup latency. This histogram is called "timerandwakeup".
++- Configuration items (in the Kernel hacking/Tracers submenu)
++  CONFIG_WAKEUP_LATENCY_HIST
++  CONFIG_MISSED_TIMER_OFFSETS_HIST
++
++
++* Usage
++
++The interface to the administration of the latency histograms is located
++in the debugfs file system. To mount it, either enter
++
++mount -t sysfs nodev /sys
++mount -t debugfs nodev /sys/kernel/debug
++
++from shell command line level, or add
++
++nodev	/sys			sysfs	defaults	0 0
++nodev	/sys/kernel/debug	debugfs	defaults	0 0
++
++to the file /etc/fstab. All latency histogram related files are then
++available in the directory /sys/kernel/debug/tracing/latency_hist. A
++particular histogram type is enabled by writing non-zero to the related
++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
++Select "preemptirqsoff" for the histograms of potential sources of
++latencies and "wakeup" for histograms of effective latencies etc. The
++histogram data - one per CPU - are available in the files
++
++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
++
++The histograms are reset by writing non-zero to the file "reset" in a
++particular latency directory. To reset all latency data, use
++
++#!/bin/sh
++
++TRACINGDIR=/sys/kernel/debug/tracing
++HISTDIR=$TRACINGDIR/latency_hist
++
++if test -d $HISTDIR
++then
++  cd $HISTDIR
++  for i in `find . | grep /reset$`
++  do
++    echo 1 >$i
++  done
++fi
++
++
++* Data format
++
++Latency data are stored with a resolution of one microsecond. The
++maximum latency is 10,240 microseconds. The data are only valid if the
++overflow register is empty. Every output line contains the latency in
++microseconds in the first column and the number of samples in the second
++column. To display only lines with a positive latency count, use, for
++example,
++
++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
++
++#Minimum latency: 0 microseconds.
++#Average latency: 0 microseconds.
++#Maximum latency: 25 microseconds.
++#Total samples: 3104770694
++#There are 0 samples greater or equal than 10240 microseconds
++#usecs	         samples
++    0	      2984486876
++    1	        49843506
++    2	        58219047
++    3	         5348126
++    4	         2187960
++    5	         3388262
++    6	          959289
++    7	          208294
++    8	           40420
++    9	            4485
++   10	           14918
++   11	           18340
++   12	           25052
++   13	           19455
++   14	            5602
++   15	             969
++   16	              47
++   17	              18
++   18	              14
++   19	               1
++   20	               3
++   21	               2
++   22	               5
++   23	               2
++   25	               1
++
++
++* Wakeup latency of a selected process
++
++To only collect wakeup latency data of a particular process, write the
++PID of the requested process to
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/pid
++
++PIDs are not considered if this variable is set to 0.
++
++
++* Details of the process with the highest wakeup latency so far
++
++Selected data of the process that suffered from the highest wakeup
++latency that occurred in a particular CPU are available in the file
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
++
++In addition, other relevant system data at the time when the
++latency occurred are given.
++
++The format of the data is (all in one line):
++<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
++<- <PID> <Priority> <Command> <Timestamp>
++
++The value of <Timeroffset> is only relevant in the combined timer
++and wakeup latency recording. In the wakeup recording, it is
++always 0; in the missed_timer_offsets recording, it is the same
++as <Latency>.
++
++When retrospectively searching for the origin of a latency and
++tracing was not enabled, it may be helpful to know the name and
++some basic data of the task that (finally) was switching to the
++late real-time task. In addition to the victim's data, the data
++of the possible culprit are therefore also displayed after the
++"<-" symbol.
++
++Finally, the timestamp of the time when the latency occurred
++in <seconds>.<microseconds> after the most recent system boot
++is provided.
++
++These data are also reset when the wakeup histogram is reset.
+Index: linux-3.2/include/trace/events/hist.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/trace/events/hist.h
+@@ -0,0 +1,69 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM hist
++
++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_HIST_H
++
++#include "latency_hist.h"
++#include <linux/tracepoint.h>
++
++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
++#define trace_preemptirqsoff_hist(a,b)
++#else
++TRACE_EVENT(preemptirqsoff_hist,
++
++	TP_PROTO(int reason, int starthist),
++
++	TP_ARGS(reason, starthist),
++
++	TP_STRUCT__entry(
++		__field(int,	reason	)
++		__field(int,	starthist	)
++	),
++
++	TP_fast_assign(
++		__entry->reason		= reason;
++		__entry->starthist	= starthist;
++	),
++
++	TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
++		  __entry->starthist ? "start" : "stop")
++);
++#endif
++
++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
++#define trace_hrtimer_interrupt(a,b,c,d)
++#else
++TRACE_EVENT(hrtimer_interrupt,
++
++	TP_PROTO(int cpu, long long offset, struct task_struct *curr, struct task_struct *task),
++
++	TP_ARGS(cpu, offset, curr, task),
++
++	TP_STRUCT__entry(
++		__field(int,		cpu	)
++		__field(long long,	offset	)
++		__array(char,		ccomm,	TASK_COMM_LEN)
++		__field(int,		cprio	)
++		__array(char,		tcomm,	TASK_COMM_LEN)
++		__field(int,		tprio	)
++	),
++
++	TP_fast_assign(
++		__entry->cpu	= cpu;
++		__entry->offset	= offset;
++		memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
++		__entry->cprio  = curr->prio;
++		memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>", task != NULL ? TASK_COMM_LEN : 7);
++		__entry->tprio  = task != NULL ? task->prio : -1;
++	),
++
++	TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
++		__entry->cpu, __entry->offset, __entry->ccomm, __entry->cprio, __entry->tcomm, __entry->tprio)
++);
++#endif
++
++#endif /* _TRACE_HIST_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+Index: linux-3.2/include/trace/events/latency_hist.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/trace/events/latency_hist.h
+@@ -0,0 +1,30 @@
++#ifndef _LATENCY_HIST_H
++#define _LATENCY_HIST_H
++
++enum hist_action {
++	IRQS_ON,
++	PREEMPT_ON,
++	TRACE_STOP,
++	IRQS_OFF,
++	PREEMPT_OFF,
++	TRACE_START,
++};
++
++static char *actions[] = {
++	"IRQS_ON",
++	"PREEMPT_ON",
++	"TRACE_STOP",
++	"IRQS_OFF",
++	"PREEMPT_OFF",
++	"TRACE_START",
++};
++
++static inline char *getaction(int action)
++{
++	if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
++		return(actions[action]);
++	return("unknown");
++}
++
++#endif /* _LATENCY_HIST_H */
++
+Index: linux-3.2/kernel/hrtimer.c
+===================================================================
+--- linux-3.2.orig/kernel/hrtimer.c
++++ linux-3.2/kernel/hrtimer.c
+@@ -49,6 +49,7 @@
+ #include <asm/uaccess.h>
+ 
+ #include <trace/events/timer.h>
++#include <trace/events/hist.h>
+ 
+ /*
+  * The timer bases:
+@@ -588,8 +589,7 @@ static int hrtimer_reprogram(struct hrti
+ 	 * When the callback is running, we do not reprogram the clock event
+ 	 * device. The timer callback is either running on a different CPU or
+ 	 * the callback is executed in the hrtimer_interrupt context. The
+-	 * reprogramming is handled either by the softirq, which called the
+-	 * callback or at the end of the hrtimer_interrupt.
++	 * reprogramming is handled at the end of the hrtimer_interrupt.
+ 	 */
+ 	if (hrtimer_callback_running(timer))
+ 		return 0;
+@@ -624,6 +624,9 @@ static int hrtimer_reprogram(struct hrti
+ 	return res;
+ }
+ 
++static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
++static int hrtimer_rt_defer(struct hrtimer *timer);
++
+ /*
+  * Initialize the high resolution related parts of cpu_base
+  */
+@@ -644,14 +647,23 @@ static inline int hrtimer_enqueue_reprog
+ 					    int wakeup)
+ {
+ 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+-		if (wakeup) {
+-			raw_spin_unlock(&base->cpu_base->lock);
+-			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-			raw_spin_lock(&base->cpu_base->lock);
+-		} else
+-			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++		if (!wakeup)
++			return -ETIME;
+ 
+-		return 1;
++#ifdef CONFIG_PREEMPT_RT_BASE
++		/*
++		 * Move softirq based timers away from the rbtree in
++		 * case it expired already. Otherwise we would have a
++		 * stale base->first entry until the softirq runs.
++		 */
++		if (!hrtimer_rt_defer(timer))
++			return -ETIME;
++#endif
++		raw_spin_unlock(&base->cpu_base->lock);
++		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++		raw_spin_lock(&base->cpu_base->lock);
++
++		return 0;
+ 	}
+ 
+ 	return 0;
+@@ -732,6 +744,11 @@ static inline int hrtimer_enqueue_reprog
+ }
+ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+ static inline void retrigger_next_event(void *arg) { }
++static inline int hrtimer_reprogram(struct hrtimer *timer,
++				    struct hrtimer_clock_base *base)
++{
++	return 0;
++}
+ 
+ #endif /* CONFIG_HIGH_RES_TIMERS */
+ 
+@@ -846,6 +863,32 @@ u64 hrtimer_forward(struct hrtimer *time
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_forward);
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define wake_up_timer_waiters(b)	wake_up(&(b)->wait)
++
++/**
++ * hrtimer_wait_for_timer - Wait for a running timer
++ *
++ * @timer:	timer to wait for
++ *
++ * The function waits in case the timer's callback function is
++ * currently executed on the waitqueue of the timer base. The
++ * waitqueue is woken up after the timer callback function has
++ * finished execution.
++ */
++void hrtimer_wait_for_timer(const struct hrtimer *timer)
++{
++	struct hrtimer_clock_base *base = timer->base;
++
++	if (base && base->cpu_base && !timer->irqsafe)
++		wait_event(base->cpu_base->wait,
++			   !(timer->state & HRTIMER_STATE_CALLBACK));
++}
++
++#else
++# define wake_up_timer_waiters(b)	do { } while (0)
++#endif
++
+ /*
+  * enqueue_hrtimer - internal function to (re)start a timer
+  *
+@@ -888,6 +931,11 @@ static void __remove_hrtimer(struct hrti
+ 	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+ 		goto out;
+ 
++	if (unlikely(!list_empty(&timer->cb_entry))) {
++		list_del_init(&timer->cb_entry);
++		goto out;
++	}
++
+ 	if (&timer->node == timerqueue_getnext(&base->active)) {
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ 		/* Reprogram the clock event device. if enabled */
+@@ -983,8 +1031,20 @@ int __hrtimer_start_range_ns(struct hrti
+ 	 *
+ 	 * XXX send_remote_softirq() ?
+ 	 */
+-	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+-		hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
++		ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++		if (ret) {
++			/*
++			 * In case we failed to reprogram the timer (mostly
++			 * because our current timer has already elapsed),
++			 * remove it again and report a failure. This avoids
++			 * stale base->first entries.
++			 */
++			debug_deactivate(timer);
++			__remove_hrtimer(timer, new_base,
++					timer->state & HRTIMER_STATE_CALLBACK, 0);
++		}
++	}
+ 
+ 	unlock_hrtimer_base(timer, &flags);
+ 
+@@ -1070,7 +1130,7 @@ int hrtimer_cancel(struct hrtimer *timer
+ 
+ 		if (ret >= 0)
+ 			return ret;
+-		cpu_relax();
++		hrtimer_wait_for_timer(timer);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_cancel);
+@@ -1149,6 +1209,7 @@ static void __hrtimer_init(struct hrtime
+ 
+ 	base = hrtimer_clockid_to_base(clock_id);
+ 	timer->base = &cpu_base->clock_base[base];
++	INIT_LIST_HEAD(&timer->cb_entry);
+ 	timerqueue_init(&timer->node);
+ 
+ #ifdef CONFIG_TIMER_STATS
+@@ -1232,6 +1293,122 @@ static void __run_hrtimer(struct hrtimer
+ 	timer->state &= ~HRTIMER_STATE_CALLBACK;
+ }
+ 
++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
++				 struct hrtimer_clock_base *base)
++{
++	/*
++	 * Note, we clear the callback flag before we requeue the
++	 * timer otherwise we trigger the callback_running() check
++	 * in hrtimer_reprogram().
++	 */
++	timer->state &= ~HRTIMER_STATE_CALLBACK;
++
++	if (restart != HRTIMER_NORESTART) {
++		BUG_ON(hrtimer_active(timer));
++		/*
++		 * Enqueue the timer, if it's the leftmost timer then
++		 * we need to reprogram it.
++		 */
++		if (!enqueue_hrtimer(timer, base))
++			return;
++
++#ifndef CONFIG_HIGH_RES_TIMERS
++	}
++#else
++		if (base->cpu_base->hres_active &&
++		    hrtimer_reprogram(timer, base))
++			goto requeue;
++
++	} else if (hrtimer_active(timer)) {
++		/*
++		 * If the timer was rearmed on another CPU, reprogram
++		 * the event device.
++		 */
++		if (&timer->node == base->active.next &&
++		    base->cpu_base->hres_active &&
++		    hrtimer_reprogram(timer, base))
++			goto requeue;
++	}
++	return;
++
++requeue:
++	/*
++	 * Timer is expired. Thus move it from tree to pending list
++	 * again.
++	 */
++	__remove_hrtimer(timer, base, timer->state, 0);
++	list_add_tail(&timer->cb_entry, &base->expired);
++#endif
++}
++
++/*
++ * The changes in mainline which removed the callback modes from
++ * hrtimer are not yet working with -rt. The non wakeup_process()
++ * based callbacks which involve sleeping locks need to be treated
++ * separately.
++ */
++static void hrtimer_rt_run_pending(void)
++{
++	enum hrtimer_restart (*fn)(struct hrtimer *);
++	struct hrtimer_cpu_base *cpu_base;
++	struct hrtimer_clock_base *base;
++	struct hrtimer *timer;
++	int index, restart;
++
++	local_irq_disable();
++	cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
++
++	raw_spin_lock(&cpu_base->lock);
++
++	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
++		base = &cpu_base->clock_base[index];
++
++		while (!list_empty(&base->expired)) {
++			timer = list_first_entry(&base->expired,
++						 struct hrtimer, cb_entry);
++
++			/*
++			 * Same as the above __run_hrtimer function
++			 * just we run with interrupts enabled.
++			 */
++			debug_hrtimer_deactivate(timer);
++			__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
++			timer_stats_account_hrtimer(timer);
++			fn = timer->function;
++
++			raw_spin_unlock_irq(&cpu_base->lock);
++			restart = fn(timer);
++			raw_spin_lock_irq(&cpu_base->lock);
++
++			hrtimer_rt_reprogram(restart, timer, base);
++		}
++	}
++
++	raw_spin_unlock_irq(&cpu_base->lock);
++
++	wake_up_timer_waiters(cpu_base);
++}
++
++static int hrtimer_rt_defer(struct hrtimer *timer)
++{
++	if (timer->irqsafe)
++		return 0;
++
++	__remove_hrtimer(timer, timer->base, timer->state, 0);
++	list_add_tail(&timer->cb_entry, &timer->base->expired);
++	return 1;
++}
++
++#else
++
++static inline void hrtimer_rt_run_pending(void) { }
++static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
++
++#endif
++
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ 
+ /*
+@@ -1242,7 +1419,7 @@ void hrtimer_interrupt(struct clock_even
+ {
+ 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ 	ktime_t expires_next, now, entry_time, delta;
+-	int i, retries = 0;
++	int i, retries = 0, raise = 0;
+ 
+ 	BUG_ON(!cpu_base->hres_active);
+ 	cpu_base->nr_events++;
+@@ -1278,6 +1455,14 @@ retry:
+ 
+ 			timer = container_of(node, struct hrtimer, node);
+ 
++			trace_hrtimer_interrupt(raw_smp_processor_id(),
++			    ktime_to_ns(ktime_sub(
++				hrtimer_get_expires(timer), basenow)),
++			    current,
++			    timer->function == hrtimer_wakeup ?
++			    container_of(timer, struct hrtimer_sleeper,
++				timer)->task : NULL);
++
+ 			/*
+ 			 * The immediate goal for using the softexpires is
+ 			 * minimizing wakeups, not running timers at the
+@@ -1301,7 +1486,10 @@ retry:
+ 				break;
+ 			}
+ 
+-			__run_hrtimer(timer, &basenow);
++			if (!hrtimer_rt_defer(timer))
++				__run_hrtimer(timer, &basenow);
++			else
++				raise = 1;
+ 		}
+ 	}
+ 
+@@ -1316,6 +1504,10 @@ retry:
+ 	if (expires_next.tv64 == KTIME_MAX ||
+ 	    !tick_program_event(expires_next, 0)) {
+ 		cpu_base->hang_detected = 0;
++
++		if (raise)
++			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++
+ 		return;
+ 	}
+ 
+@@ -1391,17 +1583,17 @@ void hrtimer_peek_ahead_timers(void)
+ 	local_irq_restore(flags);
+ }
+ 
+-static void run_hrtimer_softirq(struct softirq_action *h)
+-{
+-	hrtimer_peek_ahead_timers();
+-}
+-
+ #else /* CONFIG_HIGH_RES_TIMERS */
+ 
+ static inline void __hrtimer_peek_ahead_timers(void) { }
+ 
+ #endif	/* !CONFIG_HIGH_RES_TIMERS */
+ 
++static void run_hrtimer_softirq(struct softirq_action *h)
++{
++	hrtimer_rt_run_pending();
++}
++
+ /*
+  * Called from timer softirq every jiffy, expire hrtimers:
+  *
+@@ -1434,7 +1626,7 @@ void hrtimer_run_queues(void)
+ 	struct timerqueue_node *node;
+ 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ 	struct hrtimer_clock_base *base;
+-	int index, gettime = 1;
++	int index, gettime = 1, raise = 0;
+ 
+ 	if (hrtimer_hres_active())
+ 		return;
+@@ -1459,10 +1651,16 @@ void hrtimer_run_queues(void)
+ 					hrtimer_get_expires_tv64(timer))
+ 				break;
+ 
+-			__run_hrtimer(timer, &base->softirq_time);
++			if (!hrtimer_rt_defer(timer))
++				__run_hrtimer(timer, &base->softirq_time);
++			else
++				raise = 1;
+ 		}
+ 		raw_spin_unlock(&cpu_base->lock);
+ 	}
++
++	if (raise)
++		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+ 
+ /*
+@@ -1484,6 +1682,7 @@ static enum hrtimer_restart hrtimer_wake
+ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
+ {
+ 	sl->timer.function = hrtimer_wakeup;
++	sl->timer.irqsafe = 1;
+ 	sl->task = task;
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
+@@ -1622,9 +1821,13 @@ static void __cpuinit init_hrtimers_cpu(
+ 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+ 		cpu_base->clock_base[i].cpu_base = cpu_base;
+ 		timerqueue_init_head(&cpu_base->clock_base[i].active);
++		INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
+ 	}
+ 
+ 	hrtimer_init_hres(cpu_base);
++#ifdef CONFIG_PREEMPT_RT_BASE
++	init_waitqueue_head(&cpu_base->wait);
++#endif
+ }
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -1737,9 +1940,7 @@ void __init hrtimers_init(void)
+ 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
+ 			  (void *)(long)smp_processor_id());
+ 	register_cpu_notifier(&hrtimers_nb);
+-#ifdef CONFIG_HIGH_RES_TIMERS
+ 	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+-#endif
+ }
+ 
+ /**
+Index: linux-3.2/kernel/trace/Kconfig
+===================================================================
+--- linux-3.2.orig/kernel/trace/Kconfig
++++ linux-3.2/kernel/trace/Kconfig
+@@ -192,6 +192,24 @@ config IRQSOFF_TRACER
+ 	  enabled. This option and the preempt-off timing option can be
+ 	  used together or separately.)
+ 
++config INTERRUPT_OFF_HIST
++	bool "Interrupts-off Latency Histogram"
++	depends on IRQSOFF_TRACER
++	help
++	  This option generates continuously updated histograms (one per cpu)
++	  of the duration of time periods with interrupts disabled. The
++	  histograms are disabled by default. To enable them, write a non-zero
++	  number to
++
++	      /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
++
++	  If PREEMPT_OFF_HIST is also selected, additional histograms (one
++	  per cpu) are generated that accumulate the duration of time periods
++	  when both interrupts and preemption are disabled. The histogram data
++	  will be located in the debug file system at
++
++	      /sys/kernel/debug/tracing/latency_hist/irqsoff
++
+ config PREEMPT_TRACER
+ 	bool "Preemption-off Latency Tracer"
+ 	default n
+@@ -214,6 +232,24 @@ config PREEMPT_TRACER
+ 	  enabled. This option and the irqs-off timing option can be
+ 	  used together or separately.)
+ 
++config PREEMPT_OFF_HIST
++	bool "Preemption-off Latency Histogram"
++	depends on PREEMPT_TRACER
++	help
++	  This option generates continuously updated histograms (one per cpu)
++	  of the duration of time periods with preemption disabled. The
++	  histograms are disabled by default. To enable them, write a non-zero
++	  number to
++
++	      /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
++
++	  If INTERRUPT_OFF_HIST is also selected, additional histograms (one
++	  per cpu) are generated that accumulate the duration of time periods
++	  when both interrupts and preemption are disabled. The histogram data
++	  will be located in the debug file system at
++
++	      /sys/kernel/debug/tracing/latency_hist/preemptoff
++
+ config SCHED_TRACER
+ 	bool "Scheduling Latency Tracer"
+ 	select GENERIC_TRACER
+@@ -223,6 +259,74 @@ config SCHED_TRACER
+ 	  This tracer tracks the latency of the highest priority task
+ 	  to be scheduled in, starting from the point it has woken up.
+ 
++config WAKEUP_LATENCY_HIST
++	bool "Scheduling Latency Histogram"
++	depends on SCHED_TRACER
++	help
++	  This option generates continuously updated histograms (one per cpu)
++	  of the scheduling latency of the highest priority task.
++	  The histograms are disabled by default. To enable them, write a
++	  non-zero number to
++
++	      /sys/kernel/debug/tracing/latency_hist/enable/wakeup
++
++	  Two different algorithms are used, one to determine the latency of
++	  processes that exclusively use the highest priority of the system and
++	  another one to determine the latency of processes that share the
++	  highest system priority with other processes. The former is used to
++	  improve hardware and system software, the latter to optimize the
++	  priority design of a given system. The histogram data will be
++	  located in the debug file system at
++
++	      /sys/kernel/debug/tracing/latency_hist/wakeup
++
++	  and
++
++	      /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
++
++	  If both Scheduling Latency Histogram and Missed Timer Offsets
++	  Histogram are selected, additional histogram data will be collected
++	  that contain, in addition to the wakeup latency, the timer latency, in
++	  case the wakeup was triggered by an expired timer. These histograms
++	  are available in the
++
++	      /sys/kernel/debug/tracing/latency_hist/timerandwakeup
++
++	  directory. They reflect the apparent interrupt and scheduling latency
++	  and are best suited to determine the worst-case latency of a given
++	  system. To enable these histograms, write a non-zero number to
++
++	      /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
++
++config MISSED_TIMER_OFFSETS_HIST
++	depends on HIGH_RES_TIMERS
++	select GENERIC_TRACER
++	bool "Missed Timer Offsets Histogram"
++	help
++	  Generate a histogram of missed timer offsets in microseconds. The
++	  histograms are disabled by default. To enable them, write a non-zero
++	  number to
++
++	      /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
++
++	  The histogram data will be located in the debug file system at
++
++	      /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
++
++	  If both Scheduling Latency Histogram and Missed Timer Offsets
++	  Histogram are selected, additional histogram data will be collected
++	  that contain, in addition to the wakeup latency, the timer latency, in
++	  case the wakeup was triggered by an expired timer. These histograms
++	  are available in the
++
++	      /sys/kernel/debug/tracing/latency_hist/timerandwakeup
++
++	  directory. They reflect the apparent interrupt and scheduling latency
++	  and are best suited to determine the worst-case latency of a given
++	  system. To enable these histograms, write a non-zero number to
++
++	      /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
++
+ config ENABLE_DEFAULT_TRACERS
+ 	bool "Trace process context switches and events"
+ 	depends on !GENERIC_TRACER
+Index: linux-3.2/kernel/trace/Makefile
+===================================================================
+--- linux-3.2.orig/kernel/trace/Makefile
++++ linux-3.2/kernel/trace/Makefile
+@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f
+ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
+ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
+ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
+ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
+Index: linux-3.2/kernel/trace/latency_hist.c
+===================================================================
+--- /dev/null
++++ linux-3.2/kernel/trace/latency_hist.c
+@@ -0,0 +1,1170 @@
++/*
++ * kernel/trace/latency_hist.c
++ *
++ * Add support for histograms of preemption-off latency and
++ * interrupt-off latency and wakeup latency; it depends on
++ * Real-Time Preemption Support.
++ *
++ *  Copyright (C) 2005 MontaVista Software, Inc.
++ *  Yi Yang <yyang at ch.mvista.com>
++ *
++ *  Converted to work with the new latency tracer.
++ *  Copyright (C) 2008 Red Hat, Inc.
++ *    Steven Rostedt <srostedt at redhat.com>
++ *
++ */
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/percpu.h>
++#include <linux/kallsyms.h>
++#include <linux/uaccess.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <asm/atomic.h>
++#include <asm/div64.h>
++
++#include "trace.h"
++#include <trace/events/sched.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/hist.h>
++
++enum {
++	IRQSOFF_LATENCY = 0,
++	PREEMPTOFF_LATENCY,
++	PREEMPTIRQSOFF_LATENCY,
++	WAKEUP_LATENCY,
++	WAKEUP_LATENCY_SHAREDPRIO,
++	MISSED_TIMER_OFFSETS,
++	TIMERANDWAKEUP_LATENCY,
++	MAX_LATENCY_TYPE,
++};
++
++#define MAX_ENTRY_NUM 10240
++
++struct hist_data {
++	atomic_t hist_mode; /* 0 log, 1 don't log */
++	long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
++	unsigned long min_lat;
++	unsigned long max_lat;
++	unsigned long long below_hist_bound_samples;
++	unsigned long long above_hist_bound_samples;
++	unsigned long long accumulate_lat;
++	unsigned long long total_samples;
++	unsigned long long hist_array[MAX_ENTRY_NUM];
++};
++
++struct enable_data {
++	int latency_type;
++	int enabled;
++};
++
++static char *latency_hist_dir_root = "latency_hist";
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
++static char *irqsoff_hist_dir = "irqsoff";
++static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
++static DEFINE_PER_CPU(int, hist_irqsoff_counting);
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
++static char *preemptoff_hist_dir = "preemptoff";
++static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
++static DEFINE_PER_CPU(int, hist_preemptoff_counting);
++#endif
++
++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
++static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
++static char *preemptirqsoff_hist_dir = "preemptirqsoff";
++static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
++static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
++#endif
++
++#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
++static struct enable_data preemptirqsoff_enabled_data = {
++	.latency_type = PREEMPTIRQSOFF_LATENCY,
++	.enabled = 0,
++};
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++struct maxlatproc_data {
++	char comm[FIELD_SIZEOF(struct task_struct, comm)];
++	char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
++	int pid;
++	int current_pid;
++	int prio;
++	int current_prio;
++	long latency;
++	long timeroffset;
++	cycle_t timestamp;
++};
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
++static char *wakeup_latency_hist_dir = "wakeup";
++static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
++static notrace void probe_wakeup_latency_hist_start(void *v,
++    struct task_struct *p, int success);
++static notrace void probe_wakeup_latency_hist_stop(void *v,
++    struct task_struct *prev, struct task_struct *next);
++static notrace void probe_sched_migrate_task(void *,
++    struct task_struct *task, int cpu);
++static struct enable_data wakeup_latency_enabled_data = {
++	.latency_type = WAKEUP_LATENCY,
++	.enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
++static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
++static DEFINE_PER_CPU(int, wakeup_sharedprio);
++static unsigned long wakeup_pid;
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
++static char *missed_timer_offsets_dir = "missed_timer_offsets";
++static notrace void probe_hrtimer_interrupt(void *v, int cpu,
++    long long offset, struct task_struct *curr, struct task_struct *task);
++static struct enable_data missed_timer_offsets_enabled_data = {
++	.latency_type = MISSED_TIMER_OFFSETS,
++	.enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
++static unsigned long missed_timer_offsets_pid;
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
++static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
++static struct enable_data timerandwakeup_enabled_data = {
++	.latency_type = TIMERANDWAKEUP_LATENCY,
++	.enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
++#endif
++
++void notrace latency_hist(int latency_type, int cpu, unsigned long latency,
++			  unsigned long timeroffset, cycle_t stop,
++			  struct task_struct *p)
++{
++	struct hist_data *my_hist;
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++	struct maxlatproc_data *mp = NULL;
++#endif
++
++	if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 ||
++	    latency_type >= MAX_LATENCY_TYPE)
++		return;
++
++	switch (latency_type) {
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++	case IRQSOFF_LATENCY:
++		my_hist = &per_cpu(irqsoff_hist, cpu);
++		break;
++#endif
++#ifdef CONFIG_PREEMPT_OFF_HIST
++	case PREEMPTOFF_LATENCY:
++		my_hist = &per_cpu(preemptoff_hist, cpu);
++		break;
++#endif
++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
++	case PREEMPTIRQSOFF_LATENCY:
++		my_hist = &per_cpu(preemptirqsoff_hist, cpu);
++		break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++	case WAKEUP_LATENCY:
++		my_hist = &per_cpu(wakeup_latency_hist, cpu);
++		mp = &per_cpu(wakeup_maxlatproc, cpu);
++		break;
++	case WAKEUP_LATENCY_SHAREDPRIO:
++		my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
++		mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
++		break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++	case MISSED_TIMER_OFFSETS:
++		my_hist = &per_cpu(missed_timer_offsets, cpu);
++		mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
++		break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++	case TIMERANDWAKEUP_LATENCY:
++		my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
++		mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
++		break;
++#endif
++
++	default:
++		return;
++	}
++
++	latency += my_hist->offset;
++
++	if (atomic_read(&my_hist->hist_mode) == 0)
++		return;
++
++	if (latency < 0 || latency >= MAX_ENTRY_NUM) {
++		if (latency < 0)
++			my_hist->below_hist_bound_samples++;
++		else
++			my_hist->above_hist_bound_samples++;
++	} else
++		my_hist->hist_array[latency]++;
++
++	if (unlikely(latency > my_hist->max_lat ||
++	    my_hist->min_lat == ULONG_MAX)) {
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++		if (latency_type == WAKEUP_LATENCY ||
++		    latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
++		    latency_type == MISSED_TIMER_OFFSETS ||
++		    latency_type == TIMERANDWAKEUP_LATENCY) {
++			strncpy(mp->comm, p->comm, sizeof(mp->comm));
++			strncpy(mp->current_comm, current->comm,
++			    sizeof(mp->current_comm));
++			mp->pid = task_pid_nr(p);
++			mp->current_pid = task_pid_nr(current);
++			mp->prio = p->prio;
++			mp->current_prio = current->prio;
++			mp->latency = latency;
++			mp->timeroffset = timeroffset;
++			mp->timestamp = stop;
++		}
++#endif
++		my_hist->max_lat = latency;
++	}
++	if (unlikely(latency < my_hist->min_lat))
++		my_hist->min_lat = latency;
++	my_hist->total_samples++;
++	my_hist->accumulate_lat += latency;
++}
++
++static void *l_start(struct seq_file *m, loff_t *pos)
++{
++	loff_t *index_ptr = NULL;
++	loff_t index = *pos;
++	struct hist_data *my_hist = m->private;
++
++	if (index == 0) {
++		char minstr[32], avgstr[32], maxstr[32];
++
++		atomic_dec(&my_hist->hist_mode);
++
++		if (likely(my_hist->total_samples)) {
++			unsigned long avg = (unsigned long)
++			    div64_u64(my_hist->accumulate_lat,
++			    my_hist->total_samples);
++			snprintf(minstr, sizeof(minstr), "%ld",
++			    (long) my_hist->min_lat - my_hist->offset);
++			snprintf(avgstr, sizeof(avgstr), "%ld",
++			    (long) avg - my_hist->offset);
++			snprintf(maxstr, sizeof(maxstr), "%ld",
++			    (long) my_hist->max_lat - my_hist->offset);
++		} else {
++			strcpy(minstr, "<undef>");
++			strcpy(avgstr, minstr);
++			strcpy(maxstr, minstr);
++		}
++
++		seq_printf(m, "#Minimum latency: %s microseconds\n"
++			   "#Average latency: %s microseconds\n"
++			   "#Maximum latency: %s microseconds\n"
++			   "#Total samples: %llu\n"
++			   "#There are %llu samples lower than %ld"
++			   " microseconds.\n"
++			   "#There are %llu samples greater or equal"
++			   " than %ld microseconds.\n"
++			   "#usecs\t%16s\n",
++			   minstr, avgstr, maxstr,
++			   my_hist->total_samples,
++			   my_hist->below_hist_bound_samples,
++			   -my_hist->offset,
++			   my_hist->above_hist_bound_samples,
++			   MAX_ENTRY_NUM - my_hist->offset,
++			   "samples");
++	}
++	if (index < MAX_ENTRY_NUM) {
++		index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
++		if (index_ptr)
++			*index_ptr = index;
++	}
++
++	return index_ptr;
++}
++
++static void *l_next(struct seq_file *m, void *p, loff_t *pos)
++{
++	loff_t *index_ptr = p;
++	struct hist_data *my_hist = m->private;
++
++	if (++*pos >= MAX_ENTRY_NUM) {
++		atomic_inc(&my_hist->hist_mode);
++		return NULL;
++	}
++	*index_ptr = *pos;
++	return index_ptr;
++}
++
++static void l_stop(struct seq_file *m, void *p)
++{
++	kfree(p);
++}
++
++static int l_show(struct seq_file *m, void *p)
++{
++	int index = *(loff_t *) p;
++	struct hist_data *my_hist = m->private;
++
++	seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
++	    my_hist->hist_array[index]);
++	return 0;
++}
++
++static struct seq_operations latency_hist_seq_op = {
++	.start = l_start,
++	.next  = l_next,
++	.stop  = l_stop,
++	.show  = l_show
++};
++
++static int latency_hist_open(struct inode *inode, struct file *file)
++{
++	int ret;
++
++	ret = seq_open(file, &latency_hist_seq_op);
++	if (!ret) {
++		struct seq_file *seq = file->private_data;
++		seq->private = inode->i_private;
++	}
++	return ret;
++}
++
++static struct file_operations latency_hist_fops = {
++	.open = latency_hist_open,
++	.read = seq_read,
++	.llseek = seq_lseek,
++	.release = seq_release,
++};
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static void clear_maxlatprocdata(struct maxlatproc_data *mp)
++{
++	mp->comm[0] = mp->current_comm[0] = '\0';
++	mp->prio = mp->current_prio = mp->pid = mp->current_pid =
++	    mp->latency = mp->timeroffset = -1;
++	mp->timestamp = 0;
++}
++#endif
++
++static void hist_reset(struct hist_data *hist)
++{
++	atomic_dec(&hist->hist_mode);
++
++	memset(hist->hist_array, 0, sizeof(hist->hist_array));
++	hist->below_hist_bound_samples = 0ULL;
++	hist->above_hist_bound_samples = 0ULL;
++	hist->min_lat = ULONG_MAX;
++	hist->max_lat = 0UL;
++	hist->total_samples = 0ULL;
++	hist->accumulate_lat = 0ULL;
++
++	atomic_inc(&hist->hist_mode);
++}
++
++static ssize_t
++latency_hist_reset(struct file *file, const char __user *a,
++		   size_t size, loff_t *off)
++{
++	int cpu;
++	struct hist_data *hist = NULL;
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++	struct maxlatproc_data *mp = NULL;
++#endif
++	off_t latency_type = (off_t) file->private_data;
++
++	for_each_online_cpu(cpu) {
++
++		switch (latency_type) {
++#ifdef CONFIG_PREEMPT_OFF_HIST
++		case PREEMPTOFF_LATENCY:
++			hist = &per_cpu(preemptoff_hist, cpu);
++			break;
++#endif
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++		case IRQSOFF_LATENCY:
++			hist = &per_cpu(irqsoff_hist, cpu);
++			break;
++#endif
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++		case PREEMPTIRQSOFF_LATENCY:
++			hist = &per_cpu(preemptirqsoff_hist, cpu);
++			break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++		case WAKEUP_LATENCY:
++			hist = &per_cpu(wakeup_latency_hist, cpu);
++			mp = &per_cpu(wakeup_maxlatproc, cpu);
++			break;
++		case WAKEUP_LATENCY_SHAREDPRIO:
++			hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
++			mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
++			break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++		case MISSED_TIMER_OFFSETS:
++			hist = &per_cpu(missed_timer_offsets, cpu);
++			mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
++			break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++		case TIMERANDWAKEUP_LATENCY:
++			hist = &per_cpu(timerandwakeup_latency_hist, cpu);
++			mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
++			break;
++#endif
++		}
++
++		hist_reset(hist);
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++		if (latency_type == WAKEUP_LATENCY ||
++		    latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
++		    latency_type == MISSED_TIMER_OFFSETS ||
++		    latency_type == TIMERANDWAKEUP_LATENCY)
++			clear_maxlatprocdata(mp);
++#endif
++	}
++
++	return size;
++}
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static ssize_t
++show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++	char buf[64];
++	int r;
++	unsigned long *this_pid = file->private_data;
++
++	r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
++	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++}
++
++static ssize_t do_pid(struct file *file, const char __user *ubuf,
++		      size_t cnt, loff_t *ppos)
++{
++	char buf[64];
++	unsigned long pid;
++	unsigned long *this_pid = file->private_data;
++
++	if (cnt >= sizeof(buf))
++		return -EINVAL;
++
++	if (copy_from_user(&buf, ubuf, cnt))
++		return -EFAULT;
++
++	buf[cnt] = '\0';
++
++	if (strict_strtoul(buf, 10, &pid))
++		return(-EINVAL);
++
++	*this_pid = pid;
++
++	return cnt;
++}
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static ssize_t
++show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++	int r;
++	struct maxlatproc_data *mp = file->private_data;
++	int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
++	unsigned long long t;
++	unsigned long usecs, secs;
++	char *buf;
++
++	if (mp->pid == -1 || mp->current_pid == -1) {
++		buf = "(none)\n";
++		return simple_read_from_buffer(ubuf, cnt, ppos, buf,
++		    strlen(buf));
++	}
++
++	buf = kmalloc(strmaxlen, GFP_KERNEL);
++	if (buf == NULL)
++		return -ENOMEM;
++
++	t = ns2usecs(mp->timestamp);
++	usecs = do_div(t, USEC_PER_SEC);
++	secs = (unsigned long) t;
++	r = snprintf(buf, strmaxlen,
++	    "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
++	    MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
++	    mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
++	    secs, usecs);
++	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++	kfree(buf);
++	return r;
++}
++#endif
++
++static ssize_t
++show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++	char buf[64];
++	struct enable_data *ed = file->private_data;
++	int r;
++
++	r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
++	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++}
++
++static ssize_t
++do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++	char buf[64];
++	long enable;
++	struct enable_data *ed = file->private_data;
++
++	if (cnt >= sizeof(buf))
++		return -EINVAL;
++
++	if (copy_from_user(&buf, ubuf, cnt))
++		return -EFAULT;
++
++	buf[cnt] = 0;
++
++	if (strict_strtol(buf, 10, &enable))
++		return(-EINVAL);
++
++	if ((enable && ed->enabled) || (!enable && !ed->enabled))
++		return cnt;
++
++	if (enable) {
++		int ret;
++
++		switch (ed->latency_type) {
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++		case PREEMPTIRQSOFF_LATENCY:
++			ret = register_trace_preemptirqsoff_hist(
++			    probe_preemptirqsoff_hist, NULL);
++			if (ret) {
++				pr_info("wakeup trace: Couldn't assign "
++				    "probe_preemptirqsoff_hist "
++				    "to trace_preemptirqsoff_hist\n");
++				return ret;
++			}
++			break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++		case WAKEUP_LATENCY:
++			ret = register_trace_sched_wakeup(
++			    probe_wakeup_latency_hist_start, NULL);
++			if (ret) {
++				pr_info("wakeup trace: Couldn't assign "
++				    "probe_wakeup_latency_hist_start "
++				    "to trace_sched_wakeup\n");
++				return ret;
++			}
++			ret = register_trace_sched_wakeup_new(
++			    probe_wakeup_latency_hist_start, NULL);
++			if (ret) {
++				pr_info("wakeup trace: Couldn't assign "
++				    "probe_wakeup_latency_hist_start "
++				    "to trace_sched_wakeup_new\n");
++				unregister_trace_sched_wakeup(
++				    probe_wakeup_latency_hist_start, NULL);
++				return ret;
++			}
++			ret = register_trace_sched_switch(
++			    probe_wakeup_latency_hist_stop, NULL);
++			if (ret) {
++				pr_info("wakeup trace: Couldn't assign "
++				    "probe_wakeup_latency_hist_stop "
++				    "to trace_sched_switch\n");
++				unregister_trace_sched_wakeup(
++				    probe_wakeup_latency_hist_start, NULL);
++				unregister_trace_sched_wakeup_new(
++				    probe_wakeup_latency_hist_start, NULL);
++				return ret;
++			}
++			ret = register_trace_sched_migrate_task(
++			    probe_sched_migrate_task, NULL);
++			if (ret) {
++				pr_info("wakeup trace: Couldn't assign "
++				    "probe_sched_migrate_task "
++				    "to trace_sched_migrate_task\n");
++				unregister_trace_sched_wakeup(
++				    probe_wakeup_latency_hist_start, NULL);
++				unregister_trace_sched_wakeup_new(
++				    probe_wakeup_latency_hist_start, NULL);
++				unregister_trace_sched_switch(
++				    probe_wakeup_latency_hist_stop, NULL);
++				return ret;
++			}
++			break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++		case MISSED_TIMER_OFFSETS:
++			ret = register_trace_hrtimer_interrupt(
++			    probe_hrtimer_interrupt, NULL);
++			if (ret) {
++				pr_info("wakeup trace: Couldn't assign "
++				    "probe_hrtimer_interrupt "
++				    "to trace_hrtimer_interrupt\n");
++				return ret;
++			}
++			break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++		case TIMERANDWAKEUP_LATENCY:
++			if (!wakeup_latency_enabled_data.enabled ||
++			    !missed_timer_offsets_enabled_data.enabled)
++				return -EINVAL;
++			break;
++#endif
++		default:
++			break;
++		}
++	} else {
++		switch (ed->latency_type) {
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++		case PREEMPTIRQSOFF_LATENCY:
++			{
++				int cpu;
++
++				unregister_trace_preemptirqsoff_hist(
++				    probe_preemptirqsoff_hist, NULL);
++				for_each_online_cpu(cpu) {
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++					per_cpu(hist_irqsoff_counting,
++					    cpu) = 0;
++#endif
++#ifdef CONFIG_PREEMPT_OFF_HIST
++					per_cpu(hist_preemptoff_counting,
++					    cpu) = 0;
++#endif
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++					per_cpu(hist_preemptirqsoff_counting,
++					    cpu) = 0;
++#endif
++				}
++			}
++			break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++		case WAKEUP_LATENCY:
++			{
++				int cpu;
++
++				unregister_trace_sched_wakeup(
++				    probe_wakeup_latency_hist_start, NULL);
++				unregister_trace_sched_wakeup_new(
++				    probe_wakeup_latency_hist_start, NULL);
++				unregister_trace_sched_switch(
++				    probe_wakeup_latency_hist_stop, NULL);
++				unregister_trace_sched_migrate_task(
++				    probe_sched_migrate_task, NULL);
++
++				for_each_online_cpu(cpu) {
++					per_cpu(wakeup_task, cpu) = NULL;
++					per_cpu(wakeup_sharedprio, cpu) = 0;
++				}
++			}
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++			timerandwakeup_enabled_data.enabled = 0;
++#endif
++			break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++		case MISSED_TIMER_OFFSETS:
++			unregister_trace_hrtimer_interrupt(
++			    probe_hrtimer_interrupt, NULL);
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++			timerandwakeup_enabled_data.enabled = 0;
++#endif
++			break;
++#endif
++		default:
++			break;
++		}
++	}
++	ed->enabled = enable;
++	return cnt;
++}
++
++static const struct file_operations latency_hist_reset_fops = {
++	.open = tracing_open_generic,
++	.write = latency_hist_reset,
++};
++
++static const struct file_operations enable_fops = {
++	.open = tracing_open_generic,
++	.read = show_enable,
++	.write = do_enable,
++};
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static const struct file_operations pid_fops = {
++	.open = tracing_open_generic,
++	.read = show_pid,
++	.write = do_pid,
++};
++
++static const struct file_operations maxlatproc_fops = {
++	.open = tracing_open_generic,
++	.read = show_maxlatproc,
++};
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++static notrace void probe_preemptirqsoff_hist(void *v, int reason,
++    int starthist)
++{
++	int cpu = raw_smp_processor_id();
++	int time_set = 0;
++
++	if (starthist) {
++		cycle_t uninitialized_var(start);
++
++		if (!preempt_count() && !irqs_disabled())
++			return;
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++		if ((reason == IRQS_OFF || reason == TRACE_START) &&
++		    !per_cpu(hist_irqsoff_counting, cpu)) {
++			per_cpu(hist_irqsoff_counting, cpu) = 1;
++			start = ftrace_now(cpu);
++			time_set++;
++			per_cpu(hist_irqsoff_start, cpu) = start;
++		}
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++		if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
++		    !per_cpu(hist_preemptoff_counting, cpu)) {
++			per_cpu(hist_preemptoff_counting, cpu) = 1;
++			if (!(time_set++))
++				start = ftrace_now(cpu);
++			per_cpu(hist_preemptoff_start, cpu) = start;
++		}
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++		if (per_cpu(hist_irqsoff_counting, cpu) &&
++		    per_cpu(hist_preemptoff_counting, cpu) &&
++		    !per_cpu(hist_preemptirqsoff_counting, cpu)) {
++			per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
++			if (!time_set)
++				start = ftrace_now(cpu);
++			per_cpu(hist_preemptirqsoff_start, cpu) = start;
++		}
++#endif
++	} else {
++		cycle_t uninitialized_var(stop);
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++		if ((reason == IRQS_ON || reason == TRACE_STOP) &&
++		    per_cpu(hist_irqsoff_counting, cpu)) {
++			cycle_t start = per_cpu(hist_irqsoff_start, cpu);
++
++			stop = ftrace_now(cpu);
++			time_set++;
++			if (start && stop >= start) {
++				unsigned long latency =
++				    nsecs_to_usecs(stop - start);
++
++				latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
++				    stop, NULL);
++			}
++			per_cpu(hist_irqsoff_counting, cpu) = 0;
++		}
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++		if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
++		    per_cpu(hist_preemptoff_counting, cpu)) {
++			cycle_t start = per_cpu(hist_preemptoff_start, cpu);
++
++			if (!(time_set++))
++				stop = ftrace_now(cpu);
++			if (start && stop >= start) {
++				unsigned long latency =
++				    nsecs_to_usecs(stop - start);
++
++				latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
++				    0, stop, NULL);
++			}
++			per_cpu(hist_preemptoff_counting, cpu) = 0;
++		}
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++		if ((!per_cpu(hist_irqsoff_counting, cpu) ||
++		     !per_cpu(hist_preemptoff_counting, cpu)) &&
++		   per_cpu(hist_preemptirqsoff_counting, cpu)) {
++			cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
++
++			if (!time_set)
++				stop = ftrace_now(cpu);
++			if (start && stop >= start) {
++				unsigned long latency =
++				    nsecs_to_usecs(stop - start);
++				latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
++				    latency, 0, stop, NULL);
++			}
++			per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
++		}
++#endif
++	}
++}
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++static DEFINE_RAW_SPINLOCK(wakeup_lock);
++static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
++    int cpu)
++{
++	int old_cpu = task_cpu(task);
++
++	if (cpu != old_cpu) {
++		unsigned long flags;
++		struct task_struct *cpu_wakeup_task;
++
++		raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++		cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
++		if (task == cpu_wakeup_task) {
++			put_task_struct(cpu_wakeup_task);
++			per_cpu(wakeup_task, old_cpu) = NULL;
++			cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
++			get_task_struct(cpu_wakeup_task);
++		}
++
++		raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++	}
++}
++
++static notrace void probe_wakeup_latency_hist_start(void *v,
++    struct task_struct *p, int success)
++{
++	unsigned long flags;
++	struct task_struct *curr = current;
++	int cpu = task_cpu(p);
++	struct task_struct *cpu_wakeup_task;
++
++	raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++	cpu_wakeup_task = per_cpu(wakeup_task, cpu);
++
++	if (wakeup_pid) {
++		if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
++		    p->prio == curr->prio)
++			per_cpu(wakeup_sharedprio, cpu) = 1;
++		if (likely(wakeup_pid != task_pid_nr(p)))
++			goto out;
++	} else {
++		if (likely(!rt_task(p)) ||
++		    (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
++		    p->prio > curr->prio)
++			goto out;
++		if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
++		    p->prio == curr->prio)
++			per_cpu(wakeup_sharedprio, cpu) = 1;
++	}
++
++	if (cpu_wakeup_task)
++		put_task_struct(cpu_wakeup_task);
++	cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
++	get_task_struct(cpu_wakeup_task);
++	cpu_wakeup_task->preempt_timestamp_hist =
++		ftrace_now(raw_smp_processor_id());
++out:
++	raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++}
++
++static notrace void probe_wakeup_latency_hist_stop(void *v,
++    struct task_struct *prev, struct task_struct *next)
++{
++	unsigned long flags;
++	int cpu = task_cpu(next);
++	unsigned long latency;
++	cycle_t stop;
++	struct task_struct *cpu_wakeup_task;
++
++	raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++	cpu_wakeup_task = per_cpu(wakeup_task, cpu);
++
++	if (cpu_wakeup_task == NULL)
++		goto out;
++
++	/* Already running? */
++	if (unlikely(current == cpu_wakeup_task))
++		goto out_reset;
++
++	if (next != cpu_wakeup_task) {
++		if (next->prio < cpu_wakeup_task->prio)
++			goto out_reset;
++
++		if (next->prio == cpu_wakeup_task->prio)
++			per_cpu(wakeup_sharedprio, cpu) = 1;
++
++		goto out;
++	}
++
++	/*
++	 * The task we are waiting for is about to be switched to.
++	 * Calculate latency and store it in histogram.
++	 */
++	stop = ftrace_now(raw_smp_processor_id());
++
++	latency = nsecs_to_usecs(stop - next->preempt_timestamp_hist);
++
++	if (per_cpu(wakeup_sharedprio, cpu)) {
++		latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
++		    next);
++		per_cpu(wakeup_sharedprio, cpu) = 0;
++	} else {
++		latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++		if (timerandwakeup_enabled_data.enabled) {
++			latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
++			    next->timer_offset + latency, next->timer_offset,
++			    stop, next);
++		}
++#endif
++	}
++
++out_reset:
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++	next->timer_offset = 0;
++#endif
++	put_task_struct(cpu_wakeup_task);
++	per_cpu(wakeup_task, cpu) = NULL;
++out:
++	raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++}
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++static notrace void probe_hrtimer_interrupt(void *v, int cpu,
++    long long latency_ns, struct task_struct *curr, struct task_struct *task)
++{
++	if (latency_ns <= 0 && task != NULL && rt_task(task) &&
++	    (task->prio < curr->prio ||
++	    (task->prio == curr->prio &&
++	    !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
++		unsigned long latency;
++		cycle_t now;
++
++		if (missed_timer_offsets_pid) {
++			if (likely(missed_timer_offsets_pid !=
++			    task_pid_nr(task)))
++				return;
++		}
++
++		now = ftrace_now(cpu);
++		latency = (unsigned long) div_s64(-latency_ns, 1000);
++		latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
++		    task);
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++		task->timer_offset = latency;
++#endif
++	}
++}
++#endif
++
++static __init int latency_hist_init(void)
++{
++	struct dentry *latency_hist_root = NULL;
++	struct dentry *dentry;
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++	struct dentry *dentry_sharedprio;
++#endif
++	struct dentry *entry;
++	struct dentry *enable_root;
++	int i = 0;
++	struct hist_data *my_hist;
++	char name[64];
++	char *cpufmt = "CPU%d";
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++	char *cpufmt_maxlatproc = "max_latency-CPU%d";
++	struct maxlatproc_data *mp = NULL;
++#endif
++
++	dentry = tracing_init_dentry();
++	latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
++	enable_root = debugfs_create_dir("enable", latency_hist_root);
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++	dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
++	for_each_possible_cpu(i) {
++		sprintf(name, cpufmt, i);
++		entry = debugfs_create_file(name, 0444, dentry,
++		    &per_cpu(irqsoff_hist, i), &latency_hist_fops);
++		my_hist = &per_cpu(irqsoff_hist, i);
++		atomic_set(&my_hist->hist_mode, 1);
++		my_hist->min_lat = 0xFFFFFFFFUL;
++	}
++	entry = debugfs_create_file("reset", 0644, dentry,
++	    (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++	dentry = debugfs_create_dir(preemptoff_hist_dir,
++	    latency_hist_root);
++	for_each_possible_cpu(i) {
++		sprintf(name, cpufmt, i);
++		entry = debugfs_create_file(name, 0444, dentry,
++		    &per_cpu(preemptoff_hist, i), &latency_hist_fops);
++		my_hist = &per_cpu(preemptoff_hist, i);
++		atomic_set(&my_hist->hist_mode, 1);
++		my_hist->min_lat = 0xFFFFFFFFUL;
++	}
++	entry = debugfs_create_file("reset", 0644, dentry,
++	    (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++	dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
++	    latency_hist_root);
++	for_each_possible_cpu(i) {
++		sprintf(name, cpufmt, i);
++		entry = debugfs_create_file(name, 0444, dentry,
++		    &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
++		my_hist = &per_cpu(preemptirqsoff_hist, i);
++		atomic_set(&my_hist->hist_mode, 1);
++		my_hist->min_lat = 0xFFFFFFFFUL;
++	}
++	entry = debugfs_create_file("reset", 0644, dentry,
++	    (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++	entry = debugfs_create_file("preemptirqsoff", 0644,
++	    enable_root, (void *)&preemptirqsoff_enabled_data,
++	    &enable_fops);
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++	dentry = debugfs_create_dir(wakeup_latency_hist_dir,
++	    latency_hist_root);
++	dentry_sharedprio = debugfs_create_dir(
++	    wakeup_latency_hist_dir_sharedprio, dentry);
++	for_each_possible_cpu(i) {
++		sprintf(name, cpufmt, i);
++
++		entry = debugfs_create_file(name, 0444, dentry,
++		    &per_cpu(wakeup_latency_hist, i),
++		    &latency_hist_fops);
++		my_hist = &per_cpu(wakeup_latency_hist, i);
++		atomic_set(&my_hist->hist_mode, 1);
++		my_hist->min_lat = 0xFFFFFFFFUL;
++
++		entry = debugfs_create_file(name, 0444, dentry_sharedprio,
++		    &per_cpu(wakeup_latency_hist_sharedprio, i),
++		    &latency_hist_fops);
++		my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
++		atomic_set(&my_hist->hist_mode, 1);
++		my_hist->min_lat = 0xFFFFFFFFUL;
++
++		sprintf(name, cpufmt_maxlatproc, i);
++
++		mp = &per_cpu(wakeup_maxlatproc, i);
++		entry = debugfs_create_file(name, 0444, dentry, mp,
++		    &maxlatproc_fops);
++		clear_maxlatprocdata(mp);
++
++		mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
++		entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
++		    &maxlatproc_fops);
++		clear_maxlatprocdata(mp);
++	}
++	entry = debugfs_create_file("pid", 0644, dentry,
++	    (void *)&wakeup_pid, &pid_fops);
++	entry = debugfs_create_file("reset", 0644, dentry,
++	    (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
++	entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
++	    (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
++	entry = debugfs_create_file("wakeup", 0644,
++	    enable_root, (void *)&wakeup_latency_enabled_data,
++	    &enable_fops);
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++	dentry = debugfs_create_dir(missed_timer_offsets_dir,
++	    latency_hist_root);
++	for_each_possible_cpu(i) {
++		sprintf(name, cpufmt, i);
++		entry = debugfs_create_file(name, 0444, dentry,
++		    &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
++		my_hist = &per_cpu(missed_timer_offsets, i);
++		atomic_set(&my_hist->hist_mode, 1);
++		my_hist->min_lat = 0xFFFFFFFFUL;
++
++		sprintf(name, cpufmt_maxlatproc, i);
++		mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
++		entry = debugfs_create_file(name, 0444, dentry, mp,
++		    &maxlatproc_fops);
++		clear_maxlatprocdata(mp);
++	}
++	entry = debugfs_create_file("pid", 0644, dentry,
++	    (void *)&missed_timer_offsets_pid, &pid_fops);
++	entry = debugfs_create_file("reset", 0644, dentry,
++	    (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
++	entry = debugfs_create_file("missed_timer_offsets", 0644,
++	    enable_root, (void *)&missed_timer_offsets_enabled_data,
++	    &enable_fops);
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++    defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++	dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
++	    latency_hist_root);
++	for_each_possible_cpu(i) {
++		sprintf(name, cpufmt, i);
++		entry = debugfs_create_file(name, 0444, dentry,
++		    &per_cpu(timerandwakeup_latency_hist, i),
++		    &latency_hist_fops);
++		my_hist = &per_cpu(timerandwakeup_latency_hist, i);
++		atomic_set(&my_hist->hist_mode, 1);
++		my_hist->min_lat = 0xFFFFFFFFUL;
++
++		sprintf(name, cpufmt_maxlatproc, i);
++		mp = &per_cpu(timerandwakeup_maxlatproc, i);
++		entry = debugfs_create_file(name, 0444, dentry, mp,
++		    &maxlatproc_fops);
++		clear_maxlatprocdata(mp);
++	}
++	entry = debugfs_create_file("reset", 0644, dentry,
++	    (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
++	entry = debugfs_create_file("timerandwakeup", 0644,
++	    enable_root, (void *)&timerandwakeup_enabled_data,
++	    &enable_fops);
++#endif
++	return 0;
++}
++
++__initcall(latency_hist_init);
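
For orientation, the debugfs files created above can be driven from user space.
This is a sketch only: the "latency_hist" root below the tracing debugfs
directory and the "wakeup" histogram directory name are assumptions based on
other parts of this patch, not on this hunk alone.

	# cd /sys/kernel/debug/tracing/latency_hist
	# echo 1 > enable/wakeup     # registers the wakeup tracepoint probes (do_enable)
	# cat wakeup/CPU0            # per-CPU histogram created in latency_hist_init()
	# echo 0 > wakeup/pid        # 0 = no PID filter, i.e. track all RT tasks
	# echo 1 > wakeup/reset      # clears the histogram via latency_hist_reset()
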
+Index: linux-3.2/kernel/trace/trace_irqsoff.c
+===================================================================
+--- linux-3.2.orig/kernel/trace/trace_irqsoff.c
++++ linux-3.2/kernel/trace/trace_irqsoff.c
+@@ -17,6 +17,7 @@
+ #include <linux/fs.h>
+ 
+ #include "trace.h"
++#include <trace/events/hist.h>
+ 
+ static struct trace_array		*irqsoff_trace __read_mostly;
+ static int				tracer_enabled __read_mostly;
+@@ -426,11 +427,13 @@ void start_critical_timings(void)
+ {
+ 	if (preempt_trace() || irq_trace())
+ 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++	trace_preemptirqsoff_hist(TRACE_START, 1);
+ }
+ EXPORT_SYMBOL_GPL(start_critical_timings);
+ 
+ void stop_critical_timings(void)
+ {
++	trace_preemptirqsoff_hist(TRACE_STOP, 0);
+ 	if (preempt_trace() || irq_trace())
+ 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -440,6 +443,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+ #ifdef CONFIG_PROVE_LOCKING
+ void time_hardirqs_on(unsigned long a0, unsigned long a1)
+ {
++	trace_preemptirqsoff_hist(IRQS_ON, 0);
+ 	if (!preempt_trace() && irq_trace())
+ 		stop_critical_timing(a0, a1);
+ }
+@@ -448,6 +452,7 @@ void time_hardirqs_off(unsigned long a0,
+ {
+ 	if (!preempt_trace() && irq_trace())
+ 		start_critical_timing(a0, a1);
++	trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ 
+ #else /* !CONFIG_PROVE_LOCKING */
+@@ -473,6 +478,7 @@ inline void print_irqtrace_events(struct
+  */
+ void trace_hardirqs_on(void)
+ {
++	trace_preemptirqsoff_hist(IRQS_ON, 0);
+ 	if (!preempt_trace() && irq_trace())
+ 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -482,11 +488,13 @@ void trace_hardirqs_off(void)
+ {
+ 	if (!preempt_trace() && irq_trace())
+ 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++	trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off);
+ 
+ void trace_hardirqs_on_caller(unsigned long caller_addr)
+ {
++	trace_preemptirqsoff_hist(IRQS_ON, 0);
+ 	if (!preempt_trace() && irq_trace())
+ 		stop_critical_timing(CALLER_ADDR0, caller_addr);
+ }
+@@ -496,6 +504,7 @@ void trace_hardirqs_off_caller(unsigned 
+ {
+ 	if (!preempt_trace() && irq_trace())
+ 		start_critical_timing(CALLER_ADDR0, caller_addr);
++	trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
+ 
+@@ -505,12 +514,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
+ #ifdef CONFIG_PREEMPT_TRACER
+ void trace_preempt_on(unsigned long a0, unsigned long a1)
+ {
++	trace_preemptirqsoff_hist(PREEMPT_ON, 0);
+ 	if (preempt_trace() && !irq_trace())
+ 		stop_critical_timing(a0, a1);
+ }
+ 
+ void trace_preempt_off(unsigned long a0, unsigned long a1)
+ {
++	trace_preemptirqsoff_hist(PREEMPT_ON, 1);
+ 	if (preempt_trace() && !irq_trace())
+ 		start_critical_timing(a0, a1);
+ }
+Index: linux-3.2/Documentation/hwlat_detector.txt
+===================================================================
+--- /dev/null
++++ linux-3.2/Documentation/hwlat_detector.txt
+@@ -0,0 +1,64 @@
++Introduction:
++-------------
++
++The module hwlat_detector is a special purpose kernel module that is used to
++detect large system latencies induced by the behavior of certain underlying
++hardware or firmware, independent of Linux itself. The code was developed
++originally to detect SMIs (System Management Interrupts) on x86 systems,
++however there is nothing x86 specific about this patchset. It was
++originally written for use by the "RT" patch since the Real Time
++kernel is highly latency sensitive.
++
++SMIs are usually not serviced by the Linux kernel, which typically does not
++even know that they are occurring. SMIs are instead set up by BIOS code
++and are serviced by BIOS code, usually for "critical" events such as
++management of thermal sensors and fans. Sometimes though, SMIs are used for
++other tasks and those tasks can spend an inordinate amount of time in the
++handler (sometimes measured in milliseconds). Obviously this is a problem if
++you are trying to keep event service latencies down in the microsecond range.
++
++The hardware latency detector works by hogging all of the CPUs for configurable
++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
++for some period, then looking for gaps in the TSC data. Any gap indicates a
++time when the polling was interrupted, and since the machine is stopped and
++interrupts are turned off, the only thing that could do that would be an SMI.
++
++Note that the SMI detector should *NEVER* be used in a production environment.
++It is intended to be run manually to determine if the hardware platform has a
++problem with long system firmware service routines.
++
++Usage:
++------
++
++Loading the module hwlat_detector with the parameter "enabled=1" (or toggling
++the "enable" entry in the "hwlat_detector" debugfs directory on) is the only
++step required to start the hwlat_detector. It is possible to redefine the
++threshold in microseconds (us) above which latency spikes will be taken
++into account (parameter "threshold=").
++
++Example:
++
++	# modprobe hwlat_detector enabled=1 threshold=100
++
++After the module is loaded, it creates a directory named "hwlat_detector" under
++the debugfs mountpoint; this text refers to it as "/debug/hwlat_detector". It is
++necessary to have debugfs mounted, which might be on /sys/debug on your system.
++
++The /debug/hwlat_detector interface contains the following files:
++
++count			- number of latency spikes observed since last reset
++enable			- a global enable/disable toggle (0/1), resets count
++max			- maximum hardware latency actually observed (usecs)
++sample			- a pipe from which to read current raw sample data
++			  in the format <timestamp> <latency observed usecs>
++			  (can be opened O_NONBLOCK for a single sample)
++threshold		- minimum latency value to be considered (usecs)
++width			- time period to sample with CPUs held (usecs)
++			  must be less than the total window size (enforced)
++window			- total period of sampling, width being inside (usecs)
++
++By default we will set width to 500,000 and window to 1,000,000, meaning that
++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
++observe any latencies that exceed the threshold (initially 100 usecs),
++then we write to a global sample ring buffer of 8K samples, which is
++consumed by reading from the "sample" (pipe) debugfs file interface.
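++
++A minimal shell session, assuming debugfs is reachable at the "/debug" path
++used in this text, might look like:
++
++	# modprobe hwlat_detector
++	# echo 250000  > /debug/hwlat_detector/width    # sample for 0.25s ...
++	# echo 1000000 > /debug/hwlat_detector/window   # ... out of every 1s
++	# echo 1 > /debug/hwlat_detector/enable         # resets stats, starts the kthread
++	# cat /debug/hwlat_detector/sample              # blocks until a spike exceeds threshold
++	# cat /debug/hwlat_detector/max                 # worst latency observed so far (usecs)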
+Index: linux-3.2/MAINTAINERS
+===================================================================
+--- linux-3.2.orig/MAINTAINERS
++++ linux-3.2/MAINTAINERS
+@@ -3008,6 +3008,15 @@ L:	linuxppc-dev at lists.ozlabs.org
+ S:	Odd Fixes
+ F:	drivers/tty/hvc/
+ 
++HARDWARE LATENCY DETECTOR
++P:	Jon Masters
++M:	jcm at jonmasters.org
++W:	http://www.kernel.org/pub/linux/kernel/people/jcm/hwlat_detector/
++S:	Supported
++L:	linux-kernel at vger.kernel.org
++F:	Documentation/hwlat_detector.txt
++F:	drivers/misc/hwlat_detector.c
++
+ HARDWARE MONITORING
+ M:	Jean Delvare <khali at linux-fr.org>
+ M:	Guenter Roeck <guenter.roeck at ericsson.com>
+Index: linux-3.2/drivers/misc/Makefile
+===================================================================
+--- linux-3.2.orig/drivers/misc/Makefile
++++ linux-3.2/drivers/misc/Makefile
+@@ -48,3 +48,4 @@ obj-y				+= lis3lv02d/
+ obj-y				+= carma/
+ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
+ obj-$(CONFIG_ALTERA_STAPL)	+=altera-stapl/
++obj-$(CONFIG_HWLAT_DETECTOR)	+= hwlat_detector.o
+Index: linux-3.2/drivers/misc/hwlat_detector.c
+===================================================================
+--- /dev/null
++++ linux-3.2/drivers/misc/hwlat_detector.c
+@@ -0,0 +1,1212 @@
++/*
++ * hwlat_detector.c - A simple Hardware Latency detector.
++ *
++ * Use this module to detect large system latencies induced by the behavior of
++ * certain underlying system hardware or firmware, independent of Linux itself.
++ * The code was developed originally to detect the presence of SMIs on Intel
++ * and AMD systems, although there is no dependency upon x86 herein.
++ *
++ * The classical example usage of this module is in detecting the presence of
++ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
++ * somewhat special form of hardware interrupt spawned from earlier CPU debug
++ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
++ * LPC (or other device) to generate a special interrupt under certain
++ * circumstances, for example, upon expiration of a special SMI timer device,
++ * due to certain external thermal readings, on certain I/O address accesses,
++ * and other situations. An SMI hits a special CPU pin, triggers a special
++ * SMI mode (complete with special memory map), and the OS is unaware.
++ *
++ * Although certain hardware-inducing latencies are necessary (for example,
++ * a modern system often requires an SMI handler for correct thermal control
++ * and remote management) they can wreak havoc upon any OS-level performance
++ * guarantees toward low-latency, especially when the OS is not even made
++ * aware of the presence of these interrupts. For this reason, we need a
++ * somewhat brute force mechanism to detect these interrupts. In this case,
++ * we do it by hogging all of the CPU(s) for configurable timer intervals,
++ * sampling the built-in CPU timer, looking for discontiguous readings.
++ *
++ * WARNING: This implementation necessarily introduces latencies. Therefore,
++ *          you should NEVER use this module in a production environment
++ *          requiring any kind of low-latency performance guarantee(s).
++ *
++ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm at redhat.com>
++ *
++ * Includes useful feedback from Clark Williams <clark at redhat.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/ring_buffer.h>
++#include <linux/stop_machine.h>
++#include <linux/time.h>
++#include <linux/hrtimer.h>
++#include <linux/kthread.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/uaccess.h>
++#include <linux/version.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++
++#define BUF_SIZE_DEFAULT	262144UL		/* 8K*(sizeof(entry)) */
++#define BUF_FLAGS		(RB_FL_OVERWRITE)	/* no block on full */
++#define U64STR_SIZE		22			/* 20 digits max */
++
++#define VERSION			"1.0.0"
++#define BANNER			"hwlat_detector: "
++#define DRVNAME			"hwlat_detector"
++#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
++#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
++#define DEFAULT_LAT_THRESHOLD	10			/* 10us */
++
++/* Module metadata */
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jon Masters <jcm at redhat.com>");
++MODULE_DESCRIPTION("A simple hardware latency detector");
++MODULE_VERSION(VERSION);
++
++/* Module parameters */
++
++static int debug;
++static int enabled;
++static int threshold;
++
++module_param(debug, int, 0);			/* enable debug */
++module_param(enabled, int, 0);			/* enable detector */
++module_param(threshold, int, 0);		/* latency threshold */
++
++/* Buffering and sampling */
++
++static struct ring_buffer *ring_buffer;		/* sample buffer */
++static DEFINE_MUTEX(ring_buffer_mutex);		/* lock changes */
++static unsigned long buf_size = BUF_SIZE_DEFAULT;
++static struct task_struct *kthread;		/* sampling thread */
++
++/* DebugFS filesystem entries */
++
++static struct dentry *debug_dir;		/* debugfs directory */
++static struct dentry *debug_max;		/* maximum TSC delta */
++static struct dentry *debug_count;		/* total detect count */
++static struct dentry *debug_sample_width;	/* sample width us */
++static struct dentry *debug_sample_window;	/* sample window us */
++static struct dentry *debug_sample;		/* raw samples us */
++static struct dentry *debug_threshold;		/* threshold us */
++static struct dentry *debug_enable;		/* enable/disable */
++
++/* Individual samples and global state */
++
++struct sample;					/* latency sample */
++struct data;					/* Global state */
++
++/* Sampling functions */
++static int __buffer_add_sample(struct sample *sample);
++static struct sample *buffer_get_sample(struct sample *sample);
++static int get_sample(void *unused);
++
++/* Threading and state */
++static int kthread_fn(void *unused);
++static int start_kthread(void);
++static int stop_kthread(void);
++static void __reset_stats(void);
++static int init_stats(void);
++
++/* Debugfs interface */
++static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
++				size_t cnt, loff_t *ppos, const u64 *entry);
++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
++				 size_t cnt, loff_t *ppos, u64 *entry);
++static int debug_sample_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++				  size_t cnt, loff_t *ppos);
++static int debug_sample_release(struct inode *inode, struct file *filp);
++static int debug_enable_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
++				  size_t cnt, loff_t *ppos);
++static ssize_t debug_enable_fwrite(struct file *file,
++				   const char __user *user_buffer,
++				   size_t user_size, loff_t *offset);
++
++/* Initialization functions */
++static int init_debugfs(void);
++static void free_debugfs(void);
++static int detector_init(void);
++static void detector_exit(void);
++
++/* Individual latency samples are stored here when detected and packed into
++ * the ring_buffer circular buffer, where they are overwritten when
++ * more than buf_size/sizeof(sample) samples are received. */
++struct sample {
++	u64		seqnum;		/* unique sequence */
++	u64		duration;	/* ktime delta */
++	struct timespec	timestamp;	/* wall time */
++	unsigned long   lost;
++};
++
++/* keep the global state somewhere. Mostly used under stop_machine. */
++static struct data {
++
++	struct mutex lock;		/* protect changes */
++
++	u64	count;			/* total since reset */
++	u64	max_sample;		/* max hardware latency */
++	u64	threshold;		/* sample threshold level */
++
++	u64	sample_window;		/* total sampling window (on+off) */
++	u64	sample_width;		/* active sampling portion of window */
++
++	atomic_t sample_open;		/* whether the sample file is open */
++
++	wait_queue_head_t wq;		/* waitqueue for new sample values */
++
++} data;
++
++/**
++ * __buffer_add_sample - add a new latency sample recording to the ring buffer
++ * @sample: The new latency sample value
++ *
++ * This receives a new latency sample and records it in a global ring buffer.
++ * No additional locking is used in this case - suited for stop_machine use.
++ */
++static int __buffer_add_sample(struct sample *sample)
++{
++	return ring_buffer_write(ring_buffer,
++				 sizeof(struct sample), sample);
++}
++
++/**
++ * buffer_get_sample - remove a hardware latency sample from the ring buffer
++ * @sample: Pre-allocated storage for the sample
++ *
++ * This retrieves a hardware latency sample from the global circular buffer
++ */
++static struct sample *buffer_get_sample(struct sample *sample)
++{
++	struct ring_buffer_event *e = NULL;
++	struct sample *s = NULL;
++	unsigned int cpu = 0;
++
++	if (!sample)
++		return NULL;
++
++	mutex_lock(&ring_buffer_mutex);
++	for_each_online_cpu(cpu) {
++		e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
++		if (e)
++			break;
++	}
++
++	if (e) {
++		s = ring_buffer_event_data(e);
++		memcpy(sample, s, sizeof(struct sample));
++	} else
++		sample = NULL;
++	mutex_unlock(&ring_buffer_mutex);
++
++	return sample;
++}
++
++/**
++ * get_sample - sample the CPU TSC and look for likely hardware latencies
++ * @unused: This is not used but is a part of the stop_machine API
++ *
++ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
++ * hardware-induced latency. Called under stop_machine, with data.lock held.
++ */
++static int get_sample(void *unused)
++{
++	ktime_t start, t1, t2;
++	s64 diff, total = 0;
++	u64 sample = 0;
++	int ret = 1;
++
++	start = ktime_get(); /* start timestamp */
++
++	do {
++
++		t1 = ktime_get();	/* we'll look for a discontinuity */
++		t2 = ktime_get();
++
++		total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
++		diff = ktime_to_us(ktime_sub(t2, t1));     /* current diff */
++
++		/* This shouldn't happen */
++		if (diff < 0) {
++			printk(KERN_ERR BANNER "time running backwards\n");
++			goto out;
++		}
++
++		if (diff > sample)
++			sample = diff; /* only want highest value */
++
++	} while (total <= data.sample_width);
++
++	/* If we exceed the threshold value, we have found a hardware latency */
++	if (sample > data.threshold) {
++		struct sample s;
++
++		data.count++;
++		s.seqnum = data.count;
++		s.duration = sample;
++		s.timestamp = CURRENT_TIME;
++		__buffer_add_sample(&s);
++
++		/* Keep a running maximum ever recorded hardware latency */
++		if (sample > data.max_sample)
++			data.max_sample = sample;
++	}
++
++	ret = 0;
++out:
++	return ret;
++}
++
++/*
++ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
++ * @unused: A required part of the kthread API.
++ *
++ * Used to periodically sample the CPU TSC via a call to get_sample. We
++ * use stop_machine, which does (intentionally) introduce latency since we
++ * need to ensure nothing else might be running (and thus pre-empting).
++ * Obviously this should never be used in production environments.
++ *
++ * stop_machine will schedule us typically only on CPU0 which is fine for
++ * almost every real-world hardware latency situation - but we might later
++ * generalize this if we find there are any actual systems with alternate
++ * SMI delivery or other non-CPU0 hardware latencies.
++ */
++static int kthread_fn(void *unused)
++{
++	int err = 0;
++	u64 interval = 0;
++
++	while (!kthread_should_stop()) {
++
++		mutex_lock(&data.lock);
++
++		err = stop_machine(get_sample, unused, 0);
++		if (err) {
++			/* Houston, we have a problem */
++			mutex_unlock(&data.lock);
++			goto err_out;
++		}
++
++		wake_up(&data.wq); /* wake up reader(s) */
++
++		interval = data.sample_window - data.sample_width;
++		do_div(interval, USEC_PER_MSEC); /* modifies interval value */
++
++		mutex_unlock(&data.lock);
++
++		if (msleep_interruptible(interval))
++			goto out;
++	}
++	goto out;
++err_out:
++	printk(KERN_ERR BANNER "could not call stop_machine, disabling\n");
++	enabled = 0;
++out:
++	return err;
++
++}
++
++/**
++ * start_kthread - Kick off the hardware latency sampling/detector kthread
++ *
++ * This starts a kernel thread that will sit and sample the CPU timestamp
++ * counter (TSC or similar) and look for potential hardware latencies.
++ */
++static int start_kthread(void)
++{
++	kthread = kthread_run(kthread_fn, NULL,
++					DRVNAME);
++	if (IS_ERR(kthread)) {
++		printk(KERN_ERR BANNER "could not start sampling thread\n");
++		enabled = 0;
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++/**
++ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
++ *
++ * This kicks the running hardware latency sampling/detector kernel thread and
++ * tells it to stop sampling now. Use this on unload and at system shutdown.
++ */
++static int stop_kthread(void)
++{
++	int ret;
++
++	ret = kthread_stop(kthread);
++
++	return ret;
++}
++
++/**
++ * __reset_stats - Reset statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We call this
++ * function in order to reset those when "enable" is toggled on or off, and
++ * also at initialization. Should be called with data.lock held.
++ */
++static void __reset_stats(void)
++{
++	data.count = 0;
++	data.max_sample = 0;
++	ring_buffer_reset(ring_buffer); /* flush out old sample entries */
++}
++
++/**
++ * init_stats - Setup global state statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We also use
++ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
++ * induced system latencies. This function initializes these structures and
++ * allocates the global ring buffer also.
++ */
++static int init_stats(void)
++{
++	int ret = -ENOMEM;
++
++	mutex_init(&data.lock);
++	init_waitqueue_head(&data.wq);
++	atomic_set(&data.sample_open, 0);
++
++	ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
++
++	if (WARN(!ring_buffer, KERN_ERR BANNER
++			       "failed to allocate ring buffer!\n"))
++		goto out;
++
++	__reset_stats();
++	data.threshold = DEFAULT_LAT_THRESHOLD;	    /* threshold us */
++	data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
++	data.sample_width = DEFAULT_SAMPLE_WIDTH;   /* width us */
++
++	ret = 0;
++
++out:
++	return ret;
++
++}
++
++/*
++ * simple_data_read - Wrapper read function for global state debugfs entries
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ * @entry: The entry to read from
++ *
++ * This function provides a generic read implementation for the global state
++ * "data" structure debugfs filesystem entries. It would be nice to use
++ * simple_attr_read directly, but we need to make sure that the data.lock
++ * mutex is held during the actual read (even though we likely won't ever
++ * actually race here as the updater runs under a stop_machine context).
++ */
++static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
++				size_t cnt, loff_t *ppos, const u64 *entry)
++{
++	char buf[U64STR_SIZE];
++	u64 val = 0;
++	int len = 0;
++
++	memset(buf, 0, sizeof(buf));
++
++	if (!entry)
++		return -EFAULT;
++
++	mutex_lock(&data.lock);
++	val = *entry;
++	mutex_unlock(&data.lock);
++
++	len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
++
++	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
++
++}
++
++/*
++ * simple_data_write - Wrapper write function for global state debugfs entries
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to write value from
++ * @cnt: The maximum number of bytes to write
++ * @ppos: The current "file" position
++ * @entry: The entry to write to
++ *
++ * This function provides a generic write implementation for the global state
++ * "data" structure debugfs filesystem entries. It would be nice to use
++ * simple_attr_write directly, but we need to make sure that the data.lock
++ * mutex is held during the actual write (even though we likely won't ever
++ * actually race here as the updater runs under a stop_machine context).
++ */
++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
++				 size_t cnt, loff_t *ppos, u64 *entry)
++{
++	char buf[U64STR_SIZE];
++	int csize = min(cnt, sizeof(buf));
++	u64 val = 0;
++	int err = 0;
++
++	memset(buf, '\0', sizeof(buf));
++	if (copy_from_user(buf, ubuf, csize))
++		return -EFAULT;
++
++	buf[U64STR_SIZE-1] = '\0';			/* just in case */
++	err = strict_strtoull(buf, 10, &val);
++	if (err)
++		return -EINVAL;
++
++	mutex_lock(&data.lock);
++	*entry = val;
++	mutex_unlock(&data.lock);
++
++	return csize;
++}
++
++/**
++ * debug_count_fopen - Open function for "count" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "count" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_count_fopen(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++/**
++ * debug_count_fread - Read function for "count" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "count" debugfs
++ * interface to the hardware latency detector. Can be used to read the
++ * number of latency readings exceeding the configured threshold since
++ * the detector was last reset (e.g. by writing a zero into "count").
++ */
++static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
++				     size_t cnt, loff_t *ppos)
++{
++	return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
++}
++
++/**
++ * debug_count_fwrite - Write function for "count" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "count" debugfs
++ * interface to the hardware latency detector. Can be used to write a
++ * desired value, especially to zero the total count.
++ */
++static ssize_t  debug_count_fwrite(struct file *filp,
++				       const char __user *ubuf,
++				       size_t cnt,
++				       loff_t *ppos)
++{
++	return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
++}
++
++/**
++ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "enable" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_enable_fopen(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++/**
++ * debug_enable_fread - Read function for "enable" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "enable" debugfs
++ * interface to the hardware latency detector. Can be used to determine
++ * whether the detector is currently enabled ("0\n" or "1\n" returned).
++ */
++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
++				      size_t cnt, loff_t *ppos)
++{
++	char buf[4];
++
++	if ((cnt < sizeof(buf)) || (*ppos))
++		return 0;
++
++	buf[0] = enabled ? '1' : '0';
++	buf[1] = '\n';
++	buf[2] = '\0';
++	if (copy_to_user(ubuf, buf, strlen(buf)))
++		return -EFAULT;
++	return *ppos = strlen(buf);
++}
++
++/**
++ * debug_enable_fwrite - Write function for "enable" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "enable" debugfs
++ * interface to the hardware latency detector. Can be used to enable or
++ * disable the detector, which will have the side-effect of possibly
++ * also resetting the global stats and kicking off the measuring
++ * kthread (on an enable) or the converse (upon a disable).
++ */
++static ssize_t  debug_enable_fwrite(struct file *filp,
++					const char __user *ubuf,
++					size_t cnt,
++					loff_t *ppos)
++{
++	char buf[4];
++	int csize = min(cnt, sizeof(buf));
++	long val = 0;
++	int err = 0;
++
++	memset(buf, '\0', sizeof(buf));
++	if (copy_from_user(buf, ubuf, csize))
++		return -EFAULT;
++
++	buf[sizeof(buf)-1] = '\0';			/* just in case */
++	err = strict_strtoul(buf, 10, &val);
++	if (0 != err)
++		return -EINVAL;
++
++	if (val) {
++		if (enabled)
++			goto unlock;
++		enabled = 1;
++		__reset_stats();
++		if (start_kthread())
++			return -EFAULT;
++	} else {
++		if (!enabled)
++			goto unlock;
++		enabled = 0;
++		err = stop_kthread();
++		if (err) {
++			printk(KERN_ERR BANNER "cannot stop kthread\n");
++			return -EFAULT;
++		}
++		wake_up(&data.wq);		/* reader(s) should return */
++	}
++unlock:
++	return csize;
++}
++
++/**
++ * debug_max_fopen - Open function for "max" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "max" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_max_fopen(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++/**
++ * debug_max_fread - Read function for "max" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "max" debugfs
++ * interface to the hardware latency detector. Can be used to determine
++ * the maximum latency value observed since it was last reset.
++ */
++static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
++				   size_t cnt, loff_t *ppos)
++{
++	return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
++}
++
++/**
++ * debug_max_fwrite - Write function for "max" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "max" debugfs
++ * interface to the hardware latency detector. Can be used to reset the
++ * maximum or set it to some other desired value - if, then, subsequent
++ * measurements exceed this value, the maximum will be updated.
++ */
++static ssize_t  debug_max_fwrite(struct file *filp,
++				     const char __user *ubuf,
++				     size_t cnt,
++				     loff_t *ppos)
++{
++	return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
++}
++
++
++/**
++ * debug_sample_fopen - An open function for "sample" debugfs interface
++ * @inode: The in-kernel inode representation of this debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function handles opening the "sample" file within the hardware
++ * latency detector debugfs directory interface. This file is used to read
++ * raw samples from the global ring_buffer and allows the user to see a
++ * running latency history. Can be opened blocking or non-blocking,
++ * which determines whether reads wait for new samples or return at once.
++ * Implements simple locking to prevent multiple simultaneous use.
++ */
++static int debug_sample_fopen(struct inode *inode, struct file *filp)
++{
++	if (!atomic_add_unless(&data.sample_open, 1, 1))
++		return -EBUSY;
++	else
++		return 0;
++}
++
++/**
++ * debug_sample_fread - A read function for "sample" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that will contain the samples read
++ * @cnt: The maximum bytes to read from the debugfs "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function handles reading from the "sample" file within the hardware
++ * latency detector debugfs directory interface. This file is used to read
++ * raw samples from the global ring_buffer and allows the user to see a
++ * running latency history. By default this will block pending a new
++ * value written into the sample buffer, unless there are already a
++ * number of value(s) waiting in the buffer, or the sample file was
++ * previously opened in a non-blocking mode of operation.
++ */
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++					size_t cnt, loff_t *ppos)
++{
++	int len = 0;
++	char buf[64];
++	struct sample *sample = NULL;
++
++	if (!enabled)
++		return 0;
++
++	sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
++	if (!sample)
++		return -ENOMEM;
++
++	while (!buffer_get_sample(sample)) {
++
++		DEFINE_WAIT(wait);
++
++		if (filp->f_flags & O_NONBLOCK) {
++			len = -EAGAIN;
++			goto out;
++		}
++
++		prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
++		schedule();
++		finish_wait(&data.wq, &wait);
++
++		if (signal_pending(current)) {
++			len = -EINTR;
++			goto out;
++		}
++
++		if (!enabled) {			/* enable was toggled */
++			len = 0;
++			goto out;
++		}
++	}
++
++	len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
++		      sample->timestamp.tv_sec,
++		      sample->timestamp.tv_nsec,
++		      sample->duration);
++
++
++	/* handling partial reads is more trouble than it's worth */
++	if (len > cnt)
++		goto out;
++
++	if (copy_to_user(ubuf, buf, len))
++		len = -EFAULT;
++
++out:
++	kfree(sample);
++	return len;
++}
++
++/**
++ * debug_sample_release - Release function for "sample" debugfs interface
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function completes the close of the debugfs interface "sample" file.
++ * Frees the sample_open "lock" so that other users may open the interface.
++ */
++static int debug_sample_release(struct inode *inode, struct file *filp)
++{
++	atomic_dec(&data.sample_open);
++
++	return 0;
++}
++
++/**
++ * debug_threshold_fopen - Open function for "threshold" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "threshold" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_threshold_fopen(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++/**
++ * debug_threshold_fread - Read function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * the current threshold level at which a latency will be recorded in the
++ * global ring buffer, typically on the order of 10us.
++ */
++static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
++					 size_t cnt, loff_t *ppos)
++{
++	return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
++}
++
++/**
++ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * the threshold level at which any subsequently detected latencies will
++ * be recorded into the global ring buffer.
++ */
++static ssize_t  debug_threshold_fwrite(struct file *filp,
++					const char __user *ubuf,
++					size_t cnt,
++					loff_t *ppos)
++{
++	int ret;
++
++	ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
++
++	if (enabled)
++		wake_up_process(kthread);
++
++	return ret;
++}
++
++/**
++ * debug_width_fopen - Open function for "width" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "width" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_width_fopen(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++/**
++ * debug_width_fread - Read function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and have the system respond to a sample reader, or,
++ * worse, without having the system appear to have gone out to lunch.
++ */
++static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
++				     size_t cnt, loff_t *ppos)
++{
++	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
++}
++
++/**
++ * debug_width_fwrite - Write function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and have the system respond to a sample reader, or,
++ * worse, without having the system appear to have gone out to lunch. It
++ * is enforced that width is less than the total window size.
++ */
++static ssize_t  debug_width_fwrite(struct file *filp,
++				       const char __user *ubuf,
++				       size_t cnt,
++				       loff_t *ppos)
++{
++	char buf[U64STR_SIZE];
++	int csize = min(cnt, sizeof(buf));
++	u64 val = 0;
++	int err = 0;
++
++	memset(buf, '\0', sizeof(buf));
++	if (copy_from_user(buf, ubuf, csize))
++		return -EFAULT;
++
++	buf[U64STR_SIZE-1] = '\0';			/* just in case */
++	err = strict_strtoull(buf, 10, &val);
++	if (0 != err)
++		return -EINVAL;
++
++	mutex_lock(&data.lock);
++	if (val < data.sample_window)
++		data.sample_width = val;
++	else {
++		mutex_unlock(&data.lock);
++		return -EINVAL;
++	}
++	mutex_unlock(&data.lock);
++
++	if (enabled)
++		wake_up_process(kthread);
++
++	return csize;
++}
++
++/**
++ * debug_window_fopen - Open function for "window" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs.
++ */
++static int debug_window_fopen(struct inode *inode, struct file *filp)
++{
++	return 0;
++}
++
++/**
++ * debug_window_fread - Read function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to read the total window size.
++ */
++static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
++				      size_t cnt, loff_t *ppos)
++{
++	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
++}
++
++/**
++ * debug_window_fwrite - Write function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to write a new total window size. It
++ * is enforced that any value written must be greater than the sample width
++ * size, or an error results.
++ */
++static ssize_t  debug_window_fwrite(struct file *filp,
++					const char __user *ubuf,
++					size_t cnt,
++					loff_t *ppos)
++{
++	char buf[U64STR_SIZE];
++	int csize = min(cnt, sizeof(buf));
++	u64 val = 0;
++	int err = 0;
++
++	memset(buf, '\0', sizeof(buf));
++	if (copy_from_user(buf, ubuf, csize))
++		return -EFAULT;
++
++	buf[U64STR_SIZE-1] = '\0';			/* just in case */
++	err = strict_strtoull(buf, 10, &val);
++	if (0 != err)
++		return -EINVAL;
++
++	mutex_lock(&data.lock);
++	if (data.sample_width < val)
++		data.sample_window = val;
++	else {
++		mutex_unlock(&data.lock);
++		return -EINVAL;
++	}
++	mutex_unlock(&data.lock);
++
++	return csize;
++}
++
++/*
++ * Function pointers for the "count" debugfs file operations
++ */
++static const struct file_operations count_fops = {
++	.open		= debug_count_fopen,
++	.read		= debug_count_fread,
++	.write		= debug_count_fwrite,
++	.owner		= THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "enable" debugfs file operations
++ */
++static const struct file_operations enable_fops = {
++	.open		= debug_enable_fopen,
++	.read		= debug_enable_fread,
++	.write		= debug_enable_fwrite,
++	.owner		= THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "max" debugfs file operations
++ */
++static const struct file_operations max_fops = {
++	.open		= debug_max_fopen,
++	.read		= debug_max_fread,
++	.write		= debug_max_fwrite,
++	.owner		= THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "sample" debugfs file operations
++ */
++static const struct file_operations sample_fops = {
++	.open 		= debug_sample_fopen,
++	.read		= debug_sample_fread,
++	.release	= debug_sample_release,
++	.owner		= THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "threshold" debugfs file operations
++ */
++static const struct file_operations threshold_fops = {
++	.open		= debug_threshold_fopen,
++	.read		= debug_threshold_fread,
++	.write		= debug_threshold_fwrite,
++	.owner		= THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "width" debugfs file operations
++ */
++static const struct file_operations width_fops = {
++	.open		= debug_width_fopen,
++	.read		= debug_width_fread,
++	.write		= debug_width_fwrite,
++	.owner		= THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "window" debugfs file operations
++ */
++static const struct file_operations window_fops = {
++	.open		= debug_window_fopen,
++	.read		= debug_window_fread,
++	.write		= debug_window_fwrite,
++	.owner		= THIS_MODULE,
++};
++
++/**
++ * init_debugfs - A function to initialize the debugfs interface files
++ *
++ * This function creates entries in debugfs for "hwlat_detector", including
++ * files to read values from the detector, current samples, and the
++ * maximum sample that has been captured since the hardware latency
++ * detector was started.
++ */
++static int init_debugfs(void)
++{
++	int ret = -ENOMEM;
++
++	debug_dir = debugfs_create_dir(DRVNAME, NULL);
++	if (!debug_dir)
++		goto err_debug_dir;
++
++	debug_sample = debugfs_create_file("sample", 0444,
++					       debug_dir, NULL,
++					       &sample_fops);
++	if (!debug_sample)
++		goto err_sample;
++
++	debug_count = debugfs_create_file("count", 0444,
++					      debug_dir, NULL,
++					      &count_fops);
++	if (!debug_count)
++		goto err_count;
++
++	debug_max = debugfs_create_file("max", 0444,
++					    debug_dir, NULL,
++					    &max_fops);
++	if (!debug_max)
++		goto err_max;
++
++	debug_sample_window = debugfs_create_file("window", 0644,
++						      debug_dir, NULL,
++						      &window_fops);
++	if (!debug_sample_window)
++		goto err_window;
++
++	debug_sample_width = debugfs_create_file("width", 0644,
++						     debug_dir, NULL,
++						     &width_fops);
++	if (!debug_sample_width)
++		goto err_width;
++
++	debug_threshold = debugfs_create_file("threshold", 0644,
++						  debug_dir, NULL,
++						  &threshold_fops);
++	if (!debug_threshold)
++		goto err_threshold;
++
++	debug_enable = debugfs_create_file("enable", 0644,
++					       debug_dir, &enabled,
++					       &enable_fops);
++	if (!debug_enable)
++		goto err_enable;
++
++	ret = 0;
++	goto out;
++
++err_enable:
++	debugfs_remove(debug_threshold);
++err_threshold:
++	debugfs_remove(debug_sample_width);
++err_width:
++	debugfs_remove(debug_sample_window);
++err_window:
++	debugfs_remove(debug_max);
++err_max:
++	debugfs_remove(debug_count);
++err_count:
++	debugfs_remove(debug_sample);
++err_sample:
++	debugfs_remove(debug_dir);
++err_debug_dir:
++out:
++	return ret;
++}
++
++/**
++ * free_debugfs - A function to cleanup the debugfs file interface
++ */
++static void free_debugfs(void)
++{
++	/* could also use a debugfs_remove_recursive */
++	debugfs_remove(debug_enable);
++	debugfs_remove(debug_threshold);
++	debugfs_remove(debug_sample_width);
++	debugfs_remove(debug_sample_window);
++	debugfs_remove(debug_max);
++	debugfs_remove(debug_count);
++	debugfs_remove(debug_sample);
++	debugfs_remove(debug_dir);
++}
++
++/**
++ * detector_init - Standard module initialization code
++ */
++static int detector_init(void)
++{
++	int ret = -ENOMEM;
++
++	printk(KERN_INFO BANNER "version %s\n", VERSION);
++
++	ret = init_stats();
++	if (0 != ret)
++		goto out;
++
++	ret = init_debugfs();
++	if (0 != ret)
++		goto err_stats;
++
++	if (enabled)
++		ret = start_kthread();
++
++	goto out;
++
++err_stats:
++	ring_buffer_free(ring_buffer);
++out:
++	return ret;
++
++}
++
++/**
++ * detector_exit - Standard module cleanup code
++ */
++static void detector_exit(void)
++{
++	int err;
++
++	if (enabled) {
++		enabled = 0;
++		err = stop_kthread();
++		if (err)
++			printk(KERN_ERR BANNER "cannot stop kthread\n");
++	}
++
++	free_debugfs();
++	ring_buffer_free(ring_buffer);	/* free up the ring buffer */
++
++}
++
++module_init(detector_init);
++module_exit(detector_exit);
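
The kernel-doc comments above describe a small debugfs interface: "window", "width", "threshold" and "enable" are writable knobs, while "sample", "count" and "max" are read-only. As a rough sketch only -- the /sys/kernel/debug/hwlat_detector path and the accepted values are assumptions derived from DRVNAME and the debugfs_create_file() calls, not something stated in the patch -- a userspace program could configure the detector like this, keeping the width strictly smaller than the window as debug_width_fwrite() and debug_window_fwrite() enforce:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Hypothetical helper: write one decimal value into a debugfs knob. */
	static int write_knob(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror(path);
			return -1;
		}
		if (write(fd, val, strlen(val)) < 0) {
			perror(path);
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		/* 1 s sampling window, actively sampling for 0.5 s of it */
		write_knob("/sys/kernel/debug/hwlat_detector/window", "1000000");
		write_knob("/sys/kernel/debug/hwlat_detector/width", "500000");
		/* start the sampling kthread */
		write_knob("/sys/kernel/debug/hwlat_detector/enable", "1");
		return 0;
	}

Note that writing the width while the detector is enabled also wakes the sampling kthread, per debug_width_fwrite() above.
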
+Index: linux-3.2/localversion-rt
+===================================================================
+Index: linux-3.2/arch/arm/kernel/early_printk.c
+===================================================================
+--- linux-3.2.orig/arch/arm/kernel/early_printk.c
++++ linux-3.2/arch/arm/kernel/early_printk.c
+@@ -29,28 +29,17 @@ static void early_console_write(struct c
+ 	early_write(s, n);
+ }
+ 
+-static struct console early_console = {
++static struct console early_console_dev = {
+ 	.name =		"earlycon",
+ 	.write =	early_console_write,
+ 	.flags =	CON_PRINTBUFFER | CON_BOOT,
+ 	.index =	-1,
+ };
+ 
+-asmlinkage void early_printk(const char *fmt, ...)
+-{
+-	char buf[512];
+-	int n;
+-	va_list ap;
+-
+-	va_start(ap, fmt);
+-	n = vscnprintf(buf, sizeof(buf), fmt, ap);
+-	early_write(buf, n);
+-	va_end(ap);
+-}
+-
+ static int __init setup_early_printk(char *buf)
+ {
+-	register_console(&early_console);
++	early_console = &early_console_dev;
++	register_console(&early_console_dev);
+ 	return 0;
+ }
+ 
+Index: linux-3.2/arch/blackfin/kernel/early_printk.c
+===================================================================
+--- linux-3.2.orig/arch/blackfin/kernel/early_printk.c
++++ linux-3.2/arch/blackfin/kernel/early_printk.c
+@@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_
+ extern struct console *bfin_jc_early_init(void);
+ #endif
+ 
+-static struct console *early_console;
+-
+ /* Default console */
+ #define DEFAULT_PORT 0
+ #define DEFAULT_CFLAG CS8|B57600
+Index: linux-3.2/arch/microblaze/kernel/early_printk.c
+===================================================================
+--- linux-3.2.orig/arch/microblaze/kernel/early_printk.c
++++ linux-3.2/arch/microblaze/kernel/early_printk.c
+@@ -21,7 +21,6 @@
+ #include <asm/setup.h>
+ #include <asm/prom.h>
+ 
+-static u32 early_console_initialized;
+ static u32 base_addr;
+ 
+ #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+@@ -109,27 +108,11 @@ static struct console early_serial_uart1
+ };
+ #endif /* CONFIG_SERIAL_8250_CONSOLE */
+ 
+-static struct console *early_console;
+-
+-void early_printk(const char *fmt, ...)
+-{
+-	char buf[512];
+-	int n;
+-	va_list ap;
+-
+-	if (early_console_initialized) {
+-		va_start(ap, fmt);
+-		n = vscnprintf(buf, 512, fmt, ap);
+-		early_console->write(early_console, buf, n);
+-		va_end(ap);
+-	}
+-}
+-
+ int __init setup_early_printk(char *opt)
+ {
+ 	int version = 0;
+ 
+-	if (early_console_initialized)
++	if (early_console)
+ 		return 1;
+ 
+ 	base_addr = of_early_console(&version);
+@@ -159,7 +142,6 @@ int __init setup_early_printk(char *opt)
+ 		}
+ 
+ 		register_console(early_console);
+-		early_console_initialized = 1;
+ 		return 0;
+ 	}
+ 	return 1;
+@@ -169,7 +151,7 @@ int __init setup_early_printk(char *opt)
+  * only for early console because of performance degression */
+ void __init remap_early_printk(void)
+ {
+-	if (!early_console_initialized || !early_console)
++	if (!early_console)
+ 		return;
+ 	printk(KERN_INFO "early_printk_console remaping from 0x%x to ",
+ 								base_addr);
+@@ -179,9 +161,9 @@ void __init remap_early_printk(void)
+ 
+ void __init disable_early_printk(void)
+ {
+-	if (!early_console_initialized || !early_console)
++	if (!early_console)
+ 		return;
+ 	printk(KERN_WARNING "disabling early console\n");
+ 	unregister_console(early_console);
+-	early_console_initialized = 0;
++	early_console = NULL;
+ }
+Index: linux-3.2/arch/mips/kernel/early_printk.c
+===================================================================
+--- linux-3.2.orig/arch/mips/kernel/early_printk.c
++++ linux-3.2/arch/mips/kernel/early_printk.c
+@@ -25,20 +25,18 @@ early_console_write(struct console *con,
+ 	}
+ }
+ 
+-static struct console early_console __initdata = {
++static struct console early_console_prom = {
+ 	.name	= "early",
+ 	.write	= early_console_write,
+ 	.flags	= CON_PRINTBUFFER | CON_BOOT,
+ 	.index	= -1
+ };
+ 
+-static int early_console_initialized __initdata;
+-
+ void __init setup_early_printk(void)
+ {
+-	if (early_console_initialized)
++	if (early_console)
+ 		return;
+-	early_console_initialized = 1;
++	early_console = &early_console_prom;
+ 
+-	register_console(&early_console);
++	register_console(&early_console_prom);
+ }
+Index: linux-3.2/arch/powerpc/kernel/udbg.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/kernel/udbg.c
++++ linux-3.2/arch/powerpc/kernel/udbg.c
+@@ -182,15 +182,13 @@ static struct console udbg_console = {
+ 	.index	= 0,
+ };
+ 
+-static int early_console_initialized;
+-
+ /*
+  * Called by setup_system after ppc_md->probe and ppc_md->early_init.
+  * Call it again after setting udbg_putc in ppc_md->setup_arch.
+  */
+ void __init register_early_udbg_console(void)
+ {
+-	if (early_console_initialized)
++	if (early_console)
+ 		return;
+ 
+ 	if (!udbg_putc)
+@@ -200,7 +198,7 @@ void __init register_early_udbg_console(
+ 		printk(KERN_INFO "early console immortal !\n");
+ 		udbg_console.flags &= ~CON_BOOT;
+ 	}
+-	early_console_initialized = 1;
++	early_console = &udbg_console;
+ 	register_console(&udbg_console);
+ }
+ 
+Index: linux-3.2/arch/sh/kernel/sh_bios.c
+===================================================================
+--- linux-3.2.orig/arch/sh/kernel/sh_bios.c
++++ linux-3.2/arch/sh/kernel/sh_bios.c
+@@ -144,8 +144,6 @@ static struct console bios_console = {
+ 	.index		= -1,
+ };
+ 
+-static struct console *early_console;
+-
+ static int __init setup_early_printk(char *buf)
+ {
+ 	int keep_early = 0;
+Index: linux-3.2/arch/sparc/kernel/setup_32.c
+===================================================================
+--- linux-3.2.orig/arch/sparc/kernel/setup_32.c
++++ linux-3.2/arch/sparc/kernel/setup_32.c
+@@ -221,6 +221,7 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	boot_flags_init(*cmdline_p);
+ 
++	early_console = &prom_early_console;
+ 	register_console(&prom_early_console);
+ 
+ 	/* Set sparc_cpu_model */
+Index: linux-3.2/arch/sparc/kernel/setup_64.c
+===================================================================
+--- linux-3.2.orig/arch/sparc/kernel/setup_64.c
++++ linux-3.2/arch/sparc/kernel/setup_64.c
+@@ -477,6 +477,12 @@ static void __init init_sparc64_elf_hwca
+ 		popc_patch();
+ }
+ 
++static inline void register_prom_console(void)
++{
++	early_console = &prom_early_console;
++	register_console(&prom_early_console);
++}
++
+ void __init setup_arch(char **cmdline_p)
+ {
+ 	/* Initialize PROM console and command line. */
+@@ -488,7 +494,7 @@ void __init setup_arch(char **cmdline_p)
+ #ifdef CONFIG_EARLYFB
+ 	if (btext_find_display())
+ #endif
+-		register_console(&prom_early_console);
++		register_prom_console();
+ 
+ 	if (tlb_type == hypervisor)
+ 		printk("ARCH: SUN4V\n");
+Index: linux-3.2/arch/tile/kernel/early_printk.c
+===================================================================
+--- linux-3.2.orig/arch/tile/kernel/early_printk.c
++++ linux-3.2/arch/tile/kernel/early_printk.c
+@@ -32,25 +32,8 @@ static struct console early_hv_console =
+ };
+ 
+ /* Direct interface for emergencies */
+-static struct console *early_console = &early_hv_console;
+-static int early_console_initialized;
+ static int early_console_complete;
+ 
+-static void early_vprintk(const char *fmt, va_list ap)
+-{
+-	char buf[512];
+-	int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+-	early_console->write(early_console, buf, n);
+-}
+-
+-void early_printk(const char *fmt, ...)
+-{
+-	va_list ap;
+-	va_start(ap, fmt);
+-	early_vprintk(fmt, ap);
+-	va_end(ap);
+-}
+-
+ void early_panic(const char *fmt, ...)
+ {
+ 	va_list ap;
+@@ -68,14 +51,13 @@ static int __initdata keep_early;
+ 
+ static int __init setup_early_printk(char *str)
+ {
+-	if (early_console_initialized)
++	if (early_console)
+ 		return 1;
+ 
+ 	if (str != NULL && strncmp(str, "keep", 4) == 0)
+ 		keep_early = 1;
+ 
+ 	early_console = &early_hv_console;
+-	early_console_initialized = 1;
+ 	register_console(early_console);
+ 
+ 	return 0;
+@@ -84,12 +66,12 @@ static int __init setup_early_printk(cha
+ void __init disable_early_printk(void)
+ {
+ 	early_console_complete = 1;
+-	if (!early_console_initialized || !early_console)
++	if (!early_console)
+ 		return;
+ 	if (!keep_early) {
+ 		early_printk("disabling early console\n");
+ 		unregister_console(early_console);
+-		early_console_initialized = 0;
++		early_console = NULL;
+ 	} else {
+ 		early_printk("keeping early console\n");
+ 	}
+@@ -97,7 +79,7 @@ void __init disable_early_printk(void)
+ 
+ void warn_early_printk(void)
+ {
+-	if (early_console_complete || early_console_initialized)
++	if (early_console_complete || early_console)
+ 		return;
+ 	early_printk("\
+ Machine shutting down before console output is fully initialized.\n\
+Index: linux-3.2/arch/um/kernel/early_printk.c
+===================================================================
+--- linux-3.2.orig/arch/um/kernel/early_printk.c
++++ linux-3.2/arch/um/kernel/early_printk.c
+@@ -16,7 +16,7 @@ static void early_console_write(struct c
+ 	um_early_printk(s, n);
+ }
+ 
+-static struct console early_console = {
++static struct console early_console_dev = {
+ 	.name = "earlycon",
+ 	.write = early_console_write,
+ 	.flags = CON_BOOT,
+@@ -25,8 +25,10 @@ static struct console early_console = {
+ 
+ static int __init setup_early_printk(char *buf)
+ {
+-	register_console(&early_console);
+-
++	if (!early_console) {
++		early_console = &early_console_dev;
++		register_console(&early_console_dev);
++	}
+ 	return 0;
+ }
+ 
+Index: linux-3.2/arch/unicore32/kernel/early_printk.c
+===================================================================
+--- linux-3.2.orig/arch/unicore32/kernel/early_printk.c
++++ linux-3.2/arch/unicore32/kernel/early_printk.c
+@@ -33,21 +33,17 @@ static struct console early_ocd_console 
+ 	.index =	-1,
+ };
+ 
+-/* Direct interface for emergencies */
+-static struct console *early_console = &early_ocd_console;
+-
+-static int __initdata keep_early;
+-
+ static int __init setup_early_printk(char *buf)
+ {
+-	if (!buf)
++	int keep_early;
++
++	if (!buf || early_console)
+ 		return 0;
+ 
+ 	if (strstr(buf, "keep"))
+ 		keep_early = 1;
+ 
+-	if (!strncmp(buf, "ocd", 3))
+-		early_console = &early_ocd_console;
++	early_console = &early_ocd_console;
+ 
+ 	if (keep_early)
+ 		early_console->flags &= ~CON_BOOT;
+Index: linux-3.2/arch/x86/kernel/early_printk.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/early_printk.c
++++ linux-3.2/arch/x86/kernel/early_printk.c
+@@ -169,25 +169,9 @@ static struct console early_serial_conso
+ 	.index =	-1,
+ };
+ 
+-/* Direct interface for emergencies */
+-static struct console *early_console = &early_vga_console;
+-static int __initdata early_console_initialized;
+-
+-asmlinkage void early_printk(const char *fmt, ...)
+-{
+-	char buf[512];
+-	int n;
+-	va_list ap;
+-
+-	va_start(ap, fmt);
+-	n = vscnprintf(buf, sizeof(buf), fmt, ap);
+-	early_console->write(early_console, buf, n);
+-	va_end(ap);
+-}
+-
+ static inline void early_console_register(struct console *con, int keep_early)
+ {
+-	if (early_console->index != -1) {
++	if (con->index != -1) {
+ 		printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
+ 		       con->name);
+ 		return;
+@@ -207,9 +191,8 @@ static int __init setup_early_printk(cha
+ 	if (!buf)
+ 		return 0;
+ 
+-	if (early_console_initialized)
++	if (early_console)
+ 		return 0;
+-	early_console_initialized = 1;
+ 
+ 	keep = (strstr(buf, "keep") != NULL);
+ 
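
All of the architecture hunks above follow one pattern: the per-arch early_printk() copies and early_console_initialized flags are removed, and each architecture instead publishes its boot console through the shared early_console pointer (declared in include/linux/console.h and defined, together with the common early_printk() implementation, in the kernel/printk.c hunk further down). A minimal sketch of what this leaves a hypothetical architecture with -- my_early_write(), the console name and the parameter name are placeholders, not taken from the patch:

	#include <linux/console.h>
	#include <linux/init.h>

	static void my_early_write(struct console *con, const char *s, unsigned int n)
	{
		/* push the characters out on the board's boot UART */
	}

	static struct console my_early_console = {
		.name	= "earlycon",
		.write	= my_early_write,
		.flags	= CON_PRINTBUFFER | CON_BOOT,
		.index	= -1,
	};

	static int __init setup_early_printk(char *buf)
	{
		if (early_console)	/* already set up */
			return 0;

		early_console = &my_early_console;
		register_console(&my_early_console);
		return 0;
	}
	early_param("earlyprintk", setup_early_printk);
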
+Index: linux-3.2/include/linux/console.h
+===================================================================
+--- linux-3.2.orig/include/linux/console.h
++++ linux-3.2/include/linux/console.h
+@@ -133,6 +133,7 @@ struct console {
+ 	for (con = console_drivers; con != NULL; con = con->next)
+ 
+ extern int console_set_on_cmdline;
++extern struct console *early_console;
+ 
+ extern int add_preferred_console(char *name, int idx, char *options);
+ extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
+Index: linux-3.2/include/linux/printk.h
+===================================================================
+--- linux-3.2.orig/include/linux/printk.h
++++ linux-3.2/include/linux/printk.h
+@@ -88,8 +88,15 @@ int no_printk(const char *fmt, ...)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_EARLY_PRINTK
+ extern asmlinkage __printf(1, 2)
+ void early_printk(const char *fmt, ...);
++extern void printk_kill(void);
++#else
++static inline __printf(1, 2) __cold
++void early_printk(const char *s, ...) { }
++static inline void printk_kill(void) { }
++#endif
+ 
+ extern int printk_needs_cpu(int cpu);
+ extern void printk_tick(void);
+@@ -109,7 +116,6 @@ extern int __printk_ratelimit(const char
+ #define printk_ratelimit() __printk_ratelimit(__func__)
+ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+ 				   unsigned int interval_msec);
+-
+ extern int printk_delay_msec;
+ extern int dmesg_restrict;
+ extern int kptr_restrict;
+Index: linux-3.2/kernel/printk.c
+===================================================================
+--- linux-3.2.orig/kernel/printk.c
++++ linux-3.2/kernel/printk.c
+@@ -21,6 +21,7 @@
+ #include <linux/tty.h>
+ #include <linux/tty_driver.h>
+ #include <linux/console.h>
++#include <linux/sysrq.h>
+ #include <linux/init.h>
+ #include <linux/jiffies.h>
+ #include <linux/nmi.h>
+@@ -44,13 +45,6 @@
+ 
+ #include <asm/uaccess.h>
+ 
+-/*
+- * Architectures can override it:
+- */
+-void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
+-{
+-}
+-
+ #define __LOG_BUF_LEN	(1 << CONFIG_LOG_BUF_SHIFT)
+ 
+ /* printk's without a loglevel use this.. */
+@@ -511,6 +505,7 @@ static void __call_console_drivers(unsig
+ {
+ 	struct console *con;
+ 
++	migrate_disable();
+ 	for_each_console(con) {
+ 		if (exclusive_console && con != exclusive_console)
+ 			continue;
+@@ -519,7 +514,61 @@ static void __call_console_drivers(unsig
+ 				(con->flags & CON_ANYTIME)))
+ 			con->write(con, &LOG_BUF(start), end - start);
+ 	}
++	migrate_enable();
++}
++
++#ifdef CONFIG_EARLY_PRINTK
++struct console *early_console;
++
++static void early_vprintk(const char *fmt, va_list ap)
++{
++	char buf[512];
++	int n = vscnprintf(buf, sizeof(buf), fmt, ap);
++	if (early_console)
++		early_console->write(early_console, buf, n);
++}
++
++asmlinkage void early_printk(const char *fmt, ...)
++{
++	va_list ap;
++	va_start(ap, fmt);
++	early_vprintk(fmt, ap);
++	va_end(ap);
++}
++
++/*
++ * This is independent of any log levels - a global
++ * kill switch that turns off all of printk.
++ *
++ * Used by the NMI watchdog if early-printk is enabled.
++ */
++static int __read_mostly printk_killswitch;
++
++static int __init force_early_printk_setup(char *str)
++{
++	printk_killswitch = 1;
++	return 0;
++}
++early_param("force_early_printk", force_early_printk_setup);
++
++void printk_kill(void)
++{
++	printk_killswitch = 1;
++}
++
++static int forced_early_printk(const char *fmt, va_list ap)
++{
++	if (!printk_killswitch)
++		return 0;
++	early_vprintk(fmt, ap);
++	return 1;
++}
++#else
++static inline int forced_early_printk(const char *fmt, va_list ap)
++{
++	return 0;
+ }
++#endif
+ 
+ static int __read_mostly ignore_loglevel;
+ 
+@@ -781,12 +830,18 @@ static inline int can_use_console(unsign
+  * interrupts disabled. It should return with 'lockbuf_lock'
+  * released but interrupts still disabled.
+  */
+-static int console_trylock_for_printk(unsigned int cpu)
++static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+ 	__releases(&logbuf_lock)
+ {
+ 	int retval = 0, wake = 0;
++#ifdef CONFIG_PREEMPT_RT_FULL
++	int lock = (!early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
++		!preempt_count()) || sysrq_in_progress;
++#else
++	int lock = 1;
++#endif
+ 
+-	if (console_trylock()) {
++	if (lock && console_trylock()) {
+ 		retval = 1;
+ 
+ 		/*
+@@ -837,6 +892,13 @@ asmlinkage int vprintk(const char *fmt, 
+ 	size_t plen;
+ 	char special;
+ 
++	/*
++	 * Fall back to early_printk if a debugging subsystem has
++	 * killed printk output
++	 */
++	if (unlikely(forced_early_printk(fmt, args)))
++		return 1;
++
+ 	boot_delay_msec();
+ 	printk_delay();
+ 
+@@ -957,8 +1019,15 @@ asmlinkage int vprintk(const char *fmt, 
+ 	 * will release 'logbuf_lock' regardless of whether it
+ 	 * actually gets the semaphore or not.
+ 	 */
+-	if (console_trylock_for_printk(this_cpu))
++	if (console_trylock_for_printk(this_cpu, flags)) {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 		console_unlock();
++#else
++		raw_local_irq_restore(flags);
++		console_unlock();
++		raw_local_irq_save(flags);
++#endif
++	}
+ 
+ 	lockdep_on();
+ out_restore_irqs:
+@@ -1221,8 +1290,8 @@ void printk_tick(void)
+ 
+ int printk_needs_cpu(int cpu)
+ {
+-	if (cpu_is_offline(cpu))
+-		printk_tick();
++	if (unlikely(cpu_is_offline(cpu)))
++		__this_cpu_write(printk_pending, 0);
+ 	return __this_cpu_read(printk_pending);
+ }
+ 
+@@ -1268,11 +1337,16 @@ again:
+ 		_con_start = con_start;
+ 		_log_end = log_end;
+ 		con_start = log_end;		/* Flush */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 		raw_spin_unlock(&logbuf_lock);
+ 		stop_critical_timings();	/* don't trace print latency */
+ 		call_console_drivers(_con_start, _log_end);
+ 		start_critical_timings();
+ 		local_irq_restore(flags);
++#else
++		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++		call_console_drivers(_con_start, _log_end);
++#endif
+ 	}
+ 	console_locked = 0;
+ 
+Index: linux-3.2/kernel/watchdog.c
+===================================================================
+--- linux-3.2.orig/kernel/watchdog.c
++++ linux-3.2/kernel/watchdog.c
+@@ -201,6 +201,8 @@ static int is_softlockup(unsigned long t
+ 
+ #ifdef CONFIG_HARDLOCKUP_DETECTOR
+ 
++static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
++
+ static struct perf_event_attr wd_hw_attr = {
+ 	.type		= PERF_TYPE_HARDWARE,
+ 	.config		= PERF_COUNT_HW_CPU_CYCLES,
+@@ -235,10 +237,19 @@ static void watchdog_overflow_callback(s
+ 		if (__this_cpu_read(hard_watchdog_warn) == true)
+ 			return;
+ 
+-		if (hardlockup_panic)
++		/*
++		 * If early-printk is enabled then make sure we do not
++		 * lock up in printk() and kill console logging:
++		 */
++		printk_kill();
++
++		if (hardlockup_panic) {
+ 			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+-		else
++		} else {
++			raw_spin_lock(&watchdog_output_lock);
+ 			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
++			raw_spin_unlock(&watchdog_output_lock);
++		}
+ 
+ 		__this_cpu_write(hard_watchdog_warn, true);
+ 		return;
+@@ -425,6 +436,7 @@ static void watchdog_prepare_cpu(int cpu
+ 	WARN_ON(per_cpu(softlockup_watchdog, cpu));
+ 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ 	hrtimer->function = watchdog_timer_fn;
++	hrtimer->irqsafe = 1;
+ }
+ 
+ static int watchdog_enable(int cpu)
+Index: linux-3.2/kernel/Kconfig.preempt
+===================================================================
+--- linux-3.2.orig/kernel/Kconfig.preempt
++++ linux-3.2/kernel/Kconfig.preempt
+@@ -1,3 +1,10 @@
++config PREEMPT
++	bool
++	select PREEMPT_COUNT
++
++config PREEMPT_RT_BASE
++	bool
++	select PREEMPT
+ 
+ choice
+ 	prompt "Preemption Model"
+@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY
+ 
+ 	  Select this if you are building a kernel for a desktop system.
+ 
+-config PREEMPT
++config PREEMPT__LL
+ 	bool "Preemptible Kernel (Low-Latency Desktop)"
+-	select PREEMPT_COUNT
++	select PREEMPT
+ 	help
+ 	  This option reduces the latency of the kernel by making
+ 	  all kernel code (that is not executing in a critical section)
+@@ -51,6 +58,21 @@ config PREEMPT
+ 	  embedded system with latency requirements in the milliseconds
+ 	  range.
+ 
++config PREEMPT_RTB
++	bool "Preemptible Kernel (Basic RT)"
++	select PREEMPT_RT_BASE
++	help
++	  This option is basically the same as (Low-Latency Desktop) but
++	  enables changes which are preliminary for the full preemptible
++	  RT kernel.
++
++config PREEMPT_RT_FULL
++	bool "Fully Preemptible Kernel (RT)"
++	depends on IRQ_FORCED_THREADING
++	select PREEMPT_RT_BASE
++	help
++	  Select this for the fully preemptible RT kernel, with the complete
++	  set of PREEMPT_RT substitutions enabled.
++
+ endchoice
+ 
+ config PREEMPT_COUNT
+Index: linux-3.2/include/asm-generic/bug.h
+===================================================================
+--- linux-3.2.orig/include/asm-generic/bug.h
++++ linux-3.2/include/asm-generic/bug.h
+@@ -3,6 +3,10 @@
+ 
+ #include <linux/compiler.h>
+ 
++#ifndef __ASSEMBLY__
++extern void __WARN_ON(const char *func, const char *file, const int line);
++#endif /* __ASSEMBLY__ */
++
+ #ifdef CONFIG_BUG
+ 
+ #ifdef CONFIG_GENERIC_BUG
+@@ -202,4 +206,18 @@ extern void warn_slowpath_null(const cha
+ # define WARN_ON_SMP(x)			({0;})
+ #endif
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define BUG_ON_RT(c)			BUG_ON(c)
++# define BUG_ON_NONRT(c)		do { } while (0)
++# define WARN_ON_RT(condition)		WARN_ON(condition)
++# define WARN_ON_NONRT(condition)	do { } while (0)
++# define WARN_ON_ONCE_NONRT(condition)	do { } while (0)
++#else
++# define BUG_ON_RT(c)			do { } while (0)
++# define BUG_ON_NONRT(c)		BUG_ON(c)
++# define WARN_ON_RT(condition)		do { } while (0)
++# define WARN_ON_NONRT(condition)	WARN_ON(condition)
++# define WARN_ON_ONCE_NONRT(condition)	WARN_ON_ONCE(condition)
++#endif
++
+ #endif
+Index: linux-3.2/include/linux/irqflags.h
+===================================================================
+--- linux-3.2.orig/include/linux/irqflags.h
++++ linux-3.2/include/linux/irqflags.h
+@@ -25,8 +25,6 @@
+ # define trace_softirqs_enabled(p)	((p)->softirqs_enabled)
+ # define trace_hardirq_enter()	do { current->hardirq_context++; } while (0)
+ # define trace_hardirq_exit()	do { current->hardirq_context--; } while (0)
+-# define lockdep_softirq_enter()	do { current->softirq_context++; } while (0)
+-# define lockdep_softirq_exit()	do { current->softirq_context--; } while (0)
+ # define INIT_TRACE_IRQFLAGS	.softirqs_enabled = 1,
+ #else
+ # define trace_hardirqs_on()		do { } while (0)
+@@ -39,9 +37,15 @@
+ # define trace_softirqs_enabled(p)	0
+ # define trace_hardirq_enter()		do { } while (0)
+ # define trace_hardirq_exit()		do { } while (0)
++# define INIT_TRACE_IRQFLAGS
++#endif
++
++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
++# define lockdep_softirq_exit()	 do { current->softirq_context--; } while (0)
++#else
+ # define lockdep_softirq_enter()	do { } while (0)
+ # define lockdep_softirq_exit()		do { } while (0)
+-# define INIT_TRACE_IRQFLAGS
+ #endif
+ 
+ #if defined(CONFIG_IRQSOFF_TRACER) || \
+@@ -147,4 +151,23 @@
+ 
+ #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+ 
++/*
++ * local_irq* variants depending on RT/!RT
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define local_irq_disable_nort()	do { } while (0)
++# define local_irq_enable_nort()	do { } while (0)
++# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
++# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
++# define local_irq_disable_rt()		local_irq_disable()
++# define local_irq_enable_rt()		local_irq_enable()
++#else
++# define local_irq_disable_nort()	local_irq_disable()
++# define local_irq_enable_nort()	local_irq_enable()
++# define local_irq_save_nort(flags)	local_irq_save(flags)
++# define local_irq_restore_nort(flags)	local_irq_restore(flags)
++# define local_irq_disable_rt()		do { } while (0)
++# define local_irq_enable_rt()		do { } while (0)
++#endif
++
+ #endif
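
The _nort/_rt helpers defined above keep the !RT behaviour unchanged while compiling the hard interrupt disabling away on PREEMPT_RT_FULL, where the affected sections are serialized by sleeping locks instead; the driver conversions in the following hunks are all of this shape. A minimal illustrative sketch -- the structure and function names are invented for the example:

	#include <linux/irqflags.h>

	struct dev_stats {
		unsigned long rx, tx;
	};

	/* Snapshot counters: IRQs really off on !RT, plain flags save on RT. */
	static void dev_stats_snapshot(struct dev_stats *dst, struct dev_stats *src)
	{
		unsigned long flags;

		local_irq_save_nort(flags);
		*dst = *src;
		local_irq_restore_nort(flags);
	}
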
+Index: linux-3.2/drivers/ata/libata-sff.c
+===================================================================
+--- linux-3.2.orig/drivers/ata/libata-sff.c
++++ linux-3.2/drivers/ata/libata-sff.c
+@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str
+ 	unsigned long flags;
+ 	unsigned int consumed;
+ 
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 
+ 	return consumed;
+ }
+@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu
+ 		unsigned long flags;
+ 
+ 		/* FIXME: use a bounce buffer */
+-		local_irq_save(flags);
++		local_irq_save_nort(flags);
+ 		buf = kmap_atomic(page, KM_IRQ0);
+ 
+ 		/* do the actual data transfer */
+@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu
+ 				       do_write);
+ 
+ 		kunmap_atomic(buf, KM_IRQ0);
+-		local_irq_restore(flags);
++		local_irq_restore_nort(flags);
+ 	} else {
+ 		buf = page_address(page);
+ 		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+@@ -864,7 +864,7 @@ next_sg:
+ 		unsigned long flags;
+ 
+ 		/* FIXME: use bounce buffer */
+-		local_irq_save(flags);
++		local_irq_save_nort(flags);
+ 		buf = kmap_atomic(page, KM_IRQ0);
+ 
+ 		/* do the actual data transfer */
+@@ -872,7 +872,7 @@ next_sg:
+ 								count, rw);
+ 
+ 		kunmap_atomic(buf, KM_IRQ0);
+-		local_irq_restore(flags);
++		local_irq_restore_nort(flags);
+ 	} else {
+ 		buf = page_address(page);
+ 		consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
+Index: linux-3.2/drivers/ide/alim15x3.c
+===================================================================
+--- linux-3.2.orig/drivers/ide/alim15x3.c
++++ linux-3.2/drivers/ide/alim15x3.c
+@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p
+ 
+ 	isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+ 
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 
+ 	if (m5229_revision < 0xC2) {
+ 		/*
+@@ -325,7 +325,7 @@ out:
+ 	}
+ 	pci_dev_put(north);
+ 	pci_dev_put(isa_dev);
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 	return 0;
+ }
+ 
+Index: linux-3.2/drivers/ide/hpt366.c
+===================================================================
+--- linux-3.2.orig/drivers/ide/hpt366.c
++++ linux-3.2/drivers/ide/hpt366.c
+@@ -1241,7 +1241,7 @@ static int __devinit init_dma_hpt366(ide
+ 
+ 	dma_old = inb(base + 2);
+ 
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 
+ 	dma_new = dma_old;
+ 	pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
+@@ -1252,7 +1252,7 @@ static int __devinit init_dma_hpt366(ide
+ 	if (dma_new != dma_old)
+ 		outb(dma_new, base + 2);
+ 
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 
+ 	printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
+ 			 hwif->name, base, base + 7);
+Index: linux-3.2/drivers/ide/ide-io-std.c
+===================================================================
+--- linux-3.2.orig/drivers/ide/ide-io-std.c
++++ linux-3.2/drivers/ide/ide-io-std.c
+@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, 
+ 		unsigned long uninitialized_var(flags);
+ 
+ 		if ((io_32bit & 2) && !mmio) {
+-			local_irq_save(flags);
++			local_irq_save_nort(flags);
+ 			ata_vlb_sync(io_ports->nsect_addr);
+ 		}
+ 
+@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, 
+ 			insl(data_addr, buf, words);
+ 
+ 		if ((io_32bit & 2) && !mmio)
+-			local_irq_restore(flags);
++			local_irq_restore_nort(flags);
+ 
+ 		if (((len + 1) & 3) < 2)
+ 			return;
+@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive,
+ 		unsigned long uninitialized_var(flags);
+ 
+ 		if ((io_32bit & 2) && !mmio) {
+-			local_irq_save(flags);
++			local_irq_save_nort(flags);
+ 			ata_vlb_sync(io_ports->nsect_addr);
+ 		}
+ 
+@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive,
+ 			outsl(data_addr, buf, words);
+ 
+ 		if ((io_32bit & 2) && !mmio)
+-			local_irq_restore(flags);
++			local_irq_restore_nort(flags);
+ 
+ 		if (((len + 1) & 3) < 2)
+ 			return;
+Index: linux-3.2/drivers/ide/ide-io.c
+===================================================================
+--- linux-3.2.orig/drivers/ide/ide-io.c
++++ linux-3.2/drivers/ide/ide-io.c
+@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat
+ 		/* disable_irq_nosync ?? */
+ 		disable_irq(hwif->irq);
+ 		/* local CPU only, as if we were handling an interrupt */
+-		local_irq_disable();
++		local_irq_disable_nort();
+ 		if (hwif->polling) {
+ 			startstop = handler(drive);
+ 		} else if (drive_is_ready(drive)) {
+Index: linux-3.2/drivers/ide/ide-iops.c
+===================================================================
+--- linux-3.2.orig/drivers/ide/ide-iops.c
++++ linux-3.2/drivers/ide/ide-iops.c
+@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, 
+ 				if ((stat & ATA_BUSY) == 0)
+ 					break;
+ 
+-				local_irq_restore(flags);
++				local_irq_restore_nort(flags);
+ 				*rstat = stat;
+ 				return -EBUSY;
+ 			}
+ 		}
+-		local_irq_restore(flags);
++		local_irq_restore_nort(flags);
+ 	}
+ 	/*
+ 	 * Allow status to settle, then read it again.
+Index: linux-3.2/drivers/ide/ide-probe.c
+===================================================================
+--- linux-3.2.orig/drivers/ide/ide-probe.c
++++ linux-3.2/drivers/ide/ide-probe.c
+@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri
+ 	int bswap = 1;
+ 
+ 	/* local CPU only; some systems need this */
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	/* read 512 bytes of id info */
+ 	hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 
+ 	drive->dev_flags |= IDE_DFLAG_ID_READ;
+ #ifdef DEBUG
+Index: linux-3.2/drivers/ide/ide-taskfile.c
+===================================================================
+--- linux-3.2.orig/drivers/ide/ide-taskfile.c
++++ linux-3.2/drivers/ide/ide-taskfile.c
+@@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
+ 
+ 		page_is_high = PageHighMem(page);
+ 		if (page_is_high)
+-			local_irq_save(flags);
++			local_irq_save_nort(flags);
+ 
+ 		buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
+ 
+@@ -272,7 +272,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
+ 		kunmap_atomic(buf, KM_BIO_SRC_IRQ);
+ 
+ 		if (page_is_high)
+-			local_irq_restore(flags);
++			local_irq_restore_nort(flags);
+ 
+ 		len -= nr_bytes;
+ 	}
+@@ -415,7 +415,7 @@ static ide_startstop_t pre_task_out_intr
+ 	}
+ 
+ 	if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
+-		local_irq_disable();
++		local_irq_disable_nort();
+ 
+ 	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
+ 
+Index: linux-3.2/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+===================================================================
+--- linux-3.2.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ linux-3.2/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -801,7 +801,7 @@ void ipoib_mcast_restart_task(struct wor
+ 
+ 	ipoib_mcast_stop_thread(dev, 0);
+ 
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	netif_addr_lock(dev);
+ 	spin_lock(&priv->lock);
+ 
+@@ -883,7 +883,7 @@ void ipoib_mcast_restart_task(struct wor
+ 
+ 	spin_unlock(&priv->lock);
+ 	netif_addr_unlock(dev);
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 
+ 	/* We have to cancel outside of the spinlock */
+ 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
+Index: linux-3.2/drivers/input/gameport/gameport.c
+===================================================================
+--- linux-3.2.orig/drivers/input/gameport/gameport.c
++++ linux-3.2/drivers/input/gameport/gameport.c
+@@ -87,12 +87,12 @@ static int gameport_measure_speed(struct
+ 	tx = 1 << 30;
+ 
+ 	for(i = 0; i < 50; i++) {
+-		local_irq_save(flags);
++		local_irq_save_nort(flags);
+ 		GET_TIME(t1);
+ 		for (t = 0; t < 50; t++) gameport_read(gameport);
+ 		GET_TIME(t2);
+ 		GET_TIME(t3);
+-		local_irq_restore(flags);
++		local_irq_restore_nort(flags);
+ 		udelay(i * 10);
+ 		if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
+ 	}
+@@ -111,11 +111,11 @@ static int gameport_measure_speed(struct
+ 	tx = 1 << 30;
+ 
+ 	for(i = 0; i < 50; i++) {
+-		local_irq_save(flags);
++		local_irq_save_nort(flags);
+ 		rdtscl(t1);
+ 		for (t = 0; t < 50; t++) gameport_read(gameport);
+ 		rdtscl(t2);
+-		local_irq_restore(flags);
++		local_irq_restore_nort(flags);
+ 		udelay(i * 10);
+ 		if (t2 - t1 < tx) tx = t2 - t1;
+ 	}
+Index: linux-3.2/arch/x86/include/asm/acpi.h
+===================================================================
+--- linux-3.2.orig/arch/x86/include/asm/acpi.h
++++ linux-3.2/arch/x86/include/asm/acpi.h
+@@ -51,8 +51,8 @@
+ 
+ #define ACPI_ASM_MACROS
+ #define BREAKPOINT3
+-#define ACPI_DISABLE_IRQS() local_irq_disable()
+-#define ACPI_ENABLE_IRQS()  local_irq_enable()
++#define ACPI_DISABLE_IRQS() local_irq_disable_nort()
++#define ACPI_ENABLE_IRQS()  local_irq_enable_nort()
+ #define ACPI_FLUSH_CPU_CACHE()	wbinvd()
+ 
+ int __acpi_acquire_global_lock(unsigned int *lock);
+Index: linux-3.2/kernel/user.c
+===================================================================
+--- linux-3.2.orig/kernel/user.c
++++ linux-3.2/kernel/user.c
+@@ -129,11 +129,11 @@ void free_uid(struct user_struct *up)
+ 	if (!up)
+ 		return;
+ 
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+ 		free_user(up, flags);
+ 	else
+-		local_irq_restore(flags);
++		local_irq_restore_nort(flags);
+ }
+ 
+ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
+Index: linux-3.2/kernel/res_counter.c
+===================================================================
+--- linux-3.2.orig/kernel/res_counter.c
++++ linux-3.2/kernel/res_counter.c
+@@ -43,7 +43,7 @@ int res_counter_charge(struct res_counte
+ 	struct res_counter *c, *u;
+ 
+ 	*limit_fail_at = NULL;
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	for (c = counter; c != NULL; c = c->parent) {
+ 		spin_lock(&c->lock);
+ 		ret = res_counter_charge_locked(c, val);
+@@ -62,7 +62,7 @@ undo:
+ 		spin_unlock(&u->lock);
+ 	}
+ done:
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 	return ret;
+ }
+ 
+@@ -79,13 +79,13 @@ void res_counter_uncharge(struct res_cou
+ 	unsigned long flags;
+ 	struct res_counter *c;
+ 
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	for (c = counter; c != NULL; c = c->parent) {
+ 		spin_lock(&c->lock);
+ 		res_counter_uncharge_locked(c, val);
+ 		spin_unlock(&c->lock);
+ 	}
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ }
+ 
+ 
+Index: linux-3.2/drivers/usb/core/hcd.c
+===================================================================
+--- linux-3.2.orig/drivers/usb/core/hcd.c
++++ linux-3.2/drivers/usb/core/hcd.c
+@@ -2146,7 +2146,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
+ 	 * when the first handler doesn't use it.  So let's just
+ 	 * assume it's never used.
+ 	 */
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 
+ 	if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) {
+ 		rc = IRQ_NONE;
+@@ -2159,7 +2159,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
+ 		rc = IRQ_HANDLED;
+ 	}
+ 
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 	return rc;
+ }
+ EXPORT_SYMBOL_GPL(usb_hcd_irq);
+Index: linux-3.2/drivers/tty/tty_ldisc.c
+===================================================================
+--- linux-3.2.orig/drivers/tty/tty_ldisc.c
++++ linux-3.2/drivers/tty/tty_ldisc.c
+@@ -70,7 +70,7 @@ static void put_ldisc(struct tty_ldisc *
+ 	 * We really want an "atomic_dec_and_lock_irqsave()",
+ 	 * but we don't have it, so this does it by hand.
+ 	 */
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
+ 		struct tty_ldisc_ops *ldo = ld->ops;
+ 
+@@ -81,7 +81,7 @@ static void put_ldisc(struct tty_ldisc *
+ 		kfree(ld);
+ 		return;
+ 	}
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 	wake_up(&tty_ldisc_idle);
+ }
+ 
+Index: linux-3.2/lib/scatterlist.c
+===================================================================
+--- linux-3.2.orig/lib/scatterlist.c
++++ linux-3.2/lib/scatterlist.c
+@@ -423,7 +423,7 @@ void sg_miter_stop(struct sg_mapping_ite
+ 			flush_kernel_dcache_page(miter->page);
+ 
+ 		if (miter->__flags & SG_MITER_ATOMIC) {
+-			WARN_ON(!irqs_disabled());
++			WARN_ON_NONRT(!irqs_disabled());
+ 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+ 		} else
+ 			kunmap(miter->page);
+@@ -463,7 +463,7 @@ static size_t sg_copy_buffer(struct scat
+ 
+ 	sg_miter_start(&miter, sgl, nents, sg_flags);
+ 
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 
+ 	while (sg_miter_next(&miter) && offset < buflen) {
+ 		unsigned int len;
+@@ -480,7 +480,7 @@ static size_t sg_copy_buffer(struct scat
+ 
+ 	sg_miter_stop(&miter);
+ 
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 	return offset;
+ }
+ 
+Index: linux-3.2/net/mac80211/rx.c
+===================================================================
+--- linux-3.2.orig/net/mac80211/rx.c
++++ linux-3.2/net/mac80211/rx.c
+@@ -2949,7 +2949,7 @@ void ieee80211_rx(struct ieee80211_hw *h
+ 	struct ieee80211_supported_band *sband;
+ 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ 
+-	WARN_ON_ONCE(softirq_count() == 0);
++	WARN_ON_ONCE_NONRT(softirq_count() == 0);
+ 
+ 	if (WARN_ON(status->band < 0 ||
+ 		    status->band >= IEEE80211_NUM_BANDS))
+Index: linux-3.2/include/linux/page_cgroup.h
+===================================================================
+--- linux-3.2.orig/include/linux/page_cgroup.h
++++ linux-3.2/include/linux/page_cgroup.h
+@@ -30,6 +30,10 @@ enum {
+  */
+ struct page_cgroup {
+ 	unsigned long flags;
++#ifdef CONFIG_PREEMPT_RT_BASE
++	spinlock_t pcg_lock;
++	spinlock_t pcm_lock;
++#endif
+ 	struct mem_cgroup *mem_cgroup;
+ 	struct list_head lru;		/* per cgroup LRU list */
+ };
+@@ -96,30 +100,54 @@ static inline void lock_page_cgroup(stru
+ 	 * Don't take this lock in IRQ context.
+ 	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
+ 	 */
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	bit_spin_lock(PCG_LOCK, &pc->flags);
++#else
++	spin_lock(&pc->pcg_lock);
++#endif
+ }
+ 
+ static inline void unlock_page_cgroup(struct page_cgroup *pc)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	bit_spin_unlock(PCG_LOCK, &pc->flags);
++#else
++	spin_unlock(&pc->pcg_lock);
++#endif
+ }
+ 
+ static inline void move_lock_page_cgroup(struct page_cgroup *pc,
+ 	unsigned long *flags)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	/*
+ 	 * We know updates to pc->flags of page cache's stats are from both of
+ 	 * usual context or IRQ context. Disable IRQ to avoid deadlock.
+ 	 */
+ 	local_irq_save(*flags);
+ 	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
++#else
++	spin_lock_irqsave(&pc->pcm_lock, *flags);
++#endif
+ }
+ 
+ static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
+ 	unsigned long *flags)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
+ 	local_irq_restore(*flags);
++#else
++	spin_unlock_irqrestore(&pc->pcm_lock, *flags);
++#endif
++}
++
++static inline void page_cgroup_lock_init(struct page_cgroup *pc)
++{
++#ifdef CONFIG_PREEMPT_RT_BASE
++	spin_lock_init(&pc->pcg_lock);
++	spin_lock_init(&pc->pcm_lock);
++#endif
+ }
+ 
+ #ifdef CONFIG_SPARSEMEM
+Index: linux-3.2/mm/page_cgroup.c
+===================================================================
+--- linux-3.2.orig/mm/page_cgroup.c
++++ linux-3.2/mm/page_cgroup.c
+@@ -17,6 +17,7 @@ static void __meminit init_page_cgroup(s
+ 	set_page_cgroup_array_id(pc, id);
+ 	pc->mem_cgroup = NULL;
+ 	INIT_LIST_HEAD(&pc->lru);
++	page_cgroup_lock_init(pc);
+ }
+ static unsigned long total_usage;
+ 
+Index: linux-3.2/fs/buffer.c
+===================================================================
+--- linux-3.2.orig/fs/buffer.c
++++ linux-3.2/fs/buffer.c
+@@ -331,8 +331,7 @@ static void end_buffer_async_read(struct
+ 	 * decide that the page is now completely done.
+ 	 */
+ 	first = page_buffers(page);
+-	local_irq_save(flags);
+-	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++	flags = bh_uptodate_lock_irqsave(first);
+ 	clear_buffer_async_read(bh);
+ 	unlock_buffer(bh);
+ 	tmp = bh;
+@@ -345,8 +344,7 @@ static void end_buffer_async_read(struct
+ 		}
+ 		tmp = tmp->b_this_page;
+ 	} while (tmp != bh);
+-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+-	local_irq_restore(flags);
++	bh_uptodate_unlock_irqrestore(first, flags);
+ 
+ 	/*
+ 	 * If none of the buffers had errors and they are all
+@@ -358,9 +356,7 @@ static void end_buffer_async_read(struct
+ 	return;
+ 
+ still_busy:
+-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+-	local_irq_restore(flags);
+-	return;
++	bh_uptodate_unlock_irqrestore(first, flags);
+ }
+ 
+ /*
+@@ -394,8 +390,7 @@ void end_buffer_async_write(struct buffe
+ 	}
+ 
+ 	first = page_buffers(page);
+-	local_irq_save(flags);
+-	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++	flags = bh_uptodate_lock_irqsave(first);
+ 
+ 	clear_buffer_async_write(bh);
+ 	unlock_buffer(bh);
+@@ -407,15 +402,12 @@ void end_buffer_async_write(struct buffe
+ 		}
+ 		tmp = tmp->b_this_page;
+ 	}
+-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+-	local_irq_restore(flags);
++	bh_uptodate_unlock_irqrestore(first, flags);
+ 	end_page_writeback(page);
+ 	return;
+ 
+ still_busy:
+-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+-	local_irq_restore(flags);
+-	return;
++	bh_uptodate_unlock_irqrestore(first, flags);
+ }
+ EXPORT_SYMBOL(end_buffer_async_write);
+ 
+@@ -3223,6 +3215,7 @@ struct buffer_head *alloc_buffer_head(gf
+ 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+ 	if (ret) {
+ 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
++		buffer_head_init_locks(ret);
+ 		preempt_disable();
+ 		__this_cpu_inc(bh_accounting.nr);
+ 		recalc_bh_state();
+Index: linux-3.2/fs/ntfs/aops.c
+===================================================================
+--- linux-3.2.orig/fs/ntfs/aops.c
++++ linux-3.2/fs/ntfs/aops.c
+@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s
+ 				"0x%llx.", (unsigned long long)bh->b_blocknr);
+ 	}
+ 	first = page_buffers(page);
+-	local_irq_save(flags);
+-	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++	flags = bh_uptodate_lock_irqsave(first);
+ 	clear_buffer_async_read(bh);
+ 	unlock_buffer(bh);
+ 	tmp = bh;
+@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s
+ 		}
+ 		tmp = tmp->b_this_page;
+ 	} while (tmp != bh);
+-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+-	local_irq_restore(flags);
++	bh_uptodate_unlock_irqrestore(first, flags);
+ 	/*
+ 	 * If none of the buffers had errors then we can set the page uptodate,
+ 	 * but we first have to perform the post read mst fixups, if the
+@@ -146,13 +144,13 @@ static void ntfs_end_buffer_async_read(s
+ 		recs = PAGE_CACHE_SIZE / rec_size;
+ 		/* Should have been verified before we got here... */
+ 		BUG_ON(!recs);
+-		local_irq_save(flags);
++		local_irq_save_nort(flags);
+ 		kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+ 		for (i = 0; i < recs; i++)
+ 			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
+ 					i * rec_size), rec_size);
+ 		kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+-		local_irq_restore(flags);
++		local_irq_restore_nort(flags);
+ 		flush_dcache_page(page);
+ 		if (likely(page_uptodate && !PageError(page)))
+ 			SetPageUptodate(page);
+@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s
+ 	unlock_page(page);
+ 	return;
+ still_busy:
+-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+-	local_irq_restore(flags);
+-	return;
++	bh_uptodate_unlock_irqrestore(first, flags);
+ }
+ 
+ /**
+Index: linux-3.2/include/linux/buffer_head.h
+===================================================================
+--- linux-3.2.orig/include/linux/buffer_head.h
++++ linux-3.2/include/linux/buffer_head.h
+@@ -72,8 +72,52 @@ struct buffer_head {
+ 	struct address_space *b_assoc_map;	/* mapping this buffer is
+ 						   associated with */
+ 	atomic_t b_count;		/* users using this buffer_head */
++#ifdef CONFIG_PREEMPT_RT_BASE
++	spinlock_t b_uptodate_lock;
++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
++    defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
++	spinlock_t b_state_lock;
++	spinlock_t b_journal_head_lock;
++#endif
++#endif
+ };
+ 
++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
++{
++	unsigned long flags;
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++	local_irq_save(flags);
++	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
++#else
++	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
++#endif
++	return flags;
++}
++
++static inline void
++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
++	local_irq_restore(flags);
++#else
++	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
++#endif
++}
++
++static inline void buffer_head_init_locks(struct buffer_head *bh)
++{
++#ifdef CONFIG_PREEMPT_RT_BASE
++	spin_lock_init(&bh->b_uptodate_lock);
++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
++    defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
++	spin_lock_init(&bh->b_state_lock);
++	spin_lock_init(&bh->b_journal_head_lock);
++#endif
++#endif
++}
++
+ /*
+  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
+  * and buffer_foo() functions.
+Index: linux-3.2/include/linux/jbd_common.h
+===================================================================
+--- linux-3.2.orig/include/linux/jbd_common.h
++++ linux-3.2/include/linux/jbd_common.h
+@@ -37,32 +37,56 @@ static inline struct journal_head *bh2jh
+ 
+ static inline void jbd_lock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	bit_spin_lock(BH_State, &bh->b_state);
++#else
++	spin_lock(&bh->b_state_lock);
++#endif
+ }
+ 
+ static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	return bit_spin_trylock(BH_State, &bh->b_state);
++#else
++	return spin_trylock(&bh->b_state_lock);
++#endif
+ }
+ 
+ static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	return bit_spin_is_locked(BH_State, &bh->b_state);
++#else
++	return spin_is_locked(&bh->b_state_lock);
++#endif
+ }
+ 
+ static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	bit_spin_unlock(BH_State, &bh->b_state);
++#else
++	spin_unlock(&bh->b_state_lock);
++#endif
+ }
+ 
+ static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	bit_spin_lock(BH_JournalHead, &bh->b_state);
++#else
++	spin_lock(&bh->b_journal_head_lock);
++#endif
+ }
+ 
+ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	bit_spin_unlock(BH_JournalHead, &bh->b_state);
++#else
++	spin_unlock(&bh->b_journal_head_lock);
++#endif
+ }
+ 
+ #endif
+Index: linux-3.2/lib/Kconfig.debug
+===================================================================
+--- linux-3.2.orig/lib/Kconfig.debug
++++ linux-3.2/lib/Kconfig.debug
+@@ -62,6 +62,28 @@ config MAGIC_SYSRQ
+ 	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
+ 	  unless you really know what this hack does.
+ 
++config MAGIC_SYSRQ_FORCE_PRINTK
++	bool "Force printk from Magic SysRq"
++	depends on MAGIC_SYSRQ && PREEMPT_RT_FULL
++	default n
++	help
++	  Allow the output from Magic SysRq to be output immediately, even if
++	  this causes large latencies.  This can cause performance problems
++	  for real-time processes.
++
++	  With PREEMPT_RT_FULL, printk() will not try to acquire the console lock
++	  when interrupts or preemption are disabled.  If the console lock is
++	  not acquired the printk() output will be buffered, but will not be
++	  output immediately.  Some drivers call into the Magic SysRq code
++	  with interrupts or preemption disabled, so the output of Magic SysRq
++	  will be buffered instead of printing immediately if this option is
++	  not selected.
++
++	  Even with this option selected, Magic SysRq output will be delayed
++	  if the attempt to acquire the console lock fails.
++
++	  Don't say Y unless you really know what this hack does.
++
+ config MAGIC_SYSRQ_DEFAULT_MASK
+ 	hex "Default mask for Magic SysRq keys on the console"
+ 	depends on MAGIC_SYSRQ
+@@ -151,7 +173,7 @@ config DEBUG_KERNEL
+ 
+ config DEBUG_SHIRQ
+ 	bool "Debug shared IRQ handlers"
+-	depends on DEBUG_KERNEL && GENERIC_HARDIRQS
++	depends on DEBUG_KERNEL && GENERIC_HARDIRQS && !PREEMPT_RT_BASE
+ 	help
+ 	  Enable this to generate a spurious interrupt as soon as a shared
+ 	  interrupt handler is registered, and just before one is deregistered.
+Index: linux-3.2/kernel/irq/handle.c
+===================================================================
+--- linux-3.2.orig/kernel/irq/handle.c
++++ linux-3.2/kernel/irq/handle.c
+@@ -156,8 +156,11 @@ handle_irq_event_percpu(struct irq_desc 
+ 		action = action->next;
+ 	} while (action);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
++	/* FIXME: Can we unbreak that ? */
+ 	if (random & IRQF_SAMPLE_RANDOM)
+ 		add_interrupt_randomness(irq);
++#endif
+ 
+ 	if (!noirqdebug)
+ 		note_interrupt(irq, desc, retval);
+Index: linux-3.2/kernel/irq/spurious.c
+===================================================================
+--- linux-3.2.orig/kernel/irq/spurious.c
++++ linux-3.2/kernel/irq/spurious.c
+@@ -339,6 +339,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
+ 
+ static int __init irqfixup_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	printk(KERN_WARNING "irqfixup boot option not supported "
++		"w/ CONFIG_PREEMPT_RT\n");
++	return 1;
++#endif
+ 	irqfixup = 1;
+ 	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+ 	printk(KERN_WARNING "This may impact system performance.\n");
+@@ -351,6 +356,11 @@ module_param(irqfixup, int, 0644);
+ 
+ static int __init irqpoll_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	printk(KERN_WARNING "irqpoll boot option not supported "
++		"w/ CONFIG_PREEMPT_RT\n");
++	return 1;
++#endif
+ 	irqfixup = 2;
+ 	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+ 				"enabled\n");
+Index: linux-3.2/kernel/irq/manage.c
+===================================================================
+--- linux-3.2.orig/kernel/irq/manage.c
++++ linux-3.2/kernel/irq/manage.c
+@@ -18,6 +18,7 @@
+ #include "internals.h"
+ 
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++# ifndef CONFIG_PREEMPT_RT_BASE
+ __read_mostly bool force_irqthreads;
+ 
+ static int __init setup_forced_irqthreads(char *arg)
+@@ -26,6 +27,7 @@ static int __init setup_forced_irqthread
+ 	return 0;
+ }
+ early_param("threadirqs", setup_forced_irqthreads);
++# endif
+ #endif
+ 
+ /**
+Index: linux-3.2/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ linux-3.2/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -2236,11 +2236,8 @@ static netdev_tx_t atl1c_xmit_frame(stru
+ 	}
+ 
+ 	tpd_req = atl1c_cal_tpd_req(skb);
+-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
+-		if (netif_msg_pktdata(adapter))
+-			dev_info(&adapter->pdev->dev, "tx locked\n");
+-		return NETDEV_TX_LOCKED;
+-	}
++	spin_lock_irqsave(&adapter->tx_lock, flags);
++
+ 	if (skb->mark == 0x01)
+ 		type = atl1c_trans_high;
+ 	else
+Index: linux-3.2/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ linux-3.2/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1819,8 +1819,7 @@ static netdev_tx_t atl1e_xmit_frame(stru
+ 		return NETDEV_TX_OK;
+ 	}
+ 	tpd_req = atl1e_cal_tdp_req(skb);
+-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
+-		return NETDEV_TX_LOCKED;
++	spin_lock_irqsave(&adapter->tx_lock, flags);
+ 
+ 	if (atl1e_tpd_avail(adapter) < tpd_req) {
+ 		/* no enough descriptor, just stop queue */
+Index: linux-3.2/drivers/net/ethernet/chelsio/cxgb/sge.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/chelsio/cxgb/sge.c
++++ linux-3.2/drivers/net/ethernet/chelsio/cxgb/sge.c
+@@ -1678,8 +1678,7 @@ static int t1_sge_tx(struct sk_buff *skb
+ 	struct cmdQ *q = &sge->cmdQ[qid];
+ 	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
+ 
+-	if (!spin_trylock(&q->lock))
+-		return NETDEV_TX_LOCKED;
++	spin_lock(&q->lock);
+ 
+ 	reclaim_completed_tx(sge, q);
+ 
+Index: linux-3.2/drivers/net/ethernet/neterion/s2io.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/neterion/s2io.c
++++ linux-3.2/drivers/net/ethernet/neterion/s2io.c
+@@ -4090,12 +4090,7 @@ static netdev_tx_t s2io_xmit(struct sk_b
+ 			[skb->priority & (MAX_TX_FIFOS - 1)];
+ 	fifo = &mac_control->fifos[queue];
+ 
+-	if (do_spin_lock)
+-		spin_lock_irqsave(&fifo->tx_lock, flags);
+-	else {
+-		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
+-			return NETDEV_TX_LOCKED;
+-	}
++	spin_lock_irqsave(&fifo->tx_lock, flags);
+ 
+ 	if (sp->config.multiq) {
+ 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
+Index: linux-3.2/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ linux-3.2/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -1931,10 +1931,9 @@ static int pch_gbe_xmit_frame(struct sk_
+ 		adapter->stats.tx_length_errors++;
+ 		return NETDEV_TX_OK;
+ 	}
+-	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
+-		/* Collision - tell upper layer to requeue */
+-		return NETDEV_TX_LOCKED;
+-	}
++
++	spin_lock_irqsave(&tx_ring->tx_lock, flags);
++
+ 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
+ 		netif_stop_queue(netdev);
+ 		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+Index: linux-3.2/drivers/net/ethernet/tehuti/tehuti.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/tehuti/tehuti.c
++++ linux-3.2/drivers/net/ethernet/tehuti/tehuti.c
+@@ -1605,13 +1605,8 @@ static netdev_tx_t bdx_tx_transmit(struc
+ 	unsigned long flags;
+ 
+ 	ENTER;
+-	local_irq_save(flags);
+-	if (!spin_trylock(&priv->tx_lock)) {
+-		local_irq_restore(flags);
+-		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
+-		    BDX_DRV_NAME, ndev->name);
+-		return NETDEV_TX_LOCKED;
+-	}
++
++	spin_lock_irqsave(&priv->tx_lock, flags);
+ 
+ 	/* build tx descriptor */
+ 	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
+Index: linux-3.2/drivers/net/rionet.c
+===================================================================
+--- linux-3.2.orig/drivers/net/rionet.c
++++ linux-3.2/drivers/net/rionet.c
+@@ -176,11 +176,7 @@ static int rionet_start_xmit(struct sk_b
+ 	u16 destid;
+ 	unsigned long flags;
+ 
+-	local_irq_save(flags);
+-	if (!spin_trylock(&rnet->tx_lock)) {
+-		local_irq_restore(flags);
+-		return NETDEV_TX_LOCKED;
+-	}
++	spin_lock_irqsave(&rnet->tx_lock, flags);
+ 
+ 	if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
+ 		netif_stop_queue(ndev);
+Index: linux-3.2/drivers/net/ethernet/3com/3c59x.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/3com/3c59x.c
++++ linux-3.2/drivers/net/ethernet/3com/3c59x.c
+@@ -843,9 +843,9 @@ static void poll_vortex(struct net_devic
+ {
+ 	struct vortex_private *vp = netdev_priv(dev);
+ 	unsigned long flags;
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ }
+ #endif
+ 
+@@ -1921,12 +1921,12 @@ static void vortex_tx_timeout(struct net
+ 			 * Block interrupts because vortex_interrupt does a bare spin_lock()
+ 			 */
+ 			unsigned long flags;
+-			local_irq_save(flags);
++			local_irq_save_nort(flags);
+ 			if (vp->full_bus_master_tx)
+ 				boomerang_interrupt(dev->irq, dev);
+ 			else
+ 				vortex_interrupt(dev->irq, dev);
+-			local_irq_restore(flags);
++			local_irq_restore_nort(flags);
+ 		}
+ 	}
+ 
+Index: linux-3.2/drivers/net/ethernet/freescale/gianfar.c
+===================================================================
+--- linux-3.2.orig/drivers/net/ethernet/freescale/gianfar.c
++++ linux-3.2/drivers/net/ethernet/freescale/gianfar.c
+@@ -1671,7 +1671,7 @@ void stop_gfar(struct net_device *dev)
+ 
+ 
+ 	/* Lock it down */
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	lock_tx_qs(priv);
+ 	lock_rx_qs(priv);
+ 
+@@ -1679,7 +1679,7 @@ void stop_gfar(struct net_device *dev)
+ 
+ 	unlock_rx_qs(priv);
+ 	unlock_tx_qs(priv);
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ 
+ 	/* Free the IRQs */
+ 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+@@ -2949,7 +2949,7 @@ static void adjust_link(struct net_devic
+ 	struct phy_device *phydev = priv->phydev;
+ 	int new_state = 0;
+ 
+-	local_irq_save(flags);
++	local_irq_save_nort(flags);
+ 	lock_tx_qs(priv);
+ 
+ 	if (phydev->link) {
+@@ -3016,7 +3016,7 @@ static void adjust_link(struct net_devic
+ 	if (new_state && netif_msg_link(priv))
+ 		phy_print_status(phydev);
+ 	unlock_tx_qs(priv);
+-	local_irq_restore(flags);
++	local_irq_restore_nort(flags);
+ }
+ 
+ /* Update the hash table based on the current list of multicast
+Index: linux-3.2/drivers/usb/host/ohci-hcd.c
+===================================================================
+--- linux-3.2.orig/drivers/usb/host/ohci-hcd.c
++++ linux-3.2/drivers/usb/host/ohci-hcd.c
+@@ -833,9 +833,13 @@ static irqreturn_t ohci_irq (struct usb_
+ 	}
+ 
+ 	if (ints & OHCI_INTR_WDH) {
+-		spin_lock (&ohci->lock);
+-		dl_done_list (ohci);
+-		spin_unlock (&ohci->lock);
++		if (ohci->hcca->done_head == 0) {
++			ints &= ~OHCI_INTR_WDH;
++		} else {
++			spin_lock (&ohci->lock);
++			dl_done_list (ohci);
++			spin_unlock (&ohci->lock);
++		}
+ 	}
+ 
+ 	if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) {
+Index: linux-3.2/include/linux/percpu.h
+===================================================================
+--- linux-3.2.orig/include/linux/percpu.h
++++ linux-3.2/include/linux/percpu.h
+@@ -48,6 +48,31 @@
+ 	preempt_enable();				\
+ } while (0)
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define get_local_var(var)	get_cpu_var(var)
++# define put_local_var(var)	put_cpu_var(var)
++# define get_local_ptr(var)	get_cpu_ptr(var)
++# define put_local_ptr(var)	put_cpu_ptr(var)
++#else
++# define get_local_var(var) (*({			\
++	migrate_disable();				\
++	&__get_cpu_var(var); }))
++
++# define put_local_var(var) do {			\
++	(void)&(var);					\
++	migrate_enable();				\
++} while (0)
++
++# define get_local_ptr(var) ({				\
++	migrate_disable();				\
++	this_cpu_ptr(var); })
++
++# define put_local_ptr(var) do {			\
++	(void)(var);					\
++	migrate_enable();				\
++} while (0)
++#endif
++
+ /* minimum unit size, also is the maximum supported allocation size */
+ #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
+ 
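
The get_local_var()/put_local_var() (and _ptr) helpers added above behave exactly like get_cpu_var()/put_cpu_var() on non-RT kernels; with PREEMPT_RT_FULL they only disable migration, so the section stays preemptible. A minimal usage sketch, with a hypothetical per-CPU variable that is not part of this patch:

	static DEFINE_PER_CPU(unsigned long, example_count);

	static void example_bump(void)
	{
		/* !RT: preemption disabled; RT: only migration disabled */
		unsigned long *cnt = &get_local_var(example_count);

		(*cnt)++;
		put_local_var(example_count);
	}

On RT such a section can still be preempted by another task on the same CPU, which is why real users pair these helpers with the local locks introduced in the next hunk.
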
+Index: linux-3.2/include/linux/locallock.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/linux/locallock.h
+@@ -0,0 +1,230 @@
++#ifndef _LINUX_LOCALLOCK_H
++#define _LINUX_LOCALLOCK_H
++
++#include <linux/spinlock.h>
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define LL_WARN(cond)	WARN_ON(cond)
++#else
++# define LL_WARN(cond)	do { } while (0)
++#endif
++
++/*
++ * per cpu lock based substitute for local_irq_*()
++ */
++struct local_irq_lock {
++	spinlock_t		lock;
++	struct task_struct	*owner;
++	int			nestcnt;
++	unsigned long		flags;
++};
++
++#define DEFINE_LOCAL_IRQ_LOCK(lvar)					\
++	DEFINE_PER_CPU(struct local_irq_lock, lvar) = {			\
++		.lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
++
++#define local_irq_lock_init(lvar)					\
++	do {								\
++		int __cpu;						\
++		for_each_possible_cpu(__cpu)				\
++			spin_lock_init(&per_cpu(lvar, __cpu).lock);	\
++	} while (0)
++
++static inline void __local_lock(struct local_irq_lock *lv)
++{
++	if (lv->owner != current) {
++		spin_lock(&lv->lock);
++		LL_WARN(lv->owner);
++		LL_WARN(lv->nestcnt);
++		lv->owner = current;
++	}
++	lv->nestcnt++;
++}
++
++#define local_lock(lvar)					\
++	do { __local_lock(&get_local_var(lvar)); } while (0)
++
++static inline int __local_trylock(struct local_irq_lock *lv)
++{
++	if (lv->owner != current && spin_trylock(&lv->lock)) {
++		LL_WARN(lv->owner);
++		LL_WARN(lv->nestcnt);
++		lv->owner = current;
++		lv->nestcnt = 1;
++		return 1;
++	}
++	return 0;
++}
++
++#define local_trylock(lvar)						\
++	({								\
++		int __locked;						\
++		__locked = __local_trylock(&get_local_var(lvar));	\
++		if (!__locked)						\
++			put_local_var(lvar);				\
++		__locked;						\
++	})
++
++static inline void __local_unlock(struct local_irq_lock *lv)
++{
++	LL_WARN(lv->nestcnt == 0);
++	LL_WARN(lv->owner != current);
++	if (--lv->nestcnt)
++		return;
++
++	lv->owner = NULL;
++	spin_unlock(&lv->lock);
++}
++
++#define local_unlock(lvar)					\
++	do {							\
++		__local_unlock(&__get_cpu_var(lvar));		\
++		put_local_var(lvar);				\
++	} while (0)
++
++static inline void __local_lock_irq(struct local_irq_lock *lv)
++{
++	spin_lock_irqsave(&lv->lock, lv->flags);
++	LL_WARN(lv->owner);
++	LL_WARN(lv->nestcnt);
++	lv->owner = current;
++	lv->nestcnt = 1;
++}
++
++#define local_lock_irq(lvar)						\
++	do { __local_lock_irq(&get_local_var(lvar)); } while (0)
++
++static inline void __local_unlock_irq(struct local_irq_lock *lv)
++{
++	LL_WARN(!lv->nestcnt);
++	LL_WARN(lv->owner != current);
++	lv->owner = NULL;
++	lv->nestcnt = 0;
++	spin_unlock_irq(&lv->lock);
++}
++
++#define local_unlock_irq(lvar)						\
++	do {								\
++		__local_unlock_irq(&__get_cpu_var(lvar));		\
++		put_local_var(lvar);					\
++	} while (0)
++
++static inline int __local_lock_irqsave(struct local_irq_lock *lv)
++{
++	if (lv->owner != current) {
++		__local_lock_irq(lv);
++		return 0;
++	} else {
++		lv->nestcnt++;
++		return 1;
++	}
++}
++
++#define local_lock_irqsave(lvar, _flags)				\
++	do {								\
++		if (__local_lock_irqsave(&get_local_var(lvar)))		\
++			put_local_var(lvar);				\
++		_flags = __get_cpu_var(lvar).flags;			\
++	} while (0)
++
++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
++					    unsigned long flags)
++{
++	LL_WARN(!lv->nestcnt);
++	LL_WARN(lv->owner != current);
++	if (--lv->nestcnt)
++		return 0;
++
++	lv->owner = NULL;
++	spin_unlock_irqrestore(&lv->lock, lv->flags);
++	return 1;
++}
++
++#define local_unlock_irqrestore(lvar, flags)				\
++	do {								\
++		if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \
++			put_local_var(lvar);				\
++	} while (0)
++
++#define local_spin_trylock_irq(lvar, lock)				\
++	({								\
++		int __locked;						\
++		local_lock_irq(lvar);					\
++		__locked = spin_trylock(lock);				\
++		if (!__locked)						\
++			local_unlock_irq(lvar);				\
++		__locked;						\
++	})
++
++#define local_spin_lock_irq(lvar, lock)					\
++	do {								\
++		local_lock_irq(lvar);					\
++		spin_lock(lock);					\
++	} while (0)
++
++#define local_spin_unlock_irq(lvar, lock)				\
++	do {								\
++		spin_unlock(lock);					\
++		local_unlock_irq(lvar);					\
++	} while (0)
++
++#define local_spin_lock_irqsave(lvar, lock, flags)			\
++	do {								\
++		local_lock_irqsave(lvar, flags);			\
++		spin_lock(lock);					\
++	} while (0)
++
++#define local_spin_unlock_irqrestore(lvar, lock, flags)			\
++	do {								\
++		spin_unlock(lock);					\
++		local_unlock_irqrestore(lvar, flags);			\
++	} while (0)
++
++#define get_locked_var(lvar, var)					\
++	(*({								\
++		local_lock(lvar);					\
++		&__get_cpu_var(var);					\
++	}))
++
++#define put_locked_var(lvar, var)		local_unlock(lvar)
++
++#define local_lock_cpu(lvar)						\
++	({								\
++		local_lock(lvar);					\
++		smp_processor_id();					\
++	})
++
++#define local_unlock_cpu(lvar)			local_unlock(lvar)
++
++#else /* PREEMPT_RT_BASE */
++
++#define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
++
++static inline void local_irq_lock_init(int lvar) { }
++
++#define local_lock(lvar)			preempt_disable()
++#define local_unlock(lvar)			preempt_enable()
++#define local_lock_irq(lvar)			local_irq_disable()
++#define local_unlock_irq(lvar)			local_irq_enable()
++#define local_lock_irqsave(lvar, flags)		local_irq_save(flags)
++#define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
++
++#define local_spin_trylock_irq(lvar, lock)	spin_trylock_irq(lock)
++#define local_spin_lock_irq(lvar, lock)		spin_lock_irq(lock)
++#define local_spin_unlock_irq(lvar, lock)	spin_unlock_irq(lock)
++#define local_spin_lock_irqsave(lvar, lock, flags)	\
++	spin_lock_irqsave(lock, flags)
++#define local_spin_unlock_irqrestore(lvar, lock, flags)	\
++	spin_unlock_irqrestore(lock, flags)
++
++#define get_locked_var(lvar, var)		get_cpu_var(var)
++#define put_locked_var(lvar, var)		put_cpu_var(var)
++
++#define local_lock_cpu(lvar)			get_cpu()
++#define local_unlock_cpu(lvar)			put_cpu()
++
++#endif
++
++#endif
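
The local lock API above provides named per-CPU critical sections: on non-RT kernels the macros compile down to plain preempt/irq disabling, while on PREEMPT_RT_BASE they take a per-CPU spinlock_t (a sleeping lock on RT) with owner/nestcnt recursion tracking. The mm/swap.c conversion below is the first user; a minimal sketch of the intended pattern, with a hypothetical lock and counter that are not part of this patch:

	static DEFINE_LOCAL_IRQ_LOCK(example_lock);
	static DEFINE_PER_CPU(unsigned long, example_stat);

	static void example_inc(void)
	{
		unsigned long flags;

		/* !RT: local_irq_save(); RT: lock the per-CPU example_lock */
		local_lock_irqsave(example_lock, flags);
		__get_cpu_var(example_stat)++;
		local_unlock_irqrestore(example_lock, flags);
	}

As swap_setup() does below, local_irq_lock_init(example_lock) should be called once during boot so every CPU's lock is initialized.
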
+Index: linux-3.2/include/linux/smp.h
+===================================================================
+--- linux-3.2.orig/include/linux/smp.h
++++ linux-3.2/include/linux/smp.h
+@@ -80,7 +80,6 @@ void __smp_call_function_single(int cpui
+ 
+ int smp_call_function_any(const struct cpumask *mask,
+ 			  smp_call_func_t func, void *info, int wait);
+-
+ /*
+  * Generic and arch helpers
+  */
+@@ -173,6 +172,9 @@ smp_call_function_any(const struct cpuma
+ #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu()		preempt_enable()
+ 
++#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
++#define put_cpu_light()		migrate_enable()
++
+ /*
+  * Callback to arch code if there's nosmp or maxcpus=0 on the
+  * boot command line:
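
get_cpu_light()/put_cpu_light() mirror get_cpu()/put_cpu() but use migrate_disable()/migrate_enable(), so on RT the task stays on its CPU without disabling preemption. An illustrative fragment; example_do_percpu_work() is a placeholder, not a real function:

	static void example(void)
	{
		int cpu = get_cpu_light();	/* pinned to this CPU, still preemptible on RT */

		/* 'cpu' cannot change underneath us here */
		example_do_percpu_work(cpu);

		put_cpu_light();
	}
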
+Index: linux-3.2/mm/swap.c
+===================================================================
+--- linux-3.2.orig/mm/swap.c
++++ linux-3.2/mm/swap.c
+@@ -31,6 +31,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
++#include <linux/locallock.h>
+ 
+ #include "internal.h"
+ 
+@@ -41,6 +42,9 @@ static DEFINE_PER_CPU(struct pagevec[NR_
+ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
+ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+ 
++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
++static DEFINE_LOCAL_IRQ_LOCK(swap_lock);
++
+ /*
+  * This path almost never happens for VM activity - pages are normally
+  * freed via pagevecs.  But it gets used by networking.
+@@ -267,11 +271,11 @@ void rotate_reclaimable_page(struct page
+ 		unsigned long flags;
+ 
+ 		page_cache_get(page);
+-		local_irq_save(flags);
++		local_lock_irqsave(rotate_lock, flags);
+ 		pvec = &__get_cpu_var(lru_rotate_pvecs);
+ 		if (!pagevec_add(pvec, page))
+ 			pagevec_move_tail(pvec);
+-		local_irq_restore(flags);
++		local_unlock_irqrestore(rotate_lock, flags);
+ 	}
+ }
+ 
+@@ -327,12 +331,13 @@ static void activate_page_drain(int cpu)
+ void activate_page(struct page *page)
+ {
+ 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
++		struct pagevec *pvec = &get_locked_var(swap_lock,
++						       activate_page_pvecs);
+ 
+ 		page_cache_get(page);
+ 		if (!pagevec_add(pvec, page))
+ 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
+-		put_cpu_var(activate_page_pvecs);
++		put_locked_var(swap_lock, activate_page_pvecs);
+ 	}
+ }
+ 
+@@ -373,12 +378,12 @@ EXPORT_SYMBOL(mark_page_accessed);
+ 
+ void __lru_cache_add(struct page *page, enum lru_list lru)
+ {
+-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
++	struct pagevec *pvec = &get_locked_var(swap_lock, lru_add_pvecs)[lru];
+ 
+ 	page_cache_get(page);
+ 	if (!pagevec_add(pvec, page))
+ 		____pagevec_lru_add(pvec, lru);
+-	put_cpu_var(lru_add_pvecs);
++	put_locked_var(swap_lock, lru_add_pvecs);
+ }
+ EXPORT_SYMBOL(__lru_cache_add);
+ 
+@@ -512,9 +517,9 @@ static void drain_cpu_pagevecs(int cpu)
+ 		unsigned long flags;
+ 
+ 		/* No harm done if a racing interrupt already did this */
+-		local_irq_save(flags);
++		local_lock_irqsave(rotate_lock, flags);
+ 		pagevec_move_tail(pvec);
+-		local_irq_restore(flags);
++		local_unlock_irqrestore(rotate_lock, flags);
+ 	}
+ 
+ 	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+@@ -542,18 +547,19 @@ void deactivate_page(struct page *page)
+ 		return;
+ 
+ 	if (likely(get_page_unless_zero(page))) {
+-		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
++		struct pagevec *pvec = &get_locked_var(swap_lock,
++						       lru_deactivate_pvecs);
+ 
+ 		if (!pagevec_add(pvec, page))
+ 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+-		put_cpu_var(lru_deactivate_pvecs);
++		put_locked_var(swap_lock, lru_deactivate_pvecs);
+ 	}
+ }
+ 
+ void lru_add_drain(void)
+ {
+-	drain_cpu_pagevecs(get_cpu());
+-	put_cpu();
++	drain_cpu_pagevecs(local_lock_cpu(swap_lock));
++	local_unlock_cpu(swap_lock);
+ }
+ 
+ static void lru_add_drain_per_cpu(struct work_struct *dummy)
+@@ -783,6 +789,9 @@ void __init swap_setup(void)
+ {
+ 	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
+ 
++	local_irq_lock_init(rotate_lock);
++	local_irq_lock_init(swap_lock);
++
+ #ifdef CONFIG_SWAP
+ 	bdi_init(swapper_space.backing_dev_info);
+ #endif
+Index: linux-3.2/mm/vmscan.c
+===================================================================
+--- linux-3.2.orig/mm/vmscan.c
++++ linux-3.2/mm/vmscan.c
+@@ -1344,8 +1344,8 @@ static int too_many_isolated(struct zone
+  */
+ static noinline_for_stack void
+ putback_lru_pages(struct zone *zone, struct scan_control *sc,
+-				unsigned long nr_anon, unsigned long nr_file,
+-				struct list_head *page_list)
++		  unsigned long nr_anon, unsigned long nr_file,
++		  struct list_head *page_list, unsigned long nr_reclaimed)
+ {
+ 	struct page *page;
+ 	struct pagevec pvec;
+@@ -1356,7 +1356,12 @@ putback_lru_pages(struct zone *zone, str
+ 	/*
+ 	 * Put back any unfreeable pages.
+ 	 */
+-	spin_lock(&zone->lru_lock);
++	spin_lock_irq(&zone->lru_lock);
++
++	if (current_is_kswapd())
++		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
++	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
++
+ 	while (!list_empty(page_list)) {
+ 		int lru;
+ 		page = lru_to_page(page_list);
+@@ -1539,12 +1544,7 @@ shrink_inactive_list(unsigned long nr_to
+ 					priority, &nr_dirty, &nr_writeback);
+ 	}
+ 
+-	local_irq_disable();
+-	if (current_is_kswapd())
+-		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
+-	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+-
+-	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
++	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list, nr_reclaimed);
+ 
+ 	/*
+ 	 * If reclaim is isolating dirty pages under writeback, it implies
+Index: linux-3.2/include/linux/vmstat.h
+===================================================================
+--- linux-3.2.orig/include/linux/vmstat.h
++++ linux-3.2/include/linux/vmstat.h
+@@ -29,7 +29,9 @@ DECLARE_PER_CPU(struct vm_event_state, v
+ 
+ static inline void __count_vm_event(enum vm_event_item item)
+ {
++	preempt_disable_rt();
+ 	__this_cpu_inc(vm_event_states.event[item]);
++	preempt_enable_rt();
+ }
+ 
+ static inline void count_vm_event(enum vm_event_item item)
+@@ -39,7 +41,9 @@ static inline void count_vm_event(enum v
+ 
+ static inline void __count_vm_events(enum vm_event_item item, long delta)
+ {
++	preempt_disable_rt();
+ 	__this_cpu_add(vm_event_states.event[item], delta);
++	preempt_enable_rt();
+ }
+ 
+ static inline void count_vm_events(enum vm_event_item item, long delta)
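
The __count_vm_event()/__count_vm_events() fast paths may now be reached from preemptible context on RT, hence the preempt_disable_rt()/preempt_enable_rt() brackets around the this_cpu operations. Those helpers are defined elsewhere in this patch; the presumed shape (a hedged sketch, not quoted from the patch) is:

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define preempt_disable_rt()	preempt_disable()
	# define preempt_enable_rt()	preempt_enable()
	#else
	# define preempt_disable_rt()	do { } while (0)
	# define preempt_enable_rt()	do { } while (0)
	#endif

i.e. the counter updates become preemption-safe on RT while staying zero-cost on non-RT builds.
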
+Index: linux-3.2/mm/vmstat.c
+===================================================================
+--- linux-3.2.orig/mm/vmstat.c
++++ linux-3.2/mm/vmstat.c
+@@ -216,6 +216,7 @@ void __mod_zone_page_state(struct zone *
+ 	long x;
+ 	long t;
+ 
++	preempt_disable_rt();
+ 	x = delta + __this_cpu_read(*p);
+ 
+ 	t = __this_cpu_read(pcp->stat_threshold);
+@@ -225,6 +226,7 @@ void __mod_zone_page_state(struct zone *
+ 		x = 0;
+ 	}
+ 	__this_cpu_write(*p, x);
++	preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_zone_page_state);
+ 
+@@ -257,6 +259,7 @@ void __inc_zone_state(struct zone *zone,
+ 	s8 __percpu *p = pcp->vm_stat_diff + item;
+ 	s8 v, t;
+ 
++	preempt_disable_rt();
+ 	v = __this_cpu_inc_return(*p);
+ 	t = __this_cpu_read(pcp->stat_threshold);
+ 	if (unlikely(v > t)) {
+@@ -265,6 +268,7 @@ void __inc_zone_state(struct zone *zone,
+ 		zone_page_state_add(v + overstep, zone, item);
+ 		__this_cpu_write(*p, -overstep);
+ 	}
++	preempt_enable_rt();
+ }
+ 
+ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+@@ -279,6 +283,7 @@ void __dec_zone_state(struct zone *zone,
+ 	s8 __percpu *p = pcp->vm_stat_diff + item;
+ 	s8 v, t;
+ 
++	preempt_disable_rt();
+ 	v = __this_cpu_dec_return(*p);
+ 	t = __this_cpu_read(pcp->stat_threshold);
+ 	if (unlikely(v < - t)) {
+@@ -287,6 +292,7 @@ void __dec_zone_state(struct zone *zone,
+ 		zone_page_state_add(v - overstep, zone, item);
+ 		__this_cpu_write(*p, overstep);
+ 	}
++	preempt_enable_rt();
+ }
+ 
+ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+Index: linux-3.2/include/linux/mm.h
+===================================================================
+--- linux-3.2.orig/include/linux/mm.h
++++ linux-3.2/include/linux/mm.h
+@@ -1194,27 +1194,59 @@ static inline pmd_t *pmd_alloc(struct mm
+  * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
+  * When freeing, reset page->mapping so free_pages_check won't complain.
+  */
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #define __pte_lockptr(page)	&((page)->ptl)
+-#define pte_lock_init(_page)	do {					\
+-	spin_lock_init(__pte_lockptr(_page));				\
+-} while (0)
++
++static inline struct page *pte_lock_init(struct page *page)
++{
++	spin_lock_init(__pte_lockptr(page));
++	return page;
++}
++
+ #define pte_lock_deinit(page)	((page)->mapping = NULL)
++
++#else /* !PREEMPT_RT_FULL */
++
++/*
++ * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the
++ * page frame, hence it only has a pointer and we need to dynamically
++ * allocate the lock when we allocate PTE-pages.
++ *
++ * This is an overall win, since only a small fraction of the pages
++ * will be PTE pages under normal circumstances.
++ */
++
++#define __pte_lockptr(page)	((page)->ptl)
++
++extern struct page *pte_lock_init(struct page *page);
++extern void pte_lock_deinit(struct page *page);
++
++#endif /* PREEMPT_RT_FULL */
++
+ #define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
+ #else	/* !USE_SPLIT_PTLOCKS */
+ /*
+  * We use mm->page_table_lock to guard all pagetable pages of the mm.
+  */
+-#define pte_lock_init(page)	do {} while (0)
++static inline struct page *pte_lock_init(struct page *page) { return page; }
+ #define pte_lock_deinit(page)	do {} while (0)
+ #define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
+ #endif /* USE_SPLIT_PTLOCKS */
+ 
+-static inline void pgtable_page_ctor(struct page *page)
++static inline struct page *__pgtable_page_ctor(struct page *page)
+ {
+-	pte_lock_init(page);
+-	inc_zone_page_state(page, NR_PAGETABLE);
++	page = pte_lock_init(page);
++	if (page)
++		inc_zone_page_state(page, NR_PAGETABLE);
++	return page;
+ }
+ 
++#define pgtable_page_ctor(page)				\
++do {							\
++	page = __pgtable_page_ctor(page);		\
++} while (0)
++
+ static inline void pgtable_page_dtor(struct page *page)
+ {
+ 	pte_lock_deinit(page);
+Index: linux-3.2/include/linux/mm_types.h
+===================================================================
+--- linux-3.2.orig/include/linux/mm_types.h
++++ linux-3.2/include/linux/mm_types.h
+@@ -12,6 +12,7 @@
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+ #include <linux/page-debug-flags.h>
++#include <linux/rcupdate.h>
+ #include <asm/page.h>
+ #include <asm/mmu.h>
+ 
+@@ -118,7 +119,11 @@ struct page {
+ 						 * system if PG_buddy is set.
+ 						 */
+ #if USE_SPLIT_PTLOCKS
+-		spinlock_t ptl;
++# ifndef CONFIG_PREEMPT_RT_FULL
++	    spinlock_t ptl;
++# else
++	    spinlock_t *ptl;
++# endif
+ #endif
+ 		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+ 		struct page *first_page;	/* Compound tail pages */
+@@ -389,6 +394,9 @@ struct mm_struct {
+ #ifdef CONFIG_CPUMASK_OFFSTACK
+ 	struct cpumask cpumask_allocation;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct rcu_head delayed_drop;
++#endif
+ };
+ 
+ static inline void mm_init_cpumask(struct mm_struct *mm)
+Index: linux-3.2/init/Kconfig
+===================================================================
+--- linux-3.2.orig/init/Kconfig
++++ linux-3.2/init/Kconfig
+@@ -1239,6 +1239,7 @@ config SLAB
+ 
+ config SLUB
+ 	bool "SLUB (Unqueued Allocator)"
++	depends on !PREEMPT_RT_FULL
+ 	help
+ 	   SLUB is a slab allocator that minimizes cache line usage
+ 	   instead of managing queues of cached objects (SLAB approach).
+@@ -1250,6 +1251,7 @@ config SLUB
+ config SLOB
+ 	depends on EXPERT
+ 	bool "SLOB (Simple Allocator)"
++	depends on !PREEMPT_RT_FULL
+ 	help
+ 	   SLOB replaces the stock allocator with a drastically simpler
+ 	   allocator. SLOB is generally more space efficient but
+Index: linux-3.2/include/linux/radix-tree.h
+===================================================================
+--- linux-3.2.orig/include/linux/radix-tree.h
++++ linux-3.2/include/linux/radix-tree.h
+@@ -231,7 +231,13 @@ unsigned long radix_tree_next_hole(struc
+ 				unsigned long index, unsigned long max_scan);
+ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+ 				unsigned long index, unsigned long max_scan);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int radix_tree_preload(gfp_t gfp_mask);
++#else
++static inline int radix_tree_preload(gfp_t gm) { return 0; }
++#endif
++
+ void radix_tree_init(void);
+ void *radix_tree_tag_set(struct radix_tree_root *root,
+ 			unsigned long index, unsigned int tag);
+@@ -256,7 +262,7 @@ unsigned long radix_tree_locate_item(str
+ 
+ static inline void radix_tree_preload_end(void)
+ {
+-	preempt_enable();
++	preempt_enable_nort();
+ }
+ 
+ #endif /* _LINUX_RADIX_TREE_H */
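
With PREEMPT_RT_FULL, radix_tree_preload() becomes an inline no-op returning 0 and radix_tree_preload_end() uses preempt_enable_nort(), so the per-CPU preload pool, which depends on preemption staying disabled, is simply skipped on RT. The long-standing caller pattern keeps compiling unchanged; an illustrative sketch with placeholder tree, lock and item:

	static int example_insert(unsigned long index, void *item)
	{
		int err;

		if (radix_tree_preload(GFP_KERNEL))	/* always 0 on PREEMPT_RT_FULL */
			return -ENOMEM;

		spin_lock(&example_lock);
		err = radix_tree_insert(&example_tree, index, item);
		spin_unlock(&example_lock);

		radix_tree_preload_end();
		return err;
	}
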
+Index: linux-3.2/lib/radix-tree.c
+===================================================================
+--- linux-3.2.orig/lib/radix-tree.c
++++ linux-3.2/lib/radix-tree.c
+@@ -166,12 +166,13 @@ radix_tree_node_alloc(struct radix_tree_
+ 		 * succeed in getting a node here (and never reach
+ 		 * kmem_cache_alloc)
+ 		 */
+-		rtp = &__get_cpu_var(radix_tree_preloads);
++		rtp = &get_cpu_var(radix_tree_preloads);
+ 		if (rtp->nr) {
+ 			ret = rtp->nodes[rtp->nr - 1];
+ 			rtp->nodes[rtp->nr - 1] = NULL;
+ 			rtp->nr--;
+ 		}
++		put_cpu_var(radix_tree_preloads);
+ 	}
+ 	if (ret == NULL)
+ 		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+@@ -206,6 +207,7 @@ radix_tree_node_free(struct radix_tree_n
+ 	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+  * Load up this CPU's radix_tree_node buffer with sufficient objects to
+  * ensure that the addition of a single element in the tree cannot fail.  On
+@@ -240,6 +242,7 @@ out:
+ 	return ret;
+ }
+ EXPORT_SYMBOL(radix_tree_preload);
++#endif
+ 
+ /*
+  *	Return the maximum key which can be store into a
+Index: linux-3.2/kernel/panic.c
+===================================================================
+--- linux-3.2.orig/kernel/panic.c
++++ linux-3.2/kernel/panic.c
+@@ -334,9 +334,11 @@ static u64 oops_id;
+ 
+ static int init_oops_id(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	if (!oops_id)
+ 		get_random_bytes(&oops_id, sizeof(oops_id));
+ 	else
++#endif
+ 		oops_id++;
+ 
+ 	return 0;
+Index: linux-3.2/ipc/mqueue.c
+===================================================================
+--- linux-3.2.orig/ipc/mqueue.c
++++ linux-3.2/ipc/mqueue.c
+@@ -820,12 +820,17 @@ static inline void pipelined_send(struct
+ 				  struct msg_msg *message,
+ 				  struct ext_wait_queue *receiver)
+ {
++	/*
++	 * Keep them in one critical section for PREEMPT_RT:
++	 */
++	preempt_disable_rt();
+ 	receiver->msg = message;
+ 	list_del(&receiver->list);
+ 	receiver->state = STATE_PENDING;
+ 	wake_up_process(receiver->task);
+ 	smp_wmb();
+ 	receiver->state = STATE_READY;
++	preempt_enable_rt();
+ }
+ 
+ /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
+@@ -839,15 +844,19 @@ static inline void pipelined_receive(str
+ 		wake_up_interruptible(&info->wait_q);
+ 		return;
+ 	}
++	/*
++	 * Keep them in one critical section for PREEMPT_RT:
++	 */
++	preempt_disable_rt();
+ 	msg_insert(sender->msg, info);
+ 	list_del(&sender->list);
+ 	sender->state = STATE_PENDING;
+ 	wake_up_process(sender->task);
+ 	smp_wmb();
+ 	sender->state = STATE_READY;
++	preempt_enable_rt();
+ }
+-
+-SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
++SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
+ 		size_t, msg_len, unsigned int, msg_prio,
+ 		const struct timespec __user *, u_abs_timeout)
+ {
+Index: linux-3.2/ipc/msg.c
+===================================================================
+--- linux-3.2.orig/ipc/msg.c
++++ linux-3.2/ipc/msg.c
+@@ -259,12 +259,20 @@ static void expunge_all(struct msg_queue
+ 	while (tmp != &msq->q_receivers) {
+ 		struct msg_receiver *msr;
+ 
++		/*
++		 * Make sure that the wakeup doesn't preempt
++		 * this CPU prematurely. (on PREEMPT_RT)
++		 */
++		preempt_disable_rt();
++
+ 		msr = list_entry(tmp, struct msg_receiver, r_list);
+ 		tmp = tmp->next;
+ 		msr->r_msg = NULL;
+ 		wake_up_process(msr->r_tsk);
+ 		smp_mb();
+ 		msr->r_msg = ERR_PTR(res);
++
++		preempt_enable_rt();
+ 	}
+ }
+ 
+@@ -611,6 +619,12 @@ static inline int pipelined_send(struct 
+ 		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
+ 					       msr->r_msgtype, msr->r_mode)) {
+ 
++			/*
++			 * Make sure that the wakeup doesn't preempt
++			 * this CPU prematurely. (on PREEMPT_RT)
++			 */
++			preempt_disable_rt();
++
+ 			list_del(&msr->r_list);
+ 			if (msr->r_maxsize < msg->m_ts) {
+ 				msr->r_msg = NULL;
+@@ -624,9 +638,11 @@ static inline int pipelined_send(struct 
+ 				wake_up_process(msr->r_tsk);
+ 				smp_mb();
+ 				msr->r_msg = msg;
++				preempt_enable_rt();
+ 
+ 				return 1;
+ 			}
++			preempt_enable_rt();
+ 		}
+ 	}
+ 	return 0;
+Index: linux-3.2/kernel/relay.c
+===================================================================
+--- linux-3.2.orig/kernel/relay.c
++++ linux-3.2/kernel/relay.c
+@@ -336,6 +336,10 @@ static void wakeup_readers(unsigned long
+ {
+ 	struct rchan_buf *buf = (struct rchan_buf *)data;
+ 	wake_up_interruptible(&buf->read_wait);
++	/*
++	 * Stupid polling for now:
++	 */
++	mod_timer(&buf->timer, jiffies + 1);
+ }
+ 
+ /**
+@@ -353,6 +357,7 @@ static void __relay_reset(struct rchan_b
+ 		init_waitqueue_head(&buf->read_wait);
+ 		kref_init(&buf->kref);
+ 		setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
++		mod_timer(&buf->timer, jiffies + 1);
+ 	} else
+ 		del_timer_sync(&buf->timer);
+ 
+@@ -733,15 +738,6 @@ size_t relay_switch_subbuf(struct rchan_
+ 		else
+ 			buf->early_bytes += buf->chan->subbuf_size -
+ 					    buf->padding[old_subbuf];
+-		smp_mb();
+-		if (waitqueue_active(&buf->read_wait))
+-			/*
+-			 * Calling wake_up_interruptible() from here
+-			 * will deadlock if we happen to be logging
+-			 * from the scheduler (trying to re-grab
+-			 * rq->lock), so defer it.
+-			 */
+-			mod_timer(&buf->timer, jiffies + 1);
+ 	}
+ 
+ 	old = buf->data;
+Index: linux-3.2/net/ipv4/route.c
+===================================================================
+--- linux-3.2.orig/net/ipv4/route.c
++++ linux-3.2/net/ipv4/route.c
+@@ -245,7 +245,7 @@ struct rt_hash_bucket {
+ };
+ 
+ #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
+-	defined(CONFIG_PROVE_LOCKING)
++	defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_PREEMPT_RT_FULL)
+ /*
+  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
+  * The size of this table is a power of two and depends on the number of CPUS.
+Index: linux-3.2/include/linux/timer.h
+===================================================================
+--- linux-3.2.orig/include/linux/timer.h
++++ linux-3.2/include/linux/timer.h
+@@ -276,7 +276,7 @@ extern void add_timer(struct timer_list 
+ 
+ extern int try_to_del_timer_sync(struct timer_list *timer);
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+   extern int del_timer_sync(struct timer_list *timer);
+ #else
+ # define del_timer_sync(t)		del_timer(t)
+Index: linux-3.2/kernel/timer.c
+===================================================================
+--- linux-3.2.orig/kernel/timer.c
++++ linux-3.2/kernel/timer.c
+@@ -75,6 +75,7 @@ struct tvec_root {
+ struct tvec_base {
+ 	spinlock_t lock;
+ 	struct timer_list *running_timer;
++	wait_queue_head_t wait_for_running_timer;
+ 	unsigned long timer_jiffies;
+ 	unsigned long next_timer;
+ 	struct tvec_root tv1;
+@@ -653,6 +654,36 @@ static struct tvec_base *lock_timer_base
+ 	}
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
++						  struct tvec_base *old,
++						  struct tvec_base *new)
++{
++	/* See the comment in lock_timer_base() */
++	timer_set_base(timer, NULL);
++	spin_unlock(&old->lock);
++	spin_lock(&new->lock);
++	timer_set_base(timer, new);
++	return new;
++}
++#else
++static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
++						  struct tvec_base *old,
++						  struct tvec_base *new)
++{
++	/*
++	 * We cannot do the above because we might be preempted and
++	 * then the preempter would see NULL and loop forever.
++	 */
++	if (spin_trylock(&new->lock)) {
++		timer_set_base(timer, new);
++		spin_unlock(&old->lock);
++		return new;
++	}
++	return old;
++}
++#endif
++
+ static inline int
+ __mod_timer(struct timer_list *timer, unsigned long expires,
+ 						bool pending_only, int pinned)
+@@ -679,12 +710,15 @@ __mod_timer(struct timer_list *timer, un
+ 
+ 	debug_activate(timer, expires);
+ 
++	preempt_disable_rt();
+ 	cpu = smp_processor_id();
+ 
+ #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+ 	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
+ 		cpu = get_nohz_timer_target();
+ #endif
++	preempt_enable_rt();
++
+ 	new_base = per_cpu(tvec_bases, cpu);
+ 
+ 	if (base != new_base) {
+@@ -695,14 +729,8 @@ __mod_timer(struct timer_list *timer, un
+ 		 * handler yet has not finished. This also guarantees that
+ 		 * the timer is serialized wrt itself.
+ 		 */
+-		if (likely(base->running_timer != timer)) {
+-			/* See the comment in lock_timer_base() */
+-			timer_set_base(timer, NULL);
+-			spin_unlock(&base->lock);
+-			base = new_base;
+-			spin_lock(&base->lock);
+-			timer_set_base(timer, base);
+-		}
++		if (likely(base->running_timer != timer))
++			base = switch_timer_base(timer, base, new_base);
+ 	}
+ 
+ 	timer->expires = expires;
+@@ -885,6 +913,29 @@ void add_timer_on(struct timer_list *tim
+ }
+ EXPORT_SYMBOL_GPL(add_timer_on);
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * Wait for a running timer
++ */
++static void wait_for_running_timer(struct timer_list *timer)
++{
++	struct tvec_base *base = timer->base;
++
++	if (base->running_timer == timer)
++		wait_event(base->wait_for_running_timer,
++			   base->running_timer != timer);
++}
++
++# define wakeup_timer_waiters(b)	wake_up(&(b)->wait_for_running_timer)
++#else
++static inline void wait_for_running_timer(struct timer_list *timer)
++{
++	cpu_relax();
++}
++
++# define wakeup_timer_waiters(b)	do { } while (0)
++#endif
++
+ /**
+  * del_timer - deactive a timer.
+  * @timer: the timer to be deactivated
+@@ -953,7 +1004,7 @@ out:
+ }
+ EXPORT_SYMBOL(try_to_del_timer_sync);
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ /**
+  * del_timer_sync - deactivate a timer and wait for the handler to finish.
+  * @timer: the timer to be deactivated
+@@ -1013,7 +1064,7 @@ int del_timer_sync(struct timer_list *ti
+ 		int ret = try_to_del_timer_sync(timer);
+ 		if (ret >= 0)
+ 			return ret;
+-		cpu_relax();
++		wait_for_running_timer(timer);
+ 	}
+ }
+ EXPORT_SYMBOL(del_timer_sync);
+@@ -1124,10 +1175,11 @@ static inline void __run_timers(struct t
+ 
+ 			spin_unlock_irq(&base->lock);
+ 			call_timer_fn(timer, fn, data);
++			base->running_timer = NULL;
+ 			spin_lock_irq(&base->lock);
+ 		}
+ 	}
+-	base->running_timer = NULL;
++	wake_up(&base->wait_for_running_timer);
+ 	spin_unlock_irq(&base->lock);
+ }
+ 
+@@ -1266,6 +1318,23 @@ unsigned long get_next_timer_interrupt(u
+ 	 */
+ 	if (cpu_is_offline(smp_processor_id()))
+ 		return now + NEXT_TIMER_MAX_DELTA;
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++	/*
++	 * On PREEMPT_RT we cannot sleep here. If the trylock does not
++	 * succeed then we return the worst-case 'expires in 1 tick'
++	 * value. We use the rt functions here directly to avoid a
++	 * migrate_disable() call.
++	 */
++	if (spin_do_trylock(&base->lock)) {
++		if (time_before_eq(base->next_timer, base->timer_jiffies))
++			base->next_timer = __next_timer_interrupt(base);
++		expires = base->next_timer;
++		rt_spin_unlock(&base->lock);
++	} else {
++		expires = now + 1;
++	}
++#else
+ 	spin_lock(&base->lock);
+ 	if (time_before_eq(base->next_timer, base->timer_jiffies))
+ 		base->next_timer = __next_timer_interrupt(base);
+@@ -1274,7 +1343,7 @@ unsigned long get_next_timer_interrupt(u
+ 
+ 	if (time_before_eq(expires, now))
+ 		return now;
+-
++#endif
+ 	return cmp_next_hrtimer_event(now, expires);
+ }
+ #endif
+@@ -1290,14 +1359,13 @@ void update_process_times(int user_tick)
+ 
+ 	/* Note: this timer irq context must be accounted for as well. */
+ 	account_process_tick(p, user_tick);
++	scheduler_tick();
+ 	run_local_timers();
+ 	rcu_check_callbacks(cpu, user_tick);
+-	printk_tick();
+-#ifdef CONFIG_IRQ_WORK
++#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+ 	if (in_irq())
+ 		irq_work_run();
+ #endif
+-	scheduler_tick();
+ 	run_posix_cpu_timers(p);
+ }
+ 
+@@ -1308,6 +1376,11 @@ static void run_timer_softirq(struct sof
+ {
+ 	struct tvec_base *base = __this_cpu_read(tvec_bases);
+ 
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
++	irq_work_run();
++#endif
++
++	printk_tick();
+ 	hrtimer_run_pending();
+ 
+ 	if (time_after_eq(jiffies, base->timer_jiffies))
+@@ -1634,6 +1707,7 @@ static int __cpuinit init_timers_cpu(int
+ 	}
+ 
+ 	spin_lock_init(&base->lock);
++	init_waitqueue_head(&base->wait_for_running_timer);
+ 
+ 	for (j = 0; j < TVN_SIZE; j++) {
+ 		INIT_LIST_HEAD(base->tv5.vec + j);
+@@ -1669,6 +1743,7 @@ static void __cpuinit migrate_timers(int
+ {
+ 	struct tvec_base *old_base;
+ 	struct tvec_base *new_base;
++	unsigned long flags;
+ 	int i;
+ 
+ 	BUG_ON(cpu_online(cpu));
+@@ -1678,8 +1753,11 @@ static void __cpuinit migrate_timers(int
+ 	 * The caller is globally serialized and nobody else
+ 	 * takes two locks at once, deadlock is not possible.
+ 	 */
+-	spin_lock_irq(&new_base->lock);
+-	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
++	local_irq_save(flags);
++	while (!spin_trylock(&new_base->lock))
++		cpu_relax();
++	while (!spin_trylock(&old_base->lock))
++		cpu_relax();
+ 
+ 	BUG_ON(old_base->running_timer);
+ 
+@@ -1693,7 +1771,9 @@ static void __cpuinit migrate_timers(int
+ 	}
+ 
+ 	spin_unlock(&old_base->lock);
+-	spin_unlock_irq(&new_base->lock);
++	spin_unlock(&new_base->lock);
++	local_irq_restore(flags);
++
+ 	put_cpu_var(tvec_bases);
+ }
+ #endif /* CONFIG_HOTPLUG_CPU */
+Index: linux-3.2/include/linux/hrtimer.h
+===================================================================
+--- linux-3.2.orig/include/linux/hrtimer.h
++++ linux-3.2/include/linux/hrtimer.h
+@@ -111,6 +111,8 @@ struct hrtimer {
+ 	enum hrtimer_restart		(*function)(struct hrtimer *);
+ 	struct hrtimer_clock_base	*base;
+ 	unsigned long			state;
++	struct list_head		cb_entry;
++	int				irqsafe;
+ #ifdef CONFIG_TIMER_STATS
+ 	int				start_pid;
+ 	void				*start_site;
+@@ -147,6 +149,7 @@ struct hrtimer_clock_base {
+ 	int			index;
+ 	clockid_t		clockid;
+ 	struct timerqueue_head	active;
++	struct list_head	expired;
+ 	ktime_t			resolution;
+ 	ktime_t			(*get_time)(void);
+ 	ktime_t			softirq_time;
+@@ -187,6 +190,9 @@ struct hrtimer_cpu_base {
+ 	unsigned long			nr_hangs;
+ 	ktime_t				max_hang_time;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++	wait_queue_head_t		wait;
++#endif
+ 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
+ };
+ 
+@@ -374,6 +380,13 @@ static inline int hrtimer_restart(struct
+ 	return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+ }
+ 
++/* Softirq preemption could deadlock timer removal */
++#ifdef CONFIG_PREEMPT_RT_BASE
++  extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
++#else
++# define hrtimer_wait_for_timer(timer)	do { cpu_relax(); } while (0)
++#endif
++
+ /* Query timers: */
+ extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
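
hrtimer_wait_for_timer() lets cancel-and-rearm loops wait for a running callback instead of busy-spinning; on RT the softirq that runs the callback is itself preemptible, so spinning could livelock, while on non-RT builds the helper is just cpu_relax(). The kernel/itimer.c hunk that follows is a typical conversion; the canonical shape of such a loop, with a placeholder lock and timer:

	static void example_rearm(ktime_t new_expires)
	{
	again:
		spin_lock_irq(&example_lock);
		if (hrtimer_try_to_cancel(&example_timer) < 0) {
			/* callback is running: drop the lock, wait, retry */
			spin_unlock_irq(&example_lock);
			hrtimer_wait_for_timer(&example_timer);
			goto again;
		}
		hrtimer_start(&example_timer, new_expires, HRTIMER_MODE_ABS);
		spin_unlock_irq(&example_lock);
	}
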
+Index: linux-3.2/kernel/itimer.c
+===================================================================
+--- linux-3.2.orig/kernel/itimer.c
++++ linux-3.2/kernel/itimer.c
+@@ -214,6 +214,7 @@ again:
+ 		/* We are sharing ->siglock with it_real_fn() */
+ 		if (hrtimer_try_to_cancel(timer) < 0) {
+ 			spin_unlock_irq(&tsk->sighand->siglock);
++			hrtimer_wait_for_timer(&tsk->signal->real_timer);
+ 			goto again;
+ 		}
+ 		expires = timeval_to_ktime(value->it_value);
+Index: linux-3.2/kernel/posix-cpu-timers.c
+===================================================================
+--- linux-3.2.orig/kernel/posix-cpu-timers.c
++++ linux-3.2/kernel/posix-cpu-timers.c
+@@ -701,7 +701,7 @@ static int posix_cpu_timer_set(struct k_
+ 	/*
+ 	 * Disarm any old timer after extracting its expiry time.
+ 	 */
+-	BUG_ON(!irqs_disabled());
++	BUG_ON_NONRT(!irqs_disabled());
+ 
+ 	ret = 0;
+ 	old_incr = timer->it.cpu.incr;
+@@ -1223,7 +1223,7 @@ void posix_cpu_timer_schedule(struct k_i
+ 	/*
+ 	 * Now re-arm for the new expiry time.
+ 	 */
+-	BUG_ON(!irqs_disabled());
++	BUG_ON_NONRT(!irqs_disabled());
+ 	arm_timer(timer);
+ 	spin_unlock(&p->sighand->siglock);
+ 
+@@ -1290,10 +1290,11 @@ static inline int fastpath_timer_check(s
+ 	sig = tsk->signal;
+ 	if (sig->cputimer.running) {
+ 		struct task_cputime group_sample;
++		unsigned long flags;
+ 
+-		raw_spin_lock(&sig->cputimer.lock);
++		raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
+ 		group_sample = sig->cputimer.cputime;
+-		raw_spin_unlock(&sig->cputimer.lock);
++		raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
+ 
+ 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+ 			return 1;
+@@ -1307,13 +1308,13 @@ static inline int fastpath_timer_check(s
+  * already updated our counts.  We need to check if any timers fire now.
+  * Interrupts are disabled.
+  */
+-void run_posix_cpu_timers(struct task_struct *tsk)
++static void __run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ 	LIST_HEAD(firing);
+ 	struct k_itimer *timer, *next;
+ 	unsigned long flags;
+ 
+-	BUG_ON(!irqs_disabled());
++	BUG_ON_NONRT(!irqs_disabled());
+ 
+ 	/*
+ 	 * The fast path checks that there are no expired thread or thread
+@@ -1371,6 +1372,190 @@ void run_posix_cpu_timers(struct task_st
+ 	}
+ }
+ 
++#ifdef CONFIG_PREEMPT_RT_BASE
++#include <linux/kthread.h>
++#include <linux/cpu.h>
++DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++
++static int posix_cpu_timers_thread(void *data)
++{
++	int cpu = (long)data;
++
++	BUG_ON(per_cpu(posix_timer_task,cpu) != current);
++
++	while (!kthread_should_stop()) {
++		struct task_struct *tsk = NULL;
++		struct task_struct *next = NULL;
++
++		if (cpu_is_offline(cpu))
++			goto wait_to_die;
++
++		/* grab task list */
++		raw_local_irq_disable();
++		tsk = per_cpu(posix_timer_tasklist, cpu);
++		per_cpu(posix_timer_tasklist, cpu) = NULL;
++		raw_local_irq_enable();
++
++		/* it's possible the list is empty, just return */
++		if (!tsk) {
++			set_current_state(TASK_INTERRUPTIBLE);
++			schedule();
++			__set_current_state(TASK_RUNNING);
++			continue;
++		}
++
++		/* Process task list */
++		while (1) {
++			/* save next */
++			next = tsk->posix_timer_list;
++
++			/* run the task timers, clear its ptr and
++			 * unreference it
++			 */
++			__run_posix_cpu_timers(tsk);
++			tsk->posix_timer_list = NULL;
++			put_task_struct(tsk);
++
++			/* check if this is the last on the list */
++			if (next == tsk)
++				break;
++			tsk = next;
++		}
++	}
++	return 0;
++
++wait_to_die:
++	/* Wait for kthread_stop */
++	set_current_state(TASK_INTERRUPTIBLE);
++	while (!kthread_should_stop()) {
++		schedule();
++		set_current_state(TASK_INTERRUPTIBLE);
++	}
++	__set_current_state(TASK_RUNNING);
++	return 0;
++}
++
++static inline int __fastpath_timer_check(struct task_struct *tsk)
++{
++	/* tsk == current, ensure it is safe to use ->signal/sighand */
++	if (unlikely(tsk->exit_state))
++		return 0;
++
++	if (!task_cputime_zero(&tsk->cputime_expires))
++		return 1;
++
++	if (!task_cputime_zero(&tsk->signal->cputime_expires))
++		return 1;
++
++	return 0;
++}
++
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++	unsigned long cpu = smp_processor_id();
++	struct task_struct *tasklist;
++
++	BUG_ON(!irqs_disabled());
++	if (!per_cpu(posix_timer_task, cpu))
++		return;
++	/* get per-cpu references */
++	tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++	/* check to see if we're already queued */
++	if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
++		get_task_struct(tsk);
++		if (tasklist) {
++			tsk->posix_timer_list = tasklist;
++		} else {
++			/*
++			 * The list is terminated by a self-pointing
++			 * task_struct
++			 */
++			tsk->posix_timer_list = tsk;
++		}
++		per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++		wake_up_process(per_cpu(posix_timer_task, cpu));
++	}
++}
++
++/*
++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary migration thread for the new CPU.
++ */
++static int posix_cpu_thread_call(struct notifier_block *nfb,
++				 unsigned long action, void *hcpu)
++{
++	int cpu = (long)hcpu;
++	struct task_struct *p;
++	struct sched_param param;
++
++	switch (action) {
++	case CPU_UP_PREPARE:
++		p = kthread_create(posix_cpu_timers_thread, hcpu,
++					"posixcputmr/%d",cpu);
++		if (IS_ERR(p))
++			return NOTIFY_BAD;
++		p->flags |= PF_NOFREEZE;
++		kthread_bind(p, cpu);
++		/* Must be high prio to avoid getting starved */
++		param.sched_priority = MAX_RT_PRIO-1;
++		sched_setscheduler(p, SCHED_FIFO, &param);
++		per_cpu(posix_timer_task,cpu) = p;
++		break;
++	case CPU_ONLINE:
++		/* Strictly unnecessary, as first user will wake it. */
++		wake_up_process(per_cpu(posix_timer_task,cpu));
++		break;
++#ifdef CONFIG_HOTPLUG_CPU
++	case CPU_UP_CANCELED:
++		/* Unbind it from offline cpu so it can run.  Fall thru. */
++		kthread_bind(per_cpu(posix_timer_task,cpu),
++			     any_online_cpu(cpu_online_map));
++		kthread_stop(per_cpu(posix_timer_task,cpu));
++		per_cpu(posix_timer_task,cpu) = NULL;
++		break;
++	case CPU_DEAD:
++		kthread_stop(per_cpu(posix_timer_task,cpu));
++		per_cpu(posix_timer_task,cpu) = NULL;
++		break;
++#endif
++	}
++	return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
++	.notifier_call = posix_cpu_thread_call,
++	.priority = 10
++};
++
++static int __init posix_cpu_thread_init(void)
++{
++	void *hcpu = (void *)(long)smp_processor_id();
++	/* Start one for boot CPU. */
++	unsigned long cpu;
++
++	/* init the per-cpu posix_timer_tasklist pointers */
++	for_each_cpu_mask(cpu, cpu_possible_map)
++		per_cpu(posix_timer_tasklist, cpu) = NULL;
++
++	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
++	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
++	register_cpu_notifier(&posix_cpu_thread_notifier);
++	return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++	__run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+  * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+  * The tsk->sighand->siglock must be held by the caller.
+Index: linux-3.2/include/linux/rcupdate.h
+===================================================================
+--- linux-3.2.orig/include/linux/rcupdate.h
++++ linux-3.2/include/linux/rcupdate.h
+@@ -92,6 +92,9 @@ extern void call_rcu(struct rcu_head *he
+ 
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++#define call_rcu_bh	call_rcu
++#else
+ /**
+  * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+  * @head: structure to be used for queueing the RCU updates.
+@@ -112,6 +115,7 @@ extern void call_rcu(struct rcu_head *he
+  */
+ extern void call_rcu_bh(struct rcu_head *head,
+ 			void (*func)(struct rcu_head *head));
++#endif
+ 
+ /**
+  * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
+@@ -147,6 +151,11 @@ void synchronize_rcu(void);
+  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+  */
+ #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
++#ifndef CONFIG_PREEMPT_RT_FULL
++#define sched_rcu_preempt_depth()	rcu_preempt_depth()
++#else
++static inline int sched_rcu_preempt_depth(void) { return 0; }
++#endif
+ 
+ #else /* #ifdef CONFIG_PREEMPT_RCU */
+ 
+@@ -170,6 +179,8 @@ static inline int rcu_preempt_depth(void
+ 	return 0;
+ }
+ 
++#define sched_rcu_preempt_depth()	rcu_preempt_depth()
++
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+ 
+ /* Internal to kernel */
+@@ -274,7 +285,14 @@ static inline int rcu_read_lock_held(voi
+  * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+  * hell.
+  */
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int rcu_read_lock_bh_held(void)
++{
++	return rcu_read_lock_held();
++}
++#else
+ extern int rcu_read_lock_bh_held(void);
++#endif
+ 
+ /**
+  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+@@ -677,8 +695,12 @@ static inline void rcu_read_unlock(void)
+ static inline void rcu_read_lock_bh(void)
+ {
+ 	local_bh_disable();
++#ifdef CONFIG_PREEMPT_RT_FULL
++	rcu_read_lock();
++#else
+ 	__acquire(RCU_BH);
+ 	rcu_read_acquire_bh();
++#endif
+ }
+ 
+ /*
+@@ -688,8 +710,12 @@ static inline void rcu_read_lock_bh(void
+  */
+ static inline void rcu_read_unlock_bh(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++	rcu_read_unlock();
++#else
+ 	rcu_read_release_bh();
+ 	__release(RCU_BH);
++#endif
+ 	local_bh_enable();
+ }
+ 
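
Under PREEMPT_RT_FULL, rcu_read_lock_bh()/rcu_read_unlock_bh() keep the local_bh_disable()/enable() pair but nest a regular RCU read-side critical section inside it, and call_rcu_bh() is aliased to call_rcu(), so bh-flavoured readers and updaters keep working while softirqs run in preemptible context. Existing callers do not change; an illustrative reader, with example_ptr and example_use() as placeholders:

	struct example *p;

	rcu_read_lock_bh();		/* RT: local_bh_disable() + rcu_read_lock() */
	p = rcu_dereference_bh(example_ptr);
	if (p)
		example_use(p);
	rcu_read_unlock_bh();
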
+Index: linux-3.2/kernel/sched_fair.c
+===================================================================
+--- linux-3.2.orig/kernel/sched_fair.c
++++ linux-3.2/kernel/sched_fair.c
+@@ -2806,6 +2806,10 @@ balance_tasks(struct rq *this_rq, int th
+ 		 */
+ 		if (idle == CPU_NEWLY_IDLE)
+ 			break;
++
++		if (raw_spin_is_contended(&this_rq->lock) ||
++		    raw_spin_is_contended(&busiest->lock))
++			break;
+ #endif
+ 
+ 		/*
+@@ -2946,6 +2950,20 @@ load_balance_fair(struct rq *this_rq, in
+ 		rem_load_move -= moved_load;
+ 		if (rem_load_move < 0)
+ 			break;
++
++#ifdef CONFIG_PREEMPT
++		/*
++		 * NEWIDLE balancing is a source of latency, so preemptible
++		 * kernels will stop after the first task is pulled to minimize
++		 * the critical section.
++		 */
++		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
++			break;
++
++		if (raw_spin_is_contended(&this_rq->lock) ||
++		    raw_spin_is_contended(&busiest->lock))
++			break;
++#endif
+ 	}
+ 	rcu_read_unlock();
+ 
+Index: linux-3.2/kernel/stop_machine.c
+===================================================================
+--- linux-3.2.orig/kernel/stop_machine.c
++++ linux-3.2/kernel/stop_machine.c
+@@ -29,12 +29,12 @@ struct cpu_stop_done {
+ 	atomic_t		nr_todo;	/* nr left to execute */
+ 	bool			executed;	/* actually executed? */
+ 	int			ret;		/* collected return value */
+-	struct completion	completion;	/* fired if nr_todo reaches 0 */
++	struct task_struct	*waiter;	/* woken when nr_todo reaches 0 */
+ };
+ 
+ /* the actual stopper, one per every possible cpu, enabled on online cpus */
+ struct cpu_stopper {
+-	spinlock_t		lock;
++	raw_spinlock_t		lock;
+ 	bool			enabled;	/* is this stopper enabled? */
+ 	struct list_head	works;		/* list of pending works */
+ 	struct task_struct	*thread;	/* stopper thread */
+@@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cp
+ {
+ 	memset(done, 0, sizeof(*done));
+ 	atomic_set(&done->nr_todo, nr_todo);
+-	init_completion(&done->completion);
++	done->waiter = current;
+ }
+ 
+ /* signal completion unless @done is NULL */
+@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct 
+ 	if (done) {
+ 		if (executed)
+ 			done->executed = true;
+-		if (atomic_dec_and_test(&done->nr_todo))
+-			complete(&done->completion);
++		if (atomic_dec_and_test(&done->nr_todo)) {
++			wake_up_process(done->waiter);
++			done->waiter = NULL;
++		}
+ 	}
+ }
+ 
+@@ -67,7 +69,7 @@ static void cpu_stop_queue_work(struct c
+ {
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&stopper->lock, flags);
++	raw_spin_lock_irqsave(&stopper->lock, flags);
+ 
+ 	if (stopper->enabled) {
+ 		list_add_tail(&work->list, &stopper->works);
+@@ -75,7 +77,23 @@ static void cpu_stop_queue_work(struct c
+ 	} else
+ 		cpu_stop_signal_done(work->done, false);
+ 
+-	spin_unlock_irqrestore(&stopper->lock, flags);
++	raw_spin_unlock_irqrestore(&stopper->lock, flags);
++}
++
++static void wait_for_stop_done(struct cpu_stop_done *done)
++{
++	set_current_state(TASK_UNINTERRUPTIBLE);
++	while (atomic_read(&done->nr_todo)) {
++		schedule();
++		set_current_state(TASK_UNINTERRUPTIBLE);
++	}
++	/*
++	 * We need to wait until cpu_stop_signal_done() has cleared
++	 * done->waiter.
++	 */
++	while (done->waiter)
++		cpu_relax();
++	set_current_state(TASK_RUNNING);
+ }
+ 
+ /**
+@@ -109,7 +127,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
+ 
+ 	cpu_stop_init_done(&done, 1);
+ 	cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+-	wait_for_completion(&done.completion);
++	wait_for_stop_done(&done);
+ 	return done.executed ? done.ret : -ENOENT;
+ }
+ 
+@@ -135,6 +153,7 @@ void stop_one_cpu_nowait(unsigned int cp
+ 
+ /* static data for stop_cpus */
+ static DEFINE_MUTEX(stop_cpus_mutex);
++static DEFINE_MUTEX(stopper_lock);
+ static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
+ 
+ static void queue_stop_cpus_work(const struct cpumask *cpumask,
+@@ -153,15 +172,14 @@ static void queue_stop_cpus_work(const s
+ 	}
+ 
+ 	/*
+-	 * Disable preemption while queueing to avoid getting
+-	 * preempted by a stopper which might wait for other stoppers
+-	 * to enter @fn which can lead to deadlock.
++	 * Make sure that all work is queued on all cpus before
++	 * any of the cpus can execute it.
+ 	 */
+-	preempt_disable();
++	mutex_lock(&stopper_lock);
+ 	for_each_cpu(cpu, cpumask)
+ 		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
+ 				    &per_cpu(stop_cpus_work, cpu));
+-	preempt_enable();
++	mutex_unlock(&stopper_lock);
+ }
+ 
+ static int __stop_cpus(const struct cpumask *cpumask,
+@@ -171,7 +189,7 @@ static int __stop_cpus(const struct cpum
+ 
+ 	cpu_stop_init_done(&done, cpumask_weight(cpumask));
+ 	queue_stop_cpus_work(cpumask, fn, arg, &done);
+-	wait_for_completion(&done.completion);
++	wait_for_stop_done(&done);
+ 	return done.executed ? done.ret : -ENOENT;
+ }
+ 
+@@ -259,13 +277,13 @@ repeat:
+ 	}
+ 
+ 	work = NULL;
+-	spin_lock_irq(&stopper->lock);
++	raw_spin_lock_irq(&stopper->lock);
+ 	if (!list_empty(&stopper->works)) {
+ 		work = list_first_entry(&stopper->works,
+ 					struct cpu_stop_work, list);
+ 		list_del_init(&work->list);
+ 	}
+-	spin_unlock_irq(&stopper->lock);
++	raw_spin_unlock_irq(&stopper->lock);
+ 
+ 	if (work) {
+ 		cpu_stop_fn_t fn = work->fn;
+@@ -275,6 +293,16 @@ repeat:
+ 
+ 		__set_current_state(TASK_RUNNING);
+ 
++		/*
++		 * Wait until the stopper has finished scheduling on all
++		 * cpus
++		 */
++		mutex_lock(&stopper_lock);
++		/*
++		 * Let other cpu threads continue as well
++		 */
++		mutex_unlock(&stopper_lock);
++
+ 		/* cpu stop callbacks are not allowed to sleep */
+ 		preempt_disable();
+ 
+@@ -289,7 +317,13 @@ repeat:
+ 			  kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
+ 					  ksym_buf), arg);
+ 
++		/*
++		 * Make sure that the wakeup and setting done->waiter
++		 * to NULL are atomic.
++		 */
++		local_irq_disable();
+ 		cpu_stop_signal_done(done, true);
++		local_irq_enable();
+ 	} else
+ 		schedule();
+ 
+@@ -317,6 +351,7 @@ static int __cpuinit cpu_stop_cpu_callba
+ 		if (IS_ERR(p))
+ 			return notifier_from_errno(PTR_ERR(p));
+ 		get_task_struct(p);
++		p->flags |= PF_STOMPER;
+ 		kthread_bind(p, cpu);
+ 		sched_set_stop_task(cpu, p);
+ 		stopper->thread = p;
+@@ -326,9 +361,9 @@ static int __cpuinit cpu_stop_cpu_callba
+ 		/* strictly unnecessary, as first user will wake it */
+ 		wake_up_process(stopper->thread);
+ 		/* mark enabled */
+-		spin_lock_irq(&stopper->lock);
++		raw_spin_lock_irq(&stopper->lock);
+ 		stopper->enabled = true;
+-		spin_unlock_irq(&stopper->lock);
++		raw_spin_unlock_irq(&stopper->lock);
+ 		break;
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -341,11 +376,11 @@ static int __cpuinit cpu_stop_cpu_callba
+ 		/* kill the stopper */
+ 		kthread_stop(stopper->thread);
+ 		/* drain remaining works */
+-		spin_lock_irq(&stopper->lock);
++		raw_spin_lock_irq(&stopper->lock);
+ 		list_for_each_entry(work, &stopper->works, list)
+ 			cpu_stop_signal_done(work->done, false);
+ 		stopper->enabled = false;
+-		spin_unlock_irq(&stopper->lock);
++		raw_spin_unlock_irq(&stopper->lock);
+ 		/* release the stopper */
+ 		put_task_struct(stopper->thread);
+ 		stopper->thread = NULL;
+@@ -376,7 +411,7 @@ static int __init cpu_stop_init(void)
+ 	for_each_possible_cpu(cpu) {
+ 		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+ 
+-		spin_lock_init(&stopper->lock);
++		raw_spin_lock_init(&stopper->lock);
+ 		INIT_LIST_HEAD(&stopper->works);
+ 	}
+ 
+@@ -570,7 +605,7 @@ int stop_machine_from_inactive_cpu(int (
+ 	ret = stop_machine_cpu_stop(&smdata);
+ 
+ 	/* Busy wait for completion. */
+-	while (!completion_done(&done.completion))
++	while (atomic_read(&done.nr_todo))
+ 		cpu_relax();
+ 
+ 	mutex_unlock(&stop_cpus_mutex);
+Index: linux-3.2/include/linux/cpu.h
+===================================================================
+--- linux-3.2.orig/include/linux/cpu.h
++++ linux-3.2/include/linux/cpu.h
+@@ -66,8 +66,10 @@ enum {
+ 	/* migration should happen before other stuff but after perf */
+ 	CPU_PRI_PERF		= 20,
+ 	CPU_PRI_MIGRATION	= 10,
+-	/* prepare workqueues for other notifiers */
+-	CPU_PRI_WORKQUEUE	= 5,
++
++	CPU_PRI_WORKQUEUE_ACTIVE	= 5,  /* prepare workqueues for others */
++	CPU_PRI_NORMAL			= 0,
++	CPU_PRI_WORKQUEUE_INACTIVE	= -5, /* flush workqueues after others */
+ };
+ 
+ #define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
+@@ -167,6 +169,8 @@ extern struct sysdev_class cpu_sysdev_cl
+ 
+ extern void get_online_cpus(void);
+ extern void put_online_cpus(void);
++extern void pin_current_cpu(void);
++extern void unpin_current_cpu(void);
+ #define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
+ #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
+ #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
+@@ -189,6 +193,8 @@ static inline void cpu_hotplug_driver_un
+ 
+ #define get_online_cpus()	do { } while (0)
+ #define put_online_cpus()	do { } while (0)
++static inline void pin_current_cpu(void) { }
++static inline void unpin_current_cpu(void) { }
+ #define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+ /* These aren't inline functions due to a GCC bug. */
+ #define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
+Index: linux-3.2/kernel/cpu.c
+===================================================================
+--- linux-3.2.orig/kernel/cpu.c
++++ linux-3.2/kernel/cpu.c
+@@ -58,6 +58,104 @@ static struct {
+ 	.refcount = 0,
+ };
+ 
++struct hotplug_pcp {
++	struct task_struct *unplug;
++	int refcount;
++	struct completion synced;
++};
++
++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
++
++/**
++ * pin_current_cpu - Prevent the current cpu from being unplugged
++ *
++ * Lightweight version of get_online_cpus() to prevent cpu from being
++ * unplugged when code runs in a migration disabled region.
++ *
++ * Must be called with preemption disabled (preempt_count = 1)!
++ */
++void pin_current_cpu(void)
++{
++	struct hotplug_pcp *hp;
++
++retry:
++	hp = &__get_cpu_var(hotplug_pcp);
++
++	if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
++	    hp->unplug == current || (current->flags & PF_STOMPER)) {
++		hp->refcount++;
++		return;
++	}
++	preempt_enable();
++	mutex_lock(&cpu_hotplug.lock);
++	mutex_unlock(&cpu_hotplug.lock);
++	preempt_disable();
++	goto retry;
++}
++
++/**
++ * unpin_current_cpu - Allow unplug of current cpu
++ *
++ * Must be called with preemption or interrupts disabled!
++ */
++void unpin_current_cpu(void)
++{
++	struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
++
++	WARN_ON(hp->refcount <= 0);
++
++	/* This is safe. sync_unplug_thread is pinned to this cpu */
++	if (!--hp->refcount && hp->unplug && hp->unplug != current &&
++	    !(current->flags & PF_STOMPER))
++		wake_up_process(hp->unplug);
++}
++
++/*
++ * FIXME: Is this really correct under all circumstances?
++ */
++static int sync_unplug_thread(void *data)
++{
++	struct hotplug_pcp *hp = data;
++
++	preempt_disable();
++	hp->unplug = current;
++	set_current_state(TASK_UNINTERRUPTIBLE);
++	while (hp->refcount) {
++		schedule_preempt_disabled();
++		set_current_state(TASK_UNINTERRUPTIBLE);
++	}
++	set_current_state(TASK_RUNNING);
++	preempt_enable();
++	complete(&hp->synced);
++	return 0;
++}
++
++/*
++ * Start the sync_unplug_thread on the target cpu and wait for it to
++ * complete.
++ */
++static int cpu_unplug_begin(unsigned int cpu)
++{
++	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++	struct task_struct *tsk;
++
++	init_completion(&hp->synced);
++	tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
++	if (IS_ERR(tsk))
++		return (PTR_ERR(tsk));
++	kthread_bind(tsk, cpu);
++	wake_up_process(tsk);
++	wait_for_completion(&hp->synced);
++	return 0;
++}
++
++static void cpu_unplug_done(unsigned int cpu)
++{
++	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++	hp->unplug = NULL;
++}
++
+ void get_online_cpus(void)
+ {
+ 	might_sleep();
+@@ -211,13 +309,14 @@ static int __ref take_cpu_down(void *_pa
+ /* Requires cpu_add_remove_lock to be held */
+ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ {
+-	int err, nr_calls = 0;
++	int mycpu, err, nr_calls = 0;
+ 	void *hcpu = (void *)(long)cpu;
+ 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+ 	struct take_cpu_down_param tcd_param = {
+ 		.mod = mod,
+ 		.hcpu = hcpu,
+ 	};
++	cpumask_var_t cpumask;
+ 
+ 	if (num_online_cpus() == 1)
+ 		return -EBUSY;
+@@ -225,7 +324,26 @@ static int __ref _cpu_down(unsigned int 
+ 	if (!cpu_online(cpu))
+ 		return -EINVAL;
+ 
++	/* Move the downtaker off the unplug cpu */
++	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
++		return -ENOMEM;
++	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
++	set_cpus_allowed_ptr(current, cpumask);
++	free_cpumask_var(cpumask);
++	migrate_disable();
++	mycpu = smp_processor_id();
++	if (mycpu == cpu) {
++		printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
++		migrate_enable();
++		return -EBUSY;
++	}
++
+ 	cpu_hotplug_begin();
++	err = cpu_unplug_begin(cpu);
++	if (err) {
++		printk("cpu_unplug_begin(%d) failed\n", cpu);
++		goto out_cancel;
++	}
+ 
+ 	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+ 	if (err) {
+@@ -264,6 +382,9 @@ static int __ref _cpu_down(unsigned int 
+ 	check_for_tasks(cpu);
+ 
+ out_release:
++	cpu_unplug_done(cpu);
++out_cancel:
++	migrate_enable();
+ 	cpu_hotplug_done();
+ 	if (!err)
+ 		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+Index: linux-3.2/lib/smp_processor_id.c
+===================================================================
+--- linux-3.2.orig/lib/smp_processor_id.c
++++ linux-3.2/lib/smp_processor_id.c
+@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor
+ 	if (!printk_ratelimit())
+ 		goto out_enable;
+ 
+-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
+-			"code: %s/%d\n",
+-			preempt_count() - 1, current->comm, current->pid);
++	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
++	       "code: %s/%d\n", preempt_count() - 1,
++	       __migrate_disabled(current), current->comm, current->pid);
+ 	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ 	dump_stack();
+ 
+Index: linux-3.2/include/linux/ftrace_event.h
+===================================================================
+--- linux-3.2.orig/include/linux/ftrace_event.h
++++ linux-3.2/include/linux/ftrace_event.h
+@@ -49,7 +49,8 @@ struct trace_entry {
+ 	unsigned char		flags;
+ 	unsigned char		preempt_count;
+ 	int			pid;
+-	int			padding;
++	unsigned short		migrate_disable;
++	unsigned short		padding;
+ };
+ 
+ #define FTRACE_MAX_EVENT						\
+Index: linux-3.2/kernel/trace/trace.c
+===================================================================
+--- linux-3.2.orig/kernel/trace/trace.c
++++ linux-3.2/kernel/trace/trace.c
+@@ -359,11 +359,13 @@ static DECLARE_DELAYED_WORK(wakeup_work,
+  */
+ void trace_wake_up(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	const unsigned long delay = msecs_to_jiffies(2);
+ 
+ 	if (trace_flags & TRACE_ITER_BLOCK)
+ 		return;
+ 	schedule_delayed_work(&wakeup_work, delay);
++#endif
+ }
+ 
+ static int __init set_buf_size(char *str)
+@@ -719,6 +721,12 @@ update_max_tr_single(struct trace_array 
+ }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
++static void default_wait_pipe(struct trace_iterator *iter);
++#else
++#define default_wait_pipe	poll_wait_pipe
++#endif
++
+ /**
+  * register_tracer - register a tracer with the ftrace system.
+  * @type - the plugin for the tracer
+@@ -1123,6 +1131,8 @@ tracing_generic_entry_update(struct trac
+ 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+ 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
++
++	entry->migrate_disable	= (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+ 
+@@ -1850,9 +1860,10 @@ static void print_lat_help_header(struct
+ 	seq_puts(m, "#                | / _----=> need-resched    \n");
+ 	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
+ 	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
+-	seq_puts(m, "#                |||| /     delay             \n");
+-	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
+-	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
++	seq_puts(m, "#                |||| / _--=> migrate-disable\n");
++	seq_puts(m, "#                ||||| /     delay           \n");
++	seq_puts(m, "#  cmd     pid   |||||| time  |   caller     \n");
++	seq_puts(m, "#     \\   /      |||||  \\   |   /          \n");
+ }
+ 
+ static void print_func_help_header(struct seq_file *m)
+@@ -3189,6 +3200,7 @@ static int tracing_release_pipe(struct i
+ 	return 0;
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static unsigned int
+ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+ {
+@@ -3210,8 +3222,7 @@ tracing_poll_pipe(struct file *filp, pol
+ 	}
+ }
+ 
+-
+-void default_wait_pipe(struct trace_iterator *iter)
++static void default_wait_pipe(struct trace_iterator *iter)
+ {
+ 	DEFINE_WAIT(wait);
+ 
+@@ -3222,6 +3233,20 @@ void default_wait_pipe(struct trace_iter
+ 
+ 	finish_wait(&trace_wait, &wait);
+ }
++#else
++static unsigned int
++tracing_poll_pipe(struct file *filp, poll_table *poll_table)
++{
++	struct trace_iterator *iter = filp->private_data;
++
++	if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter))
++		return POLLIN | POLLRDNORM;
++	poll_wait_pipe(iter);
++	if (!trace_empty(iter))
++		return POLLIN | POLLRDNORM;
++	return 0;
++}
++#endif
+ 
+ /*
+  * This is a make-shift waitqueue.
+Index: linux-3.2/kernel/trace/trace_events.c
+===================================================================
+--- linux-3.2.orig/kernel/trace/trace_events.c
++++ linux-3.2/kernel/trace/trace_events.c
+@@ -116,6 +116,7 @@ static int trace_define_common_fields(vo
+ 	__common_field(unsigned char, flags);
+ 	__common_field(unsigned char, preempt_count);
+ 	__common_field(int, pid);
++	__common_field(unsigned short, migrate_disable);
+ 	__common_field(int, padding);
+ 
+ 	return ret;
+Index: linux-3.2/kernel/trace/trace_output.c
+===================================================================
+--- linux-3.2.orig/kernel/trace/trace_output.c
++++ linux-3.2/kernel/trace/trace_output.c
+@@ -591,6 +591,11 @@ int trace_print_lat_fmt(struct trace_seq
+ 	else
+ 		ret = trace_seq_putc(s, '.');
+ 
++	if (entry->migrate_disable)
++		ret = trace_seq_printf(s, "%x", entry->migrate_disable);
++	else
++		ret = trace_seq_putc(s, '.');
++
+ 	return ret;
+ }
+ 
+Index: linux-3.2/kernel/sched_debug.c
+===================================================================
+--- linux-3.2.orig/kernel/sched_debug.c
++++ linux-3.2/kernel/sched_debug.c
+@@ -235,6 +235,9 @@ void print_rt_rq(struct seq_file *m, int
+ 	P(rt_throttled);
+ 	PN(rt_time);
+ 	PN(rt_runtime);
++#ifdef CONFIG_SMP
++	P(rt_nr_migratory);
++#endif
+ 
+ #undef PN
+ #undef P
+@@ -484,6 +487,10 @@ void proc_sched_show_task(struct task_st
+ 	P(se.load.weight);
+ 	P(policy);
+ 	P(prio);
++#ifdef CONFIG_PREEMPT_RT_FULL
++	P(migrate_disable);
++#endif
++	P(rt.nr_cpus_allowed);
+ #undef PN
+ #undef __PN
+ #undef P
+Index: linux-3.2/kernel/trace/trace.h
+===================================================================
+--- linux-3.2.orig/kernel/trace/trace.h
++++ linux-3.2/kernel/trace/trace.h
+@@ -345,7 +345,6 @@ void trace_init_global_iter(struct trace
+ 
+ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+ 
+-void default_wait_pipe(struct trace_iterator *iter);
+ void poll_wait_pipe(struct trace_iterator *iter);
+ 
+ void ftrace(struct trace_array *tr,
+Index: linux-3.2/kernel/trace/ring_buffer.c
+===================================================================
+--- linux-3.2.orig/kernel/trace/ring_buffer.c
++++ linux-3.2/kernel/trace/ring_buffer.c
+@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
+ 	int				cpu;
+ 	atomic_t			record_disabled;
+ 	struct ring_buffer		*buffer;
+-	raw_spinlock_t			reader_lock;	/* serialize readers */
++	spinlock_t			reader_lock;	/* serialize readers */
+ 	arch_spinlock_t			lock;
+ 	struct lock_class_key		lock_key;
+ 	struct list_head		*pages;
+@@ -1049,6 +1049,44 @@ static int rb_allocate_pages(struct ring
+ 	return -ENOMEM;
+ }
+ 
++static inline int ok_to_lock(void)
++{
++	if (in_nmi())
++		return 0;
++#ifdef CONFIG_PREEMPT_RT_FULL
++	if (in_atomic())
++		return 0;
++#endif
++	return 1;
++}
++
++static int
++read_buffer_lock(struct ring_buffer_per_cpu *cpu_buffer,
++		 unsigned long *flags)
++{
++	/*
++	 * If an NMI die dumps out the content of the ring buffer
++	 * do not grab locks. We also permanently disable the ring
++	 * buffer too. A one time deal is all you get from reading
++	 * the ring buffer from an NMI.
++	 */
++	if (!ok_to_lock()) {
++		if (spin_trylock_irqsave(&cpu_buffer->reader_lock, *flags))
++			return 1;
++		tracing_off_permanent();
++		return 0;
++	}
++	spin_lock_irqsave(&cpu_buffer->reader_lock, *flags);
++	return 1;
++}
++
++static void
++read_buffer_unlock(struct ring_buffer_per_cpu *cpu_buffer,
++		   unsigned long flags, int locked)
++{
++	if (locked)
++		spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++}
+ static struct ring_buffer_per_cpu *
+ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
+ {
+@@ -1064,7 +1102,7 @@ rb_allocate_cpu_buffer(struct ring_buffe
+ 
+ 	cpu_buffer->cpu = cpu;
+ 	cpu_buffer->buffer = buffer;
+-	raw_spin_lock_init(&cpu_buffer->reader_lock);
++	spin_lock_init(&cpu_buffer->reader_lock);
+ 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
+ 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+ 
+@@ -1259,9 +1297,11 @@ rb_remove_pages(struct ring_buffer_per_c
+ {
+ 	struct buffer_page *bpage;
+ 	struct list_head *p;
++	unsigned long flags;
+ 	unsigned i;
++	int locked;
+ 
+-	raw_spin_lock_irq(&cpu_buffer->reader_lock);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 	rb_head_page_deactivate(cpu_buffer);
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+@@ -1279,7 +1319,7 @@ rb_remove_pages(struct ring_buffer_per_c
+ 	rb_check_pages(cpu_buffer);
+ 
+ out:
+-	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+ 
+ static void
+@@ -1288,9 +1328,11 @@ rb_insert_pages(struct ring_buffer_per_c
+ {
+ 	struct buffer_page *bpage;
+ 	struct list_head *p;
++	unsigned long flags;
+ 	unsigned i;
++	int locked;
+ 
+-	raw_spin_lock_irq(&cpu_buffer->reader_lock);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 	rb_head_page_deactivate(cpu_buffer);
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+@@ -1305,7 +1347,7 @@ rb_insert_pages(struct ring_buffer_per_c
+ 	rb_check_pages(cpu_buffer);
+ 
+ out:
+-	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+ 
+ /**
+@@ -2689,7 +2731,7 @@ unsigned long ring_buffer_oldest_event_t
+ 		return 0;
+ 
+ 	cpu_buffer = buffer->buffers[cpu];
+-	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ 	/*
+ 	 * if the tail is on reader_page, oldest time stamp is on the reader
+ 	 * page
+@@ -2699,7 +2741,7 @@ unsigned long ring_buffer_oldest_event_t
+ 	else
+ 		bpage = rb_set_head_page(cpu_buffer);
+ 	ret = bpage->page->time_stamp;
+-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ 
+ 	return ret;
+ }
+@@ -2863,15 +2905,16 @@ void ring_buffer_iter_reset(struct ring_
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	unsigned long flags;
++	int locked;
+ 
+ 	if (!iter)
+ 		return;
+ 
+ 	cpu_buffer = iter->cpu_buffer;
+ 
+-	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 	rb_iter_reset(iter);
+-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
+ 
+@@ -3289,21 +3332,6 @@ rb_iter_peek(struct ring_buffer_iter *it
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
+ 
+-static inline int rb_ok_to_lock(void)
+-{
+-	/*
+-	 * If an NMI die dumps out the content of the ring buffer
+-	 * do not grab locks. We also permanently disable the ring
+-	 * buffer too. A one time deal is all you get from reading
+-	 * the ring buffer from an NMI.
+-	 */
+-	if (likely(!in_nmi()))
+-		return 1;
+-
+-	tracing_off_permanent();
+-	return 0;
+-}
+-
+ /**
+  * ring_buffer_peek - peek at the next event to be read
+  * @buffer: The ring buffer to read
+@@ -3321,22 +3349,17 @@ ring_buffer_peek(struct ring_buffer *buf
+ 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ 	struct ring_buffer_event *event;
+ 	unsigned long flags;
+-	int dolock;
++	int locked;
+ 
+ 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ 		return NULL;
+ 
+-	dolock = rb_ok_to_lock();
+  again:
+-	local_irq_save(flags);
+-	if (dolock)
+-		raw_spin_lock(&cpu_buffer->reader_lock);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+ 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ 		rb_advance_reader(cpu_buffer);
+-	if (dolock)
+-		raw_spin_unlock(&cpu_buffer->reader_lock);
+-	local_irq_restore(flags);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ 
+ 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ 		goto again;
+@@ -3358,11 +3381,12 @@ ring_buffer_iter_peek(struct ring_buffer
+ 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ 	struct ring_buffer_event *event;
+ 	unsigned long flags;
++	int locked;
+ 
+  again:
+-	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 	event = rb_iter_peek(iter, ts);
+-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ 
+ 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ 		goto again;
+@@ -3388,9 +3412,7 @@ ring_buffer_consume(struct ring_buffer *
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct ring_buffer_event *event = NULL;
+ 	unsigned long flags;
+-	int dolock;
+-
+-	dolock = rb_ok_to_lock();
++	int locked;
+ 
+  again:
+ 	/* might be called in atomic */
+@@ -3400,9 +3422,7 @@ ring_buffer_consume(struct ring_buffer *
+ 		goto out;
+ 
+ 	cpu_buffer = buffer->buffers[cpu];
+-	local_irq_save(flags);
+-	if (dolock)
+-		raw_spin_lock(&cpu_buffer->reader_lock);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 
+ 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+ 	if (event) {
+@@ -3410,9 +3430,8 @@ ring_buffer_consume(struct ring_buffer *
+ 		rb_advance_reader(cpu_buffer);
+ 	}
+ 
+-	if (dolock)
+-		raw_spin_unlock(&cpu_buffer->reader_lock);
+-	local_irq_restore(flags);
++	read_buffer_unlock(cpu_buffer, flags, locked);
++
+ 
+  out:
+ 	preempt_enable();
+@@ -3497,17 +3516,18 @@ ring_buffer_read_start(struct ring_buffe
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	unsigned long flags;
++	int locked;
+ 
+ 	if (!iter)
+ 		return;
+ 
+ 	cpu_buffer = iter->cpu_buffer;
+ 
+-	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 	arch_spin_lock(&cpu_buffer->lock);
+ 	rb_iter_reset(iter);
+ 	arch_spin_unlock(&cpu_buffer->lock);
+-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_read_start);
+ 
+@@ -3541,8 +3561,9 @@ ring_buffer_read(struct ring_buffer_iter
+ 	struct ring_buffer_event *event;
+ 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ 	unsigned long flags;
++	int locked;
+ 
+-	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+  again:
+ 	event = rb_iter_peek(iter, ts);
+ 	if (!event)
+@@ -3553,7 +3574,7 @@ ring_buffer_read(struct ring_buffer_iter
+ 
+ 	rb_advance_iter(iter);
+  out:
+-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ 
+ 	return event;
+ }
+@@ -3618,13 +3639,14 @@ void ring_buffer_reset_cpu(struct ring_b
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ 	unsigned long flags;
++	int locked;
+ 
+ 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ 		return;
+ 
+ 	atomic_inc(&cpu_buffer->record_disabled);
+ 
+-	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 
+ 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
+ 		goto out;
+@@ -3636,7 +3658,7 @@ void ring_buffer_reset_cpu(struct ring_b
+ 	arch_spin_unlock(&cpu_buffer->lock);
+ 
+  out:
+-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ 
+ 	atomic_dec(&cpu_buffer->record_disabled);
+ }
+@@ -3663,22 +3685,16 @@ int ring_buffer_empty(struct ring_buffer
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	unsigned long flags;
+-	int dolock;
++	int locked;
+ 	int cpu;
+ 	int ret;
+ 
+-	dolock = rb_ok_to_lock();
+-
+ 	/* yes this is racy, but if you don't like the race, lock the buffer */
+ 	for_each_buffer_cpu(buffer, cpu) {
+ 		cpu_buffer = buffer->buffers[cpu];
+-		local_irq_save(flags);
+-		if (dolock)
+-			raw_spin_lock(&cpu_buffer->reader_lock);
++		locked = read_buffer_lock(cpu_buffer, &flags);
+ 		ret = rb_per_cpu_empty(cpu_buffer);
+-		if (dolock)
+-			raw_spin_unlock(&cpu_buffer->reader_lock);
+-		local_irq_restore(flags);
++		read_buffer_unlock(cpu_buffer, flags, locked);
+ 
+ 		if (!ret)
+ 			return 0;
+@@ -3697,22 +3713,16 @@ int ring_buffer_empty_cpu(struct ring_bu
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	unsigned long flags;
+-	int dolock;
++	int locked;
+ 	int ret;
+ 
+ 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ 		return 1;
+ 
+-	dolock = rb_ok_to_lock();
+-
+ 	cpu_buffer = buffer->buffers[cpu];
+-	local_irq_save(flags);
+-	if (dolock)
+-		raw_spin_lock(&cpu_buffer->reader_lock);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 	ret = rb_per_cpu_empty(cpu_buffer);
+-	if (dolock)
+-		raw_spin_unlock(&cpu_buffer->reader_lock);
+-	local_irq_restore(flags);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ 
+ 	return ret;
+ }
+@@ -3887,6 +3897,7 @@ int ring_buffer_read_page(struct ring_bu
+ 	unsigned int commit;
+ 	unsigned int read;
+ 	u64 save_timestamp;
++	int locked;
+ 	int ret = -1;
+ 
+ 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+@@ -3908,7 +3919,7 @@ int ring_buffer_read_page(struct ring_bu
+ 	if (!bpage)
+ 		goto out;
+ 
+-	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++	locked = read_buffer_lock(cpu_buffer, &flags);
+ 
+ 	reader = rb_get_reader_page(cpu_buffer);
+ 	if (!reader)
+@@ -4032,7 +4043,7 @@ int ring_buffer_read_page(struct ring_bu
+ 		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+ 
+  out_unlock:
+-	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++	read_buffer_unlock(cpu_buffer, flags, locked);
+ 
+  out:
+ 	return ret;
+Index: linux-3.2/kernel/lockdep.c
+===================================================================
+--- linux-3.2.orig/kernel/lockdep.c
++++ linux-3.2/kernel/lockdep.c
+@@ -3488,6 +3488,7 @@ static void check_flags(unsigned long fl
+ 		}
+ 	}
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	/*
+ 	 * We dont accurately track softirq state in e.g.
+ 	 * hardirq contexts (such as on 4KSTACKS), so only
+@@ -3502,6 +3503,7 @@ static void check_flags(unsigned long fl
+ 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+ 		}
+ 	}
++#endif
+ 
+ 	if (!debug_locks)
+ 		print_irqtrace_events(current);
+Index: linux-3.2/kernel/Kconfig.locks
+===================================================================
+--- linux-3.2.orig/kernel/Kconfig.locks
++++ linux-3.2/kernel/Kconfig.locks
+@@ -199,4 +199,4 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
+ 	def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+ 
+ config MUTEX_SPIN_ON_OWNER
+-	def_bool SMP && !DEBUG_MUTEXES
++	def_bool SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL
+Index: linux-3.2/include/linux/hardirq.h
+===================================================================
+--- linux-3.2.orig/include/linux/hardirq.h
++++ linux-3.2/include/linux/hardirq.h
+@@ -60,7 +60,11 @@
+ #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+ #define NMI_OFFSET	(1UL << NMI_SHIFT)
+ 
+-#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
++#else
++# define SOFTIRQ_DISABLE_OFFSET (0)
++#endif
+ 
+ #ifndef PREEMPT_ACTIVE
+ #define PREEMPT_ACTIVE_BITS	1
+@@ -73,10 +77,17 @@
+ #endif
+ 
+ #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+-#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+ #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ 				 | NMI_MASK))
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
++# define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
++#else
++# define softirq_count()	(0UL)
++extern int in_serving_softirq(void);
++#endif
++
+ /*
+  * Are we doing bottom half or hardware interrupt processing?
+  * Are we in a softirq context? Interrupt context?
+@@ -86,7 +97,6 @@
+ #define in_irq()		(hardirq_count())
+ #define in_softirq()		(softirq_count())
+ #define in_interrupt()		(irq_count())
+-#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+ 
+ /*
+  * Are we in NMI context?
+Index: linux-3.2/arch/powerpc/kernel/irq.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/kernel/irq.c
++++ linux-3.2/arch/powerpc/kernel/irq.c
+@@ -443,6 +443,7 @@ void irq_ctx_init(void)
+ 	}
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline void do_softirq_onstack(void)
+ {
+ 	struct thread_info *curtp, *irqtp;
+@@ -479,7 +480,7 @@ void do_softirq(void)
+ 
+ 	local_irq_restore(flags);
+ }
+-
++#endif
+ 
+ /*
+  * IRQ controller and virtual interrupts
+Index: linux-3.2/arch/powerpc/kernel/misc_32.S
+===================================================================
+--- linux-3.2.orig/arch/powerpc/kernel/misc_32.S
++++ linux-3.2/arch/powerpc/kernel/misc_32.S
+@@ -36,6 +36,7 @@
+ 
+ 	.text
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ 	mflr	r0
+ 	stw	r0,4(r1)
+@@ -46,6 +47,7 @@ _GLOBAL(call_do_softirq)
+ 	lwz	r0,4(r1)
+ 	mtlr	r0
+ 	blr
++#endif
+ 
+ _GLOBAL(call_handle_irq)
+ 	mflr	r0
+Index: linux-3.2/arch/powerpc/kernel/misc_64.S
+===================================================================
+--- linux-3.2.orig/arch/powerpc/kernel/misc_64.S
++++ linux-3.2/arch/powerpc/kernel/misc_64.S
+@@ -29,6 +29,7 @@
+ 
+ 	.text
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ 	mflr	r0
+ 	std	r0,16(r1)
+@@ -39,6 +40,7 @@ _GLOBAL(call_do_softirq)
+ 	ld	r0,16(r1)
+ 	mtlr	r0
+ 	blr
++#endif
+ 
+ _GLOBAL(call_handle_irq)
+ 	ld	r8,0(r6)
+Index: linux-3.2/arch/sh/kernel/irq.c
+===================================================================
+--- linux-3.2.orig/arch/sh/kernel/irq.c
++++ linux-3.2/arch/sh/kernel/irq.c
+@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
+ 	hardirq_ctx[cpu] = NULL;
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void)
+ {
+ 	unsigned long flags;
+@@ -191,6 +192,7 @@ asmlinkage void do_softirq(void)
+ 
+ 	local_irq_restore(flags);
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+Index: linux-3.2/arch/sparc/kernel/irq_64.c
+===================================================================
+--- linux-3.2.orig/arch/sparc/kernel/irq_64.c
++++ linux-3.2/arch/sparc/kernel/irq_64.c
+@@ -699,6 +699,7 @@ void __irq_entry handler_irq(int pil, st
+ 	set_irq_regs(old_regs);
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq(void)
+ {
+ 	unsigned long flags;
+@@ -724,6 +725,7 @@ void do_softirq(void)
+ 
+ 	local_irq_restore(flags);
+ }
++#endif
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+Index: linux-3.2/arch/x86/kernel/entry_64.S
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/entry_64.S
++++ linux-3.2/arch/x86/kernel/entry_64.S
+@@ -1192,6 +1192,7 @@ ENTRY(kernel_execve)
+ 	CFI_ENDPROC
+ END(kernel_execve)
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(call_softirq)
+ 	CFI_STARTPROC
+@@ -1211,6 +1212,7 @@ ENTRY(call_softirq)
+ 	ret
+ 	CFI_ENDPROC
+ END(call_softirq)
++#endif
+ 
+ #ifdef CONFIG_XEN
+ zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
+Index: linux-3.2/arch/x86/kernel/irq_32.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/irq_32.c
++++ linux-3.2/arch/x86/kernel/irq_32.c
+@@ -149,6 +149,7 @@ void __cpuinit irq_ctx_init(int cpu)
+ 	       cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void)
+ {
+ 	unsigned long flags;
+@@ -179,6 +180,7 @@ asmlinkage void do_softirq(void)
+ 
+ 	local_irq_restore(flags);
+ }
++#endif
+ 
+ bool handle_irq(unsigned irq, struct pt_regs *regs)
+ {
+Index: linux-3.2/arch/x86/kernel/irq_64.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/irq_64.c
++++ linux-3.2/arch/x86/kernel/irq_64.c
+@@ -62,7 +62,7 @@ bool handle_irq(unsigned irq, struct pt_
+ 	return true;
+ }
+ 
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void call_softirq(void);
+ 
+ asmlinkage void do_softirq(void)
+@@ -82,3 +82,4 @@ asmlinkage void do_softirq(void)
+ 	}
+ 	local_irq_restore(flags);
+ }
++#endif
+Index: linux-3.2/drivers/md/raid5.c
+===================================================================
+--- linux-3.2.orig/drivers/md/raid5.c
++++ linux-3.2/drivers/md/raid5.c
+@@ -1245,8 +1245,9 @@ static void __raid_run_ops(struct stripe
+ 	struct raid5_percpu *percpu;
+ 	unsigned long cpu;
+ 
+-	cpu = get_cpu();
++	cpu = get_cpu_light();
+ 	percpu = per_cpu_ptr(conf->percpu, cpu);
++	spin_lock(&percpu->lock);
+ 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
+ 		ops_run_biofill(sh);
+ 		overlap_clear++;
+@@ -1298,7 +1299,8 @@ static void __raid_run_ops(struct stripe
+ 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
+ 				wake_up(&sh->raid_conf->wait_for_overlap);
+ 		}
+-	put_cpu();
++	spin_unlock(&percpu->lock);
++	put_cpu_light();
+ }
+ 
+ #ifdef CONFIG_MULTICORE_RAID456
+@@ -4531,6 +4533,7 @@ static int raid5_alloc_percpu(struct r5c
+ 			break;
+ 		}
+ 		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
++		spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
+ 	}
+ #ifdef CONFIG_HOTPLUG_CPU
+ 	conf->cpu_notify.notifier_call = raid456_cpu_notify;
+Index: linux-3.2/drivers/md/raid5.h
+===================================================================
+--- linux-3.2.orig/drivers/md/raid5.h
++++ linux-3.2/drivers/md/raid5.h
+@@ -405,6 +405,7 @@ struct r5conf {
+ 	int			recovery_disabled;
+ 	/* per cpu variables */
+ 	struct raid5_percpu {
++		spinlock_t	lock;	     /* Protection for -RT */
+ 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
+ 		void		*scribble;   /* space for constructing buffer
+ 					      * lists and performing address
+Index: linux-3.2/include/linux/rtmutex.h
+===================================================================
+--- linux-3.2.orig/include/linux/rtmutex.h
++++ linux-3.2/include/linux/rtmutex.h
+@@ -14,7 +14,7 @@
+ 
+ #include <linux/linkage.h>
+ #include <linux/plist.h>
+-#include <linux/spinlock_types.h>
++#include <linux/spinlock_types_raw.h>
+ 
+ extern int max_lock_depth; /* for sysctl */
+ 
+@@ -29,9 +29,10 @@ struct rt_mutex {
+ 	raw_spinlock_t		wait_lock;
+ 	struct plist_head	wait_list;
+ 	struct task_struct	*owner;
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+ 	int			save_state;
+-	const char 		*name, *file;
++#ifdef CONFIG_DEBUG_RT_MUTEXES
++	const char		*file;
++	const char		*name;
+ 	int			line;
+ 	void			*magic;
+ #endif
+@@ -56,19 +57,39 @@ struct hrtimer_sleeper;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ 	, .name = #mutexname, .file = __FILE__, .line = __LINE__
+-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, __func__)
++
++# define rt_mutex_init(mutex)					\
++	do {							\
++		raw_spin_lock_init(&(mutex)->wait_lock);	\
++		__rt_mutex_init(mutex, #mutex);			\
++	} while (0)
++
+  extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+ #else
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, NULL)
++
++# define rt_mutex_init(mutex)					\
++	do {							\
++		raw_spin_lock_init(&(mutex)->wait_lock);	\
++		__rt_mutex_init(mutex, #mutex);			\
++	} while (0)
++
+ # define rt_mutex_debug_task_free(t)			do { } while (0)
+ #endif
+ 
+-#define __RT_MUTEX_INITIALIZER(mutexname) \
+-	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ 	, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
+ 	, .owner = NULL \
+-	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
++	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)
++
++
++#define __RT_MUTEX_INITIALIZER(mutexname) \
++	{ __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
++
++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
++	{ __RT_MUTEX_INITIALIZER_PLAIN(mutexname)    \
++	  , .save_state = 1 }
+ 
+ #define DEFINE_RT_MUTEX(mutexname) \
+ 	struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+@@ -90,6 +111,7 @@ extern void rt_mutex_destroy(struct rt_m
+ extern void rt_mutex_lock(struct rt_mutex *lock);
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
+ 						int detect_deadlock);
++extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock);
+ extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+ 					struct hrtimer_sleeper *timeout,
+ 					int detect_deadlock);
+Index: linux-3.2/kernel/rtmutex.c
+===================================================================
+--- linux-3.2.orig/kernel/rtmutex.c
++++ linux-3.2/kernel/rtmutex.c
+@@ -8,6 +8,12 @@
+  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
+  *  Copyright (C) 2006 Esben Nielsen
+  *
++ * Adaptive Spinlocks:
++ *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
++ *                                   and Peter Morreale,
++ * Adaptive Spinlocks simplification:
++ *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt at redhat.com>
++ *
+  *  See Documentation/rt-mutex-design.txt for details.
+  */
+ #include <linux/spinlock.h>
+@@ -67,6 +73,11 @@ static void fixup_rt_mutex_waiters(struc
+ 		clear_rt_mutex_waiters(lock);
+ }
+ 
++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
++{
++	return waiter && waiter != PI_WAKEUP_INPROGRESS;
++}
++
+ /*
+  * We can speed up the acquire/release, if the architecture
+  * supports cmpxchg and if there's no debugging state to be set up
+@@ -90,6 +101,12 @@ static inline void mark_rt_mutex_waiters
+ }
+ #endif
+ 
++static inline void init_lists(struct rt_mutex *lock)
++{
++	if (unlikely(!lock->wait_list.node_list.prev))
++		plist_head_init(&lock->wait_list);
++}
++
+ /*
+  * Calculate task priority from the waiter list priority
+  *
+@@ -136,6 +153,14 @@ static void rt_mutex_adjust_prio(struct 
+ 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ }
+ 
++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
++{
++	if (waiter->savestate)
++		wake_up_lock_sleeper(waiter->task);
++	else
++		wake_up_process(waiter->task);
++}
++
+ /*
+  * Max number of times we'll walk the boosting chain:
+  */
+@@ -196,7 +221,7 @@ static int rt_mutex_adjust_prio_chain(st
+ 	 * reached or the state of the chain has changed while we
+ 	 * dropped the locks.
+ 	 */
+-	if (!waiter)
++	if (!rt_mutex_real_waiter(waiter))
+ 		goto out_unlock_pi;
+ 
+ 	/*
+@@ -247,13 +272,15 @@ static int rt_mutex_adjust_prio_chain(st
+ 	/* Release the task */
+ 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 	if (!rt_mutex_owner(lock)) {
++		struct rt_mutex_waiter *lock_top_waiter;
++
+ 		/*
+ 		 * If the requeue above changed the top waiter, then we need
+ 		 * to wake the new top waiter up to try to get the lock.
+ 		 */
+-
+-		if (top_waiter != rt_mutex_top_waiter(lock))
+-			wake_up_process(rt_mutex_top_waiter(lock)->task);
++		lock_top_waiter = rt_mutex_top_waiter(lock);
++		if (top_waiter != lock_top_waiter)
++			rt_mutex_wake_waiter(lock_top_waiter);
+ 		raw_spin_unlock(&lock->wait_lock);
+ 		goto out_put_task;
+ 	}
+@@ -298,6 +325,25 @@ static int rt_mutex_adjust_prio_chain(st
+ 	return ret;
+ }
+ 
++
++#define STEAL_NORMAL  0
++#define STEAL_LATERAL 1
++
++/*
++ * Note that RT tasks are excluded from lateral-steals to prevent the
++ * introduction of an unbounded latency
++ */
++static inline int lock_is_stealable(struct task_struct *task,
++				    struct task_struct *pendowner, int mode)
++{
++	if (mode == STEAL_NORMAL || rt_task(task)) {
++		if (task->prio >= pendowner->prio)
++			return 0;
++	} else if (task->prio > pendowner->prio)
++		return 0;
++	return 1;
++}
++
+ /*
+  * Try to take an rt-mutex
+  *
+@@ -307,8 +353,9 @@ static int rt_mutex_adjust_prio_chain(st
+  * @task:   the task which wants to acquire the lock
+  * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+  */
+-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+-		struct rt_mutex_waiter *waiter)
++static int
++__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
++		       struct rt_mutex_waiter *waiter, int mode)
+ {
+ 	/*
+ 	 * We have to be careful here if the atomic speedups are
+@@ -341,12 +388,14 @@ static int try_to_take_rt_mutex(struct r
+ 	 * 3) it is top waiter
+ 	 */
+ 	if (rt_mutex_has_waiters(lock)) {
+-		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+-			if (!waiter || waiter != rt_mutex_top_waiter(lock))
+-				return 0;
+-		}
++		struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
++
++		if (task != pown && !lock_is_stealable(task, pown, mode))
++			return 0;
+ 	}
+ 
++	/* We got the lock. */
++
+ 	if (waiter || rt_mutex_has_waiters(lock)) {
+ 		unsigned long flags;
+ 		struct rt_mutex_waiter *top;
+@@ -371,7 +420,6 @@ static int try_to_take_rt_mutex(struct r
+ 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 	}
+ 
+-	/* We got the lock. */
+ 	debug_rt_mutex_lock(lock);
+ 
+ 	rt_mutex_set_owner(lock, task);
+@@ -381,6 +429,13 @@ static int try_to_take_rt_mutex(struct r
+ 	return 1;
+ }
+ 
++static inline int
++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
++		     struct rt_mutex_waiter *waiter)
++{
++	return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
++}
++
+ /*
+  * Task blocks on lock.
+  *
+@@ -399,6 +454,23 @@ static int task_blocks_on_rt_mutex(struc
+ 	int chain_walk = 0, res;
+ 
+ 	raw_spin_lock_irqsave(&task->pi_lock, flags);
++
++	/*
++	 * In the case of futex requeue PI, this will be a proxy
++	 * lock. The task will wake unaware that it is enqueued on
++	 * this lock. Avoid blocking on two locks and corrupting
++	 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
++	 * flag. futex_wait_requeue_pi() sets this when it wakes up
++	 * before requeue (due to a signal or timeout). Do not enqueue
++	 * the task if PI_WAKEUP_INPROGRESS is set.
++	 */
++	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
++		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++		return -EAGAIN;
++	}
++
++	BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
++
+ 	__rt_mutex_adjust_prio(task);
+ 	waiter->task = task;
+ 	waiter->lock = lock;
+@@ -423,7 +495,7 @@ static int task_blocks_on_rt_mutex(struc
+ 		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+ 
+ 		__rt_mutex_adjust_prio(owner);
+-		if (owner->pi_blocked_on)
++		if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ 			chain_walk = 1;
+ 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ 	}
+@@ -478,7 +550,7 @@ static void wakeup_next_waiter(struct rt
+ 
+ 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+ 
+-	wake_up_process(waiter->task);
++	rt_mutex_wake_waiter(waiter);
+ }
+ 
+ /*
+@@ -517,7 +589,7 @@ static void remove_waiter(struct rt_mute
+ 		}
+ 		__rt_mutex_adjust_prio(owner);
+ 
+-		if (owner->pi_blocked_on)
++		if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ 			chain_walk = 1;
+ 
+ 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+@@ -551,23 +623,316 @@ void rt_mutex_adjust_pi(struct task_stru
+ 	raw_spin_lock_irqsave(&task->pi_lock, flags);
+ 
+ 	waiter = task->pi_blocked_on;
+-	if (!waiter || waiter->list_entry.prio == task->prio) {
++	if (!rt_mutex_real_waiter(waiter) ||
++	    waiter->list_entry.prio == task->prio) {
+ 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 		return;
+ 	}
+ 
+-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+-
+ 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
+ 	get_task_struct(task);
++	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ 	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+ }
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * preemptible spin_lock functions:
++ */
++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
++					 void  (*slowfn)(struct rt_mutex *lock))
++{
++	might_sleep();
++
++	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
++		rt_mutex_deadlock_account_lock(lock, current);
++	else
++		slowfn(lock);
++}
++
++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
++					   void  (*slowfn)(struct rt_mutex *lock))
++{
++	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
++		rt_mutex_deadlock_account_unlock(current);
++	else
++		slowfn(lock);
++}
++
++#ifdef CONFIG_SMP
++/*
++ * Note that owner is a speculative pointer and dereferencing relies
++ * on rcu_read_lock() and the check against the lock owner.
++ */
++static int adaptive_wait(struct rt_mutex *lock,
++			 struct task_struct *owner)
++{
++	int res = 0;
++
++	rcu_read_lock();
++	for (;;) {
++		if (owner != rt_mutex_owner(lock))
++			break;
++		/*
++		 * Ensure that owner->on_cpu is dereferenced _after_
++		 * checking the above to be valid.
++		 */
++		barrier();
++		if (!owner->on_cpu) {
++			res = 1;
++			break;
++		}
++		cpu_relax();
++	}
++	rcu_read_unlock();
++	return res;
++}
++#else
++static int adaptive_wait(struct rt_mutex *lock,
++			 struct task_struct *orig_owner)
++{
++	return 1;
++}
++#endif
++
++# define pi_lock(lock)			raw_spin_lock_irq(lock)
++# define pi_unlock(lock)		raw_spin_unlock_irq(lock)
++
++/*
++ * Slow path lock function spin_lock style: this variant is very
++ * careful not to miss any non-lock wakeups.
++ *
++ * We store the current state under p->pi_lock in p->saved_state and
++ * the try_to_wake_up() code handles this accordingly.
++ */
++static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
++{
++	struct task_struct *lock_owner, *self = current;
++	struct rt_mutex_waiter waiter, *top_waiter;
++	int ret;
++
++	rt_mutex_init_waiter(&waiter, true);
++
++	raw_spin_lock(&lock->wait_lock);
++	init_lists(lock);
++
++	if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
++		raw_spin_unlock(&lock->wait_lock);
++		return;
++	}
++
++	BUG_ON(rt_mutex_owner(lock) == self);
++
++	/*
++	 * We save whatever state the task is in and we'll restore it
++	 * after acquiring the lock taking real wakeups into account
++	 * as well. We are serialized via pi_lock against wakeups. See
++	 * try_to_wake_up().
++	 */
++	pi_lock(&self->pi_lock);
++	self->saved_state = self->state;
++	__set_current_state(TASK_UNINTERRUPTIBLE);
++	pi_unlock(&self->pi_lock);
++
++	ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
++	BUG_ON(ret);
++
++	for (;;) {
++		/* Try to acquire the lock again. */
++		if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
++			break;
++
++		top_waiter = rt_mutex_top_waiter(lock);
++		lock_owner = rt_mutex_owner(lock);
++
++		raw_spin_unlock(&lock->wait_lock);
++
++		debug_rt_mutex_print_deadlock(&waiter);
++
++		if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
++			schedule_rt_mutex(lock);
++
++		raw_spin_lock(&lock->wait_lock);
++
++		pi_lock(&self->pi_lock);
++		__set_current_state(TASK_UNINTERRUPTIBLE);
++		pi_unlock(&self->pi_lock);
++	}
++
++	/*
++	 * Restore the task state to current->saved_state. We set it
++	 * to the original state above and the try_to_wake_up() code
++	 * has possibly updated it when a real (non-rtmutex) wakeup
++	 * happened while we were blocked. Clear saved_state so
++	 * try_to_wake_up() does not get confused.
++	 */
++	pi_lock(&self->pi_lock);
++	__set_current_state(self->saved_state);
++	self->saved_state = TASK_RUNNING;
++	pi_unlock(&self->pi_lock);
++
++	/*
++	 * try_to_take_rt_mutex() sets the waiter bit
++	 * unconditionally. We might have to fix that up:
++	 */
++	fixup_rt_mutex_waiters(lock);
++
++	BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
++	BUG_ON(!plist_node_empty(&waiter.list_entry));
++
++	raw_spin_unlock(&lock->wait_lock);
++
++	debug_rt_mutex_free_waiter(&waiter);
++}
++
++/*
++ * Slow path to release a rt_mutex spin_lock style
++ */
++static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
++{
++	raw_spin_lock(&lock->wait_lock);
++
++	debug_rt_mutex_unlock(lock);
++
++	rt_mutex_deadlock_account_unlock(current);
++
++	if (!rt_mutex_has_waiters(lock)) {
++		lock->owner = NULL;
++		raw_spin_unlock(&lock->wait_lock);
++		return;
++	}
++
++	wakeup_next_waiter(lock);
++
++	raw_spin_unlock(&lock->wait_lock);
++
++	/* Undo pi boosting when necessary */
++	rt_mutex_adjust_prio(current);
++}
++
++void __lockfunc rt_spin_lock(spinlock_t *lock)
++{
++	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock);
++
++void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
++{
++	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++}
++EXPORT_SYMBOL(__rt_spin_lock);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
++{
++	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock_nested);
++#endif
++
++void __lockfunc rt_spin_unlock(spinlock_t *lock)
++{
++	/* NOTE: we always pass in '1' for nested, for simplicity */
++	spin_release(&lock->dep_map, 1, _RET_IP_);
++	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(rt_spin_unlock);
++
++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
++{
++	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(__rt_spin_unlock);
++
++/*
++ * Wait for the lock to get unlocked: instead of polling for an unlock
++ * (like raw spinlocks do), we lock and unlock, to force the kernel to
++ * schedule if there's contention:
++ */
++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
++{
++	spin_lock(lock);
++	spin_unlock(lock);
++}
++EXPORT_SYMBOL(rt_spin_unlock_wait);
++
++int __lockfunc rt_spin_trylock(spinlock_t *lock)
++{
++	int ret = rt_mutex_trylock(&lock->lock);
++
++	if (ret)
++		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++	return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock);
++
++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
++{
++	int ret;
++
++	local_bh_disable();
++	ret = rt_mutex_trylock(&lock->lock);
++	if (ret) {
++		migrate_disable();
++		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++	} else
++		local_bh_enable();
++	return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_bh);
++
++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
++{
++	int ret;
++
++	*flags = 0;
++	migrate_disable();
++	ret = rt_mutex_trylock(&lock->lock);
++	if (ret)
++		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++	else
++		migrate_enable();
++	return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_irqsave);
++
++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
++{
++	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
++	if (atomic_add_unless(atomic, -1, 1))
++		return 0;
++	migrate_disable();
++	rt_spin_lock(lock);
++	if (atomic_dec_and_test(atomic))
++		return 1;
++	rt_spin_unlock(lock);
++	migrate_enable();
++	return 0;
++}
++EXPORT_SYMBOL(atomic_dec_and_spin_lock);
++
++void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	/*
++	 * Make sure we are not reinitializing a held lock:
++	 */
++	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
++	lockdep_init_map(&lock->dep_map, name, key, 0);
++#endif
++}
++EXPORT_SYMBOL(__rt_spin_lock_init);
++
++#endif /* PREEMPT_RT_FULL */
++
+ /**
+  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+  * @lock:		 the rt_mutex to take
+  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
+- * 			 or TASK_UNINTERRUPTIBLE)
++ *			 or TASK_UNINTERRUPTIBLE)
+  * @timeout:		 the pre-initialized and started timer, or NULL for none
+  * @waiter:		 the pre-initialized rt_mutex_waiter
+  *
+@@ -631,9 +996,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ 	struct rt_mutex_waiter waiter;
+ 	int ret = 0;
+ 
+-	debug_rt_mutex_init_waiter(&waiter);
++	rt_mutex_init_waiter(&waiter, false);
+ 
+ 	raw_spin_lock(&lock->wait_lock);
++	init_lists(lock);
+ 
+ 	/* Try to acquire the lock again: */
+ 	if (try_to_take_rt_mutex(lock, current, NULL)) {
+@@ -686,6 +1052,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
+ 	int ret = 0;
+ 
+ 	raw_spin_lock(&lock->wait_lock);
++	init_lists(lock);
+ 
+ 	if (likely(rt_mutex_owner(lock) != current)) {
+ 
+@@ -799,12 +1166,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+ /**
+  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
+  *
+- * @lock: 		the rt_mutex to be locked
++ * @lock:		the rt_mutex to be locked
+  * @detect_deadlock:	deadlock detection on/off
+  *
+  * Returns:
+- *  0 		on success
+- * -EINTR 	when interrupted by a signal
++ *  0		on success
++ * -EINTR	when interrupted by a signal
+  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
+  */
+ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
+@@ -818,17 +1185,38 @@ int __sched rt_mutex_lock_interruptible(
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+ 
+ /**
++ * rt_mutex_lock_killable - lock a rt_mutex killable
++ *
++ * @lock:		the rt_mutex to be locked
++ * @detect_deadlock:	deadlock detection on/off
++ *
++ * Returns:
++ *  0		on success
++ * -EINTR	when interrupted by a signal
++ * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
++ */
++int __sched rt_mutex_lock_killable(struct rt_mutex *lock,
++				   int detect_deadlock)
++{
++	might_sleep();
++
++	return rt_mutex_fastlock(lock, TASK_KILLABLE,
++				 detect_deadlock, rt_mutex_slowlock);
++}
++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
++
++/**
+  * rt_mutex_timed_lock - lock a rt_mutex interruptible
+  *			the timeout structure is provided
+  *			by the caller
+  *
+- * @lock: 		the rt_mutex to be locked
++ * @lock:		the rt_mutex to be locked
+  * @timeout:		timeout structure or NULL (no timeout)
+  * @detect_deadlock:	deadlock detection on/off
+  *
+  * Returns:
+- *  0 		on success
+- * -EINTR 	when interrupted by a signal
++ *  0		on success
++ * -EINTR	when interrupted by a signal
+  * -ETIMEDOUT	when the timeout expired
+  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
+  */
+@@ -897,7 +1285,6 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+ {
+ 	lock->owner = NULL;
+-	raw_spin_lock_init(&lock->wait_lock);
+ 	plist_head_init(&lock->wait_list);
+ 
+ 	debug_rt_mutex_init(lock, name);
+@@ -917,7 +1304,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ 				struct task_struct *proxy_owner)
+ {
+-	__rt_mutex_init(lock, NULL);
++	rt_mutex_init(lock);
+ 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
+ 	rt_mutex_set_owner(lock, proxy_owner);
+ 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
+Index: linux-3.2/kernel/futex.c
+===================================================================
+--- linux-3.2.orig/kernel/futex.c
++++ linux-3.2/kernel/futex.c
+@@ -1410,6 +1410,16 @@ retry_private:
+ 				requeue_pi_wake_futex(this, &key2, hb2);
+ 				drop_count++;
+ 				continue;
++			} else if (ret == -EAGAIN) {
++				/*
++				 * Waiter was woken by timeout or
++				 * signal and has set pi_blocked_on to
++				 * PI_WAKEUP_INPROGRESS before we
++				 * tried to enqueue it on the rtmutex.
++				 */
++				this->pi_state = NULL;
++				free_pi_state(pi_state);
++				continue;
+ 			} else if (ret) {
+ 				/* -EDEADLK */
+ 				this->pi_state = NULL;
+@@ -2254,7 +2264,7 @@ static int futex_wait_requeue_pi(u32 __u
+ 	struct hrtimer_sleeper timeout, *to = NULL;
+ 	struct rt_mutex_waiter rt_waiter;
+ 	struct rt_mutex *pi_mutex = NULL;
+-	struct futex_hash_bucket *hb;
++	struct futex_hash_bucket *hb, *hb2;
+ 	union futex_key key2 = FUTEX_KEY_INIT;
+ 	struct futex_q q = futex_q_init;
+ 	int res, ret;
+@@ -2276,8 +2286,7 @@ static int futex_wait_requeue_pi(u32 __u
+ 	 * The waiter is allocated on our stack, manipulated by the requeue
+ 	 * code while we sleep on uaddr.
+ 	 */
+-	debug_rt_mutex_init_waiter(&rt_waiter);
+-	rt_waiter.task = NULL;
++	rt_mutex_init_waiter(&rt_waiter, false);
+ 
+ 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
+ 	if (unlikely(ret != 0))
+@@ -2298,20 +2307,55 @@ static int futex_wait_requeue_pi(u32 __u
+ 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ 	futex_wait_queue_me(hb, &q, to);
+ 
+-	spin_lock(&hb->lock);
+-	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+-	spin_unlock(&hb->lock);
+-	if (ret)
+-		goto out_put_keys;
++	/*
++	 * On RT we must avoid races with requeue and trying to block
++	 * on two mutexes (hb->lock and uaddr2's rtmutex) by
++	 * serializing access to pi_blocked_on with pi_lock.
++	 */
++	raw_spin_lock_irq(&current->pi_lock);
++	if (current->pi_blocked_on) {
++		/*
++		 * We have been requeued or are in the process of
++		 * being requeued.
++		 */
++		raw_spin_unlock_irq(&current->pi_lock);
++	} else {
++		/*
++		 * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
++		 * prevents a concurrent requeue from moving us to the
++		 * uaddr2 rtmutex. After that we can safely acquire
++		 * (and possibly block on) hb->lock.
++		 */
++		current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
++		raw_spin_unlock_irq(&current->pi_lock);
++
++		spin_lock(&hb->lock);
++
++		/*
++		 * Clean up pi_blocked_on. We might leak it otherwise
++		 * when we succeeded with the hb->lock in the fast
++		 * path.
++		 */
++		raw_spin_lock_irq(&current->pi_lock);
++		current->pi_blocked_on = NULL;
++		raw_spin_unlock_irq(&current->pi_lock);
++
++		ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
++		spin_unlock(&hb->lock);
++		if (ret)
++			goto out_put_keys;
++	}
+ 
+ 	/*
+-	 * In order for us to be here, we know our q.key == key2, and since
+-	 * we took the hb->lock above, we also know that futex_requeue() has
+-	 * completed and we no longer have to concern ourselves with a wakeup
+-	 * race with the atomic proxy lock acquisition by the requeue code. The
+-	 * futex_requeue dropped our key1 reference and incremented our key2
+-	 * reference count.
++	 * In order to be here, we have either been requeued, are in
++	 * the process of being requeued, or requeue successfully
++	 * acquired uaddr2 on our behalf.  If pi_blocked_on was
++	 * non-null above, we may be racing with a requeue.  Do not
++	 * rely on q->lock_ptr to be hb2->lock until after blocking on
++	 * hb->lock or hb2->lock. The futex_requeue dropped our key1
++	 * reference and incremented our key2 reference count.
+ 	 */
++	hb2 = hash_futex(&key2);
+ 
+ 	/* Check if the requeue code acquired the second futex for us. */
+ 	if (!q.rt_waiter) {
+@@ -2320,9 +2364,10 @@ static int futex_wait_requeue_pi(u32 __u
+ 		 * did a lock-steal - fix up the PI-state in that case.
+ 		 */
+ 		if (q.pi_state && (q.pi_state->owner != current)) {
+-			spin_lock(q.lock_ptr);
++			spin_lock(&hb2->lock);
++			BUG_ON(&hb2->lock != q.lock_ptr);
+ 			ret = fixup_pi_state_owner(uaddr2, &q, current);
+-			spin_unlock(q.lock_ptr);
++			spin_unlock(&hb2->lock);
+ 		}
+ 	} else {
+ 		/*
+@@ -2335,7 +2380,8 @@ static int futex_wait_requeue_pi(u32 __u
+ 		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+ 		debug_rt_mutex_free_waiter(&rt_waiter);
+ 
+-		spin_lock(q.lock_ptr);
++		spin_lock(&hb2->lock);
++		BUG_ON(&hb2->lock != q.lock_ptr);
+ 		/*
+ 		 * Fixup the pi_state owner and possibly acquire the lock if we
+ 		 * haven't already.
+Index: linux-3.2/kernel/rtmutex_common.h
+===================================================================
+--- linux-3.2.orig/kernel/rtmutex_common.h
++++ linux-3.2/kernel/rtmutex_common.h
+@@ -49,6 +49,7 @@ struct rt_mutex_waiter {
+ 	struct plist_node	pi_list_entry;
+ 	struct task_struct	*task;
+ 	struct rt_mutex		*lock;
++	bool			savestate;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ 	unsigned long		ip;
+ 	struct pid		*deadlock_task_pid;
+@@ -103,6 +104,8 @@ static inline struct task_struct *rt_mut
+ /*
+  * PI-futex support (proxy locking functions, etc.):
+  */
++#define PI_WAKEUP_INPROGRESS	((struct rt_mutex_waiter *) 1)
++
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ 				       struct task_struct *proxy_owner);
+@@ -123,4 +126,12 @@ extern int rt_mutex_finish_proxy_lock(st
+ # include "rtmutex.h"
+ #endif
+ 
++static inline void
++rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
++{
++	debug_rt_mutex_init_waiter(waiter);
++	waiter->task = NULL;
++	waiter->savestate = savestate;
++}
++
+ #endif
+Index: linux-3.2/include/linux/rwlock_types.h
+===================================================================
+--- linux-3.2.orig/include/linux/rwlock_types.h
++++ linux-3.2/include/linux/rwlock_types.h
+@@ -1,6 +1,10 @@
+ #ifndef __LINUX_RWLOCK_TYPES_H
+ #define __LINUX_RWLOCK_TYPES_H
+ 
++#if !defined(__LINUX_SPINLOCK_TYPES_H)
++# error "Do not include directly, include spinlock_types.h"
++#endif
++
+ /*
+  * include/linux/rwlock_types.h - generic rwlock type definitions
+  *				  and initializers
+@@ -43,6 +47,7 @@ typedef struct {
+ 				RW_DEP_MAP_INIT(lockname) }
+ #endif
+ 
+-#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
++#define DEFINE_RWLOCK(name) \
++	rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+ 
+ #endif /* __LINUX_RWLOCK_TYPES_H */
+Index: linux-3.2/include/linux/spinlock_types.h
+===================================================================
+--- linux-3.2.orig/include/linux/spinlock_types.h
++++ linux-3.2/include/linux/spinlock_types.h
+@@ -9,80 +9,15 @@
+  * Released under the General Public License (GPL).
+  */
+ 
+-#if defined(CONFIG_SMP)
+-# include <asm/spinlock_types.h>
+-#else
+-# include <linux/spinlock_types_up.h>
+-#endif
+-
+-#include <linux/lockdep.h>
+-
+-typedef struct raw_spinlock {
+-	arch_spinlock_t raw_lock;
+-#ifdef CONFIG_GENERIC_LOCKBREAK
+-	unsigned int break_lock;
+-#endif
+-#ifdef CONFIG_DEBUG_SPINLOCK
+-	unsigned int magic, owner_cpu;
+-	void *owner;
+-#endif
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-	struct lockdep_map dep_map;
+-#endif
+-} raw_spinlock_t;
+-
+-#define SPINLOCK_MAGIC		0xdead4ead
+-
+-#define SPINLOCK_OWNER_INIT	((void *)-1L)
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+-#else
+-# define SPIN_DEP_MAP_INIT(lockname)
+-#endif
++#include <linux/spinlock_types_raw.h>
+ 
+-#ifdef CONFIG_DEBUG_SPINLOCK
+-# define SPIN_DEBUG_INIT(lockname)		\
+-	.magic = SPINLOCK_MAGIC,		\
+-	.owner_cpu = -1,			\
+-	.owner = SPINLOCK_OWNER_INIT,
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_types_nort.h>
++# include <linux/rwlock_types.h>
+ #else
+-# define SPIN_DEBUG_INIT(lockname)
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
+ #endif
+ 
+-#define __RAW_SPIN_LOCK_INITIALIZER(lockname)	\
+-	{					\
+-	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
+-	SPIN_DEBUG_INIT(lockname)		\
+-	SPIN_DEP_MAP_INIT(lockname) }
+-
+-#define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
+-	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_RAW_SPINLOCK(x)	raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+-
+-typedef struct spinlock {
+-	union {
+-		struct raw_spinlock rlock;
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+-		struct {
+-			u8 __padding[LOCK_PADSIZE];
+-			struct lockdep_map dep_map;
+-		};
+-#endif
+-	};
+-} spinlock_t;
+-
+-#define __SPIN_LOCK_INITIALIZER(lockname) \
+-	{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+-
+-#define __SPIN_LOCK_UNLOCKED(lockname) \
+-	(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+-
+-#include <linux/rwlock_types.h>
+-
+ #endif /* __LINUX_SPINLOCK_TYPES_H */
+Index: linux-3.2/include/linux/spinlock_types_nort.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/linux/spinlock_types_nort.h
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
++#define __LINUX_SPINLOCK_TYPES_NORT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * The non RT version maps spinlocks to raw_spinlocks
++ */
++typedef struct spinlock {
++	union {
++		struct raw_spinlock rlock;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
++		struct {
++			u8 __padding[LOCK_PADSIZE];
++			struct lockdep_map dep_map;
++		};
++#endif
++	};
++} spinlock_t;
++
++#define __SPIN_LOCK_INITIALIZER(lockname) \
++	{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
++
++#define __SPIN_LOCK_UNLOCKED(lockname) \
++	(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++
++#endif
+Index: linux-3.2/include/linux/spinlock_types_raw.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/linux/spinlock_types_raw.h
+@@ -0,0 +1,56 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
++#define __LINUX_SPINLOCK_TYPES_RAW_H
++
++#if defined(CONFIG_SMP)
++# include <asm/spinlock_types.h>
++#else
++# include <linux/spinlock_types_up.h>
++#endif
++
++#include <linux/lockdep.h>
++
++typedef struct raw_spinlock {
++	arch_spinlock_t raw_lock;
++#ifdef CONFIG_GENERIC_LOCKBREAK
++	unsigned int break_lock;
++#endif
++#ifdef CONFIG_DEBUG_SPINLOCK
++	unsigned int magic, owner_cpu;
++	void *owner;
++#endif
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	struct lockdep_map dep_map;
++#endif
++} raw_spinlock_t;
++
++#define SPINLOCK_MAGIC		0xdead4ead
++
++#define SPINLOCK_OWNER_INIT	((void *)-1L)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
++#else
++# define SPIN_DEP_MAP_INIT(lockname)
++#endif
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define SPIN_DEBUG_INIT(lockname)		\
++	.magic = SPINLOCK_MAGIC,		\
++	.owner_cpu = -1,			\
++	.owner = SPINLOCK_OWNER_INIT,
++#else
++# define SPIN_DEBUG_INIT(lockname)
++#endif
++
++#define __RAW_SPIN_LOCK_INITIALIZER(lockname)	\
++	{					\
++	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
++	SPIN_DEBUG_INIT(lockname)		\
++	SPIN_DEP_MAP_INIT(lockname) }
++
++#define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
++	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_RAW_SPINLOCK(x)	raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
++
++#endif
+Index: linux-3.2/include/linux/rwlock_types_rt.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/linux/rwlock_types_rt.h
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_RWLOCK_TYPES_RT_H
++#define __LINUX_RWLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * rwlocks - rtmutex which allows single reader recursion
++ */
++typedef struct {
++	struct rt_mutex		lock;
++	int			read_depth;
++	unsigned int		break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	struct lockdep_map	dep_map;
++#endif
++} rwlock_t;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
++#else
++# define RW_DEP_MAP_INIT(lockname)
++#endif
++
++#define __RW_LOCK_UNLOCKED(name) \
++	{ .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock),	\
++	  RW_DEP_MAP_INIT(name) }
++
++#define DEFINE_RWLOCK(name) \
++	rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
++
++#endif
+Index: linux-3.2/include/linux/spinlock_types_rt.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/linux/spinlock_types_rt.h
+@@ -0,0 +1,49 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RT_H
++#define __LINUX_SPINLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
++ */
++typedef struct spinlock {
++	struct rt_mutex		lock;
++	unsigned int		break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	struct lockdep_map	dep_map;
++#endif
++} spinlock_t;
++
++#ifdef CONFIG_DEBUG_RT_MUTEXES
++# define __RT_SPIN_INITIALIZER(name) \
++	{ \
++	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++	.save_state = 1, \
++	.file = __FILE__, \
++	.line = __LINE__ , \
++	}
++#else
++# define __RT_SPIN_INITIALIZER(name) \
++	{								\
++	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),		\
++	.save_state = 1, \
++	}
++#endif
++
++/*
++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
++*/
++
++#define __SPIN_LOCK_UNLOCKED(name)			\
++	{ .lock = __RT_SPIN_INITIALIZER(name.lock),		\
++	  SPIN_DEP_MAP_INIT(name) }
++
++#define __DEFINE_SPINLOCK(name) \
++	spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
++
++#define DEFINE_SPINLOCK(name) \
++	spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
++
++#endif
+Index: linux-3.2/include/linux/mutex.h
+===================================================================
+--- linux-3.2.orig/include/linux/mutex.h
++++ linux-3.2/include/linux/mutex.h
+@@ -17,6 +17,17 @@
+ 
+ #include <linux/atomic.h>
+ 
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
++		, .dep_map = { .name = #lockname }
++#else
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/mutex_rt.h>
++#else
++
+ /*
+  * Simple, straightforward mutexes with strict semantics:
+  *
+@@ -95,13 +106,6 @@ do {							\
+ static inline void mutex_destroy(struct mutex *lock) {}
+ #endif
+ 
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+-		, .dep_map = { .name = #lockname }
+-#else
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+-#endif
+-
+ #define __MUTEX_INITIALIZER(lockname) \
+ 		{ .count = ATOMIC_INIT(1) \
+ 		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+@@ -167,6 +171,9 @@ extern int __must_check mutex_lock_killa
+  */
+ extern int mutex_trylock(struct mutex *lock);
+ extern void mutex_unlock(struct mutex *lock);
++
++#endif /* !PREEMPT_RT_FULL */
++
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+ 
+ #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
+Index: linux-3.2/include/linux/mutex_rt.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/linux/mutex_rt.h
+@@ -0,0 +1,84 @@
++#ifndef __LINUX_MUTEX_RT_H
++#define __LINUX_MUTEX_RT_H
++
++#ifndef __LINUX_MUTEX_H
++#error "Please include mutex.h"
++#endif
++
++#include <linux/rtmutex.h>
++
++/* FIXME: Just for __lockfunc */
++#include <linux/spinlock.h>
++
++struct mutex {
++	struct rt_mutex		lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	struct lockdep_map	dep_map;
++#endif
++};
++
++#define __MUTEX_INITIALIZER(mutexname)					\
++	{								\
++		.lock = __RT_MUTEX_INITIALIZER(mutexname.lock)		\
++		__DEP_MAP_MUTEX_INITIALIZER(mutexname)			\
++	}
++
++#define DEFINE_MUTEX(mutexname)						\
++	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
++
++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
++extern void __lockfunc _mutex_lock(struct mutex *lock);
++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
++extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_trylock(struct mutex *lock);
++extern void __lockfunc _mutex_unlock(struct mutex *lock);
++
++#define mutex_is_locked(l)		rt_mutex_is_locked(&(l)->lock)
++#define mutex_lock(l)			_mutex_lock(l)
++#define mutex_lock_interruptible(l)	_mutex_lock_interruptible(l)
++#define mutex_lock_killable(l)		_mutex_lock_killable(l)
++#define mutex_trylock(l)		_mutex_trylock(l)
++#define mutex_unlock(l)			_mutex_unlock(l)
++#define mutex_destroy(l)		rt_mutex_destroy(&(l)->lock)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define mutex_lock_nested(l, s)	_mutex_lock_nested(l, s)
++# define mutex_lock_interruptible_nested(l, s) \
++					_mutex_lock_interruptible_nested(l, s)
++# define mutex_lock_killable_nested(l, s) \
++					_mutex_lock_killable_nested(l, s)
++
++# define mutex_lock_nest_lock(lock, nest_lock)				\
++do {									\
++	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);		\
++	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
++} while (0)
++
++#else
++# define mutex_lock_nested(l, s)	_mutex_lock(l)
++# define mutex_lock_interruptible_nested(l, s) \
++					_mutex_lock_interruptible(l)
++# define mutex_lock_killable_nested(l, s) \
++					_mutex_lock_killable(l)
++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
++#endif
++
++# define mutex_init(mutex)				\
++do {							\
++	static struct lock_class_key __key;		\
++							\
++	rt_mutex_init(&(mutex)->lock);			\
++	__mutex_do_init((mutex), #mutex, &__key);	\
++} while (0)
++
++# define __mutex_init(mutex, name, key)			\
++do {							\
++	rt_mutex_init(&(mutex)->lock);			\
++	__mutex_do_init((mutex), name, key);		\
++} while (0)
++
++#endif
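
The mutex_init() wrapper above relies on each textual expansion of the macro defining its own static struct lock_class_key, so every init call site lands in a separate lockdep class. A minimal user-space sketch of that per-call-site trick (hypothetical names, plain C, nothing taken from the kernel headers):

#include <stdio.h>

struct lock_class_key { char dummy; };

/* Each expansion of this macro defines a distinct static __key object. */
#define demo_init(keyp)					\
do {							\
	static struct lock_class_key __key;		\
	*(keyp) = &__key;				\
} while (0)

int main(void)
{
	struct lock_class_key *a, *b;

	demo_init(&a);		/* first call site */
	demo_init(&b);		/* second call site: a different static __key */
	printf("distinct keys: %s\n", a != b ? "yes" : "no");
	return 0;
}
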
+Index: linux-3.2/include/linux/rwsem.h
+===================================================================
+--- linux-3.2.orig/include/linux/rwsem.h
++++ linux-3.2/include/linux/rwsem.h
+@@ -17,6 +17,10 @@
+ #include <asm/system.h>
+ #include <linux/atomic.h>
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++#include <linux/rwsem_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ struct rw_semaphore;
+ 
+ #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+@@ -131,4 +135,6 @@ extern void down_write_nested(struct rw_
+ # define down_write_nested(sem, subclass)	down_write(sem)
+ #endif
+ 
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* _LINUX_RWSEM_H */
+Index: linux-3.2/include/linux/rwsem_rt.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/linux/rwsem_rt.h
+@@ -0,0 +1,105 @@
++#ifndef _LINUX_RWSEM_RT_H
++#define _LINUX_RWSEM_RT_H
++
++#ifndef _LINUX_RWSEM_H
++#error "Include rwsem.h"
++#endif
++
++/*
++ * RW-semaphores are a spinlock plus a reader-depth count.
++ *
++ * Note that the semantics are different from the usual
++ * Linux rw-sems: in PREEMPT_RT mode we do not allow
++ * multiple readers to hold the lock at once; we only allow
++ * a read-lock owner to read-lock recursively. This is
++ * better for latency, makes the implementation inherently
++ * fair and makes it simpler as well.
++ */
++
++#include <linux/rtmutex.h>
++
++struct rw_semaphore {
++	struct rt_mutex		lock;
++	int			read_depth;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	struct lockdep_map	dep_map;
++#endif
++};
++
++#define __RWSEM_INITIALIZER(name) \
++	{ .lock = __RT_MUTEX_INITIALIZER(name.lock), \
++	  RW_DEP_MAP_INIT(name) }
++
++#define DECLARE_RWSEM(lockname) \
++	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
++
++extern void  __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
++				     struct lock_class_key *key);
++
++# define rt_init_rwsem(sem)				\
++do {							\
++	static struct lock_class_key __key;		\
++							\
++	rt_mutex_init(&(sem)->lock);			\
++	__rt_rwsem_init((sem), #sem, &__key);		\
++} while (0)
++
++extern void  rt_down_write(struct rw_semaphore *rwsem);
++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
++extern void  rt_down_read(struct rw_semaphore *rwsem);
++extern int  rt_down_write_trylock(struct rw_semaphore *rwsem);
++extern int  rt_down_read_trylock(struct rw_semaphore *rwsem);
++extern void  rt_up_read(struct rw_semaphore *rwsem);
++extern void  rt_up_write(struct rw_semaphore *rwsem);
++extern void  rt_downgrade_write(struct rw_semaphore *rwsem);
++
++#define init_rwsem(sem)		rt_init_rwsem(sem)
++#define rwsem_is_locked(s)	rt_mutex_is_locked(&(s)->lock)
++
++static inline void down_read(struct rw_semaphore *sem)
++{
++	rt_down_read(sem);
++}
++
++static inline int down_read_trylock(struct rw_semaphore *sem)
++{
++	return rt_down_read_trylock(sem);
++}
++
++static inline void down_write(struct rw_semaphore *sem)
++{
++	rt_down_write(sem);
++}
++
++static inline int down_write_trylock(struct rw_semaphore *sem)
++{
++	return rt_down_write_trylock(sem);
++}
++
++static inline void up_read(struct rw_semaphore *sem)
++{
++	rt_up_read(sem);
++}
++
++static inline void up_write(struct rw_semaphore *sem)
++{
++	rt_up_write(sem);
++}
++
++static inline void downgrade_write(struct rw_semaphore *sem)
++{
++	rt_downgrade_write(sem);
++}
++
++static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
++{
++	return rt_down_read_nested(sem, subclass);
++}
++
++static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
++{
++	rt_down_write_nested(sem, subclass);
++}
++
++#endif
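
The comment at the top of rwsem_rt.h describes the RT rw-semaphore as an rt_mutex plus a read_depth counter: only one task holds the lock at a time, and that owner may read-acquire it recursively. A rough user-space sketch of that bookkeeping, with a pthread mutex standing in for the rt_mutex (names are invented; the write paths, fairness and lockdep details are left out):

#include <pthread.h>
#include <stdio.h>

struct sketch_rwsem {
	pthread_mutex_t lock;	/* stands in for rwsem->lock (an rt_mutex) */
	pthread_t owner;	/* valid only while read_depth > 0 */
	int read_depth;		/* recursive read count of the single owner */
};

static void sketch_down_read(struct sketch_rwsem *s)
{
	/* Like __rt_down_read(): only block if we are not already the owner. */
	if (s->read_depth == 0 || !pthread_equal(s->owner, pthread_self())) {
		pthread_mutex_lock(&s->lock);
		s->owner = pthread_self();
	}
	s->read_depth++;
}

static void sketch_up_read(struct sketch_rwsem *s)
{
	/* Like rt_up_read(): drop the lock only when the depth reaches 0. */
	if (--s->read_depth == 0)
		pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct sketch_rwsem s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	sketch_down_read(&s);
	sketch_down_read(&s);		/* recursive read by the same owner */
	printf("read_depth = %d\n", s.read_depth);	/* 2 */
	sketch_up_read(&s);
	sketch_up_read(&s);
	return 0;
}
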
+Index: linux-3.2/lib/Makefile
+===================================================================
+--- linux-3.2.orig/lib/Makefile
++++ linux-3.2/lib/Makefile
+@@ -37,8 +37,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o 
+ obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
+ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
++
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
++endif
+ 
+ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
+ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+Index: linux-3.2/include/linux/rwlock_rt.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/linux/rwlock_rt.h
+@@ -0,0 +1,123 @@
++#ifndef __LINUX_RWLOCK_RT_H
++#define __LINUX_RWLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++#define rwlock_init(rwl)				\
++do {							\
++	static struct lock_class_key __key;		\
++							\
++	rt_mutex_init(&(rwl)->lock);			\
++	__rt_rwlock_init(rwl, #rwl, &__key);		\
++} while (0)
++
++extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
++
++#define read_trylock(lock)	__cond_lock(lock, rt_read_trylock(lock))
++#define write_trylock(lock)	__cond_lock(lock, rt_write_trylock(lock))
++
++#define write_trylock_irqsave(lock, flags)	\
++	__cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
++
++#define read_lock_irqsave(lock, flags)			\
++	do {						\
++		typecheck(unsigned long, flags);	\
++		migrate_disable();			\
++		flags = rt_read_lock_irqsave(lock);	\
++	} while (0)
++
++#define write_lock_irqsave(lock, flags)			\
++	do {						\
++		typecheck(unsigned long, flags);	\
++		migrate_disable();			\
++		flags = rt_write_lock_irqsave(lock);	\
++	} while (0)
++
++#define read_lock(lock)					\
++	do {						\
++		migrate_disable();			\
++		rt_read_lock(lock);			\
++	} while (0)
++
++#define read_lock_bh(lock)				\
++	do {						\
++		local_bh_disable();			\
++		migrate_disable();			\
++		rt_read_lock(lock);			\
++	} while (0)
++
++#define read_lock_irq(lock)	read_lock(lock)
++
++#define write_lock(lock)				\
++	do {						\
++		migrate_disable();			\
++		rt_write_lock(lock);			\
++	} while (0)
++
++#define write_lock_bh(lock)				\
++	do {						\
++		local_bh_disable();			\
++		migrate_disable();			\
++		rt_write_lock(lock);			\
++	} while (0)
++
++#define write_lock_irq(lock)	write_lock(lock)
++
++#define read_unlock(lock)				\
++	do {						\
++		rt_read_unlock(lock);			\
++		migrate_enable();			\
++	} while (0)
++
++#define read_unlock_bh(lock)				\
++	do {						\
++		rt_read_unlock(lock);			\
++		migrate_enable();			\
++		local_bh_enable();			\
++	} while (0)
++
++#define read_unlock_irq(lock)	read_unlock(lock)
++
++#define write_unlock(lock)				\
++	do {						\
++		rt_write_unlock(lock);			\
++		migrate_enable();			\
++	} while (0)
++
++#define write_unlock_bh(lock)				\
++	do {						\
++		rt_write_unlock(lock);			\
++		migrate_enable();			\
++		local_bh_enable();			\
++	} while (0)
++
++#define write_unlock_irq(lock)	write_unlock(lock)
++
++#define read_unlock_irqrestore(lock, flags)		\
++	do {						\
++		typecheck(unsigned long, flags);	\
++		(void) flags;				\
++		rt_read_unlock(lock);			\
++		migrate_enable();			\
++	} while (0)
++
++#define write_unlock_irqrestore(lock, flags) \
++	do {						\
++		typecheck(unsigned long, flags);	\
++		(void) flags;				\
++		rt_write_unlock(lock);			\
++		migrate_enable();			\
++	} while (0)
++
++#endif
+Index: linux-3.2/include/linux/spinlock.h
+===================================================================
+--- linux-3.2.orig/include/linux/spinlock.h
++++ linux-3.2/include/linux/spinlock.h
+@@ -254,7 +254,11 @@ static inline void do_raw_spin_unlock(ra
+ #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
+ 
+ /* Include rwlock functions */
+-#include <linux/rwlock.h>
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_rt.h>
++#else
++# include <linux/rwlock.h>
++#endif
+ 
+ /*
+  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+@@ -265,6 +269,10 @@ static inline void do_raw_spin_unlock(ra
+ # include <linux/spinlock_api_up.h>
+ #endif
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ /*
+  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+  */
+@@ -397,4 +405,6 @@ extern int _atomic_dec_and_lock(atomic_t
+ #define atomic_dec_and_lock(atomic, lock) \
+ 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+ 
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* __LINUX_SPINLOCK_H */
+Index: linux-3.2/include/linux/spinlock_api_smp.h
+===================================================================
+--- linux-3.2.orig/include/linux/spinlock_api_smp.h
++++ linux-3.2/include/linux/spinlock_api_smp.h
+@@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh(
+ 	return 0;
+ }
+ 
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_api_smp.h>
++#endif
+ 
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+Index: linux-3.2/include/linux/spinlock_rt.h
+===================================================================
+--- /dev/null
++++ linux-3.2/include/linux/spinlock_rt.h
+@@ -0,0 +1,166 @@
++#ifndef __LINUX_SPINLOCK_RT_H
++#define __LINUX_SPINLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++extern void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
++
++#define spin_lock_init(slock)				\
++do {							\
++	static struct lock_class_key __key;		\
++							\
++	rt_mutex_init(&(slock)->lock);			\
++	__rt_spin_lock_init(slock, #slock, &__key);	\
++} while (0)
++
++extern void __lockfunc rt_spin_lock(spinlock_t *lock);
++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
++extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
++
++/*
++ * lockdep-less calls, for derived types like rwlock:
++ * (for trylock they can use rt_mutex_trylock() directly).
++ */
++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
++
++#define spin_lock_local(lock)			rt_spin_lock(lock)
++#define spin_unlock_local(lock)			rt_spin_unlock(lock)
++
++#define spin_lock(lock)				\
++	do {					\
++		migrate_disable();		\
++		rt_spin_lock(lock);		\
++	} while (0)
++
++#define spin_lock_bh(lock)			\
++	do {					\
++		local_bh_disable();		\
++		migrate_disable();		\
++		rt_spin_lock(lock);		\
++	} while (0)
++
++#define spin_lock_irq(lock)		spin_lock(lock)
++
++#define spin_do_trylock(lock)		__cond_lock(lock, rt_spin_trylock(lock))
++
++#define spin_trylock(lock)			\
++({						\
++	int __locked;				\
++	migrate_disable();			\
++	__locked = spin_do_trylock(lock);	\
++	if (!__locked)				\
++		migrate_enable();		\
++	__locked;				\
++})
++
++#ifdef CONFIG_LOCKDEP
++# define spin_lock_nested(lock, subclass)		\
++	do {						\
++		migrate_disable();			\
++		rt_spin_lock_nested(lock, subclass);	\
++	} while (0)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++	do {						 \
++		typecheck(unsigned long, flags);	 \
++		flags = 0;				 \
++		migrate_disable();			 \
++		rt_spin_lock_nested(lock, subclass);	 \
++	} while (0)
++#else
++# define spin_lock_nested(lock, subclass)	spin_lock(lock)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++	do {						 \
++		typecheck(unsigned long, flags);	 \
++		flags = 0;				 \
++		spin_lock(lock);			 \
++	} while (0)
++#endif
++
++#define spin_lock_irqsave(lock, flags)			 \
++	do {						 \
++		typecheck(unsigned long, flags);	 \
++		flags = 0;				 \
++		spin_lock(lock);			 \
++	} while (0)
++
++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
++{
++	unsigned long flags = 0;
++#ifdef CONFIG_TRACE_IRQFLAGS
++	flags = rt_spin_lock_trace_flags(lock);
++#else
++	spin_lock(lock); /* lock_local */
++#endif
++	return flags;
++}
++
++/* FIXME: we need rt_spin_lock_nest_lock */
++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
++
++#define spin_unlock(lock)				\
++	do {						\
++		rt_spin_unlock(lock);			\
++		migrate_enable();			\
++	} while (0)
++
++#define spin_unlock_bh(lock)				\
++	do {						\
++		rt_spin_unlock(lock);			\
++		migrate_enable();			\
++		local_bh_enable();			\
++	} while (0)
++
++#define spin_unlock_irq(lock)		spin_unlock(lock)
++
++#define spin_unlock_irqrestore(lock, flags)		\
++	do {						\
++		typecheck(unsigned long, flags);	\
++		(void) flags;				\
++		spin_unlock(lock);			\
++	} while (0)
++
++#define spin_trylock_bh(lock)	__cond_lock(lock, rt_spin_trylock_bh(lock))
++#define spin_trylock_irq(lock)	spin_trylock(lock)
++
++#define spin_trylock_irqsave(lock, flags)	\
++	rt_spin_trylock_irqsave(lock, &(flags))
++
++#define spin_unlock_wait(lock)		rt_spin_unlock_wait(lock)
++
++#ifdef CONFIG_GENERIC_LOCKBREAK
++# define spin_is_contended(lock)	((lock)->break_lock)
++#else
++# define spin_is_contended(lock)	(((void)(lock), 0))
++#endif
++
++static inline int spin_can_lock(spinlock_t *lock)
++{
++	return !rt_mutex_is_locked(&lock->lock);
++}
++
++static inline int spin_is_locked(spinlock_t *lock)
++{
++	return rt_mutex_is_locked(&lock->lock);
++}
++
++static inline void assert_spin_locked(spinlock_t *lock)
++{
++	BUG_ON(!spin_is_locked(lock));
++}
++
++#define atomic_dec_and_lock(atomic, lock) \
++	atomic_dec_and_spin_lock(atomic, lock)
++
++#endif
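
With PREEMPT_RT_FULL the spinlock_t operations defined above end up in a sleeping rt_mutex rather than a spinning raw lock, which is why spin_lock_irqsave() can simply set flags to 0 and leave interrupts enabled. As a loose user-space analogy, a pthread mutex behind a spin_lock()-style API (hypothetical names only, not the kernel implementation):

#include <pthread.h>
#include <stdio.h>

typedef struct { pthread_mutex_t m; } sketch_spinlock_t;

#define SKETCH_SPIN_UNLOCKED { PTHREAD_MUTEX_INITIALIZER }

static void sketch_spin_lock(sketch_spinlock_t *l)   { pthread_mutex_lock(&l->m); }
static void sketch_spin_unlock(sketch_spinlock_t *l) { pthread_mutex_unlock(&l->m); }

/* Like spin_trylock() above: non-zero on success, no blocking on failure. */
static int sketch_spin_trylock(sketch_spinlock_t *l)
{
	return pthread_mutex_trylock(&l->m) == 0;
}

int main(void)
{
	sketch_spinlock_t l = SKETCH_SPIN_UNLOCKED;

	sketch_spin_lock(&l);
	/* ... critical section: may sleep here on RT, unlike a raw spinlock ... */
	sketch_spin_unlock(&l);

	if (sketch_spin_trylock(&l)) {
		printf("got the lock without blocking\n");
		sketch_spin_unlock(&l);
	}
	return 0;
}
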
+Index: linux-3.2/kernel/Makefile
+===================================================================
+--- linux-3.2.orig/kernel/Makefile
++++ linux-3.2/kernel/Makefile
+@@ -7,8 +7,8 @@ obj-y     = sched.o fork.o exec_domain.o
+ 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
+ 	    signal.o sys.o kmod.o workqueue.o pid.o \
+ 	    rcupdate.o extable.o params.o posix-timers.o \
+-	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
+-	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
++	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
++	    hrtimer.o nsproxy.o srcu.o semaphore.o \
+ 	    notifier.o ksysfs.o sched_clock.o cred.o \
+ 	    async.o range.o
+ obj-y += groups.o
+@@ -29,7 +29,11 @@ obj-$(CONFIG_PROFILING) += profile.o
+ obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
+ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+ obj-y += time/
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
++obj-y += mutex.o
+ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
++obj-y += rwsem.o
++endif
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+@@ -41,6 +45,7 @@ endif
+ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+ obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
+ obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
+ obj-$(CONFIG_SMP) += smp.o
+ ifneq ($(CONFIG_SMP),y)
+Index: linux-3.2/kernel/rt.c
+===================================================================
+--- /dev/null
++++ linux-3.2/kernel/rt.c
+@@ -0,0 +1,442 @@
++/*
++ * kernel/rt.c
++ *
++ * Real-Time Preemption Support
++ *
++ * started by Ingo Molnar:
++ *
++ *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo at redhat.com>
++ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx at timesys.com>
++ *
++ * historic credit for proving that Linux spinlocks can be implemented via
++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
++ * and others) who prototyped it on 2.4 and did lots of comparative
++ * research and analysis; TimeSys, for proving that you can implement a
++ * fully preemptible kernel via the use of IRQ threading and mutexes;
++ * Bill Huey for persuasively arguing on lkml that the mutex model is the
++ * right one; and to MontaVista, who ported pmutexes to 2.6.
++ *
++ * This code is a from-scratch implementation and is not based on pmutexes,
++ * but the idea of converting spinlocks to mutexes is used here too.
++ *
++ * lock debugging, locking tree, deadlock detection:
++ *
++ *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
++ *  Released under the General Public License (GPL).
++ *
++ * Includes portions of the generic R/W semaphore implementation from:
++ *
++ *  Copyright (c) 2001   David Howells (dhowells at redhat.com).
++ *  - Derived partially from idea by Andrea Arcangeli <andrea at suse.de>
++ *  - Derived also from comments by Linus
++ *
++ * Pending ownership of locks and ownership stealing:
++ *
++ *  Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
++ *
++ *   (also by Steven Rostedt)
++ *    - Converted single pi_lock to individual task locks.
++ *
++ * By Esben Nielsen:
++ *    Doing priority inheritance with help of the scheduler.
++ *
++ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx at timesys.com>
++ *  - major rework based on Esben Nielsens initial patch
++ *  - replaced thread_info references by task_struct refs
++ *  - removed task->pending_owner dependency
++ *  - BKL drop/reacquire for semaphore style locks to avoid deadlocks
++ *    in the scheduler return path as discussed with Steven Rostedt
++ *
++ *  Copyright (C) 2006, Kihon Technologies Inc.
++ *    Steven Rostedt <rostedt at goodmis.org>
++ *  - debugged and patched Thomas Gleixner's rework.
++ *  - added back the cmpxchg to the rework.
++ *  - turned atomic require back on for SMP.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/rtmutex.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/syscalls.h>
++#include <linux/interrupt.h>
++#include <linux/plist.h>
++#include <linux/fs.h>
++#include <linux/futex.h>
++#include <linux/hrtimer.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * struct mutex functions
++ */
++void __mutex_do_init(struct mutex *mutex, const char *name,
++		     struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	/*
++	 * Make sure we are not reinitializing a held lock:
++	 */
++	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
++	lockdep_init_map(&mutex->dep_map, name, key, 0);
++#endif
++	mutex->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__mutex_do_init);
++
++void __lockfunc _mutex_lock(struct mutex *lock)
++{
++	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++	rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock);
++
++int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
++{
++	int ret;
++
++	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++	ret = rt_mutex_lock_interruptible(&lock->lock, 0);
++	if (ret)
++		mutex_release(&lock->dep_map, 1, _RET_IP_);
++	return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible);
++
++int __lockfunc _mutex_lock_killable(struct mutex *lock)
++{
++	int ret;
++
++	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++	ret = rt_mutex_lock_killable(&lock->lock, 0);
++	if (ret)
++		mutex_release(&lock->dep_map, 1, _RET_IP_);
++	return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
++{
++	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++	rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nested);
++
++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
++{
++	mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
++	rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nest_lock);
++
++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
++{
++	int ret;
++
++	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++	ret = rt_mutex_lock_interruptible(&lock->lock, 0);
++	if (ret)
++		mutex_release(&lock->dep_map, 1, _RET_IP_);
++	return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
++
++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
++{
++	int ret;
++
++	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++	ret = rt_mutex_lock_killable(&lock->lock, 0);
++	if (ret)
++		mutex_release(&lock->dep_map, 1, _RET_IP_);
++	return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable_nested);
++#endif
++
++int __lockfunc _mutex_trylock(struct mutex *lock)
++{
++	int ret = rt_mutex_trylock(&lock->lock);
++
++	if (ret)
++		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++
++	return ret;
++}
++EXPORT_SYMBOL(_mutex_trylock);
++
++void __lockfunc _mutex_unlock(struct mutex *lock)
++{
++	mutex_release(&lock->dep_map, 1, _RET_IP_);
++	rt_mutex_unlock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_unlock);
++
++/*
++ * rwlock_t functions
++ */
++int __lockfunc rt_write_trylock(rwlock_t *rwlock)
++{
++	int ret = rt_mutex_trylock(&rwlock->lock);
++
++	migrate_disable();
++	if (ret)
++		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++	else
++		migrate_enable();
++
++	return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock);
++
++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
++{
++	int ret;
++
++	*flags = 0;
++	migrate_disable();
++	ret = rt_write_trylock(rwlock);
++	if (!ret)
++		migrate_enable();
++	return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock_irqsave);
++
++int __lockfunc rt_read_trylock(rwlock_t *rwlock)
++{
++	struct rt_mutex *lock = &rwlock->lock;
++	int ret = 1;
++
++	/*
++	 * recursive read locks succeed when current owns the lock,
++	 * but not when read_depth == 0 which means that the lock is
++	 * write locked.
++	 */
++	migrate_disable();
++	if (rt_mutex_owner(lock) != current)
++		ret = rt_mutex_trylock(lock);
++	else if (!rwlock->read_depth)
++		ret = 0;
++
++	if (ret) {
++		rwlock->read_depth++;
++		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
++	} else
++		migrate_enable();
++
++	return ret;
++}
++EXPORT_SYMBOL(rt_read_trylock);
++
++void __lockfunc rt_write_lock(rwlock_t *rwlock)
++{
++	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++	__rt_spin_lock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_write_lock);
++
++void __lockfunc rt_read_lock(rwlock_t *rwlock)
++{
++	struct rt_mutex *lock = &rwlock->lock;
++
++	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
++
++	/*
++	 * recursive read locks succeed when current owns the lock
++	 */
++	if (rt_mutex_owner(lock) != current)
++		__rt_spin_lock(lock);
++	rwlock->read_depth++;
++}
++
++EXPORT_SYMBOL(rt_read_lock);
++
++void __lockfunc rt_write_unlock(rwlock_t *rwlock)
++{
++	/* NOTE: we always pass in '1' for nested, for simplicity */
++	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++	__rt_spin_unlock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_write_unlock);
++
++void __lockfunc rt_read_unlock(rwlock_t *rwlock)
++{
++	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++
++	/* Release the lock only when read_depth is down to 0 */
++	if (--rwlock->read_depth == 0)
++		__rt_spin_unlock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_read_unlock);
++
++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
++{
++	rt_write_lock(rwlock);
++
++	return 0;
++}
++EXPORT_SYMBOL(rt_write_lock_irqsave);
++
++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
++{
++	rt_read_lock(rwlock);
++
++	return 0;
++}
++EXPORT_SYMBOL(rt_read_lock_irqsave);
++
++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	/*
++	 * Make sure we are not reinitializing a held lock:
++	 */
++	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
++	lockdep_init_map(&rwlock->dep_map, name, key, 0);
++#endif
++	rwlock->lock.save_state = 1;
++	rwlock->read_depth = 0;
++}
++EXPORT_SYMBOL(__rt_rwlock_init);
++
++/*
++ * rw_semaphores
++ */
++
++void  rt_up_write(struct rw_semaphore *rwsem)
++{
++	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++	rt_mutex_unlock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_up_write);
++
++void  rt_up_read(struct rw_semaphore *rwsem)
++{
++	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++	if (--rwsem->read_depth == 0)
++		rt_mutex_unlock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_up_read);
++
++/*
++ * downgrade a write lock into a read lock
++ * - just wake up any readers at the front of the queue
++ */
++void  rt_downgrade_write(struct rw_semaphore *rwsem)
++{
++	BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
++	rwsem->read_depth = 1;
++}
++EXPORT_SYMBOL(rt_downgrade_write);
++
++int  rt_down_write_trylock(struct rw_semaphore *rwsem)
++{
++	int ret = rt_mutex_trylock(&rwsem->lock);
++
++	if (ret)
++		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++	return ret;
++}
++EXPORT_SYMBOL(rt_down_write_trylock);
++
++void  rt_down_write(struct rw_semaphore *rwsem)
++{
++	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
++	rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write);
++
++void  rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
++{
++	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
++	rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write_nested);
++
++int  rt_down_read_trylock(struct rw_semaphore *rwsem)
++{
++	struct rt_mutex *lock = &rwsem->lock;
++	int ret = 1;
++
++	/*
++	 * recursive read locks succeed when current owns the rwsem,
++	 * but not when read_depth == 0 which means that the rwsem is
++	 * write locked.
++	 */
++	if (rt_mutex_owner(lock) != current)
++		ret = rt_mutex_trylock(&rwsem->lock);
++	else if (!rwsem->read_depth)
++		ret = 0;
++
++	if (ret) {
++		rwsem->read_depth++;
++		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(rt_down_read_trylock);
++
++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
++{
++	struct rt_mutex *lock = &rwsem->lock;
++
++	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
++
++	if (rt_mutex_owner(lock) != current)
++		rt_mutex_lock(&rwsem->lock);
++	rwsem->read_depth++;
++}
++
++void  rt_down_read(struct rw_semaphore *rwsem)
++{
++	__rt_down_read(rwsem, 0);
++}
++EXPORT_SYMBOL(rt_down_read);
++
++void  rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
++{
++	__rt_down_read(rwsem, subclass);
++}
++EXPORT_SYMBOL(rt_down_read_nested);
++
++void  __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
++			      struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	/*
++	 * Make sure we are not reinitializing a held lock:
++	 */
++	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
++	lockdep_init_map(&rwsem->dep_map, name, key, 0);
++#endif
++	rwsem->read_depth = 0;
++	rwsem->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__rt_rwsem_init);
++
++/**
++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
++ * @cnt: the atomic which we are to dec
++ * @lock: the mutex to return holding if we dec to 0
++ *
++ * return true and hold lock if we dec to 0, return false otherwise
++ */
++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
++{
++	/* dec if we can't possibly hit 0 */
++	if (atomic_add_unless(cnt, -1, 1))
++		return 0;
++	/* we might hit 0, so take the lock */
++	mutex_lock(lock);
++	if (!atomic_dec_and_test(cnt)) {
++		/* when we actually did the dec, we didn't hit 0 */
++		mutex_unlock(lock);
++		return 0;
++	}
++	/* we hit 0, and we hold the lock */
++	return 1;
++}
++EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
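
atomic_dec_and_mutex_lock() above only takes the mutex when the decrement might actually reach zero; atomic_add_unless(cnt, -1, 1) handles every other case without the lock. The same two-path shape in a small user-space sketch (C11 atomics plus a pthread mutex; the names are invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int demo_cnt = 2;

/* Returns 1 with demo_lock held iff the count dropped to zero. */
static int dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: decrement without the lock while we cannot hit zero
	 * (the kernel does this with atomic_add_unless(cnt, -1, 1)). */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return 0;
	}

	/* Slow path: we might hit zero, so take the lock first. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) - 1 != 0) {
		pthread_mutex_unlock(lock);
		return 0;
	}
	return 1;	/* count is zero and we hold the lock */
}

int main(void)
{
	printf("first dec:  %d\n", dec_and_lock(&demo_cnt, &demo_lock));	/* 0 */
	printf("second dec: %d\n", dec_and_lock(&demo_cnt, &demo_lock));	/* 1 */
	pthread_mutex_unlock(&demo_lock);
	return 0;
}
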
+Index: linux-3.2/kernel/spinlock.c
+===================================================================
+--- linux-3.2.orig/kernel/spinlock.c
++++ linux-3.2/kernel/spinlock.c
+@@ -110,8 +110,11 @@ void __lockfunc __raw_##op##_lock_bh(loc
+  *         __[spin|read|write]_lock_bh()
+  */
+ BUILD_LOCK_OPS(spin, raw_spinlock);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ BUILD_LOCK_OPS(read, rwlock);
+ BUILD_LOCK_OPS(write, rwlock);
++#endif
+ 
+ #endif
+ 
+@@ -195,6 +198,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_
+ EXPORT_SYMBOL(_raw_spin_unlock_bh);
+ #endif
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #ifndef CONFIG_INLINE_READ_TRYLOCK
+ int __lockfunc _raw_read_trylock(rwlock_t *lock)
+ {
+@@ -339,6 +344,8 @@ void __lockfunc _raw_write_unlock_bh(rwl
+ EXPORT_SYMBOL(_raw_write_unlock_bh);
+ #endif
+ 
++#endif /* !PREEMPT_RT_FULL */
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ 
+ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+Index: linux-3.2/lib/spinlock_debug.c
+===================================================================
+--- linux-3.2.orig/lib/spinlock_debug.c
++++ linux-3.2/lib/spinlock_debug.c
+@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t
+ 
+ EXPORT_SYMBOL(__raw_spin_lock_init);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void __rwlock_init(rwlock_t *lock, const char *name,
+ 		   struct lock_class_key *key)
+ {
+@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const
+ }
+ 
+ EXPORT_SYMBOL(__rwlock_init);
++#endif
+ 
+ static void spin_dump(raw_spinlock_t *lock, const char *msg)
+ {
+@@ -155,6 +157,7 @@ void do_raw_spin_unlock(raw_spinlock_t *
+ 	arch_spin_unlock(&lock->raw_lock);
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static void rwlock_bug(rwlock_t *lock, const char *msg)
+ {
+ 	if (!debug_locks_off())
+@@ -296,3 +299,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+ 	debug_write_unlock(lock);
+ 	arch_write_unlock(&lock->raw_lock);
+ }
++
++#endif
+Index: linux-3.2/include/linux/rcutree.h
+===================================================================
+--- linux-3.2.orig/include/linux/rcutree.h
++++ linux-3.2/include/linux/rcutree.h
+@@ -57,7 +57,11 @@ static inline void exit_rcu(void)
+ 
+ #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void synchronize_rcu_bh(void);
++#else
++# define synchronize_rcu_bh	synchronize_rcu
++#endif
+ extern void synchronize_sched_expedited(void);
+ extern void synchronize_rcu_expedited(void);
+ 
+@@ -67,19 +71,29 @@ static inline void synchronize_rcu_bh_ex
+ }
+ 
+ extern void rcu_barrier(void);
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define rcu_barrier_bh		rcu_barrier
++#else
+ extern void rcu_barrier_bh(void);
++#endif
+ extern void rcu_barrier_sched(void);
+ 
+ extern unsigned long rcutorture_testseq;
+ extern unsigned long rcutorture_vernum;
+ extern long rcu_batches_completed(void);
+-extern long rcu_batches_completed_bh(void);
+ extern long rcu_batches_completed_sched(void);
+ 
+ extern void rcu_force_quiescent_state(void);
+-extern void rcu_bh_force_quiescent_state(void);
+ extern void rcu_sched_force_quiescent_state(void);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
++extern void rcu_bh_force_quiescent_state(void);
++extern long rcu_batches_completed_bh(void);
++#else
++# define rcu_bh_force_quiescent_state	rcu_force_quiescent_state
++# define rcu_batches_completed_bh	rcu_batches_completed
++#endif
++
+ /* A context switch is a grace period for RCU-sched and RCU-bh. */
+ static inline int rcu_blocking_is_gp(void)
+ {
+Index: linux-3.2/kernel/rcupdate.c
+===================================================================
+--- linux-3.2.orig/kernel/rcupdate.c
++++ linux-3.2/kernel/rcupdate.c
+@@ -77,6 +77,7 @@ int debug_lockdep_rcu_enabled(void)
+ }
+ EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
+  *
+@@ -96,6 +97,7 @@ int rcu_read_lock_bh_held(void)
+ 	return in_softirq() || irqs_disabled();
+ }
+ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
++#endif
+ 
+ #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+ 
+Index: linux-3.2/kernel/rcutiny.c
+===================================================================
+--- linux-3.2.orig/kernel/rcutiny.c
++++ linux-3.2/kernel/rcutiny.c
+@@ -243,6 +243,7 @@ void call_rcu_sched(struct rcu_head *hea
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+  * Post an RCU bottom-half callback to be invoked after any subsequent
+  * quiescent state.
+@@ -252,3 +253,4 @@ void call_rcu_bh(struct rcu_head *head, 
+ 	__call_rcu(head, func, &rcu_bh_ctrlblk);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
+Index: linux-3.2/include/linux/lglock.h
+===================================================================
+--- linux-3.2.orig/include/linux/lglock.h
++++ linux-3.2/include/linux/lglock.h
+@@ -70,6 +70,9 @@
+  extern void name##_global_lock_online(void);				\
+  extern void name##_global_unlock_online(void);				\
+ 
++
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #define DEFINE_LGLOCK(name)						\
+ 									\
+  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);				\
+@@ -169,4 +172,101 @@
+ 	preempt_enable();						\
+  }									\
+  EXPORT_SYMBOL(name##_global_unlock);
++
++#else /* !PREEMPT_RT_FULL */
++#define DEFINE_LGLOCK(name)						\
++									\
++ DEFINE_PER_CPU(struct rt_mutex, name##_lock);					\
++ DEFINE_LGLOCK_LOCKDEP(name);						\
++									\
++ void name##_lock_init(void) {						\
++	int i;								\
++	LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
++	for_each_possible_cpu(i) {					\
++		struct rt_mutex *lock;					\
++		lock = &per_cpu(name##_lock, i);			\
++		rt_mutex_init(lock);					\
++	}								\
++ }									\
++ EXPORT_SYMBOL(name##_lock_init);					\
++									\
++ void name##_local_lock(void) {						\
++	struct rt_mutex *lock;						\
++	migrate_disable();						\
++	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
++	lock = &__get_cpu_var(name##_lock);				\
++	__rt_spin_lock(lock);						\
++ }									\
++ EXPORT_SYMBOL(name##_local_lock);					\
++									\
++ void name##_local_unlock(void) {					\
++	struct rt_mutex *lock;						\
++	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
++	lock = &__get_cpu_var(name##_lock);				\
++	__rt_spin_unlock(lock);						\
++	migrate_enable();						\
++ }									\
++ EXPORT_SYMBOL(name##_local_unlock);					\
++									\
++ void name##_local_lock_cpu(int cpu) {					\
++	struct rt_mutex *lock;						\
++	rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_);	\
++	lock = &per_cpu(name##_lock, cpu);				\
++	__rt_spin_lock(lock);						\
++ }									\
++ EXPORT_SYMBOL(name##_local_lock_cpu);					\
++									\
++ void name##_local_unlock_cpu(int cpu) {				\
++	struct rt_mutex *lock;						\
++	rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_);		\
++	lock = &per_cpu(name##_lock, cpu);				\
++	__rt_spin_unlock(lock);						\
++ }									\
++ EXPORT_SYMBOL(name##_local_unlock_cpu);				\
++									\
++ void name##_global_lock_online(void) {					\
++	int i;								\
++	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
++	for_each_online_cpu(i) {					\
++		struct rt_mutex *lock;					\
++		lock = &per_cpu(name##_lock, i);			\
++		__rt_spin_lock(lock);					\
++	}								\
++ }									\
++ EXPORT_SYMBOL(name##_global_lock_online);				\
++									\
++ void name##_global_unlock_online(void) {				\
++	int i;								\
++	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
++	for_each_online_cpu(i) {					\
++		struct rt_mutex *lock;					\
++		lock = &per_cpu(name##_lock, i);			\
++		__rt_spin_unlock(lock);					\
++	}								\
++ }									\
++ EXPORT_SYMBOL(name##_global_unlock_online);				\
++									\
++ void name##_global_lock(void) {					\
++	int i;								\
++	rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);		\
++	for_each_possible_cpu(i) {					\
++		struct rt_mutex *lock;					\
++		lock = &per_cpu(name##_lock, i);			\
++		__rt_spin_lock(lock);					\
++	}								\
++ }									\
++ EXPORT_SYMBOL(name##_global_lock);					\
++									\
++ void name##_global_unlock(void) {					\
++	int i;								\
++	rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);		\
++	for_each_possible_cpu(i) {					\
++		struct rt_mutex *lock;					\
++		lock = &per_cpu(name##_lock, i);			\
++		__rt_spin_unlock(lock);					\
++	}								\
++ }									\
++ EXPORT_SYMBOL(name##_global_unlock);
++#endif /* PREEMPT_RT_FULL */
++
+ #endif
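
The RT variant of DEFINE_LGLOCK above swaps the per-CPU arch_spinlock_t for a per-CPU rt_mutex: the _local_lock operations take only the current CPU's lock, while the _global_lock operations walk every CPU's lock in a fixed order. A toy user-space version of that scheme, with a fixed number of slots and pthread mutexes instead of per-CPU rt_mutexes (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

#define NR_SLOTS 4

static pthread_mutex_t slot_lock[NR_SLOTS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void local_lock_slot(int slot)   { pthread_mutex_lock(&slot_lock[slot]); }
static void local_unlock_slot(int slot) { pthread_mutex_unlock(&slot_lock[slot]); }

static void global_lock_all(void)
{
	/* Always take the slots in the same order so two global lockers
	 * cannot deadlock against each other. */
	for (int i = 0; i < NR_SLOTS; i++)
		pthread_mutex_lock(&slot_lock[i]);
}

static void global_unlock_all(void)
{
	for (int i = 0; i < NR_SLOTS; i++)
		pthread_mutex_unlock(&slot_lock[i]);
}

int main(void)
{
	local_lock_slot(1);		/* cheap: touches only one lock */
	local_unlock_slot(1);

	global_lock_all();		/* expensive: touches every slot */
	printf("held all %d slot locks\n", NR_SLOTS);
	global_unlock_all();
	return 0;
}
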
+Index: linux-3.2/drivers/tty/serial/8250.c
+===================================================================
+--- linux-3.2.orig/drivers/tty/serial/8250.c
++++ linux-3.2/drivers/tty/serial/8250.c
+@@ -38,6 +38,7 @@
+ #include <linux/nmi.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/kdb.h>
+ 
+ #include <asm/io.h>
+ #include <asm/irq.h>
+@@ -1631,12 +1632,14 @@ static irqreturn_t serial8250_interrupt(
+ 
+ 		l = l->next;
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 		if (l == i->head && pass_counter++ > PASS_LIMIT) {
+ 			/* If we hit this, we're dead. */
+ 			printk_ratelimited(KERN_ERR
+ 				"serial8250: too much work for irq%d\n", irq);
+ 			break;
+ 		}
++#endif
+ 	} while (l != end);
+ 
+ 	spin_unlock(&i->lock);
+@@ -2846,14 +2849,14 @@ serial8250_console_write(struct console 
+ 
+ 	touch_nmi_watchdog();
+ 
+-	local_irq_save(flags);
+-	if (up->port.sysrq) {
+-		/* serial8250_handle_port() already took the lock */
++	if (unlikely(in_kdb_printk())) {
+ 		locked = 0;
+-	} else if (oops_in_progress) {
+-		locked = spin_trylock(&up->port.lock);
+-	} else
+-		spin_lock(&up->port.lock);
++	} else {
++		if (up->port.sysrq || oops_in_progress)
++			locked = spin_trylock_irqsave(&up->port.lock, flags);
++		else
++			spin_lock_irqsave(&up->port.lock, flags);
++	}
+ 
+ 	/*
+ 	 *	First save the IER then disable the interrupts
+@@ -2885,8 +2888,7 @@ serial8250_console_write(struct console 
+ 		check_modem_status(up);
+ 
+ 	if (locked)
+-		spin_unlock(&up->port.lock);
+-	local_irq_restore(flags);
++		spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+ 
+ static int __init serial8250_console_setup(struct console *co, char *options)
+Index: linux-3.2/drivers/tty/tty_buffer.c
+===================================================================
+--- linux-3.2.orig/drivers/tty/tty_buffer.c
++++ linux-3.2/drivers/tty/tty_buffer.c
+@@ -493,10 +493,14 @@ void tty_flip_buffer_push(struct tty_str
+ 		tty->buf.tail->commit = tty->buf.tail->used;
+ 	spin_unlock_irqrestore(&tty->buf.lock, flags);
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	if (tty->low_latency)
+ 		flush_to_ldisc(&tty->buf.work);
+ 	else
+ 		schedule_work(&tty->buf.work);
++#else
++	flush_to_ldisc(&tty->buf.work);
++#endif
+ }
+ EXPORT_SYMBOL(tty_flip_buffer_push);
+ 
+Index: linux-3.2/drivers/tty/serial/omap-serial.c
+===================================================================
+--- linux-3.2.orig/drivers/tty/serial/omap-serial.c
++++ linux-3.2/drivers/tty/serial/omap-serial.c
+@@ -946,13 +946,12 @@ serial_omap_console_write(struct console
+ 	unsigned int ier;
+ 	int locked = 1;
+ 
+-	local_irq_save(flags);
+ 	if (up->port.sysrq)
+ 		locked = 0;
+ 	else if (oops_in_progress)
+-		locked = spin_trylock(&up->port.lock);
++		locked = spin_trylock_irqsave(&up->port.lock, flags);
+ 	else
+-		spin_lock(&up->port.lock);
++		spin_lock_irqsave(&up->port.lock, flags);
+ 
+ 	/*
+ 	 * First save the IER then disable the interrupts
+@@ -979,8 +978,7 @@ serial_omap_console_write(struct console
+ 		check_modem_status(up);
+ 
+ 	if (locked)
+-		spin_unlock(&up->port.lock);
+-	local_irq_restore(flags);
++		spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+ 
+ static int __init
+Index: linux-3.2/fs/namespace.c
+===================================================================
+--- linux-3.2.orig/fs/namespace.c
++++ linux-3.2/fs/namespace.c
+@@ -341,8 +341,14 @@ int mnt_want_write(struct vfsmount *mnt)
+ 	 * incremented count after it has set MNT_WRITE_HOLD.
+ 	 */
+ 	smp_mb();
+-	while (mnt->mnt_flags & MNT_WRITE_HOLD)
++	/*
++	 * No need to keep preemption disabled across the spin loop.
++	 */
++	while (mnt->mnt_flags & MNT_WRITE_HOLD) {
++		preempt_enable();
+ 		cpu_relax();
++		preempt_disable();
++	}
+ 	/*
+ 	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
+ 	 * be set to match its requirements. So we must not load that until
+@@ -352,9 +358,7 @@ int mnt_want_write(struct vfsmount *mnt)
+ 	if (__mnt_is_readonly(mnt)) {
+ 		mnt_dec_writers(mnt);
+ 		ret = -EROFS;
+-		goto out;
+ 	}
+-out:
+ 	preempt_enable();
+ 	return ret;
+ }
+Index: linux-3.2/fs/exec.c
+===================================================================
+--- linux-3.2.orig/fs/exec.c
++++ linux-3.2/fs/exec.c
+@@ -837,10 +837,12 @@ static int exec_mmap(struct mm_struct *m
+ 		}
+ 	}
+ 	task_lock(tsk);
++	local_irq_disable_rt();
+ 	active_mm = tsk->active_mm;
+ 	tsk->mm = mm;
+ 	tsk->active_mm = mm;
+ 	activate_mm(active_mm, mm);
++	local_irq_enable_rt();
+ 	task_unlock(tsk);
+ 	arch_pick_mmap_layout(mm);
+ 	if (old_mm) {
+Index: linux-3.2/mm/mmu_context.c
+===================================================================
+--- linux-3.2.orig/mm/mmu_context.c
++++ linux-3.2/mm/mmu_context.c
+@@ -26,6 +26,7 @@ void use_mm(struct mm_struct *mm)
+ 	struct task_struct *tsk = current;
+ 
+ 	task_lock(tsk);
++	local_irq_disable_rt();
+ 	active_mm = tsk->active_mm;
+ 	if (active_mm != mm) {
+ 		atomic_inc(&mm->mm_count);
+@@ -33,6 +34,7 @@ void use_mm(struct mm_struct *mm)
+ 	}
+ 	tsk->mm = mm;
+ 	switch_mm(active_mm, mm, tsk);
++	local_irq_enable_rt();
+ 	task_unlock(tsk);
+ 
+ 	if (active_mm != mm)
+Index: linux-3.2/arch/x86/kernel/cpu/mcheck/mce.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/cpu/mcheck/mce.c
++++ linux-3.2/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -38,6 +38,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
++#include <linux/jiffies.h>
+ 
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+@@ -1106,17 +1107,14 @@ void mce_log_therm_throt_event(__u64 sta
+  * poller finds an MCE, poll 2x faster.  When the poller finds no more
+  * errors, poll 2x slower (up to check_interval seconds).
+  */
+-static int check_interval = 5 * 60; /* 5 minutes */
++static unsigned long check_interval = 5 * 60; /* 5 minutes */
+ 
+-static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
+-static DEFINE_PER_CPU(struct timer_list, mce_timer);
++static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
++static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+ 
+-static void mce_start_timer(unsigned long data)
++static enum hrtimer_restart mce_start_timer(struct hrtimer *timer)
+ {
+-	struct timer_list *t = &per_cpu(mce_timer, data);
+-	int *n;
+-
+-	WARN_ON(smp_processor_id() != data);
++	unsigned long *n;
+ 
+ 	if (mce_available(__this_cpu_ptr(&cpu_info))) {
+ 		machine_check_poll(MCP_TIMESTAMP,
+@@ -1129,21 +1127,22 @@ static void mce_start_timer(unsigned lon
+ 	 */
+ 	n = &__get_cpu_var(mce_next_interval);
+ 	if (mce_notify_irq())
+-		*n = max(*n/2, HZ/100);
++		*n = max(*n/2, HZ/100UL);
+ 	else
+-		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
++		*n = min(*n*2, round_jiffies_relative(check_interval*HZ));
+ 
+-	t->expires = jiffies + *n;
+-	add_timer_on(t, smp_processor_id());
++	hrtimer_forward(timer, timer->base->get_time(),
++			ns_to_ktime(jiffies_to_usecs(*n) * 1000));
++	return HRTIMER_RESTART;
+ }
+ 
+-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
++/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
+ static void mce_timer_delete_all(void)
+ {
+ 	int cpu;
+ 
+ 	for_each_online_cpu(cpu)
+-		del_timer_sync(&per_cpu(mce_timer, cpu));
++		hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ }
+ 
+ static void mce_do_trigger(struct work_struct *work)
+@@ -1375,10 +1374,11 @@ static void __mcheck_cpu_init_vendor(str
+ 
+ static void __mcheck_cpu_init_timer(void)
+ {
+-	struct timer_list *t = &__get_cpu_var(mce_timer);
+-	int *n = &__get_cpu_var(mce_next_interval);
++	struct hrtimer *t = &__get_cpu_var(mce_timer);
++	unsigned long *n = &__get_cpu_var(mce_next_interval);
+ 
+-	setup_timer(t, mce_start_timer, smp_processor_id());
++	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++	t->function = mce_start_timer;
+ 
+ 	if (mce_ignore_ce)
+ 		return;
+@@ -1386,8 +1386,9 @@ static void __mcheck_cpu_init_timer(void
+ 	*n = check_interval * HZ;
+ 	if (!*n)
+ 		return;
+-	t->expires = round_jiffies(jiffies + *n);
+-	add_timer_on(t, smp_processor_id());
++
++	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(*n) * 1000),
++			       0 , HRTIMER_MODE_REL_PINNED);
+ }
+ 
+ /* Handle unconfigured int18 (should never happen) */
+@@ -2004,6 +2005,8 @@ static void __cpuinit mce_disable_cpu(vo
+ 	if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ 		return;
+ 
++	hrtimer_cancel(&__get_cpu_var(mce_timer));
++
+ 	if (!(action & CPU_TASKS_FROZEN))
+ 		cmci_clear();
+ 	for (i = 0; i < banks; i++) {
+@@ -2030,6 +2033,7 @@ static void __cpuinit mce_reenable_cpu(v
+ 		if (b->init)
+ 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+ 	}
++	__mcheck_cpu_init_timer();
+ }
+ 
+ /* Get notified when a cpu comes on/off. Be hotplug friendly. */
+@@ -2037,7 +2041,6 @@ static int __cpuinit
+ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ {
+ 	unsigned int cpu = (unsigned long)hcpu;
+-	struct timer_list *t = &per_cpu(mce_timer, cpu);
+ 
+ 	switch (action) {
+ 	case CPU_ONLINE:
+@@ -2054,16 +2057,10 @@ mce_cpu_callback(struct notifier_block *
+ 		break;
+ 	case CPU_DOWN_PREPARE:
+ 	case CPU_DOWN_PREPARE_FROZEN:
+-		del_timer_sync(t);
+ 		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+ 		break;
+ 	case CPU_DOWN_FAILED:
+ 	case CPU_DOWN_FAILED_FROZEN:
+-		if (!mce_ignore_ce && check_interval) {
+-			t->expires = round_jiffies(jiffies +
+-					   __get_cpu_var(mce_next_interval));
+-			add_timer_on(t, cpu);
+-		}
+ 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+ 		break;
+ 	case CPU_POST_DEAD:
+Index: linux-3.2/arch/x86/include/asm/stackprotector.h
+===================================================================
+--- linux-3.2.orig/arch/x86/include/asm/stackprotector.h
++++ linux-3.2/arch/x86/include/asm/stackprotector.h
+@@ -58,7 +58,7 @@
+  */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+-	u64 canary;
++	u64 uninitialized_var(canary);
+ 	u64 tsc;
+ 
+ #ifdef CONFIG_X86_64
+@@ -69,8 +69,16 @@ static __always_inline void boot_init_st
+ 	 * of randomness. The TSC only matters for very early init,
+ 	 * there it already has some randomness on most systems. Later
+ 	 * on during the bootup the random pool has true entropy too.
++	 *
++	 * For preempt-rt we need to weaken the randomness a bit, as
++	 * we can't call into the random generator from atomic context
++	 * due to locking constraints. We just leave canary
++	 * uninitialized and use the TSC based randomness on top of
++	 * it.
+ 	 */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	get_random_bytes(&canary, sizeof(canary));
++#endif
+ 	tsc = __native_read_tsc();
+ 	canary += tsc + (tsc << 32UL);
+ 
+Index: linux-3.2/arch/x86/Kconfig
+===================================================================
+--- linux-3.2.orig/arch/x86/Kconfig
++++ linux-3.2/arch/x86/Kconfig
+@@ -161,10 +161,10 @@ config ARCH_MAY_HAVE_PC_FDC
+ 	def_bool ISA_DMA_API
+ 
+ config RWSEM_GENERIC_SPINLOCK
+-	def_bool !X86_XADD
++	def_bool !X86_XADD || PREEMPT_RT_FULL
+ 
+ config RWSEM_XCHGADD_ALGORITHM
+-	def_bool X86_XADD
++	def_bool X86_XADD && !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+ 
+ config ARCH_HAS_CPU_IDLE_WAIT
+ 	def_bool y
+Index: linux-3.2/arch/x86/include/asm/page_64_types.h
+===================================================================
+--- linux-3.2.orig/arch/x86/include/asm/page_64_types.h
++++ linux-3.2/arch/x86/include/asm/page_64_types.h
+@@ -14,12 +14,21 @@
+ #define IRQ_STACK_ORDER 2
+ #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+ 
+-#define STACKFAULT_STACK 1
+-#define DOUBLEFAULT_STACK 2
+-#define NMI_STACK 3
+-#define DEBUG_STACK 4
+-#define MCE_STACK 5
+-#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define STACKFAULT_STACK 0
++# define DOUBLEFAULT_STACK 1
++# define NMI_STACK 2
++# define DEBUG_STACK 0
++# define MCE_STACK 3
++# define N_EXCEPTION_STACKS 3  /* hw limit: 7 */
++#else
++# define STACKFAULT_STACK 1
++# define DOUBLEFAULT_STACK 2
++# define NMI_STACK 3
++# define DEBUG_STACK 4
++# define MCE_STACK 5
++# define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
++#endif
+ 
+ #define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
+ #define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+Index: linux-3.2/arch/x86/kernel/cpu/common.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/cpu/common.c
++++ linux-3.2/arch/x86/kernel/cpu/common.c
+@@ -1055,7 +1055,9 @@ DEFINE_PER_CPU(unsigned int, irq_count) 
+  */
+ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+ 	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
++#if DEBUG_STACK > 0
+ 	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
++#endif
+ };
+ 
+ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+Index: linux-3.2/arch/x86/kernel/dumpstack_64.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/dumpstack_64.c
++++ linux-3.2/arch/x86/kernel/dumpstack_64.c
+@@ -21,10 +21,14 @@
+ 		(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)
+ 
+ static char x86_stack_ids[][8] = {
++#if DEBUG_STACK > 0
+ 		[ DEBUG_STACK-1			]	= "#DB",
++#endif
+ 		[ NMI_STACK-1			]	= "NMI",
+ 		[ DOUBLEFAULT_STACK-1		]	= "#DF",
++#if STACKFAULT_STACK > 0
+ 		[ STACKFAULT_STACK-1		]	= "#SS",
++#endif
+ 		[ MCE_STACK-1			]	= "#MC",
+ #if DEBUG_STKSZ > EXCEPTION_STKSZ
+ 		[ N_EXCEPTION_STACKS ...
+Index: linux-3.2/fs/eventpoll.c
+===================================================================
+--- linux-3.2.orig/fs/eventpoll.c
++++ linux-3.2/fs/eventpoll.c
+@@ -438,12 +438,12 @@ static int ep_poll_wakeup_proc(void *pri
+  */
+ static void ep_poll_safewake(wait_queue_head_t *wq)
+ {
+-	int this_cpu = get_cpu();
++	int this_cpu = get_cpu_light();
+ 
+ 	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
+ 		       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
+ 
+-	put_cpu();
++	put_cpu_light();
+ }
+ 
+ /*
+Index: linux-3.2/mm/vmalloc.c
+===================================================================
+--- linux-3.2.orig/mm/vmalloc.c
++++ linux-3.2/mm/vmalloc.c
+@@ -782,7 +782,7 @@ static struct vmap_block *new_vmap_block
+ 	struct vmap_block *vb;
+ 	struct vmap_area *va;
+ 	unsigned long vb_idx;
+-	int node, err;
++	int node, err, cpu;
+ 
+ 	node = numa_node_id();
+ 
+@@ -821,12 +821,13 @@ static struct vmap_block *new_vmap_block
+ 	BUG_ON(err);
+ 	radix_tree_preload_end();
+ 
+-	vbq = &get_cpu_var(vmap_block_queue);
++	cpu = get_cpu_light();
++	vbq = &__get_cpu_var(vmap_block_queue);
+ 	vb->vbq = vbq;
+ 	spin_lock(&vbq->lock);
+ 	list_add_rcu(&vb->free_list, &vbq->free);
+ 	spin_unlock(&vbq->lock);
+-	put_cpu_var(vmap_block_queue);
++	put_cpu_light();
+ 
+ 	return vb;
+ }
+@@ -900,7 +901,7 @@ static void *vb_alloc(unsigned long size
+ 	struct vmap_block *vb;
+ 	unsigned long addr = 0;
+ 	unsigned int order;
+-	int purge = 0;
++	int purge = 0, cpu;
+ 
+ 	BUG_ON(size & ~PAGE_MASK);
+ 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+@@ -908,7 +909,8 @@ static void *vb_alloc(unsigned long size
+ 
+ again:
+ 	rcu_read_lock();
+-	vbq = &get_cpu_var(vmap_block_queue);
++	cpu = get_cpu_light();
++	vbq = &__get_cpu_var(vmap_block_queue);
+ 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+ 		int i;
+ 
+@@ -945,7 +947,7 @@ next:
+ 	if (purge)
+ 		purge_fragmented_blocks_thiscpu();
+ 
+-	put_cpu_var(vmap_block_queue);
++	put_cpu_light();
+ 	rcu_read_unlock();
+ 
+ 	if (!addr) {
+Index: linux-3.2/include/linux/workqueue.h
+===================================================================
+--- linux-3.2.orig/include/linux/workqueue.h
++++ linux-3.2/include/linux/workqueue.h
+@@ -254,9 +254,10 @@ enum {
+ 	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
+ 	WQ_HIGHPRI		= 1 << 4, /* high priority */
+ 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
++	WQ_NON_AFFINE		= 1 << 6, /* free to move works around cpus */
+ 
+-	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
+-	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */
++	WQ_DRAINING		= 1 << 7, /* internal: workqueue is draining */
++	WQ_RESCUER		= 1 << 8, /* internal: workqueue has rescuer */
+ 
+ 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
+ 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
+Index: linux-3.2/lib/debugobjects.c
+===================================================================
+--- linux-3.2.orig/lib/debugobjects.c
++++ linux-3.2/lib/debugobjects.c
+@@ -306,7 +306,10 @@ __debug_object_init(void *addr, struct d
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+ 
+-	fill_pool();
++#ifdef CONFIG_PREEMPT_RT_FULL
++	if (preempt_count() == 0 && !irqs_disabled())
++#endif
++		fill_pool();
+ 
+ 	db = get_bucket((unsigned long) addr);
+ 
+@@ -1015,9 +1018,9 @@ static int __init debug_objects_replace_
+ 		}
+ 	}
+ 
++	local_irq_enable();
+ 	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
+ 	       obj_pool_used);
+-	local_irq_enable();
+ 	return 0;
+ free:
+ 	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
+Index: linux-3.2/include/linux/jump_label.h
+===================================================================
+--- linux-3.2.orig/include/linux/jump_label.h
++++ linux-3.2/include/linux/jump_label.h
+@@ -4,7 +4,7 @@
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+ 
+-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && !defined(CONFIG_PREEMPT_BASE)
+ 
+ struct jump_label_key {
+ 	atomic_t enabled;
+Index: linux-3.2/include/linux/netdevice.h
+===================================================================
+--- linux-3.2.orig/include/linux/netdevice.h
++++ linux-3.2/include/linux/netdevice.h
+@@ -1760,6 +1760,7 @@ struct softnet_data {
+ 	unsigned		dropped;
+ 	struct sk_buff_head	input_pkt_queue;
+ 	struct napi_struct	backlog;
++	struct sk_buff_head	tofree_queue;
+ };
+ 
+ static inline void input_queue_head_incr(struct softnet_data *sd)
+Index: linux-3.2/include/linux/skbuff.h
+===================================================================
+--- linux-3.2.orig/include/linux/skbuff.h
++++ linux-3.2/include/linux/skbuff.h
+@@ -124,6 +124,7 @@ struct sk_buff_head {
+ 
+ 	__u32		qlen;
+ 	spinlock_t	lock;
++	raw_spinlock_t	raw_lock;
+ };
+ 
+ struct sk_buff;
+@@ -925,6 +926,12 @@ static inline void skb_queue_head_init(s
+ 	__skb_queue_head_init(list);
+ }
+ 
++static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
++{
++	raw_spin_lock_init(&list->raw_lock);
++	__skb_queue_head_init(list);
++}
++
+ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+ 		struct lock_class_key *class)
+ {
+Index: linux-3.2/arch/x86/kernel/irq_work.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/irq_work.c
++++ linux-3.2/arch/x86/kernel/irq_work.c
+@@ -18,6 +18,7 @@ void smp_irq_work_interrupt(struct pt_re
+ 	irq_exit();
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void arch_irq_work_raise(void)
+ {
+ #ifdef CONFIG_X86_LOCAL_APIC
+@@ -28,3 +29,4 @@ void arch_irq_work_raise(void)
+ 	apic_wait_icr_idle();
+ #endif
+ }
++#endif
+Index: linux-3.2/kernel/irq_work.c
+===================================================================
+--- linux-3.2.orig/kernel/irq_work.c
++++ linux-3.2/kernel/irq_work.c
+@@ -105,8 +105,10 @@ void irq_work_run(void)
+ 	if (llist_empty(this_list))
+ 		return;
+ 
++#ifndef CONFIG_PREEMPT_RT_FULL
+ 	BUG_ON(!in_irq());
+ 	BUG_ON(!irqs_disabled());
++#endif
+ 
+ 	llnode = llist_del_all(this_list);
+ 	while (llnode != NULL) {
+Index: linux-3.2/arch/mips/Kconfig
+===================================================================
+--- linux-3.2.orig/arch/mips/Kconfig
++++ linux-3.2/arch/mips/Kconfig
+@@ -2040,7 +2040,7 @@ config CPU_R4400_WORKAROUNDS
+ #
+ config HIGHMEM
+ 	bool "High Memory Support"
+-	depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
++	depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL
+ 
+ config CPU_SUPPORTS_HIGHMEM
+ 	bool
+Index: linux-3.2/Documentation/sysrq.txt
+===================================================================
+--- linux-3.2.orig/Documentation/sysrq.txt
++++ linux-3.2/Documentation/sysrq.txt
+@@ -57,10 +57,17 @@ On PowerPC - Press 'ALT - Print Screen (
+ On other - If you know of the key combos for other architectures, please
+            let me know so I can add them to this section.
+ 
+-On all -  write a character to /proc/sysrq-trigger.  e.g.:
+-
++On all -  write a character to /proc/sysrq-trigger, e.g.:
+ 		echo t > /proc/sysrq-trigger
+ 
++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
++		echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
++	 Send an ICMP echo request with this pattern plus the particular
++	 SysRq command key. Example:
++	 	# ping -c1 -s57 -p0102030468
++	 will trigger the SysRq-H (help) command.
++
++
+ *  What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'b'     - Will immediately reboot the system without syncing or unmounting
+Index: linux-3.2/include/net/netns/ipv4.h
+===================================================================
+--- linux-3.2.orig/include/net/netns/ipv4.h
++++ linux-3.2/include/net/netns/ipv4.h
+@@ -47,6 +47,7 @@ struct netns_ipv4 {
+ 
+ 	int sysctl_icmp_echo_ignore_all;
+ 	int sysctl_icmp_echo_ignore_broadcasts;
++	int sysctl_icmp_echo_sysrq;
+ 	int sysctl_icmp_ignore_bogus_error_responses;
+ 	int sysctl_icmp_ratelimit;
+ 	int sysctl_icmp_ratemask;
+Index: linux-3.2/net/ipv4/icmp.c
+===================================================================
+--- linux-3.2.orig/net/ipv4/icmp.c
++++ linux-3.2/net/ipv4/icmp.c
+@@ -67,6 +67,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/kernel.h>
+ #include <linux/fcntl.h>
++#include <linux/sysrq.h>
+ #include <linux/socket.h>
+ #include <linux/in.h>
+ #include <linux/inet.h>
+@@ -801,6 +802,30 @@ out_err:
+ }
+ 
+ /*
++ * 32bit and 64bit have different timestamp lengths, so we check for
++ * the cookie at offset 20 and verify it is repeated at offset 50
++ */
++#define CO_POS0		20
++#define CO_POS1		50
++#define CO_SIZE		sizeof(int)
++#define ICMP_SYSRQ_SIZE	57
++
++/*
++ * We got an ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
++ * pattern and if it matches send the next byte as a trigger to sysrq.
++ */
++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
++{
++	int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
++	char *p = skb->data;
++
++	if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
++	    !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
++	    p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
++		handle_sysrq(p[CO_POS0 + CO_SIZE]);
++}
++
++/*
+  *	Handle ICMP_ECHO ("ping") requests.
+  *
+  *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
+@@ -827,6 +852,11 @@ static void icmp_echo(struct sk_buff *sk
+ 		icmp_param.data_len	   = skb->len;
+ 		icmp_param.head_len	   = sizeof(struct icmphdr);
+ 		icmp_reply(&icmp_param, skb);
++
++		if (skb->len == ICMP_SYSRQ_SIZE &&
++		    net->ipv4.sysctl_icmp_echo_sysrq) {
++			icmp_check_sysrq(net, skb);
++		}
+ 	}
+ }
+ 
+Index: linux-3.2/net/ipv4/sysctl_net_ipv4.c
+===================================================================
+--- linux-3.2.orig/net/ipv4/sysctl_net_ipv4.c
++++ linux-3.2/net/ipv4/sysctl_net_ipv4.c
+@@ -680,6 +680,13 @@ static struct ctl_table ipv4_net_table[]
+ 		.proc_handler	= proc_dointvec
+ 	},
+ 	{
++		.procname	= "icmp_echo_sysrq",
++		.data		= &init_net.ipv4.sysctl_icmp_echo_sysrq,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec
++	},
++	{
+ 		.procname	= "icmp_ignore_bogus_error_responses",
+ 		.data		= &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
+ 		.maxlen		= sizeof(int),
+Index: linux-3.2/include/linux/kdb.h
+===================================================================
+--- linux-3.2.orig/include/linux/kdb.h
++++ linux-3.2/include/linux/kdb.h
+@@ -150,12 +150,14 @@ extern int kdb_register(char *, kdb_func
+ extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
+ 			       short, kdb_repeat_t);
+ extern int kdb_unregister(char *);
++#define in_kdb_printk() (kdb_trap_printk)
+ #else /* ! CONFIG_KGDB_KDB */
+ #define kdb_printf(...)
+ #define kdb_init(x)
+ #define kdb_register(...)
+ #define kdb_register_repeat(...)
+ #define kdb_uregister(x)
++#define in_kdb_printk() (0)
+ #endif	/* CONFIG_KGDB_KDB */
+ enum {
+ 	KDB_NOT_INITIALIZED,
+Index: linux-3.2/kernel/debug/kdb/kdb_io.c
+===================================================================
+--- linux-3.2.orig/kernel/debug/kdb/kdb_io.c
++++ linux-3.2/kernel/debug/kdb/kdb_io.c
+@@ -553,7 +553,6 @@ int vkdb_printf(const char *fmt, va_list
+ 	int diag;
+ 	int linecount;
+ 	int logging, saved_loglevel = 0;
+-	int saved_trap_printk;
+ 	int got_printf_lock = 0;
+ 	int retlen = 0;
+ 	int fnd, len;
+@@ -564,8 +563,6 @@ int vkdb_printf(const char *fmt, va_list
+ 	unsigned long uninitialized_var(flags);
+ 
+ 	preempt_disable();
+-	saved_trap_printk = kdb_trap_printk;
+-	kdb_trap_printk = 0;
+ 
+ 	/* Serialize kdb_printf if multiple cpus try to write at once.
+ 	 * But if any cpu goes recursive in kdb, just print the output,
+@@ -821,7 +818,6 @@ kdb_print_out:
+ 	} else {
+ 		__release(kdb_printf_lock);
+ 	}
+-	kdb_trap_printk = saved_trap_printk;
+ 	preempt_enable();
+ 	return retlen;
+ }
+@@ -831,9 +827,11 @@ int kdb_printf(const char *fmt, ...)
+ 	va_list ap;
+ 	int r;
+ 
++	kdb_trap_printk++;
+ 	va_start(ap, fmt);
+ 	r = vkdb_printf(fmt, ap);
+ 	va_end(ap);
++	kdb_trap_printk--;
+ 
+ 	return r;
+ }
+Index: linux-3.2/kernel/ksysfs.c
+===================================================================
+--- linux-3.2.orig/kernel/ksysfs.c
++++ linux-3.2/kernel/ksysfs.c
+@@ -133,6 +133,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
+ 
+ #endif /* CONFIG_KEXEC */
+ 
++#if defined(CONFIG_PREEMPT_RT_FULL)
++static ssize_t  realtime_show(struct kobject *kobj,
++			      struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%d\n", 1);
++}
++KERNEL_ATTR_RO(realtime);
++#endif
++
+ /* whether file capabilities are enabled */
+ static ssize_t fscaps_show(struct kobject *kobj,
+ 				  struct kobj_attribute *attr, char *buf)
+@@ -182,6 +191,9 @@ static struct attribute * kernel_attrs[]
+ 	&kexec_crash_size_attr.attr,
+ 	&vmcoreinfo_attr.attr,
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++	&realtime_attr.attr,
++#endif
+ 	NULL
+ };
+ 
+Index: linux-3.2/ipc/sem.c
+===================================================================
+--- linux-3.2.orig/ipc/sem.c
++++ linux-3.2/ipc/sem.c
+@@ -461,6 +461,13 @@ undo:
+ static void wake_up_sem_queue_prepare(struct list_head *pt,
+ 				struct sem_queue *q, int error)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++	struct task_struct *p = q->sleeper;
++	get_task_struct(p);
++	q->status = error;
++	wake_up_process(p);
++	put_task_struct(p);
++#else
+ 	if (list_empty(pt)) {
+ 		/*
+ 		 * Hold preempt off so that we don't get preempted and have the
+@@ -472,6 +479,7 @@ static void wake_up_sem_queue_prepare(st
+ 	q->pid = error;
+ 
+ 	list_add_tail(&q->simple_list, pt);
++#endif
+ }
+ 
+ /**
+@@ -485,6 +493,7 @@ static void wake_up_sem_queue_prepare(st
+  */
+ static void wake_up_sem_queue_do(struct list_head *pt)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ 	struct sem_queue *q, *t;
+ 	int did_something;
+ 
+@@ -497,6 +506,7 @@ static void wake_up_sem_queue_do(struct 
+ 	}
+ 	if (did_something)
+ 		preempt_enable();
++#endif
+ }
+ 
+ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+Index: linux-3.2/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+===================================================================
+--- linux-3.2.orig/drivers/tty/serial/cpm_uart/cpm_uart_core.c
++++ linux-3.2/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+@@ -1226,7 +1226,7 @@ static void cpm_uart_console_write(struc
+ {
+ 	struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
+ 	unsigned long flags;
+-	int nolock = oops_in_progress;
++	int nolock = oops_in_progress || sysrq_in_progress;
+ 
+ 	if (unlikely(nolock)) {
+ 		local_irq_save(flags);
+Index: linux-3.2/drivers/tty/sysrq.c
+===================================================================
+--- linux-3.2.orig/drivers/tty/sysrq.c
++++ linux-3.2/drivers/tty/sysrq.c
+@@ -492,6 +492,23 @@ static void __sysrq_put_key_op(int key, 
+                 sysrq_key_table[i] = op_p;
+ }
+ 
++#ifdef CONFIG_MAGIC_SYSRQ_FORCE_PRINTK
++
++int sysrq_in_progress;
++
++static void set_sysrq_in_progress(int value)
++{
++	sysrq_in_progress = value;
++}
++
++#else
++
++static void set_sysrq_in_progress(int value)
++{
++}
++
++#endif
++
+ void __handle_sysrq(int key, bool check_mask)
+ {
+ 	struct sysrq_key_op *op_p;
+@@ -500,6 +517,9 @@ void __handle_sysrq(int key, bool check_
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&sysrq_key_table_lock, flags);
++
++	set_sysrq_in_progress(1);
++
+ 	/*
+ 	 * Raise the apparent loglevel to maximum so that the sysrq header
+ 	 * is shown to provide the user with positive feedback.  We do not
+@@ -541,6 +561,9 @@ void __handle_sysrq(int key, bool check_
+ 		printk("\n");
+ 		console_loglevel = orig_log_level;
+ 	}
++
++	set_sysrq_in_progress(0);
++
+ 	spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+ }
+ 
+Index: linux-3.2/include/linux/sysrq.h
+===================================================================
+--- linux-3.2.orig/include/linux/sysrq.h
++++ linux-3.2/include/linux/sysrq.h
+@@ -38,6 +38,11 @@ struct sysrq_key_op {
+ 	int enable_mask;
+ };
+ 
++#ifdef CONFIG_MAGIC_SYSRQ_FORCE_PRINTK
++extern int sysrq_in_progress;
++#else
++#define sysrq_in_progress 0
++#endif
+ #ifdef CONFIG_MAGIC_SYSRQ
+ 
+ /* Generic SysRq interface -- you may call it from any device driver, supplying
+Index: linux-3.2/arch/x86/kvm/x86.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kvm/x86.c
++++ linux-3.2/arch/x86/kvm/x86.c
+@@ -5185,6 +5185,13 @@ int kvm_arch_init(void *opaque)
+ 		goto out;
+ 	}
+ 
++#ifdef CONFIG_PREEMPT_RT_FULL
++	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
++		printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
++		return -EOPNOTSUPP;
++	}
++#endif
++
+ 	r = kvm_mmu_module_init();
+ 	if (r)
+ 		goto out;
+Index: linux-3.2/drivers/scsi/fcoe/fcoe.c
+===================================================================
+--- linux-3.2.orig/drivers/scsi/fcoe/fcoe.c
++++ linux-3.2/drivers/scsi/fcoe/fcoe.c
+@@ -1147,7 +1147,7 @@ static void fcoe_percpu_thread_destroy(u
+ 	struct sk_buff *skb;
+ #ifdef CONFIG_SMP
+ 	struct fcoe_percpu_s *p0;
+-	unsigned targ_cpu = get_cpu();
++	unsigned targ_cpu = get_cpu_light();
+ #endif /* CONFIG_SMP */
+ 
+ 	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
+@@ -1203,7 +1203,7 @@ static void fcoe_percpu_thread_destroy(u
+ 			kfree_skb(skb);
+ 		spin_unlock_bh(&p->fcoe_rx_list.lock);
+ 	}
+-	put_cpu();
++	put_cpu_light();
+ #else
+ 	/*
+ 	 * This a non-SMP scenario where the singular Rx thread is
+@@ -1426,11 +1426,11 @@ err2:
+ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
+ {
+ 	struct fcoe_percpu_s *fps;
+-	int rc;
++	int rc, cpu = get_cpu_light();
+ 
+-	fps = &get_cpu_var(fcoe_percpu);
++	fps = &per_cpu(fcoe_percpu, cpu);
+ 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
+-	put_cpu_var(fcoe_percpu);
++	put_cpu_light();
+ 
+ 	return rc;
+ }
+@@ -1624,6 +1624,7 @@ static inline int fcoe_filter_frames(str
+ 	stats->InvalidCRCCount++;
+ 	if (stats->InvalidCRCCount < 5)
+ 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
++	put_cpu();
+ 	return -EINVAL;
+ }
+ 
+@@ -1668,7 +1669,7 @@ static void fcoe_recv_frame(struct sk_bu
+ 	 */
+ 	hp = (struct fcoe_hdr *) skb_network_header(skb);
+ 
+-	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
++	stats = per_cpu_ptr(lport->dev_stats, get_cpu_light());
+ 	if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ 		if (stats->ErrorFrames < 5)
+ 			printk(KERN_WARNING "fcoe: FCoE version "
+@@ -1700,13 +1701,13 @@ static void fcoe_recv_frame(struct sk_bu
+ 		goto drop;
+ 
+ 	if (!fcoe_filter_frames(lport, fp)) {
+-		put_cpu();
++		put_cpu_light();
+ 		fc_exch_recv(lport, fp);
+ 		return;
+ 	}
+ drop:
+ 	stats->ErrorFrames++;
+-	put_cpu();
++	put_cpu_light();
+ 	kfree_skb(skb);
+ }
+ 
+Index: linux-3.2/drivers/scsi/fcoe/fcoe_ctlr.c
+===================================================================
+--- linux-3.2.orig/drivers/scsi/fcoe/fcoe_ctlr.c
++++ linux-3.2/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -716,7 +716,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+ 	unsigned long sel_time = 0;
+ 	struct fcoe_dev_stats *stats;
+ 
+-	stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
++	stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu_light());
+ 
+ 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+ 		deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
+@@ -749,7 +749,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+ 				sel_time = fcf->time;
+ 		}
+ 	}
+-	put_cpu();
++	put_cpu_light();
+ 	if (sel_time && !fip->sel_fcf && !fip->sel_time) {
+ 		sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+ 		fip->sel_time = sel_time;
+Index: linux-3.2/drivers/scsi/libfc/fc_exch.c
+===================================================================
+--- linux-3.2.orig/drivers/scsi/libfc/fc_exch.c
++++ linux-3.2/drivers/scsi/libfc/fc_exch.c
+@@ -724,10 +724,10 @@ static struct fc_exch *fc_exch_em_alloc(
+ 	}
+ 	memset(ep, 0, sizeof(*ep));
+ 
+-	cpu = get_cpu();
++	cpu = get_cpu_light();
+ 	pool = per_cpu_ptr(mp->pool, cpu);
+ 	spin_lock_bh(&pool->lock);
+-	put_cpu();
++	put_cpu_light();
+ 
+ 	/* peek cache of free slot */
+ 	if (pool->left != FC_XID_UNKNOWN) {
+Index: linux-3.2/arch/x86/crypto/aesni-intel_glue.c
+===================================================================
+--- linux-3.2.orig/arch/x86/crypto/aesni-intel_glue.c
++++ linux-3.2/arch/x86/crypto/aesni-intel_glue.c
+@@ -289,14 +289,14 @@ static int ecb_encrypt(struct blkcipher_
+ 	err = blkcipher_walk_virt(desc, &walk);
+ 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+-			      nbytes & AES_BLOCK_MASK);
++				nbytes & AES_BLOCK_MASK);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -313,14 +313,14 @@ static int ecb_decrypt(struct blkcipher_
+ 	err = blkcipher_walk_virt(desc, &walk);
+ 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -359,14 +359,14 @@ static int cbc_encrypt(struct blkcipher_
+ 	err = blkcipher_walk_virt(desc, &walk);
+ 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK, walk.iv);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -383,14 +383,14 @@ static int cbc_decrypt(struct blkcipher_
+ 	err = blkcipher_walk_virt(desc, &walk);
+ 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes)) {
++		kernel_fpu_begin();
+ 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK, walk.iv);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+@@ -445,18 +445,20 @@ static int ctr_crypt(struct blkcipher_de
+ 	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+-	kernel_fpu_begin();
+ 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++		kernel_fpu_begin();
+ 		aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ 			      nbytes & AES_BLOCK_MASK, walk.iv);
++		kernel_fpu_end();
+ 		nbytes &= AES_BLOCK_SIZE - 1;
+ 		err = blkcipher_walk_done(desc, &walk, nbytes);
+ 	}
+ 	if (walk.nbytes) {
++		kernel_fpu_begin();
+ 		ctr_crypt_final(ctx, &walk);
++		kernel_fpu_end();
+ 		err = blkcipher_walk_done(desc, &walk, 0);
+ 	}
+-	kernel_fpu_end();
+ 
+ 	return err;
+ }
+Index: linux-3.2/drivers/md/dm.c
+===================================================================
+--- linux-3.2.orig/drivers/md/dm.c
++++ linux-3.2/drivers/md/dm.c
+@@ -1648,14 +1648,14 @@ static void dm_request_fn(struct request
+ 		if (map_request(ti, clone, md))
+ 			goto requeued;
+ 
+-		BUG_ON(!irqs_disabled());
++		BUG_ON_NORT(!irqs_disabled());
+ 		spin_lock(q->queue_lock);
+ 	}
+ 
+ 	goto out;
+ 
+ requeued:
+-	BUG_ON(!irqs_disabled());
++	BUG_ON_NORT(!irqs_disabled());
+ 	spin_lock(q->queue_lock);
+ 
+ delay_and_out:
+Index: linux-3.2/arch/Kconfig
+===================================================================
+--- linux-3.2.orig/arch/Kconfig
++++ linux-3.2/arch/Kconfig
+@@ -6,6 +6,7 @@ config OPROFILE
+ 	tristate "OProfile system profiling"
+ 	depends on PROFILING
+ 	depends on HAVE_OPROFILE
++	depends on !PREEMPT_RT_FULL
+ 	select RING_BUFFER
+ 	select RING_BUFFER_ALLOW_SWAP
+ 	help
+Index: linux-3.2/drivers/net/Kconfig
+===================================================================
+--- linux-3.2.orig/drivers/net/Kconfig
++++ linux-3.2/drivers/net/Kconfig
+@@ -154,6 +154,7 @@ config MACVTAP
+ 
+ config NETCONSOLE
+ 	tristate "Network console logging support"
++	depends on !PREEMPT_RT_FULL
+ 	---help---
+ 	If you want to log kernel messages over the network, enable this.
+ 	See <file:Documentation/networking/netconsole.txt> for details.
+Index: linux-3.2/mm/Kconfig
+===================================================================
+--- linux-3.2.orig/mm/Kconfig
++++ linux-3.2/mm/Kconfig
+@@ -307,7 +307,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+ 
+ config TRANSPARENT_HUGEPAGE
+ 	bool "Transparent Hugepage Support"
+-	depends on X86 && MMU
++	depends on X86 && MMU && !PREEMPT_RT_FULL
+ 	select COMPACTION
+ 	help
+ 	  Transparent Hugepages allows the kernel to use huge pages and
+Index: linux-3.2/init/Makefile
+===================================================================
+--- linux-3.2.orig/init/Makefile
++++ linux-3.2/init/Makefile
+@@ -29,4 +29,4 @@ silent_chk_compile.h = :
+ include/generated/compile.h: FORCE
+ 	@$($(quiet)chk_compile.h)
+ 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+-	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
++	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
+Index: linux-3.2/scripts/mkcompile_h
+===================================================================
+--- linux-3.2.orig/scripts/mkcompile_h
++++ linux-3.2/scripts/mkcompile_h
+@@ -4,7 +4,8 @@ TARGET=$1
+ ARCH=$2
+ SMP=$3
+ PREEMPT=$4
+-CC=$5
++RT=$5
++CC=$6
+ 
+ vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
+ 
+@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
+ CONFIG_FLAGS=""
+ if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+ if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
+ UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
+ 
+ # Truncate to maximum length
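
For reference, a minimal userspace sketch (not part of either patch; the helper name fill_sysrq_payload is made up here) of a 57-byte echo payload that satisfies the icmp_check_sysrq() cookie test added above. The offsets and size follow CO_POS0, CO_POS1, CO_SIZE and ICMP_SYSRQ_SIZE from the hunk; real ping overwrites the first payload bytes with a timestamp, which is exactly why the check only looks at offsets 20 and 50, so filling the whole buffer with the repeated 5-byte pattern (network-order cookie followed by the SysRq key) is sufficient, and is what "ping -p0102030468" effectively produces.

/*
 * Hypothetical illustration only: build a payload accepted by the
 * icmp_check_sysrq() hunk above.  The 5-byte pattern repeats, so the
 * cookie lands at offsets 20 and 50 with the key byte after each copy.
 */
#include <stdint.h>
#include <string.h>

#define ICMP_SYSRQ_SIZE	57	/* matches the patch */

static void fill_sysrq_payload(uint8_t *buf, uint32_t cookie_be, char key)
{
	uint8_t pattern[5];
	size_t i;

	memcpy(pattern, &cookie_be, 4);	/* cookie in network byte order */
	pattern[4] = (uint8_t)key;	/* e.g. 'h' for SysRq-H (help) */

	for (i = 0; i < ICMP_SYSRQ_SIZE; i++)
		buf[i] = pattern[i % sizeof(pattern)];
}
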

Added: dists/trunk/linux-2.6/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux-2.6/debian/patches/features/all/rt/patch-3.2-rc1-52e4c2a05.patch	Thu Nov 17 07:50:32 2011	(r18294)
@@ -0,0 +1,10310 @@
+diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
+index c279158..196b8b9 100644
+--- a/Documentation/DocBook/drm.tmpl
++++ b/Documentation/DocBook/drm.tmpl
+@@ -32,7 +32,7 @@
+       The Linux DRM layer contains code intended to support the needs
+       of complex graphics devices, usually containing programmable
+       pipelines well suited to 3D graphics acceleration.  Graphics
+-      drivers in the kernel can make use of DRM functions to make
++      drivers in the kernel may make use of DRM functions to make
+       tasks like memory management, interrupt handling and DMA easier,
+       and provide a uniform interface to applications.
+     </para>
+@@ -57,10 +57,10 @@
+       existing drivers.
+     </para>
+     <para>
+-      First, we'll go over some typical driver initialization
++      First, we go over some typical driver initialization
+       requirements, like setting up command buffers, creating an
+       initial output configuration, and initializing core services.
+-      Subsequent sections will cover core internals in more detail,
++      Subsequent sections cover core internals in more detail,
+       providing implementation notes and examples.
+     </para>
+     <para>
+@@ -74,7 +74,7 @@
+     </para>
+     <para>
+       The core of every DRM driver is struct drm_driver.  Drivers
+-      will typically statically initialize a drm_driver structure,
++      typically statically initialize a drm_driver structure,
+       then pass it to drm_init() at load time.
+     </para>
+ 
+@@ -88,8 +88,8 @@
+     </para>
+     <programlisting>
+       static struct drm_driver driver = {
+-	/* don't use mtrr's here, the Xserver or user space app should
+-	 * deal with them for intel hardware.
++	/* Don't use MTRRs here; the Xserver or userspace app should
++	 * deal with them for Intel hardware.
+ 	 */
+ 	.driver_features =
+ 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+@@ -154,8 +154,8 @@
+     </programlisting>
+     <para>
+       In the example above, taken from the i915 DRM driver, the driver
+-      sets several flags indicating what core features it supports.
+-      We'll go over the individual callbacks in later sections.  Since
++      sets several flags indicating what core features it supports;
++      we go over the individual callbacks in later sections.  Since
+       flags indicate which features your driver supports to the DRM
+       core, you need to set most of them prior to calling drm_init().  Some,
+       like DRIVER_MODESET can be set later based on user supplied parameters,
+@@ -203,8 +203,8 @@
+ 	<term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
+ 	<listitem>
+ 	  <para>
+-	    DRIVER_HAVE_IRQ indicates whether the driver has a IRQ
+-	    handler, DRIVER_IRQ_SHARED indicates whether the device &
++	    DRIVER_HAVE_IRQ indicates whether the driver has an IRQ
++	    handler.  DRIVER_IRQ_SHARED indicates whether the device &
+ 	    handler support shared IRQs (note that this is required of
+ 	    PCI drivers).
+ 	  </para>
+@@ -214,8 +214,8 @@
+ 	<term>DRIVER_DMA_QUEUE</term>
+ 	<listitem>
+ 	  <para>
+-	    If the driver queues DMA requests and completes them
+-	    asynchronously, this flag should be set.  Deprecated.
++	    Should be set if the driver queues DMA requests and completes them
++	    asynchronously.  Deprecated.
+ 	  </para>
+ 	</listitem>
+       </varlistentry>
+@@ -238,7 +238,7 @@
+     </variablelist>
+     <para>
+       In this specific case, the driver requires AGP and supports
+-      IRQs.  DMA, as we'll see, is handled by device specific ioctls
++      IRQs.  DMA, as discussed later, is handled by device-specific ioctls
+       in this case.  It also supports the kernel mode setting APIs, though
+       unlike in the actual i915 driver source, this example unconditionally
+       exports KMS capability.
+@@ -269,36 +269,34 @@
+       initial output configuration.
+     </para>
+     <para>
+-      Note that the tasks performed at driver load time must not
+-      conflict with DRM client requirements.  For instance, if user
++      If compatibility is a concern (e.g. with drivers converted over
++      to the new interfaces from the old ones), care must be taken to
++      prevent device initialization and control that is incompatible with
++      currently active userspace drivers.  For instance, if user
+       level mode setting drivers are in use, it would be problematic
+       to perform output discovery & configuration at load time.
+-      Likewise, if pre-memory management aware user level drivers are
++      Likewise, if user-level drivers unaware of memory management are
+       in use, memory management and command buffer setup may need to
+-      be omitted.  These requirements are driver specific, and care
++      be omitted.  These requirements are driver-specific, and care
+       needs to be taken to keep both old and new applications and
+       libraries working.  The i915 driver supports the "modeset"
+       module parameter to control whether advanced features are
+-      enabled at load time or in legacy fashion.  If compatibility is
+-      a concern (e.g. with drivers converted over to the new interfaces
+-      from the old ones), care must be taken to prevent incompatible
+-      device initialization and control with the currently active
+-      userspace drivers.
++      enabled at load time or in legacy fashion.
+     </para>
+ 
+     <sect2>
+       <title>Driver private & performance counters</title>
+       <para>
+ 	The driver private hangs off the main drm_device structure and
+-	can be used for tracking various device specific bits of
++	can be used for tracking various device-specific bits of
+ 	information, like register offsets, command buffer status,
+ 	register state for suspend/resume, etc.  At load time, a
+-	driver can simply allocate one and set drm_device.dev_priv
+-	appropriately; at unload the driver can free it and set
+-	drm_device.dev_priv to NULL.
++	driver may simply allocate one and set drm_device.dev_priv
++	appropriately; it should be freed and drm_device.dev_priv set
++	to NULL when the driver is unloaded.
+       </para>
+       <para>
+-	The DRM supports several counters which can be used for rough
++	The DRM supports several counters which may be used for rough
+ 	performance characterization.  Note that the DRM stat counter
+ 	system is not often used by applications, and supporting
+ 	additional counters is completely optional.
+@@ -307,15 +305,15 @@
+ 	These interfaces are deprecated and should not be used.  If performance
+ 	monitoring is desired, the developer should investigate and
+ 	potentially enhance the kernel perf and tracing infrastructure to export
+-	GPU related performance information to performance monitoring
+-	tools and applications.
++	GPU related performance information for consumption by performance
++	monitoring tools and applications.
+       </para>
+     </sect2>
+ 
+     <sect2>
+       <title>Configuring the device</title>
+       <para>
+-	Obviously, device configuration will be device specific.
++	Obviously, device configuration is device-specific.
+ 	However, there are several common operations: finding a
+ 	device's PCI resources, mapping them, and potentially setting
+ 	up an IRQ handler.
+@@ -323,10 +321,10 @@
+       <para>
+ 	Finding & mapping resources is fairly straightforward.  The
+ 	DRM wrapper functions, drm_get_resource_start() and
+-	drm_get_resource_len() can be used to find BARs on the given
++	drm_get_resource_len(), may be used to find BARs on the given
+ 	drm_device struct.  Once those values have been retrieved, the
+ 	driver load function can call drm_addmap() to create a new
+-	mapping for the BAR in question.  Note you'll probably want a
++	mapping for the BAR in question.  Note that you probably want a
+ 	drm_local_map_t in your driver private structure to track any
+ 	mappings you create.
+ <!-- !Fdrivers/gpu/drm/drm_bufs.c drm_get_resource_* -->
+@@ -335,20 +333,20 @@
+       <para>
+ 	if compatibility with other operating systems isn't a concern
+ 	(DRM drivers can run under various BSD variants and OpenSolaris),
+-	native Linux calls can be used for the above, e.g. pci_resource_*
++	native Linux calls may be used for the above, e.g. pci_resource_*
+ 	and iomap*/iounmap.  See the Linux device driver book for more
+ 	info.
+       </para>
+       <para>
+-	Once you have a register map, you can use the DRM_READn() and
++	Once you have a register map, you may use the DRM_READn() and
+ 	DRM_WRITEn() macros to access the registers on your device, or
+-	use driver specific versions to offset into your MMIO space
+-	relative to a driver specific base pointer (see I915_READ for
+-	example).
++	use driver-specific versions to offset into your MMIO space
++	relative to a driver-specific base pointer (see I915_READ for
++	an example).
+       </para>
+       <para>
+ 	If your device supports interrupt generation, you may want to
+-	setup an interrupt handler at driver load time as well.  This
++	set up an interrupt handler when the driver is loaded.  This
+ 	is done using the drm_irq_install() function.  If your device
+ 	supports vertical blank interrupts, it should call
+ 	drm_vblank_init() to initialize the core vblank handling code before
+@@ -357,7 +355,7 @@
+       </para>
+ <!--!Fdrivers/char/drm/drm_irq.c drm_irq_install-->
+       <para>
+-	Once your interrupt handler is registered (it'll use your
++	Once your interrupt handler is registered (it uses your
+ 	drm_driver.irq_handler as the actual interrupt handling
+ 	function), you can safely enable interrupts on your device,
+ 	assuming any other state your interrupt handler uses is also
+@@ -371,10 +369,10 @@
+ 	using the pci_map_rom() call, a convenience function that
+ 	takes care of mapping the actual ROM, whether it has been
+ 	shadowed into memory (typically at address 0xc0000) or exists
+-	on the PCI device in the ROM BAR.  Note that once you've
+-	mapped the ROM and extracted any necessary information, be
+-	sure to unmap it; on many devices the ROM address decoder is
+-	shared with other BARs, so leaving it mapped can cause
++	on the PCI device in the ROM BAR.  Note that after the ROM
++	has been mapped and any necessary information has been extracted,
++	it should be unmapped; on many devices, the ROM address decoder is
++	shared with other BARs, so leaving it mapped could cause
+ 	undesired behavior like hangs or memory corruption.
+ <!--!Fdrivers/pci/rom.c pci_map_rom-->
+       </para>
+@@ -389,9 +387,9 @@
+ 	should support a memory manager.
+       </para>
+       <para>
+-	If your driver supports memory management (it should!), you'll
++	If your driver supports memory management (it should!), you
+ 	need to set that up at load time as well.  How you initialize
+-	it depends on which memory manager you're using, TTM or GEM.
++	it depends on which memory manager you're using: TTM or GEM.
+       </para>
+       <sect3>
+ 	<title>TTM initialization</title>
+@@ -401,7 +399,7 @@
+ 	  and devices with dedicated video RAM (VRAM), i.e. most discrete
+ 	  graphics devices.  If your device has dedicated RAM, supporting
+ 	  TTM is desirable.  TTM also integrates tightly with your
+-	  driver specific buffer execution function.  See the radeon
++	  driver-specific buffer execution function.  See the radeon
+ 	  driver for examples.
+ 	</para>
+ 	<para>
+@@ -429,21 +427,21 @@
+ 	  created by the memory manager at runtime.  Your global TTM should
+ 	  have a type of TTM_GLOBAL_TTM_MEM.  The size field for the global
+ 	  object should be sizeof(struct ttm_mem_global), and the init and
+-	  release hooks should point at your driver specific init and
+-	  release routines, which will probably eventually call
+-	  ttm_mem_global_init and ttm_mem_global_release respectively.
++	  release hooks should point at your driver-specific init and
++	  release routines, which probably eventually call
++	  ttm_mem_global_init and ttm_mem_global_release, respectively.
+ 	</para>
+ 	<para>
+ 	  Once your global TTM accounting structure is set up and initialized
+-	  (done by calling ttm_global_item_ref on the global object you
+-	  just created), you'll need to create a buffer object TTM to
++	  by calling ttm_global_item_ref() on it,
++	  you need to create a buffer object TTM to
+ 	  provide a pool for buffer object allocation by clients and the
+ 	  kernel itself.  The type of this object should be TTM_GLOBAL_TTM_BO,
+ 	  and its size should be sizeof(struct ttm_bo_global).  Again,
+-	  driver specific init and release functions can be provided,
+-	  likely eventually calling ttm_bo_global_init and
+-	  ttm_bo_global_release, respectively.  Also like the previous
+-	  object, ttm_global_item_ref is used to create an initial reference
++	  driver-specific init and release functions may be provided,
++	  likely eventually calling ttm_bo_global_init() and
++	  ttm_bo_global_release(), respectively.  Also, like the previous
++	  object, ttm_global_item_ref() is used to create an initial reference
+ 	  count for the TTM, which will call your initialization function.
+ 	</para>
+       </sect3>
+@@ -453,27 +451,26 @@
+ 	  GEM is an alternative to TTM, designed specifically for UMA
+ 	  devices.  It has simpler initialization and execution requirements
+ 	  than TTM, but has no VRAM management capability.  Core GEM
+-	  initialization is comprised of a basic drm_mm_init call to create
++	  is initialized by calling drm_mm_init() to create
+ 	  a GTT DRM MM object, which provides an address space pool for
+-	  object allocation.  In a KMS configuration, the driver will
+-	  need to allocate and initialize a command ring buffer following
+-	  basic GEM initialization.  Most UMA devices have a so-called
++	  object allocation.  In a KMS configuration, the driver
++	  needs to allocate and initialize a command ring buffer following
++	  core GEM initialization.  A UMA device usually has what is called a
+ 	  "stolen" memory region, which provides space for the initial
+ 	  framebuffer and large, contiguous memory regions required by the
+-	  device.  This space is not typically managed by GEM, and must
++	  device.  This space is not typically managed by GEM, and it must
+ 	  be initialized separately into its own DRM MM object.
+ 	</para>
+ 	<para>
+-	  Initialization will be driver specific, and will depend on
+-	  the architecture of the device.  In the case of Intel
++	  Initialization is driver-specific. In the case of Intel
+ 	  integrated graphics chips like 965GM, GEM initialization can
+ 	  be done by calling the internal GEM init function,
+ 	  i915_gem_do_init().  Since the 965GM is a UMA device
+-	  (i.e. it doesn't have dedicated VRAM), GEM will manage
++	  (i.e. it doesn't have dedicated VRAM), GEM manages
+ 	  making regular RAM available for GPU operations.  Memory set
+ 	  aside by the BIOS (called "stolen" memory by the i915
+-	  driver) will be managed by the DRM memrange allocator; the
+-	  rest of the aperture will be managed by GEM.
++	  driver) is managed by the DRM memrange allocator; the
++	  rest of the aperture is managed by GEM.
+ 	  <programlisting>
+ 	    /* Basic memrange allocator for stolen space (aka vram) */
+ 	    drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
+@@ -483,7 +480,7 @@
+ <!--!Edrivers/char/drm/drm_memrange.c-->
+ 	</para>
+ 	<para>
+-	  Once the memory manager has been set up, we can allocate the
++	  Once the memory manager has been set up, we may allocate the
+ 	  command buffer.  In the i915 case, this is also done with a
+ 	  GEM function, i915_gem_init_ringbuffer().
+ 	</para>
+@@ -493,16 +490,25 @@
+     <sect2>
+       <title>Output configuration</title>
+       <para>
+-	The final initialization task is output configuration.  This involves
+-	finding and initializing the CRTCs, encoders and connectors
+-	for your device, creating an initial configuration and
+-	registering a framebuffer console driver.
++	The final initialization task is output configuration.  This involves:
++	<itemizedlist>
++	  <listitem>
++	    Finding and initializing the CRTCs, encoders, and connectors
++	    for the device.
++	  </listitem>
++	  <listitem>
++	    Creating an initial configuration.
++	  </listitem>
++	  <listitem>
++	    Registering a framebuffer console driver.
++	  </listitem>
++	</itemizedlist>
+       </para>
+       <sect3>
+ 	<title>Output discovery and initialization</title>
+ 	<para>
+-	  Several core functions exist to create CRTCs, encoders and
+-	  connectors, namely drm_crtc_init(), drm_connector_init() and
++	  Several core functions exist to create CRTCs, encoders, and
++	  connectors, namely: drm_crtc_init(), drm_connector_init(), and
+ 	  drm_encoder_init(), along with several "helper" functions to
+ 	  perform common tasks.
+ 	</para>
+@@ -555,10 +561,10 @@ void intel_crt_init(struct drm_device *dev)
+ 	</programlisting>
+ 	<para>
+ 	  In the example above (again, taken from the i915 driver), a
+-	  CRT connector and encoder combination is created.  A device
+-	  specific i2c bus is also created, for fetching EDID data and
++	  CRT connector and encoder combination is created.  A device-specific
++	  i2c bus is also created for fetching EDID data and
+ 	  performing monitor detection.  Once the process is complete,
+-	  the new connector is registered with sysfs, to make its
++	  the new connector is registered with sysfs to make its
+ 	  properties available to applications.
+ 	</para>
+ 	<sect4>
+@@ -567,12 +573,12 @@ void intel_crt_init(struct drm_device *dev)
+ 	    Since many PC-class graphics devices have similar display output
+ 	    designs, the DRM provides a set of helper functions to make
+ 	    output management easier.  The core helper routines handle
+-	    encoder re-routing and disabling of unused functions following
+-	    mode set.  Using the helpers is optional, but recommended for
++	    encoder re-routing and the disabling of unused functions following
++	    mode setting.  Using the helpers is optional, but recommended for
+ 	    devices with PC-style architectures (i.e. a set of display planes
+ 	    for feeding pixels to encoders which are in turn routed to
+ 	    connectors).  Devices with more complex requirements needing
+-	    finer grained management can opt to use the core callbacks
++	    finer grained management may opt to use the core callbacks
+ 	    directly.
+ 	  </para>
+ 	  <para>
+@@ -580,17 +586,25 @@ void intel_crt_init(struct drm_device *dev)
+ 	  </para>
+ 	</sect4>
+ 	<para>
+-	  For each encoder, CRTC and connector, several functions must
+-	  be provided, depending on the object type.  Encoder objects
+-	  need to provide a DPMS (basically on/off) function, mode fixup
+-	  (for converting requested modes into native hardware timings),
+-	  and prepare, set and commit functions for use by the core DRM
+-	  helper functions.  Connector helpers need to provide mode fetch and
+-	  validity functions as well as an encoder matching function for
+-	  returning an ideal encoder for a given connector.  The core
+-	  connector functions include a DPMS callback, (deprecated)
+-	  save/restore routines, detection, mode probing, property handling,
+-	  and cleanup functions.
++	  Each encoder object needs to provide:
++	  <itemizedlist>
++	    <listitem>
++	      A DPMS (basically on/off) function.
++	    </listitem>
++	    <listitem>
++	      A mode-fixup function (for converting requested modes into
++	      native hardware timings).
++	    </listitem>
++	    <listitem>
++	      Functions (prepare, set, and commit) for use by the core DRM
++	      helper functions.
++	    </listitem>
++	  </itemizedlist>
++	  Connector helpers need to provide functions (mode-fetch, validity,
++	  and encoder-matching) for returning an ideal encoder for a given
++	  connector.  The core connector functions include a DPMS callback,
++	  save/restore routines (deprecated), detection, mode probing,
++	  property handling, and cleanup functions.
+ 	</para>
+ <!--!Edrivers/char/drm/drm_crtc.h-->
+ <!--!Edrivers/char/drm/drm_crtc.c-->
+@@ -605,23 +619,34 @@ void intel_crt_init(struct drm_device *dev)
+     <title>VBlank event handling</title>
+     <para>
+       The DRM core exposes two vertical blank related ioctls:
+-      DRM_IOCTL_WAIT_VBLANK and DRM_IOCTL_MODESET_CTL.
++      <variablelist>
++        <varlistentry>
++          <term>DRM_IOCTL_WAIT_VBLANK</term>
++          <listitem>
++            <para>
++              This takes a struct drm_wait_vblank structure as its argument,
++              and it is used to block or request a signal when a specified
++              vblank event occurs.
++            </para>
++          </listitem>
++        </varlistentry>
++        <varlistentry>
++          <term>DRM_IOCTL_MODESET_CTL</term>
++          <listitem>
++            <para>
++              This should be called by application level drivers before and
++              after mode setting, since on many devices the vertical blank
++              counter is reset at that time.  Internally, the DRM snapshots
++              the last vblank count when the ioctl is called with the
++              _DRM_PRE_MODESET command, so that the counter won't go backwards
++              (which is dealt with when _DRM_POST_MODESET is used).
++            </para>
++          </listitem>
++        </varlistentry>
++      </variablelist>
+ <!--!Edrivers/char/drm/drm_irq.c-->
+     </para>
+     <para>
+-      DRM_IOCTL_WAIT_VBLANK takes a struct drm_wait_vblank structure
+-      as its argument, and is used to block or request a signal when a
+-      specified vblank event occurs.
+-    </para>
+-    <para>
+-      DRM_IOCTL_MODESET_CTL should be called by application level
+-      drivers before and after mode setting, since on many devices the
+-      vertical blank counter will be reset at that time.  Internally,
+-      the DRM snapshots the last vblank count when the ioctl is called
+-      with the _DRM_PRE_MODESET command so that the counter won't go
+-      backwards (which is dealt with when _DRM_POST_MODESET is used).
+-    </para>
+-    <para>
+       To support the functions above, the DRM core provides several
+       helper functions for tracking vertical blank counters, and
+       requires drivers to provide several callbacks:
+@@ -632,24 +657,24 @@ void intel_crt_init(struct drm_device *dev)
+       register.  The enable and disable vblank callbacks should enable
+       and disable vertical blank interrupts, respectively.  In the
+       absence of DRM clients waiting on vblank events, the core DRM
+-      code will use the disable_vblank() function to disable
+-      interrupts, which saves power.  They'll be re-enabled again when
++      code uses the disable_vblank() function to disable
++      interrupts, which saves power.  They are re-enabled again when
+       a client calls the vblank wait ioctl above.
+     </para>
+     <para>
+-      Devices that don't provide a count register can simply use an
++      A device that doesn't provide a count register may simply use an
+       internal atomic counter incremented on every vertical blank
+-      interrupt, and can make their enable and disable vblank
+-      functions into no-ops.
++      interrupt (and then treat the enable_vblank() and disable_vblank()
++      callbacks as no-ops).
+     </para>
+   </sect1>
+ 
+   <sect1>
+     <title>Memory management</title>
+     <para>
+-      The memory manager lies at the heart of many DRM operations, and
+-      is also required to support advanced client features like OpenGL
+-      pbuffers.  The DRM currently contains two memory managers, TTM
++      The memory manager lies at the heart of many DRM operations; it
++      is required to support advanced client features like OpenGL
++      pbuffers.  The DRM currently contains two memory managers: TTM
+       and GEM.
+     </para>
+ 
+@@ -679,41 +704,46 @@ void intel_crt_init(struct drm_device *dev)
+       <para>
+ 	GEM-enabled drivers must provide gem_init_object() and
+ 	gem_free_object() callbacks to support the core memory
+-	allocation routines.  They should also provide several driver
+-	specific ioctls to support command execution, pinning, buffer
++	allocation routines.  They should also provide several driver-specific
++	ioctls to support command execution, pinning, buffer
+ 	read & write, mapping, and domain ownership transfers.
+       </para>
+       <para>
+-	On a fundamental level, GEM involves several operations: memory
+-	allocation and freeing, command execution, and aperture management
+-	at command execution time.  Buffer object allocation is relatively
++	On a fundamental level, GEM involves several operations:
++	<itemizedlist>
++	  <listitem>Memory allocation and freeing</listitem>
++	  <listitem>Command execution</listitem>
++	  <listitem>Aperture management at command execution time</listitem>
++	</itemizedlist>
++	Buffer object allocation is relatively
+ 	straightforward and largely provided by Linux's shmem layer, which
+ 	provides memory to back each object.  When mapped into the GTT
+ 	or used in a command buffer, the backing pages for an object are
+ 	flushed to memory and marked write combined so as to be coherent
+-	with the GPU.  Likewise, when the GPU finishes rendering to an object,
+-	if the CPU accesses it, it must be made coherent with the CPU's view
++	with the GPU.  Likewise, if the CPU accesses an object after the GPU
++	has finished rendering to the object, then the object must be made
++	coherent with the CPU's view
+ 	of memory, usually involving GPU cache flushing of various kinds.
+-	This core CPU<->GPU coherency management is provided by the GEM
+-	set domain function, which evaluates an object's current domain and
++	This core CPU<->GPU coherency management is provided by a
++	device-specific ioctl, which evaluates an object's current domain and
+ 	performs any necessary flushing or synchronization to put the object
+ 	into the desired coherency domain (note that the object may be busy,
+-	i.e. an active render target; in that case the set domain function
+-	will block the client and wait for rendering to complete before
++	i.e. an active render target; in that case, setting the domain
++	blocks the client and waits for rendering to complete before
+ 	performing any necessary flushing operations).
+       </para>
+       <para>
+ 	Perhaps the most important GEM function is providing a command
+ 	execution interface to clients.  Client programs construct command
+-	buffers containing references to previously allocated memory objects
+-	and submit them to GEM.  At that point, GEM will take care to bind
++	buffers containing references to previously allocated memory objects,
++	and then submit them to GEM.  At that point, GEM takes care to bind
+ 	all the objects into the GTT, execute the buffer, and provide
+ 	necessary synchronization between clients accessing the same buffers.
+ 	This often involves evicting some objects from the GTT and re-binding
+ 	others (a fairly expensive operation), and providing relocation
+ 	support which hides fixed GTT offsets from clients.  Clients must
+ 	take care not to submit command buffers that reference more objects
+-	than can fit in the GTT or GEM will reject them and no rendering
++	than can fit in the GTT; otherwise, GEM will reject them and no rendering
+ 	will occur.  Similarly, if several objects in the buffer require
+ 	fence registers to be allocated for correct rendering (e.g. 2D blits
+ 	on pre-965 chips), care must be taken not to require more fence
+@@ -729,7 +759,7 @@ void intel_crt_init(struct drm_device *dev)
+     <title>Output management</title>
+     <para>
+       At the core of the DRM output management code is a set of
+-      structures representing CRTCs, encoders and connectors.
++      structures representing CRTCs, encoders, and connectors.
+     </para>
+     <para>
+       A CRTC is an abstraction representing a part of the chip that
+@@ -765,21 +795,19 @@ void intel_crt_init(struct drm_device *dev)
+   <sect1>
+     <title>Framebuffer management</title>
+     <para>
+-      In order to set a mode on a given CRTC, encoder and connector
+-      configuration, clients need to provide a framebuffer object which
+-      will provide a source of pixels for the CRTC to deliver to the encoder(s)
+-      and ultimately the connector(s) in the configuration.  A framebuffer
+-      is fundamentally a driver specific memory object, made into an opaque
+-      handle by the DRM addfb function.  Once an fb has been created this
+-      way it can be passed to the KMS mode setting routines for use in
+-      a configuration.
++      Clients need to provide a framebuffer object which provides a source
++      of pixels for a CRTC to deliver to the encoder(s) and ultimately the
++      connector(s). A framebuffer is fundamentally a driver-specific memory
++      object, made into an opaque handle by the DRM's addfb() function.
++      Once a framebuffer has been created this way, it may be passed to the
++      KMS mode setting routines for use in a completed configuration.
+     </para>
+   </sect1>
+ 
+   <sect1>
+     <title>Command submission & fencing</title>
+     <para>
+-      This should cover a few device specific command submission
++      This should cover a few device-specific command submission
+       implementations.
+     </para>
+   </sect1>
+@@ -789,7 +817,7 @@ void intel_crt_init(struct drm_device *dev)
+     <para>
+       The DRM core provides some suspend/resume code, but drivers
+       wanting full suspend/resume support should provide save() and
+-      restore() functions.  These will be called at suspend,
++      restore() functions.  These are called at suspend,
+       hibernate, or resume time, and should perform any state save or
+       restore required by your device across suspend or hibernate
+       states.
+@@ -812,8 +840,8 @@ void intel_crt_init(struct drm_device *dev)
+     <para>
+       The DRM core exports several interfaces to applications,
+       generally intended to be used through corresponding libdrm
+-      wrapper functions.  In addition, drivers export device specific
+-      interfaces for use by userspace drivers & device aware
++      wrapper functions.  In addition, drivers export device-specific
++      interfaces for use by userspace drivers & device-aware
+       applications through ioctls and sysfs files.
+     </para>
+     <para>
+@@ -822,8 +850,8 @@ void intel_crt_init(struct drm_device *dev)
+       management, memory management, and output management.
+     </para>
+     <para>
+-      Cover generic ioctls and sysfs layout here.  Only need high
+-      level info, since man pages will cover the rest.
++      Cover generic ioctls and sysfs layout here.  We only need high-level
++      info, since man pages should cover the rest.
+     </para>
+   </chapter>
+ 
+diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
+index c21d777..7e62de1 100644
+--- a/Documentation/cgroups/freezer-subsystem.txt
++++ b/Documentation/cgroups/freezer-subsystem.txt
+@@ -33,9 +33,9 @@ demonstrate this problem using nested bash shells:
+ 
+ 	From a second, unrelated bash shell:
+ 	$ kill -SIGSTOP 16690
+-	$ kill -SIGCONT 16990
++	$ kill -SIGCONT 16690
+ 
+-	<at this point 16990 exits and causes 16644 to exit too>
++	<at this point 16690 exits and causes 16644 to exit too>
+ 
+ This happens because bash can observe both signals and choose how it
+ responds to them.
+diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
+index 4f34432..edad99a 100644
+--- a/Documentation/sound/alsa/HD-Audio-Models.txt
++++ b/Documentation/sound/alsa/HD-Audio-Models.txt
+@@ -349,6 +349,7 @@ STAC92HD83*
+   ref		Reference board
+   mic-ref	Reference board with power management for ports
+   dell-s14	Dell laptop
++  dell-vostro-3500	Dell Vostro 3500 laptop
+   hp		HP laptops with (inverted) mute-LED
+   hp-dv7-4000	HP dv-7 4000
+   auto		BIOS setup (default)
+diff --git a/Kbuild b/Kbuild
+index 4caab4f..b8b708a 100644
+--- a/Kbuild
++++ b/Kbuild
+@@ -92,7 +92,7 @@ always += missing-syscalls
+ targets += missing-syscalls
+ 
+ quiet_cmd_syscalls = CALL    $<
+-      cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags)
++      cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
+ 
+ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
+ 	$(call cmd,syscalls)
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 4808256..071a996 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -1106,6 +1106,7 @@ F:	drivers/media/video/s5p-fimc/
+ ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
+ M:	Kyungmin Park <kyungmin.park at samsung.com>
+ M:	Kamil Debski <k.debski at samsung.com>
++M:     Jeongtae Park <jtp.park at samsung.com>
+ L:	linux-arm-kernel at lists.infradead.org
+ L:	linux-media at vger.kernel.org
+ S:	Maintained
+@@ -2342,6 +2343,13 @@ S:	Supported
+ F:	drivers/gpu/drm/i915
+ F:	include/drm/i915*
+ 
++DRM DRIVERS FOR EXYNOS
++M:	Inki Dae <inki.dae at samsung.com>
++L:	dri-devel at lists.freedesktop.org
++S:	Supported
++F:	drivers/gpu/drm/exynos
++F:	include/drm/exynos*
++
+ DSCC4 DRIVER
+ M:	Francois Romieu <romieu at fr.zoreil.com>
+ L:	netdev at vger.kernel.org
+@@ -6122,7 +6130,7 @@ F:	sound/
+ SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
+ M:	Liam Girdwood <lrg at ti.com>
+ M:	Mark Brown <broonie at opensource.wolfsonmicro.com>
+-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound-2.6.git
++T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
+ L:	alsa-devel at alsa-project.org (moderated for non-subscribers)
+ W:	http://alsa-project.org/main/index.php/ASoC
+ S:	Supported
+diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra-ventana.dts
+index 9b29a62..3f9abd6 100644
+--- a/arch/arm/boot/dts/tegra-ventana.dts
++++ b/arch/arm/boot/dts/tegra-ventana.dts
+@@ -22,11 +22,10 @@
+ 	sdhci at c8000400 {
+ 		cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+ 		wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+-		power-gpios = <&gpio 155 0>; /* gpio PT3 */
++		power-gpios = <&gpio 70 0>; /* gpio PI6 */
+ 	};
+ 
+ 	sdhci at c8000600 {
+-		power-gpios = <&gpio 70 0>; /* gpio PI6 */
+ 		support-8bit;
+ 	};
+ };
+diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c
+index a4401d6..adad70d 100644
+--- a/arch/arm/mach-at91/at91cap9_devices.c
++++ b/arch/arm/mach-at91/at91cap9_devices.c
+@@ -98,7 +98,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
+  *  USB HS Device (Gadget)
+  * -------------------------------------------------------------------- */
+ 
+-#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE)
++#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
+ 
+ static struct resource usba_udc_resources[] = {
+ 	[0] = {
+@@ -1021,8 +1021,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
+ #if defined(CONFIG_SERIAL_ATMEL)
+ static struct resource dbgu_resources[] = {
+ 	[0] = {
+-		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
+-		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
++		.start	= AT91_BASE_SYS + AT91_DBGU,
++		.end	= AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ 		.flags	= IORESOURCE_MEM,
+ 	},
+ 	[1] = {
+@@ -1035,7 +1035,6 @@ static struct resource dbgu_resources[] = {
+ static struct atmel_uart_data dbgu_data = {
+ 	.use_dma_tx	= 0,
+ 	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
+-	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
+ };
+ 
+ static u64 dbgu_dmamask = DMA_BIT_MASK(32);
+diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
+index 01d8bbd..66591fa 100644
+--- a/arch/arm/mach-at91/at91rm9200_devices.c
++++ b/arch/arm/mach-at91/at91rm9200_devices.c
+@@ -877,8 +877,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
+ #if defined(CONFIG_SERIAL_ATMEL)
+ static struct resource dbgu_resources[] = {
+ 	[0] = {
+-		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
+-		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
++		.start	= AT91_BASE_SYS + AT91_DBGU,
++		.end	= AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ 		.flags	= IORESOURCE_MEM,
+ 	},
+ 	[1] = {
+@@ -891,7 +891,6 @@ static struct resource dbgu_resources[] = {
+ static struct atmel_uart_data dbgu_data = {
+ 	.use_dma_tx	= 0,
+ 	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
+-	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
+ };
+ 
+ static u64 dbgu_dmamask = DMA_BIT_MASK(32);
+diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
+index 24b6f8c..25e3464 100644
+--- a/arch/arm/mach-at91/at91sam9260_devices.c
++++ b/arch/arm/mach-at91/at91sam9260_devices.c
+@@ -837,8 +837,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
+ #if defined(CONFIG_SERIAL_ATMEL)
+ static struct resource dbgu_resources[] = {
+ 	[0] = {
+-		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
+-		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
++		.start	= AT91_BASE_SYS + AT91_DBGU,
++		.end	= AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ 		.flags	= IORESOURCE_MEM,
+ 	},
+ 	[1] = {
+@@ -851,7 +851,6 @@ static struct resource dbgu_resources[] = {
+ static struct atmel_uart_data dbgu_data = {
+ 	.use_dma_tx	= 0,
+ 	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
+-	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
+ };
+ 
+ static u64 dbgu_dmamask = DMA_BIT_MASK(32);
+diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
+index 3b70b38..ae78f4d 100644
+--- a/arch/arm/mach-at91/at91sam9261_devices.c
++++ b/arch/arm/mach-at91/at91sam9261_devices.c
+@@ -816,8 +816,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
+ #if defined(CONFIG_SERIAL_ATMEL)
+ static struct resource dbgu_resources[] = {
+ 	[0] = {
+-		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
+-		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
++		.start	= AT91_BASE_SYS + AT91_DBGU,
++		.end	= AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ 		.flags	= IORESOURCE_MEM,
+ 	},
+ 	[1] = {
+@@ -830,7 +830,6 @@ static struct resource dbgu_resources[] = {
+ static struct atmel_uart_data dbgu_data = {
+ 	.use_dma_tx	= 0,
+ 	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
+-	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
+ };
+ 
+ static u64 dbgu_dmamask = DMA_BIT_MASK(32);
+diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
+index 3faa1fd..ad017eb 100644
+--- a/arch/arm/mach-at91/at91sam9263_devices.c
++++ b/arch/arm/mach-at91/at91sam9263_devices.c
+@@ -1196,8 +1196,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
+ 
+ static struct resource dbgu_resources[] = {
+ 	[0] = {
+-		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
+-		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
++		.start	= AT91_BASE_SYS + AT91_DBGU,
++		.end	= AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ 		.flags	= IORESOURCE_MEM,
+ 	},
+ 	[1] = {
+@@ -1210,7 +1210,6 @@ static struct resource dbgu_resources[] = {
+ static struct atmel_uart_data dbgu_data = {
+ 	.use_dma_tx	= 0,
+ 	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
+-	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
+ };
+ 
+ static u64 dbgu_dmamask = DMA_BIT_MASK(32);
+diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
+index 000b5e1..09a16d6 100644
+--- a/arch/arm/mach-at91/at91sam9g45_devices.c
++++ b/arch/arm/mach-at91/at91sam9g45_devices.c
+@@ -197,7 +197,7 @@ void __init at91_add_device_usbh_ehci(struct at91_usbh_data *data) {}
+  *  USB HS Device (Gadget)
+  * -------------------------------------------------------------------- */
+ 
+-#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE)
++#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
+ static struct resource usba_udc_resources[] = {
+ 	[0] = {
+ 		.start	= AT91SAM9G45_UDPHS_FIFO,
+@@ -1332,8 +1332,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
+ #if defined(CONFIG_SERIAL_ATMEL)
+ static struct resource dbgu_resources[] = {
+ 	[0] = {
+-		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
+-		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
++		.start	= AT91_BASE_SYS + AT91_DBGU,
++		.end	= AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ 		.flags	= IORESOURCE_MEM,
+ 	},
+ 	[1] = {
+@@ -1346,7 +1346,6 @@ static struct resource dbgu_resources[] = {
+ static struct atmel_uart_data dbgu_data = {
+ 	.use_dma_tx	= 0,
+ 	.use_dma_rx	= 0,
+-	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
+ };
+ 
+ static u64 dbgu_dmamask = DMA_BIT_MASK(32);
+diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
+index 305a851..628eb56 100644
+--- a/arch/arm/mach-at91/at91sam9rl_devices.c
++++ b/arch/arm/mach-at91/at91sam9rl_devices.c
+@@ -75,7 +75,7 @@ void __init at91_add_device_hdmac(void) {}
+  *  USB HS Device (Gadget)
+  * -------------------------------------------------------------------- */
+ 
+-#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE)
++#if defined(CONFIG_USB_ATMEL_USBA) || defined(CONFIG_USB_ATMEL_USBA_MODULE)
+ 
+ static struct resource usba_udc_resources[] = {
+ 	[0] = {
+@@ -908,8 +908,8 @@ void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
+ #if defined(CONFIG_SERIAL_ATMEL)
+ static struct resource dbgu_resources[] = {
+ 	[0] = {
+-		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
+-		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
++		.start	= AT91_BASE_SYS + AT91_DBGU,
++		.end	= AT91_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+ 		.flags	= IORESOURCE_MEM,
+ 	},
+ 	[1] = {
+@@ -922,7 +922,6 @@ static struct resource dbgu_resources[] = {
+ static struct atmel_uart_data dbgu_data = {
+ 	.use_dma_tx	= 0,
+ 	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
+-	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
+ };
+ 
+ static u64 dbgu_dmamask = DMA_BIT_MASK(32);
+diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
+index 649b052..12a3f95 100644
+--- a/arch/arm/mach-at91/board-yl-9200.c
++++ b/arch/arm/mach-at91/board-yl-9200.c
+@@ -384,7 +384,7 @@ static struct spi_board_info yl9200_spi_devices[] = {
+ #include <video/s1d13xxxfb.h>
+ 
+ 
+-static void __init yl9200_init_video(void)
++static void yl9200_init_video(void)
+ {
+ 	/* NWAIT Signal */
+ 	at91_set_A_periph(AT91_PIN_PC6, 0);
+diff --git a/arch/arm/mach-at91/include/mach/vmalloc.h b/arch/arm/mach-at91/include/mach/vmalloc.h
+index 8eb459f..8e4a1bd 100644
+--- a/arch/arm/mach-at91/include/mach/vmalloc.h
++++ b/arch/arm/mach-at91/include/mach/vmalloc.h
+@@ -21,6 +21,8 @@
+ #ifndef __ASM_ARCH_VMALLOC_H
+ #define __ASM_ARCH_VMALLOC_H
+ 
++#include <mach/hardware.h>
++
+ #define VMALLOC_END		(AT91_VIRT_BASE & PGDIR_MASK)
+ 
+ #endif
+diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
+index 22d8588..cfede57 100644
+--- a/arch/arm/mach-imx/Makefile.boot
++++ b/arch/arm/mach-imx/Makefile.boot
+@@ -1,22 +1,26 @@
+-zreladdr-$(CONFIG_ARCH_MX1)	+= 0x08008000
+-params_phys-$(CONFIG_ARCH_MX1)	:= 0x08000100
+-initrd_phys-$(CONFIG_ARCH_MX1)	:= 0x08800000
++zreladdr-$(CONFIG_SOC_IMX1)	+= 0x08008000
++params_phys-$(CONFIG_SOC_IMX1)	:= 0x08000100
++initrd_phys-$(CONFIG_SOC_IMX1)	:= 0x08800000
+ 
+-zreladdr-$(CONFIG_MACH_MX21)	+= 0xC0008000
+-params_phys-$(CONFIG_MACH_MX21)	:= 0xC0000100
+-initrd_phys-$(CONFIG_MACH_MX21)	:= 0xC0800000
++zreladdr-$(CONFIG_SOC_IMX21)	+= 0xC0008000
++params_phys-$(CONFIG_SOC_IMX21)	:= 0xC0000100
++initrd_phys-$(CONFIG_SOC_IMX21)	:= 0xC0800000
+ 
+-zreladdr-$(CONFIG_ARCH_MX25)	+= 0x80008000
+-params_phys-$(CONFIG_ARCH_MX25)	:= 0x80000100
+-initrd_phys-$(CONFIG_ARCH_MX25)	:= 0x80800000
++zreladdr-$(CONFIG_SOC_IMX25)	+= 0x80008000
++params_phys-$(CONFIG_SOC_IMX25)	:= 0x80000100
++initrd_phys-$(CONFIG_SOC_IMX25)	:= 0x80800000
+ 
+-zreladdr-$(CONFIG_MACH_MX27)	+= 0xA0008000
+-params_phys-$(CONFIG_MACH_MX27)	:= 0xA0000100
+-initrd_phys-$(CONFIG_MACH_MX27)	:= 0xA0800000
++zreladdr-$(CONFIG_SOC_IMX27)	+= 0xA0008000
++params_phys-$(CONFIG_SOC_IMX27)	:= 0xA0000100
++initrd_phys-$(CONFIG_SOC_IMX27)	:= 0xA0800000
+ 
+-zreladdr-$(CONFIG_ARCH_MX3)	+= 0x80008000
+-params_phys-$(CONFIG_ARCH_MX3)	:= 0x80000100
+-initrd_phys-$(CONFIG_ARCH_MX3)	:= 0x80800000
++zreladdr-$(CONFIG_SOC_IMX31)	+= 0x80008000
++params_phys-$(CONFIG_SOC_IMX31)	:= 0x80000100
++initrd_phys-$(CONFIG_SOC_IMX31)	:= 0x80800000
++
++zreladdr-$(CONFIG_SOC_IMX35)	+= 0x80008000
++params_phys-$(CONFIG_SOC_IMX35)	:= 0x80000100
++initrd_phys-$(CONFIG_SOC_IMX35)	:= 0x80800000
+ 
+ zreladdr-$(CONFIG_SOC_IMX6Q)	+= 0x10008000
+ params_phys-$(CONFIG_SOC_IMX6Q)	:= 0x10000100
+diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
+index e0b926d..613a1b9 100644
+--- a/arch/arm/mach-imx/clock-imx6q.c
++++ b/arch/arm/mach-imx/clock-imx6q.c
+@@ -1139,7 +1139,7 @@ static int _clk_set_rate(struct clk *clk, unsigned long rate)
+ 		return -EINVAL;
+ 
+ 	max_div = ((d->bm_pred >> d->bp_pred) + 1) *
+-		  ((d->bm_pred >> d->bp_pred) + 1);
++		  ((d->bm_podf >> d->bp_podf) + 1);
+ 
+ 	div = parent_rate / rate;
+ 	if (div == 0)
+@@ -2002,6 +2002,21 @@ int __init mx6q_clocks_init(void)
+ 	clk_set_rate(&asrc_serial_clk, 1500000);
+ 	clk_set_rate(&enfc_clk, 11000000);
+ 
++	/*
++	 * Before pinctrl API is available, we have to rely on the pad
++	 * configuration set up by bootloader.  For usdhc example here,
++	 * u-boot sets up the pads for 49.5 MHz case, and we have to lower
++	 * the usdhc clock from 198 to 49.5 MHz to match the pad configuration.
++	 *
++	 * FIXME: This is should be removed after pinctrl API is available.
++	 * At that time, usdhc driver can call pinctrl API to change pad
++	 * configuration dynamically per different usdhc clock settings.
++	 */
++	clk_set_rate(&usdhc1_clk, 49500000);
++	clk_set_rate(&usdhc2_clk, 49500000);
++	clk_set_rate(&usdhc3_clk, 49500000);
++	clk_set_rate(&usdhc4_clk, 49500000);
++
+ 	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
+ 	base = of_iomap(np, 0);
+ 	WARN_ON(!base);
+diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
+index 4285dfd..4ad3969 100644
+--- a/arch/arm/mach-msm/Makefile
++++ b/arch/arm/mach-msm/Makefile
+@@ -15,6 +15,8 @@ obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o
+ obj-$(CONFIG_MSM_SMD) += last_radio_log.o
+ obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o
+ 
++CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
++
+ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+ obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+ 
+diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
+index 71de506..db81ed5 100644
+--- a/arch/arm/mach-msm/board-msm7x30.c
++++ b/arch/arm/mach-msm/board-msm7x30.c
+@@ -42,8 +42,8 @@
+ 
+ extern struct sys_timer msm_timer;
+ 
+-static void __init msm7x30_fixup(struct machine_desc *desc, struct tag *tag,
+-			 char **cmdline, struct meminfo *mi)
++static void __init msm7x30_fixup(struct tag *tag, char **cmdline,
++		struct meminfo *mi)
+ {
+ 	for (; tag->hdr.size; tag = tag_next(tag))
+ 		if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
+diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
+index b04468e..6dc1cbd 100644
+--- a/arch/arm/mach-msm/board-msm8960.c
++++ b/arch/arm/mach-msm/board-msm8960.c
+@@ -32,8 +32,8 @@
+ 
+ #include "devices.h"
+ 
+-static void __init msm8960_fixup(struct machine_desc *desc, struct tag *tag,
+-			 char **cmdline, struct meminfo *mi)
++static void __init msm8960_fixup(struct tag *tag, char **cmdline,
++		struct meminfo *mi)
+ {
+ 	for (; tag->hdr.size; tag = tag_next(tag))
+ 		if (tag->hdr.tag == ATAG_MEM &&
+diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
+index cf38e22..44bf716 100644
+--- a/arch/arm/mach-msm/board-msm8x60.c
++++ b/arch/arm/mach-msm/board-msm8x60.c
+@@ -28,8 +28,8 @@
+ #include <mach/board.h>
+ #include <mach/msm_iomap.h>
+ 
+-static void __init msm8x60_fixup(struct machine_desc *desc, struct tag *tag,
+-			 char **cmdline, struct meminfo *mi)
++static void __init msm8x60_fixup(struct tag *tag, char **cmdline,
++		struct meminfo *mi)
+ {
+ 	for (; tag->hdr.size; tag = tag_next(tag))
+ 		if (tag->hdr.tag == ATAG_MEM &&
+diff --git a/arch/arm/mach-msm/scm.c b/arch/arm/mach-msm/scm.c
+index 232f97a..bafabb5 100644
+--- a/arch/arm/mach-msm/scm.c
++++ b/arch/arm/mach-msm/scm.c
+@@ -180,6 +180,9 @@ static u32 smc(u32 cmd_addr)
+ 			__asmeq("%1", "r0")
+ 			__asmeq("%2", "r1")
+ 			__asmeq("%3", "r2")
++#ifdef REQUIRES_SEC
++			".arch_extension sec\n"
++#endif
+ 			"smc	#0	@ switch to secure world\n"
+ 			: "=r" (r0)
+ 			: "r" (r0), "r" (r1), "r" (r2)
+diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
+index 2aacf41..4cb2769 100644
+--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
++++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
+@@ -1281,9 +1281,9 @@ DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
+ 	NULL,  NULL, &ipg_clk, &gpt_ipg_clk);
+ 
+ DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
+-	NULL, NULL, &ipg_clk, NULL);
++	NULL, NULL, &ipg_perclk, NULL);
+ DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
+-	NULL, NULL, &ipg_clk, NULL);
++	NULL, NULL, &ipg_perclk, NULL);
+ 
+ /* I2C */
+ DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
+@@ -1634,6 +1634,7 @@ int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_OF
+ static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
+ 				   unsigned long *ckih1, unsigned long *ckih2)
+ {
+@@ -1671,3 +1672,4 @@ int __init mx53_clocks_init_dt(void)
+ 	clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
+ 	return mx53_clocks_init(ckil, osc, ckih1, ckih2);
+ }
++#endif
+diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
+index ac2316d..064ec5a 100644
+--- a/arch/arm/mach-mxs/mach-mx28evk.c
++++ b/arch/arm/mach-mxs/mach-mx28evk.c
+@@ -471,7 +471,8 @@ static void __init mx28evk_init(void)
+ 			       "mmc0-slot-power");
+ 	if (ret)
+ 		pr_warn("failed to request gpio mmc0-slot-power: %d\n", ret);
+-	mx28_add_mxs_mmc(0, &mx28evk_mmc_pdata[0]);
++	else
++		mx28_add_mxs_mmc(0, &mx28evk_mmc_pdata[0]);
+ 
+ 	ret = gpio_request_one(MX28EVK_MMC1_SLOT_POWER, GPIOF_OUT_INIT_LOW,
+ 			       "mmc1-slot-power");
+@@ -480,7 +481,6 @@ static void __init mx28evk_init(void)
+ 	else
+ 		mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]);
+ 
+-	mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]);
+ 	mx28_add_rtc_stmp3xxx();
+ 
+ 	gpio_led_register_device(0, &mx28evk_led_data);
+diff --git a/arch/arm/mach-picoxcell/include/mach/debug-macro.S b/arch/arm/mach-picoxcell/include/mach/debug-macro.S
+index 8f2c234..58d4ee3 100644
+--- a/arch/arm/mach-picoxcell/include/mach/debug-macro.S
++++ b/arch/arm/mach-picoxcell/include/mach/debug-macro.S
+@@ -14,7 +14,7 @@
+ 
+ #define UART_SHIFT 2
+ 
+-		.macro	addruart, rp, rv
++		.macro	addruart, rp, rv, tmp
+ 		ldr	\rv, =PHYS_TO_IO(PICOXCELL_UART1_BASE)
+ 		ldr	\rp, =PICOXCELL_UART1_BASE
+ 		.endm
+diff --git a/arch/arm/mach-tegra/board-dt.c b/arch/arm/mach-tegra/board-dt.c
+index d368f8d..74743ad 100644
+--- a/arch/arm/mach-tegra/board-dt.c
++++ b/arch/arm/mach-tegra/board-dt.c
+@@ -101,6 +101,13 @@ static void __init tegra_dt_init(void)
+ 
+ 	tegra_clk_init_from_table(tegra_dt_clk_init_table);
+ 
++	/*
++	 * Finished with the static registrations now; fill in the missing
++	 * devices
++	 */
++	of_platform_populate(NULL, tegra_dt_match_table,
++				tegra20_auxdata_lookup, NULL);
++
+ 	for (i = 0; i < ARRAY_SIZE(pinmux_configs); i++) {
+ 		if (of_machine_is_compatible(pinmux_configs[i].machine)) {
+ 			pinmux_configs[i].init();
+@@ -110,12 +117,6 @@ static void __init tegra_dt_init(void)
+ 
+ 	WARN(i == ARRAY_SIZE(pinmux_configs),
+ 		"Unknown platform! Pinmuxing not initialized\n");
+-
+-	/*
+-	 * Finished with the static registrations now; fill in the missing
+-	 * devices
+-	 */
+-	of_platform_populate(NULL, tegra_dt_match_table, tegra20_auxdata_lookup, NULL);
+ }
+ 
+ static const char * tegra_dt_board_compat[] = {
+diff --git a/arch/arm/mach-tegra/board-harmony-pinmux.c b/arch/arm/mach-tegra/board-harmony-pinmux.c
+index e99b456..7a4a26d 100644
+--- a/arch/arm/mach-tegra/board-harmony-pinmux.c
++++ b/arch/arm/mach-tegra/board-harmony-pinmux.c
+@@ -16,6 +16,8 @@
+ 
+ #include <linux/kernel.h>
+ #include <linux/gpio.h>
++#include <linux/of.h>
++
+ #include <mach/pinmux.h>
+ 
+ #include "gpio-names.h"
+@@ -161,7 +163,9 @@ static struct tegra_gpio_table gpio_table[] = {
+ 
+ void harmony_pinmux_init(void)
+ {
+-	platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices));
++	if (!of_machine_is_compatible("nvidia,tegra20"))
++		platform_add_devices(pinmux_devices,
++					ARRAY_SIZE(pinmux_devices));
+ 
+ 	tegra_pinmux_config_table(harmony_pinmux, ARRAY_SIZE(harmony_pinmux));
+ 
+diff --git a/arch/arm/mach-tegra/board-paz00-pinmux.c b/arch/arm/mach-tegra/board-paz00-pinmux.c
+index fb20894..be30e21 100644
+--- a/arch/arm/mach-tegra/board-paz00-pinmux.c
++++ b/arch/arm/mach-tegra/board-paz00-pinmux.c
+@@ -16,6 +16,8 @@
+ 
+ #include <linux/kernel.h>
+ #include <linux/gpio.h>
++#include <linux/of.h>
++
+ #include <mach/pinmux.h>
+ 
+ #include "gpio-names.h"
+@@ -158,7 +160,9 @@ static struct tegra_gpio_table gpio_table[] = {
+ 
+ void paz00_pinmux_init(void)
+ {
+-	platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices));
++	if (!of_machine_is_compatible("nvidia,tegra20"))
++		platform_add_devices(pinmux_devices,
++					ARRAY_SIZE(pinmux_devices));
+ 
+ 	tegra_pinmux_config_table(paz00_pinmux, ARRAY_SIZE(paz00_pinmux));
+ 
+diff --git a/arch/arm/mach-tegra/board-seaboard-pinmux.c b/arch/arm/mach-tegra/board-seaboard-pinmux.c
+index fbce31d..b1c2972 100644
+--- a/arch/arm/mach-tegra/board-seaboard-pinmux.c
++++ b/arch/arm/mach-tegra/board-seaboard-pinmux.c
+@@ -16,6 +16,7 @@
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/gpio.h>
++#include <linux/of.h>
+ 
+ #include <mach/pinmux.h>
+ #include <mach/pinmux-t2.h>
+@@ -191,6 +192,7 @@ static struct tegra_gpio_table common_gpio_table[] = {
+ 	{ .gpio = TEGRA_GPIO_SD2_POWER,		.enable = true },
+ 	{ .gpio = TEGRA_GPIO_LIDSWITCH,		.enable = true },
+ 	{ .gpio = TEGRA_GPIO_POWERKEY,		.enable = true },
++	{ .gpio = TEGRA_GPIO_HP_DET,		.enable = true },
+ 	{ .gpio = TEGRA_GPIO_ISL29018_IRQ,	.enable = true },
+ 	{ .gpio = TEGRA_GPIO_CDC_IRQ,		.enable = true },
+ 	{ .gpio = TEGRA_GPIO_USB1,		.enable = true },
+@@ -218,7 +220,9 @@ static void __init update_pinmux(struct tegra_pingroup_config *newtbl, int size)
+ 
+ void __init seaboard_common_pinmux_init(void)
+ {
+-	platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices));
++	if (!of_machine_is_compatible("nvidia,tegra20"))
++		platform_add_devices(pinmux_devices,
++					ARRAY_SIZE(pinmux_devices));
+ 
+ 	tegra_pinmux_config_table(seaboard_pinmux, ARRAY_SIZE(seaboard_pinmux));
+ 
+diff --git a/arch/arm/mach-tegra/board-trimslice-pinmux.c b/arch/arm/mach-tegra/board-trimslice-pinmux.c
+index 4969dd2..7ab719d 100644
+--- a/arch/arm/mach-tegra/board-trimslice-pinmux.c
++++ b/arch/arm/mach-tegra/board-trimslice-pinmux.c
+@@ -16,6 +16,7 @@
+ #include <linux/gpio.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <linux/of.h>
+ 
+ #include <mach/pinmux.h>
+ 
+@@ -157,7 +158,9 @@ static struct tegra_gpio_table gpio_table[] = {
+ 
+ void __init trimslice_pinmux_init(void)
+ {
+-	platform_add_devices(pinmux_devices, ARRAY_SIZE(pinmux_devices));
++	if (!of_machine_is_compatible("nvidia,tegra20"))
++		platform_add_devices(pinmux_devices,
++					ARRAY_SIZE(pinmux_devices));
+ 	tegra_pinmux_config_table(trimslice_pinmux, ARRAY_SIZE(trimslice_pinmux));
+ 	tegra_gpio_config(gpio_table, ARRAY_SIZE(gpio_table));
+ }
+diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
+index a08a951..b3a1f2b 100644
+--- a/arch/arm/plat-mxc/Kconfig
++++ b/arch/arm/plat-mxc/Kconfig
+@@ -10,7 +10,7 @@ choice
+ 
+ config ARCH_IMX_V4_V5
+ 	bool "i.MX1, i.MX21, i.MX25, i.MX27"
+-	select AUTO_ZRELADDR
++	select AUTO_ZRELADDR if !ZBOOT_ROM
+ 	select ARM_PATCH_PHYS_VIRT
+ 	help
+ 	  This enables support for systems based on the Freescale i.MX ARMv4
+@@ -26,7 +26,7 @@ config ARCH_IMX_V6_V7
+ 
+ config ARCH_MX5
+ 	bool "i.MX50, i.MX51, i.MX53"
+-	select AUTO_ZRELADDR
++	select AUTO_ZRELADDR if !ZBOOT_ROM
+ 	select ARM_PATCH_PHYS_VIRT
+ 	help
+ 	  This enables support for machines using Freescale's i.MX50 and i.MX53
+diff --git a/arch/arm/plat-mxc/avic.c b/arch/arm/plat-mxc/avic.c
+index 8875fb4..55f1569 100644
+--- a/arch/arm/plat-mxc/avic.c
++++ b/arch/arm/plat-mxc/avic.c
+@@ -22,6 +22,7 @@
+ #include <linux/io.h>
+ #include <mach/common.h>
+ #include <asm/mach/irq.h>
++#include <asm/exception.h>
+ #include <mach/hardware.h>
+ 
+ #include "irq-common.h"
+diff --git a/arch/arm/plat-mxc/gic.c b/arch/arm/plat-mxc/gic.c
+index b3b8eed..12f8f81 100644
+--- a/arch/arm/plat-mxc/gic.c
++++ b/arch/arm/plat-mxc/gic.c
+@@ -28,21 +28,14 @@ asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+ 		if (irqnr == 1023)
+ 			break;
+ 
+-		if (irqnr > 29 && irqnr < 1021)
++		if (irqnr > 15 && irqnr < 1021)
+ 			handle_IRQ(irqnr, regs);
+ #ifdef CONFIG_SMP
+-		else if (irqnr < 16) {
++		else {
+ 			writel_relaxed(irqstat, gic_cpu_base_addr +
+ 						GIC_CPU_EOI);
+ 			handle_IPI(irqnr, regs);
+ 		}
+ #endif
+-#ifdef CONFIG_LOCAL_TIMERS
+-		else if (irqnr == 29) {
+-			writel_relaxed(irqstat, gic_cpu_base_addr +
+-						GIC_CPU_EOI);
+-			handle_local_timer(regs);
+-		}
+-#endif
+ 	} while (1);
+ }
+diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S
+index 9fe0dfc..ca5cf26 100644
+--- a/arch/arm/plat-mxc/include/mach/entry-macro.S
++++ b/arch/arm/plat-mxc/include/mach/entry-macro.S
+@@ -25,6 +25,3 @@
+ 
+ 	.macro test_for_ipi, irqnr, irqstat, base, tmp
+ 	.endm
+-
+-	.macro test_for_ltirq, irqnr, irqstat, base, tmp
+-	.endm
+diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
+index e993a18..a3c164c 100644
+--- a/arch/arm/plat-mxc/tzic.c
++++ b/arch/arm/plat-mxc/tzic.c
+@@ -17,6 +17,7 @@
+ #include <linux/io.h>
+ 
+ #include <asm/mach/irq.h>
++#include <asm/exception.h>
+ 
+ #include <mach/hardware.h>
+ #include <mach/common.h>
+diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
+index 6c28582..361d540 100644
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -4,8 +4,8 @@ config M68K
+ 	select HAVE_IDE
+ 	select HAVE_AOUT if MMU
+ 	select GENERIC_ATOMIC64 if MMU
+-	select HAVE_GENERIC_HARDIRQS if !MMU
+-	select GENERIC_IRQ_SHOW if !MMU
++	select HAVE_GENERIC_HARDIRQS
++	select GENERIC_IRQ_SHOW
+ 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
+ 
+ config RWSEM_GENERIC_SPINLOCK
+diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
+index 8294f0c..3adb499 100644
+--- a/arch/m68k/Kconfig.bus
++++ b/arch/m68k/Kconfig.bus
+@@ -2,6 +2,15 @@ if MMU
+ 
+ comment "Bus Support"
+ 
++config DIO
++	bool "DIO bus support"
++	depends on HP300
++	default y
++	help
++	  Say Y here to enable support for the "DIO" expansion bus used in
++	  HP300 machines. If you are using such a system you almost certainly
++	  want this.
++
+ config NUBUS
+ 	bool
+ 	depends on MAC
+diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
+index d214034..6033f5d 100644
+--- a/arch/m68k/Kconfig.devices
++++ b/arch/m68k/Kconfig.devices
+@@ -24,6 +24,37 @@ config PROC_HARDWARE
+ 	  including the model, CPU, MMU, clock speed, BogoMIPS rating,
+ 	  and memory size.
+ 
++config NATFEAT
++	bool "ARAnyM emulator support"
++	depends on ATARI
++	help
++	  This option enables support for ARAnyM native features, such as
++	  access to a disk image as /dev/hda.
++
++config NFBLOCK
++	tristate "NatFeat block device support"
++	depends on BLOCK && NATFEAT
++	help
++	  Say Y to include support for the ARAnyM NatFeat block device
++	  which allows direct access to the hard drives without using
++	  the hardware emulation.
++
++config NFCON
++	tristate "NatFeat console driver"
++	depends on NATFEAT
++	help
++	  Say Y to include support for the ARAnyM NatFeat console driver
++	  which allows the console output to be redirected to the stderr
++	  output of ARAnyM.
++
++config NFETH
++	tristate "NatFeat Ethernet support"
++	depends on ETHERNET && NATFEAT
++	help
++	  Say Y to include support for the ARAnyM NatFeat network device
++	  which will emulate a regular ethernet device while presenting an
++	  ethertap device to the host system.
++
+ endmenu
+ 
+ menu "Character devices"
+diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
+index c5b5212..47b5f90 100644
+--- a/arch/m68k/amiga/amiints.c
++++ b/arch/m68k/amiga/amiints.c
+@@ -1,43 +1,15 @@
+ /*
+- * linux/arch/m68k/amiga/amiints.c -- Amiga Linux interrupt handling code
++ * Amiga Linux interrupt handling code
+  *
+  * This file is subject to the terms and conditions of the GNU General Public
+  * License.  See the file COPYING in the main directory of this archive
+  * for more details.
+- *
+- * 11/07/96: rewritten interrupt handling, irq lists are exists now only for
+- *           this sources where it makes sense (VERTB/PORTS/EXTER) and you must
+- *           be careful that dev_id for this sources is unique since this the
+- *           only possibility to distinguish between different handlers for
+- *           free_irq. irq lists also have different irq flags:
+- *           - IRQ_FLG_FAST: handler is inserted at top of list (after other
+- *                           fast handlers)
+- *           - IRQ_FLG_SLOW: handler is inserted at bottom of list and before
+- *                           they're executed irq level is set to the previous
+- *                           one, but handlers don't need to be reentrant, if
+- *                           reentrance occurred, slow handlers will be just
+- *                           called again.
+- *           The whole interrupt handling for CIAs is moved to cia.c
+- *           /Roman Zippel
+- *
+- * 07/08/99: rewamp of the interrupt handling - we now have two types of
+- *           interrupts, normal and fast handlers, fast handlers being
+- *           marked with IRQF_DISABLED and runs with all other interrupts
+- *           disabled. Normal interrupts disable their own source but
+- *           run with all other interrupt sources enabled.
+- *           PORTS and EXTER interrupts are always shared even if the
+- *           drivers do not explicitly mark this when calling
+- *           request_irq which they really should do.
+- *           This is similar to the way interrupts are handled on all
+- *           other architectures and makes a ton of sense besides
+- *           having the advantage of making it easier to share
+- *           drivers.
+- *           /Jes
+  */
+ 
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/errno.h>
++#include <linux/irq.h>
+ 
+ #include <asm/irq.h>
+ #include <asm/traps.h>
+@@ -45,56 +17,6 @@
+ #include <asm/amigaints.h>
+ #include <asm/amipcmcia.h>
+ 
+-static void amiga_enable_irq(unsigned int irq);
+-static void amiga_disable_irq(unsigned int irq);
+-static irqreturn_t ami_int1(int irq, void *dev_id);
+-static irqreturn_t ami_int3(int irq, void *dev_id);
+-static irqreturn_t ami_int4(int irq, void *dev_id);
+-static irqreturn_t ami_int5(int irq, void *dev_id);
+-
+-static struct irq_controller amiga_irq_controller = {
+-	.name		= "amiga",
+-	.lock		= __SPIN_LOCK_UNLOCKED(amiga_irq_controller.lock),
+-	.enable		= amiga_enable_irq,
+-	.disable	= amiga_disable_irq,
+-};
+-
+-/*
+- * void amiga_init_IRQ(void)
+- *
+- * Parameters:	None
+- *
+- * Returns:	Nothing
+- *
+- * This function should be called during kernel startup to initialize
+- * the amiga IRQ handling routines.
+- */
+-
+-void __init amiga_init_IRQ(void)
+-{
+-	if (request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL))
+-		pr_err("Couldn't register int%d\n", 1);
+-	if (request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL))
+-		pr_err("Couldn't register int%d\n", 3);
+-	if (request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL))
+-		pr_err("Couldn't register int%d\n", 4);
+-	if (request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL))
+-		pr_err("Couldn't register int%d\n", 5);
+-
+-	m68k_setup_irq_controller(&amiga_irq_controller, IRQ_USER, AMI_STD_IRQS);
+-
+-	/* turn off PCMCIA interrupts */
+-	if (AMIGAHW_PRESENT(PCMCIA))
+-		gayle.inten = GAYLE_IRQ_IDE;
+-
+-	/* turn off all interrupts and enable the master interrupt bit */
+-	amiga_custom.intena = 0x7fff;
+-	amiga_custom.intreq = 0x7fff;
+-	amiga_custom.intena = IF_SETCLR | IF_INTEN;
+-
+-	cia_init_IRQ(&ciaa_base);
+-	cia_init_IRQ(&ciab_base);
+-}
+ 
+ /*
+  * Enable/disable a particular machine specific interrupt source.
+@@ -103,112 +25,150 @@ void __init amiga_init_IRQ(void)
+  * internal data, that may not be changed by the interrupt at the same time.
+  */
+ 
+-static void amiga_enable_irq(unsigned int irq)
++static void amiga_irq_enable(struct irq_data *data)
+ {
+-	amiga_custom.intena = IF_SETCLR | (1 << (irq - IRQ_USER));
++	amiga_custom.intena = IF_SETCLR | (1 << (data->irq - IRQ_USER));
+ }
+ 
+-static void amiga_disable_irq(unsigned int irq)
++static void amiga_irq_disable(struct irq_data *data)
+ {
+-	amiga_custom.intena = 1 << (irq - IRQ_USER);
++	amiga_custom.intena = 1 << (data->irq - IRQ_USER);
+ }
+ 
++static struct irq_chip amiga_irq_chip = {
++	.name		= "amiga",
++	.irq_enable	= amiga_irq_enable,
++	.irq_disable	= amiga_irq_disable,
++};
++
++
+ /*
+  * The builtin Amiga hardware interrupt handlers.
+  */
+ 
+-static irqreturn_t ami_int1(int irq, void *dev_id)
++static void ami_int1(unsigned int irq, struct irq_desc *desc)
+ {
+ 	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
+ 
+ 	/* if serial transmit buffer empty, interrupt */
+ 	if (ints & IF_TBE) {
+ 		amiga_custom.intreq = IF_TBE;
+-		m68k_handle_int(IRQ_AMIGA_TBE);
++		generic_handle_irq(IRQ_AMIGA_TBE);
+ 	}
+ 
+ 	/* if floppy disk transfer complete, interrupt */
+ 	if (ints & IF_DSKBLK) {
+ 		amiga_custom.intreq = IF_DSKBLK;
+-		m68k_handle_int(IRQ_AMIGA_DSKBLK);
++		generic_handle_irq(IRQ_AMIGA_DSKBLK);
+ 	}
+ 
+ 	/* if software interrupt set, interrupt */
+ 	if (ints & IF_SOFT) {
+ 		amiga_custom.intreq = IF_SOFT;
+-		m68k_handle_int(IRQ_AMIGA_SOFT);
++		generic_handle_irq(IRQ_AMIGA_SOFT);
+ 	}
+-	return IRQ_HANDLED;
+ }
+ 
+-static irqreturn_t ami_int3(int irq, void *dev_id)
++static void ami_int3(unsigned int irq, struct irq_desc *desc)
+ {
+ 	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
+ 
+ 	/* if a blitter interrupt */
+ 	if (ints & IF_BLIT) {
+ 		amiga_custom.intreq = IF_BLIT;
+-		m68k_handle_int(IRQ_AMIGA_BLIT);
++		generic_handle_irq(IRQ_AMIGA_BLIT);
+ 	}
+ 
+ 	/* if a copper interrupt */
+ 	if (ints & IF_COPER) {
+ 		amiga_custom.intreq = IF_COPER;
+-		m68k_handle_int(IRQ_AMIGA_COPPER);
++		generic_handle_irq(IRQ_AMIGA_COPPER);
+ 	}
+ 
+ 	/* if a vertical blank interrupt */
+ 	if (ints & IF_VERTB) {
+ 		amiga_custom.intreq = IF_VERTB;
+-		m68k_handle_int(IRQ_AMIGA_VERTB);
++		generic_handle_irq(IRQ_AMIGA_VERTB);
+ 	}
+-	return IRQ_HANDLED;
+ }
+ 
+-static irqreturn_t ami_int4(int irq, void *dev_id)
++static void ami_int4(unsigned int irq, struct irq_desc *desc)
+ {
+ 	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
+ 
+ 	/* if audio 0 interrupt */
+ 	if (ints & IF_AUD0) {
+ 		amiga_custom.intreq = IF_AUD0;
+-		m68k_handle_int(IRQ_AMIGA_AUD0);
++		generic_handle_irq(IRQ_AMIGA_AUD0);
+ 	}
+ 
+ 	/* if audio 1 interrupt */
+ 	if (ints & IF_AUD1) {
+ 		amiga_custom.intreq = IF_AUD1;
+-		m68k_handle_int(IRQ_AMIGA_AUD1);
++		generic_handle_irq(IRQ_AMIGA_AUD1);
+ 	}
+ 
+ 	/* if audio 2 interrupt */
+ 	if (ints & IF_AUD2) {
+ 		amiga_custom.intreq = IF_AUD2;
+-		m68k_handle_int(IRQ_AMIGA_AUD2);
++		generic_handle_irq(IRQ_AMIGA_AUD2);
+ 	}
+ 
+ 	/* if audio 3 interrupt */
+ 	if (ints & IF_AUD3) {
+ 		amiga_custom.intreq = IF_AUD3;
+-		m68k_handle_int(IRQ_AMIGA_AUD3);
++		generic_handle_irq(IRQ_AMIGA_AUD3);
+ 	}
+-	return IRQ_HANDLED;
+ }
+ 
+-static irqreturn_t ami_int5(int irq, void *dev_id)
++static void ami_int5(unsigned int irq, struct irq_desc *desc)
+ {
+ 	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
+ 
+ 	/* if serial receive buffer full interrupt */
+ 	if (ints & IF_RBF) {
+ 		/* acknowledge of IF_RBF must be done by the serial interrupt */
+-		m68k_handle_int(IRQ_AMIGA_RBF);
++		generic_handle_irq(IRQ_AMIGA_RBF);
+ 	}
+ 
+ 	/* if a disk sync interrupt */
+ 	if (ints & IF_DSKSYN) {
+ 		amiga_custom.intreq = IF_DSKSYN;
+-		m68k_handle_int(IRQ_AMIGA_DSKSYN);
++		generic_handle_irq(IRQ_AMIGA_DSKSYN);
+ 	}
+-	return IRQ_HANDLED;
++}
++
++
++/*
++ * void amiga_init_IRQ(void)
++ *
++ * Parameters:	None
++ *
++ * Returns:	Nothing
++ *
++ * This function should be called during kernel startup to initialize
++ * the amiga IRQ handling routines.
++ */
++
++void __init amiga_init_IRQ(void)
++{
++	m68k_setup_irq_controller(&amiga_irq_chip, handle_simple_irq, IRQ_USER,
++				  AMI_STD_IRQS);
++
++	irq_set_chained_handler(IRQ_AUTO_1, ami_int1);
++	irq_set_chained_handler(IRQ_AUTO_3, ami_int3);
++	irq_set_chained_handler(IRQ_AUTO_4, ami_int4);
++	irq_set_chained_handler(IRQ_AUTO_5, ami_int5);
++
++	/* turn off PCMCIA interrupts */
++	if (AMIGAHW_PRESENT(PCMCIA))
++		gayle.inten = GAYLE_IRQ_IDE;
++
++	/* turn off all interrupts and enable the master interrupt bit */
++	amiga_custom.intena = 0x7fff;
++	amiga_custom.intreq = 0x7fff;
++	amiga_custom.intena = IF_SETCLR | IF_INTEN;
++
++	cia_init_IRQ(&ciaa_base);
++	cia_init_IRQ(&ciab_base);
+ }
+diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
+index ecd0f7c..18c0e29 100644
+--- a/arch/m68k/amiga/cia.c
++++ b/arch/m68k/amiga/cia.c
+@@ -93,13 +93,14 @@ static irqreturn_t cia_handler(int irq, void *dev_id)
+ 	amiga_custom.intreq = base->int_mask;
+ 	for (; ints; mach_irq++, ints >>= 1) {
+ 		if (ints & 1)
+-			m68k_handle_int(mach_irq);
++			generic_handle_irq(mach_irq);
+ 	}
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void cia_enable_irq(unsigned int irq)
++static void cia_irq_enable(struct irq_data *data)
+ {
++	unsigned int irq = data->irq;
+ 	unsigned char mask;
+ 
+ 	if (irq >= IRQ_AMIGA_CIAB) {
+@@ -113,19 +114,20 @@ static void cia_enable_irq(unsigned int irq)
+ 	}
+ }
+ 
+-static void cia_disable_irq(unsigned int irq)
++static void cia_irq_disable(struct irq_data *data)
+ {
++	unsigned int irq = data->irq;
++
+ 	if (irq >= IRQ_AMIGA_CIAB)
+ 		cia_able_irq(&ciab_base, 1 << (irq - IRQ_AMIGA_CIAB));
+ 	else
+ 		cia_able_irq(&ciaa_base, 1 << (irq - IRQ_AMIGA_CIAA));
+ }
+ 
+-static struct irq_controller cia_irq_controller = {
++static struct irq_chip cia_irq_chip = {
+ 	.name		= "cia",
+-	.lock		= __SPIN_LOCK_UNLOCKED(cia_irq_controller.lock),
+-	.enable		= cia_enable_irq,
+-	.disable	= cia_disable_irq,
++	.irq_enable	= cia_irq_enable,
++	.irq_disable	= cia_irq_disable,
+ };
+ 
+ /*
+@@ -134,9 +136,9 @@ static struct irq_controller cia_irq_controller = {
+  * into this chain.
+  */
+ 
+-static void auto_enable_irq(unsigned int irq)
++static void auto_irq_enable(struct irq_data *data)
+ {
+-	switch (irq) {
++	switch (data->irq) {
+ 	case IRQ_AUTO_2:
+ 		amiga_custom.intena = IF_SETCLR | IF_PORTS;
+ 		break;
+@@ -146,9 +148,9 @@ static void auto_enable_irq(unsigned int irq)
+ 	}
+ }
+ 
+-static void auto_disable_irq(unsigned int irq)
++static void auto_irq_disable(struct irq_data *data)
+ {
+-	switch (irq) {
++	switch (data->irq) {
+ 	case IRQ_AUTO_2:
+ 		amiga_custom.intena = IF_PORTS;
+ 		break;
+@@ -158,24 +160,25 @@ static void auto_disable_irq(unsigned int irq)
+ 	}
+ }
+ 
+-static struct irq_controller auto_irq_controller = {
++static struct irq_chip auto_irq_chip = {
+ 	.name		= "auto",
+-	.lock		= __SPIN_LOCK_UNLOCKED(auto_irq_controller.lock),
+-	.enable		= auto_enable_irq,
+-	.disable	= auto_disable_irq,
++	.irq_enable	= auto_irq_enable,
++	.irq_disable	= auto_irq_disable,
+ };
+ 
+ void __init cia_init_IRQ(struct ciabase *base)
+ {
+-	m68k_setup_irq_controller(&cia_irq_controller, base->cia_irq, CIA_IRQS);
++	m68k_setup_irq_controller(&cia_irq_chip, handle_simple_irq,
++				  base->cia_irq, CIA_IRQS);
+ 
+ 	/* clear any pending interrupt and turn off all interrupts */
+ 	cia_set_irq(base, CIA_ICR_ALL);
+ 	cia_able_irq(base, CIA_ICR_ALL);
+ 
+ 	/* override auto int and install CIA handler */
+-	m68k_setup_irq_controller(&auto_irq_controller, base->handler_irq, 1);
+-	m68k_irq_startup(base->handler_irq);
++	m68k_setup_irq_controller(&auto_irq_chip, handle_simple_irq,
++				  base->handler_irq, 1);
++	m68k_irq_startup_irq(base->handler_irq);
+ 	if (request_irq(base->handler_irq, cia_handler, IRQF_SHARED,
+ 			base->name, base))
+ 		pr_err("Couldn't register %s interrupt\n", base->name);
+diff --git a/arch/m68k/apollo/dn_ints.c b/arch/m68k/apollo/dn_ints.c
+index 5d47f3a..17be1e7 100644
+--- a/arch/m68k/apollo/dn_ints.c
++++ b/arch/m68k/apollo/dn_ints.c
+@@ -1,19 +1,13 @@
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ 
+-#include <asm/irq.h>
+ #include <asm/traps.h>
+ #include <asm/apollohw.h>
+ 
+-void dn_process_int(unsigned int irq, struct pt_regs *fp)
++unsigned int apollo_irq_startup(struct irq_data *data)
+ {
+-	__m68k_handle_int(irq, fp);
++	unsigned int irq = data->irq;
+ 
+-	*(volatile unsigned char *)(pica)=0x20;
+-	*(volatile unsigned char *)(picb)=0x20;
+-}
+-
+-int apollo_irq_startup(unsigned int irq)
+-{
+ 	if (irq < 8)
+ 		*(volatile unsigned char *)(pica+1) &= ~(1 << irq);
+ 	else
+@@ -21,24 +15,33 @@ int apollo_irq_startup(unsigned int irq)
+ 	return 0;
+ }
+ 
+-void apollo_irq_shutdown(unsigned int irq)
++void apollo_irq_shutdown(struct irq_data *data)
+ {
++	unsigned int irq = data->irq;
++
+ 	if (irq < 8)
+ 		*(volatile unsigned char *)(pica+1) |= (1 << irq);
+ 	else
+ 		*(volatile unsigned char *)(picb+1) |= (1 << (irq - 8));
+ }
+ 
+-static struct irq_controller apollo_irq_controller = {
++void apollo_irq_eoi(struct irq_data *data)
++{
++	*(volatile unsigned char *)(pica) = 0x20;
++	*(volatile unsigned char *)(picb) = 0x20;
++}
++
++static struct irq_chip apollo_irq_chip = {
+ 	.name           = "apollo",
+-	.lock           = __SPIN_LOCK_UNLOCKED(apollo_irq_controller.lock),
+-	.startup        = apollo_irq_startup,
+-	.shutdown       = apollo_irq_shutdown,
++	.irq_startup    = apollo_irq_startup,
++	.irq_shutdown   = apollo_irq_shutdown,
++	.irq_eoi	= apollo_irq_eoi,
+ };
+ 
+ 
+ void __init dn_init_IRQ(void)
+ {
+-	m68k_setup_user_interrupt(VEC_USER + 96, 16, dn_process_int);
+-	m68k_setup_irq_controller(&apollo_irq_controller, IRQ_APOLLO, 16);
++	m68k_setup_user_interrupt(VEC_USER + 96, 16);
++	m68k_setup_irq_controller(&apollo_irq_chip, handle_fasteoi_irq,
++				  IRQ_APOLLO, 16);
+ }
+diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
+index 26a804e..6d196da 100644
+--- a/arch/m68k/atari/ataints.c
++++ b/arch/m68k/atari/ataints.c
+@@ -60,243 +60,7 @@
+  * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP,
+  * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can
+  * be allocated by atari_register_vme_int().
+- *
+- * Each interrupt can be of three types:
+- *
+- *  - SLOW: The handler runs with all interrupts enabled, except the one it
+- *    was called by (to avoid reentering). This should be the usual method.
+- *    But it is currently possible only for MFP ints, since only the MFP
+- *    offers an easy way to mask interrupts.
+- *
+- *  - FAST: The handler runs with all interrupts disabled. This should be used
+- *    only for really fast handlers, that just do actions immediately
+- *    necessary, and let the rest do a bottom half or task queue.
+- *
+- *  - PRIORITIZED: The handler can be interrupted by higher-level ints
+- *    (greater IPL, no MFP priorities!). This is the method of choice for ints
+- *    which should be slow, but are not from a MFP.
+- *
+- * The feature of more than one handler for one int source is still there, but
+- * only applicable if all handers are of the same type. To not slow down
+- * processing of ints with only one handler by the chaining feature, the list
+- * calling function atari_call_irq_list() is only plugged in at the time the
+- * second handler is registered.
+- *
+- * Implementation notes: For fast-as-possible int handling, there are separate
+- * entry points for each type (slow/fast/prio). The assembler handler calls
+- * the irq directly in the usual case, no C wrapper is involved. In case of
+- * multiple handlers, atari_call_irq_list() is registered as handler and calls
+- * in turn the real irq's. To ease access from assembler level to the irq
+- * function pointer and accompanying data, these two are stored in a separate
+- * array, irq_handler[]. The rest of data (type, name) are put into a second
+- * array, irq_param, that is accessed from C only. For each slow interrupt (32
+- * in all) there are separate handler functions, which makes it possible to
+- * hard-code the MFP register address and value, are necessary to mask the
+- * int. If there'd be only one generic function, lots of calculations would be
+- * needed to determine MFP register and int mask from the vector number :-(
+- *
+- * Furthermore, slow ints may not lower the IPL below its previous value
+- * (before the int happened). This is needed so that an int of class PRIO, on
+- * that this int may be stacked, cannot be reentered. This feature is
+- * implemented as follows: If the stack frame format is 1 (throwaway), the int
+- * is not stacked, and the IPL is anded with 0xfbff, resulting in a new level
+- * 2, which still blocks the HSYNC, but no interrupts of interest. If the
+- * frame format is 0, the int is nested, and the old IPL value can be found in
+- * the sr copy in the frame.
+- */
+-
+-#if 0
+-
+-#define	NUM_INT_SOURCES	(8 + NUM_ATARI_SOURCES)
+-
+-typedef void (*asm_irq_handler)(void);
+-
+-struct irqhandler {
+-	irqreturn_t (*handler)(int, void *, struct pt_regs *);
+-	void	*dev_id;
+-};
+-
+-struct irqparam {
+-	unsigned long	flags;
+-	const char	*devname;
+-};
+-
+-/*
+- * Array with irq's and their parameter data. This array is accessed from low
+- * level assembler code, so an element size of 8 allows usage of index scaling
+- * addressing mode.
+  */
+-static struct irqhandler irq_handler[NUM_INT_SOURCES];
+-
+-/*
+- * This array hold the rest of parameters of int handlers: type
+- * (slow,fast,prio) and the name of the handler. These values are only
+- * accessed from C
+- */
+-static struct irqparam irq_param[NUM_INT_SOURCES];
+-
+-/* check for valid int number (complex, sigh...) */
+-#define	IS_VALID_INTNO(n)											\
+-	((n) > 0 &&														\
+-	 /* autovec and ST-MFP ok anyway */								\
+-	 (((n) < TTMFP_SOURCE_BASE) ||									\
+-	  /* TT-MFP ok if present */									\
+-	  ((n) >= TTMFP_SOURCE_BASE && (n) < SCC_SOURCE_BASE &&			\
+-	   ATARIHW_PRESENT(TT_MFP)) ||									\
+-	  /* SCC ok if present and number even */						\
+-	  ((n) >= SCC_SOURCE_BASE && (n) < VME_SOURCE_BASE &&			\
+-	   !((n) & 1) && ATARIHW_PRESENT(SCC)) ||						\
+-	  /* greater numbers ok if they are registered VME vectors */		\
+-	  ((n) >= VME_SOURCE_BASE && (n) < VME_SOURCE_BASE + VME_MAX_SOURCES && \
+-		  free_vme_vec_bitmap & (1 << ((n) - VME_SOURCE_BASE)))))
+-
+-
+-/*
+- * Here start the assembler entry points for interrupts
+- */
+-
+-#define IRQ_NAME(nr) atari_slow_irq_##nr##_handler(void)
+-
+-#define	BUILD_SLOW_IRQ(n)						   \
+-asmlinkage void IRQ_NAME(n);						   \
+-/* Dummy function to allow asm with operands.  */			   \
+-void atari_slow_irq_##n##_dummy (void) {				   \
+-__asm__ (__ALIGN_STR "\n"						   \
+-"atari_slow_irq_" #n "_handler:\t"					   \
+-"	addl	%6,%5\n"	/* preempt_count() += HARDIRQ_OFFSET */	   \
+-	SAVE_ALL_INT "\n"						   \
+-	GET_CURRENT(%%d0) "\n"						   \
+-"	andb	#~(1<<(%c3&7)),%a4:w\n"	/* mask this interrupt */	   \
+-	/* get old IPL from stack frame */				   \
+-"	bfextu	%%sp@(%c2){#5,#3},%%d0\n"				   \
+-"	movew	%%sr,%%d1\n"						   \
+-"	bfins	%%d0,%%d1{#21,#3}\n"					   \
+-"	movew	%%d1,%%sr\n"		/* set IPL = previous value */	   \
+-"	addql	#1,%a0\n"						   \
+-"	lea	%a1,%%a0\n"						   \
+-"	pea	%%sp@\n"		/* push addr of frame */	   \
+-"	movel	%%a0@(4),%%sp at -\n"	/* push handler data */		   \
+-"	pea	(%c3+8)\n"		/* push int number */		   \
+-"	movel	%%a0@,%%a0\n"						   \
+-"	jbsr	%%a0@\n"		/* call the handler */		   \
+-"	addql	#8,%%sp\n"						   \
+-"	addql	#4,%%sp\n"						   \
+-"	orw	#0x0600,%%sr\n"						   \
+-"	andw	#0xfeff,%%sr\n"		/* set IPL = 6 again */		   \
+-"	orb	#(1<<(%c3&7)),%a4:w\n"	/* now unmask the int again */	   \
+-"	jbra	ret_from_interrupt\n"					   \
+-	 : : "i" (&kstat_cpu(0).irqs[n+8]), "i" (&irq_handler[n+8]),	   \
+-	     "n" (PT_OFF_SR), "n" (n),					   \
+-	     "i" (n & 8 ? (n & 16 ? &tt_mfp.int_mk_a : &st_mfp.int_mk_a)   \
+-		        : (n & 16 ? &tt_mfp.int_mk_b : &st_mfp.int_mk_b)), \
+-	     "m" (preempt_count()), "di" (HARDIRQ_OFFSET)		   \
+-);									   \
+-	for (;;);			/* fake noreturn */		   \
+-}
+-
+-BUILD_SLOW_IRQ(0);
+-BUILD_SLOW_IRQ(1);
+-BUILD_SLOW_IRQ(2);
+-BUILD_SLOW_IRQ(3);
+-BUILD_SLOW_IRQ(4);
+-BUILD_SLOW_IRQ(5);
+-BUILD_SLOW_IRQ(6);
+-BUILD_SLOW_IRQ(7);
+-BUILD_SLOW_IRQ(8);
+-BUILD_SLOW_IRQ(9);
+-BUILD_SLOW_IRQ(10);
+-BUILD_SLOW_IRQ(11);
+-BUILD_SLOW_IRQ(12);
+-BUILD_SLOW_IRQ(13);
+-BUILD_SLOW_IRQ(14);
+-BUILD_SLOW_IRQ(15);
+-BUILD_SLOW_IRQ(16);
+-BUILD_SLOW_IRQ(17);
+-BUILD_SLOW_IRQ(18);
+-BUILD_SLOW_IRQ(19);
+-BUILD_SLOW_IRQ(20);
+-BUILD_SLOW_IRQ(21);
+-BUILD_SLOW_IRQ(22);
+-BUILD_SLOW_IRQ(23);
+-BUILD_SLOW_IRQ(24);
+-BUILD_SLOW_IRQ(25);
+-BUILD_SLOW_IRQ(26);
+-BUILD_SLOW_IRQ(27);
+-BUILD_SLOW_IRQ(28);
+-BUILD_SLOW_IRQ(29);
+-BUILD_SLOW_IRQ(30);
+-BUILD_SLOW_IRQ(31);
+-
+-asm_irq_handler slow_handlers[32] = {
+-	[0]	= atari_slow_irq_0_handler,
+-	[1]	= atari_slow_irq_1_handler,
+-	[2]	= atari_slow_irq_2_handler,
+-	[3]	= atari_slow_irq_3_handler,
+-	[4]	= atari_slow_irq_4_handler,
+-	[5]	= atari_slow_irq_5_handler,
+-	[6]	= atari_slow_irq_6_handler,
+-	[7]	= atari_slow_irq_7_handler,
+-	[8]	= atari_slow_irq_8_handler,
+-	[9]	= atari_slow_irq_9_handler,
+-	[10]	= atari_slow_irq_10_handler,
+-	[11]	= atari_slow_irq_11_handler,
+-	[12]	= atari_slow_irq_12_handler,
+-	[13]	= atari_slow_irq_13_handler,
+-	[14]	= atari_slow_irq_14_handler,
+-	[15]	= atari_slow_irq_15_handler,
+-	[16]	= atari_slow_irq_16_handler,
+-	[17]	= atari_slow_irq_17_handler,
+-	[18]	= atari_slow_irq_18_handler,
+-	[19]	= atari_slow_irq_19_handler,
+-	[20]	= atari_slow_irq_20_handler,
+-	[21]	= atari_slow_irq_21_handler,
+-	[22]	= atari_slow_irq_22_handler,
+-	[23]	= atari_slow_irq_23_handler,
+-	[24]	= atari_slow_irq_24_handler,
+-	[25]	= atari_slow_irq_25_handler,
+-	[26]	= atari_slow_irq_26_handler,
+-	[27]	= atari_slow_irq_27_handler,
+-	[28]	= atari_slow_irq_28_handler,
+-	[29]	= atari_slow_irq_29_handler,
+-	[30]	= atari_slow_irq_30_handler,
+-	[31]	= atari_slow_irq_31_handler
+-};
+-
+-asmlinkage void atari_fast_irq_handler( void );
+-asmlinkage void atari_prio_irq_handler( void );
+-
+-/* Dummy function to allow asm with operands.  */
+-void atari_fast_prio_irq_dummy (void) {
+-__asm__ (__ALIGN_STR "\n"
+-"atari_fast_irq_handler:\n\t"
+-	"orw	#0x700,%%sr\n"		/* disable all interrupts */
+-"atari_prio_irq_handler:\n\t"
+-	"addl	%3,%2\n\t"		/* preempt_count() += HARDIRQ_OFFSET */
+-	SAVE_ALL_INT "\n\t"
+-	GET_CURRENT(%%d0) "\n\t"
+-	/* get vector number from stack frame and convert to source */
+-	"bfextu	%%sp@(%c1){#4,#10},%%d0\n\t"
+-	"subw	#(0x40-8),%%d0\n\t"
+-	"jpl	1f\n\t"
+-	"addw	#(0x40-8-0x18),%%d0\n"
+-    "1:\tlea	%a0,%%a0\n\t"
+-	"addql	#1,%%a0@(%%d0:l:4)\n\t"
+-	"lea	irq_handler,%%a0\n\t"
+-	"lea	%%a0@(%%d0:l:8),%%a0\n\t"
+-	"pea	%%sp@\n\t"		/* push frame address */
+-	"movel	%%a0@(4),%%sp@-\n\t"	/* push handler data */
+-	"movel	%%d0,%%sp@-\n\t"	/* push int number */
+-	"movel	%%a0@,%%a0\n\t"
+-	"jsr	%%a0@\n\t"		/* and call the handler */
+-	"addql	#8,%%sp\n\t"
+-	"addql	#4,%%sp\n\t"
+-	"jbra	ret_from_interrupt"
+-	 : : "i" (&kstat_cpu(0).irqs), "n" (PT_OFF_FORMATVEC),
+-	     "m" (preempt_count()), "di" (HARDIRQ_OFFSET)
+-);
+-	for (;;);
+-}
+-#endif
+ 
+ /*
+  * Bitmap for free interrupt vector numbers
+@@ -320,31 +84,44 @@ extern void atari_microwire_cmd(int cmd);
+ 
+ extern int atari_SCC_reset_done;
+ 
+-static int atari_startup_irq(unsigned int irq)
++static unsigned int atari_irq_startup(struct irq_data *data)
+ {
+-	m68k_irq_startup(irq);
++	unsigned int irq = data->irq;
++
++	m68k_irq_startup(data);
+ 	atari_turnon_irq(irq);
+ 	atari_enable_irq(irq);
+ 	return 0;
+ }
+ 
+-static void atari_shutdown_irq(unsigned int irq)
++static void atari_irq_shutdown(struct irq_data *data)
+ {
++	unsigned int irq = data->irq;
++
+ 	atari_disable_irq(irq);
+ 	atari_turnoff_irq(irq);
+-	m68k_irq_shutdown(irq);
++	m68k_irq_shutdown(data);
+ 
+ 	if (irq == IRQ_AUTO_4)
+ 	    vectors[VEC_INT4] = falcon_hblhandler;
+ }
+ 
+-static struct irq_controller atari_irq_controller = {
++static void atari_irq_enable(struct irq_data *data)
++{
++	atari_enable_irq(data->irq);
++}
++
++static void atari_irq_disable(struct irq_data *data)
++{
++	atari_disable_irq(data->irq);
++}
++
++static struct irq_chip atari_irq_chip = {
+ 	.name		= "atari",
+-	.lock		= __SPIN_LOCK_UNLOCKED(atari_irq_controller.lock),
+-	.startup	= atari_startup_irq,
+-	.shutdown	= atari_shutdown_irq,
+-	.enable		= atari_enable_irq,
+-	.disable	= atari_disable_irq,
++	.irq_startup	= atari_irq_startup,
++	.irq_shutdown	= atari_irq_shutdown,
++	.irq_enable	= atari_irq_enable,
++	.irq_disable	= atari_irq_disable,
+ };
+ 
+ /*
+@@ -360,8 +137,9 @@ static struct irq_controller atari_irq_controller = {
+ 
+ void __init atari_init_IRQ(void)
+ {
+-	m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER, NULL);
+-	m68k_setup_irq_controller(&atari_irq_controller, 1, NUM_ATARI_SOURCES - 1);
++	m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER);
++	m68k_setup_irq_controller(&atari_irq_chip, handle_simple_irq, 1,
++				  NUM_ATARI_SOURCES - 1);
+ 
+ 	/* Initialize the MFP(s) */
+ 
+diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
+index 1edd950..8128647 100644
+--- a/arch/m68k/bvme6000/config.c
++++ b/arch/m68k/bvme6000/config.c
+@@ -86,7 +86,7 @@ static void bvme6000_get_model(char *model)
+  */
+ static void __init bvme6000_init_IRQ(void)
+ {
+-	m68k_setup_user_interrupt(VEC_USER, 192, NULL);
++	m68k_setup_user_interrupt(VEC_USER, 192);
+ }
+ 
+ void __init config_bvme6000(void)
+diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
+index f6312c7..c87fe69 100644
+--- a/arch/m68k/hp300/time.c
++++ b/arch/m68k/hp300/time.c
+@@ -70,7 +70,7 @@ void __init hp300_sched_init(irq_handler_t vector)
+ 
+   asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
+ 
+-  if (request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector))
++  if (request_irq(IRQ_AUTO_6, hp300_tick, 0, "timer tick", vector))
+     pr_err("Couldn't register timer interrupt\n");
+ 
+   out_8(CLOCKBASE + CLKCR2, 0x1);		/* select CR1 */
+diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
+index 870e534..db30ed2 100644
+--- a/arch/m68k/include/asm/hardirq.h
++++ b/arch/m68k/include/asm/hardirq.h
+@@ -18,6 +18,11 @@
+ 
+ #ifdef CONFIG_MMU
+ 
++static inline void ack_bad_irq(unsigned int irq)
++{
++	pr_crit("unexpected IRQ trap at vector %02x\n", irq);
++}
++
+ /* entry.S is sensitive to the offsets of these fields */
+ typedef struct {
+ 	unsigned int __softirq_pending;
+diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
+index 69ed0d74..6198df5 100644
+--- a/arch/m68k/include/asm/irq.h
++++ b/arch/m68k/include/asm/irq.h
+@@ -27,11 +27,6 @@
+ 
+ #ifdef CONFIG_MMU
+ 
+-#include <linux/linkage.h>
+-#include <linux/hardirq.h>
+-#include <linux/irqreturn.h>
+-#include <linux/spinlock_types.h>
+-
+ /*
+  * Interrupt source definitions
+  * General interrupt sources are the level 1-7.
+@@ -54,10 +49,6 @@
+ 
+ #define IRQ_USER	8
+ 
+-extern unsigned int irq_canonicalize(unsigned int irq);
+-
+-struct pt_regs;
+-
+ /*
+  * various flags for request_irq() - the Amiga now uses the standard
+  * mechanism like all other architectures - IRQF_DISABLED and
+@@ -71,57 +62,27 @@ struct pt_regs;
+ #define IRQ_FLG_STD	(0x8000)	/* internally used		*/
+ #endif
+ 
+-/*
+- * This structure is used to chain together the ISRs for a particular
+- * interrupt source (if it supports chaining).
+- */
+-typedef struct irq_node {
+-	irqreturn_t	(*handler)(int, void *);
+-	void		*dev_id;
+-	struct irq_node *next;
+-	unsigned long	flags;
+-	const char	*devname;
+-} irq_node_t;
+-
+-/*
+- * This structure has only 4 elements for speed reasons
+- */
+-struct irq_handler {
+-	int		(*handler)(int, void *);
+-	unsigned long	flags;
+-	void		*dev_id;
+-	const char	*devname;
+-};
+-
+-struct irq_controller {
+-	const char *name;
+-	spinlock_t lock;
+-	int (*startup)(unsigned int irq);
+-	void (*shutdown)(unsigned int irq);
+-	void (*enable)(unsigned int irq);
+-	void (*disable)(unsigned int irq);
+-};
+-
+-extern int m68k_irq_startup(unsigned int);
+-extern void m68k_irq_shutdown(unsigned int);
+-
+-/*
+- * This function returns a new irq_node_t
+- */
+-extern irq_node_t *new_irq_node(void);
++struct irq_data;
++struct irq_chip;
++struct irq_desc;
++extern unsigned int m68k_irq_startup(struct irq_data *data);
++extern unsigned int m68k_irq_startup_irq(unsigned int irq);
++extern void m68k_irq_shutdown(struct irq_data *data);
++extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int,
++						      struct pt_regs *));
++extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt);
++extern void m68k_setup_irq_controller(struct irq_chip *,
++				      void (*handle)(unsigned int irq,
++						     struct irq_desc *desc),
++				      unsigned int irq, unsigned int cnt);
+ 
+-extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *));
+-extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
+-				      void (*handler)(unsigned int, struct pt_regs *));
+-extern void m68k_setup_irq_controller(struct irq_controller *, unsigned int, unsigned int);
+-
+-asmlinkage void m68k_handle_int(unsigned int);
+-asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
++extern unsigned int irq_canonicalize(unsigned int irq);
+ 
+ #else
+ #define irq_canonicalize(irq)  (irq)
+ #endif /* CONFIG_MMU */
+ 
+ asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
++extern atomic_t irq_err_count;
+ 
+ #endif /* _M68K_IRQ_H_ */
+diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
+index c2a1c5e..12ebe43 100644
+--- a/arch/m68k/include/asm/macintosh.h
++++ b/arch/m68k/include/asm/macintosh.h
+@@ -12,6 +12,8 @@ extern void mac_reset(void);
+ extern void mac_poweroff(void);
+ extern void mac_init_IRQ(void);
+ extern int mac_irq_pending(unsigned int);
++extern void mac_irq_enable(struct irq_data *data);
++extern void mac_irq_disable(struct irq_data *data);
+ 
+ /*
+  *	Floppy driver magic hook - probably shouldn't be here
+diff --git a/arch/m68k/include/asm/q40ints.h b/arch/m68k/include/asm/q40ints.h
+index 3d970af..22f12c9 100644
+--- a/arch/m68k/include/asm/q40ints.h
++++ b/arch/m68k/include/asm/q40ints.h
+@@ -24,6 +24,3 @@
+ #define Q40_IRQ10_MASK       (1<<5)
+ #define Q40_IRQ14_MASK       (1<<6)
+ #define Q40_IRQ15_MASK       (1<<7)
+-
+-extern unsigned long q40_probe_irq_on (void);
+-extern int q40_probe_irq_off (unsigned long irqs);
+diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
+index e7f0f2e..c569619 100644
+--- a/arch/m68k/kernel/Makefile
++++ b/arch/m68k/kernel/Makefile
+@@ -6,16 +6,15 @@ extra-$(CONFIG_MMU)	:= head.o
+ extra-$(CONFIG_SUN3)	:= sun3-head.o
+ extra-y			+= vmlinux.lds
+ 
+-obj-y	:= entry.o m68k_ksyms.o module.o process.o ptrace.o setup.o signal.o \
+-	   sys_m68k.o syscalltable.o time.o traps.o
++obj-y	:= entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o setup.o \
++	   signal.o sys_m68k.o syscalltable.o time.o traps.o
+ 
+-obj-$(CONFIG_MMU)	+= ints.o devres.o vectors.o
+-devres-$(CONFIG_MMU)	= ../../../kernel/irq/devres.o
++obj-$(CONFIG_MMU)	+= ints.o vectors.o
+ 
+ ifndef CONFIG_MMU_SUN3
+ obj-y			+= dma.o
+ endif
+ ifndef CONFIG_MMU
+-obj-y			+= init_task.o irq.o
++obj-y			+= init_task.o
+ endif
+ 
+diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
+index bd0ec05..c713f51 100644
+--- a/arch/m68k/kernel/entry_mm.S
++++ b/arch/m68k/kernel/entry_mm.S
+@@ -48,7 +48,7 @@
+ .globl sys_fork, sys_clone, sys_vfork
+ .globl ret_from_interrupt, bad_interrupt
+ .globl auto_irqhandler_fixup
+-.globl user_irqvec_fixup, user_irqhandler_fixup
++.globl user_irqvec_fixup
+ 
+ .text
+ ENTRY(buserr)
+@@ -207,7 +207,7 @@ ENTRY(auto_inthandler)
+ 	movel	%sp,%sp@-
+ 	movel	%d0,%sp@-		|  put vector # on stack
+ auto_irqhandler_fixup = . + 2
+-	jsr	__m68k_handle_int	|  process the IRQ
++	jsr	do_IRQ			|  process the IRQ
+ 	addql	#8,%sp			|  pop parameters off stack
+ 
+ ret_from_interrupt:
+@@ -240,8 +240,7 @@ user_irqvec_fixup = . + 2
+ 
+ 	movel	%sp,%sp@-
+ 	movel	%d0,%sp@-		|  put vector # on stack
+-user_irqhandler_fixup = . + 2
+-	jsr	__m68k_handle_int	|  process the IRQ
++	jsr	do_IRQ			|  process the IRQ
+ 	addql	#8,%sp			|  pop parameters off stack
+ 
+ 	subqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
+index 761ee04..74fefac 100644
+--- a/arch/m68k/kernel/ints.c
++++ b/arch/m68k/kernel/ints.c
+@@ -4,25 +4,6 @@
+  * This file is subject to the terms and conditions of the GNU General Public
+  * License.  See the file COPYING in the main directory of this archive
+  * for more details.
+- *
+- * 07/03/96: Timer initialization, and thus mach_sched_init(),
+- *           removed from request_irq() and moved to init_time().
+- *           We should therefore consider renaming our add_isr() and
+- *           remove_isr() to request_irq() and free_irq()
+- *           respectively, so they are compliant with the other
+- *           architectures.                                     /Jes
+- * 11/07/96: Changed all add_/remove_isr() to request_/free_irq() calls.
+- *           Removed irq list support, if any machine needs an irq server
+- *           it must implement this itself (as it's already done), instead
+- *           only default handler are used with mach_default_handler.
+- *           request_irq got some flags different from other architectures:
+- *           - IRQ_FLG_REPLACE : Replace an existing handler (the default one
+- *                               can be replaced without this flag)
+- *           - IRQ_FLG_LOCK : handler can't be replaced
+- *           There are other machine depending flags, see there
+- *           If you want to replace a default handler you should know what
+- *           you're doing, since it might handle different other irq sources
+- *           which must be served                               /Roman Zippel
+  */
+ 
+ #include <linux/module.h>
+@@ -47,33 +28,22 @@
+ #endif
+ 
+ extern u32 auto_irqhandler_fixup[];
+-extern u32 user_irqhandler_fixup[];
+ extern u16 user_irqvec_fixup[];
+ 
+-/* table for system interrupt handlers */
+-static struct irq_node *irq_list[NR_IRQS];
+-static struct irq_controller *irq_controller[NR_IRQS];
+-static int irq_depth[NR_IRQS];
+-
+ static int m68k_first_user_vec;
+ 
+-static struct irq_controller auto_irq_controller = {
++static struct irq_chip auto_irq_chip = {
+ 	.name		= "auto",
+-	.lock		= __SPIN_LOCK_UNLOCKED(auto_irq_controller.lock),
+-	.startup	= m68k_irq_startup,
+-	.shutdown	= m68k_irq_shutdown,
++	.irq_startup	= m68k_irq_startup,
++	.irq_shutdown	= m68k_irq_shutdown,
+ };
+ 
+-static struct irq_controller user_irq_controller = {
++static struct irq_chip user_irq_chip = {
+ 	.name		= "user",
+-	.lock		= __SPIN_LOCK_UNLOCKED(user_irq_controller.lock),
+-	.startup	= m68k_irq_startup,
+-	.shutdown	= m68k_irq_shutdown,
++	.irq_startup	= m68k_irq_startup,
++	.irq_shutdown	= m68k_irq_shutdown,
+ };
+ 
+-#define NUM_IRQ_NODES 100
+-static irq_node_t nodes[NUM_IRQ_NODES];
+-
+ /*
+  * void init_IRQ(void)
+  *
+@@ -96,7 +66,7 @@ void __init init_IRQ(void)
+ 	}
+ 
+ 	for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++)
+-		irq_controller[i] = &auto_irq_controller;
++		irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq);
+ 
+ 	mach_init_IRQ();
+ }
+@@ -106,7 +76,7 @@ void __init init_IRQ(void)
+  * @handler: called from auto vector interrupts
+  *
+  * setup the handler to be called from auto vector interrupts instead of the
+- * standard __m68k_handle_int(), it will be called with irq numbers in the range
++ * standard do_IRQ(), it will be called with irq numbers in the range
+  * from IRQ_AUTO_1 - IRQ_AUTO_7.
+  */
+ void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *))
+@@ -120,217 +90,49 @@ void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_re
+  * m68k_setup_user_interrupt
+  * @vec: first user vector interrupt to handle
+  * @cnt: number of active user vector interrupts
+- * @handler: called from user vector interrupts
+  *
+  * setup user vector interrupts, this includes activating the specified range
+  * of interrupts, only then these interrupts can be requested (note: this is
+- * different from auto vector interrupts). An optional handler can be installed
+- * to be called instead of the default __m68k_handle_int(), it will be called
+- * with irq numbers starting from IRQ_USER.
++ * different from auto vector interrupts).
+  */
+-void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
+-				      void (*handler)(unsigned int, struct pt_regs *))
++void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt)
+ {
+ 	int i;
+ 
+ 	BUG_ON(IRQ_USER + cnt > NR_IRQS);
+ 	m68k_first_user_vec = vec;
+ 	for (i = 0; i < cnt; i++)
+-		irq_controller[IRQ_USER + i] = &user_irq_controller;
++		irq_set_chip(IRQ_USER + i, &user_irq_chip);
+ 	*user_irqvec_fixup = vec - IRQ_USER;
+-	if (handler)
+-		*user_irqhandler_fixup = (u32)handler;
+ 	flush_icache();
+ }
+ 
+ /**
+  * m68k_setup_irq_controller
+- * @contr: irq controller which controls specified irq
++ * @chip: irq chip which controls specified irq
++ * @handle: flow handler which handles specified irq
+  * @irq: first irq to be managed by the controller
++ * @cnt: number of irqs to be managed by the controller
+  *
+  * Change the controller for the specified range of irq, which will be used to
+  * manage these irq. auto/user irq already have a default controller, which can
+  * be changed as well, but the controller probably should use m68k_irq_startup/
+  * m68k_irq_shutdown.
+  */
+-void m68k_setup_irq_controller(struct irq_controller *contr, unsigned int irq,
++void m68k_setup_irq_controller(struct irq_chip *chip,
++			       irq_flow_handler_t handle, unsigned int irq,
+ 			       unsigned int cnt)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < cnt; i++)
+-		irq_controller[irq + i] = contr;
+-}
+-
+-irq_node_t *new_irq_node(void)
+-{
+-	irq_node_t *node;
+-	short i;
+-
+-	for (node = nodes, i = NUM_IRQ_NODES-1; i >= 0; node++, i--) {
+-		if (!node->handler) {
+-			memset(node, 0, sizeof(*node));
+-			return node;
+-		}
++	for (i = 0; i < cnt; i++) {
++		irq_set_chip(irq + i, chip);
++		if (handle)
++			irq_set_handler(irq + i, handle);
+ 	}
+-
+-	printk ("new_irq_node: out of nodes\n");
+-	return NULL;
+ }
+ 
+-int setup_irq(unsigned int irq, struct irq_node *node)
+-{
+-	struct irq_controller *contr;
+-	struct irq_node **prev;
+-	unsigned long flags;
+-
+-	if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
+-		printk("%s: Incorrect IRQ %d from %s\n",
+-		       __func__, irq, node->devname);
+-		return -ENXIO;
+-	}
+-
+-	spin_lock_irqsave(&contr->lock, flags);
+-
+-	prev = irq_list + irq;
+-	if (*prev) {
+-		/* Can't share interrupts unless both agree to */
+-		if (!((*prev)->flags & node->flags & IRQF_SHARED)) {
+-			spin_unlock_irqrestore(&contr->lock, flags);
+-			return -EBUSY;
+-		}
+-		while (*prev)
+-			prev = &(*prev)->next;
+-	}
+-
+-	if (!irq_list[irq]) {
+-		if (contr->startup)
+-			contr->startup(irq);
+-		else
+-			contr->enable(irq);
+-	}
+-	node->next = NULL;
+-	*prev = node;
+-
+-	spin_unlock_irqrestore(&contr->lock, flags);
+-
+-	return 0;
+-}
+-
+-int request_irq(unsigned int irq,
+-		irq_handler_t handler,
+-		unsigned long flags, const char *devname, void *dev_id)
+-{
+-	struct irq_node *node;
+-	int res;
+-
+-	node = new_irq_node();
+-	if (!node)
+-		return -ENOMEM;
+-
+-	node->handler = handler;
+-	node->flags   = flags;
+-	node->dev_id  = dev_id;
+-	node->devname = devname;
+-
+-	res = setup_irq(irq, node);
+-	if (res)
+-		node->handler = NULL;
+-
+-	return res;
+-}
+-
+-EXPORT_SYMBOL(request_irq);
+-
+-void free_irq(unsigned int irq, void *dev_id)
+-{
+-	struct irq_controller *contr;
+-	struct irq_node **p, *node;
+-	unsigned long flags;
+-
+-	if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
+-		printk("%s: Incorrect IRQ %d\n", __func__, irq);
+-		return;
+-	}
+-
+-	spin_lock_irqsave(&contr->lock, flags);
+-
+-	p = irq_list + irq;
+-	while ((node = *p)) {
+-		if (node->dev_id == dev_id)
+-			break;
+-		p = &node->next;
+-	}
+-
+-	if (node) {
+-		*p = node->next;
+-		node->handler = NULL;
+-	} else
+-		printk("%s: Removing probably wrong IRQ %d\n",
+-		       __func__, irq);
+-
+-	if (!irq_list[irq]) {
+-		if (contr->shutdown)
+-			contr->shutdown(irq);
+-		else
+-			contr->disable(irq);
+-	}
+-
+-	spin_unlock_irqrestore(&contr->lock, flags);
+-}
+-
+-EXPORT_SYMBOL(free_irq);
+-
+-void enable_irq(unsigned int irq)
+-{
+-	struct irq_controller *contr;
+-	unsigned long flags;
+-
+-	if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
+-		printk("%s: Incorrect IRQ %d\n",
+-		       __func__, irq);
+-		return;
+-	}
+-
+-	spin_lock_irqsave(&contr->lock, flags);
+-	if (irq_depth[irq]) {
+-		if (!--irq_depth[irq]) {
+-			if (contr->enable)
+-				contr->enable(irq);
+-		}
+-	} else
+-		WARN_ON(1);
+-	spin_unlock_irqrestore(&contr->lock, flags);
+-}
+-
+-EXPORT_SYMBOL(enable_irq);
+-
+-void disable_irq(unsigned int irq)
+-{
+-	struct irq_controller *contr;
+-	unsigned long flags;
+-
+-	if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
+-		printk("%s: Incorrect IRQ %d\n",
+-		       __func__, irq);
+-		return;
+-	}
+-
+-	spin_lock_irqsave(&contr->lock, flags);
+-	if (!irq_depth[irq]++) {
+-		if (contr->disable)
+-			contr->disable(irq);
+-	}
+-	spin_unlock_irqrestore(&contr->lock, flags);
+-}
+-
+-EXPORT_SYMBOL(disable_irq);
+-
+-void disable_irq_nosync(unsigned int irq) __attribute__((alias("disable_irq")));
+-
+-EXPORT_SYMBOL(disable_irq_nosync);
+-
+-int m68k_irq_startup(unsigned int irq)
++unsigned int m68k_irq_startup_irq(unsigned int irq)
+ {
+ 	if (irq <= IRQ_AUTO_7)
+ 		vectors[VEC_SPUR + irq] = auto_inthandler;
+@@ -339,41 +141,21 @@ int m68k_irq_startup(unsigned int irq)
+ 	return 0;
+ }
+ 
+-void m68k_irq_shutdown(unsigned int irq)
++unsigned int m68k_irq_startup(struct irq_data *data)
+ {
+-	if (irq <= IRQ_AUTO_7)
+-		vectors[VEC_SPUR + irq] = bad_inthandler;
+-	else
+-		vectors[m68k_first_user_vec + irq - IRQ_USER] = bad_inthandler;
++	return m68k_irq_startup_irq(data->irq);
+ }
+ 
+-
+-/*
+- * Do we need these probe functions on the m68k?
+- *
+- *  ... may be useful with ISA devices
+- */
+-unsigned long probe_irq_on (void)
++void m68k_irq_shutdown(struct irq_data *data)
+ {
+-#ifdef CONFIG_Q40
+-	if (MACH_IS_Q40)
+-		return q40_probe_irq_on();
+-#endif
+-	return 0;
+-}
++	unsigned int irq = data->irq;
+ 
+-EXPORT_SYMBOL(probe_irq_on);
+-
+-int probe_irq_off (unsigned long irqs)
+-{
+-#ifdef CONFIG_Q40
+-	if (MACH_IS_Q40)
+-		return q40_probe_irq_off(irqs);
+-#endif
+-	return 0;
++	if (irq <= IRQ_AUTO_7)
++		vectors[VEC_SPUR + irq] = bad_inthandler;
++	else
++		vectors[m68k_first_user_vec + irq - IRQ_USER] = bad_inthandler;
+ }
+ 
+-EXPORT_SYMBOL(probe_irq_off);
+ 
+ unsigned int irq_canonicalize(unsigned int irq)
+ {
+@@ -386,52 +168,9 @@ unsigned int irq_canonicalize(unsigned int irq)
+ 
+ EXPORT_SYMBOL(irq_canonicalize);
+ 
+-asmlinkage void m68k_handle_int(unsigned int irq)
+-{
+-	struct irq_node *node;
+-	kstat_cpu(0).irqs[irq]++;
+-	node = irq_list[irq];
+-	do {
+-		node->handler(irq, node->dev_id);
+-		node = node->next;
+-	} while (node);
+-}
+-
+-asmlinkage void __m68k_handle_int(unsigned int irq, struct pt_regs *regs)
+-{
+-	struct pt_regs *old_regs;
+-	old_regs = set_irq_regs(regs);
+-	m68k_handle_int(irq);
+-	set_irq_regs(old_regs);
+-}
+ 
+ asmlinkage void handle_badint(struct pt_regs *regs)
+ {
+-	kstat_cpu(0).irqs[0]++;
+-	printk("unexpected interrupt from %u\n", regs->vector);
+-}
+-
+-int show_interrupts(struct seq_file *p, void *v)
+-{
+-	struct irq_controller *contr;
+-	struct irq_node *node;
+-	int i = *(loff_t *) v;
+-
+-	/* autovector interrupts */
+-	if (irq_list[i]) {
+-		contr = irq_controller[i];
+-		node = irq_list[i];
+-		seq_printf(p, "%-8s %3u: %10u %s", contr->name, i, kstat_cpu(0).irqs[i], node->devname);
+-		while ((node = node->next))
+-			seq_printf(p, ", %s", node->devname);
+-		seq_puts(p, "\n");
+-	}
+-	return 0;
+-}
+-
+-#ifdef CONFIG_PROC_FS
+-void init_irq_proc(void)
+-{
+-	/* Insert /proc/irq driver here */
++	atomic_inc(&irq_err_count);
++	pr_warn("unexpected interrupt from %u\n", regs->vector);
+ }
+-#endif
+diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
+index 2a96beb..b403924 100644
+--- a/arch/m68k/mac/baboon.c
++++ b/arch/m68k/mac/baboon.c
+@@ -11,6 +11,7 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+ #include <linux/init.h>
++#include <linux/irq.h>
+ 
+ #include <asm/traps.h>
+ #include <asm/bootinfo.h>
+@@ -20,9 +21,6 @@
+ 
+ /* #define DEBUG_IRQS */
+ 
+-extern void mac_enable_irq(unsigned int);
+-extern void mac_disable_irq(unsigned int);
+-
+ int baboon_present;
+ static volatile struct baboon *baboon;
+ static unsigned char baboon_disabled;
+@@ -53,7 +51,7 @@ void __init baboon_init(void)
+  * Baboon interrupt handler. This works a lot like a VIA.
+  */
+ 
+-static irqreturn_t baboon_irq(int irq, void *dev_id)
++static void baboon_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ 	int irq_bit, irq_num;
+ 	unsigned char events;
+@@ -64,15 +62,16 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
+ 		(uint) baboon->mb_status);
+ #endif
+ 
+-	if (!(events = baboon->mb_ifr & 0x07))
+-		return IRQ_NONE;
++	events = baboon->mb_ifr & 0x07;
++	if (!events)
++		return;
+ 
+ 	irq_num = IRQ_BABOON_0;
+ 	irq_bit = 1;
+ 	do {
+ 	        if (events & irq_bit) {
+ 			baboon->mb_ifr &= ~irq_bit;
+-			m68k_handle_int(irq_num);
++			generic_handle_irq(irq_num);
+ 		}
+ 		irq_bit <<= 1;
+ 		irq_num++;
+@@ -82,7 +81,6 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
+ 	/* for now we need to smash all interrupts */
+ 	baboon->mb_ifr &= ~events;
+ #endif
+-	return IRQ_HANDLED;
+ }
+ 
+ /*
+@@ -92,8 +90,7 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
+ void __init baboon_register_interrupts(void)
+ {
+ 	baboon_disabled = 0;
+-	if (request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon))
+-		pr_err("Couldn't register baboon interrupt\n");
++	irq_set_chained_handler(IRQ_NUBUS_C, baboon_irq);
+ }
+ 
+ /*
+@@ -111,7 +108,7 @@ void baboon_irq_enable(int irq)
+ 
+ 	baboon_disabled &= ~(1 << irq_idx);
+ 	if (!baboon_disabled)
+-		mac_enable_irq(IRQ_NUBUS_C);
++		mac_irq_enable(irq_get_irq_data(IRQ_NUBUS_C));
+ }
+ 
+ void baboon_irq_disable(int irq)
+@@ -124,7 +121,7 @@ void baboon_irq_disable(int irq)
+ 
+ 	baboon_disabled |= 1 << irq_idx;
+ 	if (baboon_disabled)
+-		mac_disable_irq(IRQ_NUBUS_C);
++		mac_irq_disable(irq_get_irq_data(IRQ_NUBUS_C));
+ }
+ 
+ void baboon_irq_clear(int irq)
+diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
+index 1ad4e9d..a5462cc 100644
+--- a/arch/m68k/mac/iop.c
++++ b/arch/m68k/mac/iop.c
+@@ -305,15 +305,13 @@ void __init iop_register_interrupts(void)
+ {
+ 	if (iop_ism_present) {
+ 		if (oss_present) {
+-			if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq,
+-					IRQ_FLG_LOCK, "ISM IOP",
+-					(void *) IOP_NUM_ISM))
++			if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq, 0,
++					"ISM IOP", (void *)IOP_NUM_ISM))
+ 				pr_err("Couldn't register ISM IOP interrupt\n");
+ 			oss_irq_enable(IRQ_MAC_ADB);
+ 		} else {
+-			if (request_irq(IRQ_VIA2_0, iop_ism_irq,
+-					IRQ_FLG_LOCK|IRQ_FLG_FAST, "ISM IOP",
+-					(void *) IOP_NUM_ISM))
++			if (request_irq(IRQ_VIA2_0, iop_ism_irq, 0, "ISM IOP",
++					(void *)IOP_NUM_ISM))
+ 				pr_err("Couldn't register ISM IOP interrupt\n");
+ 		}
+ 		if (!iop_alive(iop_base[IOP_NUM_ISM])) {
+diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
+index f92190c..ba220b7 100644
+--- a/arch/m68k/mac/macints.c
++++ b/arch/m68k/mac/macints.c
+@@ -190,14 +190,10 @@ irqreturn_t mac_debug_handler(int, void *);
+ 
+ /* #define DEBUG_MACINTS */
+ 
+-void mac_enable_irq(unsigned int irq);
+-void mac_disable_irq(unsigned int irq);
+-
+-static struct irq_controller mac_irq_controller = {
++static struct irq_chip mac_irq_chip = {
+ 	.name		= "mac",
+-	.lock		= __SPIN_LOCK_UNLOCKED(mac_irq_controller.lock),
+-	.enable		= mac_enable_irq,
+-	.disable	= mac_disable_irq,
++	.irq_enable	= mac_irq_enable,
++	.irq_disable	= mac_irq_disable,
+ };
+ 
+ void __init mac_init_IRQ(void)
+@@ -205,7 +201,7 @@ void __init mac_init_IRQ(void)
+ #ifdef DEBUG_MACINTS
+ 	printk("mac_init_IRQ(): Setting things up...\n");
+ #endif
+-	m68k_setup_irq_controller(&mac_irq_controller, IRQ_USER,
++	m68k_setup_irq_controller(&mac_irq_chip, handle_simple_irq, IRQ_USER,
+ 				  NUM_MAC_SOURCES - IRQ_USER);
+ 	/* Make sure the SONIC interrupt is cleared or things get ugly */
+ #ifdef SHUTUP_SONIC
+@@ -241,16 +237,17 @@ void __init mac_init_IRQ(void)
+ }
+ 
+ /*
+- *  mac_enable_irq - enable an interrupt source
+- * mac_disable_irq - disable an interrupt source
++ *  mac_irq_enable - enable an interrupt source
++ * mac_irq_disable - disable an interrupt source
+  *   mac_clear_irq - clears a pending interrupt
+- * mac_pending_irq - Returns the pending status of an IRQ (nonzero = pending)
++ * mac_irq_pending - returns the pending status of an IRQ (nonzero = pending)
+  *
+  * These routines are just dispatchers to the VIA/OSS/PSC routines.
+  */
+ 
+-void mac_enable_irq(unsigned int irq)
++void mac_irq_enable(struct irq_data *data)
+ {
++	int irq = data->irq;
+ 	int irq_src = IRQ_SRC(irq);
+ 
+ 	switch(irq_src) {
+@@ -283,8 +280,9 @@ void mac_enable_irq(unsigned int irq)
+ 	}
+ }
+ 
+-void mac_disable_irq(unsigned int irq)
++void mac_irq_disable(struct irq_data *data)
+ {
++	int irq = data->irq;
+ 	int irq_src = IRQ_SRC(irq);
+ 
+ 	switch(irq_src) {
+diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
+index a9c0f5a..a4c82da 100644
+--- a/arch/m68k/mac/oss.c
++++ b/arch/m68k/mac/oss.c
+@@ -19,6 +19,7 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+ #include <linux/init.h>
++#include <linux/irq.h>
+ 
+ #include <asm/bootinfo.h>
+ #include <asm/macintosh.h>
+@@ -29,10 +30,7 @@
+ int oss_present;
+ volatile struct mac_oss *oss;
+ 
+-static irqreturn_t oss_irq(int, void *);
+-static irqreturn_t oss_nubus_irq(int, void *);
+-
+-extern irqreturn_t via1_irq(int, void *);
++extern void via1_irq(unsigned int irq, struct irq_desc *desc);
+ 
+ /*
+  * Initialize the OSS
+@@ -60,26 +58,6 @@ void __init oss_init(void)
+ }
+ 
+ /*
+- * Register the OSS and NuBus interrupt dispatchers.
+- */
+-
+-void __init oss_register_interrupts(void)
+-{
+-	if (request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
+-			"scsi", (void *) oss))
+-		pr_err("Couldn't register %s interrupt\n", "scsi");
+-	if (request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
+-			"nubus", (void *) oss))
+-		pr_err("Couldn't register %s interrupt\n", "nubus");
+-	if (request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
+-			"sound", (void *) oss))
+-		pr_err("Couldn't register %s interrupt\n", "sound");
+-	if (request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
+-			"via1", (void *) via1))
+-		pr_err("Couldn't register %s interrupt\n", "via1");
+-}
+-
+-/*
+  * Initialize OSS for Nubus access
+  */
+ 
+@@ -92,17 +70,17 @@ void __init oss_nubus_init(void)
+  * and SCSI; everything else is routed to its own autovector IRQ.
+  */
+ 
+-static irqreturn_t oss_irq(int irq, void *dev_id)
++static void oss_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ 	int events;
+ 
+ 	events = oss->irq_pending & (OSS_IP_SOUND|OSS_IP_SCSI);
+ 	if (!events)
+-		return IRQ_NONE;
++		return;
+ 
+ #ifdef DEBUG_IRQS
+ 	if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
+-		printk("oss_irq: irq %d events = 0x%04X\n", irq,
++		printk("oss_irq: irq %u events = 0x%04X\n", irq,
+ 			(int) oss->irq_pending);
+ 	}
+ #endif
+@@ -113,11 +91,10 @@ static irqreturn_t oss_irq(int irq, void *dev_id)
+ 		/* FIXME: call sound handler */
+ 	} else if (events & OSS_IP_SCSI) {
+ 		oss->irq_pending &= ~OSS_IP_SCSI;
+-		m68k_handle_int(IRQ_MAC_SCSI);
++		generic_handle_irq(IRQ_MAC_SCSI);
+ 	} else {
+ 		/* FIXME: error check here? */
+ 	}
+-	return IRQ_HANDLED;
+ }
+ 
+ /*
+@@ -126,13 +103,13 @@ static irqreturn_t oss_irq(int irq, void *dev_id)
+  * Unlike the VIA/RBV this is on its own autovector interrupt level.
+  */
+ 
+-static irqreturn_t oss_nubus_irq(int irq, void *dev_id)
++static void oss_nubus_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ 	int events, irq_bit, i;
+ 
+ 	events = oss->irq_pending & OSS_IP_NUBUS;
+ 	if (!events)
+-		return IRQ_NONE;
++		return;
+ 
+ #ifdef DEBUG_NUBUS_INT
+ 	if (console_loglevel > 7) {
+@@ -148,10 +125,21 @@ static irqreturn_t oss_nubus_irq(int irq, void *dev_id)
+ 		irq_bit >>= 1;
+ 		if (events & irq_bit) {
+ 			oss->irq_pending &= ~irq_bit;
+-			m68k_handle_int(NUBUS_SOURCE_BASE + i);
++			generic_handle_irq(NUBUS_SOURCE_BASE + i);
+ 		}
+ 	} while(events & (irq_bit - 1));
+-	return IRQ_HANDLED;
++}
++
++/*
++ * Register the OSS and NuBus interrupt dispatchers.
++ */
++
++void __init oss_register_interrupts(void)
++{
++	irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_irq);
++	irq_set_chained_handler(OSS_IRQLEV_NUBUS, oss_nubus_irq);
++	irq_set_chained_handler(OSS_IRQLEV_SOUND, oss_irq);
++	irq_set_chained_handler(OSS_IRQLEV_VIA1, via1_irq);
+ }
+ 
+ /*
+diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
+index a4c3eb6..e6c2d20 100644
+--- a/arch/m68k/mac/psc.c
++++ b/arch/m68k/mac/psc.c
+@@ -18,6 +18,7 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+ #include <linux/init.h>
++#include <linux/irq.h>
+ 
+ #include <asm/traps.h>
+ #include <asm/bootinfo.h>
+@@ -30,8 +31,6 @@
+ int psc_present;
+ volatile __u8 *psc;
+ 
+-irqreturn_t psc_irq(int, void *);
+-
+ /*
+  * Debugging dump, used in various places to see what's going on.
+  */
+@@ -112,52 +111,52 @@ void __init psc_init(void)
+ }
+ 
+ /*
+- * Register the PSC interrupt dispatchers for autovector interrupts 3-6.
+- */
+-
+-void __init psc_register_interrupts(void)
+-{
+-	if (request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30))
+-		pr_err("Couldn't register psc%d interrupt\n", 3);
+-	if (request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40))
+-		pr_err("Couldn't register psc%d interrupt\n", 4);
+-	if (request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50))
+-		pr_err("Couldn't register psc%d interrupt\n", 5);
+-	if (request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60))
+-		pr_err("Couldn't register psc%d interrupt\n", 6);
+-}
+-
+-/*
+  * PSC interrupt handler. It's a lot like the VIA interrupt handler.
+  */
+ 
+-irqreturn_t psc_irq(int irq, void *dev_id)
++static void psc_irq(unsigned int irq, struct irq_desc *desc)
+ {
+-	int pIFR	= pIFRbase + ((int) dev_id);
+-	int pIER	= pIERbase + ((int) dev_id);
++	unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
++	int pIFR	= pIFRbase + offset;
++	int pIER	= pIERbase + offset;
+ 	int irq_num;
+ 	unsigned char irq_bit, events;
+ 
+ #ifdef DEBUG_IRQS
+-	printk("psc_irq: irq %d pIFR = 0x%02X pIER = 0x%02X\n",
++	printk("psc_irq: irq %u pIFR = 0x%02X pIER = 0x%02X\n",
+ 		irq, (int) psc_read_byte(pIFR), (int) psc_read_byte(pIER));
+ #endif
+ 
+ 	events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF;
+ 	if (!events)
+-		return IRQ_NONE;
++		return;
+ 
+ 	irq_num = irq << 3;
+ 	irq_bit = 1;
+ 	do {
+ 		if (events & irq_bit) {
+ 			psc_write_byte(pIFR, irq_bit);
+-			m68k_handle_int(irq_num);
++			generic_handle_irq(irq_num);
+ 		}
+ 		irq_num++;
+ 		irq_bit <<= 1;
+ 	} while (events >= irq_bit);
+-	return IRQ_HANDLED;
++}
++
++/*
++ * Register the PSC interrupt dispatchers for autovector interrupts 3-6.
++ */
++
++void __init psc_register_interrupts(void)
++{
++	irq_set_chained_handler(IRQ_AUTO_3, psc_irq);
++	irq_set_handler_data(IRQ_AUTO_3, (void *)0x30);
++	irq_set_chained_handler(IRQ_AUTO_4, psc_irq);
++	irq_set_handler_data(IRQ_AUTO_4, (void *)0x40);
++	irq_set_chained_handler(IRQ_AUTO_5, psc_irq);
++	irq_set_handler_data(IRQ_AUTO_5, (void *)0x50);
++	irq_set_chained_handler(IRQ_AUTO_6, psc_irq);
++	irq_set_handler_data(IRQ_AUTO_6, (void *)0x60);
+ }
+ 
+ void psc_irq_enable(int irq) {
+diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
+index e71166d..f1600ad 100644
+--- a/arch/m68k/mac/via.c
++++ b/arch/m68k/mac/via.c
+@@ -28,6 +28,7 @@
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
++#include <linux/irq.h>
+ 
+ #include <asm/bootinfo.h>
+ #include <asm/macintosh.h>
+@@ -77,9 +78,6 @@ static int gIER,gIFR,gBufA,gBufB;
+ static u8 nubus_disabled;
+ 
+ void via_debug_dump(void);
+-irqreturn_t via1_irq(int, void *);
+-irqreturn_t via2_irq(int, void *);
+-irqreturn_t via_nubus_irq(int, void *);
+ void via_irq_enable(int irq);
+ void via_irq_disable(int irq);
+ void via_irq_clear(int irq);
+@@ -281,40 +279,11 @@ void __init via_init_clock(irq_handler_t func)
+ 	via1[vT1CL] = MAC_CLOCK_LOW;
+ 	via1[vT1CH] = MAC_CLOCK_HIGH;
+ 
+-	if (request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func))
++	if (request_irq(IRQ_MAC_TIMER_1, func, 0, "timer", func))
+ 		pr_err("Couldn't register %s interrupt\n", "timer");
+ }
+ 
+ /*
+- * Register the interrupt dispatchers for VIA or RBV machines only.
+- */
+-
+-void __init via_register_interrupts(void)
+-{
+-	if (via_alt_mapping) {
+-		if (request_irq(IRQ_AUTO_1, via1_irq,
+-				IRQ_FLG_LOCK|IRQ_FLG_FAST, "software",
+-				(void *) via1))
+-			pr_err("Couldn't register %s interrupt\n", "software");
+-		if (request_irq(IRQ_AUTO_6, via1_irq,
+-				IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
+-				(void *) via1))
+-			pr_err("Couldn't register %s interrupt\n", "via1");
+-	} else {
+-		if (request_irq(IRQ_AUTO_1, via1_irq,
+-				IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
+-				(void *) via1))
+-			pr_err("Couldn't register %s interrupt\n", "via1");
+-	}
+-	if (request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
+-			"via2", (void *) via2))
+-		pr_err("Couldn't register %s interrupt\n", "via2");
+-	if (request_irq(IRQ_MAC_NUBUS, via_nubus_irq,
+-			IRQ_FLG_LOCK|IRQ_FLG_FAST, "nubus", (void *) via2))
+-		pr_err("Couldn't register %s interrupt\n", "nubus");
+-}
+-
+-/*
+  * Debugging dump, used in various places to see what's going on.
+  */
+ 
+@@ -446,48 +415,46 @@ void __init via_nubus_init(void)
+  * via6522.c :-), disable/pending masks added.
+  */
+ 
+-irqreturn_t via1_irq(int irq, void *dev_id)
++void via1_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ 	int irq_num;
+ 	unsigned char irq_bit, events;
+ 
+ 	events = via1[vIFR] & via1[vIER] & 0x7F;
+ 	if (!events)
+-		return IRQ_NONE;
++		return;
+ 
+ 	irq_num = VIA1_SOURCE_BASE;
+ 	irq_bit = 1;
+ 	do {
+ 		if (events & irq_bit) {
+ 			via1[vIFR] = irq_bit;
+-			m68k_handle_int(irq_num);
++			generic_handle_irq(irq_num);
+ 		}
+ 		++irq_num;
+ 		irq_bit <<= 1;
+ 	} while (events >= irq_bit);
+-	return IRQ_HANDLED;
+ }
+ 
+-irqreturn_t via2_irq(int irq, void *dev_id)
++static void via2_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ 	int irq_num;
+ 	unsigned char irq_bit, events;
+ 
+ 	events = via2[gIFR] & via2[gIER] & 0x7F;
+ 	if (!events)
+-		return IRQ_NONE;
++		return;
+ 
+ 	irq_num = VIA2_SOURCE_BASE;
+ 	irq_bit = 1;
+ 	do {
+ 		if (events & irq_bit) {
+ 			via2[gIFR] = irq_bit | rbv_clear;
+-			m68k_handle_int(irq_num);
++			generic_handle_irq(irq_num);
+ 		}
+ 		++irq_num;
+ 		irq_bit <<= 1;
+ 	} while (events >= irq_bit);
+-	return IRQ_HANDLED;
+ }
+ 
+ /*
+@@ -495,7 +462,7 @@ irqreturn_t via2_irq(int irq, void *dev_id)
+  * VIA2 dispatcher as a fast interrupt handler.
+  */
+ 
+-irqreturn_t via_nubus_irq(int irq, void *dev_id)
++void via_nubus_irq(unsigned int irq, struct irq_desc *desc)
+ {
+ 	int slot_irq;
+ 	unsigned char slot_bit, events;
+@@ -506,7 +473,7 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id)
+ 	else
+ 		events &= ~via2[vDirA];
+ 	if (!events)
+-		return IRQ_NONE;
++		return;
+ 
+ 	do {
+ 		slot_irq = IRQ_NUBUS_F;
+@@ -514,7 +481,7 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id)
+ 		do {
+ 			if (events & slot_bit) {
+ 				events &= ~slot_bit;
+-				m68k_handle_int(slot_irq);
++				generic_handle_irq(slot_irq);
+ 			}
+ 			--slot_irq;
+ 			slot_bit >>= 1;
+@@ -528,7 +495,24 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id)
+ 		else
+ 			events &= ~via2[vDirA];
+ 	} while (events);
+-	return IRQ_HANDLED;
++}
++
++/*
++ * Register the interrupt dispatchers for VIA or RBV machines only.
++ */
++
++void __init via_register_interrupts(void)
++{
++	if (via_alt_mapping) {
++		/* software interrupt */
++		irq_set_chained_handler(IRQ_AUTO_1, via1_irq);
++		/* via1 interrupt */
++		irq_set_chained_handler(IRQ_AUTO_6, via1_irq);
++	} else {
++		irq_set_chained_handler(IRQ_AUTO_1, via1_irq);
++	}
++	irq_set_chained_handler(IRQ_AUTO_2, via2_irq);
++	irq_set_chained_handler(IRQ_MAC_NUBUS, via_nubus_irq);
+ }
+ 
+ void via_irq_enable(int irq) {
+diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
+index 6cb9c3a..5de924e 100644
+--- a/arch/m68k/mvme147/config.c
++++ b/arch/m68k/mvme147/config.c
+@@ -81,7 +81,7 @@ static void mvme147_get_model(char *model)
+ 
+ void __init mvme147_init_IRQ(void)
+ {
+-	m68k_setup_user_interrupt(VEC_USER, 192, NULL);
++	m68k_setup_user_interrupt(VEC_USER, 192);
+ }
+ 
+ void __init config_mvme147(void)
+@@ -114,8 +114,7 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
+ void mvme147_sched_init (irq_handler_t timer_routine)
+ {
+ 	tick_handler = timer_routine;
+-	if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, IRQ_FLG_REPLACE,
+-			"timer 1", NULL))
++	if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1", NULL))
+ 		pr_err("Couldn't register timer interrupt\n");
+ 
+ 	/* Init the clock with a value */
+diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
+index 0b28e26..31a66d9 100644
+--- a/arch/m68k/mvme16x/config.c
++++ b/arch/m68k/mvme16x/config.c
+@@ -117,7 +117,7 @@ static void mvme16x_get_hardware_list(struct seq_file *m)
+ 
+ static void __init mvme16x_init_IRQ (void)
+ {
+-	m68k_setup_user_interrupt(VEC_USER, 192, NULL);
++	m68k_setup_user_interrupt(VEC_USER, 192);
+ }
+ 
+ #define pcc2chip	((volatile u_char *)0xfff42000)
+diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
+index 9f0e3d5..2b88849 100644
+--- a/arch/m68k/q40/q40ints.c
++++ b/arch/m68k/q40/q40ints.c
+@@ -15,10 +15,10 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ 
+ #include <asm/ptrace.h>
+ #include <asm/system.h>
+-#include <asm/irq.h>
+ #include <asm/traps.h>
+ 
+ #include <asm/q40_master.h>
+@@ -35,35 +35,36 @@
+ */
+ 
+ static void q40_irq_handler(unsigned int, struct pt_regs *fp);
+-static void q40_enable_irq(unsigned int);
+-static void q40_disable_irq(unsigned int);
++static void q40_irq_enable(struct irq_data *data);
++static void q40_irq_disable(struct irq_data *data);
+ 
+ unsigned short q40_ablecount[35];
+ unsigned short q40_state[35];
+ 
+-static int q40_irq_startup(unsigned int irq)
++static unsigned int q40_irq_startup(struct irq_data *data)
+ {
++	unsigned int irq = data->irq;
++
+ 	/* test for ISA ints not implemented by HW */
+ 	switch (irq) {
+ 	case 1: case 2: case 8: case 9:
+ 	case 11: case 12: case 13:
+ 		printk("%s: ISA IRQ %d not implemented by HW\n", __func__, irq);
+-		return -ENXIO;
++		/* FIXME return -ENXIO; */
+ 	}
+ 	return 0;
+ }
+ 
+-static void q40_irq_shutdown(unsigned int irq)
++static void q40_irq_shutdown(struct irq_data *data)
+ {
+ }
+ 
+-static struct irq_controller q40_irq_controller = {
++static struct irq_chip q40_irq_chip = {
+ 	.name		= "q40",
+-	.lock		= __SPIN_LOCK_UNLOCKED(q40_irq_controller.lock),
+-	.startup	= q40_irq_startup,
+-	.shutdown	= q40_irq_shutdown,
+-	.enable		= q40_enable_irq,
+-	.disable	= q40_disable_irq,
++	.irq_startup	= q40_irq_startup,
++	.irq_shutdown	= q40_irq_shutdown,
++	.irq_enable	= q40_irq_enable,
++	.irq_disable	= q40_irq_disable,
+ };
+ 
+ /*
+@@ -81,13 +82,14 @@ static int disabled;
+ 
+ void __init q40_init_IRQ(void)
+ {
+-	m68k_setup_irq_controller(&q40_irq_controller, 1, Q40_IRQ_MAX);
++	m68k_setup_irq_controller(&q40_irq_chip, handle_simple_irq, 1,
++				  Q40_IRQ_MAX);
+ 
+ 	/* setup handler for ISA ints */
+ 	m68k_setup_auto_interrupt(q40_irq_handler);
+ 
+-	m68k_irq_startup(IRQ_AUTO_2);
+-	m68k_irq_startup(IRQ_AUTO_4);
++	m68k_irq_startup_irq(IRQ_AUTO_2);
++	m68k_irq_startup_irq(IRQ_AUTO_4);
+ 
+ 	/* now enable some ints.. */
+ 	master_outb(1, EXT_ENABLE_REG);  /* ISA IRQ 5-15 */
+@@ -218,11 +220,11 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
+ 	switch (irq) {
+ 	case 4:
+ 	case 6:
+-		__m68k_handle_int(Q40_IRQ_SAMPLE, fp);
++		do_IRQ(Q40_IRQ_SAMPLE, fp);
+ 		return;
+ 	}
+ 	if (mir & Q40_IRQ_FRAME_MASK) {
+-		__m68k_handle_int(Q40_IRQ_FRAME, fp);
++		do_IRQ(Q40_IRQ_FRAME, fp);
+ 		master_outb(-1, FRAME_CLEAR_REG);
+ 	}
+ 	if ((mir & Q40_IRQ_SER_MASK) || (mir & Q40_IRQ_EXT_MASK)) {
+@@ -257,7 +259,7 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
+ 					goto iirq;
+ 				}
+ 				q40_state[irq] |= IRQ_INPROGRESS;
+-				__m68k_handle_int(irq, fp);
++				do_IRQ(irq, fp);
+ 				q40_state[irq] &= ~IRQ_INPROGRESS;
+ 
+ 				/* naively enable everything, if that fails than    */
+@@ -288,25 +290,29 @@ static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
+ 	mir = master_inb(IIRQ_REG);
+ 	/* should test whether keyboard irq is really enabled, doing it in defhand */
+ 	if (mir & Q40_IRQ_KEYB_MASK)
+-		__m68k_handle_int(Q40_IRQ_KEYBOARD, fp);
++		do_IRQ(Q40_IRQ_KEYBOARD, fp);
+ 
+ 	return;
+ }
+ 
+-void q40_enable_irq(unsigned int irq)
++void q40_irq_enable(struct irq_data *data)
+ {
++	unsigned int irq = data->irq;
++
+ 	if (irq >= 5 && irq <= 15) {
+ 		mext_disabled--;
+ 		if (mext_disabled > 0)
+-			printk("q40_enable_irq : nested disable/enable\n");
++			printk("q40_irq_enable : nested disable/enable\n");
+ 		if (mext_disabled == 0)
+ 			master_outb(1, EXT_ENABLE_REG);
+ 	}
+ }
+ 
+ 
+-void q40_disable_irq(unsigned int irq)
++void q40_irq_disable(struct irq_data *data)
+ {
++	unsigned int irq = data->irq;
++
+ 	/* disable ISA iqs : only do something if the driver has been
+ 	 * verified to be Q40 "compatible" - right now IDE, NE2K
+ 	 * Any driver should not attempt to sleep across disable_irq !!
+@@ -319,13 +325,3 @@ void q40_disable_irq(unsigned int irq)
+ 			printk("disable_irq nesting count %d\n",mext_disabled);
+ 	}
+ }
+-
+-unsigned long q40_probe_irq_on(void)
+-{
+-	printk("irq probing not working - reconfigure the driver to avoid this\n");
+-	return -1;
+-}
+-int q40_probe_irq_off(unsigned long irqs)
+-{
+-	return -1;
+-}
+diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
+index 6464ad3..78b60f5 100644
+--- a/arch/m68k/sun3/sun3ints.c
++++ b/arch/m68k/sun3/sun3ints.c
+@@ -51,25 +51,29 @@ void sun3_disable_irq(unsigned int irq)
+ 
+ static irqreturn_t sun3_int7(int irq, void *dev_id)
+ {
+-	*sun3_intreg |=  (1 << irq);
+-	if (!(kstat_cpu(0).irqs[irq] % 2000))
+-		sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 16000) / 2000]);
++	unsigned int cnt;
++
++	cnt = kstat_irqs_cpu(irq, 0);
++	if (!(cnt % 2000))
++		sun3_leds(led_pattern[cnt % 16000 / 2000]);
+ 	return IRQ_HANDLED;
+ }
+ 
+ static irqreturn_t sun3_int5(int irq, void *dev_id)
+ {
++	unsigned int cnt;
++
+ #ifdef CONFIG_SUN3
+ 	intersil_clear();
+ #endif
+-        *sun3_intreg |=  (1 << irq);
+ #ifdef CONFIG_SUN3
+ 	intersil_clear();
+ #endif
+ 	xtime_update(1);
+ 	update_process_times(user_mode(get_irq_regs()));
+-        if (!(kstat_cpu(0).irqs[irq] % 20))
+-                sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
++	cnt = kstat_irqs_cpu(irq, 0);
++	if (!(cnt % 20))
++		sun3_leds(led_pattern[cnt % 160 / 20]);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -79,29 +83,33 @@ static irqreturn_t sun3_vec255(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void sun3_inthandle(unsigned int irq, struct pt_regs *fp)
++static void sun3_irq_enable(struct irq_data *data)
+ {
+-        *sun3_intreg &= ~(1 << irq);
++    sun3_enable_irq(data->irq);
++};
+ 
+-	__m68k_handle_int(irq, fp);
+-}
++static void sun3_irq_disable(struct irq_data *data)
++{
++    sun3_disable_irq(data->irq);
++};
+ 
+-static struct irq_controller sun3_irq_controller = {
++static struct irq_chip sun3_irq_chip = {
+ 	.name		= "sun3",
+-	.lock		= __SPIN_LOCK_UNLOCKED(sun3_irq_controller.lock),
+-	.startup	= m68k_irq_startup,
+-	.shutdown	= m68k_irq_shutdown,
+-	.enable		= sun3_enable_irq,
+-	.disable	= sun3_disable_irq,
++	.irq_startup	= m68k_irq_startup,
++	.irq_shutdown	= m68k_irq_shutdown,
++	.irq_enable	= sun3_irq_enable,
++	.irq_disable	= sun3_irq_disable,
++	.irq_mask	= sun3_irq_disable,
++	.irq_unmask	= sun3_irq_enable,
+ };
+ 
+ void __init sun3_init_IRQ(void)
+ {
+ 	*sun3_intreg = 1;
+ 
+-	m68k_setup_auto_interrupt(sun3_inthandle);
+-	m68k_setup_irq_controller(&sun3_irq_controller, IRQ_AUTO_1, 7);
+-	m68k_setup_user_interrupt(VEC_USER, 128, NULL);
++	m68k_setup_irq_controller(&sun3_irq_chip, handle_level_irq, IRQ_AUTO_1,
++				  7);
++	m68k_setup_user_interrupt(VEC_USER, 128);
+ 
+ 	if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL))
+ 		pr_err("Couldn't register %s interrupt\n", "int5");
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index 9b4cb00..0be3186 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -286,11 +286,11 @@ CLEAN_FILES += vmlinux.32 vmlinux.64
+ archprepare:
+ ifdef CONFIG_MIPS32_N32
+ 	@echo '  Checking missing-syscalls for N32'
+-	$(Q)$(MAKE) $(build)=. missing-syscalls ccflags-y="-mabi=n32"
++	$(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=n32"
+ endif
+ ifdef CONFIG_MIPS32_O32
+ 	@echo '  Checking missing-syscalls for O32'
+-	$(Q)$(MAKE) $(build)=. missing-syscalls ccflags-y="-mabi=32"
++	$(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=32"
+ endif
+ 
+ install:
+diff --git a/arch/powerpc/boot/dts/charon.dts b/arch/powerpc/boot/dts/charon.dts
+new file mode 100644
+index 0000000..0e00e50
+--- /dev/null
++++ b/arch/powerpc/boot/dts/charon.dts
+@@ -0,0 +1,236 @@
++/*
++ * charon board Device Tree Source
++ *
++ * Copyright (C) 2007 Semihalf
++ * Marian Balakowicz <m8@semihalf.com>
++ *
++ * Copyright (C) 2010 DENX Software Engineering GmbH
++ * Heiko Schocher <hs@denx.de>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ */
++
++/dts-v1/;
++
++/ {
++	model = "anon,charon";
++	compatible = "anon,charon";
++	#address-cells = <1>;
++	#size-cells = <1>;
++	interrupt-parent = <&mpc5200_pic>;
++
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		PowerPC,5200@0 {
++			device_type = "cpu";
++			reg = <0>;
++			d-cache-line-size = <32>;
++			i-cache-line-size = <32>;
++			d-cache-size = <0x4000>;	// L1, 16K
++			i-cache-size = <0x4000>;	// L1, 16K
++			timebase-frequency = <0>;	// from bootloader
++			bus-frequency = <0>;		// from bootloader
++			clock-frequency = <0>;		// from bootloader
++		};
++	};
++
++	memory {
++		device_type = "memory";
++		reg = <0x00000000 0x08000000>;	// 128MB
++	};
++
++	soc5200@f0000000 {
++		#address-cells = <1>;
++		#size-cells = <1>;
++		compatible = "fsl,mpc5200-immr";
++		ranges = <0 0xf0000000 0x0000c000>;
++		reg = <0xf0000000 0x00000100>;
++		bus-frequency = <0>;		// from bootloader
++		system-frequency = <0>;		// from bootloader
++
++		cdm@200 {
++			compatible = "fsl,mpc5200-cdm";
++			reg = <0x200 0x38>;
++		};
++
++		mpc5200_pic: interrupt-controller@500 {
++			// 5200 interrupts are encoded into two levels;
++			interrupt-controller;
++			#interrupt-cells = <3>;
++			compatible = "fsl,mpc5200-pic";
++			reg = <0x500 0x80>;
++		};
++
++		timer@600 {	// General Purpose Timer
++			compatible = "fsl,mpc5200-gpt";
++			reg = <0x600 0x10>;
++			interrupts = <1 9 0>;
++			fsl,has-wdt;
++		};
++
++		can@900 {
++			compatible = "fsl,mpc5200-mscan";
++			interrupts = <2 17 0>;
++			reg = <0x900 0x80>;
++		};
++
++		can@980 {
++			compatible = "fsl,mpc5200-mscan";
++			interrupts = <2 18 0>;
++			reg = <0x980 0x80>;
++		};
++
++		gpio_simple: gpio@b00 {
++			compatible = "fsl,mpc5200-gpio";
++			reg = <0xb00 0x40>;
++			interrupts = <1 7 0>;
++			gpio-controller;
++			#gpio-cells = <2>;
++		};
++
++		usb@1000 {
++			compatible = "fsl,mpc5200-ohci","ohci-be";
++			reg = <0x1000 0xff>;
++			interrupts = <2 6 0>;
++		};
++
++		dma-controller@1200 {
++			device_type = "dma-controller";
++			compatible = "fsl,mpc5200-bestcomm";
++			reg = <0x1200 0x80>;
++			interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
++			              3 4 0  3 5 0  3 6 0  3 7 0
++			              3 8 0  3 9 0  3 10 0  3 11 0
++			              3 12 0  3 13 0  3 14 0  3 15 0>;
++		};
++
++		xlb@1f00 {
++			compatible = "fsl,mpc5200-xlb";
++			reg = <0x1f00 0x100>;
++		};
++
++		serial@2000 {		// PSC1
++			compatible = "fsl,mpc5200-psc-uart";
++			reg = <0x2000 0x100>;
++			interrupts = <2 1 0>;
++		};
++
++		serial@2400 {		// PSC3
++			compatible = "fsl,mpc5200-psc-uart";
++			reg = <0x2400 0x100>;
++			interrupts = <2 3 0>;
++		};
++
++		ethernet@3000 {
++			compatible = "fsl,mpc5200-fec";
++			reg = <0x3000 0x400>;
++			local-mac-address = [ 00 00 00 00 00 00 ];
++			interrupts = <2 5 0>;
++			fixed-link = <1 1 100 0 0>;
++		};
++
++		mdio@3000 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,mpc5200-mdio";
++			reg = <0x3000 0x400>;       // fec range, since we need to setup fec interrupts
++			interrupts = <2 5 0>;   // these are for "mii command finished", not link changes & co.
++		};
++
++		ata@3a00 {
++			compatible = "fsl,mpc5200-ata";
++			reg = <0x3a00 0x100>;
++			interrupts = <2 7 0>;
++		};
++
++		i2c@3d00 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,mpc5200-i2c","fsl-i2c";
++			reg = <0x3d00 0x40>;
++			interrupts = <2 15 0>;
++		};
++
++
++		i2c@3d40 {
++			#address-cells = <1>;
++			#size-cells = <0>;
++			compatible = "fsl,mpc5200-i2c","fsl-i2c";
++			reg = <0x3d40 0x40>;
++			interrupts = <2 16 0>;
++
++			dtt@28 {
++				compatible = "national,lm80";
++				reg = <0x28>;
++			};
++
++			rtc@68 {
++				compatible = "dallas,ds1374";
++				reg = <0x68>;
++			};
++		};
++
++		sram@8000 {
++			compatible = "fsl,mpc5200-sram";
++			reg = <0x8000 0x4000>;
++		};
++	};
++
++	localbus {
++		compatible = "fsl,mpc5200-lpb","simple-bus";
++		#address-cells = <2>;
++		#size-cells = <1>;
++		ranges = <	0 0 0xfc000000 0x02000000
++				1 0 0xe0000000 0x04000000 // CS1 range, SM501
++				3 0 0xe8000000 0x00080000>;
++
++		flash@0,0 {
++			compatible = "cfi-flash";
++			reg = <0 0 0x02000000>;
++			bank-width = <4>;
++			device-width = <2>;
++			#size-cells = <1>;
++			#address-cells = <1>;
++		};
++
++		display@1,0 {
++			compatible = "smi,sm501";
++			reg = <1 0x00000000 0x00800000
++			       1 0x03e00000 0x00200000>;
++			mode = "640x480-32@60";
++			interrupts = <1 1 3>;
++			little-endian;
++		};
++
++		mram0@3,0 {
++			compatible = "mtd-ram";
++			reg = <3 0x00000 0x80000>;
++			bank-width = <1>;
++		};
++	};
++
++	pci@f0000d00 {
++		#interrupt-cells = <1>;
++		#size-cells = <2>;
++		#address-cells = <3>;
++		device_type = "pci";
++		compatible = "fsl,mpc5200-pci";
++		reg = <0xf0000d00 0x100>;
++		interrupt-map-mask = <0xf800 0 0 7>;
++		interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3
++				 0xc000 0 0 2 &mpc5200_pic 0 0 3
++				 0xc000 0 0 3 &mpc5200_pic 0 0 3
++				 0xc000 0 0 4 &mpc5200_pic 0 0 3>;
++		clock-frequency = <0>; // From boot loader
++		interrupts = <2 8 0 2 9 0 2 10 0>;
++		bus-range = <0 0>;
++		ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
++			  0x02000000 0 0x90000000 0x90000000 0 0x10000000
++			  0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
++	};
++};
+diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
+index 959cd2c..716a37b 100644
+--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
++++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
+@@ -1,9 +1,10 @@
+ CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
++CONFIG_SPARSE_IRQ=y
+ CONFIG_LOG_BUF_SHIFT=14
+ CONFIG_BLK_DEV_INITRD=y
+ # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_EXPERT=y
++CONFIG_EMBEDDED=y
+ # CONFIG_SYSCTL_SYSCALL is not set
+ # CONFIG_KALLSYMS is not set
+ # CONFIG_EPOLL is not set
+@@ -17,7 +18,6 @@ CONFIG_PPC_MPC5200_SIMPLE=y
+ CONFIG_PPC_MPC5200_BUGFIX=y
+ # CONFIG_PPC_PMAC is not set
+ CONFIG_PPC_BESTCOMM=y
+-CONFIG_SPARSE_IRQ=y
+ CONFIG_PM=y
+ # CONFIG_PCI is not set
+ CONFIG_NET=y
+@@ -38,17 +38,18 @@ CONFIG_MTD=y
+ CONFIG_MTD_CONCAT=y
+ CONFIG_MTD_PARTITIONS=y
+ CONFIG_MTD_CMDLINE_PARTS=y
++CONFIG_MTD_OF_PARTS=y
+ CONFIG_MTD_CHAR=y
+ CONFIG_MTD_BLOCK=y
+ CONFIG_MTD_CFI=y
+ CONFIG_MTD_CFI_AMDSTD=y
+ CONFIG_MTD_ROM=y
+ CONFIG_MTD_PHYSMAP_OF=y
++CONFIG_MTD_PLATRAM=y
+ CONFIG_PROC_DEVICETREE=y
+ CONFIG_BLK_DEV_LOOP=y
+ CONFIG_BLK_DEV_RAM=y
+ CONFIG_BLK_DEV_RAM_SIZE=32768
+-# CONFIG_MISC_DEVICES is not set
+ CONFIG_BLK_DEV_SD=y
+ CONFIG_CHR_DEV_SG=y
+ CONFIG_ATA=y
+@@ -56,13 +57,11 @@ CONFIG_PATA_MPC52xx=y
+ CONFIG_PATA_PLATFORM=y
+ CONFIG_NETDEVICES=y
+ CONFIG_LXT_PHY=y
++CONFIG_FIXED_PHY=y
+ CONFIG_NET_ETHERNET=y
+ CONFIG_FEC_MPC52xx=y
+ # CONFIG_NETDEV_1000 is not set
+ # CONFIG_NETDEV_10000 is not set
+-# CONFIG_INPUT is not set
+-# CONFIG_SERIO is not set
+-# CONFIG_VT is not set
+ CONFIG_SERIAL_MPC52xx=y
+ CONFIG_SERIAL_MPC52xx_CONSOLE=y
+ CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
+@@ -70,7 +69,13 @@ CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
+ CONFIG_I2C=y
+ CONFIG_I2C_CHARDEV=y
+ CONFIG_I2C_MPC=y
++CONFIG_SENSORS_LM80=y
+ CONFIG_WATCHDOG=y
++CONFIG_MFD_SM501=y
++CONFIG_FB=y
++CONFIG_FB_FOREIGN_ENDIAN=y
++CONFIG_FB_SM501=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
+ CONFIG_USB=y
+ CONFIG_USB_DEVICEFS=y
+ # CONFIG_USB_DEVICE_CLASS is not set
+@@ -80,10 +85,10 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+ CONFIG_USB_STORAGE=y
+ CONFIG_RTC_CLASS=y
+ CONFIG_RTC_DRV_DS1307=y
++CONFIG_RTC_DRV_DS1374=y
+ CONFIG_EXT2_FS=y
+ CONFIG_EXT3_FS=y
+ # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+-CONFIG_INOTIFY=y
+ CONFIG_MSDOS_FS=y
+ CONFIG_VFAT_FS=y
+ CONFIG_PROC_KCORE=y
+@@ -102,7 +107,6 @@ CONFIG_DEBUG_KERNEL=y
+ CONFIG_DETECT_HUNG_TASK=y
+ # CONFIG_DEBUG_BUGVERBOSE is not set
+ CONFIG_DEBUG_INFO=y
+-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+ CONFIG_CRYPTO_ECB=y
+ CONFIG_CRYPTO_PCBC=y
+ # CONFIG_CRYPTO_ANSI_CPRNG is not set
+diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
+index 84a685a5..535711f 100644
+--- a/arch/powerpc/configs/ppc64_defconfig
++++ b/arch/powerpc/configs/ppc64_defconfig
+@@ -485,3 +485,7 @@ CONFIG_CRYPTO_TWOFISH=m
+ CONFIG_CRYPTO_LZO=m
+ # CONFIG_CRYPTO_ANSI_CPRNG is not set
+ # CONFIG_CRYPTO_HW is not set
++CONFIG_VIRTUALIZATION=y
++CONFIG_KVM_BOOK3S_64=m
++CONFIG_KVM_BOOK3S_64_HV=y
++CONFIG_VHOST_NET=m
+diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
+index 96a58b7..a72f241 100644
+--- a/arch/powerpc/configs/pseries_defconfig
++++ b/arch/powerpc/configs/pseries_defconfig
+@@ -362,3 +362,7 @@ CONFIG_CRYPTO_TWOFISH=m
+ CONFIG_CRYPTO_LZO=m
+ # CONFIG_CRYPTO_ANSI_CPRNG is not set
+ # CONFIG_CRYPTO_HW is not set
++CONFIG_VIRTUALIZATION=y
++CONFIG_KVM_BOOK3S_64=m
++CONFIG_KVM_BOOK3S_64_HV=y
++CONFIG_VHOST_NET=m
+diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
+index 24bd34c..936a904 100644
+--- a/arch/powerpc/include/asm/floppy.h
++++ b/arch/powerpc/include/asm/floppy.h
+@@ -108,10 +108,10 @@ static int fd_request_irq(void)
+ {
+ 	if (can_use_virtual_dma)
+ 		return request_irq(FLOPPY_IRQ, floppy_hardint,
+-				   IRQF_DISABLED, "floppy", NULL);
++				   0, "floppy", NULL);
+ 	else
+ 		return request_irq(FLOPPY_IRQ, floppy_interrupt,
+-				   IRQF_DISABLED, "floppy", NULL);
++				   0, "floppy", NULL);
+ }
+ 
+ static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
+index 9cd5fc8..f77c708 100644
+--- a/arch/powerpc/include/asm/lv1call.h
++++ b/arch/powerpc/include/asm/lv1call.h
+@@ -316,7 +316,7 @@ LV1_CALL(gpu_context_free,                              1, 0, 218 )
+ LV1_CALL(gpu_context_iomap,                             5, 0, 221 )
+ LV1_CALL(gpu_context_attribute,                         6, 0, 225 )
+ LV1_CALL(gpu_context_intr,                              1, 1, 227 )
+-LV1_CALL(gpu_attribute,                                 5, 0, 228 )
++LV1_CALL(gpu_attribute,                                 3, 0, 228 )
+ LV1_CALL(get_rtc,                                       0, 2, 232 )
+ LV1_CALL(set_ppe_periodic_tracer_frequency,             1, 0, 240 )
+ LV1_CALL(start_ppe_periodic_tracer,                     5, 0, 241 )
+diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
+index bd6c401..c48de98 100644
+--- a/arch/powerpc/include/asm/xics.h
++++ b/arch/powerpc/include/asm/xics.h
+@@ -15,8 +15,8 @@
+ #define	DEFAULT_PRIORITY	5
+ 
+ /*
+- * Mark IPIs as higher priority so we can take them inside interrupts that
+- * arent marked IRQF_DISABLED
++ * Mark IPIs as higher priority so we can take them inside interrupts
++ * FIXME: still true now?
+  */
+ #define IPI_PRIORITY		4
+ 
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index a54d92f..cf9c69b 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -267,7 +267,7 @@ vsx_unavailable_pSeries_1:
+ 
+ #ifdef CONFIG_CBE_RAS
+ 	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
+-	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
++	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
+ #endif /* CONFIG_CBE_RAS */
+ 
+ 	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
+@@ -275,7 +275,7 @@ vsx_unavailable_pSeries_1:
+ 
+ #ifdef CONFIG_CBE_RAS
+ 	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
+-	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
++	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
+ #endif /* CONFIG_CBE_RAS */
+ 
+ 	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
+@@ -283,7 +283,7 @@ vsx_unavailable_pSeries_1:
+ 
+ #ifdef CONFIG_CBE_RAS
+ 	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
+-	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
++	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
+ #endif /* CONFIG_CBE_RAS */
+ 
+ 	. = 0x3000
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 25ddbfc..6df7090 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -187,7 +187,7 @@ int smp_request_message_ipi(int virq, int msg)
+ 		return 1;
+ 	}
+ #endif
+-	err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
++	err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU,
+ 			  smp_ipi_name[msg], 0);
+ 	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
+ 		virq, smp_ipi_name[msg], err);
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index f422231..44d8829 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1263,7 +1263,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
+ 	addi	r6,r5,VCORE_NAPPING_THREADS
+ 31:	lwarx	r4,0,r6
+ 	or	r4,r4,r0
+-	popcntw	r7,r4
++	PPC_POPCNTW(r7,r4)
+ 	cmpw	r7,r8
+ 	bge	2f
+ 	stwcx.	r4,0,r6
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 16da595..2dd6bdd 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -34,6 +34,7 @@
+ #include <linux/suspend.h>
+ #include <linux/memblock.h>
+ #include <linux/hugetlb.h>
++#include <linux/slab.h>
+ 
+ #include <asm/pgalloc.h>
+ #include <asm/prom.h>
+@@ -555,3 +556,32 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ 		book3e_hugetlb_preload(vma->vm_mm, address, *ptep);
+ #endif
+ }
++
++/*
++ * System memory should not be in /proc/iomem but various tools expect it
++ * (eg kdump).
++ */
++static int add_system_ram_resources(void)
++{
++	struct memblock_region *reg;
++
++	for_each_memblock(memory, reg) {
++		struct resource *res;
++		unsigned long base = reg->base;
++		unsigned long size = reg->size;
++
++		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
++		WARN_ON(!res);
++
++		if (res) {
++			res->name = "System RAM";
++			res->start = base;
++			res->end = base + size - 1;
++			res->flags = IORESOURCE_MEM;
++			WARN_ON(request_resource(&iomem_resource, res) < 0);
++		}
++	}
++
++	return 0;
++}
++subsys_initcall(add_system_ram_resources);
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index c7dd4de..b22a83a 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -315,7 +315,10 @@ static int __init find_min_common_depth(void)
+ 	struct device_node *root;
+ 	const char *vec5;
+ 
+-	root = of_find_node_by_path("/rtas");
++	if (firmware_has_feature(FW_FEATURE_OPAL))
++		root = of_find_node_by_path("/ibm,opal");
++	else
++		root = of_find_node_by_path("/rtas");
+ 	if (!root)
+ 		root = of_find_node_by_path("/");
+ 
+@@ -344,12 +347,19 @@ static int __init find_min_common_depth(void)
+ 
+ #define VEC5_AFFINITY_BYTE	5
+ #define VEC5_AFFINITY		0x80
+-	chosen = of_find_node_by_path("/chosen");
+-	if (chosen) {
+-		vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
+-		if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
+-			dbg("Using form 1 affinity\n");
+-			form1_affinity = 1;
++
++	if (firmware_has_feature(FW_FEATURE_OPAL))
++		form1_affinity = 1;
++	else {
++		chosen = of_find_node_by_path("/chosen");
++		if (chosen) {
++			vec5 = of_get_property(chosen,
++					       "ibm,architecture-vec-5", NULL);
++			if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
++							VEC5_AFFINITY)) {
++				dbg("Using form 1 affinity\n");
++				form1_affinity = 1;
++			}
+ 		}
+ 	}
+ 
+diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
+index e36d6e2..846b789 100644
+--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
++++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
+@@ -50,6 +50,7 @@ static void __init mpc5200_simple_setup_arch(void)
+ 
+ /* list of the supported boards */
+ static const char *board[] __initdata = {
++	"anon,charon",
+ 	"intercontrol,digsy-mtc",
+ 	"manroland,mucmc52",
+ 	"manroland,uc101",
+diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
+index 232fc38..852592b 100644
+--- a/arch/powerpc/platforms/cell/beat.c
++++ b/arch/powerpc/platforms/cell/beat.c
+@@ -230,7 +230,7 @@ static int __init beat_register_event(void)
+ 		}
+ 		ev->virq = virq;
+ 
+-		rc = request_irq(virq, ev->handler, IRQF_DISABLED,
++		rc = request_irq(virq, ev->handler, 0,
+ 				      ev->typecode, NULL);
+ 		if (rc != 0) {
+ 			printk(KERN_ERR "Beat: failed to request virtual IRQ"
+diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+index ae790ac..14be2bd 100644
+--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
++++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+@@ -514,7 +514,7 @@ static __init int celleb_setup_pciex(struct device_node *node,
+ 	virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
+ 				     oirq.size);
+ 	if (request_irq(virq, pciex_handle_internal_irq,
+-			IRQF_DISABLED, "pciex", (void *)phb)) {
++			0, "pciex", (void *)phb)) {
+ 		pr_err("PCIEXC:Failed to request irq\n");
+ 		goto error;
+ 	}
+diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
+index fc46fca..592c3d5 100644
+--- a/arch/powerpc/platforms/cell/iommu.c
++++ b/arch/powerpc/platforms/cell/iommu.c
+@@ -412,8 +412,7 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
+ 			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
+ 	BUG_ON(virq == NO_IRQ);
+ 
+-	ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
+-			iommu->name, iommu);
++	ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
+ 	BUG_ON(ret);
+ 
+ 	/* set the IOC segment table origin register (and turn on the iommu) */
+diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
+index 1acf360..59c1a16 100644
+--- a/arch/powerpc/platforms/cell/pmu.c
++++ b/arch/powerpc/platforms/cell/pmu.c
+@@ -392,7 +392,7 @@ static int __init cbe_init_pm_irq(void)
+ 		}
+ 
+ 		rc = request_irq(irq, cbe_pm_irq,
+-				 IRQF_DISABLED, "cbe-pmu-0", NULL);
++				 0, "cbe-pmu-0", NULL);
+ 		if (rc) {
+ 			printk("ERROR: Request for irq on node %d failed\n",
+ 			       node);
+diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
+index 3675da7..e94d3ec 100644
+--- a/arch/powerpc/platforms/cell/spu_base.c
++++ b/arch/powerpc/platforms/cell/spu_base.c
+@@ -442,8 +442,7 @@ static int spu_request_irqs(struct spu *spu)
+ 		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
+ 			 spu->number);
+ 		ret = request_irq(spu->irqs[0], spu_irq_class_0,
+-				  IRQF_DISABLED,
+-				  spu->irq_c0, spu);
++				  0, spu->irq_c0, spu);
+ 		if (ret)
+ 			goto bail0;
+ 	}
+@@ -451,8 +450,7 @@ static int spu_request_irqs(struct spu *spu)
+ 		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
+ 			 spu->number);
+ 		ret = request_irq(spu->irqs[1], spu_irq_class_1,
+-				  IRQF_DISABLED,
+-				  spu->irq_c1, spu);
++				  0, spu->irq_c1, spu);
+ 		if (ret)
+ 			goto bail1;
+ 	}
+@@ -460,8 +458,7 @@ static int spu_request_irqs(struct spu *spu)
+ 		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
+ 			 spu->number);
+ 		ret = request_irq(spu->irqs[2], spu_irq_class_2,
+-				  IRQF_DISABLED,
+-				  spu->irq_c2, spu);
++				  0, spu->irq_c2, spu);
+ 		if (ret)
+ 			goto bail2;
+ 	}
+diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
+index cb40e92..901bfbd 100644
+--- a/arch/powerpc/platforms/powermac/pic.c
++++ b/arch/powerpc/platforms/powermac/pic.c
+@@ -272,7 +272,6 @@ static struct irqaction xmon_action = {
+ 
+ static struct irqaction gatwick_cascade_action = {
+ 	.handler	= gatwick_action,
+-	.flags		= IRQF_DISABLED,
+ 	.name		= "cascade",
+ };
+ 
+diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
+index 9a521dc..9b6a820 100644
+--- a/arch/powerpc/platforms/powermac/smp.c
++++ b/arch/powerpc/platforms/powermac/smp.c
+@@ -200,7 +200,7 @@ static int psurge_secondary_ipi_init(void)
+ 
+ 	if (psurge_secondary_virq)
+ 		rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
+-			IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
++			IRQF_PERCPU, "IPI", NULL);
+ 
+ 	if (rc)
+ 		pr_err("Failed to setup secondary cpu IPI\n");
+@@ -408,7 +408,7 @@ static int __init smp_psurge_kick_cpu(int nr)
+ 
+ static struct irqaction psurge_irqaction = {
+ 	.handler = psurge_ipi_intr,
+-	.flags = IRQF_DISABLED|IRQF_PERCPU,
++	.flags = IRQF_PERCPU,
+ 	.name = "primary IPI",
+ };
+ 
+diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
+index 6c4b583..3f175e8 100644
+--- a/arch/powerpc/platforms/ps3/device-init.c
++++ b/arch/powerpc/platforms/ps3/device-init.c
+@@ -825,7 +825,7 @@ static int ps3_probe_thread(void *data)
+ 
+ 	spin_lock_init(&dev.lock);
+ 
+-	res = request_irq(irq, ps3_notification_interrupt, IRQF_DISABLED,
++	res = request_irq(irq, ps3_notification_interrupt, 0,
+ 			  "ps3_notification", &dev);
+ 	if (res) {
+ 		pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__,
+diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
+index 5e304c2..ca40f6a 100644
+--- a/arch/powerpc/platforms/ps3/repository.c
++++ b/arch/powerpc/platforms/ps3/repository.c
+@@ -184,7 +184,7 @@ int ps3_repository_read_bus_type(unsigned int bus_index,
+ 	enum ps3_bus_type *bus_type)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 		make_first_field("bus", bus_index),
+@@ -199,7 +199,7 @@ int ps3_repository_read_bus_num_dev(unsigned int bus_index,
+ 	unsigned int *num_dev)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 		make_first_field("bus", bus_index),
+@@ -239,7 +239,7 @@ int ps3_repository_read_dev_type(unsigned int bus_index,
+ 	unsigned int dev_index, enum ps3_dev_type *dev_type)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 		make_first_field("bus", bus_index),
+@@ -256,8 +256,8 @@ int ps3_repository_read_dev_intr(unsigned int bus_index,
+ 	enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id)
+ {
+ 	int result;
+-	u64 v1;
+-	u64 v2;
++	u64 v1 = 0;
++	u64 v2 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 		make_first_field("bus", bus_index),
+@@ -275,7 +275,7 @@ int ps3_repository_read_dev_reg_type(unsigned int bus_index,
+ 	enum ps3_reg_type *reg_type)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 		make_first_field("bus", bus_index),
+@@ -615,7 +615,7 @@ int ps3_repository_read_stor_dev_num_regions(unsigned int bus_index,
+ 	unsigned int dev_index, unsigned int *num_regions)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 		make_first_field("bus", bus_index),
+@@ -631,7 +631,7 @@ int ps3_repository_read_stor_dev_region_id(unsigned int bus_index,
+ 	unsigned int *region_id)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 	    make_first_field("bus", bus_index),
+@@ -786,7 +786,7 @@ int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
+ int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_CURRENT,
+ 		make_first_field("bi", 0),
+@@ -805,7 +805,7 @@ int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
+ int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_CURRENT,
+ 		make_first_field("bi", 0),
+@@ -827,8 +827,8 @@ int ps3_repository_read_spu_resource_id(unsigned int res_index,
+ 	enum ps3_spu_resource_type *resource_type, unsigned int *resource_id)
+ {
+ 	int result;
+-	u64 v1;
+-	u64 v2;
++	u64 v1 = 0;
++	u64 v2 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_CURRENT,
+ 		make_first_field("bi", 0),
+@@ -854,7 +854,7 @@ static int ps3_repository_read_boot_dat_address(u64 *address)
+ int ps3_repository_read_boot_dat_size(unsigned int *size)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_CURRENT,
+ 		make_first_field("bi", 0),
+@@ -869,7 +869,7 @@ int ps3_repository_read_boot_dat_size(unsigned int *size)
+ int ps3_repository_read_vuart_av_port(unsigned int *port)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_CURRENT,
+ 		make_first_field("bi", 0),
+@@ -884,7 +884,7 @@ int ps3_repository_read_vuart_av_port(unsigned int *port)
+ int ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_CURRENT,
+ 		make_first_field("bi", 0),
+@@ -919,7 +919,7 @@ int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
+ int ps3_repository_read_num_be(unsigned int *num_be)
+ {
+ 	int result;
+-	u64 v1;
++	u64 v1 = 0;
+ 
+ 	result = read_node(PS3_LPAR_ID_PME,
+ 		make_first_field("ben", 0),
+diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
+index 0842c6f..8c7e852 100644
+--- a/arch/powerpc/sysdev/mpic.c
++++ b/arch/powerpc/sysdev/mpic.c
+@@ -800,8 +800,6 @@ static void mpic_end_ipi(struct irq_data *d)
+ 	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
+ 	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
+ 	 * applying to them. We EOI them late to avoid re-entering.
+-	 * We mark IPI's with IRQF_DISABLED as they must run with
+-	 * irqs disabled.
+ 	 */
+ 	mpic_eoi(mpic);
+ }
+diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c
+index d3d6ce3..0debcc3 100644
+--- a/arch/powerpc/sysdev/ppc4xx_soc.c
++++ b/arch/powerpc/sysdev/ppc4xx_soc.c
+@@ -115,7 +115,7 @@ static int __init ppc4xx_l2c_probe(void)
+ 	}
+ 
+ 	/* Install error handler */
+-	if (request_irq(irq, l2c_error_handler, IRQF_DISABLED, "L2C", 0) < 0) {
++	if (request_irq(irq, l2c_error_handler, 0, "L2C", 0) < 0) {
+ 		printk(KERN_ERR "Cannot install L2C error handler"
+ 		       ", cache is not enabled\n");
+ 		of_node_put(np);
+diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
+index 3d93a8d..63762c6 100644
+--- a/arch/powerpc/sysdev/xics/xics-common.c
++++ b/arch/powerpc/sysdev/xics/xics-common.c
+@@ -134,11 +134,10 @@ static void xics_request_ipi(void)
+ 	BUG_ON(ipi == NO_IRQ);
+ 
+ 	/*
+-	 * IPIs are marked IRQF_DISABLED as they must run with irqs
+-	 * disabled, and PERCPU.  The handler was set in map.
++	 * IPIs are marked IRQF_PERCPU. The handler was set in map.
+ 	 */
+ 	BUG_ON(request_irq(ipi, icp_ops->ipi_action,
+-			   IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL));
++			   IRQF_PERCPU, "IPI", NULL));
+ }
+ 
+ int __init xics_smp_probe(void)
+diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
+index 6260d5d..c7cb0af 100644
+--- a/arch/sparc/include/asm/unistd.h
++++ b/arch/sparc/include/asm/unistd.h
+@@ -406,8 +406,10 @@
+ #define __NR_syncfs		335
+ #define __NR_sendmmsg		336
+ #define __NR_setns		337
++#define __NR_process_vm_readv	338
++#define __NR_process_vm_writev	339
+ 
+-#define NR_syscalls		338
++#define NR_syscalls		340
+ 
+ #ifdef __32bit_syscall_numbers__
+ /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
+diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
+index 09d8ec4..63402f9 100644
+--- a/arch/sparc/kernel/systbls_32.S
++++ b/arch/sparc/kernel/systbls_32.S
+@@ -84,4 +84,4 @@ sys_call_table:
+ /*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+ /*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
+ /*330*/	.long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
+-/*335*/	.long sys_syncfs, sys_sendmmsg, sys_setns
++/*335*/	.long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
+index edbec45..db86b1a 100644
+--- a/arch/sparc/kernel/systbls_64.S
++++ b/arch/sparc/kernel/systbls_64.S
+@@ -85,7 +85,7 @@ sys_call_table32:
+ /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
+ 	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
+ /*330*/	.word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
+-	.word sys_syncfs, compat_sys_sendmmsg, sys_setns
++	.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
+ 
+ #endif /* CONFIG_COMPAT */
+ 
+@@ -162,4 +162,4 @@ sys_call_table:
+ /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+ 	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
+ /*330*/	.word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
+-	.word sys_syncfs, sys_sendmmsg, sys_setns
++	.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
+diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
+index 28071bb..4c61b52 100644
+--- a/arch/x86/platform/ce4100/ce4100.c
++++ b/arch/x86/platform/ce4100/ce4100.c
+@@ -109,7 +109,7 @@ static __init void sdv_serial_fixup(void)
+ }
+ 
+ #else
+-static inline void sdv_serial_fixup(void);
++static inline void sdv_serial_fixup(void) {};
+ #endif
+ 
+ static void __init sdv_arch_setup(void)
+diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
+index 6ed7afd..541020d 100644
+--- a/arch/x86/platform/mrst/mrst.c
++++ b/arch/x86/platform/mrst/mrst.c
+@@ -608,6 +608,7 @@ static void *msic_ocd_platform_data(void *info)
+ }
+ 
+ static const struct devs_id __initconst device_ids[] = {
++	{"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
+ 	{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
+ 	{"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
+ 	{"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
+diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c
+index a8ac6f1..225bd0f 100644
+--- a/arch/x86/platform/mrst/vrtc.c
++++ b/arch/x86/platform/mrst/vrtc.c
+@@ -76,8 +76,8 @@ unsigned long vrtc_get_time(void)
+ 
+ 	spin_unlock_irqrestore(&rtc_lock, flags);
+ 
+-	/* vRTC YEAR reg contains the offset to 1960 */
+-	year += 1960;
++	/* vRTC YEAR reg contains the offset to 1972 */
++	year += 1972;
+ 
+ 	printk(KERN_INFO "vRTC: sec: %d min: %d hour: %d day: %d "
+ 		"mon: %d year: %d\n", sec, min, hour, mday, mon, year);
+diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
+index a816f24..a0f768c 100644
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -383,6 +383,7 @@ static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_NET
+ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ 	struct crypto_report_blkcipher rblkcipher;
+@@ -404,6 +405,12 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
++#else
++static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++	return -ENOSYS;
++}
++#endif
+ 
+ static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
+ 	__attribute__ ((unused));
+@@ -457,6 +464,7 @@ static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_NET
+ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ 	struct crypto_report_blkcipher rblkcipher;
+@@ -478,6 +486,12 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
++#else
++static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++	return -ENOSYS;
++}
++#endif
+ 
+ static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
+ 	__attribute__ ((unused));
+diff --git a/crypto/aead.c b/crypto/aead.c
+index 701556f..04add3dc 100644
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -111,6 +111,7 @@ static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_NET
+ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ 	struct crypto_report_aead raead;
+@@ -132,6 +133,12 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
++#else
++static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++	return -ENOSYS;
++}
++#endif
+ 
+ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
+ 	__attribute__ ((unused));
+@@ -190,6 +197,7 @@ static int crypto_init_nivaead_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_NET
+ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ 	struct crypto_report_aead raead;
+@@ -210,6 +218,12 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
+ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
++#else
++static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++	return -ENOSYS;
++}
++#endif
+ 
+ 
+ static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index a3e6ef9..ac93c99 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -399,6 +399,7 @@ static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
+ 	return sizeof(struct crypto_shash *);
+ }
+ 
++#ifdef CONFIG_NET
+ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ 	struct crypto_report_hash rhash;
+@@ -416,6 +417,12 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
+ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
++#else
++static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++	return -ENOSYS;
++}
++#endif
+ 
+ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
+ 	__attribute__ ((unused));
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index 2572d26..1e61d1a 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -494,6 +494,7 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+ 		return crypto_init_blkcipher_ops_async(tfm);
+ }
+ 
++#ifdef CONFIG_NET
+ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ 	struct crypto_report_blkcipher rblkcipher;
+@@ -515,6 +516,12 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
++#else
++static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++	return -ENOSYS;
++}
++#endif
+ 
+ static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
+ 	__attribute__ ((unused));
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 2abca78..0605a2b 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -44,9 +44,6 @@ static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+ 
+ 	down_read(&crypto_alg_sem);
+ 
+-	if (list_empty(&crypto_alg_list))
+-		return NULL;
+-
+ 	list_for_each_entry(q, &crypto_alg_list, cra_list) {
+ 		int match = 0;
+ 
+diff --git a/crypto/pcompress.c b/crypto/pcompress.c
+index fefda78..2e458e5 100644
+--- a/crypto/pcompress.c
++++ b/crypto/pcompress.c
+@@ -48,6 +48,7 @@ static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_NET
+ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ 	struct crypto_report_comp rpcomp;
+@@ -62,6 +63,12 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
++#else
++static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++	return -ENOSYS;
++}
++#endif
+ 
+ static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
+ 	__attribute__ ((unused));
+diff --git a/crypto/rng.c b/crypto/rng.c
+index feb7de0..64f864f 100644
+--- a/crypto/rng.c
++++ b/crypto/rng.c
+@@ -60,6 +60,7 @@ static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_NET
+ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ 	struct crypto_report_rng rrng;
+@@ -76,6 +77,12 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
+ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
++#else
++static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++	return -ENOSYS;
++}
++#endif
+ 
+ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
+ 	__attribute__ ((unused));
+diff --git a/crypto/shash.c b/crypto/shash.c
+index ea8a9c6..9100912 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -524,6 +524,7 @@ static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
+ 	return alg->cra_ctxsize;
+ }
+ 
++#ifdef CONFIG_NET
+ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ 	struct crypto_report_hash rhash;
+@@ -541,6 +542,12 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
+ nla_put_failure:
+ 	return -EMSGSIZE;
+ }
++#else
++static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
++{
++	return -ENOSYS;
++}
++#endif
+ 
+ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
+ 	__attribute__ ((unused));
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 73b2909..0e8e2de 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -224,7 +224,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
+ /*
+  * Suspend / resume control
+  */
+-static int acpi_idle_suspend;
+ static u32 saved_bm_rld;
+ 
+ static void acpi_idle_bm_rld_save(void)
+@@ -243,21 +242,13 @@ static void acpi_idle_bm_rld_restore(void)
+ 
+ int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
+ {
+-	if (acpi_idle_suspend == 1)
+-		return 0;
+-
+ 	acpi_idle_bm_rld_save();
+-	acpi_idle_suspend = 1;
+ 	return 0;
+ }
+ 
+ int acpi_processor_resume(struct acpi_device * device)
+ {
+-	if (acpi_idle_suspend == 0)
+-		return 0;
+-
+ 	acpi_idle_bm_rld_restore();
+-	acpi_idle_suspend = 0;
+ 	return 0;
+ }
+ 
+@@ -763,13 +754,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
+ 
+ 	local_irq_disable();
+ 
+-	/* Do not access any ACPI IO ports in suspend path */
+-	if (acpi_idle_suspend) {
+-		local_irq_enable();
+-		cpu_relax();
+-		return -EINVAL;
+-	}
+-
+ 	lapic_timer_state_broadcast(pr, cx, 1);
+ 	kt1 = ktime_get_real();
+ 	acpi_idle_do_entry(cx);
+@@ -810,13 +794,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
+ 
+ 	local_irq_disable();
+ 
+-	if (acpi_idle_suspend) {
+-		local_irq_enable();
+-		cpu_relax();
+-		return -EINVAL;
+-	}
+-
+-
+ 	if (cx->entry_method != ACPI_CSTATE_FFH) {
+ 		current_thread_info()->status &= ~TS_POLLING;
+ 		/*
+@@ -895,12 +872,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+ 	if (unlikely(!pr))
+ 		return -EINVAL;
+ 
+-
+-	if (acpi_idle_suspend) {
+-		cpu_relax();
+-		return -EINVAL;
+-	}
+-
+ 	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
+ 		if (drv->safe_state_index >= 0) {
+ 			return drv->states[drv->safe_state_index].enter(dev,
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index fb7b90b..cf26222 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -390,6 +390,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	/* Promise */
+ 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
+ 
++	/* Asmedia */
++	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1061 */
++
+ 	/* Generic, PCI class code for AHCI */
+ 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
+diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
+index 004f2ce..ec55595 100644
+--- a/drivers/ata/ahci_platform.c
++++ b/drivers/ata/ahci_platform.c
+@@ -65,7 +65,7 @@ static struct scsi_host_template ahci_platform_sht = {
+ static int __init ahci_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+-	struct ahci_platform_data *pdata = dev->platform_data;
++	struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ 	const struct platform_device_id *id = platform_get_device_id(pdev);
+ 	struct ata_port_info pi = ahci_port_info[id->driver_data];
+ 	const struct ata_port_info *ppi[] = { &pi, NULL };
+@@ -191,7 +191,7 @@ err0:
+ static int __devexit ahci_remove(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+-	struct ahci_platform_data *pdata = dev->platform_data;
++	struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ 	struct ata_host *host = dev_get_drvdata(dev);
+ 
+ 	ata_host_detach(host);
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index f22957c..a9b2820 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2883,7 +2883,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ 	    sata_scr_read(link, SCR_STATUS, &sstatus))
+ 		rc = -ERESTART;
+ 
+-	if (rc == -ERESTART || try >= max_tries) {
++	if (try >= max_tries) {
+ 		/*
+ 		 * Thaw host port even if reset failed, so that the port
+ 		 * can be retried on the next phy event.  This risks
+@@ -2909,6 +2909,16 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ 		ata_eh_acquire(ap);
+ 	}
+ 
++	/*
++	 * While disks spinup behind PMP, some controllers fail sending SRST.
++	 * They need to be reset - as well as the PMP - before retrying.
++	 */
++	if (rc == -ERESTART) {
++		if (ata_is_host_link(link))
++			ata_eh_thaw_port(ap);
++		goto out;
++	}
++
+ 	if (try == max_tries - 1) {
+ 		sata_down_spd_limit(link, 0);
+ 		if (slave)
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index 104462d..21b80c5 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -389,12 +389,9 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ 			/* link reports offline after LPM */
+ 			link->flags |= ATA_LFLAG_NO_LPM;
+ 
+-			/* Class code report is unreliable and SRST
+-			 * times out under certain configurations.
+-			 */
++			/* Class code report is unreliable. */
+ 			if (link->pmp < 5)
+-				link->flags |= ATA_LFLAG_NO_SRST |
+-					       ATA_LFLAG_ASSUME_ATA;
++				link->flags |= ATA_LFLAG_ASSUME_ATA;
+ 
+ 			/* port 5 is for SEMB device and it doesn't like SRST */
+ 			if (link->pmp == 5)
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 72a9770..2a5412e 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1217,6 +1217,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
+ 
+ /**
+  *	__ata_change_queue_depth - helper for ata_scsi_change_queue_depth
++ *	@ap: ATA port to which the device change the queue depth
++ *	@sdev: SCSI device to configure queue depth for
++ *	@queue_depth: new queue depth
++ *	@reason: calling context
+  *
+  *	libsas and libata have different approaches for associating a sdev to
+  *	its ata_port.
+diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
+index a72ab0d..2a472c5 100644
+--- a/drivers/ata/pata_of_platform.c
++++ b/drivers/ata/pata_of_platform.c
+@@ -52,7 +52,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
+ 	}
+ 
+ 	ret = of_irq_to_resource(dn, 0, &irq_res);
+-	if (ret == NO_IRQ)
++	if (!ret)
+ 		irq_res.start = irq_res.end = 0;
+ 	else
+ 		irq_res.flags = 0;
+diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
+index 447d9c0..95ec435 100644
+--- a/drivers/ata/sata_sis.c
++++ b/drivers/ata/sata_sis.c
+@@ -104,7 +104,7 @@ static const struct ata_port_info sis_port_info = {
+ };
+ 
+ MODULE_AUTHOR("Uwe Koziolek");
+-MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller");
++MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
+ MODULE_LICENSE("GPL");
+ MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
+ MODULE_VERSION(DRV_VERSION);
+diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
+index 434a6c0..95706fa 100644
+--- a/drivers/base/power/opp.c
++++ b/drivers/base/power/opp.c
+@@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+ 	struct device_opp *dev_opp = find_device_opp(dev);
+ 
+ 	if (IS_ERR(dev_opp))
+-		return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */
++		return ERR_CAST(dev_opp); /* matching type */
+ 
+ 	return &dev_opp->head;
+ }
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 66cd0b8..c92424c 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -1186,10 +1186,11 @@ static void gen6_cleanup(void)
+ /* Certain Gen5 chipsets require require idling the GPU before
+  * unmapping anything from the GTT when VT-d is enabled.
+  */
+-extern int intel_iommu_gfx_mapped;
+ static inline int needs_idle_maps(void)
+ {
++#ifdef CONFIG_INTEL_IOMMU
+ 	const unsigned short gpu_devid = intel_private.pcidev->device;
++	extern int intel_iommu_gfx_mapped;
+ 
+ 	/* Query intel_iommu to see if we need the workaround. Presumably that
+ 	 * was loaded first.
+@@ -1198,7 +1199,7 @@ static inline int needs_idle_maps(void)
+ 	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ 	     intel_iommu_gfx_mapped)
+ 		return 1;
+-
++#endif
+ 	return 0;
+ }
+ 
+@@ -1236,7 +1237,7 @@ static int i9xx_setup(void)
+ 		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+ 	}
+ 
+-	if (needs_idle_maps());
++	if (needs_idle_maps())
+ 		intel_private.base.do_idle_maps = 1;
+ 
+ 	intel_i9xx_setup_flush();
+diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c
+index edaa987..f500201 100644
+--- a/drivers/cpufreq/db8500-cpufreq.c
++++ b/drivers/cpufreq/db8500-cpufreq.c
+@@ -109,7 +109,7 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
+ 
+ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
+ {
+-	int res;
++	int i, res;
+ 
+ 	BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table));
+ 
+@@ -120,8 +120,8 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
+ 			freq_table[3].frequency = 1000000;
+ 	}
+ 	pr_info("db8500-cpufreq : Available frequencies:\n");
+-	while (freq_table[i].frequency != CPUFREQ_TABLE_END)
+-		pr_info("  %d Mhz\n", freq_table[i++].frequency/1000);
++	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
++		pr_info("  %d Mhz\n", freq_table[i].frequency/1000);
+ 
+ 	/* get policy fields based on the table */
+ 	res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 785127c..1368826 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -9,7 +9,6 @@ menuconfig DRM
+ 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+ 	select I2C
+ 	select I2C_ALGOBIT
+-	select SLOW_WORK
+ 	help
+ 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
+ 	  introduced in XFree86 4.0. If you say Y here, you need to select
+@@ -96,6 +95,7 @@ config DRM_I915
+ 	select FB_CFB_IMAGEBLIT
+ 	# i915 depends on ACPI_VIDEO when ACPI is enabled
+ 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
++	select BACKLIGHT_LCD_SUPPORT if ACPI
+ 	select BACKLIGHT_CLASS_DEVICE if ACPI
+ 	select VIDEO_OUTPUT_CONTROL if ACPI
+ 	select INPUT if ACPI
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 9a2e2a1..405c63b 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -2118,8 +2118,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ 	property->num_values = num_values;
+ 	INIT_LIST_HEAD(&property->enum_blob_list);
+ 
+-	if (name)
++	if (name) {
+ 		strncpy(property->name, name, DRM_PROP_NAME_LEN);
++		property->name[DRM_PROP_NAME_LEN-1] = '\0';
++	}
+ 
+ 	list_add_tail(&property->head, &dev->mode_config.property_list);
+ 	return property;
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index 2957636..3969f75 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -484,6 +484,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
+ 	struct drm_connector *save_connectors, *connector;
+ 	int count = 0, ro, fail = 0;
+ 	struct drm_crtc_helper_funcs *crtc_funcs;
++	struct drm_mode_set save_set;
+ 	int ret = 0;
+ 	int i;
+ 
+@@ -556,6 +557,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
+ 		save_connectors[count++] = *connector;
+ 	}
+ 
++	save_set.crtc = set->crtc;
++	save_set.mode = &set->crtc->mode;
++	save_set.x = set->crtc->x;
++	save_set.y = set->crtc->y;
++	save_set.fb = set->crtc->fb;
++
+ 	/* We should be able to check here if the fb has the same properties
+ 	 * and then just flip_or_move it */
+ 	if (set->crtc->fb != set->fb) {
+@@ -721,6 +728,12 @@ fail:
+ 		*connector = save_connectors[count++];
+ 	}
+ 
++	/* Try to restore the config */
++	if (mode_changed &&
++	    !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
++				      save_set.y, save_set.fb))
++		DRM_ERROR("failed to restore config after modeset failure\n");
++
+ 	kfree(save_connectors);
+ 	kfree(save_encoders);
+ 	kfree(save_crtcs);
+diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
+index d067c12..1c7a1c0 100644
+--- a/drivers/gpu/drm/drm_debugfs.c
++++ b/drivers/gpu/drm/drm_debugfs.c
+@@ -118,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
+ 		tmp->minor = minor;
+ 		tmp->dent = ent;
+ 		tmp->info_ent = &files[i];
+-		list_add(&(tmp->list), &(minor->debugfs_nodes.list));
++
++		mutex_lock(&minor->debugfs_lock);
++		list_add(&tmp->list, &minor->debugfs_list);
++		mutex_unlock(&minor->debugfs_lock);
+ 	}
+ 	return 0;
+ 
+@@ -146,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
+ 	char name[64];
+ 	int ret;
+ 
+-	INIT_LIST_HEAD(&minor->debugfs_nodes.list);
++	INIT_LIST_HEAD(&minor->debugfs_list);
++	mutex_init(&minor->debugfs_lock);
+ 	sprintf(name, "%d", minor_id);
+ 	minor->debugfs_root = debugfs_create_dir(name, root);
+ 	if (!minor->debugfs_root) {
+@@ -192,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+ 	struct drm_info_node *tmp;
+ 	int i;
+ 
++	mutex_lock(&minor->debugfs_lock);
+ 	for (i = 0; i < count; i++) {
+-		list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
++		list_for_each_safe(pos, q, &minor->debugfs_list) {
+ 			tmp = list_entry(pos, struct drm_info_node, list);
+ 			if (tmp->info_ent == &files[i]) {
+ 				debugfs_remove(tmp->dent);
+@@ -202,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
+ 			}
+ 		}
+ 	}
++	mutex_unlock(&minor->debugfs_lock);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(drm_debugfs_remove_files);
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index fc81af9..40c187c 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -125,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
+ 	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 
+-	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
++	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+ 
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+ 
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index cb3794a..68b7562 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -407,13 +407,16 @@ int drm_irq_uninstall(struct drm_device *dev)
+ 	/*
+ 	 * Wake up any waiters so they don't hang.
+ 	 */
+-	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+-	for (i = 0; i < dev->num_crtcs; i++) {
+-		DRM_WAKEUP(&dev->vbl_queue[i]);
+-		dev->vblank_enabled[i] = 0;
+-		dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
++	if (dev->num_crtcs) {
++		spin_lock_irqsave(&dev->vbl_lock, irqflags);
++		for (i = 0; i < dev->num_crtcs; i++) {
++			DRM_WAKEUP(&dev->vbl_queue[i]);
++			dev->vblank_enabled[i] = 0;
++			dev->last_vblank[i] =
++				dev->driver->get_vblank_counter(dev, i);
++		}
++		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ 	}
+-	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ 
+ 	if (!irq_enabled)
+ 		return -EINVAL;
+@@ -1125,6 +1128,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+ 		trace_drm_vblank_event_delivered(current->pid, pipe,
+ 						 vblwait->request.sequence);
+ 	} else {
++		/* drm_handle_vblank_events will call drm_vblank_put */
+ 		list_add_tail(&e->base.link, &dev->vblank_event_list);
+ 		vblwait->reply.sequence = vblwait->request.sequence;
+ 	}
+@@ -1205,8 +1209,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
+ 		goto done;
+ 	}
+ 
+-	if (flags & _DRM_VBLANK_EVENT)
++	if (flags & _DRM_VBLANK_EVENT) {
++		/* must hold on to the vblank ref until the event fires
++		 * drm_vblank_put will be called asynchronously
++		 */
+ 		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
++	}
+ 
+ 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ 	    (seq - vblwait->request.sequence) <= (1<<23)) {
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index d14b44e..4f40f1c 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -1506,7 +1506,10 @@ drm_add_fake_info_node(struct drm_minor *minor,
+ 	node->minor = minor;
+ 	node->dent = ent;
+ 	node->info_ent = (void *) key;
+-	list_add(&node->list, &minor->debugfs_nodes.list);
++
++	mutex_lock(&minor->debugfs_lock);
++	list_add(&node->list, &minor->debugfs_list);
++	mutex_unlock(&minor->debugfs_lock);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index cc531bb..e9c2cfe 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -789,8 +789,8 @@ static struct vm_operations_struct i915_gem_vm_ops = {
+ };
+ 
+ static struct drm_driver driver = {
+-	/* don't use mtrr's here, the Xserver or user space app should
+-	 * deal with them for intel hardware.
++	/* Don't use MTRRs here; the Xserver or userspace app should
++	 * deal with them for Intel hardware.
+ 	 */
+ 	.driver_features =
+ 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 6651c36..d18b07a 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
+ 
+ 	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ 		ret = -E2BIG;
+-		goto unlock;
++		goto out;
+ 	}
+ 
+ 	if (obj->madv != I915_MADV_WILLNEED) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index 032a820..5fc201b 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -640,10 +640,9 @@ static int
+ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	uint32_t reg0 = nv_rd32(dev, reg + 0);
+-	uint32_t reg1 = nv_rd32(dev, reg + 4);
+ 	struct nouveau_pll_vals pll;
+ 	struct pll_lims pll_limits;
++	u32 ctrl, mask, coef;
+ 	int ret;
+ 
+ 	ret = get_pll_limits(dev, reg, &pll_limits);
+@@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
+ 	if (!clk)
+ 		return -ERANGE;
+ 
+-	reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
+-	reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
+-
+-	if (dev_priv->vbios.execute) {
+-		still_alive();
+-		nv_wr32(dev, reg + 4, reg1);
+-		nv_wr32(dev, reg + 0, reg0);
++	coef = pll.N1 << 8 | pll.M1;
++	ctrl = pll.log2P << 16;
++	mask = 0x00070000;
++	if (reg == 0x004008) {
++		mask |= 0x01f80000;
++		ctrl |= (pll_limits.log2p_bias << 19);
++		ctrl |= (pll.log2P << 22);
+ 	}
+ 
++	if (!dev_priv->vbios.execute)
++		return 0;
++
++	nv_mask(dev, reg + 0, mask, ctrl);
++	nv_wr32(dev, reg + 4, coef);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 7226f41..7cc37e6 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -148,7 +148,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+ 
+ 	if (dev_priv->card_type == NV_10 &&
+ 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+-	    nvbo->bo.mem.num_pages < vram_pages / 2) {
++	    nvbo->bo.mem.num_pages < vram_pages / 4) {
+ 		/*
+ 		 * Make sure that the color and depth buffers are handled
+ 		 * by independent memory controller units. Up to a 9x
+diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
+index a319d56..bb6ec9e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
++++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
+@@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
+ 	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+ 	INIT_LIST_HEAD(&chan->nvsw.flip);
+ 	INIT_LIST_HEAD(&chan->fence.pending);
++	spin_lock_init(&chan->fence.lock);
+ 
+ 	/* setup channel's memory and vm */
+ 	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index e0d275e..cea6696 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
+ 	case OUTPUT_DP:
+ 		max_clock  = nv_encoder->dp.link_nr;
+ 		max_clock *= nv_encoder->dp.link_bw;
+-		clock = clock * nouveau_connector_bpp(connector) / 8;
++		clock = clock * nouveau_connector_bpp(connector) / 10;
+ 		break;
+ 	default:
+ 		BUG_ON(1);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index 14a8627..3a4cc32 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -487,6 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_fbdev *nfbdev;
++	int preferred_bpp;
+ 	int ret;
+ 
+ 	nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+@@ -505,7 +506,15 @@ int nouveau_fbcon_init(struct drm_device *dev)
+ 	}
+ 
+ 	drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+-	drm_fb_helper_initial_config(&nfbdev->helper, 32);
++
++	if (dev_priv->vram_size <= 32 * 1024 * 1024)
++		preferred_bpp = 8;
++	else if (dev_priv->vram_size <= 64 * 1024 * 1024)
++		preferred_bpp = 16;
++	else
++		preferred_bpp = 32;
++
++	drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+index 81116cf..2f6daae 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
+ 			return ret;
+ 	}
+ 
+-	INIT_LIST_HEAD(&chan->fence.pending);
+-	spin_lock_init(&chan->fence.lock);
+ 	atomic_set(&chan->fence.last_sequence_irq, 0);
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
+index c6143df..d39b220 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
++++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
+@@ -333,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ 
+ 	NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
+ 
+-	for (i = 0; info[i].addr; i++) {
++	for (i = 0; i2c && info[i].addr; i++) {
+ 		if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
+ 		    (!match || match(i2c, &info[i]))) {
+ 			NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
+index 9f178aa..33d03fb 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
++++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
+@@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev)
+ 	if(version == 0x15) {
+ 		memtimings->timing =
+ 				kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+-		if(!memtimings) {
++		if (!memtimings->timing) {
+ 			NV_WARN(dev,"Could not allocate memtiming table\n");
+ 			return;
+ 		}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
+index 82478e0..d8831ab 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_state.c
++++ b/drivers/gpu/drm/nouveau/nouveau_state.c
+@@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev)
+ 	if (ret)
+ 		goto out_display_early;
+ 
++	/* workaround an odd issue on nvc1 by disabling the device's
++	 * nosnoop capability.  hopefully won't cause issues until a
++	 * better fix is found - assuming there is one...
++	 */
++	if (dev_priv->chipset == 0xc1) {
++		nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
++	}
++
+ 	nouveau_pm_init(dev);
+ 
+ 	ret = engine->vram.init(dev);
+@@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
+ 	dev_priv->noaccel = !!nouveau_noaccel;
+ 	if (nouveau_noaccel == -1) {
+ 		switch (dev_priv->chipset) {
+-		case 0xc1: /* known broken */
+-		case 0xc8: /* never tested */
++#if 0
++		case 0xXX: /* known broken */
+ 			NV_INFO(dev, "acceleration disabled by default, pass "
+ 				     "noaccel=0 to force enable\n");
+ 			dev_priv->noaccel = true;
+ 			break;
++#endif
+ 		default:
+ 			dev_priv->noaccel = false;
+ 			break;
+diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
+index bbc0b9c..e676b0d 100644
+--- a/drivers/gpu/drm/nouveau/nv40_pm.c
++++ b/drivers/gpu/drm/nouveau/nv40_pm.c
+@@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg)
+ 	int P = (ctrl & 0x00070000) >> 16;
+ 	u32 ref = 27000, clk = 0;
+ 
+-	if (ctrl & 0x80000000)
++	if ((ctrl & 0x80000000) && M1) {
+ 		clk = ref * N1 / M1;
+-
+-	if (!(ctrl & 0x00000100)) {
+-		if (ctrl & 0x40000000)
+-			clk = clk * N2 / M2;
++		if ((ctrl & 0x40000100) == 0x40000000) {
++			if (M2)
++				clk = clk * N2 / M2;
++			else
++				clk = 0;
++		}
+ 	}
+ 
+ 	return clk >> P;
+@@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+ 	}
+ 
+ 	/* memory clock */
++	if (!perflvl->memory) {
++		info->mpll_ctrl = 0x00000000;
++		goto out;
++	}
++
+ 	ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
+ 			    &N1, &M1, &N2, &M2, &log2P);
+ 	if (ret < 0)
+@@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
+ 	mdelay(5);
+ 	nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
+ 
++	if (!info->mpll_ctrl)
++		goto resume;
++
+ 	/* wait for vblank start on active crtcs, disable memory access */
+ 	for (i = 0; i < 2; i++) {
+ 		if (!(crtc_mask & (1 << i)))
+diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
+index 8c979b3..ac601f7 100644
+--- a/drivers/gpu/drm/nouveau/nv50_graph.c
++++ b/drivers/gpu/drm/nouveau/nv50_graph.c
+@@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine)
+ 	NV_DEBUG(dev, "\n");
+ 
+ 	/* master reset */
+-	nv_mask(dev, 0x000200, 0x00200100, 0x00000000);
+-	nv_mask(dev, 0x000200, 0x00200100, 0x00200100);
++	nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
++	nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
+ 	nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
+ 
+ 	/* reset/enable traps and interrupts */
+diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
+index d05c2c3..4b46d69 100644
+--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
++++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
+@@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
+ 					gr_def(ctx, offset + 0x1c, 0x00880000);
+ 					break;
+ 				case 0x86:
+-					gr_def(ctx, offset + 0x1c, 0x008c0000);
++					gr_def(ctx, offset + 0x1c, 0x018c0000);
+ 					break;
+ 				case 0x92:
+ 				case 0x96:
+diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
+index 9da2383..2e45e57 100644
+--- a/drivers/gpu/drm/nouveau/nv50_vram.c
++++ b/drivers/gpu/drm/nouveau/nv50_vram.c
+@@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev)
+ 	colbits  =  (r4 & 0x0000f000) >> 12;
+ 	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ 	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+-	banks    = ((r4 & 0x01000000) ? 8 : 4);
++	banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
+ 
+ 	rowsize = parts * banks * (1 << colbits) * 8;
+ 	predicted = rowsize << rowbitsa;
+diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
+index bbdbc51..a74e501 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
++++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
+@@ -157,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+ 	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ 	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ 	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	int i = 0, gpc, tp, ret;
+-	u32 magic;
+ 
+ 	ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
+ 				 &grch->unk408004);
+@@ -207,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+ 	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
+ 	nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+ 
+-	magic = 0x02180000;
+-	nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+-	nv_wo32(grch->mmio, i++ * 4, magic);
+-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+-		for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
+-			u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
+-			nv_wo32(grch->mmio, i++ * 4, reg);
+-			nv_wo32(grch->mmio, i++ * 4, magic);
++	if (dev_priv->chipset != 0xc1) {
++		u32 magic = 0x02180000;
++		nv_wo32(grch->mmio, i++ * 4, 0x00405830);
++		nv_wo32(grch->mmio, i++ * 4, magic);
++		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
++			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
++				u32 reg = TP_UNIT(gpc, tp, 0x520);
++				nv_wo32(grch->mmio, i++ * 4, reg);
++				nv_wo32(grch->mmio, i++ * 4, magic);
++				magic += 0x0324;
++			}
++		}
++	} else {
++		u32 magic = 0x02180000;
++		nv_wo32(grch->mmio, i++ * 4, 0x00405830);
++		nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
++		nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
++		nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
++		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
++			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
++				u32 reg = TP_UNIT(gpc, tp, 0x520);
++				nv_wo32(grch->mmio, i++ * 4, reg);
++				nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
++				magic += 0x0324;
++			}
++			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
++				u32 reg = TP_UNIT(gpc, tp, 0x544);
++				nv_wo32(grch->mmio, i++ * 4, reg);
++				nv_wo32(grch->mmio, i++ * 4, magic);
++				magic += 0x0324;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
+index dd0e6a7..96b0b93 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
++++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
+@@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
+ 		/* calculate first set of magics */
+ 		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ 
++		gpc = -1;
+ 		for (tp = 0; tp < priv->tp_total; tp++) {
+ 			do {
+ 				gpc = (gpc + 1) % priv->gpc_nr;
+@@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
+ 
+ 	if (1) {
+ 		u32 tp_mask = 0, tp_set = 0;
+-		u8  tpnr[GPC_MAX];
++		u8  tpnr[GPC_MAX], a, b;
+ 
+ 		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ 		for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ 			tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
+ 
+-		gpc = -1;
+-		for (i = 0, gpc = -1; i < 32; i++) {
+-			int ltp = i * (priv->tp_total - 1) / 32;
+-
+-			do {
+-				gpc = (gpc + 1) % priv->gpc_nr;
+-			} while (!tpnr[gpc]);
+-			tp = priv->tp_nr[gpc] - tpnr[gpc]--;
++		for (i = 0, gpc = -1, b = -1; i < 32; i++) {
++			a = (i * (priv->tp_total - 1)) / 32;
++			if (a != b) {
++				b = a;
++				do {
++					gpc = (gpc + 1) % priv->gpc_nr;
++				} while (!tpnr[gpc]);
++				tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+ 
+-			tp_set |= 1 << ((gpc * 8) + tp);
++				tp_set |= 1 << ((gpc * 8) + tp);
++			}
+ 
+-			do {
+-				nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+-				tp_set ^= tp_mask;
+-				nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
+-				tp_set ^= tp_mask;
+-			} while (ltp == (++i * (priv->tp_total - 1) / 32));
+-			i--;
++			nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
++			nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
+index edbfe93..ce984d5 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
++++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
+@@ -43,7 +43,7 @@ static const u8 types[256] = {
+ 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 	0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+ 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+-	3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
++	3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+ 	3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
+ 	3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
+ 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
+@@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev)
+ 	u32 bsize = nv_rd32(dev, 0x10f20c);
+ 	u32 offset, length;
+ 	bool uniform = true;
+-	int ret, i;
++	int ret, part;
+ 
+ 	NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
+ 	NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
+ 
+ 	/* read amount of vram attached to each memory controller */
+-	for (i = 0; i < parts; i++) {
+-		u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000));
++	part = 0;
++	while (parts) {
++		u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000));
++		if (psize == 0)
++			continue;
++		parts--;
++
+ 		if (psize != bsize) {
+ 			if (psize < bsize)
+ 				bsize = psize;
+ 			uniform = false;
+ 		}
+ 
+-		NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize);
+-
++		NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
+ 		dev_priv->vram_size += (u64)psize << 20;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 87921c8..87631fe 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1522,12 +1522,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+ 				     struct drm_display_mode *mode,
+ 				     struct drm_display_mode *adjusted_mode)
+ {
+-	struct drm_device *dev = crtc->dev;
+-	struct radeon_device *rdev = dev->dev_private;
+-
+-	/* adjust pm to upcoming mode change */
+-	radeon_pm_compute_clocks(rdev);
+-
+ 	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ 		return false;
+ 	return true;
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index a0de485..6fb335a 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ 		}
+ 	}
+ 
+-	DRM_ERROR("aux i2c too many retries, giving up\n");
++	DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
+ 	return -EREMOTEIO;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index e4c384b..1d603a3 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -157,6 +157,57 @@ int sumo_get_temp(struct radeon_device *rdev)
+ 	return actual_temp * 1000;
+ }
+ 
++void sumo_pm_init_profile(struct radeon_device *rdev)
++{
++	int idx;
++
++	/* default */
++	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
++	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
++	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
++	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
++
++	/* low,mid sh/mh */
++	if (rdev->flags & RADEON_IS_MOBILITY)
++		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
++	else
++		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
++
++	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
++	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
++
++	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
++	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
++
++	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
++	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
++
++	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
++	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
++
++	/* high sh/mh */
++	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
++	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
++	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
++		rdev->pm.power_state[idx].num_clock_modes - 1;
++
++	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
++	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
++	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
++		rdev->pm.power_state[idx].num_clock_modes - 1;
++}
++
+ void evergreen_pm_misc(struct radeon_device *rdev)
+ {
+ 	int req_ps_idx = rdev->pm.requested_power_state_index;
+@@ -1219,7 +1270,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
+ 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ 			rdev->mc.vram_end >> 12);
+ 	}
+-	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
++	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+ 	if (rdev->flags & RADEON_IS_IGP) {
+ 		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+ 		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 19afc43..9cdda0b 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
+ 		  pcie_lanes);
+ }
+ 
+-static int r600_pm_get_type_index(struct radeon_device *rdev,
+-				  enum radeon_pm_state_type ps_type,
+-				  int instance)
+-{
+-	int i;
+-	int found_instance = -1;
+-
+-	for (i = 0; i < rdev->pm.num_power_states; i++) {
+-		if (rdev->pm.power_state[i].type == ps_type) {
+-			found_instance++;
+-			if (found_instance == instance)
+-				return i;
+-		}
+-	}
+-	/* return default if no match */
+-	return rdev->pm.default_power_state_index;
+-}
+-
+ void rs780_pm_init_profile(struct radeon_device *rdev)
+ {
+ 	if (rdev->pm.num_power_states == 2) {
+@@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
+ 
+ void r600_pm_init_profile(struct radeon_device *rdev)
+ {
++	int idx;
++
+ 	if (rdev->family == CHIP_R600) {
+ 		/* XXX */
+ 		/* default */
+@@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev)
+ 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+ 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+ 			/* low sh */
+-			if (rdev->flags & RADEON_IS_MOBILITY) {
+-				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+-				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+-				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+-				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+-			} else {
+-				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+-				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+-				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+-				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+-			}
++			if (rdev->flags & RADEON_IS_MOBILITY)
++				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
++			else
++				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
++			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
++			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+ 			/* mid sh */
+-			if (rdev->flags & RADEON_IS_MOBILITY) {
+-				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+-				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+-				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+-				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+-			} else {
+-				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+-				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+-				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+-				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+-			}
++			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
++			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+ 			/* high sh */
+-			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
+-				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+-			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
+-				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
++			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
++			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+ 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+ 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+ 			/* low mh */
+-			if (rdev->flags & RADEON_IS_MOBILITY) {
+-				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+-				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+-				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+-				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+-			} else {
+-				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+-				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+-				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+-				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+-			}
++			if (rdev->flags & RADEON_IS_MOBILITY)
++				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
++			else
++				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
++			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
++			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+ 			/* mid mh */
+-			if (rdev->flags & RADEON_IS_MOBILITY) {
+-				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+-				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
+-				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+-				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+-			} else {
+-				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+-				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
+-					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+-				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+-				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+-			}
++			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
++			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+ 			/* high mh */
+-			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
+-				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
+-			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
+-				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
++			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
++			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
++			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+ 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+ 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+ 		}
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index b316b30..fc5a1d6 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -784,8 +784,7 @@ struct radeon_pm_clock_info {
+ 
+ struct radeon_power_state {
+ 	enum radeon_pm_state_type type;
+-	/* XXX: use a define for num clock modes */
+-	struct radeon_pm_clock_info clock_info[8];
++	struct radeon_pm_clock_info *clock_info;
+ 	/* number of valid clock modes in this power state */
+ 	int num_clock_modes;
+ 	struct radeon_pm_clock_info *default_clock_mode;
+@@ -855,6 +854,9 @@ struct radeon_pm {
+ 	struct device	        *int_hwmon_dev;
+ };
+ 
++int radeon_pm_get_type_index(struct radeon_device *rdev,
++			     enum radeon_pm_state_type ps_type,
++			     int instance);
+ 
+ /*
+  * Benchmarking
+@@ -1142,6 +1144,48 @@ struct r600_vram_scratch {
+ 	u64				gpu_addr;
+ };
+ 
++
++/*
++ * Mutex which allows recursive locking from the same process.
++ */
++struct radeon_mutex {
++	struct mutex		mutex;
++	struct task_struct	*owner;
++	int			level;
++};
++
++static inline void radeon_mutex_init(struct radeon_mutex *mutex)
++{
++	mutex_init(&mutex->mutex);
++	mutex->owner = NULL;
++	mutex->level = 0;
++}
++
++static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
++{
++	if (mutex_trylock(&mutex->mutex)) {
++		/* The mutex was unlocked before, so it's ours now */
++		mutex->owner = current;
++	} else if (mutex->owner != current) {
++		/* Another process locked the mutex, take it */
++		mutex_lock(&mutex->mutex);
++		mutex->owner = current;
++	}
++	/* Otherwise the mutex was already locked by this process */
++
++	mutex->level++;
++}
++
++static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
++{
++	if (--mutex->level > 0)
++		return;
++
++	mutex->owner = NULL;
++	mutex_unlock(&mutex->mutex);
++}
++
++
+ /*
+  * Core structure, functions and helpers.
+  */
+@@ -1197,7 +1241,7 @@ struct radeon_device {
+ 	struct radeon_gem		gem;
+ 	struct radeon_pm		pm;
+ 	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
+-	struct mutex			cs_mutex;
++	struct radeon_mutex		cs_mutex;
+ 	struct radeon_wb		wb;
+ 	struct radeon_dummy_page	dummy_page;
+ 	bool				gpu_lockup;
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
+index e294456..a2e1eae 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.c
++++ b/drivers/gpu/drm/radeon/radeon_asic.c
+@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = {
+ 	.pm_misc = &evergreen_pm_misc,
+ 	.pm_prepare = &evergreen_pm_prepare,
+ 	.pm_finish = &evergreen_pm_finish,
+-	.pm_init_profile = &rs780_pm_init_profile,
++	.pm_init_profile = &sumo_pm_init_profile,
+ 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ 	.pre_page_flip = &evergreen_pre_page_flip,
+ 	.page_flip = &evergreen_page_flip,
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 85f14f0..5991484 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+ extern void evergreen_pm_misc(struct radeon_device *rdev);
+ extern void evergreen_pm_prepare(struct radeon_device *rdev);
+ extern void evergreen_pm_finish(struct radeon_device *rdev);
++extern void sumo_pm_init_profile(struct radeon_device *rdev);
+ extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+ extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 08d0b94..d2d1792 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1999,6 +1999,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ 		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ 		switch (frev) {
+ 		case 1:
++			rdev->pm.power_state[state_index].clock_info =
++				kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
++			if (!rdev->pm.power_state[state_index].clock_info)
++				return state_index;
+ 			rdev->pm.power_state[state_index].num_clock_modes = 1;
+ 			rdev->pm.power_state[state_index].clock_info[0].mclk =
+ 				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+@@ -2035,6 +2039,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ 			state_index++;
+ 			break;
+ 		case 2:
++			rdev->pm.power_state[state_index].clock_info =
++				kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
++			if (!rdev->pm.power_state[state_index].clock_info)
++				return state_index;
+ 			rdev->pm.power_state[state_index].num_clock_modes = 1;
+ 			rdev->pm.power_state[state_index].clock_info[0].mclk =
+ 				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+@@ -2072,6 +2080,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ 			state_index++;
+ 			break;
+ 		case 3:
++			rdev->pm.power_state[state_index].clock_info =
++				kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
++			if (!rdev->pm.power_state[state_index].clock_info)
++				return state_index;
+ 			rdev->pm.power_state[state_index].num_clock_modes = 1;
+ 			rdev->pm.power_state[state_index].clock_info[0].mclk =
+ 				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+@@ -2257,7 +2269,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
+ 		rdev->pm.default_power_state_index = state_index;
+ 		rdev->pm.power_state[state_index].default_clock_mode =
+ 			&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+-		if (ASIC_IS_DCE5(rdev)) {
++		if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
+ 			/* NI chips post without MC ucode, so default clocks are strobe mode only */
+ 			rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+ 			rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+@@ -2377,17 +2389,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+ 			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+ 			 (power_state->v1.ucNonClockStateIndex *
+ 			  power_info->pplib.ucNonClockSize));
+-		for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+-			clock_info = (union pplib_clock_info *)
+-				(mode_info->atom_context->bios + data_offset +
+-				 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+-				 (power_state->v1.ucClockStateIndices[j] *
+-				  power_info->pplib.ucClockInfoSize));
+-			valid = radeon_atombios_parse_pplib_clock_info(rdev,
+-								       state_index, mode_index,
+-								       clock_info);
+-			if (valid)
+-				mode_index++;
++		rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
++							     ((power_info->pplib.ucStateEntrySize - 1) ?
++							      (power_info->pplib.ucStateEntrySize - 1) : 1),
++							     GFP_KERNEL);
++		if (!rdev->pm.power_state[i].clock_info)
++			return state_index;
++		if (power_info->pplib.ucStateEntrySize - 1) {
++			for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
++				clock_info = (union pplib_clock_info *)
++					(mode_info->atom_context->bios + data_offset +
++					 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
++					 (power_state->v1.ucClockStateIndices[j] *
++					  power_info->pplib.ucClockInfoSize));
++				valid = radeon_atombios_parse_pplib_clock_info(rdev,
++									       state_index, mode_index,
++									       clock_info);
++				if (valid)
++					mode_index++;
++			}
++		} else {
++			rdev->pm.power_state[state_index].clock_info[0].mclk =
++				rdev->clock.default_mclk;
++			rdev->pm.power_state[state_index].clock_info[0].sclk =
++				rdev->clock.default_sclk;
++			mode_index++;
+ 		}
+ 		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ 		if (mode_index) {
+@@ -2456,18 +2482,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ 		non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ 		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ 			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+-		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+-			clock_array_index = power_state->v2.clockInfoIndex[j];
+-			/* XXX this might be an inagua bug... */
+-			if (clock_array_index >= clock_info_array->ucNumEntries)
+-				continue;
+-			clock_info = (union pplib_clock_info *)
+-				&clock_info_array->clockInfo[clock_array_index];
+-			valid = radeon_atombios_parse_pplib_clock_info(rdev,
+-								       state_index, mode_index,
+-								       clock_info);
+-			if (valid)
+-				mode_index++;
++		rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
++							     (power_state->v2.ucNumDPMLevels ?
++							      power_state->v2.ucNumDPMLevels : 1),
++							     GFP_KERNEL);
++		if (!rdev->pm.power_state[i].clock_info)
++			return state_index;
++		if (power_state->v2.ucNumDPMLevels) {
++			for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
++				clock_array_index = power_state->v2.clockInfoIndex[j];
++				/* XXX this might be an inagua bug... */
++				if (clock_array_index >= clock_info_array->ucNumEntries)
++					continue;
++				clock_info = (union pplib_clock_info *)
++					&clock_info_array->clockInfo[clock_array_index];
++				valid = radeon_atombios_parse_pplib_clock_info(rdev,
++									       state_index, mode_index,
++									       clock_info);
++				if (valid)
++					mode_index++;
++			}
++		} else {
++			rdev->pm.power_state[state_index].clock_info[0].mclk =
++				rdev->clock.default_mclk;
++			rdev->pm.power_state[state_index].clock_info[0].sclk =
++				rdev->clock.default_sclk;
++			mode_index++;
+ 		}
+ 		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ 		if (mode_index) {
+@@ -2524,19 +2564,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+ 	} else {
+ 		rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+ 		if (rdev->pm.power_state) {
+-			/* add the default mode */
+-			rdev->pm.power_state[state_index].type =
+-				POWER_STATE_TYPE_DEFAULT;
+-			rdev->pm.power_state[state_index].num_clock_modes = 1;
+-			rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+-			rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+-			rdev->pm.power_state[state_index].default_clock_mode =
+-				&rdev->pm.power_state[state_index].clock_info[0];
+-			rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+-			rdev->pm.power_state[state_index].pcie_lanes = 16;
+-			rdev->pm.default_power_state_index = state_index;
+-			rdev->pm.power_state[state_index].flags = 0;
+-			state_index++;
++			rdev->pm.power_state[0].clock_info =
++				kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
++			if (rdev->pm.power_state[0].clock_info) {
++				/* add the default mode */
++				rdev->pm.power_state[state_index].type =
++					POWER_STATE_TYPE_DEFAULT;
++				rdev->pm.power_state[state_index].num_clock_modes = 1;
++				rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
++				rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
++				rdev->pm.power_state[state_index].default_clock_mode =
++					&rdev->pm.power_state[state_index].clock_info[0];
++				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
++				rdev->pm.power_state[state_index].pcie_lanes = 16;
++				rdev->pm.default_power_state_index = state_index;
++				rdev->pm.power_state[state_index].flags = 0;
++				state_index++;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
+index 5cafc90..17e1a9b 100644
+--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
++++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
+@@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
+ 	struct radeon_bo *sobj = NULL;
+ 	uint64_t saddr, daddr;
+ 	int r, n;
+-	unsigned int time;
++	int time;
+ 
+ 	n = RADEON_BENCHMARK_ITERATIONS;
+ 	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 8bf83c4..81fc100 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
+ 
+ 	/* allocate 2 power states */
+ 	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+-	if (!rdev->pm.power_state) {
+-		rdev->pm.default_power_state_index = state_index;
+-		rdev->pm.num_power_states = 0;
+-
+-		rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+-		rdev->pm.current_clock_mode_index = 0;
+-		return;
+-	}
++	if (rdev->pm.power_state) {
++		/* allocate 1 clock mode per state */
++		rdev->pm.power_state[0].clock_info =
++			kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
++		rdev->pm.power_state[1].clock_info =
++			kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
++		if (!rdev->pm.power_state[0].clock_info ||
++		    !rdev->pm.power_state[1].clock_info)
++			goto pm_failed;
++	} else
++		goto pm_failed;
+ 
+ 	/* check for a thermal chip */
+ 	offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
+@@ -2735,6 +2738,14 @@ default_mode:
+ 
+ 	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+ 	rdev->pm.current_clock_mode_index = 0;
++	return;
++
++pm_failed:
++	rdev->pm.default_power_state_index = state_index;
++	rdev->pm.num_power_states = 0;
++
++	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
++	rdev->pm.current_clock_mode_index = 0;
+ }
+ 
+ void radeon_external_tmds_setup(struct drm_encoder *encoder)
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index fae00c0..ccaa243 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -222,7 +222,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 	struct radeon_cs_chunk *ib_chunk;
+ 	int r;
+ 
+-	mutex_lock(&rdev->cs_mutex);
++	radeon_mutex_lock(&rdev->cs_mutex);
+ 	/* initialize parser */
+ 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
+ 	parser.filp = filp;
+@@ -233,14 +233,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 	if (r) {
+ 		DRM_ERROR("Failed to initialize parser !\n");
+ 		radeon_cs_parser_fini(&parser, r);
+-		mutex_unlock(&rdev->cs_mutex);
++		radeon_mutex_unlock(&rdev->cs_mutex);
+ 		return r;
+ 	}
+ 	r =  radeon_ib_get(rdev, &parser.ib);
+ 	if (r) {
+ 		DRM_ERROR("Failed to get ib !\n");
+ 		radeon_cs_parser_fini(&parser, r);
+-		mutex_unlock(&rdev->cs_mutex);
++		radeon_mutex_unlock(&rdev->cs_mutex);
+ 		return r;
+ 	}
+ 	r = radeon_cs_parser_relocs(&parser);
+@@ -248,7 +248,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		if (r != -ERESTARTSYS)
+ 			DRM_ERROR("Failed to parse relocation %d!\n", r);
+ 		radeon_cs_parser_fini(&parser, r);
+-		mutex_unlock(&rdev->cs_mutex);
++		radeon_mutex_unlock(&rdev->cs_mutex);
+ 		return r;
+ 	}
+ 	/* Copy the packet into the IB, the parser will read from the
+@@ -260,14 +260,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 	if (r || parser.parser_error) {
+ 		DRM_ERROR("Invalid command stream !\n");
+ 		radeon_cs_parser_fini(&parser, r);
+-		mutex_unlock(&rdev->cs_mutex);
++		radeon_mutex_unlock(&rdev->cs_mutex);
+ 		return r;
+ 	}
+ 	r = radeon_cs_finish_pages(&parser);
+ 	if (r) {
+ 		DRM_ERROR("Invalid command stream !\n");
+ 		radeon_cs_parser_fini(&parser, r);
+-		mutex_unlock(&rdev->cs_mutex);
++		radeon_mutex_unlock(&rdev->cs_mutex);
+ 		return r;
+ 	}
+ 	r = radeon_ib_schedule(rdev, parser.ib);
+@@ -275,7 +275,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		DRM_ERROR("Failed to schedule IB !\n");
+ 	}
+ 	radeon_cs_parser_fini(&parser, r);
+-	mutex_unlock(&rdev->cs_mutex);
++	radeon_mutex_unlock(&rdev->cs_mutex);
+ 	return r;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index c33bc91..c4d00a1 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
+ 
+ 	/* mutex initialization are all done here so we
+ 	 * can recall function without having locking issues */
+-	mutex_init(&rdev->cs_mutex);
++	radeon_mutex_init(&rdev->cs_mutex);
+ 	mutex_init(&rdev->ib_pool.mutex);
+ 	mutex_init(&rdev->cp.mutex);
+ 	mutex_init(&rdev->dc_hw_i2c_mutex);
+@@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
+ 	int r;
+ 	int resched;
+ 
++	/* Prevent CS ioctl from interfering */
++	radeon_mutex_lock(&rdev->cs_mutex);
++
+ 	radeon_save_bios_scratch_regs(rdev);
+ 	/* block TTM */
+ 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+@@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
+ 		radeon_restore_bios_scratch_regs(rdev);
+ 		drm_helper_resume_force_mode(rdev->ddev);
+ 		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+-		return 0;
+ 	}
+-	/* bad news, how to tell it to userspace ? */
+-	dev_info(rdev->dev, "GPU reset failed\n");
++
++	radeon_mutex_unlock(&rdev->cs_mutex);
++
++	if (r) {
++		/* bad news, how to tell it to userspace ? */
++		dev_info(rdev->dev, "GPU reset failed\n");
++	}
++
+ 	return r;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+index 41a5d48..daadf21 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
+ 				   struct drm_display_mode *mode,
+ 				   struct drm_display_mode *adjusted_mode)
+ {
+-	struct drm_device *dev = crtc->dev;
+-	struct radeon_device *rdev = dev->dev_private;
+-
+-	/* adjust pm to upcoming mode change */
+-	radeon_pm_compute_clocks(rdev);
+-
+ 	if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ 		return false;
+ 	return true;
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 6fabe89..78a665b 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev);
+ 
+ #define ACPI_AC_CLASS           "ac_adapter"
+ 
++int radeon_pm_get_type_index(struct radeon_device *rdev,
++			     enum radeon_pm_state_type ps_type,
++			     int instance)
++{
++	int i;
++	int found_instance = -1;
++
++	for (i = 0; i < rdev->pm.num_power_states; i++) {
++		if (rdev->pm.power_state[i].type == ps_type) {
++			found_instance++;
++			if (found_instance == instance)
++				return i;
++		}
++	}
++	/* return default if no match */
++	return rdev->pm.default_power_state_index;
++}
++
+ #ifdef CONFIG_ACPI
+ static int radeon_acpi_event(struct notifier_block *nb,
+ 			     unsigned long val,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 03daefa..880e285 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -105,6 +105,10 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ 	struct vmw_dma_buffer *dmabuf = NULL;
+ 	int ret;
+ 
++	/* A lot of the code assumes this */
++	if (handle && (width != 64 || height != 64))
++		return -EINVAL;
++
+ 	if (handle) {
+ 		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+ 						     handle, &surface);
+@@ -410,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
+ 	top = clips->y1;
+ 	bottom = clips->y2;
+ 
+-	clips_ptr = clips;
+-	for (i = 1; i < num_clips; i++, clips_ptr += inc) {
++	/* skip the first clip rect */
++	for (i = 1, clips_ptr = clips + inc;
++	     i < num_clips; i++, clips_ptr += inc) {
+ 		left = min_t(int, left, (int)clips_ptr->x1);
+ 		right = max_t(int, right, (int)clips_ptr->x2);
+ 		top = min_t(int, top, (int)clips_ptr->y1);
+@@ -1323,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv)
+ 	 * drm_encoder_cleanup which takes the lock we deadlock.
+ 	 */
+ 	drm_mode_config_cleanup(dev_priv->dev);
+-	vmw_kms_close_legacy_display_system(dev_priv);
++	if (dev_priv->sou_priv)
++		vmw_kms_close_screen_object_display(dev_priv);
++	else
++		vmw_kms_close_legacy_display_system(dev_priv);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
+index 143461a..86980fe 100644
+--- a/drivers/hwspinlock/u8500_hsem.c
++++ b/drivers/hwspinlock/u8500_hsem.c
+@@ -21,6 +21,7 @@
+  * General Public License for more details.
+  */
+ 
++#include <linux/module.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+ #include <linux/pm_runtime.h>
+@@ -108,10 +109,8 @@ static int __devinit u8500_hsem_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 
+ 	io_base = ioremap(res->start, resource_size(res));
+-	if (!io_base) {
+-		ret = -ENOMEM;
+-		goto free_state;
+-	}
++	if (!io_base)
++		return -ENOMEM;
+ 
+ 	/* make sure protocol 1 is selected */
+ 	val = readl(io_base + HSEM_CTRL_REG);
+diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
+index 04b0956..8126824 100644
+--- a/drivers/ide/ide-cd.c
++++ b/drivers/ide/ide-cd.c
+@@ -43,7 +43,6 @@
+ /* For SCSI -> ATAPI command conversion */
+ #include <scsi/scsi.h>
+ 
+-#include <linux/irq.h>
+ #include <linux/io.h>
+ #include <asm/byteorder.h>
+ #include <linux/uaccess.h>
+diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
+index 61fdf54..3d42043 100644
+--- a/drivers/ide/ide-floppy.c
++++ b/drivers/ide/ide-floppy.c
+@@ -35,7 +35,6 @@
+ #include <scsi/scsi_ioctl.h>
+ 
+ #include <asm/byteorder.h>
+-#include <linux/irq.h>
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
+ #include <asm/unaligned.h>
+diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
+index 7ecb1ad..ce8237d 100644
+--- a/drivers/ide/ide-tape.c
++++ b/drivers/ide/ide-tape.c
+@@ -41,7 +41,6 @@
+ #include <scsi/scsi.h>
+ 
+ #include <asm/byteorder.h>
+-#include <linux/irq.h>
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
+ #include <asm/unaligned.h>
+diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
+index 817f37a..c9570fc 100644
+--- a/drivers/macintosh/via-macii.c
++++ b/drivers/macintosh/via-macii.c
+@@ -159,7 +159,7 @@ int macii_init(void)
+ 	err = macii_init_via();
+ 	if (err) goto out;
+ 
+-	err = request_irq(IRQ_MAC_ADB, macii_interrupt, IRQ_FLG_LOCK, "ADB",
++	err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
+ 			  macii_interrupt);
+ 	if (err) goto out;
+ 
+diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c
+index 9ab5b0c..34d02a9 100644
+--- a/drivers/macintosh/via-maciisi.c
++++ b/drivers/macintosh/via-maciisi.c
+@@ -122,8 +122,8 @@ maciisi_init(void)
+ 		return err;
+ 	}
+ 
+-	if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, IRQ_FLG_LOCK | IRQ_FLG_FAST, 
+-			"ADB", maciisi_interrupt)) {
++	if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, 0, "ADB",
++			maciisi_interrupt)) {
+ 		printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB);
+ 		return -EAGAIN;
+ 	}
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 472aedf..297e260 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3110,7 +3110,7 @@ static void handle_stripe(struct stripe_head *sh)
+ 	struct r5dev *pdev, *qdev;
+ 
+ 	clear_bit(STRIPE_HANDLE, &sh->state);
+-	if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
++	if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
+ 		/* already being handled, ensure it gets handled
+ 		 * again when current action finishes */
+ 		set_bit(STRIPE_HANDLE, &sh->state);
+@@ -3159,10 +3159,14 @@ static void handle_stripe(struct stripe_head *sh)
+ 	/* check if the array has lost more than max_degraded devices and,
+ 	 * if so, some requests might need to be failed.
+ 	 */
+-	if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
+-		handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
+-	if (s.failed > conf->max_degraded && s.syncing)
+-		handle_failed_sync(conf, sh, &s);
++	if (s.failed > conf->max_degraded) {
++		sh->check_state = 0;
++		sh->reconstruct_state = 0;
++		if (s.to_read+s.to_write+s.written)
++			handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
++		if (s.syncing)
++			handle_failed_sync(conf, sh, &s);
++	}
+ 
+ 	/*
+ 	 * might be able to return some write requests if the parity blocks
+@@ -3371,7 +3375,7 @@ finish:
+ 
+ 	return_io(s.return_bi);
+ 
+-	clear_bit(STRIPE_ACTIVE, &sh->state);
++	clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
+ }
+ 
+ static void raid5_activate_delayed(struct r5conf *conf)
+diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
+index 2e8c288..3443455 100644
+--- a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
++++ b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c
+@@ -398,7 +398,6 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
+ 	u8 i2c_r_data[24];
+ 	u8 i = 0;
+ 	u8 fifo_status = 0;
+-	int ret;
+ 	int status = 0;
+ 
+ 	mxl_i2c("read %d bytes", count);
+@@ -418,7 +417,7 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state,
+ 		i2c_w_data[4+(i*3)] = 0x00;
+ 	}
+ 
+-	ret = mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data);
++	mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data);
+ 
+ 	/* Check for I2C NACK status */
+ 	if (mxl111sf_i2c_check_status(state) == 1) {
+diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
+index 91dc1fc..b741b3a 100644
+--- a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
++++ b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c
+@@ -296,8 +296,7 @@ int mxl111sf_config_spi(struct mxl111sf_state *state, int onoff)
+ 		goto fail;
+ 
+ 	ret = mxl111sf_write_reg(state, 0x00, 0x00);
+-	if (mxl_fail(ret))
+-		goto fail;
++	mxl_fail(ret);
+ fail:
+ 	return ret;
+ }
+@@ -328,11 +327,13 @@ int mxl111sf_idac_config(struct mxl111sf_state *state,
+ 		/* set hysteresis value  reg: 0x0B<5:0> */
+ 		ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG,
+ 					 (hysteresis_value & 0x3F));
++		mxl_fail(ret);
+ 	}
+ 
+ 	ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val);
++	mxl_fail(ret);
+ 
+-	return val;
++	return ret;
+ }
+ 
+ /*
+diff --git a/drivers/media/video/s5k6aa.c b/drivers/media/video/s5k6aa.c
+index 2446736..0df7f2a 100644
+--- a/drivers/media/video/s5k6aa.c
++++ b/drivers/media/video/s5k6aa.c
+@@ -19,6 +19,7 @@
+ #include <linux/gpio.h>
+ #include <linux/i2c.h>
+ #include <linux/media.h>
++#include <linux/module.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
+ 
+diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+index 725634d..844a4d7 100644
+--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
++++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+@@ -220,8 +220,8 @@ static int vidioc_querycap(struct file *file, void *priv,
+ 	strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+ 	cap->bus_info[0] = 0;
+ 	cap->version = KERNEL_VERSION(1, 0, 0);
+-	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+-						    | V4L2_CAP_STREAMING;
++	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
++			V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+index ecef127..1e8cdb7 100644
+--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
++++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+@@ -785,8 +785,8 @@ static int vidioc_querycap(struct file *file, void *priv,
+ 	strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+ 	cap->bus_info[0] = 0;
+ 	cap->version = KERNEL_VERSION(1, 0, 0);
+-	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
+-			  | V4L2_CAP_VIDEO_OUTPUT
++	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE
++			  | V4L2_CAP_VIDEO_OUTPUT_MPLANE
+ 			  | V4L2_CAP_STREAMING;
+ 	return 0;
+ }
+diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
+index 10c2364..254d326 100644
+--- a/drivers/media/video/uvc/uvc_ctrl.c
++++ b/drivers/media/video/uvc/uvc_ctrl.c
+@@ -1016,7 +1016,8 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
+ 
+ 	menu_info = &mapping->menu_info[query_menu->index];
+ 
+-	if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) {
++	if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
++	    (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
+ 		s32 bitmap;
+ 
+ 		if (!ctrl->cached) {
+@@ -1225,7 +1226,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
+ 		/* Valid menu indices are reported by the GET_RES request for
+ 		 * UVC controls that support it.
+ 		 */
+-		if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) {
++		if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
++		    (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
+ 			if (!ctrl->cached) {
+ 				ret = uvc_ctrl_populate_cache(chain, ctrl);
+ 				if (ret < 0)
+diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
+index f17f92b..0f415da 100644
+--- a/drivers/media/video/v4l2-ctrls.c
++++ b/drivers/media/video/v4l2-ctrls.c
+@@ -821,8 +821,8 @@ static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
+ 	fill_event(&ev, ctrl, changes);
+ 
+ 	list_for_each_entry(sev, &ctrl->ev_subs, node)
+-		if (sev->fh && (sev->fh != fh ||
+-				(sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)))
++		if (sev->fh != fh ||
++		    (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
+ 			v4l2_event_queue_fh(sev->fh, &ev);
+ }
+ 
+@@ -947,6 +947,7 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ 			if (ctrl->cluster[0]->has_volatiles)
+ 				ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ 		}
++		fh = NULL;
+ 	}
+ 	if (changed || update_inactive) {
+ 		/* If a control was changed that was not one of the controls
+diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
+index 46037f22..c26ad96 100644
+--- a/drivers/media/video/v4l2-event.c
++++ b/drivers/media/video/v4l2-event.c
+@@ -216,6 +216,9 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
+ 	unsigned long flags;
+ 	unsigned i;
+ 
++	if (sub->type == V4L2_EVENT_ALL)
++		return -EINVAL;
++
+ 	if (elems < 1)
+ 		elems = 1;
+ 	if (sub->type == V4L2_EVENT_CTRL) {
+@@ -283,6 +286,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ {
+ 	struct v4l2_subscribed_event *sev;
+ 	unsigned long flags;
++	int i;
+ 
+ 	if (sub->type == V4L2_EVENT_ALL) {
+ 		v4l2_event_unsubscribe_all(fh);
+@@ -293,8 +297,12 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ 
+ 	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ 	if (sev != NULL) {
++		/* Remove any pending events for this subscription */
++		for (i = 0; i < sev->in_use; i++) {
++			list_del(&sev->events[sev_pos(sev, i)].list);
++			fh->navailable--;
++		}
+ 		list_del(&sev->list);
+-		sev->fh = NULL;
+ 	}
+ 
+ 	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
+index 979e544..95a3f5e 100644
+--- a/drivers/media/video/videobuf2-core.c
++++ b/drivers/media/video/videobuf2-core.c
+@@ -131,6 +131,7 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
+ 			continue;
+ 
+ 		for (plane = 0; plane < vb->num_planes; ++plane) {
++			vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+ 			vb->v4l2_planes[plane].m.mem_offset = off;
+ 
+ 			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
+@@ -264,6 +265,7 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+ 	q->num_buffers -= buffers;
+ 	if (!q->num_buffers)
+ 		q->memory = 0;
++	INIT_LIST_HEAD(&q->queued_list);
+ }
+ 
+ /**
+@@ -296,14 +298,14 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
+ {
+ 	unsigned int plane;
+ 	for (plane = 0; plane < vb->num_planes; ++plane) {
++		void *mem_priv = vb->planes[plane].mem_priv;
+ 		/*
+ 		 * If num_users() has not been provided, call_memop
+ 		 * will return 0, apparently nobody cares about this
+ 		 * case anyway. If num_users() returns more than 1,
+ 		 * we are not the only user of the plane's memory.
+ 		 */
+-		if (call_memop(q, plane, num_users,
+-				vb->planes[plane].mem_priv) > 1)
++		if (mem_priv && call_memop(q, plane, num_users, mem_priv) > 1)
+ 			return true;
+ 	}
+ 	return false;
+diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
+index 4175544..ec10629 100644
+--- a/drivers/mfd/ab5500-core.c
++++ b/drivers/mfd/ab5500-core.c
+@@ -13,6 +13,7 @@
+  * TODO: Event handling with irq_chip. Waiting for PRCMU fw support.
+  */
+ 
++#include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/err.h>
+ #include <linux/platform_device.h>
+diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
+index 6be1fe6..43c0ebb 100644
+--- a/drivers/mfd/ab5500-debugfs.c
++++ b/drivers/mfd/ab5500-debugfs.c
+@@ -4,6 +4,7 @@
+  * Debugfs support for the AB5500 MFD driver
+  */
+ 
++#include <linux/export.h>
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+ #include <linux/mfd/ab5500/ab5500.h>
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index ae57769..4b976f0 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -32,6 +32,7 @@
+ /* VENDOR SPEC register */
+ #define SDHCI_VENDOR_SPEC		0xC0
+ #define  SDHCI_VENDOR_SPEC_SDIO_QUIRK	0x00000002
++#define SDHCI_WTMK_LVL			0x44
+ #define SDHCI_MIX_CTRL			0x48
+ 
+ /*
+@@ -476,6 +477,13 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ 	if (is_imx53_esdhc(imx_data))
+ 		imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
+ 
++	/*
++	 * The imx6q ROM code will change the default watermark level setting
++	 * to something insane.  Change it back here.
++	 */
++	if (is_imx6q_usdhc(imx_data))
++		writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL);
++
+ 	boarddata = &imx_data->boarddata;
+ 	if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
+ 		if (!host->mmc->parent->platform_data) {
+diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
+index 608967f..736ca10 100644
+--- a/drivers/mtd/maps/bcm963xx-flash.c
++++ b/drivers/mtd/maps/bcm963xx-flash.c
+@@ -21,6 +21,7 @@
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
++#include <linux/module.h>
+ #include <linux/mtd/map.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index f4e3d82..7f43cf8 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -83,8 +83,10 @@ config DELL_LAPTOP
+ 	depends on EXPERIMENTAL
+ 	depends on BACKLIGHT_CLASS_DEVICE
+ 	depends on RFKILL || RFKILL = n
+-	depends on POWER_SUPPLY
+ 	depends on SERIO_I8042
++	select POWER_SUPPLY
++	select LEDS_CLASS
++	select NEW_LEDS
+ 	default n
+ 	---help---
+ 	This driver adds support for rfkill and backlight control to Dell
+diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
+index d9fb729..fb73008 100644
+--- a/drivers/ps3/ps3-vuart.c
++++ b/drivers/ps3/ps3-vuart.c
+@@ -952,7 +952,7 @@ static int ps3_vuart_bus_interrupt_get(void)
+ 	}
+ 
+ 	result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
+-		IRQF_DISABLED, "vuart", &vuart_bus_priv);
++		0, "vuart", &vuart_bus_priv);
+ 
+ 	if (result) {
+ 		pr_debug("%s:%d: request_irq failed (%d)\n",
+diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
+index cc328de..8c3f5ad 100644
+--- a/drivers/ps3/ps3stor_lib.c
++++ b/drivers/ps3/ps3stor_lib.c
+@@ -167,7 +167,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
+ 		goto fail_close_device;
+ 	}
+ 
+-	error = request_irq(dev->irq, handler, IRQF_DISABLED,
++	error = request_irq(dev->irq, handler, 0,
+ 			    dev->sbd.core.driver->name, dev);
+ 	if (error) {
+ 		dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
+diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
+index d335448..bb21f44 100644
+--- a/drivers/rtc/rtc-mrst.c
++++ b/drivers/rtc/rtc-mrst.c
+@@ -76,12 +76,15 @@ static inline unsigned char vrtc_is_updating(void)
+ /*
+  * rtc_time's year contains the increment over 1900, but vRTC's YEAR
+  * register can't be programmed to value larger than 0x64, so vRTC
+- * driver chose to use 1960 (1970 is UNIX time start point) as the base,
++ * driver chose to use 1972 (1970 is UNIX time start point) as the base,
+  * and does the translation at read/write time.
+  *
+- * Why not just use 1970 as the offset? it's because using 1960 will
++ * Why not just use 1970 as the offset? It's because using 1972 will
+  * make it consistent in leap year setting for both vrtc and low-level
+- * physical rtc devices.
++ * physical rtc devices. Then why not use 1960 as the offset? If we use
++ * 1960, for a device's first use, its YEAR register is 0 and the system
++ * year will be parsed as 1960 which is not a valid UNIX time and will
++ * cause many applications to fail mysteriously.
+  */
+ static int mrst_read_time(struct device *dev, struct rtc_time *time)
+ {
+@@ -99,10 +102,10 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time)
+ 	time->tm_year = vrtc_cmos_read(RTC_YEAR);
+ 	spin_unlock_irqrestore(&rtc_lock, flags);
+ 
+-	/* Adjust for the 1960/1900 */
+-	time->tm_year += 60;
++	/* Adjust for the 1972/1900 */
++	time->tm_year += 72;
+ 	time->tm_mon--;
+-	return RTC_24H;
++	return rtc_valid_tm(time);
+ }
+ 
+ static int mrst_set_time(struct device *dev, struct rtc_time *time)
+@@ -119,9 +122,9 @@ static int mrst_set_time(struct device *dev, struct rtc_time *time)
+ 	min = time->tm_min;
+ 	sec = time->tm_sec;
+ 
+-	if (yrs < 70 || yrs > 138)
++	if (yrs < 72 || yrs > 138)
+ 		return -EINVAL;
+-	yrs -= 60;
++	yrs -= 72;
+ 
+ 	spin_lock_irqsave(&rtc_lock, flags);
+ 
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index 79665e2..16d6a83 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -907,7 +907,7 @@ static void atmel_spi_cleanup(struct spi_device *spi)
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+-static int __init atmel_spi_probe(struct platform_device *pdev)
++static int __devinit atmel_spi_probe(struct platform_device *pdev)
+ {
+ 	struct resource		*regs;
+ 	int			irq;
+@@ -1003,7 +1003,7 @@ out_free:
+ 	return ret;
+ }
+ 
+-static int __exit atmel_spi_remove(struct platform_device *pdev)
++static int __devexit atmel_spi_remove(struct platform_device *pdev)
+ {
+ 	struct spi_master	*master = platform_get_drvdata(pdev);
+ 	struct atmel_spi	*as = spi_master_get_devdata(master);
+@@ -1072,6 +1072,7 @@ static struct platform_driver atmel_spi_driver = {
+ 	},
+ 	.suspend	= atmel_spi_suspend,
+ 	.resume		= atmel_spi_resume,
++	.probe		= atmel_spi_probe,
+ 	.remove		= __exit_p(atmel_spi_remove),
+ };
+ module_platform_driver(atmel_spi_driver);
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 4cb0d0a..fc7bbba 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -66,14 +66,16 @@
+ static int debug;
+ module_param(debug, int, 0600);
+ 
+-#define T1	(HZ/10)
+-#define T2	(HZ/3)
+-#define N2	3
++/* Defaults: these are from the specification */
++
++#define T1	10		/* 100mS */
++#define T2	34		/* 333mS */
++#define N2	3		/* Retry 3 times */
+ 
+ /* Use long timers for testing at low speed with debug on */
+ #ifdef DEBUG_TIMING
+-#define T1	HZ
+-#define T2	(2 * HZ)
++#define T1	100
++#define T2	200
+ #endif
+ 
+ /*
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index 5a5d325..634608d 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -147,14 +147,12 @@ struct btrfs_inode {
+ 	 * the btrfs file release call will add this inode to the
+ 	 * ordered operations list so that we make sure to flush out any
+ 	 * new data the application may have written before commit.
+-	 *
+-	 * yes, its silly to have a single bitflag, but we might grow more
+-	 * of these.
+ 	 */
+ 	unsigned ordered_data_close:1;
+ 	unsigned orphan_meta_reserved:1;
+ 	unsigned dummy_inode:1;
+ 	unsigned in_defrag:1;
++	unsigned delalloc_meta_reserved:1;
+ 
+ 	/*
+ 	 * always compress this one file
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 3a1b939..5b16357 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -617,12 +617,14 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
+ static int btrfs_delayed_inode_reserve_metadata(
+ 					struct btrfs_trans_handle *trans,
+ 					struct btrfs_root *root,
++					struct inode *inode,
+ 					struct btrfs_delayed_node *node)
+ {
+ 	struct btrfs_block_rsv *src_rsv;
+ 	struct btrfs_block_rsv *dst_rsv;
+ 	u64 num_bytes;
+ 	int ret;
++	int release = false;
+ 
+ 	src_rsv = trans->block_rsv;
+ 	dst_rsv = &root->fs_info->delayed_block_rsv;
+@@ -652,12 +654,65 @@ static int btrfs_delayed_inode_reserve_metadata(
+ 		if (!ret)
+ 			node->bytes_reserved = num_bytes;
+ 		return ret;
++	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
++		spin_lock(&BTRFS_I(inode)->lock);
++		if (BTRFS_I(inode)->delalloc_meta_reserved) {
++			BTRFS_I(inode)->delalloc_meta_reserved = 0;
++			spin_unlock(&BTRFS_I(inode)->lock);
++			release = true;
++			goto migrate;
++		}
++		spin_unlock(&BTRFS_I(inode)->lock);
++
++		/* Ok we didn't have space pre-reserved.  This shouldn't happen
++		 * too often but it can happen if we do delalloc to an existing
++		 * inode which gets dirtied because of the time update, and then
++		 * isn't touched again until after the transaction commits and
++		 * then we try to write out the data.  First try to be nice and
++		 * reserve something strictly for us.  If not be a pain and try
++		 * to steal from the delalloc block rsv.
++		 */
++		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
++		if (!ret)
++			goto out;
++
++		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
++		if (!ret)
++			goto out;
++
++		/*
++		 * Ok this is a problem, let's just steal from the global rsv
++		 * since this really shouldn't happen that often.
++		 */
++		WARN_ON(1);
++		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
++					      dst_rsv, num_bytes);
++		goto out;
+ 	}
+ 
++migrate:
+ 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
++
++out:
++	/*
++	 * Migrate only takes a reservation, it doesn't touch the size of the
++	 * block_rsv.  This is to simplify people who don't normally have things
++	 * migrated from their block rsv.  If they go to release their
++	 * reservation, that will decrease the size as well, so if migrate
++	 * reduced size we'd end up with a negative size.  But for the
++	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
++	 * but we could in fact do this reserve/migrate dance several times
++	 * between the time we did the original reservation and we'd clean it
++	 * up.  So to take care of this, release the space for the meta
++	 * reservation here.  I think it may be time for a documentation page on
++	 * how block rsvs work.
++	 */
+ 	if (!ret)
+ 		node->bytes_reserved = num_bytes;
+ 
++	if (release)
++		btrfs_block_rsv_release(root, src_rsv, num_bytes);
++
+ 	return ret;
+ }
+ 
+@@ -1708,7 +1763,8 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+ 		goto release_node;
+ 	}
+ 
+-	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
++	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
++						   delayed_node);
+ 	if (ret)
+ 		goto release_node;
+ 
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 102c176..62afe5c 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1890,31 +1890,32 @@ struct btrfs_root *open_ctree(struct super_block *sb,
+ 	u64 features;
+ 	struct btrfs_key location;
+ 	struct buffer_head *bh;
+-	struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
+-						 GFP_NOFS);
+-	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
+-						 GFP_NOFS);
++	struct btrfs_super_block *disk_super;
+ 	struct btrfs_root *tree_root = btrfs_sb(sb);
+-	struct btrfs_fs_info *fs_info = NULL;
+-	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
+-						GFP_NOFS);
+-	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
+-					      GFP_NOFS);
++	struct btrfs_fs_info *fs_info = tree_root->fs_info;
++	struct btrfs_root *extent_root;
++	struct btrfs_root *csum_root;
++	struct btrfs_root *chunk_root;
++	struct btrfs_root *dev_root;
+ 	struct btrfs_root *log_tree_root;
+-
+ 	int ret;
+ 	int err = -EINVAL;
+ 	int num_backups_tried = 0;
+ 	int backup_index = 0;
+ 
+-	struct btrfs_super_block *disk_super;
++	extent_root = fs_info->extent_root =
++		kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
++	csum_root = fs_info->csum_root =
++		kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
++	chunk_root = fs_info->chunk_root =
++		kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
++	dev_root = fs_info->dev_root =
++		kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+ 
+-	if (!extent_root || !tree_root || !tree_root->fs_info ||
+-	    !chunk_root || !dev_root || !csum_root) {
++	if (!extent_root || !csum_root || !chunk_root || !dev_root) {
+ 		err = -ENOMEM;
+ 		goto fail;
+ 	}
+-	fs_info = tree_root->fs_info;
+ 
+ 	ret = init_srcu_struct(&fs_info->subvol_srcu);
+ 	if (ret) {
+@@ -1954,12 +1955,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
+ 	mutex_init(&fs_info->reloc_mutex);
+ 
+ 	init_completion(&fs_info->kobj_unregister);
+-	fs_info->tree_root = tree_root;
+-	fs_info->extent_root = extent_root;
+-	fs_info->csum_root = csum_root;
+-	fs_info->chunk_root = chunk_root;
+-	fs_info->dev_root = dev_root;
+-	fs_info->fs_devices = fs_devices;
+ 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
+ 	INIT_LIST_HEAD(&fs_info->space_info);
+ 	btrfs_mapping_init(&fs_info->mapping_tree);
+@@ -2465,21 +2460,20 @@ fail_sb_buffer:
+ 	btrfs_stop_workers(&fs_info->caching_workers);
+ fail_alloc:
+ fail_iput:
++	btrfs_mapping_tree_free(&fs_info->mapping_tree);
++
+ 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
+ 	iput(fs_info->btree_inode);
+-
+-	btrfs_close_devices(fs_info->fs_devices);
+-	btrfs_mapping_tree_free(&fs_info->mapping_tree);
+ fail_bdi:
+ 	bdi_destroy(&fs_info->bdi);
+ fail_srcu:
+ 	cleanup_srcu_struct(&fs_info->subvol_srcu);
+ fail:
++	btrfs_close_devices(fs_info->fs_devices);
+ 	free_fs_info(fs_info);
+ 	return ERR_PTR(err);
+ 
+ recovery_tree_root:
+-
+ 	if (!btrfs_test_opt(tree_root, RECOVERY))
+ 		goto fail_tree_roots;
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 9879bd4..b232150 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3797,16 +3797,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
+ 	kfree(rsv);
+ }
+ 
+-int btrfs_block_rsv_add(struct btrfs_root *root,
+-			struct btrfs_block_rsv *block_rsv,
+-			u64 num_bytes)
++static inline int __block_rsv_add(struct btrfs_root *root,
++				  struct btrfs_block_rsv *block_rsv,
++				  u64 num_bytes, int flush)
+ {
+ 	int ret;
+ 
+ 	if (num_bytes == 0)
+ 		return 0;
+ 
+-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
++	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
+ 	if (!ret) {
+ 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
+ 		return 0;
+@@ -3815,22 +3815,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
+ 	return ret;
+ }
+ 
++int btrfs_block_rsv_add(struct btrfs_root *root,
++			struct btrfs_block_rsv *block_rsv,
++			u64 num_bytes)
++{
++	return __block_rsv_add(root, block_rsv, num_bytes, 1);
++}
++
+ int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
+ 				struct btrfs_block_rsv *block_rsv,
+ 				u64 num_bytes)
+ {
+-	int ret;
+-
+-	if (num_bytes == 0)
+-		return 0;
+-
+-	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
+-	if (!ret) {
+-		block_rsv_add_bytes(block_rsv, num_bytes, 1);
+-		return 0;
+-	}
+-
+-	return ret;
++	return __block_rsv_add(root, block_rsv, num_bytes, 0);
+ }
+ 
+ int btrfs_block_rsv_check(struct btrfs_root *root,
+@@ -4064,23 +4060,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
+  */
+ static unsigned drop_outstanding_extent(struct inode *inode)
+ {
++	unsigned drop_inode_space = 0;
+ 	unsigned dropped_extents = 0;
+ 
+ 	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
+ 	BTRFS_I(inode)->outstanding_extents--;
+ 
++	if (BTRFS_I(inode)->outstanding_extents == 0 &&
++	    BTRFS_I(inode)->delalloc_meta_reserved) {
++		drop_inode_space = 1;
++		BTRFS_I(inode)->delalloc_meta_reserved = 0;
++	}
++
+ 	/*
+ 	 * If we have more or the same amount of outsanding extents than we have
+ 	 * reserved then we need to leave the reserved extents count alone.
+ 	 */
+ 	if (BTRFS_I(inode)->outstanding_extents >=
+ 	    BTRFS_I(inode)->reserved_extents)
+-		return 0;
++		return drop_inode_space;
+ 
+ 	dropped_extents = BTRFS_I(inode)->reserved_extents -
+ 		BTRFS_I(inode)->outstanding_extents;
+ 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
+-	return dropped_extents;
++	return dropped_extents + drop_inode_space;
+ }
+ 
+ /**
+@@ -4166,9 +4169,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
+ 		nr_extents = BTRFS_I(inode)->outstanding_extents -
+ 			BTRFS_I(inode)->reserved_extents;
+ 		BTRFS_I(inode)->reserved_extents += nr_extents;
++	}
+ 
+-		to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
++	/*
++	 * Add an item to reserve for updating the inode when we complete the
++	 * delalloc io.
++	 */
++	if (!BTRFS_I(inode)->delalloc_meta_reserved) {
++		nr_extents++;
++		BTRFS_I(inode)->delalloc_meta_reserved = 1;
+ 	}
++
++	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+ 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+ 	spin_unlock(&BTRFS_I(inode)->lock);
+ 
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 7a15fcf..181760f 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -537,6 +537,13 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
+ 			    struct btrfs_free_space *entry, u8 *type)
+ {
+ 	struct btrfs_free_space_entry *e;
++	int ret;
++
++	if (!io_ctl->cur) {
++		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
++		if (ret)
++			return ret;
++	}
+ 
+ 	e = io_ctl->cur;
+ 	entry->offset = le64_to_cpu(e->offset);
+@@ -550,10 +557,7 @@ static int io_ctl_read_entry(struct io_ctl *io_ctl,
+ 
+ 	io_ctl_unmap_page(io_ctl);
+ 
+-	if (io_ctl->index >= io_ctl->num_pages)
+-		return 0;
+-
+-	return io_ctl_check_crc(io_ctl, io_ctl->index);
++	return 0;
+ }
+ 
+ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
+@@ -561,9 +565,6 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
+ {
+ 	int ret;
+ 
+-	if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
+-		io_ctl_unmap_page(io_ctl);
+-
+ 	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
+ 	if (ret)
+ 		return ret;
+@@ -699,6 +700,8 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ 		num_entries--;
+ 	}
+ 
++	io_ctl_unmap_page(&io_ctl);
++
+ 	/*
+ 	 * We add the bitmaps at the end of the entries in order that
+ 	 * the bitmap entries are added to the cache.
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index 53dcbdf..f8962a9 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -398,6 +398,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
+ 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ 	struct btrfs_path *path;
+ 	struct inode *inode;
++	struct btrfs_block_rsv *rsv;
++	u64 num_bytes;
+ 	u64 alloc_hint = 0;
+ 	int ret;
+ 	int prealloc;
+@@ -421,11 +423,26 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
+ 	if (!path)
+ 		return -ENOMEM;
+ 
++	rsv = trans->block_rsv;
++	trans->block_rsv = &root->fs_info->trans_block_rsv;
++
++	num_bytes = trans->bytes_reserved;
++	/*
++	 * 1 item for inode item insertion if need
++	 * 3 items for inode item update (in the worst case)
++	 * 1 item for free space object
++	 * 3 items for pre-allocation
++	 */
++	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8);
++	ret = btrfs_block_rsv_add_noflush(root, trans->block_rsv,
++					  trans->bytes_reserved);
++	if (ret)
++		goto out;
+ again:
+ 	inode = lookup_free_ino_inode(root, path);
+ 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+ 		ret = PTR_ERR(inode);
+-		goto out;
++		goto out_release;
+ 	}
+ 
+ 	if (IS_ERR(inode)) {
+@@ -434,7 +451,7 @@ again:
+ 
+ 		ret = create_free_ino_inode(root, trans, path);
+ 		if (ret)
+-			goto out;
++			goto out_release;
+ 		goto again;
+ 	}
+ 
+@@ -477,11 +494,14 @@ again:
+ 	}
+ 	btrfs_free_reserved_data_space(inode, prealloc);
+ 
++	ret = btrfs_write_out_ino_cache(root, trans, path);
+ out_put:
+ 	iput(inode);
++out_release:
++	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
+ out:
+-	if (ret == 0)
+-		ret = btrfs_write_out_ino_cache(root, trans, path);
++	trans->block_rsv = rsv;
++	trans->bytes_reserved = num_bytes;
+ 
+ 	btrfs_free_path(path);
+ 	return ret;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 966ddcc..116ab67 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -93,6 +93,8 @@ static noinline int cow_file_range(struct inode *inode,
+ 				   struct page *locked_page,
+ 				   u64 start, u64 end, int *page_started,
+ 				   unsigned long *nr_written, int unlock);
++static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
++				struct btrfs_root *root, struct inode *inode);
+ 
+ static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
+ 				     struct inode *inode,  struct inode *dir,
+@@ -1741,7 +1743,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
+ 				trans = btrfs_join_transaction(root);
+ 			BUG_ON(IS_ERR(trans));
+ 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+-			ret = btrfs_update_inode(trans, root, inode);
++			ret = btrfs_update_inode_fallback(trans, root, inode);
+ 			BUG_ON(ret);
+ 		}
+ 		goto out;
+@@ -1791,7 +1793,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
+ 
+ 	ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
+-		ret = btrfs_update_inode(trans, root, inode);
++		ret = btrfs_update_inode_fallback(trans, root, inode);
+ 		BUG_ON(ret);
+ 	}
+ 	ret = 0;
+@@ -2199,6 +2201,9 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
+ 		if (ret)
+ 			goto out;
+ 	}
++	/* release the path since we're done with it */
++	btrfs_release_path(path);
++
+ 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
+ 
+ 	if (root->orphan_block_rsv)
+@@ -2426,7 +2431,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
+ /*
+  * copy everything in the in-memory inode into the btree.
+  */
+-noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
++static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
+ 				struct btrfs_root *root, struct inode *inode)
+ {
+ 	struct btrfs_inode_item *inode_item;
+@@ -2434,21 +2439,6 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
+ 	struct extent_buffer *leaf;
+ 	int ret;
+ 
+-	/*
+-	 * If the inode is a free space inode, we can deadlock during commit
+-	 * if we put it into the delayed code.
+-	 *
+-	 * The data relocation inode should also be directly updated
+-	 * without delay
+-	 */
+-	if (!btrfs_is_free_space_inode(root, inode)
+-	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
+-		ret = btrfs_delayed_update_inode(trans, root, inode);
+-		if (!ret)
+-			btrfs_set_inode_last_trans(trans, inode);
+-		return ret;
+-	}
+-
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+ 		return -ENOMEM;
+@@ -2477,6 +2467,43 @@ failed:
+ }
+ 
+ /*
++ * copy everything in the in-memory inode into the btree.
++ */
++noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
++				struct btrfs_root *root, struct inode *inode)
++{
++	int ret;
++
++	/*
++	 * If the inode is a free space inode, we can deadlock during commit
++	 * if we put it into the delayed code.
++	 *
++	 * The data relocation inode should also be directly updated
++	 * without delay
++	 */
++	if (!btrfs_is_free_space_inode(root, inode)
++	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
++		ret = btrfs_delayed_update_inode(trans, root, inode);
++		if (!ret)
++			btrfs_set_inode_last_trans(trans, inode);
++		return ret;
++	}
++
++	return btrfs_update_inode_item(trans, root, inode);
++}
++
++static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
++				struct btrfs_root *root, struct inode *inode)
++{
++	int ret;
++
++	ret = btrfs_update_inode(trans, root, inode);
++	if (ret == -ENOSPC)
++		return btrfs_update_inode_item(trans, root, inode);
++	return ret;
++}
++
++/*
+  * unlink helper that gets used here in inode.c and in the tree logging
+  * recovery code.  It remove a link in a directory with a given name, and
+  * also drops the back refs in the inode to the directory
+@@ -5632,7 +5659,7 @@ again:
+ 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
+ 		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
+ 		if (!ret)
+-			err = btrfs_update_inode(trans, root, inode);
++			err = btrfs_update_inode_fallback(trans, root, inode);
+ 		goto out;
+ 	}
+ 
+@@ -5670,7 +5697,7 @@ again:
+ 	add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
+ 	ret = btrfs_ordered_update_i_size(inode, 0, ordered);
+ 	if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
+-		btrfs_update_inode(trans, root, inode);
++		btrfs_update_inode_fallback(trans, root, inode);
+ 	ret = 0;
+ out_unlock:
+ 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
+@@ -6529,14 +6556,16 @@ end_trans:
+ 		ret = btrfs_orphan_del(NULL, inode);
+ 	}
+ 
+-	trans->block_rsv = &root->fs_info->trans_block_rsv;
+-	ret = btrfs_update_inode(trans, root, inode);
+-	if (ret && !err)
+-		err = ret;
++	if (trans) {
++		trans->block_rsv = &root->fs_info->trans_block_rsv;
++		ret = btrfs_update_inode(trans, root, inode);
++		if (ret && !err)
++			err = ret;
+ 
+-	nr = trans->blocks_used;
+-	ret = btrfs_end_transaction_throttle(trans, root);
+-	btrfs_btree_balance_dirty(root, nr);
++		nr = trans->blocks_used;
++		ret = btrfs_end_transaction_throttle(trans, root);
++		btrfs_btree_balance_dirty(root, nr);
++	}
+ 
+ out:
+ 	btrfs_free_block_rsv(root, rsv);
+@@ -6605,6 +6634,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ 	ei->orphan_meta_reserved = 0;
+ 	ei->dummy_inode = 0;
+ 	ei->in_defrag = 0;
++	ei->delalloc_meta_reserved = 0;
+ 	ei->force_compress = BTRFS_COMPRESS_NONE;
+ 
+ 	ei->delayed_node = NULL;
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 24d654c..dff29d5 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1174,6 +1174,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
+ 			list_add_tail(&new_edge->list[UPPER],
+ 				      &new_node->lower);
+ 		}
++	} else {
++		list_add_tail(&new_node->lower, &cache->leaves);
+ 	}
+ 
+ 	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index ed11d38..f4190f2 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -944,50 +944,18 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
+ static int scrub_submit(struct scrub_dev *sdev)
+ {
+ 	struct scrub_bio *sbio;
+-	struct bio *bio;
+-	int i;
+ 
+ 	if (sdev->curr == -1)
+ 		return 0;
+ 
+ 	sbio = sdev->bios[sdev->curr];
+-
+-	bio = bio_alloc(GFP_NOFS, sbio->count);
+-	if (!bio)
+-		goto nomem;
+-
+-	bio->bi_private = sbio;
+-	bio->bi_end_io = scrub_bio_end_io;
+-	bio->bi_bdev = sdev->dev->bdev;
+-	bio->bi_sector = sbio->physical >> 9;
+-
+-	for (i = 0; i < sbio->count; ++i) {
+-		struct page *page;
+-		int ret;
+-
+-		page = alloc_page(GFP_NOFS);
+-		if (!page)
+-			goto nomem;
+-
+-		ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+-		if (!ret) {
+-			__free_page(page);
+-			goto nomem;
+-		}
+-	}
+-
+ 	sbio->err = 0;
+ 	sdev->curr = -1;
+ 	atomic_inc(&sdev->in_flight);
+ 
+-	submit_bio(READ, bio);
++	submit_bio(READ, sbio->bio);
+ 
+ 	return 0;
+-
+-nomem:
+-	scrub_free_bio(bio);
+-
+-	return -ENOMEM;
+ }
+ 
+ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
+@@ -995,6 +963,8 @@ static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
+ 		      u8 *csum, int force)
+ {
+ 	struct scrub_bio *sbio;
++	struct page *page;
++	int ret;
+ 
+ again:
+ 	/*
+@@ -1015,12 +985,22 @@ again:
+ 	}
+ 	sbio = sdev->bios[sdev->curr];
+ 	if (sbio->count == 0) {
++		struct bio *bio;
++
+ 		sbio->physical = physical;
+ 		sbio->logical = logical;
++		bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
++		if (!bio)
++			return -ENOMEM;
++
++		bio->bi_private = sbio;
++		bio->bi_end_io = scrub_bio_end_io;
++		bio->bi_bdev = sdev->dev->bdev;
++		bio->bi_sector = sbio->physical >> 9;
++		sbio->err = 0;
++		sbio->bio = bio;
+ 	} else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
+ 		   sbio->logical + sbio->count * PAGE_SIZE != logical) {
+-		int ret;
+-
+ 		ret = scrub_submit(sdev);
+ 		if (ret)
+ 			return ret;
+@@ -1030,6 +1010,20 @@ again:
+ 	sbio->spag[sbio->count].generation = gen;
+ 	sbio->spag[sbio->count].have_csum = 0;
+ 	sbio->spag[sbio->count].mirror_num = mirror_num;
++
++	page = alloc_page(GFP_NOFS);
++	if (!page)
++		return -ENOMEM;
++
++	ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0);
++	if (!ret) {
++		__free_page(page);
++		ret = scrub_submit(sdev);
++		if (ret)
++			return ret;
++		goto again;
++	}
++
+ 	if (csum) {
+ 		sbio->spag[sbio->count].have_csum = 1;
+ 		memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 57080df..8bd9d6d 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -197,7 +197,7 @@ static match_table_t tokens = {
+ 	{Opt_subvolrootid, "subvolrootid=%d"},
+ 	{Opt_defrag, "autodefrag"},
+ 	{Opt_inode_cache, "inode_cache"},
+-	{Opt_no_space_cache, "no_space_cache"},
++	{Opt_no_space_cache, "nospace_cache"},
+ 	{Opt_recovery, "recovery"},
+ 	{Opt_err, NULL},
+ };
+@@ -448,6 +448,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
+ 		token = match_token(p, tokens, args);
+ 		switch (token) {
+ 		case Opt_subvol:
++			kfree(*subvol_name);
+ 			*subvol_name = match_strdup(&args[0]);
+ 			break;
+ 		case Opt_subvolid:
+@@ -710,7 +711,7 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
+ 	if (btrfs_test_opt(root, SPACE_CACHE))
+ 		seq_puts(seq, ",space_cache");
+ 	else
+-		seq_puts(seq, ",no_space_cache");
++		seq_puts(seq, ",nospace_cache");
+ 	if (btrfs_test_opt(root, CLEAR_CACHE))
+ 		seq_puts(seq, ",clear_cache");
+ 	if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
+@@ -890,7 +891,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
+ 	struct super_block *s;
+ 	struct dentry *root;
+ 	struct btrfs_fs_devices *fs_devices = NULL;
+-	struct btrfs_root *tree_root = NULL;
+ 	struct btrfs_fs_info *fs_info = NULL;
+ 	fmode_t mode = FMODE_READ;
+ 	char *subvol_name = NULL;
+@@ -904,8 +904,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
+ 	error = btrfs_parse_early_options(data, mode, fs_type,
+ 					  &subvol_name, &subvol_objectid,
+ 					  &subvol_rootid, &fs_devices);
+-	if (error)
++	if (error) {
++		kfree(subvol_name);
+ 		return ERR_PTR(error);
++	}
+ 
+ 	if (subvol_name) {
+ 		root = mount_subvol(subvol_name, flags, device_name, data);
+@@ -917,15 +919,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
+ 	if (error)
+ 		return ERR_PTR(error);
+ 
+-	error = btrfs_open_devices(fs_devices, mode, fs_type);
+-	if (error)
+-		return ERR_PTR(error);
+-
+-	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
+-		error = -EACCES;
+-		goto error_close_devices;
+-	}
+-
+ 	/*
+ 	 * Setup a dummy root and fs_info for test/set super.  This is because
+ 	 * we don't actually fill this stuff out until open_ctree, but we need
+@@ -933,24 +926,36 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
+ 	 * then open_ctree will properly initialize everything later.
+ 	 */
+ 	fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
+-	tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+-	if (!fs_info || !tree_root) {
++	if (!fs_info)
++		return ERR_PTR(-ENOMEM);
++
++	fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
++	if (!fs_info->tree_root) {
+ 		error = -ENOMEM;
+-		goto error_close_devices;
++		goto error_fs_info;
+ 	}
+-	fs_info->tree_root = tree_root;
++	fs_info->tree_root->fs_info = fs_info;
+ 	fs_info->fs_devices = fs_devices;
+-	tree_root->fs_info = fs_info;
+ 
+ 	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
+ 	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
+ 	if (!fs_info->super_copy || !fs_info->super_for_commit) {
+ 		error = -ENOMEM;
++		goto error_fs_info;
++	}
++
++	error = btrfs_open_devices(fs_devices, mode, fs_type);
++	if (error)
++		goto error_fs_info;
++
++	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
++		error = -EACCES;
+ 		goto error_close_devices;
+ 	}
+ 
+ 	bdev = fs_devices->latest_bdev;
+-	s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root);
++	s = sget(fs_type, btrfs_test_super, btrfs_set_super,
++		 fs_info->tree_root);
+ 	if (IS_ERR(s)) {
+ 		error = PTR_ERR(s);
+ 		goto error_close_devices;
+@@ -959,12 +964,12 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
+ 	if (s->s_root) {
+ 		if ((flags ^ s->s_flags) & MS_RDONLY) {
+ 			deactivate_locked_super(s);
+-			return ERR_PTR(-EBUSY);
++			error = -EBUSY;
++			goto error_close_devices;
+ 		}
+ 
+ 		btrfs_close_devices(fs_devices);
+ 		free_fs_info(fs_info);
+-		kfree(tree_root);
+ 	} else {
+ 		char b[BDEVNAME_SIZE];
+ 
+@@ -991,8 +996,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
+ 
+ error_close_devices:
+ 	btrfs_close_devices(fs_devices);
++error_fs_info:
+ 	free_fs_info(fs_info);
+-	kfree(tree_root);
+ 	return ERR_PTR(error);
+ }
+ 
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 960835e..6a0574e 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -882,8 +882,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
+ 	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
+ 
+ 	if (to_reserve > 0) {
+-		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
+-					  to_reserve);
++		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
++						  to_reserve);
+ 		if (ret) {
+ 			pending->error = ret;
+ 			goto fail;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index f8e29431..c37433d 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -999,7 +999,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
+ 	key.objectid = device->devid;
+ 	key.offset = start;
+ 	key.type = BTRFS_DEV_EXTENT_KEY;
+-
++again:
+ 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ 	if (ret > 0) {
+ 		ret = btrfs_previous_item(root, path, key.objectid,
+@@ -1012,6 +1012,9 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
+ 					struct btrfs_dev_extent);
+ 		BUG_ON(found_key.offset > start || found_key.offset +
+ 		       btrfs_dev_extent_length(leaf, extent) < start);
++		key = found_key;
++		btrfs_release_path(path);
++		goto again;
+ 	} else if (ret == 0) {
+ 		leaf = path->nodes[0];
+ 		extent = btrfs_item_ptr(leaf, path->slots[0],
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 2db1bd3..851ba3d 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1652,46 +1652,12 @@ out:
+ 	return error;
+ }
+ 
+-static int proc_pid_fd_link_getattr(struct vfsmount *mnt, struct dentry *dentry,
+-		struct kstat *stat)
+-{
+-	struct inode *inode = dentry->d_inode;
+-	struct task_struct *task = get_proc_task(inode);
+-	int rc;
+-
+-	if (task == NULL)
+-		return -ESRCH;
+-
+-	rc = -EACCES;
+-	if (lock_trace(task))
+-		goto out_task;
+-
+-	generic_fillattr(inode, stat);
+-	unlock_trace(task);
+-	rc = 0;
+-out_task:
+-	put_task_struct(task);
+-	return rc;
+-}
+-
+ static const struct inode_operations proc_pid_link_inode_operations = {
+ 	.readlink	= proc_pid_readlink,
+ 	.follow_link	= proc_pid_follow_link,
+ 	.setattr	= proc_setattr,
+ };
+ 
+-static const struct inode_operations proc_fdinfo_link_inode_operations = {
+-	.setattr	= proc_setattr,
+-	.getattr	= proc_pid_fd_link_getattr,
+-};
+-
+-static const struct inode_operations proc_fd_link_inode_operations = {
+-	.readlink	= proc_pid_readlink,
+-	.follow_link	= proc_pid_follow_link,
+-	.setattr	= proc_setattr,
+-	.getattr	= proc_pid_fd_link_getattr,
+-};
+-
+ 
+ /* building an inode */
+ 
+@@ -1923,61 +1889,49 @@ out:
+ 
+ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
+ {
+-	struct task_struct *task;
+-	struct files_struct *files;
++	struct task_struct *task = get_proc_task(inode);
++	struct files_struct *files = NULL;
+ 	struct file *file;
+ 	int fd = proc_fd(inode);
+-	int rc;
+-
+-	task = get_proc_task(inode);
+-	if (!task)
+-		return -ENOENT;
+-
+-	rc = -EACCES;
+-	if (lock_trace(task))
+-		goto out_task;
+-
+-	rc = -ENOENT;
+-	files = get_files_struct(task);
+-	if (files == NULL)
+-		goto out_unlock;
+ 
+-	/*
+-	 * We are not taking a ref to the file structure, so we must
+-	 * hold ->file_lock.
+-	 */
+-	spin_lock(&files->file_lock);
+-	file = fcheck_files(files, fd);
+-	if (file) {
+-		unsigned int f_flags;
+-		struct fdtable *fdt;
+-
+-		fdt = files_fdtable(files);
+-		f_flags = file->f_flags & ~O_CLOEXEC;
+-		if (FD_ISSET(fd, fdt->close_on_exec))
+-			f_flags |= O_CLOEXEC;
+-
+-		if (path) {
+-			*path = file->f_path;
+-			path_get(&file->f_path);
++	if (task) {
++		files = get_files_struct(task);
++		put_task_struct(task);
++	}
++	if (files) {
++		/*
++		 * We are not taking a ref to the file structure, so we must
++		 * hold ->file_lock.
++		 */
++		spin_lock(&files->file_lock);
++		file = fcheck_files(files, fd);
++		if (file) {
++			unsigned int f_flags;
++			struct fdtable *fdt;
++
++			fdt = files_fdtable(files);
++			f_flags = file->f_flags & ~O_CLOEXEC;
++			if (FD_ISSET(fd, fdt->close_on_exec))
++				f_flags |= O_CLOEXEC;
++
++			if (path) {
++				*path = file->f_path;
++				path_get(&file->f_path);
++			}
++			if (info)
++				snprintf(info, PROC_FDINFO_MAX,
++					 "pos:\t%lli\n"
++					 "flags:\t0%o\n",
++					 (long long) file->f_pos,
++					 f_flags);
++			spin_unlock(&files->file_lock);
++			put_files_struct(files);
++			return 0;
+ 		}
+-		if (info)
+-			snprintf(info, PROC_FDINFO_MAX,
+-				 "pos:\t%lli\n"
+-				 "flags:\t0%o\n",
+-				 (long long) file->f_pos,
+-				 f_flags);
+-		rc = 0;
+-	} else
+-		rc = -ENOENT;
+-	spin_unlock(&files->file_lock);
+-	put_files_struct(files);
+-
+-out_unlock:
+-	unlock_trace(task);
+-out_task:
+-	put_task_struct(task);
+-	return rc;
++		spin_unlock(&files->file_lock);
++		put_files_struct(files);
++	}
++	return -ENOENT;
+ }
+ 
+ static int proc_fd_link(struct inode *inode, struct path *path)
+@@ -2072,7 +2026,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
+ 	spin_unlock(&files->file_lock);
+ 	put_files_struct(files);
+ 
+-	inode->i_op = &proc_fd_link_inode_operations;
++	inode->i_op = &proc_pid_link_inode_operations;
+ 	inode->i_size = 64;
+ 	ei->op.proc_get_link = proc_fd_link;
+ 	d_set_d_op(dentry, &tid_fd_dentry_operations);
+@@ -2104,12 +2058,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,
+ 	if (fd == ~0U)
+ 		goto out;
+ 
+-	result = ERR_PTR(-EACCES);
+-	if (lock_trace(task))
+-		goto out;
+-
+ 	result = instantiate(dir, dentry, task, &fd);
+-	unlock_trace(task);
+ out:
+ 	put_task_struct(task);
+ out_no_task:
+@@ -2129,28 +2078,23 @@ static int proc_readfd_common(struct file * filp, void * dirent,
+ 	retval = -ENOENT;
+ 	if (!p)
+ 		goto out_no_task;
+-
+-	retval = -EACCES;
+-	if (lock_trace(p))
+-		goto out;
+-
+ 	retval = 0;
+ 
+ 	fd = filp->f_pos;
+ 	switch (fd) {
+ 		case 0:
+ 			if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
+-				goto out_unlock;
++				goto out;
+ 			filp->f_pos++;
+ 		case 1:
+ 			ino = parent_ino(dentry);
+ 			if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
+-				goto out_unlock;
++				goto out;
+ 			filp->f_pos++;
+ 		default:
+ 			files = get_files_struct(p);
+ 			if (!files)
+-				goto out_unlock;
++				goto out;
+ 			rcu_read_lock();
+ 			for (fd = filp->f_pos-2;
+ 			     fd < files_fdtable(files)->max_fds;
+@@ -2174,9 +2118,6 @@ static int proc_readfd_common(struct file * filp, void * dirent,
+ 			rcu_read_unlock();
+ 			put_files_struct(files);
+ 	}
+-
+-out_unlock:
+-	unlock_trace(p);
+ out:
+ 	put_task_struct(p);
+ out_no_task:
+@@ -2254,7 +2195,6 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
+ 	ei->fd = fd;
+ 	inode->i_mode = S_IFREG | S_IRUSR;
+ 	inode->i_fop = &proc_fdinfo_file_operations;
+-	inode->i_op = &proc_fdinfo_link_inode_operations;
+ 	d_set_d_op(dentry, &tid_fd_dentry_operations);
+ 	d_add(dentry, inode);
+ 	/* Close the race of the process dying before we return the dentry */
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 33b1331..574d4ee 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -189,7 +189,7 @@ xfs_end_io(
+ 	int		error = 0;
+ 
+ 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+-		error = -EIO;
++		ioend->io_error = -EIO;
+ 		goto done;
+ 	}
+ 	if (ioend->io_error)
+diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
+index 1a35138..eac97ef 100644
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -656,7 +656,7 @@ xfs_buf_item_committing(
+ /*
+  * This is the ops vector shared by all buf log items.
+  */
+-static struct xfs_item_ops xfs_buf_item_ops = {
++static const struct xfs_item_ops xfs_buf_item_ops = {
+ 	.iop_size	= xfs_buf_item_size,
+ 	.iop_format	= xfs_buf_item_format,
+ 	.iop_pin	= xfs_buf_item_pin,
+diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
+index bb3f71d..0dee0b7 100644
+--- a/fs/xfs/xfs_dquot_item.c
++++ b/fs/xfs/xfs_dquot_item.c
+@@ -295,7 +295,7 @@ xfs_qm_dquot_logitem_committing(
+ /*
+  * This is the ops vector for dquots
+  */
+-static struct xfs_item_ops xfs_dquot_item_ops = {
++static const struct xfs_item_ops xfs_dquot_item_ops = {
+ 	.iop_size	= xfs_qm_dquot_logitem_size,
+ 	.iop_format	= xfs_qm_dquot_logitem_format,
+ 	.iop_pin	= xfs_qm_dquot_logitem_pin,
+@@ -483,7 +483,7 @@ xfs_qm_qoff_logitem_committing(
+ {
+ }
+ 
+-static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
++static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
+ 	.iop_size	= xfs_qm_qoff_logitem_size,
+ 	.iop_format	= xfs_qm_qoff_logitem_format,
+ 	.iop_pin	= xfs_qm_qoff_logitem_pin,
+@@ -498,7 +498,7 @@ static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
+ /*
+  * This is the ops vector shared by all quotaoff-start log items.
+  */
+-static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
++static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
+ 	.iop_size	= xfs_qm_qoff_logitem_size,
+ 	.iop_format	= xfs_qm_qoff_logitem_format,
+ 	.iop_pin	= xfs_qm_qoff_logitem_pin,
+diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
+index d22e626..35c2aff 100644
+--- a/fs/xfs/xfs_extfree_item.c
++++ b/fs/xfs/xfs_extfree_item.c
+@@ -217,7 +217,7 @@ xfs_efi_item_committing(
+ /*
+  * This is the ops vector shared by all efi log items.
+  */
+-static struct xfs_item_ops xfs_efi_item_ops = {
++static const struct xfs_item_ops xfs_efi_item_ops = {
+ 	.iop_size	= xfs_efi_item_size,
+ 	.iop_format	= xfs_efi_item_format,
+ 	.iop_pin	= xfs_efi_item_pin,
+@@ -477,7 +477,7 @@ xfs_efd_item_committing(
+ /*
+  * This is the ops vector shared by all efd log items.
+  */
+-static struct xfs_item_ops xfs_efd_item_ops = {
++static const struct xfs_item_ops xfs_efd_item_ops = {
+ 	.iop_size	= xfs_efd_item_size,
+ 	.iop_format	= xfs_efd_item_format,
+ 	.iop_pin	= xfs_efd_item_pin,
+diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
+index b7cf21b..abaafdb 100644
+--- a/fs/xfs/xfs_inode_item.c
++++ b/fs/xfs/xfs_inode_item.c
+@@ -795,7 +795,7 @@ xfs_inode_item_committing(
+ /*
+  * This is the ops vector shared by all buf log items.
+  */
+-static struct xfs_item_ops xfs_inode_item_ops = {
++static const struct xfs_item_ops xfs_inode_item_ops = {
+ 	.iop_size	= xfs_inode_item_size,
+ 	.iop_format	= xfs_inode_item_format,
+ 	.iop_pin	= xfs_inode_item_pin,
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 2758a62..a14cd89 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -626,7 +626,7 @@ xfs_log_item_init(
+ 	struct xfs_mount	*mp,
+ 	struct xfs_log_item	*item,
+ 	int			type,
+-	struct xfs_item_ops	*ops)
++	const struct xfs_item_ops *ops)
+ {
+ 	item->li_mountp = mp;
+ 	item->li_ailp = mp->m_ail;
+diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
+index 78c9039..3f7bf45 100644
+--- a/fs/xfs/xfs_log.h
++++ b/fs/xfs/xfs_log.h
+@@ -137,7 +137,7 @@ struct xfs_trans;
+ void	xfs_log_item_init(struct xfs_mount	*mp,
+ 			struct xfs_log_item	*item,
+ 			int			type,
+-			struct xfs_item_ops	*ops);
++			const struct xfs_item_ops *ops);
+ 
+ xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
+ 		       struct xlog_ticket *ticket,
+diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
+index 603f3eb..3ae713c 100644
+--- a/fs/xfs/xfs_trans.h
++++ b/fs/xfs/xfs_trans.h
+@@ -326,7 +326,7 @@ typedef struct xfs_log_item {
+ 						 struct xfs_log_item *);
+ 							/* buffer item iodone */
+ 							/* callback func */
+-	struct xfs_item_ops		*li_ops;	/* function list */
++	const struct xfs_item_ops	*li_ops;	/* function list */
+ 
+ 	/* delayed logging */
+ 	struct list_head		li_cil;		/* CIL pointers */
+@@ -341,7 +341,7 @@ typedef struct xfs_log_item {
+ 	{ XFS_LI_IN_AIL,	"IN_AIL" }, \
+ 	{ XFS_LI_ABORTED,	"ABORTED" }
+ 
+-typedef struct xfs_item_ops {
++struct xfs_item_ops {
+ 	uint (*iop_size)(xfs_log_item_t *);
+ 	void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
+ 	void (*iop_pin)(xfs_log_item_t *);
+@@ -352,7 +352,7 @@ typedef struct xfs_item_ops {
+ 	void (*iop_push)(xfs_log_item_t *);
+ 	bool (*iop_pushbuf)(xfs_log_item_t *);
+ 	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
+-} xfs_item_ops_t;
++};
+ 
+ #define IOP_SIZE(ip)		(*(ip)->li_ops->iop_size)(ip)
+ #define IOP_FORMAT(ip,vp)	(*(ip)->li_ops->iop_format)(ip, vp)
+diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
+index 4ecf2a5..ce9268a 100644
+--- a/fs/xfs/xfs_vnodeops.c
++++ b/fs/xfs/xfs_vnodeops.c
+@@ -112,7 +112,7 @@ xfs_readlink(
+ 	char		*link)
+ {
+ 	xfs_mount_t	*mp = ip->i_mount;
+-	int		pathlen;
++	xfs_fsize_t	pathlen;
+ 	int		error = 0;
+ 
+ 	trace_xfs_readlink(ip);
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index cf39949..1f9e951 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -990,7 +990,9 @@ struct drm_minor {
+ 	struct proc_dir_entry *proc_root;  /**< proc directory entry */
+ 	struct drm_info_node proc_nodes;
+ 	struct dentry *debugfs_root;
+-	struct drm_info_node debugfs_nodes;
++
++	struct list_head debugfs_list;
++	struct mutex debugfs_lock; /* Protects debugfs_list. */
+ 
+ 	struct drm_master *master; /* currently active master for this node */
+ 	struct list_head master_list;
+diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
+index 874c4d2..1d161cb 100644
+--- a/include/drm/exynos_drm.h
++++ b/include/drm/exynos_drm.h
+@@ -36,11 +36,13 @@
+  *	- this size value would be page-aligned internally.
+  * @flags: user request for setting memory type or cache attributes.
+  * @handle: returned handle for the object.
++ * @pad: just padding to be 64-bit aligned.
+  */
+ struct drm_exynos_gem_create {
+ 	unsigned int size;
+ 	unsigned int flags;
+ 	unsigned int handle;
++	unsigned int pad;
+ };
+ 
+ /**
+diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
+index afb9458..98ce812 100644
+--- a/include/linux/devfreq.h
++++ b/include/linux/devfreq.h
+@@ -41,7 +41,7 @@ struct devfreq_dev_status {
+ 	unsigned long total_time;
+ 	unsigned long busy_time;
+ 	unsigned long current_frequency;
+-	void *private_date;
++	void *private_data;
+ };
+ 
+ /**
+diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
+index 08a2fee..aad6bd4 100644
+--- a/include/linux/hwspinlock.h
++++ b/include/linux/hwspinlock.h
+@@ -118,7 +118,6 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ static inline
+ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ {
+-	return 0;
+ }
+ 
+ static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
+diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
+index fae2950..83a9cae 100644
+--- a/include/linux/mfd/wm8994/registers.h
++++ b/include/linux/mfd/wm8994/registers.h
+@@ -1963,6 +1963,21 @@
+ #define WM8958_MICB2_DISCH_WIDTH                     1  /* MICB2_DISCH */
+ 
+ /*
++ * R210 (0xD2) - Mic Detect 3
++ */
++#define WM8958_MICD_LVL_MASK                    0x07FC  /* MICD_LVL - [10:2] */
++#define WM8958_MICD_LVL_SHIFT                        2  /* MICD_LVL - [10:2] */
++#define WM8958_MICD_LVL_WIDTH                        9  /* MICD_LVL - [10:2] */
++#define WM8958_MICD_VALID                       0x0002  /* MICD_VALID */
++#define WM8958_MICD_VALID_MASK                  0x0002  /* MICD_VALID */
++#define WM8958_MICD_VALID_SHIFT                      1  /* MICD_VALID */
++#define WM8958_MICD_VALID_WIDTH                      1  /* MICD_VALID */
++#define WM8958_MICD_STS                         0x0001  /* MICD_STS */
++#define WM8958_MICD_STS_MASK                    0x0001  /* MICD_STS */
++#define WM8958_MICD_STS_SHIFT                        0  /* MICD_STS */
++#define WM8958_MICD_STS_WIDTH                        1  /* MICD_STS */
++
++/*
+  * R76 (0x4C) - Charge Pump (1)
+  */
+ #define WM8994_CP_ENA                           0x8000  /* CP_ENA */
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 3fdf251..172ba70 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2405,6 +2405,8 @@
+ 
+ #define PCI_VENDOR_ID_AZWAVE		0x1a3b
+ 
++#define PCI_VENDOR_ID_ASMEDIA		0x1b21
++
+ #define PCI_VENDOR_ID_TEKRAM		0x1de1
+ #define PCI_DEVICE_ID_TEKRAM_DC290	0xdc29
+ 
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 56db751..995e3bd 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -70,6 +70,7 @@ static struct pm_qos_constraints cpu_dma_constraints = {
+ };
+ static struct pm_qos_object cpu_dma_pm_qos = {
+ 	.constraints = &cpu_dma_constraints,
++	.name = "cpu_dma_latency",
+ };
+ 
+ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
+index 5dbab38..130cfe6 100644
+--- a/sound/core/vmaster.c
++++ b/sound/core/vmaster.c
+@@ -52,6 +52,7 @@ struct link_slave {
+ 	struct link_ctl_info info;
+ 	int vals[2];		/* current values */
+ 	unsigned int flags;
++	struct snd_kcontrol *kctl; /* original kcontrol pointer */
+ 	struct snd_kcontrol slave; /* the copy of original control entry */
+ };
+ 
+@@ -252,6 +253,7 @@ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
+ 		       slave->count * sizeof(*slave->vd), GFP_KERNEL);
+ 	if (!srec)
+ 		return -ENOMEM;
++	srec->kctl = slave;
+ 	srec->slave = *slave;
+ 	memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd));
+ 	srec->master = master_link;
+@@ -333,10 +335,18 @@ static int master_put(struct snd_kcontrol *kcontrol,
+ static void master_free(struct snd_kcontrol *kcontrol)
+ {
+ 	struct link_master *master = snd_kcontrol_chip(kcontrol);
+-	struct link_slave *slave;
+-
+-	list_for_each_entry(slave, &master->slaves, list)
+-		slave->master = NULL;
++	struct link_slave *slave, *n;
++
++	/* free all slave links and restore the original slave kctls */
++	list_for_each_entry_safe(slave, n, &master->slaves, list) {
++		struct snd_kcontrol *sctl = slave->kctl;
++		struct list_head olist = sctl->list;
++		memcpy(sctl, &slave->slave, sizeof(*sctl));
++		memcpy(sctl->vd, slave->slave.vd,
++		       sctl->count * sizeof(*sctl->vd));
++		sctl->list = olist; /* keep the current linked-list */
++		kfree(slave);
++	}
+ 	kfree(master);
+ }
+ 
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 916a186..e44b107 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2331,6 +2331,39 @@ int snd_hda_codec_reset(struct hda_codec *codec)
+ 	return 0;
+ }
+ 
++typedef int (*map_slave_func_t)(void *, struct snd_kcontrol *);
++
++/* apply the function to all matching slave ctls in the mixer list */
++static int map_slaves(struct hda_codec *codec, const char * const *slaves,
++		      map_slave_func_t func, void *data) 
++{
++	struct hda_nid_item *items;
++	const char * const *s;
++	int i, err;
++
++	items = codec->mixers.list;
++	for (i = 0; i < codec->mixers.used; i++) {
++		struct snd_kcontrol *sctl = items[i].kctl;
++		if (!sctl || !sctl->id.name ||
++		    sctl->id.iface != SNDRV_CTL_ELEM_IFACE_MIXER)
++			continue;
++		for (s = slaves; *s; s++) {
++			if (!strcmp(sctl->id.name, *s)) {
++				err = func(data, sctl);
++				if (err)
++					return err;
++				break;
++			}
++		}
++	}
++	return 0;
++}
++
++static int check_slave_present(void *data, struct snd_kcontrol *sctl)
++{
++	return 1;
++}
++
+ /**
+  * snd_hda_add_vmaster - create a virtual master control and add slaves
+  * @codec: HD-audio codec
+@@ -2351,12 +2384,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
+ 			unsigned int *tlv, const char * const *slaves)
+ {
+ 	struct snd_kcontrol *kctl;
+-	const char * const *s;
+ 	int err;
+ 
+-	for (s = slaves; *s && !snd_hda_find_mixer_ctl(codec, *s); s++)
+-		;
+-	if (!*s) {
++	err = map_slaves(codec, slaves, check_slave_present, NULL);
++	if (err != 1) {
+ 		snd_printdd("No slave found for %s\n", name);
+ 		return 0;
+ 	}
+@@ -2367,23 +2398,10 @@ int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
+ 	if (err < 0)
+ 		return err;
+ 
+-	for (s = slaves; *s; s++) {
+-		struct snd_kcontrol *sctl;
+-		int i = 0;
+-		for (;;) {
+-			sctl = _snd_hda_find_mixer_ctl(codec, *s, i);
+-			if (!sctl) {
+-				if (!i)
+-					snd_printdd("Cannot find slave %s, "
+-						    "skipped\n", *s);
+-				break;
+-			}
+-			err = snd_ctl_add_slave(kctl, sctl);
+-			if (err < 0)
+-				return err;
+-			i++;
+-		}
+-	}
++	err = map_slaves(codec, slaves, (map_slave_func_t)snd_ctl_add_slave,
++			 kctl);
++	if (err < 0)
++		return err;
+ 	return 0;
+ }
+ EXPORT_SYMBOL_HDA(snd_hda_add_vmaster);
+@@ -4752,6 +4770,7 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
+ 	memset(sequences_hp, 0, sizeof(sequences_hp));
+ 	assoc_line_out = 0;
+ 
++	codec->ignore_misc_bit = true;
+ 	end_nid = codec->start_nid + codec->num_nodes;
+ 	for (nid = codec->start_nid; nid < end_nid; nid++) {
+ 		unsigned int wid_caps = get_wcaps(codec, nid);
+@@ -4767,6 +4786,9 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
+ 			continue;
+ 
+ 		def_conf = snd_hda_codec_get_pincfg(codec, nid);
++		if (!(get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) &
++		      AC_DEFCFG_MISC_NO_PRESENCE))
++			codec->ignore_misc_bit = false;
+ 		conn = get_defcfg_connect(def_conf);
+ 		if (conn == AC_JACK_PORT_NONE)
+ 			continue;
+diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
+index 755f2b0..5644711 100644
+--- a/sound/pci/hda/hda_codec.h
++++ b/sound/pci/hda/hda_codec.h
+@@ -854,6 +854,7 @@ struct hda_codec {
+ 	unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
+ 	unsigned int pins_shutup:1;	/* pins are shut up */
+ 	unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
++	unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
+ #ifdef CONFIG_SND_HDA_POWER_SAVE
+ 	unsigned int power_on :1;	/* current (global) power-state */
+ 	unsigned int power_transition :1; /* power-state in transition */
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index dcbea0d..6579e0f 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -510,13 +510,15 @@ int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid);
+ 
+ static inline bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid)
+ {
+-	return (snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT) &&
+-		/* disable MISC_NO_PRESENCE check because it may break too
+-		 * many devices
+-		 */
+-		/*(get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid) &
+-		  AC_DEFCFG_MISC_NO_PRESENCE)) &&*/
+-		(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP);
++	if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT))
++		return false;
++	if (!codec->ignore_misc_bit &&
++	    (get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) &
++	     AC_DEFCFG_MISC_NO_PRESENCE))
++		return false;
++	if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP))
++		return false;
++	return true;
+ }
+ 
+ /* flags for hda_nid_item */
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 5e706e4..0de2119 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3062,7 +3062,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS),
+ 	SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
+ 	SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
+-	SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
+ 	SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
+ 		      CXT5066_LAPTOP),
+ 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a24e068..308bb57 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -284,7 +284,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
+ 	struct alc_spec *spec = codec->spec;
+ 	const struct hda_input_mux *imux;
+ 	unsigned int mux_idx;
+-	int i, type;
++	int i, type, num_conns;
+ 	hda_nid_t nid;
+ 
+ 	mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
+@@ -307,16 +307,17 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
+ 		spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
+ 
+ 	/* no selection? */
+-	if (snd_hda_get_conn_list(codec, nid, NULL) <= 1)
++	num_conns = snd_hda_get_conn_list(codec, nid, NULL);
++	if (num_conns <= 1)
+ 		return 1;
+ 
+ 	type = get_wcaps_type(get_wcaps(codec, nid));
+ 	if (type == AC_WID_AUD_MIX) {
+ 		/* Matrix-mixer style (e.g. ALC882) */
+-		for (i = 0; i < imux->num_items; i++) {
+-			unsigned int v = (i == idx) ? 0 : HDA_AMP_MUTE;
+-			snd_hda_codec_amp_stereo(codec, nid, HDA_INPUT,
+-						 imux->items[i].index,
++		int active = imux->items[idx].index;
++		for (i = 0; i < num_conns; i++) {
++			unsigned int v = (i == active) ? 0 : HDA_AMP_MUTE;
++			snd_hda_codec_amp_stereo(codec, nid, HDA_INPUT, i,
+ 						 HDA_AMP_MUTE, v);
+ 		}
+ 	} else {
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 4e715fe..edc2b7b 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -95,6 +95,7 @@ enum {
+ 	STAC_92HD83XXX_REF,
+ 	STAC_92HD83XXX_PWR_REF,
+ 	STAC_DELL_S14,
++	STAC_DELL_VOSTRO_3500,
+ 	STAC_92HD83XXX_HP,
+ 	STAC_92HD83XXX_HP_cNB11_INTQUAD,
+ 	STAC_HP_DV7_4000,
+@@ -1659,6 +1660,12 @@ static const unsigned int dell_s14_pin_configs[10] = {
+ 	0x40f000f0, 0x40f000f0,
+ };
+ 
++static const unsigned int dell_vostro_3500_pin_configs[10] = {
++	0x02a11020, 0x0221101f, 0x400000f0, 0x90170110,
++	0x400000f1, 0x400000f2, 0x400000f3, 0x90a60160,
++	0x400000f4, 0x400000f5,
++};
++
+ static const unsigned int hp_dv7_4000_pin_configs[10] = {
+ 	0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110,
+ 	0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140,
+@@ -1675,6 +1682,7 @@ static const unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
+ 	[STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
+ 	[STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
+ 	[STAC_DELL_S14] = dell_s14_pin_configs,
++	[STAC_DELL_VOSTRO_3500] = dell_vostro_3500_pin_configs,
+ 	[STAC_92HD83XXX_HP_cNB11_INTQUAD] = hp_cNB11_intquad_pin_configs,
+ 	[STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs,
+ };
+@@ -1684,6 +1692,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
+ 	[STAC_92HD83XXX_REF] = "ref",
+ 	[STAC_92HD83XXX_PWR_REF] = "mic-ref",
+ 	[STAC_DELL_S14] = "dell-s14",
++	[STAC_DELL_VOSTRO_3500] = "dell-vostro-3500",
+ 	[STAC_92HD83XXX_HP] = "hp",
+ 	[STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
+ 	[STAC_HP_DV7_4000] = "hp-dv7-4000",
+@@ -1697,6 +1706,8 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
+ 		      "DFI LanParty", STAC_92HD83XXX_REF),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
+ 		      "unknown Dell", STAC_DELL_S14),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x1028,
++		      "Dell Vostro 3500", STAC_DELL_VOSTRO_3500),
+ 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
+ 			  "HP", STAC_92HD83XXX_HP),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1656,
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index 29e3125..11718b49 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -1077,6 +1077,13 @@ static snd_pcm_uframes_t snd_intel8x0_pcm_pointer(struct snd_pcm_substream *subs
+ 		}
+ 		if (civ != igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV))
+ 			continue;
++
++		/* An IO read operation is very expensive inside a virtual
++		 * machine as it is emulated. The probability that a
++		 * subsequent PICB read will return a different result is
++		 * high enough to loop until the timeout here.
++		 * An unchanged CIV is a strict enough condition to be sure
++		 * that PICB is valid inside a VM on an emulated card. */
+ 		if (chip->inside_vm)
+ 			break;
+ 		if (ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
+@@ -2930,6 +2937,45 @@ static unsigned int sis_codec_bits[3] = {
+ 	ICH_PCR, ICH_SCR, ICH_SIS_TCR
+ };
+ 
++static int __devinit snd_intel8x0_inside_vm(struct pci_dev *pci)
++{
++	int result  = inside_vm;
++	char *msg   = NULL;
++
++	/* check module parameter first (override detection) */
++	if (result >= 0) {
++		msg = result ? "enable (forced) VM" : "disable (forced) VM";
++		goto fini;
++	}
++
++	/* detect KVM and Parallels virtual environments */
++	result = kvm_para_available();
++#ifdef X86_FEATURE_HYPERVISOR
++	result = result || boot_cpu_has(X86_FEATURE_HYPERVISOR);
++#endif
++	if (!result)
++		goto fini;
++
++	/* check for known (emulated) devices */
++	if (pci->subsystem_vendor == 0x1af4 &&
++	    pci->subsystem_device == 0x1100) {
++		/* KVM emulated sound, PCI SSID: 1af4:1100 */
++		msg = "enable KVM";
++	} else if (pci->subsystem_vendor == 0x1ab8) {
++		/* Parallels VM emulated sound, PCI SSID: 1ab8:xxxx */
++		msg = "enable Parallels VM";
++	} else {
++		msg = "disable (unknown or VT-d) VM";
++		result = 0;
++	}
++
++fini:
++	if (msg != NULL)
++		printk(KERN_INFO "intel8x0: %s optimization\n", msg);
++
++	return result;
++}
++
+ static int __devinit snd_intel8x0_create(struct snd_card *card,
+ 					 struct pci_dev *pci,
+ 					 unsigned long device_type,
+@@ -2997,9 +3043,7 @@ static int __devinit snd_intel8x0_create(struct snd_card *card,
+ 	if (xbox)
+ 		chip->xbox = 1;
+ 
+-	chip->inside_vm = inside_vm;
+-	if (inside_vm)
+-		printk(KERN_INFO "intel8x0: enable KVM optimization\n");
++	chip->inside_vm = snd_intel8x0_inside_vm(pci);
+ 
+ 	if (pci->vendor == PCI_VENDOR_ID_INTEL &&
+ 	    pci->device == PCI_DEVICE_ID_INTEL_440MX)
+@@ -3243,14 +3287,6 @@ static int __devinit snd_intel8x0_probe(struct pci_dev *pci,
+ 			buggy_irq = 0;
+ 	}
+ 
+-	if (inside_vm < 0) {
+-		/* detect KVM and Parallels virtual environments */
+-		inside_vm = kvm_para_available();
+-#if defined(__i386__) || defined(__x86_64__)
+-		inside_vm = inside_vm || boot_cpu_has(X86_FEATURE_HYPERVISOR);
+-#endif
+-	}
+-
+ 	if ((err = snd_intel8x0_create(card, pci, pci_id->driver_data,
+ 				       &chip)) < 0) {
+ 		snd_card_free(card);
+diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
+index a3ce1b2..1aa52ef 100644
+--- a/sound/ppc/snd_ps3.c
++++ b/sound/ppc/snd_ps3.c
+@@ -876,7 +876,7 @@ static void __devinit snd_ps3_audio_set_base_addr(uint64_t ioaddr_start)
+ 		(0x0fUL << 12) |
+ 		(PS3_AUDIO_IOID);
+ 
+-	ret = lv1_gpu_attribute(0x100, 0x007, val, 0, 0);
++	ret = lv1_gpu_attribute(0x100, 0x007, val);
+ 	if (ret)
+ 		pr_info("%s: gpu_attribute failed %d\n", __func__,
+ 			ret);
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 6b73efd..9c982e4 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -56,7 +56,7 @@ static int wm8994_retune_mobile_base[] = {
+ static int wm8994_readable(struct snd_soc_codec *codec, unsigned int reg)
+ {
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+-	struct wm8994 *control = wm8994->control_data;
++	struct wm8994 *control = codec->control_data;
+ 
+ 	switch (reg) {
+ 	case WM8994_GPIO_1:
+@@ -3030,19 +3030,34 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
+ {
+ 	struct wm8994_priv *wm8994 = data;
+ 	struct snd_soc_codec *codec = wm8994->codec;
+-	int reg;
++	int reg, count;
+ 
+-	reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
+-	if (reg < 0) {
+-		dev_err(codec->dev, "Failed to read mic detect status: %d\n",
+-			reg);
+-		return IRQ_NONE;
+-	}
++	/* We may occasionally read a detection without an impedance
++	 * range being provided - if that happens, loop again.
++	 */
++	count = 10;
++	do {
++		reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
++		if (reg < 0) {
++			dev_err(codec->dev,
++				"Failed to read mic detect status: %d\n",
++				reg);
++			return IRQ_NONE;
++		}
+ 
+-	if (!(reg & WM8958_MICD_VALID)) {
+-		dev_dbg(codec->dev, "Mic detect data not valid\n");
+-		goto out;
+-	}
++		if (!(reg & WM8958_MICD_VALID)) {
++			dev_dbg(codec->dev, "Mic detect data not valid\n");
++			goto out;
++		}
++
++		if (!(reg & WM8958_MICD_STS) || (reg & WM8958_MICD_LVL_MASK))
++			break;
++
++		msleep(1);
++	} while (count--);
++
++	if (count == 0)
++		dev_warn(codec->dev, "No impedance range reported for jack\n");
+ 
+ #ifndef CONFIG_SND_SOC_WM8994_MODULE
+ 	trace_snd_soc_jack_irq(dev_name(codec->dev));
+@@ -3180,9 +3195,9 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
+ 
+ 	wm8994_request_irq(codec->control_data, WM8994_IRQ_FIFOS_ERR,
+ 			   wm8994_fifo_error, "FIFO error", codec);
+-	wm8994_request_irq(wm8994->control_data, WM8994_IRQ_TEMP_WARN,
++	wm8994_request_irq(codec->control_data, WM8994_IRQ_TEMP_WARN,
+ 			   wm8994_temp_warn, "Thermal warning", codec);
+-	wm8994_request_irq(wm8994->control_data, WM8994_IRQ_TEMP_SHUT,
++	wm8994_request_irq(codec->control_data, WM8994_IRQ_TEMP_SHUT,
+ 			   wm8994_temp_shut, "Thermal shutdown", codec);
+ 
+ 	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_DCS_DONE,
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 60f65ac..ab23869 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -765,10 +765,61 @@ static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
+  * interface to ALSA control for feature/mixer units
+  */
+ 
++/* volume control quirks */
++static void volume_control_quirks(struct usb_mixer_elem_info *cval,
++				  struct snd_kcontrol *kctl)
++{
++	switch (cval->mixer->chip->usb_id) {
++	case USB_ID(0x0471, 0x0101):
++	case USB_ID(0x0471, 0x0104):
++	case USB_ID(0x0471, 0x0105):
++	case USB_ID(0x0672, 0x1041):
++	/* quirk for UDA1321/N101.
++	 * note that detection between firmware 2.1.1.7 (N101)
++	 * and later 2.1.1.21 is not very clear from datasheets.
++	 * I hope that the min value is -15360 for newer firmware --jk
++	 */
++		if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
++		    cval->min == -15616) {
++			snd_printk(KERN_INFO
++				 "set volume quirk for UDA1321/N101 chip\n");
++			cval->max = -256;
++		}
++		break;
++
++	case USB_ID(0x046d, 0x09a4):
++		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
++			snd_printk(KERN_INFO
++				"set volume quirk for QuickCam E3500\n");
++			cval->min = 6080;
++			cval->max = 8768;
++			cval->res = 192;
++		}
++		break;
++
++	case USB_ID(0x046d, 0x0808):
++	case USB_ID(0x046d, 0x0809):
++	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
++	case USB_ID(0x046d, 0x0991):
++	/* Most audio usb devices lie about volume resolution.
++	 * Most Logitech webcams have res = 384.
++	 * Probably there is some logitech magic behind this number --fishor
++	 */
++		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
++			snd_printk(KERN_INFO
++				"set resolution quirk: cval->res = 384\n");
++			cval->res = 384;
++		}
++		break;
++
++	}
++}
++
+ /*
+  * retrieve the minimum and maximum values for the specified control
+  */
+-static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
++static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
++				   int default_min, struct snd_kcontrol *kctl)
+ {
+ 	/* for failsafe */
+ 	cval->min = default_min;
+@@ -844,6 +895,9 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
+ 		cval->initialized = 1;
+ 	}
+ 
++	if (kctl)
++		volume_control_quirks(cval, kctl);
++
+ 	/* USB descriptions contain the dB scale in 1/256 dB unit
+ 	 * while ALSA TLV contains in 1/100 dB unit
+ 	 */
+@@ -864,6 +918,7 @@ static int get_min_max(struct usb_mixer_elem_info *cval, int default_min)
+ 	return 0;
+ }
+ 
++#define get_min_max(cval, def)	get_min_max_with_quirks(cval, def, NULL)
+ 
+ /* get a feature/mixer unit info */
+ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+@@ -882,7 +937,7 @@ static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_
+ 		uinfo->value.integer.max = 1;
+ 	} else {
+ 		if (!cval->initialized) {
+-			get_min_max(cval, 0);
++			get_min_max_with_quirks(cval, 0, kcontrol);
+ 			if (cval->initialized && cval->dBmin >= cval->dBmax) {
+ 				kcontrol->vd[0].access &= 
+ 					~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
+@@ -1045,9 +1100,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
+ 		cval->ch_readonly = readonly_mask;
+ 	}
+ 
+-	/* get min/max values */
+-	get_min_max(cval, 0);
+-
+ 	/* if all channels in the mask are marked read-only, make the control
+ 	 * read-only. set_cur_mix_value() will check the mask again and won't
+ 	 * issue write commands to read-only channels. */
+@@ -1069,6 +1121,9 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
+ 		len = snd_usb_copy_string_desc(state, nameid,
+ 				kctl->id.name, sizeof(kctl->id.name));
+ 
++	/* get min/max values */
++	get_min_max_with_quirks(cval, 0, kctl);
++
+ 	switch (control) {
+ 	case UAC_FU_MUTE:
+ 	case UAC_FU_VOLUME:
+@@ -1118,51 +1173,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
+ 		break;
+ 	}
+ 
+-	/* volume control quirks */
+-	switch (state->chip->usb_id) {
+-	case USB_ID(0x0471, 0x0101):
+-	case USB_ID(0x0471, 0x0104):
+-	case USB_ID(0x0471, 0x0105):
+-	case USB_ID(0x0672, 0x1041):
+-	/* quirk for UDA1321/N101.
+-	 * note that detection between firmware 2.1.1.7 (N101)
+-	 * and later 2.1.1.21 is not very clear from datasheets.
+-	 * I hope that the min value is -15360 for newer firmware --jk
+-	 */
+-		if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
+-		    cval->min == -15616) {
+-			snd_printk(KERN_INFO
+-				 "set volume quirk for UDA1321/N101 chip\n");
+-			cval->max = -256;
+-		}
+-		break;
+-
+-	case USB_ID(0x046d, 0x09a4):
+-		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+-			snd_printk(KERN_INFO
+-				"set volume quirk for QuickCam E3500\n");
+-			cval->min = 6080;
+-			cval->max = 8768;
+-			cval->res = 192;
+-		}
+-		break;
+-
+-	case USB_ID(0x046d, 0x0808):
+-	case USB_ID(0x046d, 0x0809):
+-	case USB_ID(0x046d, 0x0991):
+-	/* Most audio usb devices lie about volume resolution.
+-	 * Most Logitech webcams have res = 384.
+-	 * Proboly there is some logitech magic behind this number --fishor
+-	 */
+-		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+-			snd_printk(KERN_INFO
+-				"set resolution quirk: cval->res = 384\n");
+-			cval->res = 384;
+-		}
+-		break;
+-
+-	}
+-
+ 	range = (cval->max - cval->min) / cval->res;
+ 	/* Are there devices with volume range more than 255? I use a bit more
+ 	 * to be sure. 384 is a resolution magic number found on Logitech
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 2e5bc73..a3ddac0 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -137,12 +137,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 		return -ENOMEM;
+ 	}
+ 	if (fp->nr_rates > 0) {
+-		rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL);
++		rate_table = kmemdup(fp->rate_table,
++				     sizeof(int) * fp->nr_rates, GFP_KERNEL);
+ 		if (!rate_table) {
+ 			kfree(fp);
+ 			return -ENOMEM;
+ 		}
+-		memcpy(rate_table, fp->rate_table, sizeof(int) * fp->nr_rates);
+ 		fp->rate_table = rate_table;
+ 	}
+ 
+@@ -224,10 +224,9 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ 	if (altsd->bNumEndpoints != 1)
+ 		return -ENXIO;
+ 
+-	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
++	fp = kmemdup(&ua_format, sizeof(*fp), GFP_KERNEL);
+ 	if (!fp)
+ 		return -ENOMEM;
+-	memcpy(fp, &ua_format, sizeof(*fp));
+ 
+ 	fp->iface = altsd->bInterfaceNumber;
+ 	fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress;
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index 8d02ccb..30e2bef 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -42,6 +42,7 @@ $default{"BISECT_MANUAL"}	= 0;
+ $default{"BISECT_SKIP"}		= 1;
+ $default{"SUCCESS_LINE"}	= "login:";
+ $default{"DETECT_TRIPLE_FAULT"} = 1;
++$default{"NO_INSTALL"}		= 0;
+ $default{"BOOTED_TIMEOUT"}	= 1;
+ $default{"DIE_ON_FAILURE"}	= 1;
+ $default{"SSH_EXEC"}		= "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND";
+@@ -84,6 +85,7 @@ my $grub_number;
+ my $target;
+ my $make;
+ my $post_install;
++my $no_install;
+ my $noclean;
+ my $minconfig;
+ my $start_minconfig;
+@@ -115,6 +117,7 @@ my $timeout;
+ my $booted_timeout;
+ my $detect_triplefault;
+ my $console;
++my $reboot_success_line;
+ my $success_line;
+ my $stop_after_success;
+ my $stop_after_failure;
+@@ -130,6 +133,12 @@ my %config_help;
+ my %variable;
+ my %force_config;
+ 
++# do not force reboots on config problems
++my $no_reboot = 1;
++
++# default variables that can be used
++chomp ($variable{"PWD"} = `pwd`);
++
+ $config_help{"MACHINE"} = << "EOF"
+  The machine hostname that you will test.
+ EOF
+@@ -241,6 +250,7 @@ sub read_yn {
+ 
+ sub get_ktest_config {
+     my ($config) = @_;
++    my $ans;
+ 
+     return if (defined($opt{$config}));
+ 
+@@ -254,16 +264,17 @@ sub get_ktest_config {
+ 	if (defined($default{$config})) {
+ 	    print "\[$default{$config}\] ";
+ 	}
+-	$entered_configs{$config} = <STDIN>;
+-	$entered_configs{$config} =~ s/^\s*(.*\S)\s*$/$1/;
+-	if ($entered_configs{$config} =~ /^\s*$/) {
++	$ans = <STDIN>;
++	$ans =~ s/^\s*(.*\S)\s*$/$1/;
++	if ($ans =~ /^\s*$/) {
+ 	    if ($default{$config}) {
+-		$entered_configs{$config} = $default{$config};
++		$ans = $default{$config};
+ 	    } else {
+ 		print "Your answer can not be blank\n";
+ 		next;
+ 	    }
+ 	}
++	$entered_configs{$config} = process_variables($ans);
+ 	last;
+     }
+ }
+@@ -298,7 +309,7 @@ sub get_ktest_configs {
+ }
+ 
+ sub process_variables {
+-    my ($value) = @_;
++    my ($value, $remove_undef) = @_;
+     my $retval = "";
+ 
+     # We want to check for '\', and it is just easier
+@@ -316,6 +327,10 @@ sub process_variables {
+ 	$retval = "$retval$begin";
+ 	if (defined($variable{$var})) {
+ 	    $retval = "$retval$variable{$var}";
++	} elsif (defined($remove_undef) && $remove_undef) {
++	    # for if statements, any variable that is not defined,
++	    # we simple convert to 0
++	    # we simply convert to 0
+ 	} else {
+ 	    # put back the origin piece.
+ 	    $retval = "$retval\$\{$var\}";
+@@ -331,10 +346,17 @@ sub process_variables {
+ }
+ 
+ sub set_value {
+-    my ($lvalue, $rvalue) = @_;
++    my ($lvalue, $rvalue, $override, $overrides, $name) = @_;
+ 
+     if (defined($opt{$lvalue})) {
+-	die "Error: Option $lvalue defined more than once!\n";
++	if (!$override || defined(${$overrides}{$lvalue})) {
++	    my $extra = "";
++	    if ($override) {
++		$extra = "In the same override section!\n";
++	    }
++	    die "$name: $.: Option $lvalue defined more than once!\n$extra";
++	}
++	${$overrides}{$lvalue} = $rvalue;
+     }
+     if ($rvalue =~ /^\s*$/) {
+ 	delete $opt{$lvalue};
+@@ -355,86 +377,274 @@ sub set_variable {
+     }
+ }
+ 
+-sub read_config {
+-    my ($config) = @_;
++sub process_compare {
++    my ($lval, $cmp, $rval) = @_;
++
++    # remove whitespace
++
++    $lval =~ s/^\s*//;
++    $lval =~ s/\s*$//;
++
++    $rval =~ s/^\s*//;
++    $rval =~ s/\s*$//;
++
++    if ($cmp eq "==") {
++	return $lval eq $rval;
++    } elsif ($cmp eq "!=") {
++	return $lval ne $rval;
++    }
++
++    my $statement = "$lval $cmp $rval";
++    my $ret = eval $statement;
++
++    # $@ stores error of eval
++    if ($@) {
++	return -1;
++    }
++
++    return $ret;
++}
++
++sub value_defined {
++    my ($val) = @_;
++
++    return defined($variable{$2}) ||
++	defined($opt{$2});
++}
++
++my $d = 0;
++sub process_expression {
++    my ($name, $val) = @_;
++
++    my $c = $d++;
++
++    while ($val =~ s/\(([^\(]*?)\)/\&\&\&\&VAL\&\&\&\&/) {
++	my $express = $1;
++
++	if (process_expression($name, $express)) {
++	    $val =~ s/\&\&\&\&VAL\&\&\&\&/ 1 /;
++	} else {
++	    $val =~ s/\&\&\&\&VAL\&\&\&\&/ 0 /;
++	}
++    }
++
++    $d--;
++    my $OR = "\\|\\|";
++    my $AND = "\\&\\&";
++
++    while ($val =~ s/^(.*?)($OR|$AND)//) {
++	my $express = $1;
++	my $op = $2;
++
++	if (process_expression($name, $express)) {
++	    if ($op eq "||") {
++		return 1;
++	    }
++	} else {
++	    if ($op eq "&&") {
++		return 0;
++	    }
++	}
++    }
++
++    if ($val =~ /(.*)(==|\!=|>=|<=|>|<)(.*)/) {
++	my $ret = process_compare($1, $2, $3);
++	if ($ret < 0) {
++	    die "$name: $.: Unable to process comparison\n";
++	}
++	return $ret;
++    }
++
++    if ($val =~ /^\s*(NOT\s*)?DEFINED\s+(\S+)\s*$/) {
++	if (defined $1) {
++	    return !value_defined($2);
++	} else {
++	    return value_defined($2);
++	}
++    }
++
++    if ($val =~ /^\s*0\s*$/) {
++	return 0;
++    } elsif ($val =~ /^\s*\d+\s*$/) {
++	return 1;
++    }
++
++    die ("$name: $.: Undefined content $val in if statement\n");
++}
++
++sub process_if {
++    my ($name, $value) = @_;
++
++    # Convert variables and replace undefined ones with 0
++    my $val = process_variables($value, 1);
++    my $ret = process_expression $name, $val;
++
++    return $ret;
++}
+ 
+-    open(IN, $config) || die "can't read file $config";
++sub __read_config {
++    my ($config, $current_test_num) = @_;
++
++    my $in;
++    open($in, $config) || die "can't read file $config";
+ 
+     my $name = $config;
+     $name =~ s,.*/(.*),$1,;
+ 
+-    my $test_num = 0;
++    my $test_num = $$current_test_num;
+     my $default = 1;
+     my $repeat = 1;
+     my $num_tests_set = 0;
+     my $skip = 0;
+     my $rest;
++    my $line;
+     my $test_case = 0;
++    my $if = 0;
++    my $if_set = 0;
++    my $override = 0;
+ 
+-    while (<IN>) {
++    my %overrides;
++
++    while (<$in>) {
+ 
+ 	# ignore blank lines and comments
+ 	next if (/^\s*$/ || /\s*\#/);
+ 
+-	if (/^\s*TEST_START(.*)/) {
++	if (/^\s*(TEST_START|DEFAULTS)\b(.*)/) {
+ 
+-	    $rest = $1;
++	    my $type = $1;
++	    $rest = $2;
++	    $line = $2;
+ 
+-	    if ($num_tests_set) {
+-		die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
+-	    }
++	    my $old_test_num;
++	    my $old_repeat;
++	    $override = 0;
++
++	    if ($type eq "TEST_START") {
++
++		if ($num_tests_set) {
++		    die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
++		}
+ 
+-	    my $old_test_num = $test_num;
+-	    my $old_repeat = $repeat;
++		$old_test_num = $test_num;
++		$old_repeat = $repeat;
+ 
+-	    $test_num += $repeat;
+-	    $default = 0;
+-	    $repeat = 1;
++		$test_num += $repeat;
++		$default = 0;
++		$repeat = 1;
++	    } else {
++		$default = 1;
++	    }
+ 
+-	    if ($rest =~ /\s+SKIP(.*)/) {
+-		$rest = $1;
++	    # If SKIP is anywhere in the line, the command will be skipped
++	    if ($rest =~ s/\s+SKIP\b//) {
+ 		$skip = 1;
+ 	    } else {
+ 		$test_case = 1;
+ 		$skip = 0;
+ 	    }
+ 
+-	    if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) {
+-		$repeat = $1;
+-		$rest = $2;
+-		$repeat_tests{"$test_num"} = $repeat;
++	    if ($rest =~ s/\sELSE\b//) {
++		if (!$if) {
++		    die "$name: $.: ELSE found without matching IF section\n$_";
++		}
++		$if = 0;
++
++		if ($if_set) {
++		    $skip = 1;
++		} else {
++		    $skip = 0;
++		}
+ 	    }
+ 
+-	    if ($rest =~ /\s+SKIP(.*)/) {
+-		$rest = $1;
+-		$skip = 1;
++	    if ($rest =~ s/\sIF\s+(.*)//) {
++		if (process_if($name, $1)) {
++		    $if_set = 1;
++		} else {
++		    $skip = 1;
++		}
++		$if = 1;
++	    } else {
++		$if = 0;
++		$if_set = 0;
+ 	    }
+ 
+-	    if ($rest !~ /^\s*$/) {
+-		die "$name: $.: Gargbage found after TEST_START\n$_";
++	    if (!$skip) {
++		if ($type eq "TEST_START") {
++		    if ($rest =~ s/\s+ITERATE\s+(\d+)//) {
++			$repeat = $1;
++			$repeat_tests{"$test_num"} = $repeat;
++		    }
++		} elsif ($rest =~ s/\sOVERRIDE\b//) {
++		    # DEFAULT only
++		    $override = 1;
++		    # Clear previous overrides
++		    %overrides = ();
++		}
++	    }
++
++	    if (!$skip && $rest !~ /^\s*$/) {
++		die "$name: $.: Garbage found after $type\n$_";
+ 	    }
+ 
+-	    if ($skip) {
++	    if ($skip && $type eq "TEST_START") {
+ 		$test_num = $old_test_num;
+ 		$repeat = $old_repeat;
+ 	    }
+ 
+-	} elsif (/^\s*DEFAULTS(.*)$/) {
+-	    $default = 1;
+-
++	} elsif (/^\s*ELSE\b(.*)$/) {
++	    if (!$if) {
++		die "$name: $.: ELSE found without matching IF section\n$_";
++	    }
+ 	    $rest = $1;
+-
+-	    if ($rest =~ /\s+SKIP(.*)/) {
+-		$rest = $1;
++	    if ($if_set) {
+ 		$skip = 1;
++		$rest = "";
+ 	    } else {
+ 		$skip = 0;
++
++		if ($rest =~ /\sIF\s+(.*)/) {
++		    # May be a ELSE IF section.
++		    if (!process_if($name, $1)) {
++			$skip = 1;
++		    }
++		    $rest = "";
++		} else {
++		    $if = 0;
++		}
+ 	    }
+ 
+ 	    if ($rest !~ /^\s*$/) {
+ 		die "$name: $.: Gargbage found after DEFAULTS\n$_";
+ 	    }
+ 
++	} elsif (/^\s*INCLUDE\s+(\S+)/) {
++
++	    next if ($skip);
++
++	    if (!$default) {
++		die "$name: $.: INCLUDE can only be done in default sections\n$_";
++	    }
++
++	    my $file = process_variables($1);
++
++	    if ($file !~ m,^/,) {
++		# check the path of the config file first
++		if ($config =~ m,(.*)/,) {
++		    if (-f "$1/$file") {
++			$file = "$1/$file";
++		    }
++		}
++	    }
++		
++	    if ( ! -r $file ) {
++		die "$name: $.: Can't read file $file\n$_";
++	    }
++
++	    if (__read_config($file, \$test_num)) {
++		$test_case = 1;
++	    }
++
+ 	} elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
+ 
+ 	    next if ($skip);
+@@ -460,10 +670,10 @@ sub read_config {
+ 	    }
+ 
+ 	    if ($default || $lvalue =~ /\[\d+\]$/) {
+-		set_value($lvalue, $rvalue);
++		set_value($lvalue, $rvalue, $override, \%overrides, $name);
+ 	    } else {
+ 		my $val = "$lvalue\[$test_num\]";
+-		set_value($val, $rvalue);
++		set_value($val, $rvalue, $override, \%overrides, $name);
+ 
+ 		if ($repeat > 1) {
+ 		    $repeats{$val} = $repeat;
+@@ -490,13 +700,26 @@ sub read_config {
+ 	}
+     }
+ 
+-    close(IN);
+-
+     if ($test_num) {
+ 	$test_num += $repeat - 1;
+ 	$opt{"NUM_TESTS"} = $test_num;
+     }
+ 
++    close($in);
++
++    $$current_test_num = $test_num;
++
++    return $test_case;
++}
++
++sub read_config {
++    my ($config) = @_;
++
++    my $test_case;
++    my $test_num = 0;
++
++    $test_case = __read_config $config, \$test_num;
++
+     # make sure we have all mandatory configs
+     get_ktest_configs;
+ 
+@@ -603,8 +826,20 @@ sub doprint {
+ }
+ 
+ sub run_command;
++sub start_monitor;
++sub end_monitor;
++sub wait_for_monitor;
+ 
+ sub reboot {
++    my ($time) = @_;
++
++    if (defined($time)) {
++	start_monitor;
++	# flush out current monitor
++	# May contain the reboot success line
++	wait_for_monitor 1;
++    }
++
+     # try to reboot normally
+     if (run_command $reboot) {
+ 	if (defined($powercycle_after_reboot)) {
+@@ -615,12 +850,17 @@ sub reboot {
+ 	# nope? power cycle it.
+ 	run_command "$power_cycle";
+     }
++
++    if (defined($time)) {
++	wait_for_monitor($time, $reboot_success_line);
++	end_monitor;
++    }
+ }
+ 
+ sub do_not_reboot {
+     my $i = $iteration;
+ 
+-    return $test_type eq "build" ||
++    return $test_type eq "build" || $no_reboot ||
+ 	($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") ||
+ 	($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build");
+ }
+@@ -693,16 +933,29 @@ sub end_monitor {
+ }
+ 
+ sub wait_for_monitor {
+-    my ($time) = @_;
++    my ($time, $stop) = @_;
++    my $full_line = "";
+     my $line;
++    my $booted = 0;
+ 
+     doprint "** Wait for monitor to settle down **\n";
+ 
+     # read the monitor and wait for the system to calm down
+-    do {
++    while (!$booted) {
+ 	$line = wait_for_input($monitor_fp, $time);
+-	print "$line" if (defined($line));
+-    } while (defined($line));
++	last if (!defined($line));
++	print "$line";
++	$full_line .= $line;
++
++	if (defined($stop) && $full_line =~ /$stop/) {
++	    doprint "wait for monitor detected $stop\n";
++	    $booted = 1;
++	}
++
++	if ($line =~ /\n/) {
++	    $full_line = "";
++	}
++    }
+     print "** Monitor flushed **\n";
+ }
+ 
+@@ -719,10 +972,7 @@ sub fail {
+ 	# no need to reboot for just building.
+ 	if (!do_not_reboot) {
+ 	    doprint "REBOOTING\n";
+-	    reboot;
+-	    start_monitor;
+-	    wait_for_monitor $sleep_time;
+-	    end_monitor;
++	    reboot $sleep_time;
+ 	}
+ 
+ 	my $name = "";
+@@ -854,9 +1104,12 @@ sub get_grub_index {
+     open(IN, "$ssh_grub |")
+ 	or die "unable to get menu.lst";
+ 
++    my $found = 0;
++
+     while (<IN>) {
+ 	if (/^\s*title\s+$grub_menu\s*$/) {
+ 	    $grub_number++;
++	    $found = 1;
+ 	    last;
+ 	} elsif (/^\s*title\s/) {
+ 	    $grub_number++;
+@@ -865,7 +1118,7 @@ sub get_grub_index {
+     close(IN);
+ 
+     die "Could not find '$grub_menu' in /boot/grub/menu on $machine"
+-	if ($grub_number < 0);
++	if (!$found);
+     doprint "$grub_number\n";
+ }
+ 
+@@ -902,7 +1155,8 @@ sub wait_for_input
+ 
+ sub reboot_to {
+     if ($reboot_type eq "grub") {
+-	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch && reboot)'";
++	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
++	reboot;
+ 	return;
+     }
+ 
+@@ -1083,6 +1337,8 @@ sub do_post_install {
+ 
+ sub install {
+ 
++    return if ($no_install);
++
+     run_scp "$outputdir/$build_target", "$target_image" or
+ 	dodie "failed to copy image";
+ 
+@@ -1140,6 +1396,11 @@ sub get_version {
+ }
+ 
+ sub start_monitor_and_boot {
++    # Make sure the stable kernel has finished booting
++    start_monitor;
++    wait_for_monitor 5;
++    end_monitor;
++
+     get_grub_index;
+     get_version;
+     install;
+@@ -1250,6 +1511,10 @@ sub build {
+ 
+     unlink $buildlog;
+ 
++    # Failed builds should not reboot the target
++    my $save_no_reboot = $no_reboot;
++    $no_reboot = 1;
++
+     if (defined($pre_build)) {
+ 	my $ret = run_command $pre_build;
+ 	if (!$ret && defined($pre_build_die) &&
+@@ -1272,15 +1537,15 @@ sub build {
+ 	# allow for empty configs
+ 	run_command "touch $output_config";
+ 
+-	run_command "mv $output_config $outputdir/config_temp" or
+-	    dodie "moving .config";
++	if (!$noclean) {
++	    run_command "mv $output_config $outputdir/config_temp" or
++		dodie "moving .config";
+ 
+-	if (!$noclean && !run_command "$make mrproper") {
+-	    dodie "make mrproper";
+-	}
++	    run_command "$make mrproper" or dodie "make mrproper";
+ 
+-	run_command "mv $outputdir/config_temp $output_config" or
+-	    dodie "moving config_temp";
++	    run_command "mv $outputdir/config_temp $output_config" or
++		dodie "moving config_temp";
++	}
+ 
+     } elsif (!$noclean) {
+ 	unlink "$output_config";
+@@ -1318,10 +1583,15 @@ sub build {
+ 
+     if (!$build_ret) {
+ 	# bisect may need this to pass
+-	return 0 if ($in_bisect);
++	if ($in_bisect) {
++	    $no_reboot = $save_no_reboot;
++	    return 0;
++	}
+ 	fail "failed build" and return 0;
+     }
+ 
++    $no_reboot = $save_no_reboot;
++
+     return 1;
+ }
+ 
+@@ -1356,10 +1626,7 @@ sub success {
+ 
+     if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
+ 	doprint "Reboot and wait $sleep_time seconds\n";
+-	reboot;
+-	start_monitor;
+-	wait_for_monitor $sleep_time;
+-	end_monitor;
++	reboot $sleep_time;
+     }
+ }
+ 
+@@ -1500,10 +1767,7 @@ sub run_git_bisect {
+ 
+ sub bisect_reboot {
+     doprint "Reboot and sleep $bisect_sleep_time seconds\n";
+-    reboot;
+-    start_monitor;
+-    wait_for_monitor $bisect_sleep_time;
+-    end_monitor;
++    reboot $bisect_sleep_time;
+ }
+ 
+ # returns 1 on success, 0 on failure, -1 on skip
+@@ -2066,10 +2330,7 @@ sub config_bisect {
+ 
+ sub patchcheck_reboot {
+     doprint "Reboot and sleep $patchcheck_sleep_time seconds\n";
+-    reboot;
+-    start_monitor;
+-    wait_for_monitor $patchcheck_sleep_time;
+-    end_monitor;
++    reboot $patchcheck_sleep_time;
+ }
+ 
+ sub patchcheck {
+@@ -2178,12 +2439,31 @@ sub patchcheck {
+ }
+ 
+ my %depends;
++my %depcount;
+ my $iflevel = 0;
+ my @ifdeps;
+ 
+ # prevent recursion
+ my %read_kconfigs;
+ 
++sub add_dep {
++    # $config depends on $dep
++    my ($config, $dep) = @_;
++
++    if (defined($depends{$config})) {
++	$depends{$config} .= " " . $dep;
++    } else {
++	$depends{$config} = $dep;
++    }
++
++    # record the number of configs depending on $dep
++    if (defined $depcount{$dep}) {
++	$depcount{$dep}++;
++    } else {
++	$depcount{$dep} = 1;
++    } 
++}
++
+ # taken from streamline_config.pl
+ sub read_kconfig {
+     my ($kconfig) = @_;
+@@ -2230,30 +2510,19 @@ sub read_kconfig {
+ 	    $config = $2;
+ 
+ 	    for (my $i = 0; $i < $iflevel; $i++) {
+-		if ($i) {
+-		    $depends{$config} .= " " . $ifdeps[$i];
+-		} else {
+-		    $depends{$config} = $ifdeps[$i];
+-		}
+-		$state = "DEP";
++		add_dep $config, $ifdeps[$i];
+ 	    }
+ 
+ 	# collect the depends for the config
+ 	} elsif ($state eq "NEW" && /^\s*depends\s+on\s+(.*)$/) {
+ 
+-	    if (defined($depends{$1})) {
+-		$depends{$config} .= " " . $1;
+-	    } else {
+-		$depends{$config} = $1;
+-	    }
++	    add_dep $config, $1;
+ 
+ 	# Get the configs that select this config
+-	} elsif ($state ne "NONE" && /^\s*select\s+(\S+)/) {
+-	    if (defined($depends{$1})) {
+-		$depends{$1} .= " " . $config;
+-	    } else {
+-		$depends{$1} = $config;
+-	    }
++	} elsif ($state eq "NEW" && /^\s*select\s+(\S+)/) {
++
++	    # selected by depends on config
++	    add_dep $1, $config;
+ 
+ 	# Check for if statements
+ 	} elsif (/^if\s+(.*\S)\s*$/) {
+@@ -2365,11 +2634,18 @@ sub make_new_config {
+     close OUT;
+ }
+ 
++sub chomp_config {
++    my ($config) = @_;
++
++    $config =~ s/CONFIG_//;
++
++    return $config;
++}
++
+ sub get_depends {
+     my ($dep) = @_;
+ 
+-    my $kconfig = $dep;
+-    $kconfig =~ s/CONFIG_//;
++    my $kconfig = chomp_config $dep;
+ 
+     $dep = $depends{"$kconfig"};
+ 
+@@ -2419,8 +2695,7 @@ sub test_this_config {
+ 	return undef;
+     }
+ 
+-    my $kconfig = $config;
+-    $kconfig =~ s/CONFIG_//;
++    my $kconfig = chomp_config $config;
+ 
+     # Test dependencies first
+     if (defined($depends{"$kconfig"})) {
+@@ -2510,6 +2785,14 @@ sub make_min_config {
+ 
+     my @config_keys = keys %min_configs;
+ 
++    # All configs need a depcount
++    foreach my $config (@config_keys) {
++	my $kconfig = chomp_config $config;
++	if (!defined $depcount{$kconfig}) {
++		$depcount{$kconfig} = 0;
++	}
++    }
++
+     # Remove anything that was set by the make allnoconfig
+     # we shouldn't need them as they get set for us anyway.
+     foreach my $config (@config_keys) {
+@@ -2548,8 +2831,13 @@ sub make_min_config {
+ 	# Now disable each config one by one and do a make oldconfig
+ 	# till we find a config that changes our list.
+ 
+-	# Put configs that did not modify the config at the end.
+ 	my @test_configs = keys %min_configs;
++
++	# Sort keys by who is most dependent on
++	@test_configs = sort  { $depcount{chomp_config($b)} <=> $depcount{chomp_config($a)} }
++			  @test_configs ;
++
++	# Put configs that did not modify the config at the end.
+ 	my $reset = 1;
+ 	for (my $i = 0; $i < $#test_configs; $i++) {
+ 	    if (!defined($nochange_config{$test_configs[0]})) {
+@@ -2659,10 +2947,7 @@ sub make_min_config {
+ 	}
+ 
+ 	doprint "Reboot and wait $sleep_time seconds\n";
+-	reboot;
+-	start_monitor;
+-	wait_for_monitor $sleep_time;
+-	end_monitor;
++	reboot $sleep_time;
+     }
+ 
+     success $i;
+@@ -2783,6 +3068,9 @@ sub set_test_option {
+ # First we need to do is the builds
+ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+ 
++    # Do not reboot on failing test options
++    $no_reboot = 1;
++
+     $iteration = $i;
+ 
+     my $makecmd = set_test_option("MAKE_CMD", $i);
+@@ -2811,6 +3099,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+     $reboot_type = set_test_option("REBOOT_TYPE", $i);
+     $grub_menu = set_test_option("GRUB_MENU", $i);
+     $post_install = set_test_option("POST_INSTALL", $i);
++    $no_install = set_test_option("NO_INSTALL", $i);
+     $reboot_script = set_test_option("REBOOT_SCRIPT", $i);
+     $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i);
+     $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i);
+@@ -2832,6 +3121,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+     $console = set_test_option("CONSOLE", $i);
+     $detect_triplefault = set_test_option("DETECT_TRIPLE_FAULT", $i);
+     $success_line = set_test_option("SUCCESS_LINE", $i);
++    $reboot_success_line = set_test_option("REBOOT_SUCCESS_LINE", $i);
+     $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i);
+     $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i);
+     $stop_test_after = set_test_option("STOP_TEST_AFTER", $i);
+@@ -2850,9 +3140,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+ 
+     chdir $builddir || die "can't change directory to $builddir";
+ 
+-    if (!-d $tmpdir) {
+-	mkpath($tmpdir) or
+-	    die "can't create $tmpdir";
++    foreach my $dir ($tmpdir, $outputdir) {
++	if (!-d $dir) {
++	    mkpath($dir) or
++		die "can't create $dir";
++	}
+     }
+ 
+     $ENV{"SSH_USER"} = $ssh_user;
+@@ -2889,8 +3181,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+ 	$run_type = "ERROR";
+     }
+ 
++    my $installme = "";
++    $installme = " no_install" if ($no_install);
++
+     doprint "\n\n";
+-    doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type\n\n";
++    doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type$installme\n\n";
+ 
+     unlink $dmesg;
+     unlink $buildlog;
+@@ -2911,6 +3206,9 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+ 	    die "failed to checkout $checkout";
+     }
+ 
++    $no_reboot = 0;
++
++
+     if ($test_type eq "bisect") {
+ 	bisect $i;
+ 	next;
+@@ -2929,6 +3227,13 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+ 	build $build_type or next;
+     }
+ 
++    if ($test_type eq "install") {
++	get_version;
++	install;
++	success $i;
++	next;
++    }
++
+     if ($test_type ne "build") {
+ 	my $failed = 0;
+ 	start_monitor_and_boot or $failed = 1;
+diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
+index b8bcd14..dbedfa1 100644
+--- a/tools/testing/ktest/sample.conf
++++ b/tools/testing/ktest/sample.conf
+@@ -72,6 +72,128 @@
+ # the same option name under the same test or as default
+ # ktest will fail to execute, and no tests will run.
+ #
++# DEFAULTS OVERRIDE
++#
++# Options defined in a DEFAULTS section cannot be duplicated,
++# even if they are defined in two different DEFAULTS sections.
++# This is done to catch mistakes where an option is added but
++# an earlier definition of it was forgotten and not commented out.
++#
++# The OVERRIDE keyword can be added to a section to allow that
++# section to override values from DEFAULTS sections that have
++# been defined previously. It will only override options that
++# have been defined before its use. Options defined later
++# in a non-override section will still cause an error. The same
++# option cannot be defined twice in the same section even if that
++# section is marked OVERRIDE.
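++#
++# A minimal sketch of how OVERRIDE is used (the option and values
++# below are only illustrative, not taken from this file):
++#
++# DEFAULTS
++# BUILD_TYPE = randconfig
++#
++# DEFAULTS OVERRIDE
++# BUILD_TYPE = oldconfig
++#
++# The second section may redefine BUILD_TYPE only because it is
++# marked OVERRIDE; without that keyword ktest would stop with an
++# "Option ... defined more than once" error.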
++#
++#
++#
++# Both TEST_START and DEFAULTS sections can also have the IF keyword.
++# The value after the IF must evaluate to 0 or a non-zero positive
++# integer, and can use the config variables (explained below).
++#
++# DEFAULTS IF ${IS_X86_32}
++#
++# The above will process the DEFAULTS section if the config
++# variable IS_X86_32 evaluates to a non-zero positive integer;
++# otherwise, if it evaluates to zero, it will act the same
++# as if the SKIP keyword was used.
++#
++# The ELSE keyword can be used directly after a section with
++# an IF statement.
++#
++# TEST_START IF ${RUN_NET_TESTS}
++# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
++#
++# ELSE
++#
++# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-normal
++#
++#
++# The ELSE keyword can also contain an IF statement to allow multiple
++# if then else sections. But all the sections must be either
++# DEFAULT or TEST_START, they can not be a mixture.
++#
++# TEST_START IF ${RUN_NET_TESTS}
++# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
++#
++# ELSE IF ${RUN_DISK_TESTS}
++# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-tests
++#
++# ELSE IF ${RUN_CPU_TESTS}
++# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-cpu
++#
++# ELSE
++# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-network
++#
++# The if statement may also contain comparisons. For == and !=,
++# strings may be used on both sides.
++#
++# BOX_TYPE := x86_32
++#
++# DEFAULTS IF ${BOX_TYPE} == x86_32
++# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-32
++# ELSE
++# BUILD_TYPE = useconfig:${CONFIG_DIR}/config-64
++#
++# The DEFINED keyword can be used by the IF statements too.
++# It returns true if the given config variable or option has been defined
++# or false otherwise.
++#
++# 
++# DEFAULTS IF DEFINED USE_CC
++# CC := ${USE_CC}
++# ELSE
++# CC := gcc
++#
++#
++# As well as NOT DEFINED.
++#
++# DEFAULTS IF NOT DEFINED MAKE_CMD
++# MAKE_CMD := make ARCH=x86
++#
++#
++# And/or ops (&&,||) may also be used to make complex conditionals.
++#
++# TEST_START IF (DEFINED ALL_TESTS || ${MYTEST} == boottest) && ${MACHINE} == gandalf
++#
++# Notice the use of parentheses. Without any parentheses the above would be
++# processed the same as:
++#
++# TEST_START IF DEFINED ALL_TESTS || (${MYTEST} == boottest && ${MACHINE} == gandalf)
++#
++#
++#
++# INCLUDE file
++#
++# The INCLUDE keyword may be used in DEFAULT sections. This will
++# read another config file and process that file as well. The included
++# file can include other files, add new test cases or default
++# statements. Config variables will be passed to these files and changes
++# to config variables will be seen by top level config files. Including
++# a file is processed just as if the contents of the file were cut and
++# pasted into the top level file, except that include files that end with
++# a TEST_START section will have that section ended at the end of
++# the include file. That is, an included file is effectively followed
++# by another DEFAULTS keyword.
++#
++# Unlike other files referenced in this config, the file path does not need
++# to be absolute. If the file does not start with '/', then the directory
++# that the current config file was located in is used. If no config by the
++# given name is found there, then the current directory is searched.
++#
++# INCLUDE myfile
++# DEFAULTS
++#
++# is the same as:
++#
++# INCLUDE myfile
++#
++# Note, if the include file does not contain a full path, the file is
++# searched first by the location of the original include file, and then
++# by the location that ktest.pl was executed in.
++#
+ 
+ #### Config variables ####
+ #
+@@ -253,9 +375,10 @@
+ 
+ # The default test type (default test)
+ # The test types may be:
+-#   build - only build the kernel, do nothing else
+-#   boot - build and boot the kernel
+-#   test - build, boot and if TEST is set, run the test script
++#   build   - only build the kernel, do nothing else
++#   install - build and install, but do nothing else (does not reboot)
++#   boot    - build, install, and boot the kernel
++#   test    - build, boot and if TEST is set, run the test script
+ #          (If TEST is not set, it defaults back to boot)
+ #   bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
+ #   patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
+@@ -293,6 +416,13 @@
+ # or on some systems:
+ #POST_INSTALL = ssh user at target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
+ 
++# If for some reason you just want to boot the kernel and do not
++# want the test to install anything new (for example, you may just
++# want to boot test the same kernel over and over without the hassle
++# of installing anything), you can set this option to 1.
++# (default 0)
++#NO_INSTALL = 1
++
+ # If there is a script that you require to run before the build is done
+ # you can specify it with PRE_BUILD.
+ #
+@@ -415,6 +545,14 @@
+ # (default "login:")
+ #SUCCESS_LINE = login:
+ 
++# To speed up reboots, define a line that the default (known good)
++# kernel prints once it has successfully booted and is ready to have
++# a new test kernel passed to it. When this line is seen, ktest.pl
++# continues immediately; otherwise it waits SLEEP_TIME before
++# continuing.
++# (default undefined)
++#REBOOT_SUCCESS_LINE = login:
++
+ # In case the console constantly fills the screen, having
+ # a specified time to stop the test after success is recommended.
+ # (in seconds)
+@@ -480,6 +618,8 @@
+ # another test. If a reboot to the reliable kernel happens,
+ # we wait SLEEP_TIME for the console to stop producing output
+ # before starting the next test.
++#
++# You can speed up reboot times even more by setting REBOOT_SUCCESS_LINE.
+ # (default 60)
+ #SLEEP_TIME = 60
+ 

Added: dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux-2.6/debian/patches/series/1~experimental.1-extra	Thu Nov 17 07:50:32 2011	(r18294)
@@ -0,0 +1,2 @@
++ features/all/rt/patch-3.2-rc1-52e4c2a05.patch featureset=rt
++ features/all/rt/patch-3.2-rc1-52e4c2a05-rt2.patch featureset=rt


