[kernel] r18073 - in dists/sid/linux-2.6/debian: . patches/features/all/rt patches/series
Uwe Kleine-König
ukleinek-guest at alioth.debian.org
Sat Sep 10 21:22:50 UTC 2011
Author: ukleinek-guest
Date: Sat Sep 10 21:22:48 2011
New Revision: 18073
Log:
[amd64] Update rt featureset to 3.0.4-rt13
Added:
dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.4-rt13.patch
- copied, changed from r18057, dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.1-rt11.patch
dists/sid/linux-2.6/debian/patches/series/4-extra
Deleted:
dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.1-rt11.patch
dists/sid/linux-2.6/debian/patches/series/2-extra
Modified:
dists/sid/linux-2.6/debian/changelog
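The series/<n>-extra rename tracks the Debian revision that will carry the
patch: series/2-extra belonged to the 3.0.0-2 upload, and series/4-extra
presumably lines up with the upcoming 3.0.0-4 upload whose changelog entry is
extended below. The new file's contents are not part of this diff; by the
linux-2.6 packaging conventions of the time it would hold a single
featureset-qualified line roughly like this (assumed, for illustration only):

    + features/all/rt/patch-3.0.4-rt13.patch featureset=rt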
Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog Sat Sep 10 03:17:12 2011 (r18072)
+++ dists/sid/linux-2.6/debian/changelog Sat Sep 10 21:22:48 2011 (r18073)
@@ -25,6 +25,9 @@
* Update Spanish debconf template translations (Omar Campagne)
(Closes: #636242)
+ [ Uwe Kleine-König ]
+ * [amd64] Update rt featureset to 3.0.4-rt13
+
-- Ben Hutchings <ben at decadent.org.uk> Sun, 28 Aug 2011 17:07:47 +0100
linux-2.6 (3.0.0-3) unstable; urgency=low
Copied and modified: dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.4-rt13.patch (from r18057, dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.1-rt11.patch)
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.1-rt11.patch Wed Sep 7 03:06:48 2011 (r18057, copy source)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/patch-3.0.4-rt13.patch Sat Sep 10 21:22:48 2011 (r18073)
@@ -1,11214 +1,11690 @@
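Reading aid (not part of the commit): everything below is a diff between two
patch files, so each line carries two levels of markers. The outer, first
column says how this revision changes the RT patch; the rest of the line is
payload, including the inner patch's own +/- markers:

    -<line>   only in the old patch-3.0.1-rt11.patch, dropped here (often
              because the corresponding change is already in the 3.0.4 base)
    +<line>   only in the new patch-3.0.4-rt13.patch
    --<line>  a removal the old RT patch made, no longer carried
    -+<line>  an addition the old RT patch made, no longer carried
    ++<line>  an addition the new RT patch makes, new in this revision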
-bwh: Update context for 3.0.2
-
-Index: linux-2.6/kernel/trace/ftrace.c
+Index: linux-2.6/mm/memory.c
===================================================================
---- linux-2.6.orig/kernel/trace/ftrace.c
-+++ linux-2.6/kernel/trace/ftrace.c
-@@ -1182,8 +1182,14 @@ alloc_and_copy_ftrace_hash(int size_bits
- return NULL;
+--- linux-2.6.orig/mm/memory.c
++++ linux-2.6/mm/memory.c
+@@ -1290,13 +1290,6 @@ static unsigned long unmap_page_range(st
+ return addr;
}
-+static void
-+ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
-+static void
-+ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
-+
- static int
--ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
-+ftrace_hash_move(struct ftrace_ops *ops, int enable,
-+ struct ftrace_hash **dst, struct ftrace_hash *src)
- {
- struct ftrace_func_entry *entry;
- struct hlist_node *tp, *tn;
-@@ -1193,9 +1199,16 @@ ftrace_hash_move(struct ftrace_hash **ds
- unsigned long key;
- int size = src->count;
- int bits = 0;
-+ int ret;
- int i;
+-#ifdef CONFIG_PREEMPT
+-# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
+-#else
+-/* No preempt: go for improved straight-line efficiency */
+-# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
+-#endif
+-
+ /**
+ * unmap_vmas - unmap a range of memory covered by a list of vma's
+ * @tlb: address of the caller's struct mmu_gather
+@@ -3435,6 +3428,32 @@ unlock:
+ return 0;
+ }
- /*
-+ * Remove the current set, update the hash and add
-+ * them back.
++#ifdef CONFIG_PREEMPT_RT_FULL
++void pagefault_disable(void)
++{
++ migrate_disable();
++ current->pagefault_disabled++;
++ /*
++ * make sure to have issued the store before a pagefault
++ * can hit.
+ */
-+ ftrace_hash_rec_disable(ops, enable);
++ barrier();
++}
++EXPORT_SYMBOL_GPL(pagefault_disable);
+
++void pagefault_enable(void)
++{
+ /*
- * If the new source is empty, just free dst and assign it
- * the empty_hash.
- */
-@@ -1215,9 +1228,10 @@ ftrace_hash_move(struct ftrace_hash **ds
- if (bits > FTRACE_HASH_MAX_BITS)
- bits = FTRACE_HASH_MAX_BITS;
++ * make sure to issue those last loads/stores before enabling
++ * the pagefault handler again.
++ */
++ barrier();
++ current->pagefault_disabled--;
++ migrate_enable();
++}
++EXPORT_SYMBOL_GPL(pagefault_enable);
++#endif
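On PREEMPT_RT_FULL the hunk above turns pagefault_disable()/pagefault_enable()
from thin preempt-count wrappers into real functions: migrate_disable() pins
the task to its CPU, and a per-task counter (current->pagefault_disabled,
added elsewhere in this patch) lets the fault handler fail fast instead of
sleeping. A minimal sketch of the caller pattern this preserves, modelled on
the futex code; the helper name is made up for illustration:

    /* Atomic usercopy that may run under a spinlock: if uaddr is not
     * faulted in, the fault path sees pagefault_disabled and returns
     * -EFAULT through the exception fixup instead of sleeping. */
    static int read_user_u32_atomic(u32 __user *uaddr, u32 *val)
    {
            int ret;

            pagefault_disable();
            ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
            pagefault_enable();

            return ret ? -EFAULT : 0;
    }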
++
+ /*
+ * By the time we get here, we already hold the mm semaphore
+ */
+@@ -3983,3 +4002,35 @@ void copy_user_huge_page(struct page *ds
+ }
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
++
++#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0)
++/*
++ * Heinous hack, relies on the caller doing something like:
++ *
++ * pte = alloc_pages(PGALLOC_GFP, 0);
++ * if (pte)
++ * pgtable_page_ctor(pte);
++ * return pte;
++ *
++ * This ensures we release the page and return NULL when the
++ * lock allocation fails.
++ */
++struct page *pte_lock_init(struct page *page)
++{
++ page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
++ if (page->ptl) {
++ spin_lock_init(__pte_lockptr(page));
++ } else {
++ __free_page(page);
++ page = NULL;
++ }
++ return page;
++}
++
++void pte_lock_deinit(struct page *page)
++{
++ kfree(page->ptl);
++ page->mapping = NULL;
++}
++
++#endif
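Background for the pte_lock_init()/pte_lock_deinit() hunk: with
PREEMPT_RT_FULL a spinlock_t is a sleeping rtmutex-based lock and no longer
fits into the word that struct page reserves for the split page-table lock,
so page->ptl becomes a pointer to a kmalloc()ed lock. That is why
initialisation can now fail and why the caller contract quoted in the comment
matters. For contrast, the !RT variant is infallible; mainline's
include/linux/mm.h of this era defines it essentially as:

    #if USE_SPLIT_PTLOCKS
    #define __pte_lockptr(page)     &((page)->ptl)
    #define pte_lock_init(_page)    do {                            \
            spin_lock_init(__pte_lockptr(_page));                   \
    } while (0)
    #define pte_lock_deinit(page)   ((page)->mapping = NULL)
    #endif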
+Index: linux-2.6/kernel/sched_cpupri.c
+===================================================================
+--- linux-2.6.orig/kernel/sched_cpupri.c
++++ linux-2.6/kernel/sched_cpupri.c
+@@ -47,9 +47,6 @@ static int convert_prio(int prio)
+ return cpupri;
+ }
-+ ret = -ENOMEM;
- new_hash = alloc_ftrace_hash(bits);
- if (!new_hash)
-- return -ENOMEM;
-+ goto out;
+-#define for_each_cpupri_active(array, idx) \
+- for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)
+-
+ /**
+ * cpupri_find - find the best (lowest-pri) CPU in the system
+ * @cp: The cpupri context
+@@ -71,11 +68,38 @@ int cpupri_find(struct cpupri *cp, struc
+ int idx = 0;
+ int task_pri = convert_prio(p->prio);
- size = 1 << src->size_bits;
- for (i = 0; i < size; i++) {
-@@ -1236,7 +1250,16 @@ ftrace_hash_move(struct ftrace_hash **ds
- rcu_assign_pointer(*dst, new_hash);
- free_ftrace_hash_rcu(old_hash);
+- for_each_cpupri_active(cp->pri_active, idx) {
++ if (task_pri >= MAX_RT_PRIO)
++ return 0;
++
++ for (idx = 0; idx < task_pri; idx++) {
+ struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
++ int skip = 0;
-- return 0;
-+ ret = 0;
-+ out:
-+ /*
-+ * Enable regardless of ret:
-+ * On success, we enable the new hash.
-+ * On failure, we re-enable the original hash.
-+ */
-+ ftrace_hash_rec_enable(ops, enable);
+- if (idx >= task_pri)
+- break;
++ if (!atomic_read(&(vec)->count))
++ skip = 1;
++ /*
++ * When looking at the vector, we need to read the counter,
++ * do a memory barrier, then read the mask.
++ *
++ * Note: This is still all racey, but we can deal with it.
++ * Ideally, we only want to look at masks that are set.
++ *
++ * If a mask is not set, then the only thing wrong is that we
++ * did a little more work than necessary.
++ *
++ * If we read a zero count but the mask is set, because of the
++ * memory barriers, that can only happen when the highest prio
++ * task for a run queue has left the run queue, in which case,
++ * it will be followed by a pull. If the task we are processing
++ * fails to find a proper place to go, that pull request will
++ * pull this task if the run queue is running at a lower
++ * priority.
++ */
++ smp_rmb();
+
-+ return ret;
- }
++ /* Need to do the rmb for every iteration */
++ if (skip)
++ continue;
- /*
-@@ -2857,7 +2880,7 @@ ftrace_set_regex(struct ftrace_ops *ops,
- ftrace_match_records(hash, buf, len);
+ if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+ continue;
+@@ -115,7 +139,7 @@ void cpupri_set(struct cpupri *cp, int c
+ {
+ int *currpri = &cp->cpu_to_pri[cpu];
+ int oldpri = *currpri;
+- unsigned long flags;
++ int do_mb = 0;
- mutex_lock(&ftrace_lock);
-- ret = ftrace_hash_move(orig_hash, hash);
-+ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
- mutex_unlock(&ftrace_lock);
+ newpri = convert_prio(newpri);
- mutex_unlock(&ftrace_regex_lock);
-@@ -3040,18 +3063,12 @@ ftrace_regex_release(struct inode *inode
- orig_hash = &iter->ops->notrace_hash;
+@@ -134,26 +158,41 @@ void cpupri_set(struct cpupri *cp, int c
+ if (likely(newpri != CPUPRI_INVALID)) {
+ struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
- mutex_lock(&ftrace_lock);
-- /*
-- * Remove the current set, update the hash and add
-- * them back.
-- */
-- ftrace_hash_rec_disable(iter->ops, filter_hash);
-- ret = ftrace_hash_move(orig_hash, iter->hash);
-- if (!ret) {
-- ftrace_hash_rec_enable(iter->ops, filter_hash);
-- if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
-- && ftrace_enabled)
-- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-- }
-+ ret = ftrace_hash_move(iter->ops, filter_hash,
-+ orig_hash, iter->hash);
-+ if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
-+ && ftrace_enabled)
-+ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-+
- mutex_unlock(&ftrace_lock);
- }
- free_ftrace_hash(iter->hash);
-Index: linux-2.6/drivers/block/floppy.c
-===================================================================
---- linux-2.6.orig/drivers/block/floppy.c
-+++ linux-2.6/drivers/block/floppy.c
-@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
- use_virtual_dma = can_use_virtual_dma & 1;
- fdc_state[0].address = FDC1;
- if (fdc_state[0].address == -1) {
-- del_timer(&fd_timeout);
-+ del_timer_sync(&fd_timeout);
- err = -ENODEV;
- goto out_unreg_region;
+- raw_spin_lock_irqsave(&vec->lock, flags);
+-
+ cpumask_set_cpu(cpu, vec->mask);
+- vec->count++;
+- if (vec->count == 1)
+- set_bit(newpri, cp->pri_active);
+-
+- raw_spin_unlock_irqrestore(&vec->lock, flags);
++ /*
++ * When adding a new vector, we update the mask first,
++ * do a write memory barrier, and then update the count, to
++ * make sure the vector is visible when count is set.
++ */
++ smp_mb__before_atomic_inc();
++ atomic_inc(&(vec)->count);
++ do_mb = 1;
}
-@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
- fdc = 0; /* reset fdc in case of unexpected interrupt */
- err = floppy_grab_irq_and_dma();
- if (err) {
-- del_timer(&fd_timeout);
-+ del_timer_sync(&fd_timeout);
- err = -EBUSY;
- goto out_unreg_region;
- }
-@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
- user_reset_fdc(-1, FD_RESET_ALWAYS, false);
- }
- fdc = 0;
-- del_timer(&fd_timeout);
-+ del_timer_sync(&fd_timeout);
- current_drive = 0;
- initialized = true;
- if (have_no_fdc) {
-@@ -4368,7 +4368,7 @@ static int __init floppy_init(void)
- unregister_blkdev(FLOPPY_MAJOR, "fd");
- out_put_disk:
- while (dr--) {
-- del_timer(&motor_off_timer[dr]);
-+ del_timer_sync(&motor_off_timer[dr]);
- if (disks[dr]->queue)
- blk_cleanup_queue(disks[dr]->queue);
- put_disk(disks[dr]);
-Index: linux-2.6/drivers/gpu/drm/drm_irq.c
-===================================================================
---- linux-2.6.orig/drivers/gpu/drm/drm_irq.c
-+++ linux-2.6/drivers/gpu/drm/drm_irq.c
-@@ -109,10 +109,7 @@ static void vblank_disable_and_save(stru
- /* Prevent vblank irq processing while disabling vblank irqs,
- * so no updates of timestamps or count can happen after we've
- * disabled. Needed to prevent races in case of delayed irq's.
-- * Disable preemption, so vblank_time_lock is held as short as
-- * possible, even under a kernel with PREEMPT_RT patches.
- */
-- preempt_disable();
- spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+ if (likely(oldpri != CPUPRI_INVALID)) {
+ struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
- dev->driver->disable_vblank(dev, crtc);
-@@ -163,7 +160,6 @@ static void vblank_disable_and_save(stru
- clear_vblank_timestamps(dev, crtc);
+- raw_spin_lock_irqsave(&vec->lock, flags);
+-
+- vec->count--;
+- if (!vec->count)
+- clear_bit(oldpri, cp->pri_active);
++ /*
++ * Because the order of modification of the vec->count
++ * is important, we must make sure that the update
++ * of the new prio is seen before we decrement the
++ * old prio. This makes sure that the loop sees
++ * one or the other when we raise the priority of
++ * the run queue. We don't care about when we lower the
++ * priority, as that will trigger an rt pull anyway.
++ *
++ * We only need to do a memory barrier if we updated
++ * the new priority vec.
++ */
++ if (do_mb)
++ smp_mb__after_atomic_inc();
++
++ /*
++ * When removing from the vector, we decrement the counter first
++ * do a memory barrier and then clear the mask.
++ */
++ atomic_dec(&(vec)->count);
++ smp_mb__after_atomic_inc();
+ cpumask_clear_cpu(cpu, vec->mask);
+-
+- raw_spin_unlock_irqrestore(&vec->lock, flags);
+ }
- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-- preempt_enable();
- }
+ *currpri = newpri;
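The two cpupri hunks above are the heart of the lockless rewrite: the
per-vector raw spinlock is gone and correctness rests on a publish/observe
barrier pairing between cpupri_set() and cpupri_find(). Condensed, using the
names from the code above:

    /*
     *  cpupri_set() (publisher)            cpupri_find() (observer)
     *  ------------------------            ------------------------
     *  cpumask_set_cpu(cpu, vec->mask);    if (!atomic_read(&vec->count))
     *  smp_mb__before_atomic_inc();                skip this vector;
     *  atomic_inc(&vec->count);            smp_rmb();
     *                                      scan vec->mask;
     *
     * An observer that sees count != 0 is guaranteed to also see the mask
     * bit; seeing count == 0 at worst skips a vector, which the comment in
     * cpupri_find() argues is repaired by the later RT pull.
     */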
+@@ -175,8 +214,7 @@ int cpupri_init(struct cpupri *cp)
+ for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
+ struct cpupri_vec *vec = &cp->pri_to_cpu[i];
- static void vblank_disable_fn(unsigned long arg)
-@@ -875,10 +871,6 @@ int drm_vblank_get(struct drm_device *de
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- /* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
-- /* Disable preemption while holding vblank_time_lock. Do
-- * it explicitely to guard against PREEMPT_RT kernel.
-- */
-- preempt_disable();
- spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
- if (!dev->vblank_enabled[crtc]) {
- /* Enable vblank irqs under vblank_time_lock protection.
-@@ -898,7 +890,6 @@ int drm_vblank_get(struct drm_device *de
- }
- }
- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
-- preempt_enable();
- } else {
- if (!dev->vblank_enabled[crtc]) {
- atomic_dec(&dev->vblank_refcount[crtc]);
-Index: linux-2.6/arch/x86/kernel/kprobes.c
-===================================================================
---- linux-2.6.orig/arch/x86/kernel/kprobes.c
-+++ linux-2.6/arch/x86/kernel/kprobes.c
-@@ -475,7 +475,6 @@ static void __kprobes setup_singlestep(s
- * stepping.
- */
- regs->ip = (unsigned long)p->ainsn.insn;
-- preempt_enable_no_resched();
- return;
+- raw_spin_lock_init(&vec->lock);
+- vec->count = 0;
++ atomic_set(&vec->count, 0);
+ if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
+ goto cleanup;
}
- #endif
-Index: linux-2.6/drivers/ide/ide_platform.c
+Index: linux-2.6/kernel/sched_cpupri.h
===================================================================
---- linux-2.6.orig/drivers/ide/ide_platform.c
-+++ linux-2.6/drivers/ide/ide_platform.c
-@@ -95,7 +95,7 @@ static int __devinit plat_ide_probe(stru
- plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
- hw.dev = &pdev->dev;
+--- linux-2.6.orig/kernel/sched_cpupri.h
++++ linux-2.6/kernel/sched_cpupri.h
+@@ -12,9 +12,8 @@
+ /* values 2-101 are RT priorities 0-99 */
-- d.irq_flags = res_irq->flags;
-+ d.irq_flags = 0;
- if (mmio)
- d.host_flags |= IDE_HFLAG_MMIO;
+ struct cpupri_vec {
+- raw_spinlock_t lock;
+- int count;
+- cpumask_var_t mask;
++ atomic_t count;
++ cpumask_var_t mask;
+ };
-Index: linux-2.6/kernel/sched.c
+ struct cpupri {
+Index: linux-2.6/mm/slab.c
===================================================================
---- linux-2.6.orig/kernel/sched.c
-+++ linux-2.6/kernel/sched.c
-@@ -185,6 +185,7 @@ void init_rt_bandwidth(struct rt_bandwid
+--- linux-2.6.orig/mm/slab.c
++++ linux-2.6/mm/slab.c
+@@ -116,6 +116,7 @@
+ #include <linux/kmemcheck.h>
+ #include <linux/memory.h>
+ #include <linux/prefetch.h>
++#include <linux/locallock.h>
- hrtimer_init(&rt_b->rt_period_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ rt_b->rt_period_timer.irqsafe = 1;
- rt_b->rt_period_timer.function = sched_rt_period_timer;
- }
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
+@@ -620,6 +621,51 @@ int slab_is_available(void)
+ static struct lock_class_key on_slab_l3_key;
+ static struct lock_class_key on_slab_alc_key;
-@@ -800,7 +801,11 @@ late_initcall(sched_init_debug);
- * Number of tasks to iterate in a single balance run.
- * Limited because this is done with IRQs disabled.
- */
-+#ifndef CONFIG_PREEMPT_RT_FULL
- const_debug unsigned int sysctl_sched_nr_migrate = 32;
-+#else
-+const_debug unsigned int sysctl_sched_nr_migrate = 8;
-+#endif
++static struct lock_class_key debugobj_l3_key;
++static struct lock_class_key debugobj_alc_key;
++
++static void slab_set_lock_classes(struct kmem_cache *cachep,
++ struct lock_class_key *l3_key, struct lock_class_key *alc_key,
++ int q)
++{
++ struct array_cache **alc;
++ struct kmem_list3 *l3;
++ int r;
++
++ l3 = cachep->nodelists[q];
++ if (!l3)
++ return;
++
++ lockdep_set_class(&l3->list_lock, l3_key);
++ alc = l3->alien;
++ /*
++ * FIXME: This check for BAD_ALIEN_MAGIC
++ * should go away when common slab code is taught to
++ * work even without alien caches.
++ * Currently, non NUMA code returns BAD_ALIEN_MAGIC
++ * for alloc_alien_cache,
++ */
++ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
++ return;
++ for_each_node(r) {
++ if (alc[r])
++ lockdep_set_class(&alc[r]->lock, alc_key);
++ }
++}
++
++static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
++{
++ slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
++}
++
++static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
++{
++ int node;
++
++ for_each_online_node(node)
++ slab_set_debugobj_lock_classes_node(cachep, node);
++}
++
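The debugobj_* keys above address lockdep recursion rather than a real
deadlock: with SLAB_DEBUG_OBJECTS, freeing into a cache can re-enter the
allocator from the debug-objects code while a nodelist lock of the same lock
class is already held. Giving those caches their own key class turns the
apparent self-deadlock into an ordinary nested acquisition. Roughly, with the
call chain condensed for illustration:

    /*
     * kmem_cache_free(debug-enabled cache, obj)
     *   spin_lock(&l3->list_lock)           class: debugobj_l3_key
     *     debug_object_free(obj, ...)
     *       alloc/free in debug_objects' internal (generic) cache
     *         spin_lock(&l3->list_lock)     class: on_slab_l3_key
     *
     * With a single shared class, lockdep would report recursive locking.
     */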
+ static void init_node_lock_keys(int q)
+ {
+ struct cache_sizes *s = malloc_sizes;
+@@ -628,29 +674,14 @@ static void init_node_lock_keys(int q)
+ return;
- /*
- * period over which we average the RT time consumption, measured
-@@ -1136,6 +1141,7 @@ static void init_rq_hrtick(struct rq *rq
+ for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+- struct array_cache **alc;
+ struct kmem_list3 *l3;
+- int r;
- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rq->hrtick_timer.function = hrtick;
-+ rq->hrtick_timer.irqsafe = 1;
+ l3 = s->cs_cachep->nodelists[q];
+ if (!l3 || OFF_SLAB(s->cs_cachep))
+ continue;
+- lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+- alc = l3->alien;
+- /*
+- * FIXME: This check for BAD_ALIEN_MAGIC
+- * should go away when common slab code is taught to
+- * work even without alien caches.
+- * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+- * for alloc_alien_cache,
+- */
+- if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+- continue;
+- for_each_node(r) {
+- if (alc[r])
+- lockdep_set_class(&alc[r]->lock,
+- &on_slab_alc_key);
+- }
++
++ slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
++ &on_slab_alc_key, q);
+ }
}
- #else /* CONFIG_SCHED_HRTICK */
- static inline void hrtick_clear(struct rq *rq)
-@@ -2378,11 +2384,11 @@ static int select_fallback_rq(int cpu, s
-
- /* Look for allowed, online CPU in same node. */
- for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
-+ if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
- return dest_cpu;
- /* Any allowed, online CPU? */
-- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-+ dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
- if (dest_cpu < nr_cpu_ids)
- return dest_cpu;
-
-@@ -2419,7 +2425,7 @@ int select_task_rq(struct task_struct *p
- * [ this allows ->select_task() to simply return task_cpu(p) and
- * not worry about this generic constraint ]
- */
-- if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-+ if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
- !cpu_online(cpu)))
- cpu = select_fallback_rq(task_cpu(p), p);
-
-@@ -2477,10 +2483,6 @@ static void ttwu_activate(struct rq *rq,
+@@ -669,6 +700,14 @@ static void init_node_lock_keys(int q)
+ static inline void init_lock_keys(void)
{
- activate_task(rq, p, en_flags);
- p->on_rq = 1;
--
-- /* if a worker is waking up, notify workqueue */
-- if (p->flags & PF_WQ_WORKER)
-- wq_worker_waking_up(p, cpu_of(rq));
}
-
- /*
-@@ -2678,8 +2680,25 @@ try_to_wake_up(struct task_struct *p, un
-
- smp_wmb();
- raw_spin_lock_irqsave(&p->pi_lock, flags);
-- if (!(p->state & state))
-+ if (!(p->state & state)) {
-+ /*
-+ * The task might be running due to a spinlock sleeper
-+ * wakeup. Check the saved state and set it to running
-+ * if the wakeup condition is true.
-+ */
-+ if (!(wake_flags & WF_LOCK_SLEEPER)) {
-+ if (p->saved_state & state)
-+ p->saved_state = TASK_RUNNING;
-+ }
- goto out;
-+ }
+
-+ /*
-+ * If this is a regular wakeup, then we can unconditionally
-+ * clear the saved state of a "lock sleeper".
-+ */
-+ if (!(wake_flags & WF_LOCK_SLEEPER))
-+ p->saved_state = TASK_RUNNING;
-
- success = 1; /* we're going to change ->state */
- cpu = task_cpu(p);
-@@ -2735,40 +2754,6 @@ try_to_wake_up(struct task_struct *p, un
- }
-
- /**
-- * try_to_wake_up_local - try to wake up a local task with rq lock held
-- * @p: the thread to be awakened
-- *
-- * Put @p on the run-queue if it's not already there. The caller must
-- * ensure that this_rq() is locked, @p is bound to this_rq() and not
-- * the current task.
-- */
--static void try_to_wake_up_local(struct task_struct *p)
--{
-- struct rq *rq = task_rq(p);
--
-- BUG_ON(rq != this_rq());
-- BUG_ON(p == current);
-- lockdep_assert_held(&rq->lock);
--
-- if (!raw_spin_trylock(&p->pi_lock)) {
-- raw_spin_unlock(&rq->lock);
-- raw_spin_lock(&p->pi_lock);
-- raw_spin_lock(&rq->lock);
-- }
--
-- if (!(p->state & TASK_NORMAL))
-- goto out;
--
-- if (!p->on_rq)
-- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
--
-- ttwu_do_wakeup(rq, p, 0);
-- ttwu_stat(p, smp_processor_id(), 0);
--out:
-- raw_spin_unlock(&p->pi_lock);
--}
--
--/**
- * wake_up_process - Wake up a specific process
- * @p: The process to be woken up.
- *
-@@ -2785,6 +2770,18 @@ int wake_up_process(struct task_struct *
- }
- EXPORT_SYMBOL(wake_up_process);
-
-+/**
-+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
-+ * @p: The process to be woken up.
-+ *
-+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
-+ * the nature of the wakeup.
-+ */
-+int wake_up_lock_sleeper(struct task_struct *p)
++static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
+{
-+ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
+}
+
- int wake_up_state(struct task_struct *p, unsigned int state)
- {
- return try_to_wake_up(p, state, 0);
-@@ -2825,7 +2822,7 @@ static void __sched_fork(struct task_str
- void sched_fork(struct task_struct *p)
- {
- unsigned long flags;
-- int cpu = get_cpu();
-+ int cpu;
-
- __sched_fork(p);
- /*
-@@ -2865,6 +2862,7 @@ void sched_fork(struct task_struct *p)
- if (!rt_prio(p->prio))
- p->sched_class = &fair_sched_class;
-
-+ cpu = get_cpu();
- if (p->sched_class->task_fork)
- p->sched_class->task_fork(p);
-
-@@ -2876,8 +2874,9 @@ void sched_fork(struct task_struct *p)
- * Silence PROVE_RCU.
- */
- raw_spin_lock_irqsave(&p->pi_lock, flags);
-- set_task_cpu(p, cpu);
-+ set_task_cpu(p, smp_processor_id());
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-+ put_cpu();
-
- #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
- if (likely(sched_info_on()))
-@@ -2893,8 +2892,6 @@ void sched_fork(struct task_struct *p)
- #ifdef CONFIG_SMP
- plist_node_init(&p->pushable_tasks, MAX_PRIO);
++static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
++{
++}
#endif
--
-- put_cpu();
- }
-
- /*
-@@ -3060,8 +3057,12 @@ static void finish_task_switch(struct rq
- finish_lock_switch(rq, prev);
-
- fire_sched_in_preempt_notifiers(current);
-+ /*
-+ * We use mmdrop_delayed() here so we don't have to do the
-+ * full __mmdrop() when we are the last user.
-+ */
- if (mm)
-- mmdrop(mm);
-+ mmdrop_delayed(mm);
- if (unlikely(prev_state == TASK_DEAD)) {
- /*
- * Remove function-return probe instances associated with this
-@@ -4242,9 +4243,9 @@ pick_next_task(struct rq *rq)
- }
/*
-- * schedule() is the main scheduler function.
-+ * __schedule() is the main scheduler function.
- */
--asmlinkage void __sched schedule(void)
-+static void __sched __schedule(void)
- {
- struct task_struct *prev, *next;
- unsigned long *switch_count;
-@@ -4272,29 +4273,6 @@ asmlinkage void __sched schedule(void)
- } else {
- deactivate_task(rq, prev, DEQUEUE_SLEEP);
- prev->on_rq = 0;
--
-- /*
-- * If a worker went to sleep, notify and ask workqueue
-- * whether it wants to wake up a task to maintain
-- * concurrency.
-- */
-- if (prev->flags & PF_WQ_WORKER) {
-- struct task_struct *to_wakeup;
--
-- to_wakeup = wq_worker_sleeping(prev, cpu);
-- if (to_wakeup)
-- try_to_wake_up_local(to_wakeup);
-- }
--
-- /*
-- * If we are going to sleep and we have plugged IO
-- * queued, make sure to submit it to avoid deadlocks.
-- */
-- if (blk_needs_flush_plug(prev)) {
-- raw_spin_unlock(&rq->lock);
-- blk_schedule_flush_plug(prev);
-- raw_spin_lock(&rq->lock);
-- }
- }
- switch_count = &prev->nvcsw;
- }
-@@ -4328,12 +4306,62 @@ asmlinkage void __sched schedule(void)
-
- post_schedule(rq);
+@@ -678,12 +717,66 @@ static DEFINE_MUTEX(cache_chain_mutex);
+ static struct list_head cache_chain;
-- preempt_enable_no_resched();
-+ __preempt_enable_no_resched();
- if (need_resched())
- goto need_resched;
- }
+ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
++static DEFINE_PER_CPU(struct list_head, slab_free_list);
++static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
+
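The two declarations above set up the machinery the rest of the mm/slab.c
conversion leans on. slab_lock is a "local lock": on mainline it collapses
into plain local_irq_*() operations, while on RT it is a per-CPU sleeping
lock, which is why the local_irq_save()/spin_lock_irq() pairs throughout this
file become local_lock_irqsave()/local_spin_lock_irq(). slab_free_list exists
because on RT pages must not be freed while that lock is held:
kmem_freepages() queues them per CPU (stashing the order in page->index) and
free_delayed() releases them after the unlock. The resulting free-side
pattern, condensed from the kmem_cache_free() hunk further down:

    unsigned long flags;

    local_lock_irqsave(slab_lock, flags);   /* per-CPU lock on RT */
    __cache_free(cachep, objp, __builtin_return_address(0));
    unlock_slab_and_free_delayed(flags);    /* unlock, then really free
                                               any pages queued meanwhile */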
-+static inline void sched_submit_work(struct task_struct *tsk)
++#ifndef CONFIG_PREEMPT_RT_BASE
++# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1)
++#else
++/*
++ * execute func() for all CPUs. On PREEMPT_RT we dont actually have
++ * to run on the remote CPUs - we only have to take their CPU-locks.
++ * (This is a rare operation, so cacheline bouncing is not an issue.)
++ */
++static void
++slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
+{
-+ if (!tsk->state || tsk_is_pi_blocked(tsk))
-+ return;
-+
-+ /*
-+ * If a worker went to sleep, notify and ask workqueue whether
-+ * it wants to wake up a task to maintain concurrency.
-+ */
-+ if (tsk->flags & PF_WQ_WORKER)
-+ wq_worker_sleeping(tsk);
++ unsigned int i;
+
-+ /*
-+ * If we are going to sleep and we have plugged IO queued,
-+ * make sure to submit it to avoid deadlocks.
-+ */
-+ if (blk_needs_flush_plug(tsk))
-+ blk_schedule_flush_plug(tsk);
++ for_each_online_cpu(i)
++ func(arg, i);
+}
++#endif
+
-+static inline void sched_update_worker(struct task_struct *tsk)
++static void free_delayed(struct list_head *h)
+{
-+ if (tsk_is_pi_blocked(tsk))
-+ return;
++ while(!list_empty(h)) {
++ struct page *page = list_first_entry(h, struct page, lru);
+
-+ if (tsk->flags & PF_WQ_WORKER)
-+ wq_worker_running(tsk);
++ list_del(&page->lru);
++ __free_pages(page, page->index);
++ }
+}
+
-+asmlinkage void schedule(void)
++static void unlock_l3_and_free_delayed(spinlock_t *list_lock)
+{
-+ struct task_struct *tsk = current;
++ LIST_HEAD(tmp);
+
-+ sched_submit_work(tsk);
-+ __schedule();
-+ sched_update_worker(tsk);
++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
++ local_spin_unlock_irq(slab_lock, list_lock);
++ free_delayed(&tmp);
++}
++
++static void unlock_slab_and_free_delayed(unsigned long flags)
++{
++ LIST_HEAD(tmp);
++
++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
++ local_unlock_irqrestore(slab_lock, flags);
++ free_delayed(&tmp);
+}
- EXPORT_SYMBOL(schedule);
-+/**
-+ * schedule_preempt_disabled - called with preemption disabled
-+ *
-+ * Returns with preemption disabled. Note: preempt_count must be 1
-+ */
-+void __sched schedule_preempt_disabled(void)
+ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
+ {
+ return cachep->array[smp_processor_id()];
+ }
+
++static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep,
++ int cpu)
+{
-+ __preempt_enable_no_resched();
-+ schedule();
-+ preempt_disable();
++ return cachep->array[cpu];
+}
+
- #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-
- static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-@@ -4405,7 +4433,7 @@ asmlinkage void __sched notrace preempt_
+ static inline struct kmem_cache *__find_general_cachep(size_t size,
+ gfp_t gfpflags)
+ {
+@@ -1021,9 +1114,10 @@ static void reap_alien(struct kmem_cache
+ if (l3->alien) {
+ struct array_cache *ac = l3->alien[node];
- do {
- add_preempt_count_notrace(PREEMPT_ACTIVE);
-- schedule();
-+ __schedule();
- sub_preempt_count_notrace(PREEMPT_ACTIVE);
+- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
++ if (ac && ac->avail &&
++ local_spin_trylock_irq(slab_lock, &ac->lock)) {
+ __drain_alien_cache(cachep, ac, node);
+- spin_unlock_irq(&ac->lock);
++ local_spin_unlock_irq(slab_lock, &ac->lock);
+ }
+ }
+ }
+@@ -1038,9 +1132,9 @@ static void drain_alien_cache(struct kme
+ for_each_online_node(i) {
+ ac = alien[i];
+ if (ac) {
+- spin_lock_irqsave(&ac->lock, flags);
++ local_spin_lock_irqsave(slab_lock, &ac->lock, flags);
+ __drain_alien_cache(cachep, ac, i);
+- spin_unlock_irqrestore(&ac->lock, flags);
++ local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags);
+ }
+ }
+ }
+@@ -1119,11 +1213,11 @@ static int init_cache_nodelists_node(int
+ cachep->nodelists[node] = l3;
+ }
- /*
-@@ -4433,7 +4461,7 @@ asmlinkage void __sched preempt_schedule
- do {
- add_preempt_count(PREEMPT_ACTIVE);
- local_irq_enable();
-- schedule();
-+ __schedule();
- local_irq_disable();
- sub_preempt_count(PREEMPT_ACTIVE);
+- spin_lock_irq(&cachep->nodelists[node]->list_lock);
++ local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
+ cachep->nodelists[node]->free_limit =
+ (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+- spin_unlock_irq(&cachep->nodelists[node]->list_lock);
++ local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
+ }
+ return 0;
+ }
+@@ -1148,7 +1242,7 @@ static void __cpuinit cpuup_canceled(lon
+ if (!l3)
+ goto free_array_cache;
-@@ -4828,9 +4856,8 @@ long __sched sleep_on_timeout(wait_queue
- EXPORT_SYMBOL(sleep_on_timeout);
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
- #ifdef CONFIG_RT_MUTEXES
--
- /*
-- * rt_mutex_setprio - set the current priority of a task
-+ * task_setprio - set the current priority of a task
- * @p: task
- * @prio: prio value (kernel-internal form)
- *
-@@ -4839,7 +4866,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
- *
- * Used by the rt_mutex code to implement priority inheritance logic.
- */
--void rt_mutex_setprio(struct task_struct *p, int prio)
-+void task_setprio(struct task_struct *p, int prio)
- {
- int oldprio, on_rq, running;
- struct rq *rq;
-@@ -4849,6 +4876,24 @@ void rt_mutex_setprio(struct task_struct
+ /* Free limit for this kmem_list3 */
+ l3->free_limit -= cachep->batchcount;
+@@ -1156,7 +1250,7 @@ static void __cpuinit cpuup_canceled(lon
+ free_block(cachep, nc->entry, nc->avail, node);
- rq = __task_rq_lock(p);
+ if (!cpumask_empty(mask)) {
+- spin_unlock_irq(&l3->list_lock);
++ unlock_l3_and_free_delayed(&l3->list_lock);
+ goto free_array_cache;
+ }
-+ /*
-+ * Idle task boosting is a nono in general. There is one
-+ * exception, when PREEMPT_RT and NOHZ is active:
-+ *
-+ * The idle task calls get_next_timer_interrupt() and holds
-+ * the timer wheel base->lock on the CPU and another CPU wants
-+ * to access the timer (probably to cancel it). We can safely
-+ * ignore the boosting request, as the idle CPU runs this code
-+ * with interrupts disabled and will complete the lock
-+ * protected section without being interrupted. So there is no
-+ * real need to boost.
-+ */
-+ if (unlikely(p == rq->idle)) {
-+ WARN_ON(p != rq->curr);
-+ WARN_ON(p->pi_blocked_on);
-+ goto out_unlock;
-+ }
-+
- trace_sched_pi_setprio(p, prio);
- oldprio = p->prio;
- prev_class = p->sched_class;
-@@ -4872,9 +4917,9 @@ void rt_mutex_setprio(struct task_struct
- enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
+@@ -1170,7 +1264,7 @@ static void __cpuinit cpuup_canceled(lon
+ alien = l3->alien;
+ l3->alien = NULL;
- check_class_changed(rq, p, prev_class, oldprio);
-+out_unlock:
- __task_rq_unlock(rq);
- }
--
- #endif
+- spin_unlock_irq(&l3->list_lock);
++ unlock_l3_and_free_delayed(&l3->list_lock);
- void set_user_nice(struct task_struct *p, long nice)
-@@ -5543,7 +5588,7 @@ SYSCALL_DEFINE0(sched_yield)
- __release(rq->lock);
- spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
- do_raw_spin_unlock(&rq->lock);
-- preempt_enable_no_resched();
-+ __preempt_enable_no_resched();
+ kfree(shared);
+ if (alien) {
+@@ -1244,7 +1338,7 @@ static int __cpuinit cpuup_prepare(long
+ l3 = cachep->nodelists[node];
+ BUG_ON(!l3);
- schedule();
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+ if (!l3->shared) {
+ /*
+ * We are serialised from CPU_DEAD or
+@@ -1259,9 +1353,11 @@ static int __cpuinit cpuup_prepare(long
+ alien = NULL;
+ }
+ #endif
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ kfree(shared);
+ free_alien_cache(alien);
++ if (cachep->flags & SLAB_DEBUG_OBJECTS)
++ slab_set_debugobj_lock_classes_node(cachep, node);
+ }
+ init_node_lock_keys(node);
-@@ -5557,9 +5602,17 @@ static inline int should_resched(void)
+@@ -1448,6 +1544,10 @@ void __init kmem_cache_init(void)
+ if (num_possible_nodes() == 1)
+ use_alien_caches = 0;
- static void __cond_resched(void)
- {
-- add_preempt_count(PREEMPT_ACTIVE);
-- schedule();
-- sub_preempt_count(PREEMPT_ACTIVE);
-+ do {
-+ add_preempt_count(PREEMPT_ACTIVE);
-+ __schedule();
-+ sub_preempt_count(PREEMPT_ACTIVE);
-+ /*
-+ * Check again in case we missed a preemption
-+ * opportunity between schedule and now.
-+ */
-+ barrier();
++ local_irq_lock_init(slab_lock);
++ for_each_possible_cpu(i)
++ INIT_LIST_HEAD(&per_cpu(slab_free_list, i));
+
-+ } while (need_resched());
- }
+ for (i = 0; i < NUM_INIT_LISTS; i++) {
+ kmem_list3_init(&initkmem_list3[i]);
+ if (i < MAX_NUMNODES)
+@@ -1625,6 +1725,9 @@ void __init kmem_cache_init_late(void)
+ {
+ struct kmem_cache *cachep;
- int __sched _cond_resched(void)
-@@ -5600,6 +5653,7 @@ int __cond_resched_lock(spinlock_t *lock
- }
- EXPORT_SYMBOL(__cond_resched_lock);
++ /* Annotate slab for lockdep -- annotate the malloc caches */
++ init_lock_keys();
++
+ /* 6) resize the head arrays to their final sizes */
+ mutex_lock(&cache_chain_mutex);
+ list_for_each_entry(cachep, &cache_chain, next)
+@@ -1635,9 +1738,6 @@ void __init kmem_cache_init_late(void)
+ /* Done! */
+ g_cpucache_up = FULL;
-+#ifndef CONFIG_PREEMPT_RT_FULL
- int __sched __cond_resched_softirq(void)
+- /* Annotate slab for lockdep -- annotate the malloc caches */
+- init_lock_keys();
+-
+ /*
+ * Register a cpu startup notifier callback that initializes
+ * cpu_cache_get for all new cpus
+@@ -1725,12 +1825,14 @@ static void *kmem_getpages(struct kmem_c
+ /*
+ * Interface to system's page release.
+ */
+-static void kmem_freepages(struct kmem_cache *cachep, void *addr)
++static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed)
{
- BUG_ON(!in_softirq());
-@@ -5613,6 +5667,7 @@ int __sched __cond_resched_softirq(void)
- return 0;
- }
- EXPORT_SYMBOL(__cond_resched_softirq);
-+#endif
+ unsigned long i = (1 << cachep->gfporder);
+- struct page *page = virt_to_page(addr);
++ struct page *page, *basepage = virt_to_page(addr);
+ const unsigned long nr_freed = i;
- /**
- * yield - yield the current processor to other threads.
-@@ -5859,7 +5914,7 @@ void show_state_filter(unsigned long sta
- printk(KERN_INFO
- " task PC stack pid father\n");
- #endif
-- read_lock(&tasklist_lock);
-+ rcu_read_lock();
- do_each_thread(g, p) {
- /*
- * reset the NMI-timeout, listing all files on a slow
-@@ -5875,7 +5930,7 @@ void show_state_filter(unsigned long sta
- #ifdef CONFIG_SCHED_DEBUG
- sysrq_sched_debug_show();
- #endif
-- read_unlock(&tasklist_lock);
-+ rcu_read_unlock();
- /*
- * Only show locks if all tasks are dumped:
- */
-@@ -5997,12 +6052,12 @@ static inline void sched_init_granularit
- #ifdef CONFIG_SMP
- void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
- {
-- if (p->sched_class && p->sched_class->set_cpus_allowed)
-- p->sched_class->set_cpus_allowed(p, new_mask);
-- else {
-- cpumask_copy(&p->cpus_allowed, new_mask);
-+ if (!__migrate_disabled(p)) {
-+ if (p->sched_class && p->sched_class->set_cpus_allowed)
-+ p->sched_class->set_cpus_allowed(p, new_mask);
- p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
++ page = basepage;
++
+ kmemcheck_free_shadow(page, cachep->gfporder);
+
+ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+@@ -1746,7 +1848,13 @@ static void kmem_freepages(struct kmem_c
}
-+ cpumask_copy(&p->cpus_allowed, new_mask);
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += nr_freed;
+- free_pages((unsigned long)addr, cachep->gfporder);
++
++ if (!delayed) {
++ free_pages((unsigned long)addr, cachep->gfporder);
++ } else {
++ basepage->index = cachep->gfporder;
++ list_add(&basepage->lru, &__get_cpu_var(slab_free_list));
++ }
}
- /*
-@@ -6053,7 +6108,7 @@ int set_cpus_allowed_ptr(struct task_str
- do_set_cpus_allowed(p, new_mask);
-
- /* Can the task run on the task's current CPU? If so, we're done */
-- if (cpumask_test_cpu(task_cpu(p), new_mask))
-+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
- goto out;
+ static void kmem_rcu_free(struct rcu_head *head)
+@@ -1754,7 +1862,7 @@ static void kmem_rcu_free(struct rcu_hea
+ struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
+ struct kmem_cache *cachep = slab_rcu->cachep;
- dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -6072,6 +6127,101 @@ int set_cpus_allowed_ptr(struct task_str
+- kmem_freepages(cachep, slab_rcu->addr);
++ kmem_freepages(cachep, slab_rcu->addr, false);
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->slabp_cache, slab_rcu);
}
- EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+@@ -1973,7 +2081,8 @@ static void slab_destroy_debugcheck(stru
+ * Before calling the slab must have been unlinked from the cache. The
+ * cache-lock is not held/needed.
+ */
+-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
++static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp,
++ bool delayed)
+ {
+ void *addr = slabp->s_mem - slabp->colouroff;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+void migrate_disable(void)
-+{
-+ struct task_struct *p = current;
-+ const struct cpumask *mask;
-+ unsigned long flags;
-+ struct rq *rq;
-+
-+ preempt_disable();
-+ if (p->migrate_disable) {
-+ p->migrate_disable++;
-+ preempt_enable();
-+ return;
-+ }
+@@ -1986,7 +2095,7 @@ static void slab_destroy(struct kmem_cac
+ slab_rcu->addr = addr;
+ call_rcu(&slab_rcu->head, kmem_rcu_free);
+ } else {
+- kmem_freepages(cachep, addr);
++ kmem_freepages(cachep, addr, delayed);
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->slabp_cache, slabp);
+ }
+@@ -2424,6 +2533,16 @@ kmem_cache_create (const char *name, siz
+ goto oops;
+ }
+
++ if (flags & SLAB_DEBUG_OBJECTS) {
++ /*
++ * Would deadlock through slab_destroy()->call_rcu()->
++ * debug_object_activate()->kmem_cache_alloc().
++ */
++ WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
+
-+ pin_current_cpu();
-+ if (unlikely(!scheduler_running)) {
-+ p->migrate_disable = 1;
-+ preempt_enable();
-+ return;
++ slab_set_debugobj_lock_classes(cachep);
+ }
+
-+ /*
-+ * Since this is always current we can get away with only locking
-+ * rq->lock, the ->cpus_allowed value can normally only be changed
-+ * while holding both p->pi_lock and rq->lock, but seeing that this
-+ * it current, we cannot actually be waking up, so all code that
-+ * relies on serialization against p->pi_lock is out of scope.
-+ *
-+ * Taking rq->lock serializes us against things like
-+ * set_cpus_allowed_ptr() that can still happen concurrently.
-+ */
-+ rq = this_rq();
-+ raw_spin_lock_irqsave(&rq->lock, flags);
-+ p->migrate_disable = 1;
-+ mask = tsk_cpus_allowed(p);
-+
-+ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
-+
-+ if (!cpumask_equal(&p->cpus_allowed, mask)) {
-+ if (p->sched_class->set_cpus_allowed)
-+ p->sched_class->set_cpus_allowed(p, mask);
-+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
-+ }
-+ raw_spin_unlock_irqrestore(&rq->lock, flags);
-+ preempt_enable();
+ /* cache setup completed, link it into the list */
+ list_add(&cachep->next, &cache_chain);
+ oops:
+@@ -2441,7 +2560,7 @@ EXPORT_SYMBOL(kmem_cache_create);
+ #if DEBUG
+ static void check_irq_off(void)
+ {
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ }
+
+ static void check_irq_on(void)
+@@ -2476,13 +2595,12 @@ static void drain_array(struct kmem_cach
+ struct array_cache *ac,
+ int force, int node);
+
+-static void do_drain(void *arg)
++static void __do_drain(void *arg, unsigned int cpu)
+ {
+ struct kmem_cache *cachep = arg;
+ struct array_cache *ac;
+- int node = numa_mem_id();
++ int node = cpu_to_mem(cpu);
+
+- check_irq_off();
+ ac = cpu_cache_get(cachep);
+ spin_lock(&cachep->nodelists[node]->list_lock);
+ free_block(cachep, ac->entry, ac->avail, node);
+@@ -2490,12 +2608,30 @@ static void do_drain(void *arg)
+ ac->avail = 0;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_BASE
++static void do_drain(void *arg)
++{
++ __do_drain(arg, smp_processor_id());
+}
-+EXPORT_SYMBOL_GPL(migrate_disable);
-+
-+void migrate_enable(void)
++#else
++static void do_drain(void *arg, int cpu)
+{
-+ struct task_struct *p = current;
-+ const struct cpumask *mask;
-+ unsigned long flags;
-+ struct rq *rq;
-+
-+ WARN_ON_ONCE(p->migrate_disable <= 0);
-+
-+ preempt_disable();
-+ if (p->migrate_disable > 1) {
-+ p->migrate_disable--;
-+ preempt_enable();
-+ return;
-+ }
-+
-+ if (unlikely(!scheduler_running)) {
-+ p->migrate_disable = 0;
-+ unpin_current_cpu();
-+ preempt_enable();
-+ return;
-+ }
-+
-+ /*
-+ * See comment in migrate_disable().
-+ */
-+ rq = this_rq();
-+ raw_spin_lock_irqsave(&rq->lock, flags);
-+ p->migrate_disable = 0;
-+ mask = tsk_cpus_allowed(p);
-+
-+ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
-+
-+ if (!cpumask_equal(&p->cpus_allowed, mask)) {
-+ if (p->sched_class->set_cpus_allowed)
-+ p->sched_class->set_cpus_allowed(p, mask);
-+ p->rt.nr_cpus_allowed = cpumask_weight(mask);
-+ }
++ LIST_HEAD(tmp);
+
-+ raw_spin_unlock_irqrestore(&rq->lock, flags);
-+ unpin_current_cpu();
-+ preempt_enable();
++ spin_lock_irq(&per_cpu(slab_lock, cpu).lock);
++ __do_drain(arg, cpu);
++ list_splice_init(&per_cpu(slab_free_list, cpu), &tmp);
++ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock);
++ free_delayed(&tmp);
+}
-+EXPORT_SYMBOL_GPL(migrate_enable);
-+#endif /* CONFIG_PREEMPT_RT_FULL */
++#endif
+
- /*
- * Move (not current) task off this cpu, onto dest cpu. We're doing
- * this because either it can't run here any more (set_cpus_allowed()
-@@ -6100,7 +6250,7 @@ static int __migrate_task(struct task_st
- if (task_cpu(p) != src_cpu)
- goto done;
- /* Affinity changed (again). */
-- if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
-+ if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
- goto fail;
-
- /*
-@@ -6142,6 +6292,8 @@ static int migration_cpu_stop(void *data
+ static void drain_cpu_caches(struct kmem_cache *cachep)
+ {
+ struct kmem_list3 *l3;
+ int node;
- #ifdef CONFIG_HOTPLUG_CPU
+- on_each_cpu(do_drain, cachep, 1);
++ slab_on_each_cpu(do_drain, cachep);
+ check_irq_on();
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+@@ -2526,10 +2662,10 @@ static int drain_freelist(struct kmem_ca
+ nr_freed = 0;
+ while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
-+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
-+
- /*
- * Ensures that the idle task is using init_mm right before its cpu goes
- * offline.
-@@ -6154,7 +6306,12 @@ void idle_task_exit(void)
-
- if (mm != &init_mm)
- switch_mm(mm, &init_mm, current);
-- mmdrop(mm);
-+
-+ /*
-+ * Defer the cleanup to an alive cpu. On RT we can neither
-+ * call mmdrop() nor mmdrop_delayed() from here.
-+ */
-+ per_cpu(idle_last_mm, smp_processor_id()) = mm;
- }
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+ p = l3->slabs_free.prev;
+ if (p == &l3->slabs_free) {
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ goto out;
+ }
- /*
-@@ -6472,6 +6629,12 @@ migration_call(struct notifier_block *nf
- migrate_nr_uninterruptible(rq);
- calc_global_load_remove(rq);
- break;
-+ case CPU_DEAD:
-+ if (per_cpu(idle_last_mm, cpu)) {
-+ mmdrop(per_cpu(idle_last_mm, cpu));
-+ per_cpu(idle_last_mm, cpu) = NULL;
-+ }
-+ break;
- #endif
+@@ -2543,8 +2679,8 @@ static int drain_freelist(struct kmem_ca
+ * to the cache.
+ */
+ l3->free_objects -= cache->num;
+- spin_unlock_irq(&l3->list_lock);
+- slab_destroy(cache, slabp);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
++ slab_destroy(cache, slabp, false);
+ nr_freed++;
}
+ out:
+@@ -2838,7 +2974,7 @@ static int cache_grow(struct kmem_cache
+ offset *= cachep->colour_off;
-@@ -8188,7 +8351,8 @@ void __init sched_init(void)
- #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
- static inline int preempt_count_equals(int preempt_offset)
- {
-- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
-+ int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
-+ sched_rcu_preempt_depth();
+ if (local_flags & __GFP_WAIT)
+- local_irq_enable();
++ local_unlock_irq(slab_lock);
- return (nested == preempt_offset);
+ /*
+ * The test for missing atomic flag is performed here, rather than
+@@ -2868,7 +3004,7 @@ static int cache_grow(struct kmem_cache
+ cache_init_objs(cachep, slabp);
+
+ if (local_flags & __GFP_WAIT)
+- local_irq_disable();
++ local_lock_irq(slab_lock);
+ check_irq_off();
+ spin_lock(&l3->list_lock);
+
+@@ -2879,10 +3015,10 @@ static int cache_grow(struct kmem_cache
+ spin_unlock(&l3->list_lock);
+ return 1;
+ opps1:
+- kmem_freepages(cachep, objp);
++ kmem_freepages(cachep, objp, false);
+ failed:
+ if (local_flags & __GFP_WAIT)
+- local_irq_disable();
++ local_lock_irq(slab_lock);
+ return 0;
}
-Index: linux-2.6/block/blk-core.c
-===================================================================
---- linux-2.6.orig/block/blk-core.c
-+++ linux-2.6/block/blk-core.c
-@@ -236,7 +236,7 @@ EXPORT_SYMBOL(blk_delay_queue);
- **/
- void blk_start_queue(struct request_queue *q)
- {
-- WARN_ON(!irqs_disabled());
-+ WARN_ON_NONRT(!irqs_disabled());
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q);
-@@ -301,7 +301,11 @@ void __blk_run_queue(struct request_queu
+@@ -3280,11 +3416,11 @@ retry:
+ * set and go into memory reserves if necessary.
+ */
+ if (local_flags & __GFP_WAIT)
+- local_irq_enable();
++ local_unlock_irq(slab_lock);
+ kmem_flagcheck(cache, flags);
+ obj = kmem_getpages(cache, local_flags, numa_mem_id());
+ if (local_flags & __GFP_WAIT)
+- local_irq_disable();
++ local_lock_irq(slab_lock);
+ if (obj) {
+ /*
+ * Insert into the appropriate per node queues
+@@ -3400,7 +3536,7 @@ __cache_alloc_node(struct kmem_cache *ca
+ return NULL;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+- local_irq_save(save_flags);
++ local_lock_irqsave(slab_lock, save_flags);
+
+ if (nodeid == -1)
+ nodeid = slab_node;
+@@ -3425,7 +3561,7 @@ __cache_alloc_node(struct kmem_cache *ca
+ /* ___cache_alloc_node can fall back to other nodes */
+ ptr = ____cache_alloc_node(cachep, flags, nodeid);
+ out:
+- local_irq_restore(save_flags);
++ local_unlock_irqrestore(slab_lock, save_flags);
+ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+ kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+ flags);
+@@ -3485,9 +3621,9 @@ __cache_alloc(struct kmem_cache *cachep,
+ return NULL;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+- local_irq_save(save_flags);
++ local_lock_irqsave(slab_lock, save_flags);
+ objp = __do_cache_alloc(cachep, flags);
+- local_irq_restore(save_flags);
++ local_unlock_irqrestore(slab_lock, save_flags);
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+ flags);
+@@ -3535,7 +3671,7 @@ static void free_block(struct kmem_cache
+ * a different cache, refer to comments before
+ * alloc_slabmgmt.
+ */
+- slab_destroy(cachep, slabp);
++ slab_destroy(cachep, slabp, true);
+ } else {
+ list_add(&slabp->list, &l3->slabs_free);
+ }
+@@ -3798,12 +3934,12 @@ void kmem_cache_free(struct kmem_cache *
{
- if (unlikely(blk_queue_stopped(q)))
- return;
--
-+ /*
-+ * q->request_fn() can drop q->queue_lock and reenable
-+ * interrupts, but must return with q->queue_lock held and
-+ * interrupts disabled.
-+ */
- q->request_fn(q);
- }
- EXPORT_SYMBOL(__blk_run_queue);
-@@ -2670,11 +2674,11 @@ static void queue_unplugged(struct reque
- * this lock).
- */
- if (from_schedule) {
-- spin_unlock(q->queue_lock);
-+ spin_unlock_irq(q->queue_lock);
- blk_run_queue_async(q);
- } else {
- __blk_run_queue(q);
-- spin_unlock(q->queue_lock);
-+ spin_unlock_irq(q->queue_lock);
- }
+ unsigned long flags;
+
+- local_irq_save(flags);
+ debug_check_no_locks_freed(objp, obj_size(cachep));
+ if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+ debug_check_no_obj_freed(objp, obj_size(cachep));
++ local_lock_irqsave(slab_lock, flags);
+ __cache_free(cachep, objp, __builtin_return_address(0));
+- local_irq_restore(flags);
++ unlock_slab_and_free_delayed(flags);
+ trace_kmem_cache_free(_RET_IP_, objp);
}
-@@ -2700,7 +2704,6 @@ static void flush_plug_callbacks(struct
- void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
- {
- struct request_queue *q;
-- unsigned long flags;
- struct request *rq;
- LIST_HEAD(list);
- unsigned int depth;
-@@ -2721,11 +2724,6 @@ void blk_flush_plug_list(struct blk_plug
- q = NULL;
- depth = 0;
+@@ -3827,13 +3963,13 @@ void kfree(const void *objp)
-- /*
-- * Save and disable interrupts here, to avoid doing it for every
-- * queue lock we have to take.
-- */
+ if (unlikely(ZERO_OR_NULL_PTR(objp)))
+ return;
- local_irq_save(flags);
- while (!list_empty(&list)) {
- rq = list_entry_rq(list.next);
- list_del_init(&rq->queuelist);
-@@ -2738,7 +2736,7 @@ void blk_flush_plug_list(struct blk_plug
- queue_unplugged(q, depth, from_schedule);
- q = rq->q;
- depth = 0;
-- spin_lock(q->queue_lock);
-+ spin_lock_irq(q->queue_lock);
- }
- /*
- * rq is already accounted, so use raw insert
-@@ -2756,8 +2754,6 @@ void blk_flush_plug_list(struct blk_plug
- */
- if (q)
- queue_unplugged(q, depth, from_schedule);
--
+ kfree_debugcheck(objp);
+ c = virt_to_cache(objp);
+ debug_check_no_locks_freed(objp, obj_size(c));
+ debug_check_no_obj_freed(objp, obj_size(c));
++ local_lock_irqsave(slab_lock, flags);
+ __cache_free(c, (void *)objp, __builtin_return_address(0));
- local_irq_restore(flags);
++ unlock_slab_and_free_delayed(flags);
}
+ EXPORT_SYMBOL(kfree);
- void blk_finish_plug(struct blk_plug *plug)
-Index: linux-2.6/kernel/workqueue.c
-===================================================================
---- linux-2.6.orig/kernel/workqueue.c
-+++ linux-2.6/kernel/workqueue.c
-@@ -137,6 +137,7 @@ struct worker {
- unsigned int flags; /* X: flags */
- int id; /* I: worker id */
- struct work_struct rebind_work; /* L: rebind worker to cpu */
-+ int sleeping; /* None */
+@@ -3876,7 +4012,7 @@ static int alloc_kmemlist(struct kmem_ca
+ if (l3) {
+ struct array_cache *shared = l3->shared;
+
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+
+ if (shared)
+ free_block(cachep, shared->entry,
+@@ -3889,7 +4025,8 @@ static int alloc_kmemlist(struct kmem_ca
+ }
+ l3->free_limit = (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+- spin_unlock_irq(&l3->list_lock);
++ unlock_l3_and_free_delayed(&l3->list_lock);
++
+ kfree(shared);
+ free_alien_cache(new_alien);
+ continue;
+@@ -3936,17 +4073,30 @@ struct ccupdate_struct {
+ struct array_cache *new[NR_CPUS];
};
- /*
-@@ -657,66 +658,58 @@ static void wake_up_worker(struct global
- }
+-static void do_ccupdate_local(void *info)
++static void __do_ccupdate_local(void *info, int cpu)
+ {
+ struct ccupdate_struct *new = info;
+ struct array_cache *old;
- /**
-- * wq_worker_waking_up - a worker is waking up
-- * @task: task waking up
-- * @cpu: CPU @task is waking up to
-+ * wq_worker_running - a worker is running again
-+ * @task: task returning from sleep
- *
-- * This function is called during try_to_wake_up() when a worker is
-- * being awoken.
-- *
-- * CONTEXT:
-- * spin_lock_irq(rq->lock)
-+ * This function is called when a worker returns from schedule()
- */
--void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
-+void wq_worker_running(struct task_struct *task)
- {
- struct worker *worker = kthread_data(task);
+- check_irq_off();
+- old = cpu_cache_get(new->cachep);
++ old = cpu_cache_get_on_cpu(new->cachep, cpu);
-+ if (!worker->sleeping)
-+ return;
- if (!(worker->flags & WORKER_NOT_RUNNING))
-- atomic_inc(get_gcwq_nr_running(cpu));
-+ atomic_inc(get_gcwq_nr_running(smp_processor_id()));
-+ worker->sleeping = 0;
+- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
+- new->new[smp_processor_id()] = old;
++ new->cachep->array[cpu] = new->new[cpu];
++ new->new[cpu] = old;
++}
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++static void do_ccupdate_local(void *info)
++{
++ __do_ccupdate_local(info, smp_processor_id());
++}
++#else
++static void do_ccupdate_local(void *info, int cpu)
++{
++ spin_lock_irq(&per_cpu(slab_lock, cpu).lock);
++ __do_ccupdate_local(info, cpu);
++ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock);
}
++#endif
- /**
- * wq_worker_sleeping - a worker is going to sleep
- * @task: task going to sleep
-- * @cpu: CPU in question, must be the current CPU number
-- *
-- * This function is called during schedule() when a busy worker is
-- * going to sleep. Worker on the same cpu can be woken up by
-- * returning pointer to its task.
-- *
-- * CONTEXT:
-- * spin_lock_irq(rq->lock)
- *
-- * RETURNS:
-- * Worker task on @cpu to wake up, %NULL if none.
-+ * This function is called from schedule() when a busy worker is
-+ * going to sleep.
- */
--struct task_struct *wq_worker_sleeping(struct task_struct *task,
-- unsigned int cpu)
-+void wq_worker_sleeping(struct task_struct *task)
- {
-- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-- struct global_cwq *gcwq = get_gcwq(cpu);
-- atomic_t *nr_running = get_gcwq_nr_running(cpu);
-+ struct worker *worker = kthread_data(task);
-+ struct global_cwq *gcwq;
-+ int cpu;
-
- if (worker->flags & WORKER_NOT_RUNNING)
-- return NULL;
-+ return;
-+
-+ if (WARN_ON_ONCE(worker->sleeping))
-+ return;
+ /* Always called with the cache_chain_mutex held */
+ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+@@ -3971,7 +4121,7 @@ static int do_tune_cpucache(struct kmem_
+ }
+ new->cachep = cachep;
-- /* this can only happen on the local cpu */
-- BUG_ON(cpu != raw_smp_processor_id());
-+ worker->sleeping = 1;
+- on_each_cpu(do_ccupdate_local, (void *)new, 1);
++ slab_on_each_cpu(do_ccupdate_local, (void *)new);
-+ cpu = smp_processor_id();
-+ gcwq = get_gcwq(cpu);
-+ spin_lock_irq(&gcwq->lock);
- /*
- * The counterpart of the following dec_and_test, implied mb,
- * worklist not empty test sequence is in insert_work().
- * Please read comment there.
-- *
-- * NOT_RUNNING is clear. This means that trustee is not in
-- * charge and we're running on the local cpu w/ rq lock held
-- * and preemption disabled, which in turn means that none else
-- * could be manipulating idle_list, so dereferencing idle_list
-- * without gcwq lock is safe.
-- */
-- if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
-- to_wakeup = first_worker(gcwq);
-- return to_wakeup ? to_wakeup->task : NULL;
-+ */
-+ if (atomic_dec_and_test(get_gcwq_nr_running(cpu)) &&
-+ !list_empty(&gcwq->worklist)) {
-+ worker = first_worker(gcwq);
-+ if (worker)
-+ wake_up_process(worker->task);
-+ }
-+ spin_unlock_irq(&gcwq->lock);
+ check_irq_on();
+ cachep->batchcount = batchcount;
+@@ -3982,9 +4132,11 @@ static int do_tune_cpucache(struct kmem_
+ struct array_cache *ccold = new->new[i];
+ if (!ccold)
+ continue;
+- spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
++ local_spin_lock_irq(slab_lock,
++ &cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
+- spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
++
++ unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ kfree(ccold);
+ }
+ kfree(new);
+@@ -4060,7 +4212,7 @@ static void drain_array(struct kmem_cach
+ if (ac->touched && !force) {
+ ac->touched = 0;
+ } else {
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+ if (ac->avail) {
+ tofree = force ? ac->avail : (ac->limit + 4) / 5;
+ if (tofree > ac->avail)
+@@ -4070,7 +4222,7 @@ static void drain_array(struct kmem_cach
+ memmove(ac->entry, &(ac->entry[tofree]),
+ sizeof(void *) * ac->avail);
+ }
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ }
}
- /**
-@@ -1067,8 +1060,8 @@ int queue_work(struct workqueue_struct *
- {
- int ret;
+@@ -4209,7 +4361,7 @@ static int s_show(struct seq_file *m, vo
+ continue;
-- ret = queue_work_on(get_cpu(), wq, work);
-- put_cpu();
-+ ret = queue_work_on(get_cpu_light(), wq, work);
-+ put_cpu_light();
+ check_irq_on();
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
- return ret;
- }
-@@ -3484,6 +3477,25 @@ static int __devinit workqueue_cpu_callb
- kthread_stop(new_trustee);
- return NOTIFY_BAD;
- }
-+ break;
-+ case CPU_POST_DEAD:
-+ case CPU_UP_CANCELED:
-+ case CPU_DOWN_FAILED:
-+ case CPU_ONLINE:
-+ break;
-+ case CPU_DYING:
-+ /*
-+ * We access this lockless. We are on the dying CPU
-+ * and called from stomp machine.
-+ *
-+ * Before this, the trustee and all workers except for
-+ * the ones which are still executing works from
-+ * before the last CPU down must be on the cpu. After
-+ * this, they'll all be diasporas.
-+ */
-+ gcwq->flags |= GCWQ_DISASSOCIATED;
-+ default:
-+ goto out;
+ list_for_each_entry(slabp, &l3->slabs_full, list) {
+ if (slabp->inuse != cachep->num && !error)
+@@ -4234,7 +4386,7 @@ static int s_show(struct seq_file *m, vo
+ if (l3->shared)
+ shared_avail += l3->shared->avail;
+
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
}
+ num_slabs += active_slabs;
+ num_objs = num_slabs * cachep->num;
+@@ -4463,13 +4615,13 @@ static int leaks_show(struct seq_file *m
+ continue;
- /* some are called w/ irq disabled, don't disturb irq status */
-@@ -3503,16 +3515,6 @@ static int __devinit workqueue_cpu_callb
- gcwq->first_idle = new_worker;
- break;
+ check_irq_on();
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
-- case CPU_DYING:
-- /*
-- * Before this, the trustee and all workers except for
-- * the ones which are still executing works from
-- * before the last CPU down must be on the cpu. After
-- * this, they'll all be diasporas.
-- */
-- gcwq->flags |= GCWQ_DISASSOCIATED;
-- break;
+ list_for_each_entry(slabp, &l3->slabs_full, list)
+ handle_slab(n, cachep, slabp);
+ list_for_each_entry(slabp, &l3->slabs_partial, list)
+ handle_slab(n, cachep, slabp);
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ }
+ name = cachep->name;
+ if (n[0] == n[1]) {
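
All of the slab hunks above follow one pattern: each spin_lock_irq(&l3->list_lock) is paired with a per-CPU local lock. On a non-RT build the wrapper can collapse back to the original call; on RT the local lock serializes per-CPU state without hard-disabling interrupts. An approximate sketch of that mapping, assuming the locallock.h conventions of the -rt series rather than quoting them:

    /* Non-RT: the wrappers reduce to the plain irq-disabling spinlock ops. */
    #ifndef CONFIG_PREEMPT_RT_FULL
    # define local_spin_lock_irq(lvar, lock)    spin_lock_irq(lock)
    # define local_spin_unlock_irq(lvar, lock)  spin_unlock_irq(lock)
    #endif
    /*
     * RT: take the per-CPU local lock 'lvar' first, then 'lock';
     * neither hard-disables interrupts, so the section stays preemptible.
     */
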
+Index: linux-2.6/kernel/lockdep.c
+===================================================================
+--- linux-2.6.orig/kernel/lockdep.c
++++ linux-2.6/kernel/lockdep.c
+@@ -2859,10 +2859,7 @@ static int mark_lock(struct task_struct
+ void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass)
+ {
+- int i;
-
- case CPU_POST_DEAD:
- gcwq->trustee_state = TRUSTEE_BUTCHER;
- /* fall through */
-@@ -3546,6 +3548,7 @@ static int __devinit workqueue_cpu_callb
+- for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+- lock->class_cache[i] = NULL;
++ memset(lock, 0, sizeof(*lock));
- spin_unlock_irqrestore(&gcwq->lock, flags);
+ #ifdef CONFIG_LOCK_STAT
+ lock->cpu = raw_smp_processor_id();
+@@ -3341,6 +3338,7 @@ static void check_flags(unsigned long fl
+ }
+ }
-+out:
- return notifier_from_errno(0);
- }
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * We dont accurately track softirq state in e.g.
+ * hardirq contexts (such as on 4KSTACKS), so only
+@@ -3352,6 +3350,7 @@ static void check_flags(unsigned long fl
+ else
+ DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+ }
++#endif
-Index: linux-2.6/kernel/workqueue_sched.h
+ if (!debug_locks)
+ print_irqtrace_events(current);
+Index: linux-2.6/kernel/trace/ftrace.c
===================================================================
---- linux-2.6.orig/kernel/workqueue_sched.h
-+++ linux-2.6/kernel/workqueue_sched.h
-@@ -4,6 +4,5 @@
- * Scheduler hooks for concurrency managed workqueue. Only to be
- * included from sched.c and workqueue.c.
- */
--void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
--struct task_struct *wq_worker_sleeping(struct task_struct *task,
-- unsigned int cpu);
-+void wq_worker_running(struct task_struct *task);
-+void wq_worker_sleeping(struct task_struct *task);
-Index: linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
-===================================================================
---- linux-2.6.orig/arch/x86/kernel/cpu/intel_cacheinfo.c
-+++ linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
-@@ -151,28 +151,17 @@ union _cpuid4_leaf_ecx {
- u32 full;
- };
-
--struct amd_l3_cache {
-- struct amd_northbridge *nb;
-- unsigned indices;
-- u8 subcaches[4];
--};
--
--struct _cpuid4_info {
-+struct _cpuid4_info_regs {
- union _cpuid4_leaf_eax eax;
- union _cpuid4_leaf_ebx ebx;
- union _cpuid4_leaf_ecx ecx;
- unsigned long size;
-- struct amd_l3_cache *l3;
-- DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
-+ struct amd_northbridge *nb;
- };
-
--/* subset of above _cpuid4_info w/o shared_cpu_map */
--struct _cpuid4_info_regs {
-- union _cpuid4_leaf_eax eax;
-- union _cpuid4_leaf_ebx ebx;
-- union _cpuid4_leaf_ecx ecx;
-- unsigned long size;
-- struct amd_l3_cache *l3;
-+struct _cpuid4_info {
-+ struct _cpuid4_info_regs base;
-+ DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
- };
-
- unsigned short num_cache_leaves;
-@@ -314,12 +303,13 @@ struct _cache_attr {
- /*
- * L3 cache descriptors
- */
--static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
-+static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
- {
-+ struct amd_l3_cache *l3 = &nb->l3_cache;
- unsigned int sc0, sc1, sc2, sc3;
- u32 val = 0;
-
-- pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
-+ pci_read_config_dword(nb->misc, 0x1C4, &val);
-
- /* calculate subcache sizes */
- l3->subcaches[0] = sc0 = !(val & BIT(0));
-@@ -333,33 +323,16 @@ static void __cpuinit amd_calc_l3_indice
- static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
- int index)
- {
-- static struct amd_l3_cache *__cpuinitdata l3_caches;
- int node;
-
- /* only for L3, and not in virtualized environments */
-- if (index < 3 || amd_nb_num() == 0)
-+ if (index < 3)
- return;
-
-- /*
-- * Strictly speaking, the amount in @size below is leaked since it is
-- * never freed but this is done only on shutdown so it doesn't matter.
-- */
-- if (!l3_caches) {
-- int size = amd_nb_num() * sizeof(struct amd_l3_cache);
--
-- l3_caches = kzalloc(size, GFP_ATOMIC);
-- if (!l3_caches)
-- return;
-- }
--
- node = amd_get_nb_id(smp_processor_id());
--
-- if (!l3_caches[node].nb) {
-- l3_caches[node].nb = node_to_amd_nb(node);
-- amd_calc_l3_indices(&l3_caches[node]);
-- }
--
-- this_leaf->l3 = &l3_caches[node];
-+ this_leaf->nb = node_to_amd_nb(node);
-+ if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
-+ amd_calc_l3_indices(this_leaf->nb);
+--- linux-2.6.orig/kernel/trace/ftrace.c
++++ linux-2.6/kernel/trace/ftrace.c
+@@ -1182,8 +1182,14 @@ alloc_and_copy_ftrace_hash(int size_bits
+ return NULL;
}
- /*
-@@ -369,11 +342,11 @@ static void __cpuinit amd_init_l3_cache(
- *
- * @returns: the disabled index if used or negative value if slot free.
- */
--int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
-+int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
- {
- unsigned int reg = 0;
-
-- pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
-+ pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
-
- /* check whether this slot is activated already */
- if (reg & (3UL << 30))
-@@ -387,11 +360,10 @@ static ssize_t show_cache_disable(struct
- {
- int index;
-
-- if (!this_leaf->l3 ||
-- !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-+ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- return -EINVAL;
-
-- index = amd_get_l3_disable_slot(this_leaf->l3, slot);
-+ index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
- if (index >= 0)
- return sprintf(buf, "%d\n", index);
-
-@@ -408,7 +380,7 @@ show_cache_disable_##slot(struct _cpuid4
- SHOW_CACHE_DISABLE(0)
- SHOW_CACHE_DISABLE(1)
-
--static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
-+static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
- unsigned slot, unsigned long idx)
++static void
++ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
++static void
++ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
++
+ static int
+-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
++ftrace_hash_move(struct ftrace_ops *ops, int enable,
++ struct ftrace_hash **dst, struct ftrace_hash *src)
{
+ struct ftrace_func_entry *entry;
+ struct hlist_node *tp, *tn;
+@@ -1193,9 +1199,16 @@ ftrace_hash_move(struct ftrace_hash **ds
+ unsigned long key;
+ int size = src->count;
+ int bits = 0;
++ int ret;
int i;
-@@ -421,10 +393,10 @@ static void amd_l3_disable_index(struct
- for (i = 0; i < 4; i++) {
- u32 reg = idx | (i << 20);
-- if (!l3->subcaches[i])
-+ if (!nb->l3_cache.subcaches[i])
- continue;
+ /*
++ * Remove the current set, update the hash and add
++ * them back.
++ */
++ ftrace_hash_rec_disable(ops, enable);
++
++ /*
+ * If the new source is empty, just free dst and assign it
+ * the empty_hash.
+ */
+@@ -1215,9 +1228,10 @@ ftrace_hash_move(struct ftrace_hash **ds
+ if (bits > FTRACE_HASH_MAX_BITS)
+ bits = FTRACE_HASH_MAX_BITS;
-- pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
-+ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
++ ret = -ENOMEM;
+ new_hash = alloc_ftrace_hash(bits);
+ if (!new_hash)
+- return -ENOMEM;
++ goto out;
- /*
- * We need to WBINVD on a core on the node containing the L3
-@@ -434,7 +406,7 @@ static void amd_l3_disable_index(struct
- wbinvd_on_cpu(cpu);
+ size = 1 << src->size_bits;
+ for (i = 0; i < size; i++) {
+@@ -1236,7 +1250,16 @@ ftrace_hash_move(struct ftrace_hash **ds
+ rcu_assign_pointer(*dst, new_hash);
+ free_ftrace_hash_rcu(old_hash);
- reg |= BIT(31);
-- pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
-+ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
- }
+- return 0;
++ ret = 0;
++ out:
++ /*
++ * Enable regardless of ret:
++ * On success, we enable the new hash.
++ * On failure, we re-enable the original hash.
++ */
++ ftrace_hash_rec_enable(ops, enable);
++
++ return ret;
}
-@@ -448,24 +420,24 @@ static void amd_l3_disable_index(struct
- *
- * @return: 0 on success, error status on failure
- */
--int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
-+int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
- unsigned long index)
- {
- int ret = 0;
-
- /* check if @slot is already used or the index is already disabled */
-- ret = amd_get_l3_disable_slot(l3, slot);
-+ ret = amd_get_l3_disable_slot(nb, slot);
- if (ret >= 0)
- return -EINVAL;
-
-- if (index > l3->indices)
-+ if (index > nb->l3_cache.indices)
- return -EINVAL;
-
- /* check whether the other slot has disabled the same index already */
-- if (index == amd_get_l3_disable_slot(l3, !slot))
-+ if (index == amd_get_l3_disable_slot(nb, !slot))
- return -EINVAL;
-
-- amd_l3_disable_index(l3, cpu, slot, index);
-+ amd_l3_disable_index(nb, cpu, slot, index);
-
- return 0;
- }
-@@ -480,8 +452,7 @@ static ssize_t store_cache_disable(struc
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ /*
+@@ -2857,7 +2880,7 @@ ftrace_set_regex(struct ftrace_ops *ops,
+ ftrace_match_records(hash, buf, len);
-- if (!this_leaf->l3 ||
-- !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
-+ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
- return -EINVAL;
+ mutex_lock(&ftrace_lock);
+- ret = ftrace_hash_move(orig_hash, hash);
++ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+ mutex_unlock(&ftrace_lock);
- cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
-@@ -489,7 +460,7 @@ static ssize_t store_cache_disable(struc
- if (strict_strtoul(buf, 10, &val) < 0)
- return -EINVAL;
+ mutex_unlock(&ftrace_regex_lock);
+@@ -3040,18 +3063,12 @@ ftrace_regex_release(struct inode *inode
+ orig_hash = &iter->ops->notrace_hash;
-- err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
-+ err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
- if (err) {
- if (err == -EEXIST)
- printk(KERN_WARNING "L3 disable slot %d in use!\n",
-@@ -518,7 +489,7 @@ static struct _cache_attr cache_disable_
- static ssize_t
- show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
- {
-- if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-+ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- return -EINVAL;
+ mutex_lock(&ftrace_lock);
+- /*
+- * Remove the current set, update the hash and add
+- * them back.
+- */
+- ftrace_hash_rec_disable(iter->ops, filter_hash);
+- ret = ftrace_hash_move(orig_hash, iter->hash);
+- if (!ret) {
+- ftrace_hash_rec_enable(iter->ops, filter_hash);
+- if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
+- && ftrace_enabled)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+- }
++ ret = ftrace_hash_move(iter->ops, filter_hash,
++ orig_hash, iter->hash);
++ if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
++ && ftrace_enabled)
++ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++
+ mutex_unlock(&ftrace_lock);
+ }
+ free_ftrace_hash(iter->hash);
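
The ftrace change above folds the disable/update/re-enable sequence that ftrace_regex_release() used to open-code into ftrace_hash_move() itself, and makes the error path symmetric: the per-record accounting is torn down before the swap and re-established afterwards for whichever hash ended up live. A self-contained sketch of that idiom, with generic names rather than the kernel functions:

    #include <stdlib.h>

    struct set { int nentries; };

    static void rec_disable(struct set *s) { (void)s; /* drop per-entry refs */ }
    static void rec_enable(struct set *s)  { (void)s; /* re-add per-entry refs */ }

    static int set_replace(struct set **live, const struct set *src)
    {
            struct set *new;
            int ret = -1;

            rec_disable(*live);             /* step 1: tear down old accounting */
            new = malloc(sizeof(*new));
            if (!new)
                    goto out;               /* old set stays live on failure */
            new->nentries = src->nentries;
            free(*live);
            *live = new;                    /* step 2: publish the new set */
            ret = 0;
    out:
            rec_enable(*live);              /* step 3: account whichever set is live */
            return ret;
    }
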
+Index: linux-2.6/drivers/block/floppy.c
+===================================================================
+--- linux-2.6.orig/drivers/block/floppy.c
++++ linux-2.6/drivers/block/floppy.c
+@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
+ use_virtual_dma = can_use_virtual_dma & 1;
+ fdc_state[0].address = FDC1;
+ if (fdc_state[0].address == -1) {
+- del_timer(&fd_timeout);
++ del_timer_sync(&fd_timeout);
+ err = -ENODEV;
+ goto out_unreg_region;
+ }
+@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
+ fdc = 0; /* reset fdc in case of unexpected interrupt */
+ err = floppy_grab_irq_and_dma();
+ if (err) {
+- del_timer(&fd_timeout);
++ del_timer_sync(&fd_timeout);
+ err = -EBUSY;
+ goto out_unreg_region;
+ }
+@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
+ user_reset_fdc(-1, FD_RESET_ALWAYS, false);
+ }
+ fdc = 0;
+- del_timer(&fd_timeout);
++ del_timer_sync(&fd_timeout);
+ current_drive = 0;
+ initialized = true;
+ if (have_no_fdc) {
+@@ -4368,7 +4368,7 @@ out_unreg_blkdev:
+ unregister_blkdev(FLOPPY_MAJOR, "fd");
+ out_put_disk:
+ while (dr--) {
+- del_timer(&motor_off_timer[dr]);
++ del_timer_sync(&motor_off_timer[dr]);
+ if (disks[dr]->queue)
+ blk_cleanup_queue(disks[dr]->queue);
+ put_disk(disks[dr]);
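
Each floppy hunk makes the same substitution: del_timer() only deactivates a pending timer and can return while the handler is still running on another CPU (or, once timers run threaded, in a timer thread), whereas del_timer_sync() also waits for a running handler to finish. These init-failure paths need that guarantee before the driver's state goes away. An illustrative fragment of the difference, not buildable on its own:

    del_timer(&fd_timeout);       /* timer dequeued, handler may be mid-flight */
    /* tearing down state here can race with the handler */

    del_timer_sync(&fd_timeout);  /* returns only once the handler has finished */
    /* teardown below this point cannot race with it */
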
+Index: linux-2.6/drivers/gpu/drm/drm_irq.c
+===================================================================
+--- linux-2.6.orig/drivers/gpu/drm/drm_irq.c
++++ linux-2.6/drivers/gpu/drm/drm_irq.c
+@@ -109,10 +109,7 @@ static void vblank_disable_and_save(stru
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irq's.
+- * Disable preemption, so vblank_time_lock is held as short as
+- * possible, even under a kernel with PREEMPT_RT patches.
+ */
+- preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
- return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
-@@ -533,7 +504,7 @@ store_subcaches(struct _cpuid4_info *thi
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ dev->driver->disable_vblank(dev, crtc);
+@@ -163,7 +160,6 @@ static void vblank_disable_and_save(stru
+ clear_vblank_timestamps(dev, crtc);
-- if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
-+ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
- return -EINVAL;
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+- preempt_enable();
+ }
- if (strict_strtoul(buf, 16, &val) < 0)
-@@ -769,7 +740,7 @@ static void __cpuinit cache_shared_cpu_m
+ static void vblank_disable_fn(unsigned long arg)
+@@ -875,10 +871,6 @@ int drm_vblank_get(struct drm_device *de
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /* Going from 0->1 means we have to enable interrupts again */
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+- /* Disable preemption while holding vblank_time_lock. Do
+- * it explicitely to guard against PREEMPT_RT kernel.
+- */
+- preempt_disable();
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
+ if (!dev->vblank_enabled[crtc]) {
+ /* Enable vblank irqs under vblank_time_lock protection.
+@@ -898,7 +890,6 @@ int drm_vblank_get(struct drm_device *de
+ }
+ }
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+- preempt_enable();
+ } else {
+ if (!dev->vblank_enabled[crtc]) {
+ atomic_dec(&dev->vblank_refcount[crtc]);
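
The drm_irq hunks drop an explicit preempt_disable()/preempt_enable() pair that had been added as a PREEMPT_RT precaution around vblank_time_lock. With this -rt series the precaution inverts into a bug: spinlocks become sleeping locks, and sleeping with preemption disabled is forbidden, while on a mainline build spin_lock_irqsave() already disables preemption by itself. The locking therefore reduces to the plain form:

    spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
    /* ... vblank counter/timestamp update ... */
    spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
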
+Index: linux-2.6/arch/x86/kernel/kprobes.c
+===================================================================
+--- linux-2.6.orig/arch/x86/kernel/kprobes.c
++++ linux-2.6/arch/x86/kernel/kprobes.c
+@@ -475,7 +475,6 @@ static void __kprobes setup_singlestep(s
+ * stepping.
+ */
+ regs->ip = (unsigned long)p->ainsn.insn;
+- preempt_enable_no_resched();
return;
}
- this_leaf = CPUID4_INFO_IDX(cpu, index);
-- num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
-+ num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
+ #endif
+Index: linux-2.6/drivers/ide/ide_platform.c
+===================================================================
+--- linux-2.6.orig/drivers/ide/ide_platform.c
++++ linux-2.6/drivers/ide/ide_platform.c
+@@ -95,7 +95,7 @@ static int __devinit plat_ide_probe(stru
+ plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
+ hw.dev = &pdev->dev;
- if (num_threads_sharing == 1)
- cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
-@@ -820,29 +791,19 @@ static void __cpuinit free_cache_attribu
- for (i = 0; i < num_cache_leaves; i++)
- cache_remove_shared_cpu_map(cpu, i);
+- d.irq_flags = res_irq->flags;
++ d.irq_flags = 0;
+ if (mmio)
+ d.host_flags |= IDE_HFLAG_MMIO;
-- kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
- kfree(per_cpu(ici_cpuid4_info, cpu));
- per_cpu(ici_cpuid4_info, cpu) = NULL;
+Index: linux-2.6/kernel/sched.c
+===================================================================
+--- linux-2.6.orig/kernel/sched.c
++++ linux-2.6/kernel/sched.c
+@@ -185,6 +185,7 @@ void init_rt_bandwidth(struct rt_bandwid
+
+ hrtimer_init(&rt_b->rt_period_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rt_b->rt_period_timer.irqsafe = 1;
+ rt_b->rt_period_timer.function = sched_rt_period_timer;
}
--static int
--__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
--{
-- struct _cpuid4_info_regs *leaf_regs =
-- (struct _cpuid4_info_regs *)this_leaf;
--
-- return cpuid4_cache_lookup_regs(index, leaf_regs);
--}
--
- static void __cpuinit get_cpu_leaves(void *_retval)
- {
- int j, *retval = _retval, cpu = smp_processor_id();
+@@ -800,7 +801,11 @@ late_initcall(sched_init_debug);
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ const_debug unsigned int sysctl_sched_nr_migrate = 32;
++#else
++const_debug unsigned int sysctl_sched_nr_migrate = 8;
++#endif
- /* Do cpuid and store the results */
- for (j = 0; j < num_cache_leaves; j++) {
-- struct _cpuid4_info *this_leaf;
-- this_leaf = CPUID4_INFO_IDX(cpu, j);
-- *retval = cpuid4_cache_lookup(j, this_leaf);
-+ struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
-+
-+ *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
- if (unlikely(*retval < 0)) {
- int i;
+ /*
+ * period over which we average the RT time consumption, measured
+@@ -1136,6 +1141,7 @@ static void init_rq_hrtick(struct rq *rq
-@@ -900,16 +861,16 @@ static ssize_t show_##file_name(struct _
- return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->hrtick_timer.function = hrtick;
++ rq->hrtick_timer.irqsafe = 1;
}
+ #else /* CONFIG_SCHED_HRTICK */
+ static inline void hrtick_clear(struct rq *rq)
+@@ -2378,11 +2384,11 @@ static int select_fallback_rq(int cpu, s
--show_one_plus(level, eax.split.level, 0);
--show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
--show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
--show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
--show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
-+show_one_plus(level, base.eax.split.level, 0);
-+show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
-+show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
-+show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
-+show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
+ /* Look for allowed, online CPU in same node. */
+ for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
++ if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+ return dest_cpu;
- static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int cpu)
+ /* Any allowed, online CPU? */
+- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
++ dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
+ if (dest_cpu < nr_cpu_ids)
+ return dest_cpu;
+
+@@ -2419,7 +2425,7 @@ int select_task_rq(struct task_struct *p
+ * [ this allows ->select_task() to simply return task_cpu(p) and
+ * not worry about this generic constraint ]
+ */
+- if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
++ if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
+ !cpu_online(cpu)))
+ cpu = select_fallback_rq(task_cpu(p), p);
+
+@@ -2477,10 +2483,6 @@ static void ttwu_activate(struct rq *rq,
{
-- return sprintf(buf, "%luK\n", this_leaf->size / 1024);
-+ return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
+ activate_task(rq, p, en_flags);
+ p->on_rq = 1;
+-
+- /* if a worker is waking up, notify workqueue */
+- if (p->flags & PF_WQ_WORKER)
+- wq_worker_waking_up(p, cpu_of(rq));
}
- static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
-@@ -946,7 +907,7 @@ static inline ssize_t show_shared_cpu_li
- static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
- unsigned int cpu)
- {
-- switch (this_leaf->eax.split.type) {
-+ switch (this_leaf->base.eax.split.type) {
- case CACHE_TYPE_DATA:
- return sprintf(buf, "Data\n");
- case CACHE_TYPE_INST:
-@@ -1135,7 +1096,7 @@ static int __cpuinit cache_add_dev(struc
+ /*
+@@ -2678,8 +2680,25 @@ try_to_wake_up(struct task_struct *p, un
- ktype_cache.default_attrs = default_attrs;
- #ifdef CONFIG_AMD_NB
-- if (this_leaf->l3)
-+ if (this_leaf->base.nb)
- ktype_cache.default_attrs = amd_l3_attrs();
- #endif
- retval = kobject_init_and_add(&(this_object->kobj),
-Index: linux-2.6/arch/x86/include/asm/amd_nb.h
-===================================================================
---- linux-2.6.orig/arch/x86/include/asm/amd_nb.h
-+++ linux-2.6/arch/x86/include/asm/amd_nb.h
-@@ -19,9 +19,15 @@ extern int amd_numa_init(void);
- extern int amd_get_subcaches(int);
- extern int amd_set_subcaches(int, int);
-
-+struct amd_l3_cache {
-+ unsigned indices;
-+ u8 subcaches[4];
-+};
-+
- struct amd_northbridge {
- struct pci_dev *misc;
- struct pci_dev *link;
-+ struct amd_l3_cache l3_cache;
- };
-
- struct amd_northbridge_info {
-Index: linux-2.6/arch/mips/sibyte/sb1250/irq.c
-===================================================================
---- linux-2.6.orig/arch/mips/sibyte/sb1250/irq.c
-+++ linux-2.6/arch/mips/sibyte/sb1250/irq.c
-@@ -178,7 +178,7 @@ static void ack_sb1250_irq(struct irq_da
-
- static struct irq_chip sb1250_irq_type = {
- .name = "SB1250-IMR",
-- .irq_mask_ack = ack_sb1250_irq,
-+ .irq_mask = ack_sb1250_irq,
- .irq_unmask = enable_sb1250_irq,
- #ifdef CONFIG_SMP
- .irq_set_affinity = sb1250_set_affinity
-Index: linux-2.6/arch/mips/kernel/ftrace.c
-===================================================================
---- linux-2.6.orig/arch/mips/kernel/ftrace.c
-+++ linux-2.6/arch/mips/kernel/ftrace.c
-@@ -19,6 +19,26 @@
-
- #include <asm-generic/sections.h>
-
-+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-+#define MCOUNT_OFFSET_INSNS 5
-+#else
-+#define MCOUNT_OFFSET_INSNS 4
-+#endif
-+
-+/*
-+ * Check if the address is in kernel space
-+ *
-+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
-+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
-+ */
-+static inline int in_kernel_space(unsigned long ip)
-+{
-+ if (ip >= (unsigned long)_stext &&
-+ ip <= (unsigned long)_etext)
-+ return 1;
-+ return 0;
-+}
+ smp_wmb();
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+- if (!(p->state & state))
++ if (!(p->state & state)) {
++ /*
++ * The task might be running due to a spinlock sleeper
++ * wakeup. Check the saved state and set it to running
++ * if the wakeup condition is true.
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER)) {
++ if (p->saved_state & state)
++ p->saved_state = TASK_RUNNING;
++ }
+ goto out;
++ }
+
- #ifdef CONFIG_DYNAMIC_FTRACE
++ /*
++ * If this is a regular wakeup, then we can unconditionally
++ * clear the saved state of a "lock sleeper".
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER))
++ p->saved_state = TASK_RUNNING;
- #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
-@@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_
- #endif
+ success = 1; /* we're going to change ->state */
+ cpu = task_cpu(p);
+@@ -2735,40 +2754,6 @@ out:
}
--/*
-- * Check if the address is in kernel space
+ /**
+- * try_to_wake_up_local - try to wake up a local task with rq lock held
+- * @p: the thread to be awakened
- *
-- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
-- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+- * Put @p on the run-queue if it's not already there. The caller must
+- * ensure that this_rq() is locked, @p is bound to this_rq() and not
+- * the current task.
- */
--static inline int in_kernel_space(unsigned long ip)
+-static void try_to_wake_up_local(struct task_struct *p)
-{
-- if (ip >= (unsigned long)_stext &&
-- ip <= (unsigned long)_etext)
-- return 1;
-- return 0;
+- struct rq *rq = task_rq(p);
+-
+- BUG_ON(rq != this_rq());
+- BUG_ON(p == current);
+- lockdep_assert_held(&rq->lock);
+-
+- if (!raw_spin_trylock(&p->pi_lock)) {
+- raw_spin_unlock(&rq->lock);
+- raw_spin_lock(&p->pi_lock);
+- raw_spin_lock(&rq->lock);
+- }
+-
+- if (!(p->state & TASK_NORMAL))
+- goto out;
+-
+- if (!p->on_rq)
+- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+-
+- ttwu_do_wakeup(rq, p, 0);
+- ttwu_stat(p, smp_processor_id(), 0);
+-out:
+- raw_spin_unlock(&p->pi_lock);
-}
-
- static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
+-/**
+ * wake_up_process - Wake up a specific process
+ * @p: The process to be woken up.
+ *
+@@ -2785,6 +2770,18 @@ int wake_up_process(struct task_struct *
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
++/**
++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
++ * @p: The process to be woken up.
++ *
++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
++ * the nature of the wakeup.
++ */
++int wake_up_lock_sleeper(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
++}
++
+ int wake_up_state(struct task_struct *p, unsigned int state)
{
- int faulted;
-@@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned l
- * 1: offset = 4 instructions
- */
+ return try_to_wake_up(p, state, 0);
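
The try_to_wake_up() hunks above introduce a second state word: a task blocked on an rtmutex-based "sleeping spinlock" keeps its original sleep state in saved_state, and a regular wakeup that arrives during the lock sleep must not be lost, so it promotes saved_state to TASK_RUNNING; the lock's own wakeup passes WF_LOCK_SLEEPER and leaves saved_state alone. A condensed, lock-free sketch of just that state logic, simplified and not the kernel code:

    #define TASK_RUNNING          0
    #define TASK_INTERRUPTIBLE    1
    #define TASK_UNINTERRUPTIBLE  2

    struct task {
            int state;       /* what the scheduler currently sees */
            int saved_state; /* state saved across a sleeping-lock sleep */
    };

    static int wake(struct task *p, int state, int lock_sleeper)
    {
            if (!(p->state & state)) {
                    /* regular wakeup racing with a lock sleep: record it */
                    if (!lock_sleeper && (p->saved_state & state))
                            p->saved_state = TASK_RUNNING;
                    return 0;
            }
            if (!lock_sleeper)        /* regular wakeup clears any saved state */
                    p->saved_state = TASK_RUNNING;
            p->state = TASK_RUNNING;
            return 1;
    }
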
+@@ -2825,7 +2822,7 @@ static void __sched_fork(struct task_str
+ void sched_fork(struct task_struct *p)
+ {
+ unsigned long flags;
+- int cpu = get_cpu();
++ int cpu;
--#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
--#define MCOUNT_OFFSET_INSNS 5
--#else
--#define MCOUNT_OFFSET_INSNS 4
--#endif
- #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+ __sched_fork(p);
+ /*
+@@ -2865,6 +2862,7 @@ void sched_fork(struct task_struct *p)
+ if (!rt_prio(p->prio))
+ p->sched_class = &fair_sched_class;
- int ftrace_make_nop(struct module *mod,
-Index: linux-2.6/arch/mips/loongson/fuloong-2e/irq.c
-===================================================================
---- linux-2.6.orig/arch/mips/loongson/fuloong-2e/irq.c
-+++ linux-2.6/arch/mips/loongson/fuloong-2e/irq.c
-@@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsign
- static struct irqaction cascade_irqaction = {
- .handler = no_action,
- .name = "cascade",
-+ .flags = IRQF_NO_THREAD,
- };
++ cpu = get_cpu();
+ if (p->sched_class->task_fork)
+ p->sched_class->task_fork(p);
- void __init mach_init_irq(void)
-Index: linux-2.6/arch/mips/loongson/lemote-2f/irq.c
-===================================================================
---- linux-2.6.orig/arch/mips/loongson/lemote-2f/irq.c
-+++ linux-2.6/arch/mips/loongson/lemote-2f/irq.c
-@@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, v
- struct irqaction ip6_irqaction = {
- .handler = ip6_action,
- .name = "cascade",
-- .flags = IRQF_SHARED,
-+ .flags = IRQF_SHARED | IRQF_NO_THREAD,
- };
+@@ -2876,8 +2874,9 @@ void sched_fork(struct task_struct *p)
+ * Silence PROVE_RCU.
+ */
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+- set_task_cpu(p, cpu);
++ set_task_cpu(p, smp_processor_id());
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++ put_cpu();
- struct irqaction cascade_irqaction = {
- .handler = no_action,
- .name = "cascade",
-+ .flags = IRQF_NO_THREAD,
- };
+ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+ if (likely(sched_info_on()))
+@@ -2893,8 +2892,6 @@ void sched_fork(struct task_struct *p)
+ #ifdef CONFIG_SMP
+ plist_node_init(&p->pushable_tasks, MAX_PRIO);
+ #endif
+-
+- put_cpu();
+ }
- void __init mach_init_irq(void)
-Index: linux-2.6/arch/mips/ar7/irq.c
-===================================================================
---- linux-2.6.orig/arch/mips/ar7/irq.c
-+++ linux-2.6/arch/mips/ar7/irq.c
-@@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type
+ /*
+@@ -3060,8 +3057,12 @@ static void finish_task_switch(struct rq
+ finish_lock_switch(rq, prev);
- static struct irqaction ar7_cascade_action = {
- .handler = no_action,
-- .name = "AR7 cascade interrupt"
-+ .name = "AR7 cascade interrupt",
-+ .flags = IRQF_NO_THREAD,
- };
+ fire_sched_in_preempt_notifiers(current);
++ /*
++ * We use mmdrop_delayed() here so we don't have to do the
++ * full __mmdrop() when we are the last user.
++ */
+ if (mm)
+- mmdrop(mm);
++ mmdrop_delayed(mm);
+ if (unlikely(prev_state == TASK_DEAD)) {
+ /*
+ * Remove function-return probe instances associated with this
+@@ -4242,9 +4243,9 @@ pick_next_task(struct rq *rq)
+ }
- static void __init ar7_irq_init(int base)
-Index: linux-2.6/arch/mips/bcm63xx/irq.c
-===================================================================
---- linux-2.6.orig/arch/mips/bcm63xx/irq.c
-+++ linux-2.6/arch/mips/bcm63xx/irq.c
-@@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_
- static struct irqaction cpu_ip2_cascade_action = {
- .handler = no_action,
- .name = "cascade_ip2",
-+ .flags = IRQF_NO_THREAD,
- };
+ /*
+- * schedule() is the main scheduler function.
++ * __schedule() is the main scheduler function.
+ */
+-asmlinkage void __sched schedule(void)
++static void __sched __schedule(void)
+ {
+ struct task_struct *prev, *next;
+ unsigned long *switch_count;
+@@ -4272,29 +4273,6 @@ need_resched:
+ } else {
+ deactivate_task(rq, prev, DEQUEUE_SLEEP);
+ prev->on_rq = 0;
+-
+- /*
+- * If a worker went to sleep, notify and ask workqueue
+- * whether it wants to wake up a task to maintain
+- * concurrency.
+- */
+- if (prev->flags & PF_WQ_WORKER) {
+- struct task_struct *to_wakeup;
+-
+- to_wakeup = wq_worker_sleeping(prev, cpu);
+- if (to_wakeup)
+- try_to_wake_up_local(to_wakeup);
+- }
+-
+- /*
+- * If we are going to sleep and we have plugged IO
+- * queued, make sure to submit it to avoid deadlocks.
+- */
+- if (blk_needs_flush_plug(prev)) {
+- raw_spin_unlock(&rq->lock);
+- blk_schedule_flush_plug(prev);
+- raw_spin_lock(&rq->lock);
+- }
+ }
+ switch_count = &prev->nvcsw;
+ }
+@@ -4328,12 +4306,62 @@ need_resched:
- void __init arch_init_irq(void)
-Index: linux-2.6/arch/mips/cobalt/irq.c
-===================================================================
---- linux-2.6.orig/arch/mips/cobalt/irq.c
-+++ linux-2.6/arch/mips/cobalt/irq.c
-@@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void)
- static struct irqaction cascade = {
- .handler = no_action,
- .name = "cascade",
-+ .flags = IRQF_NO_THREAD,
- };
+ post_schedule(rq);
- void __init arch_init_irq(void)
-Index: linux-2.6/arch/mips/dec/setup.c
-===================================================================
---- linux-2.6.orig/arch/mips/dec/setup.c
-+++ linux-2.6/arch/mips/dec/setup.c
-@@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_
- static struct irqaction ioirq = {
- .handler = no_action,
- .name = "cascade",
-+ .flags = IRQF_NO_THREAD,
- };
- static struct irqaction fpuirq = {
- .handler = no_action,
- .name = "fpu",
-+ .flags = IRQF_NO_THREAD,
- };
+- preempt_enable_no_resched();
++ __preempt_enable_no_resched();
+ if (need_resched())
+ goto need_resched;
+ }
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++ if (!tsk->state || tsk_is_pi_blocked(tsk))
++ return;
++
++ /*
++ * If a worker went to sleep, notify and ask workqueue whether
++ * it wants to wake up a task to maintain concurrency.
++ */
++ if (tsk->flags & PF_WQ_WORKER)
++ wq_worker_sleeping(tsk);
++
++ /*
++ * If we are going to sleep and we have plugged IO queued,
++ * make sure to submit it to avoid deadlocks.
++ */
++ if (blk_needs_flush_plug(tsk))
++ blk_schedule_flush_plug(tsk);
++}
++
++static inline void sched_update_worker(struct task_struct *tsk)
++{
++ if (tsk_is_pi_blocked(tsk))
++ return;
++
++ if (tsk->flags & PF_WQ_WORKER)
++ wq_worker_running(tsk);
++}
++
++asmlinkage void schedule(void)
++{
++ struct task_struct *tsk = current;
++
++ sched_submit_work(tsk);
++ __schedule();
++ sched_update_worker(tsk);
++}
+ EXPORT_SYMBOL(schedule);
- static struct irqaction busirq = {
- .flags = IRQF_DISABLED,
- .name = "bus error",
-+ .flags = IRQF_NO_THREAD,
- };
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++ __preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++}
++
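
schedule_preempt_disabled() above gives a name to a sequence that idle and hotplug paths previously open-coded. Roughly, caller-side usage shrinks from the three-line pattern to a single call:

    /* before */
    preempt_enable_no_resched();
    schedule();
    preempt_disable();

    /* after */
    schedule_preempt_disabled();
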
+ #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- static struct irqaction haltirq = {
- .handler = dec_intr_halt,
- .name = "halt",
-+ .flags = IRQF_NO_THREAD,
- };
+ static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+@@ -4405,7 +4433,7 @@ asmlinkage void __sched notrace preempt_
+ do {
+ add_preempt_count_notrace(PREEMPT_ACTIVE);
+- schedule();
++ __schedule();
+ sub_preempt_count_notrace(PREEMPT_ACTIVE);
-Index: linux-2.6/arch/mips/emma/markeins/irq.c
-===================================================================
---- linux-2.6.orig/arch/mips/emma/markeins/irq.c
-+++ linux-2.6/arch/mips/emma/markeins/irq.c
-@@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void)
+ /*
+@@ -4433,7 +4461,7 @@ asmlinkage void __sched preempt_schedule
+ do {
+ add_preempt_count(PREEMPT_ACTIVE);
+ local_irq_enable();
+- schedule();
++ __schedule();
+ local_irq_disable();
+ sub_preempt_count(PREEMPT_ACTIVE);
- static struct irqaction irq_cascade = {
- .handler = no_action,
-- .flags = 0,
-+ .flags = IRQF_NO_THREAD,
- .name = "cascade",
- .dev_id = NULL,
- .next = NULL,
-Index: linux-2.6/arch/mips/lasat/interrupt.c
-===================================================================
---- linux-2.6.orig/arch/mips/lasat/interrupt.c
-+++ linux-2.6/arch/mips/lasat/interrupt.c
-@@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void)
- static struct irqaction cascade = {
- .handler = no_action,
- .name = "cascade",
-+ .flags = IRQF_NO_THREAD,
- };
+@@ -4828,9 +4856,8 @@ long __sched sleep_on_timeout(wait_queue
+ EXPORT_SYMBOL(sleep_on_timeout);
- void __init arch_init_irq(void)
-Index: linux-2.6/arch/mips/mti-malta/malta-int.c
-===================================================================
---- linux-2.6.orig/arch/mips/mti-malta/malta-int.c
-+++ linux-2.6/arch/mips/mti-malta/malta-int.c
-@@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(
+ #ifdef CONFIG_RT_MUTEXES
+-
+ /*
+- * rt_mutex_setprio - set the current priority of a task
++ * task_setprio - set the current priority of a task
+ * @p: task
+ * @prio: prio value (kernel-internal form)
+ *
+@@ -4839,7 +4866,7 @@ EXPORT_SYMBOL(sleep_on_timeout);
+ *
+ * Used by the rt_mutex code to implement priority inheritance logic.
+ */
+-void rt_mutex_setprio(struct task_struct *p, int prio)
++void task_setprio(struct task_struct *p, int prio)
+ {
+ int oldprio, on_rq, running;
+ struct rq *rq;
+@@ -4849,6 +4876,24 @@ void rt_mutex_setprio(struct task_struct
- static struct irqaction i8259irq = {
- .handler = no_action,
-- .name = "XT-PIC cascade"
-+ .name = "XT-PIC cascade",
-+ .flags = IRQF_NO_THREAD,
- };
+ rq = __task_rq_lock(p);
- static struct irqaction corehi_irqaction = {
- .handler = no_action,
-- .name = "CoreHi"
-+ .name = "CoreHi",
-+ .flags = IRQF_NO_THREAD,
- };
++ /*
++ * Idle task boosting is a nono in general. There is one
++ * exception, when PREEMPT_RT and NOHZ is active:
++ *
++ * The idle task calls get_next_timer_interrupt() and holds
++ * the timer wheel base->lock on the CPU and another CPU wants
++ * to access the timer (probably to cancel it). We can safely
++ * ignore the boosting request, as the idle CPU runs this code
++ * with interrupts disabled and will complete the lock
++ * protected section without being interrupted. So there is no
++ * real need to boost.
++ */
++ if (unlikely(p == rq->idle)) {
++ WARN_ON(p != rq->curr);
++ WARN_ON(p->pi_blocked_on);
++ goto out_unlock;
++ }
++
+ trace_sched_pi_setprio(p, prio);
+ oldprio = p->prio;
+ prev_class = p->sched_class;
+@@ -4872,9 +4917,9 @@ void rt_mutex_setprio(struct task_struct
+ enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
- static msc_irqmap_t __initdata msc_irqmap[] = {
-Index: linux-2.6/arch/mips/pmc-sierra/msp71xx/msp_irq.c
-===================================================================
---- linux-2.6.orig/arch/mips/pmc-sierra/msp71xx/msp_irq.c
-+++ linux-2.6/arch/mips/pmc-sierra/msp71xx/msp_irq.c
-@@ -109,11 +109,13 @@ asmlinkage void plat_irq_dispatch(struct
- static struct irqaction cic_cascade_msp = {
- .handler = no_action,
- .name = "MSP CIC cascade"
-+ .flags = IRQF_NO_THREAD,
- };
+ check_class_changed(rq, p, prev_class, oldprio);
++out_unlock:
+ __task_rq_unlock(rq);
+ }
+-
+ #endif
- static struct irqaction per_cascade_msp = {
- .handler = no_action,
- .name = "MSP PER cascade"
-+ .flags = IRQF_NO_THREAD,
- };
-
- void __init arch_init_irq(void)
-Index: linux-2.6/arch/mips/pnx8550/common/int.c
-===================================================================
---- linux-2.6.orig/arch/mips/pnx8550/common/int.c
-+++ linux-2.6/arch/mips/pnx8550/common/int.c
-@@ -167,7 +167,7 @@ static struct irq_chip level_irq_type =
-
- static struct irqaction gic_action = {
- .handler = no_action,
-- .flags = IRQF_DISABLED,
-+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
- .name = "GIC",
- };
-
-Index: linux-2.6/arch/mips/sgi-ip22/ip22-int.c
-===================================================================
---- linux-2.6.orig/arch/mips/sgi-ip22/ip22-int.c
-+++ linux-2.6/arch/mips/sgi-ip22/ip22-int.c
-@@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_ir
-
- static struct irqaction local0_cascade = {
- .handler = no_action,
-- .flags = IRQF_DISABLED,
-+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
- .name = "local0 cascade",
- };
-
- static struct irqaction local1_cascade = {
- .handler = no_action,
-- .flags = IRQF_DISABLED,
-+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
- .name = "local1 cascade",
- };
-
- static struct irqaction buserr = {
- .handler = no_action,
-- .flags = IRQF_DISABLED,
-+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
- .name = "Bus Error",
- };
-
- static struct irqaction map0_cascade = {
- .handler = no_action,
-- .flags = IRQF_DISABLED,
-+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
- .name = "mapable0 cascade",
- };
-
- #ifdef USE_LIO3_IRQ
- static struct irqaction map1_cascade = {
- .handler = no_action,
-- .flags = IRQF_DISABLED,
-+ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
- .name = "mapable1 cascade",
- };
- #define SGI_INTERRUPTS SGINT_END
-Index: linux-2.6/arch/mips/sni/rm200.c
-===================================================================
---- linux-2.6.orig/arch/mips/sni/rm200.c
-+++ linux-2.6/arch/mips/sni/rm200.c
-@@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void)
- static struct irqaction sni_rm200_irq2 = {
- .handler = no_action,
- .name = "cascade",
-+ .flags = IRQF_NO_THREAD,
- };
-
- static struct resource sni_rm200_pic1_resource = {
-Index: linux-2.6/arch/mips/vr41xx/common/irq.c
-===================================================================
---- linux-2.6.orig/arch/mips/vr41xx/common/irq.c
-+++ linux-2.6/arch/mips/vr41xx/common/irq.c
-@@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS
- static struct irqaction cascade_irqaction = {
- .handler = no_action,
- .name = "cascade",
-+ .flags = IRQF_NO_THREAD,
- };
-
- int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))
-Index: linux-2.6/arch/mips/Kconfig
-===================================================================
---- linux-2.6.orig/arch/mips/Kconfig
-+++ linux-2.6/arch/mips/Kconfig
-@@ -24,6 +24,7 @@ config MIPS
- select GENERIC_IRQ_PROBE
- select GENERIC_IRQ_SHOW
- select HAVE_ARCH_JUMP_LABEL
-+ select IRQ_FORCED_THREADING
+ void set_user_nice(struct task_struct *p, long nice)
+@@ -5543,7 +5588,7 @@ SYSCALL_DEFINE0(sched_yield)
+ __release(rq->lock);
+ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+ do_raw_spin_unlock(&rq->lock);
+- preempt_enable_no_resched();
++ __preempt_enable_no_resched();
- menu "Machine selection"
+ schedule();
-@@ -2038,7 +2039,7 @@ config CPU_R4400_WORKAROUNDS
- #
- config HIGHMEM
- bool "High Memory Support"
-- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
-+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL
+@@ -5557,9 +5602,17 @@ static inline int should_resched(void)
- config CPU_SUPPORTS_HIGHMEM
- bool
-Index: linux-2.6/arch/mips/kernel/traps.c
-===================================================================
---- linux-2.6.orig/arch/mips/kernel/traps.c
-+++ linux-2.6/arch/mips/kernel/traps.c
-@@ -364,7 +364,7 @@ static int regs_to_trapnr(struct pt_regs
- return (regs->cp0_cause >> 2) & 0x1f;
+ static void __cond_resched(void)
+ {
+- add_preempt_count(PREEMPT_ACTIVE);
+- schedule();
+- sub_preempt_count(PREEMPT_ACTIVE);
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++ __schedule();
++ sub_preempt_count(PREEMPT_ACTIVE);
++ /*
++ * Check again in case we missed a preemption
++ * opportunity between schedule and now.
++ */
++ barrier();
++
++ } while (need_resched());
}
--static DEFINE_SPINLOCK(die_lock);
-+static DEFINE_RAW_SPINLOCK(die_lock);
+ int __sched _cond_resched(void)
+@@ -5600,6 +5653,7 @@ int __cond_resched_lock(spinlock_t *lock
+ }
+ EXPORT_SYMBOL(__cond_resched_lock);
- void __noreturn die(const char *str, struct pt_regs *regs)
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int __sched __cond_resched_softirq(void)
{
-@@ -378,7 +378,7 @@ void __noreturn die(const char *str, str
- sig = 0;
-
- console_verbose();
-- spin_lock_irq(&die_lock);
-+ raw_spin_lock_irq(&die_lock);
- bust_spinlocks(1);
- #ifdef CONFIG_MIPS_MT_SMTC
- mips_mt_regdump(dvpret);
-@@ -387,7 +387,7 @@ void __noreturn die(const char *str, str
- printk("%s[#%d]:\n", str, ++die_counter);
- show_registers(regs);
- add_taint(TAINT_DIE);
-- spin_unlock_irq(&die_lock);
-+ raw_spin_unlock_irq(&die_lock);
-
- if (in_interrupt())
- panic("Fatal exception in interrupt");
-Index: linux-2.6/arch/mips/kernel/signal.c
-===================================================================
---- linux-2.6.orig/arch/mips/kernel/signal.c
-+++ linux-2.6/arch/mips/kernel/signal.c
-@@ -603,6 +603,9 @@ static void do_signal(struct pt_regs *re
- if (!user_mode(regs))
- return;
-
-+ local_irq_enable();
-+ preempt_check_resched();
-+
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- oldset = &current->saved_sigmask;
- else
-Index: linux-2.6/arch/arm/kernel/signal.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/signal.c
-+++ linux-2.6/arch/arm/kernel/signal.c
-@@ -673,6 +673,9 @@ static void do_signal(struct pt_regs *re
- if (!user_mode(regs))
- return;
+ BUG_ON(!in_softirq());
+@@ -5613,6 +5667,7 @@ int __sched __cond_resched_softirq(void)
+ return 0;
+ }
+ EXPORT_SYMBOL(__cond_resched_softirq);
++#endif
-+ local_irq_enable();
-+ preempt_check_resched();
-+
+ /**
+ * yield - yield the current processor to other threads.
+@@ -5859,7 +5914,7 @@ void show_state_filter(unsigned long sta
+ printk(KERN_INFO
+ " task PC stack pid father\n");
+ #endif
+- read_lock(&tasklist_lock);
++ rcu_read_lock();
+ do_each_thread(g, p) {
+ /*
+ * reset the NMI-timeout, listing all files on a slow
+@@ -5875,7 +5930,7 @@ void show_state_filter(unsigned long sta
+ #ifdef CONFIG_SCHED_DEBUG
+ sysrq_sched_debug_show();
+ #endif
+- read_unlock(&tasklist_lock);
++ rcu_read_unlock();
/*
- * If we were from a system call, check for system call restarting...
+ * Only show locks if all tasks are dumped:
*/
-Index: linux-2.6/kernel/time/clocksource.c
-===================================================================
---- linux-2.6.orig/kernel/time/clocksource.c
-+++ linux-2.6/kernel/time/clocksource.c
-@@ -186,6 +186,7 @@ static struct timer_list watchdog_timer;
- static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
- static DEFINE_SPINLOCK(watchdog_lock);
- static int watchdog_running;
-+static atomic_t watchdog_reset_pending;
+@@ -5997,12 +6052,12 @@ static inline void sched_init_granularit
+ #ifdef CONFIG_SMP
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- if (p->sched_class && p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, new_mask);
+- else {
+- cpumask_copy(&p->cpus_allowed, new_mask);
++ if (!__migrate_disabled(p)) {
++ if (p->sched_class && p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, new_mask);
+ p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+ }
++ cpumask_copy(&p->cpus_allowed, new_mask);
+ }
- static int clocksource_watchdog_kthread(void *data);
- static void __clocksource_change_rating(struct clocksource *cs, int rating);
-@@ -247,12 +248,14 @@ static void clocksource_watchdog(unsigne
- struct clocksource *cs;
- cycle_t csnow, wdnow;
- int64_t wd_nsec, cs_nsec;
-- int next_cpu;
-+ int next_cpu, reset_pending;
+ /*
+@@ -6053,7 +6108,7 @@ int set_cpus_allowed_ptr(struct task_str
+ do_set_cpus_allowed(p, new_mask);
- spin_lock(&watchdog_lock);
- if (!watchdog_running)
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask))
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
-+ reset_pending = atomic_read(&watchdog_reset_pending);
-+
- list_for_each_entry(cs, &watchdog_list, wd_list) {
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+@@ -6072,6 +6127,124 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
- /* Clocksource already marked unstable? */
-@@ -268,7 +271,8 @@ static void clocksource_watchdog(unsigne
- local_irq_enable();
-
- /* Clocksource initialized ? */
-- if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
-+ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
-+ atomic_read(&watchdog_reset_pending)) {
- cs->flags |= CLOCK_SOURCE_WATCHDOG;
- cs->wd_last = wdnow;
- cs->cs_last = csnow;
-@@ -283,8 +287,11 @@ static void clocksource_watchdog(unsigne
- cs->cs_last = csnow;
- cs->wd_last = wdnow;
-
-+ if (atomic_read(&watchdog_reset_pending))
-+ continue;
++#ifdef CONFIG_PREEMPT_RT_FULL
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++ const struct cpumask *mask;
++ unsigned long flags;
++ struct rq *rq;
+
- /* Check the deviation from the watchdog clocksource. */
-- if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
-+ if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
- clocksource_unstable(cs, cs_nsec - wd_nsec);
- continue;
- }
-@@ -303,6 +310,13 @@ static void clocksource_watchdog(unsigne
- }
-
- /*
-+ * We only clear the watchdog_reset_pending, when we did a
-+ * full cycle through all clocksources.
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++ preempt_disable();
++ if (p->migrate_disable) {
++ p->migrate_disable++;
++ preempt_enable();
++ return;
++ }
++
++ pin_current_cpu();
++ if (unlikely(!scheduler_running)) {
++ p->migrate_disable = 1;
++ preempt_enable();
++ return;
++ }
++
++ /*
++ * Since this is always current we can get away with only locking
++ * rq->lock, the ->cpus_allowed value can normally only be changed
++ * while holding both p->pi_lock and rq->lock, but seeing that this
++ * is current, we cannot actually be waking up, so all code that
++ * relies on serialization against p->pi_lock is out of scope.
++ *
++ * Taking rq->lock serializes us against things like
++ * set_cpus_allowed_ptr() that can still happen concurrently.
+ */
-+ if (reset_pending)
-+ atomic_dec(&watchdog_reset_pending);
++ rq = this_rq();
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ p->migrate_disable = 1;
++ mask = tsk_cpus_allowed(p);
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->rt.nr_cpus_allowed = cpumask_weight(mask);
++ }
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++ const struct cpumask *mask;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++
++ preempt_disable();
++ if (p->migrate_disable > 1) {
++ p->migrate_disable--;
++ preempt_enable();
++ return;
++ }
++
++ if (unlikely(!scheduler_running)) {
++ p->migrate_disable = 0;
++ unpin_current_cpu();
++ preempt_enable();
++ return;
++ }
+
+ /*
- * Cycle through CPUs to check if the CPUs stay synchronized
- * to each other.
- */
-@@ -344,23 +358,7 @@ static inline void clocksource_reset_wat
++ * See comment in migrate_disable().
++ */
++ rq = this_rq();
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ mask = tsk_cpus_allowed(p);
++ p->migrate_disable = 0;
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ /* Get the mask now that migration is enabled */
++ mask = tsk_cpus_allowed(p);
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->rt.nr_cpus_allowed = cpumask_weight(mask);
++ }
++
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ unpin_current_cpu();
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
++#endif /* CONFIG_PREEMPT_RT_FULL */
++
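
migrate_disable()/migrate_enable() above nest: only the outermost level pins the current CPU and narrows the affinity mask, inner levels just bump a counter, and enable undoes the pinning only when the count drops back to zero. A counter-only analogue of that nesting (the real code also handles atomic context and takes rq->lock):

    static __thread int migrate_disable_cnt;  /* per-task field in the kernel */

    static void my_migrate_disable(void)
    {
            if (migrate_disable_cnt++ == 0) {
                    /* outermost level: pin this CPU, shrink cpus_allowed */
            }
    }

    static void my_migrate_enable(void)
    {
            if (--migrate_disable_cnt == 0) {
                    /* outermost level: restore cpus_allowed, unpin the CPU */
            }
    }
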
+ /*
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
+@@ -6100,7 +6273,7 @@ static int __migrate_task(struct task_st
+ if (task_cpu(p) != src_cpu)
+ goto done;
+ /* Affinity changed (again). */
+- if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
++ if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+ goto fail;
- static void clocksource_resume_watchdog(void)
- {
-- unsigned long flags;
--
-- /*
-- * We use trylock here to avoid a potential dead lock when
-- * kgdb calls this code after the kernel has been stopped with
-- * watchdog_lock held. When watchdog_lock is held we just
-- * return and accept, that the watchdog might trigger and mark
-- * the monitored clock source (usually TSC) unstable.
-- *
-- * This does not affect the other caller clocksource_resume()
-- * because at this point the kernel is UP, interrupts are
-- * disabled and nothing can hold watchdog_lock.
-- */
-- if (!spin_trylock_irqsave(&watchdog_lock, flags))
-- return;
-- clocksource_reset_watchdog();
-- spin_unlock_irqrestore(&watchdog_lock, flags);
-+ atomic_inc(&watchdog_reset_pending);
- }
+ /*
+@@ -6142,6 +6315,8 @@ static int migration_cpu_stop(void *data
- static void clocksource_enqueue_watchdog(struct clocksource *cs)
-Index: linux-2.6/kernel/watchdog.c
-===================================================================
---- linux-2.6.orig/kernel/watchdog.c
-+++ linux-2.6/kernel/watchdog.c
-@@ -208,6 +208,8 @@ static struct perf_event_attr wd_hw_attr
- .disabled = 1,
- };
+ #ifdef CONFIG_HOTPLUG_CPU
-+static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
+
- /* Callback function for perf event subsystem */
- static void watchdog_overflow_callback(struct perf_event *event, int nmi,
- struct perf_sample_data *data,
-@@ -234,10 +236,19 @@ static void watchdog_overflow_callback(s
- if (__this_cpu_read(hard_watchdog_warn) == true)
- return;
+ /*
+ * Ensures that the idle task is using init_mm right before its cpu goes
+ * offline.
+@@ -6154,7 +6329,12 @@ void idle_task_exit(void)
-- if (hardlockup_panic)
-+ /*
-+ * If early-printk is enabled then make sure we do not
-+ * lock up in printk() and kill console logging:
-+ */
-+ printk_kill();
+ if (mm != &init_mm)
+ switch_mm(mm, &init_mm, current);
+- mmdrop(mm);
+
-+ if (hardlockup_panic) {
- panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-- else
-+ } else {
-+ raw_spin_lock(&watchdog_output_lock);
- WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-+ raw_spin_unlock(&watchdog_output_lock);
++ /*
++ * Defer the cleanup to an alive cpu. On RT we can neither
++ * call mmdrop() nor mmdrop_delayed() from here.
++ */
++ per_cpu(idle_last_mm, smp_processor_id()) = mm;
+ }
+
+ /*
+@@ -6472,6 +6652,12 @@ migration_call(struct notifier_block *nf
+ migrate_nr_uninterruptible(rq);
+ calc_global_load_remove(rq);
+ break;
++ case CPU_DEAD:
++ if (per_cpu(idle_last_mm, cpu)) {
++ mmdrop(per_cpu(idle_last_mm, cpu));
++ per_cpu(idle_last_mm, cpu) = NULL;
+ }
++ break;
+ #endif
+ }
- __this_cpu_write(hard_watchdog_warn, true);
- return;
-@@ -320,7 +331,7 @@ static enum hrtimer_restart watchdog_tim
- */
- static int watchdog(void *unused)
+@@ -8188,7 +8374,8 @@ void __init sched_init(void)
+ #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+ static inline int preempt_count_equals(int preempt_offset)
{
-- static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
-
- sched_setscheduler(current, SCHED_FIFO, &param);
-@@ -349,7 +360,8 @@ static int watchdog(void *unused)
- set_current_state(TASK_INTERRUPTIBLE);
- }
- __set_current_state(TASK_RUNNING);
--
-+ param.sched_priority = 0;
-+ sched_setscheduler(current, SCHED_NORMAL, &param);
- return 0;
- }
+- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
++ int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
++ sched_rcu_preempt_depth();
-@@ -422,6 +434,7 @@ static void watchdog_prepare_cpu(int cpu
- WARN_ON(per_cpu(softlockup_watchdog, cpu));
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
-+ hrtimer->irqsafe = 1;
+ return (nested == preempt_offset);
}
-
- static int watchdog_enable(int cpu)
-Index: linux-2.6/kernel/rtmutex-debug.c
+Index: linux-2.6/block/blk-core.c
===================================================================
---- linux-2.6.orig/kernel/rtmutex-debug.c
-+++ linux-2.6/kernel/rtmutex-debug.c
-@@ -29,61 +29,6 @@
-
- #include "rtmutex_common.h"
-
--# define TRACE_WARN_ON(x) WARN_ON(x)
--# define TRACE_BUG_ON(x) BUG_ON(x)
--
--# define TRACE_OFF() \
--do { \
-- if (rt_trace_on) { \
-- rt_trace_on = 0; \
-- console_verbose(); \
-- if (raw_spin_is_locked(¤t->pi_lock)) \
-- raw_spin_unlock(¤t->pi_lock); \
-- } \
--} while (0)
--
--# define TRACE_OFF_NOLOCK() \
--do { \
-- if (rt_trace_on) { \
-- rt_trace_on = 0; \
-- console_verbose(); \
-- } \
--} while (0)
--
--# define TRACE_BUG_LOCKED() \
--do { \
-- TRACE_OFF(); \
-- BUG(); \
--} while (0)
--
--# define TRACE_WARN_ON_LOCKED(c) \
--do { \
-- if (unlikely(c)) { \
-- TRACE_OFF(); \
-- WARN_ON(1); \
-- } \
--} while (0)
--
--# define TRACE_BUG_ON_LOCKED(c) \
--do { \
-- if (unlikely(c)) \
-- TRACE_BUG_LOCKED(); \
--} while (0)
--
--#ifdef CONFIG_SMP
--# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
--#else
--# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
--#endif
--
--/*
-- * deadlock detection flag. We turn it off when we detect
-- * the first problem because we dont want to recurse back
-- * into the tracing code when doing error printk or
-- * executing a BUG():
-- */
--static int rt_trace_on = 1;
--
- static void printk_task(struct task_struct *p)
+--- linux-2.6.orig/block/blk-core.c
++++ linux-2.6/block/blk-core.c
+@@ -236,7 +236,7 @@ EXPORT_SYMBOL(blk_delay_queue);
+ **/
+ void blk_start_queue(struct request_queue *q)
{
- if (p)
-@@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
- void rt_mutex_debug_task_free(struct task_struct *task)
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ __blk_run_queue(q);
+@@ -301,7 +301,11 @@ void __blk_run_queue(struct request_queu
{
-- WARN_ON(!plist_head_empty(&task->pi_waiters));
-- WARN_ON(task->pi_blocked_on);
-+ DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
-+ DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
+ if (unlikely(blk_queue_stopped(q)))
+ return;
+-
++ /*
++ * q->request_fn() can drop q->queue_lock and reenable
++ * interrupts, but must return with q->queue_lock held and
++ * interrupts disabled.
++ */
+ q->request_fn(q);
}
+ EXPORT_SYMBOL(__blk_run_queue);
+@@ -2670,11 +2674,11 @@ static void queue_unplugged(struct reque
+ * this lock).
+ */
+ if (from_schedule) {
+- spin_unlock(q->queue_lock);
++ spin_unlock_irq(q->queue_lock);
+ blk_run_queue_async(q);
+ } else {
+ __blk_run_queue(q);
+- spin_unlock(q->queue_lock);
++ spin_unlock_irq(q->queue_lock);
+ }
- /*
-@@ -125,7 +70,7 @@ void debug_rt_mutex_deadlock(int detect,
+ }
+@@ -2700,7 +2704,6 @@ static void flush_plug_callbacks(struct
+ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
- struct task_struct *task;
+ struct request_queue *q;
+- unsigned long flags;
+ struct request *rq;
+ LIST_HEAD(list);
+ unsigned int depth;
+@@ -2721,11 +2724,6 @@ void blk_flush_plug_list(struct blk_plug
+ q = NULL;
+ depth = 0;
-- if (!rt_trace_on || detect || !act_waiter)
-+ if (!debug_locks || detect || !act_waiter)
- return;
+- /*
+- * Save and disable interrupts here, to avoid doing it for every
+- * queue lock we have to take.
+- */
+- local_irq_save(flags);
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
+ list_del_init(&rq->queuelist);
+@@ -2738,7 +2736,7 @@ void blk_flush_plug_list(struct blk_plug
+ queue_unplugged(q, depth, from_schedule);
+ q = rq->q;
+ depth = 0;
+- spin_lock(q->queue_lock);
++ spin_lock_irq(q->queue_lock);
+ }
+ /*
+ * rq is already accounted, so use raw insert
+@@ -2756,8 +2754,6 @@ void blk_flush_plug_list(struct blk_plug
+ */
+ if (q)
+ queue_unplugged(q, depth, from_schedule);
+-
+- local_irq_restore(flags);
+ }
- task = rt_mutex_owner(act_waiter->lock);
-@@ -139,7 +84,7 @@ void debug_rt_mutex_print_deadlock(struc
- {
- struct task_struct *task;
+ void blk_finish_plug(struct blk_plug *plug)
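The blk-core hunks above all follow one rule: instead of one bare local_irq_save() spanning a whole batch of queues, interrupt disabling is folded into each queue_lock acquisition, so on RT the lock and its irq protection can be substituted together by a sleeping lock. A hedged sketch of a caller under that rule (illustrative name, not from the patch):

static void example_run_one_queue(struct request_queue *q)
{
	/* irq state is tied to the lock; no bare local_irq_save() */
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}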
+Index: linux-2.6/kernel/workqueue.c
+===================================================================
+--- linux-2.6.orig/kernel/workqueue.c
++++ linux-2.6/kernel/workqueue.c
+@@ -137,6 +137,7 @@ struct worker {
+ unsigned int flags; /* X: flags */
+ int id; /* I: worker id */
+ struct work_struct rebind_work; /* L: rebind worker to cpu */
++ int sleeping; /* None */
+ };
-- if (!waiter->deadlock_lock || !rt_trace_on)
-+ if (!waiter->deadlock_lock || !debug_locks)
- return;
+ /*
+@@ -657,66 +658,58 @@ static void wake_up_worker(struct global
+ }
- rcu_read_lock();
-@@ -149,7 +94,8 @@ void debug_rt_mutex_print_deadlock(struc
- return;
- }
+ /**
+- * wq_worker_waking_up - a worker is waking up
+- * @task: task waking up
+- * @cpu: CPU @task is waking up to
++ * wq_worker_running - a worker is running again
++ * @task: task returning from sleep
+ *
+- * This function is called during try_to_wake_up() when a worker is
+- * being awoken.
+- *
+- * CONTEXT:
+- * spin_lock_irq(rq->lock)
++ * This function is called when a worker returns from schedule()
+ */
+-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
++void wq_worker_running(struct task_struct *task)
+ {
+ struct worker *worker = kthread_data(task);
-- TRACE_OFF_NOLOCK();
-+ if (!debug_locks_off())
++ if (!worker->sleeping)
+ return;
-
- printk("\n============================================\n");
- printk( "[ BUG: circular locking deadlock detected! ]\n");
-@@ -180,7 +126,6 @@ void debug_rt_mutex_print_deadlock(struc
-
- printk("[ turning off deadlock detection."
- "Please report this trace. ]\n\n");
-- local_irq_disable();
+ if (!(worker->flags & WORKER_NOT_RUNNING))
+- atomic_inc(get_gcwq_nr_running(cpu));
++ atomic_inc(get_gcwq_nr_running(smp_processor_id()));
++ worker->sleeping = 0;
}
- void debug_rt_mutex_lock(struct rt_mutex *lock)
-@@ -189,7 +134,7 @@ void debug_rt_mutex_lock(struct rt_mutex
-
- void debug_rt_mutex_unlock(struct rt_mutex *lock)
+ /**
+ * wq_worker_sleeping - a worker is going to sleep
+ * @task: task going to sleep
+- * @cpu: CPU in question, must be the current CPU number
+- *
+- * This function is called during schedule() when a busy worker is
+- * going to sleep. Worker on the same cpu can be woken up by
+- * returning pointer to its task.
+- *
+- * CONTEXT:
+- * spin_lock_irq(rq->lock)
+ *
+- * RETURNS:
+- * Worker task on @cpu to wake up, %NULL if none.
++ * This function is called from schedule() when a busy worker is
++ * going to sleep.
+ */
+-struct task_struct *wq_worker_sleeping(struct task_struct *task,
+- unsigned int cpu)
++void wq_worker_sleeping(struct task_struct *task)
{
-- TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
-+ DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
- }
+- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
+- struct global_cwq *gcwq = get_gcwq(cpu);
+- atomic_t *nr_running = get_gcwq_nr_running(cpu);
++ struct worker *worker = kthread_data(task);
++ struct global_cwq *gcwq;
++ int cpu;
- void
-@@ -199,7 +144,7 @@ debug_rt_mutex_proxy_lock(struct rt_mute
+ if (worker->flags & WORKER_NOT_RUNNING)
+- return NULL;
++ return;
++
++ if (WARN_ON_ONCE(worker->sleeping))
++ return;
- void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
- {
-- TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
-+ DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
- }
+- /* this can only happen on the local cpu */
+- BUG_ON(cpu != raw_smp_processor_id());
++ worker->sleeping = 1;
- void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
-@@ -213,8 +158,8 @@ void debug_rt_mutex_init_waiter(struct r
- void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
- {
- put_pid(waiter->deadlock_task_pid);
-- TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
-- TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
-+ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
-+ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
- memset(waiter, 0x22, sizeof(*waiter));
++ cpu = smp_processor_id();
++ gcwq = get_gcwq(cpu);
++ spin_lock_irq(&gcwq->lock);
+ /*
+ * The counterpart of the following dec_and_test, implied mb,
+ * worklist not empty test sequence is in insert_work().
+ * Please read comment there.
+- *
+- * NOT_RUNNING is clear. This means that trustee is not in
+- * charge and we're running on the local cpu w/ rq lock held
+- * and preemption disabled, which in turn means that none else
+- * could be manipulating idle_list, so dereferencing idle_list
+- * without gcwq lock is safe.
+- */
+- if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
+- to_wakeup = first_worker(gcwq);
+- return to_wakeup ? to_wakeup->task : NULL;
++ */
++ if (atomic_dec_and_test(get_gcwq_nr_running(cpu)) &&
++ !list_empty(&gcwq->worklist)) {
++ worker = first_worker(gcwq);
++ if (worker)
++ wake_up_process(worker->task);
++ }
++ spin_unlock_irq(&gcwq->lock);
}
-Index: linux-2.6/arch/arm/kernel/perf_event.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/perf_event.c
-+++ linux-2.6/arch/arm/kernel/perf_event.c
-@@ -420,7 +420,7 @@ armpmu_reserve_hardware(void)
- continue;
-
- err = request_irq(irq, handle_irq,
-- IRQF_DISABLED | IRQF_NOBALANCING,
-+ IRQF_DISABLED | IRQF_NOBALANCING | IRQF_NO_THREAD,
- "armpmu", NULL);
- if (err) {
- pr_warning("unable to request IRQ%d for ARM perf "
-Index: linux-2.6/arch/arm/Kconfig
-===================================================================
---- linux-2.6.orig/arch/arm/Kconfig
-+++ linux-2.6/arch/arm/Kconfig
-@@ -29,6 +29,7 @@ config ARM
- select HAVE_GENERIC_HARDIRQS
- select HAVE_SPARSE_IRQ
- select GENERIC_IRQ_SHOW
-+ select IRQ_FORCED_THREADING
- help
- The ARM series is a line of low-power-consumption RISC chip designs
- licensed by ARM Ltd and targeted at embedded applications and
-@@ -1510,7 +1511,7 @@ config HAVE_ARCH_PFN_VALID
-
- config HIGHMEM
- bool "High Memory Support"
-- depends on MMU
-+ depends on MMU && !PREEMPT_RT_FULL
- help
- The address space of ARM processors is only 4 Gigabytes large
- and it has to accommodate user address space, kernel address
-Index: linux-2.6/arch/powerpc/platforms/85xx/mpc85xx_cds.c
-===================================================================
---- linux-2.6.orig/arch/powerpc/platforms/85xx/mpc85xx_cds.c
-+++ linux-2.6/arch/powerpc/platforms/85xx/mpc85xx_cds.c
-@@ -178,7 +178,7 @@ static irqreturn_t mpc85xx_8259_cascade_
+ /**
+@@ -1067,8 +1060,8 @@ int queue_work(struct workqueue_struct *
+ {
+ int ret;
- static struct irqaction mpc85xxcds_8259_irqaction = {
- .handler = mpc85xx_8259_cascade_action,
-- .flags = IRQF_SHARED,
-+ .flags = IRQF_SHARED | IRQF_NO_THREAD,
- .name = "8259 cascade",
- };
- #endif /* PPC_I8259 */
-Index: linux-2.6/arch/powerpc/Kconfig
-===================================================================
---- linux-2.6.orig/arch/powerpc/Kconfig
-+++ linux-2.6/arch/powerpc/Kconfig
-@@ -69,10 +69,11 @@ config LOCKDEP_SUPPORT
+- ret = queue_work_on(get_cpu(), wq, work);
+- put_cpu();
++ ret = queue_work_on(get_cpu_light(), wq, work);
++ put_cpu_light();
- config RWSEM_GENERIC_SPINLOCK
- bool
-+ default y if PREEMPT_RT_FULL
+ return ret;
+ }
+@@ -3484,6 +3477,25 @@ static int __devinit workqueue_cpu_callb
+ kthread_stop(new_trustee);
+ return NOTIFY_BAD;
+ }
++ break;
++ case CPU_POST_DEAD:
++ case CPU_UP_CANCELED:
++ case CPU_DOWN_FAILED:
++ case CPU_ONLINE:
++ break;
++ case CPU_DYING:
++ /*
++ * We access this lockless. We are on the dying CPU
++ * and called from stomp machine.
++ *
++ * Before this, the trustee and all workers except for
++ * the ones which are still executing works from
++ * before the last CPU down must be on the cpu. After
++ * this, they'll all be diasporas.
++ */
++ gcwq->flags |= GCWQ_DISASSOCIATED;
++ default:
++ goto out;
+ }
- config RWSEM_XCHGADD_ALGORITHM
- bool
-- default y
-+ default y if !PREEMPT_RT_FULL
+ /* some are called w/ irq disabled, don't disturb irq status */
+@@ -3503,16 +3515,6 @@ static int __devinit workqueue_cpu_callb
+ gcwq->first_idle = new_worker;
+ break;
- config GENERIC_LOCKBREAK
- bool
-@@ -134,6 +135,7 @@ config PPC
- select GENERIC_IRQ_SHOW_LEVEL
- select HAVE_RCU_TABLE_FREE if SMP
- select HAVE_SYSCALL_TRACEPOINTS
-+ select IRQ_FORCED_THREADING
+- case CPU_DYING:
+- /*
+- * Before this, the trustee and all workers except for
+- * the ones which are still executing works from
+- * before the last CPU down must be on the cpu. After
+- * this, they'll all be diasporas.
+- */
+- gcwq->flags |= GCWQ_DISASSOCIATED;
+- break;
+-
+ case CPU_POST_DEAD:
+ gcwq->trustee_state = TRUSTEE_BUTCHER;
+ /* fall through */
+@@ -3546,6 +3548,7 @@ static int __devinit workqueue_cpu_callb
- config EARLY_PRINTK
- bool
-@@ -271,7 +273,7 @@ menu "Kernel options"
+ spin_unlock_irqrestore(&gcwq->lock, flags);
- config HIGHMEM
- bool "High memory support"
-- depends on PPC32
-+ depends on PPC32 && !PREEMPT_RT_FULL
++out:
+ return notifier_from_errno(0);
+ }
- source kernel/time/Kconfig
- source kernel/Kconfig.hz
-Index: linux-2.6/include/linux/sched.h
+Index: linux-2.6/kernel/workqueue_sched.h
===================================================================
---- linux-2.6.orig/include/linux/sched.h
-+++ linux-2.6/include/linux/sched.h
-@@ -63,6 +63,7 @@ struct sched_param {
- #include <linux/nodemask.h>
- #include <linux/mm_types.h>
-
-+#include <asm/kmap_types.h>
- #include <asm/system.h>
- #include <asm/page.h>
- #include <asm/ptrace.h>
-@@ -90,6 +91,7 @@ struct sched_param {
- #include <linux/task_io_accounting.h>
- #include <linux/latencytop.h>
- #include <linux/cred.h>
-+#include <linux/hardirq.h>
-
- #include <asm/processor.h>
+--- linux-2.6.orig/kernel/workqueue_sched.h
++++ linux-2.6/kernel/workqueue_sched.h
+@@ -4,6 +4,5 @@
+ * Scheduler hooks for concurrency managed workqueue. Only to be
+ * included from sched.c and workqueue.c.
+ */
+-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
+-struct task_struct *wq_worker_sleeping(struct task_struct *task,
+- unsigned int cpu);
++void wq_worker_running(struct task_struct *task);
++void wq_worker_sleeping(struct task_struct *task);
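For orientation, a hedged sketch of how the scheduler side is expected to use the two reworked hooks (the real call sites live in the sched.c part of the patch, which is not shown in this excerpt): wq_worker_sleeping() is called by the worker itself before it blocks, and wq_worker_running() after schedule() returns, neither one under the runqueue lock.

static void example_schedule_wrapper(void)
{
	if (current->flags & PF_WQ_WORKER)
		wq_worker_sleeping(current);	/* takes gcwq->lock itself */
	schedule();
	if (current->flags & PF_WQ_WORKER)
		wq_worker_running(current);	/* runs in the woken worker */
}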
+Index: linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
+===================================================================
+--- linux-2.6.orig/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ linux-2.6/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -151,28 +151,17 @@ union _cpuid4_leaf_ecx {
+ u32 full;
+ };
-@@ -359,6 +361,7 @@ extern signed long schedule_timeout_inte
- extern signed long schedule_timeout_killable(signed long timeout);
- extern signed long schedule_timeout_uninterruptible(signed long timeout);
- asmlinkage void schedule(void);
-+extern void schedule_preempt_disabled(void);
- extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
+-struct amd_l3_cache {
+- struct amd_northbridge *nb;
+- unsigned indices;
+- u8 subcaches[4];
+-};
+-
+-struct _cpuid4_info {
++struct _cpuid4_info_regs {
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+- struct amd_l3_cache *l3;
+- DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
++ struct amd_northbridge *nb;
+ };
- struct nsproxy;
-@@ -510,7 +513,7 @@ struct task_cputime {
- struct thread_group_cputimer {
- struct task_cputime cputime;
- int running;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
+-/* subset of above _cpuid4_info w/o shared_cpu_map */
+-struct _cpuid4_info_regs {
+- union _cpuid4_leaf_eax eax;
+- union _cpuid4_leaf_ebx ebx;
+- union _cpuid4_leaf_ecx ecx;
+- unsigned long size;
+- struct amd_l3_cache *l3;
++struct _cpuid4_info {
++ struct _cpuid4_info_regs base;
++ DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};
- #include <linux/rwsem.h>
-@@ -1070,6 +1073,7 @@ struct sched_domain;
- #define WF_SYNC 0x01 /* waker goes to sleep after wakup */
- #define WF_FORK 0x02 /* child wakeup after fork */
- #define WF_MIGRATED 0x04 /* internal use, task got migrated */
-+#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
+ unsigned short num_cache_leaves;
+@@ -314,12 +303,13 @@ struct _cache_attr {
+ /*
+ * L3 cache descriptors
+ */
+-static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
++static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
+ {
++ struct amd_l3_cache *l3 = &nb->l3_cache;
+ unsigned int sc0, sc1, sc2, sc3;
+ u32 val = 0;
- #define ENQUEUE_WAKEUP 1
- #define ENQUEUE_HEAD 2
-@@ -1219,6 +1223,7 @@ enum perf_event_task_context {
+- pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
++ pci_read_config_dword(nb->misc, 0x1C4, &val);
- struct task_struct {
- volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
-+ volatile long saved_state; /* saved state for "spinlock sleepers" */
- void *stack;
- atomic_t usage;
- unsigned int flags; /* per process flags, defined below */
-@@ -1255,14 +1260,14 @@ struct task_struct {
- #endif
+ /* calculate subcache sizes */
+ l3->subcaches[0] = sc0 = !(val & BIT(0));
+@@ -333,33 +323,16 @@ static void __cpuinit amd_calc_l3_indice
+ static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
+ int index)
+ {
+- static struct amd_l3_cache *__cpuinitdata l3_caches;
+ int node;
- unsigned int policy;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int migrate_disable;
-+#endif
- cpumask_t cpus_allowed;
+ /* only for L3, and not in virtualized environments */
+- if (index < 3 || amd_nb_num() == 0)
++ if (index < 3)
+ return;
- #ifdef CONFIG_PREEMPT_RCU
- int rcu_read_lock_nesting;
- char rcu_read_unlock_special;
--#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
-- int rcu_boosted;
--#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
- struct list_head rcu_node_entry;
- #endif /* #ifdef CONFIG_PREEMPT_RCU */
- #ifdef CONFIG_TREE_PREEMPT_RCU
-@@ -1356,6 +1361,9 @@ struct task_struct {
+- /*
+- * Strictly speaking, the amount in @size below is leaked since it is
+- * never freed but this is done only on shutdown so it doesn't matter.
+- */
+- if (!l3_caches) {
+- int size = amd_nb_num() * sizeof(struct amd_l3_cache);
+-
+- l3_caches = kzalloc(size, GFP_ATOMIC);
+- if (!l3_caches)
+- return;
+- }
+-
+ node = amd_get_nb_id(smp_processor_id());
+-
+- if (!l3_caches[node].nb) {
+- l3_caches[node].nb = node_to_amd_nb(node);
+- amd_calc_l3_indices(&l3_caches[node]);
+- }
+-
+- this_leaf->l3 = &l3_caches[node];
++ this_leaf->nb = node_to_amd_nb(node);
++ if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
++ amd_calc_l3_indices(this_leaf->nb);
+ }
- struct task_cputime cputime_expires;
- struct list_head cpu_timers[3];
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct task_struct *posix_timer_list;
-+#endif
+ /*
+@@ -369,11 +342,11 @@ static void __cpuinit amd_init_l3_cache(
+ *
+ * @returns: the disabled index if used or negative value if slot free.
+ */
+-int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
++int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
+ {
+ unsigned int reg = 0;
- /* process credentials */
- const struct cred __rcu *real_cred; /* objective and real subjective task
-@@ -1389,6 +1397,7 @@ struct task_struct {
- /* signal handlers */
- struct signal_struct *signal;
- struct sighand_struct *sighand;
-+ struct sigqueue *sigqueue_cache;
+- pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, ®);
++ pci_read_config_dword(nb->misc, 0x1BC + slot * 4, ®);
- sigset_t blocked, real_blocked;
- sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
-@@ -1432,6 +1441,9 @@ struct task_struct {
- /* mutex deadlock detection */
- struct mutex_waiter *blocked_on;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int pagefault_disabled;
-+#endif
- #ifdef CONFIG_TRACE_IRQFLAGS
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
-@@ -1558,6 +1570,12 @@ struct task_struct {
- unsigned long trace;
- /* bitmask and counter of trace recursion */
- unsigned long trace_recursion;
-+#ifdef CONFIG_WAKEUP_LATENCY_HIST
-+ u64 preempt_timestamp_hist;
-+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
-+ unsigned long timer_offset;
-+#endif
-+#endif
- #endif /* CONFIG_TRACING */
- #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
- struct memcg_batch_info {
-@@ -1570,10 +1588,24 @@ struct task_struct {
- #ifdef CONFIG_HAVE_HW_BREAKPOINT
- atomic_t ptrace_bp_refcnt;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct rcu_head put_rcu;
-+ int softirq_nestcnt;
-+#endif
-+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
-+ int kmap_idx;
-+ pte_t kmap_pte[KM_TYPE_NR];
-+#endif
- };
+ /* check whether this slot is activated already */
+ if (reg & (3UL << 30))
+@@ -387,11 +360,10 @@ static ssize_t show_cache_disable(struct
+ {
+ int index;
--/* Future-safe accessor for struct task_struct's cpus_allowed. */
--#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-+static inline bool pagefault_disabled(void)
-+{
-+ return in_atomic()
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ || current->pagefault_disabled
-+#endif
-+ ;
-+}
+- if (!this_leaf->l3 ||
+- !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
++ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ return -EINVAL;
- /*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
-@@ -1743,6 +1775,15 @@ extern struct pid *cad_pid;
- extern void free_task(struct task_struct *tsk);
- #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+- index = amd_get_l3_disable_slot(this_leaf->l3, slot);
++ index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
+ if (index >= 0)
+ return sprintf(buf, "%d\n", index);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __put_task_struct_cb(struct rcu_head *rhp);
-+
-+static inline void put_task_struct(struct task_struct *t)
-+{
-+ if (atomic_dec_and_test(&t->usage))
-+ call_rcu(&t->put_rcu, __put_task_struct_cb);
-+}
-+#else
- extern void __put_task_struct(struct task_struct *t);
+@@ -408,7 +380,7 @@ show_cache_disable_##slot(struct _cpuid4
+ SHOW_CACHE_DISABLE(0)
+ SHOW_CACHE_DISABLE(1)
- static inline void put_task_struct(struct task_struct *t)
-@@ -1750,6 +1791,7 @@ static inline void put_task_struct(struc
- if (atomic_dec_and_test(&t->usage))
- __put_task_struct(t);
- }
-+#endif
+-static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
++static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
+ unsigned slot, unsigned long idx)
+ {
+ int i;
+@@ -421,10 +393,10 @@ static void amd_l3_disable_index(struct
+ for (i = 0; i < 4; i++) {
+ u32 reg = idx | (i << 20);
- extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
- extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
-@@ -1774,6 +1816,7 @@ extern void thread_group_times(struct ta
- #define PF_FROZEN 0x00010000 /* frozen for system suspend */
- #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
- #define PF_KSWAPD 0x00040000 /* I am kswapd */
-+#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */
- #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
- #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
- #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
-@@ -2022,15 +2065,27 @@ static inline void sched_autogroup_exit(
- #endif
+- if (!l3->subcaches[i])
++ if (!nb->l3_cache.subcaches[i])
+ continue;
- #ifdef CONFIG_RT_MUTEXES
-+extern void task_setprio(struct task_struct *p, int prio);
- extern int rt_mutex_getprio(struct task_struct *p);
--extern void rt_mutex_setprio(struct task_struct *p, int prio);
-+static inline void rt_mutex_setprio(struct task_struct *p, int prio)
-+{
-+ task_setprio(p, prio);
-+}
- extern void rt_mutex_adjust_pi(struct task_struct *p);
-+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-+{
-+ return tsk->pi_blocked_on != NULL;
-+}
- #else
- static inline int rt_mutex_getprio(struct task_struct *p)
- {
- return p->normal_prio;
- }
- # define rt_mutex_adjust_pi(p) do { } while (0)
-+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-+{
-+ return false;
-+}
- #endif
+- pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
++ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
- extern bool yield_to(struct task_struct *p, bool preempt);
-@@ -2110,6 +2165,7 @@ extern void xtime_update(unsigned long t
+ /*
+ * We need to WBINVD on a core on the node containing the L3
+@@ -434,7 +406,7 @@ static void amd_l3_disable_index(struct
+ wbinvd_on_cpu(cpu);
- extern int wake_up_state(struct task_struct *tsk, unsigned int state);
- extern int wake_up_process(struct task_struct *tsk);
-+extern int wake_up_lock_sleeper(struct task_struct * tsk);
- extern void wake_up_new_task(struct task_struct *tsk);
- #ifdef CONFIG_SMP
- extern void kick_process(struct task_struct *tsk);
-@@ -2199,12 +2255,24 @@ extern struct mm_struct * mm_alloc(void)
+ reg |= BIT(31);
+- pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
++ pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
+ }
+ }
- /* mmdrop drops the mm and the page tables */
- extern void __mmdrop(struct mm_struct *);
-+
- static inline void mmdrop(struct mm_struct * mm)
+@@ -448,24 +420,24 @@ static void amd_l3_disable_index(struct
+ *
+ * @return: 0 on success, error status on failure
+ */
+-int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
++int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
+ unsigned long index)
{
- if (unlikely(atomic_dec_and_test(&mm->mm_count)))
- __mmdrop(mm);
- }
+ int ret = 0;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+extern void __mmdrop_delayed(struct rcu_head *rhp);
-+static inline void mmdrop_delayed(struct mm_struct *mm)
-+{
-+ if (atomic_dec_and_test(&mm->mm_count))
-+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
-+}
-+#else
-+# define mmdrop_delayed(mm) mmdrop(mm)
-+#endif
-+
- /* mmput gets rid of the mappings and all user-space */
- extern void mmput(struct mm_struct *);
- /* Grab a reference to a task's mm, if it is not already going away */
-@@ -2510,7 +2578,7 @@ extern int _cond_resched(void);
+ /* check if @slot is already used or the index is already disabled */
+- ret = amd_get_l3_disable_slot(l3, slot);
++ ret = amd_get_l3_disable_slot(nb, slot);
+ if (ret >= 0)
+ return -EINVAL;
- extern int __cond_resched_lock(spinlock_t *lock);
+- if (index > l3->indices)
++ if (index > nb->l3_cache.indices)
+ return -EINVAL;
--#ifdef CONFIG_PREEMPT
-+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT_FULL)
- #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
- #else
- #define PREEMPT_LOCK_OFFSET 0
-@@ -2521,12 +2589,16 @@ extern int __cond_resched_lock(spinlock_
- __cond_resched_lock(lock); \
- })
+ /* check whether the other slot has disabled the same index already */
+- if (index == amd_get_l3_disable_slot(l3, !slot))
++ if (index == amd_get_l3_disable_slot(nb, !slot))
+ return -EINVAL;
-+#ifndef CONFIG_PREEMPT_RT_FULL
- extern int __cond_resched_softirq(void);
+- amd_l3_disable_index(l3, cpu, slot, index);
++ amd_l3_disable_index(nb, cpu, slot, index);
- #define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
- __cond_resched_softirq(); \
- })
-+#else
-+# define cond_resched_softirq() cond_resched()
-+#endif
+ return 0;
+ }
+@@ -480,8 +452,7 @@ static ssize_t store_cache_disable(struc
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
- /*
- * Does a critical section need to be broken due to another
-@@ -2550,7 +2622,7 @@ void thread_group_cputimer(struct task_s
+- if (!this_leaf->l3 ||
+- !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
++ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+ return -EINVAL;
- static inline void thread_group_cputime_init(struct signal_struct *sig)
- {
-- spin_lock_init(&sig->cputimer.lock);
-+ raw_spin_lock_init(&sig->cputimer.lock);
- }
+ cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
+@@ -489,7 +460,7 @@ static ssize_t store_cache_disable(struc
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
- /*
-@@ -2589,6 +2661,26 @@ static inline void set_task_cpu(struct t
+- err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
++ err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
+ if (err) {
+ if (err == -EEXIST)
+ printk(KERN_WARNING "L3 disable slot %d in use!\n",
+@@ -518,7 +489,7 @@ static struct _cache_attr cache_disable_
+ static ssize_t
+ show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
+ {
+- if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
++ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return -EINVAL;
- #endif /* CONFIG_SMP */
+ return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
+@@ -533,7 +504,7 @@ store_subcaches(struct _cpuid4_info *thi
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
-+static inline int __migrate_disabled(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ return p->migrate_disable;
-+#else
-+ return 0;
-+#endif
-+}
-+
-+/* Future-safe accessor for struct task_struct's cpus_allowed. */
-+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (p->migrate_disable)
-+ return cpumask_of(task_cpu(p));
-+#endif
-+
-+ return &p->cpus_allowed;
-+}
-+
- extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
- extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+- if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
++ if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return -EINVAL;
-Index: linux-2.6/arch/arm/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/process.c
-+++ linux-2.6/arch/arm/kernel/process.c
-@@ -209,9 +209,7 @@ void cpu_idle(void)
- }
- leds_event(led_idle_end);
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
+ if (strict_strtoul(buf, 16, &val) < 0)
+@@ -769,7 +740,7 @@ static void __cpuinit cache_shared_cpu_m
+ return;
}
- }
+ this_leaf = CPUID4_INFO_IDX(cpu, index);
+- num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
++ num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
-Index: linux-2.6/arch/avr32/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/avr32/kernel/process.c
-+++ linux-2.6/arch/avr32/kernel/process.c
-@@ -38,9 +38,7 @@ void cpu_idle(void)
- while (!need_resched())
- cpu_idle_sleep();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ if (num_threads_sharing == 1)
+ cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
+@@ -820,29 +791,19 @@ static void __cpuinit free_cache_attribu
+ for (i = 0; i < num_cache_leaves; i++)
+ cache_remove_shared_cpu_map(cpu, i);
-Index: linux-2.6/arch/blackfin/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/blackfin/kernel/process.c
-+++ linux-2.6/arch/blackfin/kernel/process.c
-@@ -92,9 +92,7 @@ void cpu_idle(void)
- while (!need_resched())
- idle();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+- kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
+ kfree(per_cpu(ici_cpuid4_info, cpu));
+ per_cpu(ici_cpuid4_info, cpu) = NULL;
}
-Index: linux-2.6/arch/cris/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/cris/kernel/process.c
-+++ linux-2.6/arch/cris/kernel/process.c
-@@ -115,9 +115,7 @@ void cpu_idle (void)
- idle = default_idle;
- idle();
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+-static int
+-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+-{
+- struct _cpuid4_info_regs *leaf_regs =
+- (struct _cpuid4_info_regs *)this_leaf;
+-
+- return cpuid4_cache_lookup_regs(index, leaf_regs);
+-}
+-
+ static void __cpuinit get_cpu_leaves(void *_retval)
+ {
+ int j, *retval = _retval, cpu = smp_processor_id();
-Index: linux-2.6/arch/frv/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/frv/kernel/process.c
-+++ linux-2.6/arch/frv/kernel/process.c
-@@ -92,9 +92,7 @@ void cpu_idle(void)
- idle();
- }
+ /* Do cpuid and store the results */
+ for (j = 0; j < num_cache_leaves; j++) {
+- struct _cpuid4_info *this_leaf;
+- this_leaf = CPUID4_INFO_IDX(cpu, j);
+- *retval = cpuid4_cache_lookup(j, this_leaf);
++ struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
++
++ *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
+ if (unlikely(*retval < 0)) {
+ int i;
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+@@ -900,16 +861,16 @@ static ssize_t show_##file_name(struct _
+ return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}
-Index: linux-2.6/arch/h8300/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/h8300/kernel/process.c
-+++ linux-2.6/arch/h8300/kernel/process.c
-@@ -81,9 +81,7 @@ void cpu_idle(void)
- while (1) {
- while (!need_resched())
- idle();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
+-show_one_plus(level, eax.split.level, 0);
+-show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
+-show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
+-show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
+-show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
++show_one_plus(level, base.eax.split.level, 0);
++show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
++show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
++show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
++show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
+
+ static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
+ unsigned int cpu)
+ {
+- return sprintf(buf, "%luK\n", this_leaf->size / 1024);
++ return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}
-Index: linux-2.6/arch/ia64/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/ia64/kernel/process.c
-+++ linux-2.6/arch/ia64/kernel/process.c
-@@ -330,9 +330,7 @@ cpu_idle (void)
- normal_xtp();
+ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
+@@ -946,7 +907,7 @@ static inline ssize_t show_shared_cpu_li
+ static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
+ unsigned int cpu)
+ {
+- switch (this_leaf->eax.split.type) {
++ switch (this_leaf->base.eax.split.type) {
+ case CACHE_TYPE_DATA:
+ return sprintf(buf, "Data\n");
+ case CACHE_TYPE_INST:
+@@ -1135,7 +1096,7 @@ static int __cpuinit cache_add_dev(struc
+
+ ktype_cache.default_attrs = default_attrs;
+ #ifdef CONFIG_AMD_NB
+- if (this_leaf->l3)
++ if (this_leaf->base.nb)
+ ktype_cache.default_attrs = amd_l3_attrs();
#endif
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- if (cpu_is_offline(cpu))
- play_dead();
-Index: linux-2.6/arch/m32r/kernel/process.c
+ retval = kobject_init_and_add(&(this_object->kobj),
+Index: linux-2.6/arch/x86/include/asm/amd_nb.h
===================================================================
---- linux-2.6.orig/arch/m32r/kernel/process.c
-+++ linux-2.6/arch/m32r/kernel/process.c
-@@ -90,9 +90,7 @@ void cpu_idle (void)
+--- linux-2.6.orig/arch/x86/include/asm/amd_nb.h
++++ linux-2.6/arch/x86/include/asm/amd_nb.h
+@@ -19,9 +19,15 @@ extern int amd_numa_init(void);
+ extern int amd_get_subcaches(int);
+ extern int amd_set_subcaches(int, int);
- idle();
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
++struct amd_l3_cache {
++ unsigned indices;
++ u8 subcaches[4];
++};
++
+ struct amd_northbridge {
+ struct pci_dev *misc;
+ struct pci_dev *link;
++ struct amd_l3_cache l3_cache;
+ };
-Index: linux-2.6/arch/m68k/kernel/process_mm.c
+ struct amd_northbridge_info {
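The point of the amd_nb.h hunk above: the L3 descriptor now lives inside struct amd_northbridge instead of a separately kzalloc'd per-node array, so the cacheinfo code reaches it through the northbridge pointer it already holds. A minimal illustrative accessor (not from the patch):

static unsigned int example_l3_indices(const struct amd_northbridge *nb)
{
	return nb->l3_cache.indices;
}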
+Index: linux-2.6/arch/mips/sibyte/sb1250/irq.c
===================================================================
---- linux-2.6.orig/arch/m68k/kernel/process_mm.c
-+++ linux-2.6/arch/m68k/kernel/process_mm.c
-@@ -94,9 +94,7 @@ void cpu_idle(void)
- while (1) {
- while (!need_resched())
- idle();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+--- linux-2.6.orig/arch/mips/sibyte/sb1250/irq.c
++++ linux-2.6/arch/mips/sibyte/sb1250/irq.c
+@@ -178,7 +178,7 @@ static void ack_sb1250_irq(struct irq_da
-Index: linux-2.6/arch/m68k/kernel/process_no.c
+ static struct irq_chip sb1250_irq_type = {
+ .name = "SB1250-IMR",
+- .irq_mask_ack = ack_sb1250_irq,
++ .irq_mask = ack_sb1250_irq,
+ .irq_unmask = enable_sb1250_irq,
+ #ifdef CONFIG_SMP
+ .irq_set_affinity = sb1250_set_affinity
+Index: linux-2.6/arch/mips/kernel/ftrace.c
===================================================================
---- linux-2.6.orig/arch/m68k/kernel/process_no.c
-+++ linux-2.6/arch/m68k/kernel/process_no.c
-@@ -73,9 +73,7 @@ void cpu_idle(void)
- /* endless idle loop with no priority at all */
- while (1) {
- idle();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+--- linux-2.6.orig/arch/mips/kernel/ftrace.c
++++ linux-2.6/arch/mips/kernel/ftrace.c
+@@ -19,6 +19,26 @@
-Index: linux-2.6/arch/microblaze/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/microblaze/kernel/process.c
-+++ linux-2.6/arch/microblaze/kernel/process.c
-@@ -108,9 +108,7 @@ void cpu_idle(void)
- idle();
- tick_nohz_restart_sched_tick();
+ #include <asm-generic/sections.h>
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- }
- }
-Index: linux-2.6/arch/mips/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/mips/kernel/process.c
-+++ linux-2.6/arch/mips/kernel/process.c
-@@ -78,9 +78,7 @@ void __noreturn cpu_idle(void)
- play_dead();
++#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
++#define MCOUNT_OFFSET_INSNS 5
++#else
++#define MCOUNT_OFFSET_INSNS 4
++#endif
++
++/*
++ * Check if the address is in kernel space
++ *
++ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
++ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
++ */
++static inline int in_kernel_space(unsigned long ip)
++{
++ if (ip >= (unsigned long)_stext &&
++ ip <= (unsigned long)_etext)
++ return 1;
++ return 0;
++}
++
+ #ifdef CONFIG_DYNAMIC_FTRACE
+
+ #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
+@@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_
#endif
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
}
-Index: linux-2.6/arch/mn10300/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/mn10300/kernel/process.c
-+++ linux-2.6/arch/mn10300/kernel/process.c
-@@ -123,9 +123,7 @@ void cpu_idle(void)
- idle();
- }
+-/*
+- * Check if the address is in kernel space
+- *
+- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+- */
+-static inline int in_kernel_space(unsigned long ip)
+-{
+- if (ip >= (unsigned long)_stext &&
+- ip <= (unsigned long)_etext)
+- return 1;
+- return 0;
+-}
+-
+ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
+ {
+ int faulted;
+@@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned l
+ * 1: offset = 4 instructions
+ */
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+-#define MCOUNT_OFFSET_INSNS 5
+-#else
+-#define MCOUNT_OFFSET_INSNS 4
+-#endif
+ #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
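A short decode of the constant above for readers without the MIPS manual at hand: 0x10000000 is the encoding of "beq $0, $0, offset", i.e. an unconditional branch, and the low 16 bits carry the branch offset counted in instructions. Illustratively (hypothetical helper, not in the patch):

static unsigned int example_mips_branch(unsigned int insns_forward)
{
	/* "b 1f": branch insns_forward instructions ahead */
	return 0x10000000 | (insns_forward & 0xffff);
}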
-Index: linux-2.6/arch/parisc/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/parisc/kernel/process.c
-+++ linux-2.6/arch/parisc/kernel/process.c
-@@ -71,9 +71,7 @@ void cpu_idle(void)
- while (1) {
- while (!need_resched())
- barrier();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- }
- }
-Index: linux-2.6/arch/powerpc/kernel/idle.c
+ int ftrace_make_nop(struct module *mod,
+Index: linux-2.6/arch/mips/loongson/fuloong-2e/irq.c
===================================================================
---- linux-2.6.orig/arch/powerpc/kernel/idle.c
-+++ linux-2.6/arch/powerpc/kernel/idle.c
-@@ -94,11 +94,11 @@ void cpu_idle(void)
- HMT_medium();
- ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- if (cpu_should_die())
-+ if (cpu_should_die()) {
-+ __preempt_enable_no_resched();
- cpu_die();
-- schedule();
-- preempt_disable();
-+ }
-+ schedule_preempt_disabled();
- }
- }
+--- linux-2.6.orig/arch/mips/loongson/fuloong-2e/irq.c
++++ linux-2.6/arch/mips/loongson/fuloong-2e/irq.c
+@@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsign
+ static struct irqaction cascade_irqaction = {
+ .handler = no_action,
+ .name = "cascade",
++ .flags = IRQF_NO_THREAD,
+ };
-Index: linux-2.6/arch/powerpc/platforms/iseries/setup.c
+ void __init mach_init_irq(void)
+Index: linux-2.6/arch/mips/loongson/lemote-2f/irq.c
===================================================================
---- linux-2.6.orig/arch/powerpc/platforms/iseries/setup.c
-+++ linux-2.6/arch/powerpc/platforms/iseries/setup.c
-@@ -581,9 +581,7 @@ static void iseries_shared_idle(void)
- if (hvlpevent_is_pending())
- process_iSeries_events();
+--- linux-2.6.orig/arch/mips/loongson/lemote-2f/irq.c
++++ linux-2.6/arch/mips/loongson/lemote-2f/irq.c
+@@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, v
+ struct irqaction ip6_irqaction = {
+ .handler = ip6_action,
+ .name = "cascade",
+- .flags = IRQF_SHARED,
++ .flags = IRQF_SHARED | IRQF_NO_THREAD,
+ };
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ struct irqaction cascade_irqaction = {
+ .handler = no_action,
+ .name = "cascade",
++ .flags = IRQF_NO_THREAD,
+ };
-@@ -610,9 +608,7 @@ static void iseries_dedicated_idle(void)
+ void __init mach_init_irq(void)
+Index: linux-2.6/arch/mips/ar7/irq.c
+===================================================================
+--- linux-2.6.orig/arch/mips/ar7/irq.c
++++ linux-2.6/arch/mips/ar7/irq.c
+@@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type
- ppc64_runlatch_on();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ static struct irqaction ar7_cascade_action = {
+ .handler = no_action,
+- .name = "AR7 cascade interrupt"
++ .name = "AR7 cascade interrupt",
++ .flags = IRQF_NO_THREAD,
+ };
-Index: linux-2.6/arch/s390/kernel/process.c
+ static void __init ar7_irq_init(int base)
+Index: linux-2.6/arch/mips/bcm63xx/irq.c
===================================================================
---- linux-2.6.orig/arch/s390/kernel/process.c
-+++ linux-2.6/arch/s390/kernel/process.c
-@@ -94,9 +94,7 @@ void cpu_idle(void)
- while (!need_resched())
- default_idle();
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
-
-Index: linux-2.6/arch/score/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/score/kernel/process.c
-+++ linux-2.6/arch/score/kernel/process.c
-@@ -53,9 +53,7 @@ void __noreturn cpu_idle(void)
- while (!need_resched())
- barrier();
+--- linux-2.6.orig/arch/mips/bcm63xx/irq.c
++++ linux-2.6/arch/mips/bcm63xx/irq.c
+@@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_
+ static struct irqaction cpu_ip2_cascade_action = {
+ .handler = no_action,
+ .name = "cascade_ip2",
++ .flags = IRQF_NO_THREAD,
+ };
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ void __init arch_init_irq(void)
+Index: linux-2.6/arch/mips/cobalt/irq.c
+===================================================================
+--- linux-2.6.orig/arch/mips/cobalt/irq.c
++++ linux-2.6/arch/mips/cobalt/irq.c
+@@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void)
+ static struct irqaction cascade = {
+ .handler = no_action,
+ .name = "cascade",
++ .flags = IRQF_NO_THREAD,
+ };
-Index: linux-2.6/arch/sh/kernel/idle.c
+ void __init arch_init_irq(void)
+Index: linux-2.6/arch/mips/dec/setup.c
===================================================================
---- linux-2.6.orig/arch/sh/kernel/idle.c
-+++ linux-2.6/arch/sh/kernel/idle.c
-@@ -110,9 +110,7 @@ void cpu_idle(void)
- }
+--- linux-2.6.orig/arch/mips/dec/setup.c
++++ linux-2.6/arch/mips/dec/setup.c
+@@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_
+ static struct irqaction ioirq = {
+ .handler = no_action,
+ .name = "cascade",
++ .flags = IRQF_NO_THREAD,
+ };
+ static struct irqaction fpuirq = {
+ .handler = no_action,
+ .name = "fpu",
++ .flags = IRQF_NO_THREAD,
+ };
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ static struct irqaction busirq = {
+- .flags = IRQF_DISABLED,
++ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
+ .name = "bus error",
+ };
-Index: linux-2.6/arch/sparc/kernel/process_32.c
-===================================================================
---- linux-2.6.orig/arch/sparc/kernel/process_32.c
-+++ linux-2.6/arch/sparc/kernel/process_32.c
-@@ -113,9 +113,7 @@ void cpu_idle(void)
- while (!need_resched())
- cpu_relax();
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- }
- }
-@@ -138,9 +136,7 @@ void cpu_idle(void)
- while (!need_resched())
- cpu_relax();
- }
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- check_pgt_cache();
- }
- }
-Index: linux-2.6/arch/sparc/kernel/process_64.c
-===================================================================
---- linux-2.6.orig/arch/sparc/kernel/process_64.c
-+++ linux-2.6/arch/sparc/kernel/process_64.c
-@@ -102,15 +102,13 @@ void cpu_idle(void)
+ static struct irqaction haltirq = {
+ .handler = dec_intr_halt,
+ .name = "halt",
++ .flags = IRQF_NO_THREAD,
+ };
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
--
- #ifdef CONFIG_HOTPLUG_CPU
-- if (cpu_is_offline(cpu))
-+ if (cpu_is_offline(cpu)) {
-+ __preempt_enable_no_resched();
- cpu_play_dead();
-+ }
- #endif
--
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+Index: linux-2.6/arch/mips/emma/markeins/irq.c
+===================================================================
+--- linux-2.6.orig/arch/mips/emma/markeins/irq.c
++++ linux-2.6/arch/mips/emma/markeins/irq.c
+@@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void)
-Index: linux-2.6/arch/tile/kernel/process.c
+ static struct irqaction irq_cascade = {
+ .handler = no_action,
+- .flags = 0,
++ .flags = IRQF_NO_THREAD,
+ .name = "cascade",
+ .dev_id = NULL,
+ .next = NULL,
+Index: linux-2.6/arch/mips/lasat/interrupt.c
===================================================================
---- linux-2.6.orig/arch/tile/kernel/process.c
-+++ linux-2.6/arch/tile/kernel/process.c
-@@ -106,9 +106,7 @@ void cpu_idle(void)
- current_thread_info()->status |= TS_POLLING;
- }
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+--- linux-2.6.orig/arch/mips/lasat/interrupt.c
++++ linux-2.6/arch/mips/lasat/interrupt.c
+@@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void)
+ static struct irqaction cascade = {
+ .handler = no_action,
+ .name = "cascade",
++ .flags = IRQF_NO_THREAD,
+ };
-Index: linux-2.6/arch/x86/kernel/process_32.c
+ void __init arch_init_irq(void)
+Index: linux-2.6/arch/mips/mti-malta/malta-int.c
===================================================================
---- linux-2.6.orig/arch/x86/kernel/process_32.c
-+++ linux-2.6/arch/x86/kernel/process_32.c
-@@ -38,6 +38,7 @@
- #include <linux/uaccess.h>
- #include <linux/io.h>
- #include <linux/kdebug.h>
-+#include <linux/highmem.h>
+--- linux-2.6.orig/arch/mips/mti-malta/malta-int.c
++++ linux-2.6/arch/mips/mti-malta/malta-int.c
+@@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(
- #include <asm/pgtable.h>
- #include <asm/system.h>
-@@ -113,9 +114,7 @@ void cpu_idle(void)
- start_critical_timings();
- }
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ static struct irqaction i8259irq = {
+ .handler = no_action,
+- .name = "XT-PIC cascade"
++ .name = "XT-PIC cascade",
++ .flags = IRQF_NO_THREAD,
+ };
-@@ -348,6 +347,41 @@ __switch_to(struct task_struct *prev_p,
- task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
- __switch_to_xtra(prev_p, next_p, tss);
+ static struct irqaction corehi_irqaction = {
+ .handler = no_action,
+- .name = "CoreHi"
++ .name = "CoreHi",
++ .flags = IRQF_NO_THREAD,
+ };
-+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
-+ /*
-+ * Save @prev's kmap_atomic stack
-+ */
-+ prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx);
-+ if (unlikely(prev_p->kmap_idx)) {
-+ int i;
-+
-+ for (i = 0; i < prev_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+
-+ pte_t *ptep = kmap_pte - idx;
-+ prev_p->kmap_pte[i] = *ptep;
-+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
-+ }
-+
-+ __this_cpu_write(__kmap_atomic_idx, 0);
-+ }
-+
-+ /*
-+ * Restore @next_p's kmap_atomic stack
-+ */
-+ if (unlikely(next_p->kmap_idx)) {
-+ int i;
-+
-+ __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx);
-+
-+ for (i = 0; i < next_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+
-+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
-+ }
-+ }
-+#endif
-+
- /* If we're going to preload the fpu context, make sure clts
- is run while we're batching the cpu state updates. */
- if (preload_fpu)
-Index: linux-2.6/arch/x86/kernel/process_64.c
+ static msc_irqmap_t __initdata msc_irqmap[] = {
+Index: linux-2.6/arch/mips/pmc-sierra/msp71xx/msp_irq.c
===================================================================
---- linux-2.6.orig/arch/x86/kernel/process_64.c
-+++ linux-2.6/arch/x86/kernel/process_64.c
-@@ -146,9 +146,7 @@ void cpu_idle(void)
- }
+--- linux-2.6.orig/arch/mips/pmc-sierra/msp71xx/msp_irq.c
++++ linux-2.6/arch/mips/pmc-sierra/msp71xx/msp_irq.c
+@@ -109,11 +109,13 @@ asmlinkage void plat_irq_dispatch(struct
+ static struct irqaction cic_cascade_msp = {
+ .handler = no_action,
+ .name = "MSP CIC cascade"
++ .flags = IRQF_NO_THREAD,
+ };
- tick_nohz_restart_sched_tick();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
+ static struct irqaction per_cascade_msp = {
+ .handler = no_action,
+ .name = "MSP PER cascade"
++ .flags = IRQF_NO_THREAD,
+ };
-Index: linux-2.6/arch/xtensa/kernel/process.c
-===================================================================
---- linux-2.6.orig/arch/xtensa/kernel/process.c
-+++ linux-2.6/arch/xtensa/kernel/process.c
-@@ -113,9 +113,7 @@ void cpu_idle(void)
- while (1) {
- while (!need_resched())
- platform_idle();
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- }
- }
-
-Index: linux-2.6/init/main.c
+ void __init arch_init_irq(void)
+Index: linux-2.6/arch/mips/pnx8550/common/int.c
===================================================================
---- linux-2.6.orig/init/main.c
-+++ linux-2.6/init/main.c
-@@ -68,6 +68,7 @@
- #include <linux/shmem_fs.h>
- #include <linux/slab.h>
- #include <linux/perf_event.h>
-+#include <linux/posix-timers.h>
+--- linux-2.6.orig/arch/mips/pnx8550/common/int.c
++++ linux-2.6/arch/mips/pnx8550/common/int.c
+@@ -167,7 +167,7 @@ static struct irq_chip level_irq_type =
- #include <asm/io.h>
- #include <asm/bugs.h>
-@@ -367,9 +368,7 @@ static noinline void __init_refok rest_i
- * at least once to get things moving:
- */
- init_idle_bootup_task(current);
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
+ static struct irqaction gic_action = {
+ .handler = no_action,
+- .flags = IRQF_DISABLED,
++ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
+ .name = "GIC",
+ };
- /* Call into cpu_idle with preempt disabled */
- cpu_idle();
-@@ -501,6 +500,7 @@ asmlinkage void __init start_kernel(void
- parse_args("Booting kernel", static_command_line, __start___param,
- __stop___param - __start___param,
- &unknown_bootoption);
-+ softirq_early_init();
- /*
- * These use large bootmem allocations and must precede
- * kmem_cache_init()
-Index: linux-2.6/kernel/mutex.c
+Index: linux-2.6/arch/mips/sgi-ip22/ip22-int.c
===================================================================
---- linux-2.6.orig/kernel/mutex.c
-+++ linux-2.6/kernel/mutex.c
-@@ -240,9 +240,7 @@ __mutex_lock_common(struct mutex *lock,
+--- linux-2.6.orig/arch/mips/sgi-ip22/ip22-int.c
++++ linux-2.6/arch/mips/sgi-ip22/ip22-int.c
+@@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_ir
- /* didn't get the lock, go to sleep: */
- spin_unlock_mutex(&lock->wait_lock, flags);
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-+ schedule_preempt_disabled();
- spin_lock_mutex(&lock->wait_lock, flags);
- }
+ static struct irqaction local0_cascade = {
+ .handler = no_action,
+- .flags = IRQF_DISABLED,
++ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
+ .name = "local0 cascade",
+ };
-Index: linux-2.6/kernel/softirq.c
+ static struct irqaction local1_cascade = {
+ .handler = no_action,
+- .flags = IRQF_DISABLED,
++ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
+ .name = "local1 cascade",
+ };
+
+ static struct irqaction buserr = {
+ .handler = no_action,
+- .flags = IRQF_DISABLED,
++ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
+ .name = "Bus Error",
+ };
+
+ static struct irqaction map0_cascade = {
+ .handler = no_action,
+- .flags = IRQF_DISABLED,
++ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
+ .name = "mapable0 cascade",
+ };
+
+ #ifdef USE_LIO3_IRQ
+ static struct irqaction map1_cascade = {
+ .handler = no_action,
+- .flags = IRQF_DISABLED,
++ .flags = IRQF_DISABLED | IRQF_NO_THREAD,
+ .name = "mapable1 cascade",
+ };
+ #define SGI_INTERRUPTS SGINT_END
+Index: linux-2.6/arch/mips/sni/rm200.c
===================================================================
---- linux-2.6.orig/kernel/softirq.c
-+++ linux-2.6/kernel/softirq.c
-@@ -24,6 +24,7 @@
- #include <linux/ftrace.h>
- #include <linux/smp.h>
- #include <linux/tick.h>
-+#include <linux/locallock.h>
+--- linux-2.6.orig/arch/mips/sni/rm200.c
++++ linux-2.6/arch/mips/sni/rm200.c
+@@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void)
+ static struct irqaction sni_rm200_irq2 = {
+ .handler = no_action,
+ .name = "cascade",
++ .flags = IRQF_NO_THREAD,
+ };
- #define CREATE_TRACE_POINTS
- #include <trace/events/irq.h>
-@@ -61,6 +62,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
- "TASKLET", "SCHED", "HRTIMER", "RCU"
+ static struct resource sni_rm200_pic1_resource = {
+Index: linux-2.6/arch/mips/vr41xx/common/irq.c
+===================================================================
+--- linux-2.6.orig/arch/mips/vr41xx/common/irq.c
++++ linux-2.6/arch/mips/vr41xx/common/irq.c
+@@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS
+ static struct irqaction cascade_irqaction = {
+ .handler = no_action,
+ .name = "cascade",
++ .flags = IRQF_NO_THREAD,
};
-+#ifdef CONFIG_NO_HZ
-+# ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * On preempt-rt a softirq might be blocked on a lock. There might be
-+ * no other runnable task on this CPU because the lock owner runs on
-+ * some other CPU. So we have to go into idle with the pending bit
-+ * set. Therefor we need to check this otherwise we warn about false
-+ * positives which confuses users and defeats the whole purpose of
-+ * this test.
-+ *
-+ * This code is called with interrupts disabled.
-+ */
-+void softirq_check_pending_idle(void)
-+{
-+ static int rate_limit;
-+ u32 warnpending = 0, pending = local_softirq_pending();
-+
-+ if (rate_limit >= 10)
-+ return;
-+
-+ if (pending) {
-+ struct task_struct *tsk;
+ int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))
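All of these MIPS hunks implement one rule: once IRQ_FORCED_THREADING is selected (see the Kconfig hunk below), every handler is pushed into a thread unless it opts out, and a cascade handler that demultiplexes a secondary interrupt controller must keep running in hard-irq context. IRQF_NO_THREAD is that opt-out. A hypothetical request_irq() user would mark itself the same way (names here are invented):

static irqreturn_t example_demux_irq(int irq, void *dev_id)
{
	/* decode the secondary controller and chain the real handlers */
	return IRQ_HANDLED;
}

static int example_setup_cascade(unsigned int irq)
{
	return request_irq(irq, example_demux_irq, IRQF_NO_THREAD,
			   "example cascade", NULL);
}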
+Index: linux-2.6/arch/mips/Kconfig
+===================================================================
+--- linux-2.6.orig/arch/mips/Kconfig
++++ linux-2.6/arch/mips/Kconfig
+@@ -24,6 +24,7 @@ config MIPS
+ select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_SHOW
+ select HAVE_ARCH_JUMP_LABEL
++ select IRQ_FORCED_THREADING
+
+ menu "Machine selection"
+
+@@ -2038,7 +2039,7 @@ config CPU_R4400_WORKAROUNDS
+ #
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
+Index: linux-2.6/arch/mips/kernel/traps.c
+===================================================================
+--- linux-2.6.orig/arch/mips/kernel/traps.c
++++ linux-2.6/arch/mips/kernel/traps.c
+@@ -364,7 +364,7 @@ static int regs_to_trapnr(struct pt_regs
+ return (regs->cp0_cause >> 2) & 0x1f;
+ }
+
+-static DEFINE_SPINLOCK(die_lock);
++static DEFINE_RAW_SPINLOCK(die_lock);
+
+ void __noreturn die(const char *str, struct pt_regs *regs)
+ {
+@@ -378,7 +378,7 @@ void __noreturn die(const char *str, str
+ sig = 0;
+
+ console_verbose();
+- spin_lock_irq(&die_lock);
++ raw_spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+ #ifdef CONFIG_MIPS_MT_SMTC
+ mips_mt_regdump(dvpret);
+@@ -387,7 +387,7 @@ void __noreturn die(const char *str, str
+ printk("%s[#%d]:\n", str, ++die_counter);
+ show_registers(regs);
+ add_taint(TAINT_DIE);
+- spin_unlock_irq(&die_lock);
++ raw_spin_unlock_irq(&die_lock);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+Index: linux-2.6/arch/mips/kernel/signal.c
+===================================================================
+--- linux-2.6.orig/arch/mips/kernel/signal.c
++++ linux-2.6/arch/mips/kernel/signal.c
+@@ -603,6 +603,9 @@ static void do_signal(struct pt_regs *re
+ if (!user_mode(regs))
+ return;
+
++ local_irq_enable();
++ preempt_check_resched();
+
-+ tsk = __get_cpu_var(ksoftirqd);
-+ /*
-+ * The wakeup code in rtmutex.c wakes up the task
-+ * _before_ it sets pi_blocked_on to NULL under
-+ * tsk->pi_lock. So we need to check for both: state
-+ * and pi_blocked_on.
-+ */
-+ raw_spin_lock(&tsk->pi_lock);
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
+Index: linux-2.6/arch/arm/kernel/signal.c
+===================================================================
+--- linux-2.6.orig/arch/arm/kernel/signal.c
++++ linux-2.6/arch/arm/kernel/signal.c
+@@ -673,6 +673,9 @@ static void do_signal(struct pt_regs *re
+ if (!user_mode(regs))
+ return;
+
++ local_irq_enable();
++ preempt_check_resched();
+
-+ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
-+ warnpending = 1;
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+Index: linux-2.6/arch/arm/kernel/smp.c
+===================================================================
+--- linux-2.6.orig/arch/arm/kernel/smp.c
++++ linux-2.6/arch/arm/kernel/smp.c
+@@ -305,6 +305,18 @@ asmlinkage void __cpuinit secondary_star
+ * Enable local interrupts.
+ */
+ notify_cpu_starting(cpu);
+
-+ raw_spin_unlock(&tsk->pi_lock);
-+ }
++ /*
++ * OK, now it's safe to let the boot CPU continue. Wait for
++ * the CPU migration code to notice that the CPU is online
++ * before we continue. We need to do that before we enable
++ * interrupts, otherwise a wakeup of a kernel thread affine to
++ * this CPU might break the affinity and let hell break loose.
++ */
++ set_cpu_online(cpu, true);
++ while (!cpu_active(cpu))
++ cpu_relax();
+
-+ if (warnpending) {
-+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-+ pending);
-+ rate_limit++;
-+ }
-+}
-+# else
-+/*
-+ * On !PREEMPT_RT we just printk rate limited:
-+ */
-+void softirq_check_pending_idle(void)
-+{
-+ static int rate_limit;
-+
-+ if (rate_limit < 10) {
-+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-+ local_softirq_pending());
-+ rate_limit++;
-+ }
-+}
-+# endif
-+#endif
-+
- /*
- * we cannot loop indefinitely here to avoid userspace starvation,
- * but we also don't want to introduce a worst case 1/HZ latency
-@@ -76,6 +138,35 @@ static void wakeup_softirqd(void)
- wake_up_process(tsk);
+ local_irq_enable();
+ local_fiq_enable();
+
+@@ -318,15 +330,6 @@ asmlinkage void __cpuinit secondary_star
+ smp_store_cpu_info(cpu);
+
+ /*
+- * OK, now it's safe to let the boot CPU continue. Wait for
+- * the CPU migration code to notice that the CPU is online
+- * before we continue.
+- */
+- set_cpu_online(cpu, true);
+- while (!cpu_active(cpu))
+- cpu_relax();
+-
+- /*
+ * OK, it's off to the idle thread for us
+ */
+ cpu_idle();
+@@ -531,7 +534,7 @@ static void percpu_timer_stop(void)
}
+ #endif
-+static void handle_pending_softirqs(u32 pending, int cpu)
-+{
-+ struct softirq_action *h = softirq_vec;
-+ unsigned int prev_count = preempt_count();
-+
-+ local_irq_enable();
-+ for ( ; pending; h++, pending >>= 1) {
-+ unsigned int vec_nr = h - softirq_vec;
-+
-+ if (!(pending & 1))
-+ continue;
-+
-+ kstat_incr_softirqs_this_cpu(vec_nr);
-+ trace_softirq_entry(vec_nr);
-+ h->action(h);
-+ trace_softirq_exit(vec_nr);
-+ if (unlikely(prev_count != preempt_count())) {
-+ printk(KERN_ERR
-+ "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n",
-+ vec_nr, softirq_to_name[vec_nr], h->action,
-+ prev_count, (unsigned int) preempt_count());
-+ preempt_count() = prev_count;
-+ }
-+ rcu_bh_qs(cpu);
-+ }
-+ local_irq_disable();
-+}
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * preempt_count and SOFTIRQ_OFFSET usage:
- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
-@@ -206,7 +297,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
+-static DEFINE_SPINLOCK(stop_lock);
++static DEFINE_RAW_SPINLOCK(stop_lock);
- asmlinkage void __do_softirq(void)
+ /*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+@@ -540,10 +543,10 @@ static void ipi_cpu_stop(unsigned int cp
{
-- struct softirq_action *h;
- __u32 pending;
- int max_restart = MAX_SOFTIRQ_RESTART;
- int cpu;
-@@ -215,7 +305,7 @@ asmlinkage void __do_softirq(void)
- account_system_vtime(current);
+ if (system_state == SYSTEM_BOOTING ||
+ system_state == SYSTEM_RUNNING) {
+- spin_lock(&stop_lock);
++ raw_spin_lock(&stop_lock);
+ printk(KERN_CRIT "CPU%u: stopping\n", cpu);
+ dump_stack();
+- spin_unlock(&stop_lock);
++ raw_spin_unlock(&stop_lock);
+ }
- __local_bh_disable((unsigned long)__builtin_return_address(0),
-- SOFTIRQ_OFFSET);
-+ SOFTIRQ_OFFSET);
- lockdep_softirq_enter();
+ set_cpu_online(cpu, false);
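The secondary_start reordering in this file publishes the CPU as online and spins until the scheduler marks it active before local interrupts are enabled; otherwise an early wakeup could try to place a CPU-affine kthread on a core the migration code does not service yet. Condensed, the new bring-up order is:

        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);      /* visible before any irq fires */
        while (!cpu_active(cpu))
                cpu_relax();            /* wait for the migration code */
        local_irq_enable();
        local_fiq_enable();

The stop_lock change in the same file is the usual raw_spinlock_t conversion so the ipi_cpu_stop() path cannot sleep on RT.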
+Index: linux-2.6/arch/mips/kernel/i8259.c
+===================================================================
+--- linux-2.6.orig/arch/mips/kernel/i8259.c
++++ linux-2.6/arch/mips/kernel/i8259.c
+@@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi)
+ static struct irqaction irq2 = {
+ .handler = no_action,
+ .name = "cascade",
++ .flags = IRQF_NO_THREAD,
+ };
- cpu = smp_processor_id();
-@@ -223,36 +313,7 @@ asmlinkage void __do_softirq(void)
- /* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(0);
+ static struct resource pic1_io_resource = {
+Index: linux-2.6/kernel/time/clocksource.c
+===================================================================
+--- linux-2.6.orig/kernel/time/clocksource.c
++++ linux-2.6/kernel/time/clocksource.c
+@@ -186,6 +186,7 @@ static struct timer_list watchdog_timer;
+ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
+ static DEFINE_SPINLOCK(watchdog_lock);
+ static int watchdog_running;
++static atomic_t watchdog_reset_pending;
-- local_irq_enable();
--
-- h = softirq_vec;
--
-- do {
-- if (pending & 1) {
-- unsigned int vec_nr = h - softirq_vec;
-- int prev_count = preempt_count();
--
-- kstat_incr_softirqs_this_cpu(vec_nr);
--
-- trace_softirq_entry(vec_nr);
-- h->action(h);
-- trace_softirq_exit(vec_nr);
-- if (unlikely(prev_count != preempt_count())) {
-- printk(KERN_ERR "huh, entered softirq %u %s %p"
-- "with preempt_count %08x,"
-- " exited with %08x?\n", vec_nr,
-- softirq_to_name[vec_nr], h->action,
-- prev_count, preempt_count());
-- preempt_count() = prev_count;
-- }
--
-- rcu_bh_qs(cpu);
-- }
-- h++;
-- pending >>= 1;
-- } while (pending);
--
-- local_irq_disable();
-+ handle_pending_softirqs(pending, cpu);
+ static int clocksource_watchdog_kthread(void *data);
+ static void __clocksource_change_rating(struct clocksource *cs, int rating);
+@@ -247,12 +248,14 @@ static void clocksource_watchdog(unsigne
+ struct clocksource *cs;
+ cycle_t csnow, wdnow;
+ int64_t wd_nsec, cs_nsec;
+- int next_cpu;
++ int next_cpu, reset_pending;
- pending = local_softirq_pending();
- if (pending && --max_restart)
-@@ -267,6 +328,26 @@ asmlinkage void __do_softirq(void)
- __local_bh_enable(SOFTIRQ_OFFSET);
- }
+ spin_lock(&watchdog_lock);
+ if (!watchdog_running)
+ goto out;
-+/*
-+ * Called with preemption disabled from run_ksoftirqd()
-+ */
-+static int ksoftirqd_do_softirq(int cpu)
-+{
-+ /*
-+ * Preempt disable stops cpu going offline.
-+ * If already offline, we'll be on wrong CPU:
-+ * don't process.
-+ */
-+ if (cpu_is_offline(cpu))
-+ return -1;
-+
-+ local_irq_disable();
-+ if (local_softirq_pending())
-+ __do_softirq();
-+ local_irq_enable();
-+ return 0;
-+}
++ reset_pending = atomic_read(&watchdog_reset_pending);
+
- #ifndef __ARCH_HAS_DO_SOFTIRQ
+ list_for_each_entry(cs, &watchdog_list, wd_list) {
- asmlinkage void do_softirq(void)
-@@ -289,6 +370,178 @@ asmlinkage void do_softirq(void)
+ /* Clocksource already marked unstable? */
+@@ -268,7 +271,8 @@ static void clocksource_watchdog(unsigne
+ local_irq_enable();
- #endif
+ /* Clocksource initialized ? */
+- if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
++ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
++ atomic_read(&watchdog_reset_pending)) {
+ cs->flags |= CLOCK_SOURCE_WATCHDOG;
+ cs->wd_last = wdnow;
+ cs->cs_last = csnow;
+@@ -283,8 +287,11 @@ static void clocksource_watchdog(unsigne
+ cs->cs_last = csnow;
+ cs->wd_last = wdnow;
-+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
-+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
-+static inline void ksoftirqd_set_sched_params(void) { }
-+static inline void ksoftirqd_clr_sched_params(void) { }
-+
-+#else /* !PREEMPT_RT_FULL */
-+
-+/*
-+ * On RT we serialize softirq execution with a cpu local lock
-+ */
-+static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
-+static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
-+
-+static void __do_softirq(void);
-+
-+void __init softirq_early_init(void)
-+{
-+ local_irq_lock_init(local_softirq_lock);
-+}
-+
-+void local_bh_disable(void)
-+{
-+ migrate_disable();
-+ current->softirq_nestcnt++;
-+}
-+EXPORT_SYMBOL(local_bh_disable);
-+
-+void local_bh_enable(void)
-+{
-+ if (WARN_ON(current->softirq_nestcnt == 0))
-+ return;
++ if (atomic_read(&watchdog_reset_pending))
++ continue;
+
-+ if ((current->softirq_nestcnt == 1) &&
-+ local_softirq_pending() &&
-+ local_trylock(local_softirq_lock)) {
+ /* Check the deviation from the watchdog clocksource. */
+- if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
++ if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
+ clocksource_unstable(cs, cs_nsec - wd_nsec);
+ continue;
+ }
+@@ -303,6 +310,13 @@ static void clocksource_watchdog(unsigne
+ }
+
+ /*
++ * We only clear watchdog_reset_pending once we have done a
++ * full cycle through all clocksources.
++ */
++ if (reset_pending)
++ atomic_dec(&watchdog_reset_pending);
+
-+ local_irq_disable();
-+ if (local_softirq_pending())
-+ __do_softirq();
-+ local_unlock(local_softirq_lock);
-+ local_irq_enable();
-+ WARN_ON(current->softirq_nestcnt != 1);
-+ }
-+ current->softirq_nestcnt--;
-+ migrate_enable();
-+}
-+EXPORT_SYMBOL(local_bh_enable);
-+
-+void local_bh_enable_ip(unsigned long ip)
-+{
-+ local_bh_enable();
-+}
-+EXPORT_SYMBOL(local_bh_enable_ip);
-+
-+/* For tracing */
-+int notrace __in_softirq(void)
-+{
-+ if (__get_cpu_var(local_softirq_lock).owner == current)
-+ return __get_cpu_var(local_softirq_lock).nestcnt;
-+ return 0;
-+}
-+
-+int in_serving_softirq(void)
-+{
-+ int res;
-+
-+ preempt_disable();
-+ res = __get_cpu_var(local_softirq_runner) == current;
-+ preempt_enable();
-+ return res;
-+}
-+
-+/*
-+ * Called with bh and local interrupts disabled. For full RT cpu must
-+ * be pinned.
-+ */
-+static void __do_softirq(void)
-+{
-+ u32 pending = local_softirq_pending();
-+ int cpu = smp_processor_id();
-+
-+ current->softirq_nestcnt++;
-+
-+ /* Reset the pending bitmask before enabling irqs */
-+ set_softirq_pending(0);
-+
-+ __get_cpu_var(local_softirq_runner) = current;
-+
-+ lockdep_softirq_enter();
-+
-+ handle_pending_softirqs(pending, cpu);
-+
-+ pending = local_softirq_pending();
-+ if (pending)
-+ wakeup_softirqd();
-+
-+ lockdep_softirq_exit();
-+ __get_cpu_var(local_softirq_runner) = NULL;
-+
-+ current->softirq_nestcnt--;
-+}
-+
-+static int __thread_do_softirq(int cpu)
-+{
-+ /*
-+ * Prevent the current cpu from going offline.
-+ * pin_current_cpu() can reenable preemption and block on the
-+ * hotplug mutex. When it returns, the current cpu is
-+ * pinned. It might be the wrong one, but the offline check
-+ * below catches that.
-+ */
-+ pin_current_cpu();
-+ /*
-+ * If called from ksoftirqd (cpu >= 0) we need to check
-+ * whether we are on the wrong cpu due to cpu offlining. If
-+ * called via thread_do_softirq() no action required.
-+ */
-+ if (cpu >= 0 && cpu_is_offline(cpu)) {
-+ unpin_current_cpu();
-+ return -1;
-+ }
-+ preempt_enable();
-+ local_lock(local_softirq_lock);
-+ local_irq_disable();
+ /*
-+ * We cannot switch stacks on RT as we want to be able to
-+ * schedule!
-+ */
-+ if (local_softirq_pending())
-+ __do_softirq();
-+ local_unlock(local_softirq_lock);
-+ unpin_current_cpu();
-+ preempt_disable();
-+ local_irq_enable();
-+ return 0;
-+}
-+
-+/*
-+ * Called from netif_rx_ni(). Preemption enabled.
-+ */
-+void thread_do_softirq(void)
-+{
-+ if (!in_serving_softirq()) {
-+ preempt_disable();
-+ __thread_do_softirq(-1);
-+ preempt_enable();
-+ }
-+}
-+
-+static int ksoftirqd_do_softirq(int cpu)
-+{
-+ return __thread_do_softirq(cpu);
-+}
-+
-+static inline void local_bh_disable_nort(void) { }
-+static inline void _local_bh_enable_nort(void) { }
-+
-+static inline void ksoftirqd_set_sched_params(void)
-+{
-+ struct sched_param param = { .sched_priority = 1 };
-+
-+ sched_setscheduler(current, SCHED_FIFO, &param);
-+}
-+
-+static inline void ksoftirqd_clr_sched_params(void)
-+{
-+ struct sched_param param = { .sched_priority = 0 };
-+
-+ sched_setscheduler(current, SCHED_NORMAL, &param);
-+}
-+
-+#endif /* PREEMPT_RT_FULL */
- /*
- * Enter an interrupt context.
- */
-@@ -302,9 +555,9 @@ void irq_enter(void)
- * Prevent raise_softirq from needlessly waking up ksoftirqd
- * here, as softirq will be serviced on return from interrupt.
- */
-- local_bh_disable();
-+ local_bh_disable_nort();
- tick_check_idle(cpu);
-- _local_bh_enable();
-+ _local_bh_enable_nort();
- }
+ * Cycle through CPUs to check if the CPUs stay synchronized
+ * to each other.
+ */
+@@ -344,23 +358,7 @@ static inline void clocksource_reset_wat
- __irq_enter();
-@@ -313,6 +566,7 @@ void irq_enter(void)
- #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
- static inline void invoke_softirq(void)
- {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- if (!force_irqthreads)
- __do_softirq();
- else {
-@@ -321,10 +575,14 @@ static inline void invoke_softirq(void)
- wakeup_softirqd();
- __local_bh_enable(SOFTIRQ_OFFSET);
- }
-+#else
-+ wakeup_softirqd();
-+#endif
- }
- #else
- static inline void invoke_softirq(void)
+ static void clocksource_resume_watchdog(void)
{
-+#ifndef CONFIG_PREEMPT_RT_FULL
- if (!force_irqthreads)
- do_softirq();
- else {
-@@ -333,6 +591,9 @@ static inline void invoke_softirq(void)
- wakeup_softirqd();
- __local_bh_enable(SOFTIRQ_OFFSET);
- }
-+#else
-+ wakeup_softirqd();
-+#endif
- }
- #endif
-
-@@ -353,7 +614,7 @@ void irq_exit(void)
- if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
- tick_nohz_stop_sched_tick(0);
- #endif
-- preempt_enable_no_resched();
-+ __preempt_enable_no_resched();
+- unsigned long flags;
+-
+- /*
+- * We use trylock here to avoid a potential dead lock when
+- * kgdb calls this code after the kernel has been stopped with
+- * watchdog_lock held. When watchdog_lock is held we just
+- * return and accept, that the watchdog might trigger and mark
+- * the monitored clock source (usually TSC) unstable.
+- *
+- * This does not affect the other caller clocksource_resume()
+- * because at this point the kernel is UP, interrupts are
+- * disabled and nothing can hold watchdog_lock.
+- */
+- if (!spin_trylock_irqsave(&watchdog_lock, flags))
+- return;
+- clocksource_reset_watchdog();
+- spin_unlock_irqrestore(&watchdog_lock, flags);
++ atomic_inc(&watchdog_reset_pending);
}
- /*
-@@ -739,29 +1000,21 @@ void __init softirq_init(void)
+ static void clocksource_enqueue_watchdog(struct clocksource *cs)
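The clocksource watchdog rework replaces the old trylock-based clocksource_resume_watchdog() with an atomic counter: any context, including kgdb with watchdog_lock held, can post a reset request, and the watchdog timer consumes it only after a full pass over the list, skipping deviation checks in the meantime. The pattern in isolation, as a sketch with demo names:

/* Sketch: lock-free "reset requested" handshake with a periodic timer. */
static atomic_t demo_reset_pending;

void demo_request_reset(void)           /* safe from NMI/kgdb context */
{
        atomic_inc(&demo_reset_pending);
}

void demo_timer_tick(void)
{
        int pending = atomic_read(&demo_reset_pending);

        /* ... skip stale comparisons while a reset is pending ... */

        if (pending)                    /* one full pass completed */
                atomic_dec(&demo_reset_pending);
}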
+Index: linux-2.6/kernel/watchdog.c
+===================================================================
+--- linux-2.6.orig/kernel/watchdog.c
++++ linux-2.6/kernel/watchdog.c
+@@ -208,6 +208,8 @@ static struct perf_event_attr wd_hw_attr
+ .disabled = 1,
+ };
- static int run_ksoftirqd(void * __bind_cpu)
- {
-+ ksoftirqd_set_sched_params();
++static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
+
- set_current_state(TASK_INTERRUPTIBLE);
+ /* Callback function for perf event subsystem */
+ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
+ struct perf_sample_data *data,
+@@ -234,10 +236,19 @@ static void watchdog_overflow_callback(s
+ if (__this_cpu_read(hard_watchdog_warn) == true)
+ return;
- while (!kthread_should_stop()) {
- preempt_disable();
-- if (!local_softirq_pending()) {
-- preempt_enable_no_resched();
-- schedule();
-- preempt_disable();
-- }
-+ if (!local_softirq_pending())
-+ schedule_preempt_disabled();
+- if (hardlockup_panic)
++ /*
++ * If early-printk is enabled then make sure we do not
++ * lock up in printk() and kill console logging:
++ */
++ printk_kill();
++
++ if (hardlockup_panic) {
+ panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+- else
++ } else {
++ raw_spin_lock(&watchdog_output_lock);
+ WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
++ raw_spin_unlock(&watchdog_output_lock);
++ }
- __set_current_state(TASK_RUNNING);
+ __this_cpu_write(hard_watchdog_warn, true);
+ return;
+@@ -320,7 +331,7 @@ static enum hrtimer_restart watchdog_tim
+ */
+ static int watchdog(void *unused)
+ {
+- static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
++ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
- while (local_softirq_pending()) {
-- /* Preempt disable stops cpu going offline.
-- If already offline, we'll be on wrong CPU:
-- don't process */
-- if (cpu_is_offline((long)__bind_cpu))
-+ if (ksoftirqd_do_softirq((long) __bind_cpu))
- goto wait_to_die;
-- local_irq_disable();
-- if (local_softirq_pending())
-- __do_softirq();
-- local_irq_enable();
-- preempt_enable_no_resched();
-+ __preempt_enable_no_resched();
- cond_resched();
- preempt_disable();
- rcu_note_context_switch((long)__bind_cpu);
-@@ -774,6 +1027,7 @@ static int run_ksoftirqd(void * __bind_c
+ sched_setscheduler(current, SCHED_FIFO, &param);
+@@ -349,7 +360,8 @@ static int watchdog(void *unused)
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ __set_current_state(TASK_RUNNING);
+-
++ param.sched_priority = 0;
++ sched_setscheduler(current, SCHED_NORMAL, &param);
+ return 0;
+ }
- wait_to_die:
- preempt_enable();
-+ ksoftirqd_clr_sched_params();
- /* Wait for kthread_stop */
- set_current_state(TASK_INTERRUPTIBLE);
- while (!kthread_should_stop()) {
-Index: linux-2.6/include/linux/kprobes.h
-===================================================================
---- linux-2.6.orig/include/linux/kprobes.h
-+++ linux-2.6/include/linux/kprobes.h
-@@ -181,7 +181,7 @@ struct kretprobe {
- int nmissed;
- size_t data_size;
- struct hlist_head free_instances;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- };
+@@ -422,6 +434,7 @@ static void watchdog_prepare_cpu(int cpu
+ WARN_ON(per_cpu(softlockup_watchdog, cpu));
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
++ hrtimer->irqsafe = 1;
+ }
- struct kretprobe_instance {
-Index: linux-2.6/kernel/kprobes.c
+ static int watchdog_enable(int cpu)
+Index: linux-2.6/kernel/rtmutex-debug.c
===================================================================
---- linux-2.6.orig/kernel/kprobes.c
-+++ linux-2.6/kernel/kprobes.c
-@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
- static DEFINE_MUTEX(kprobe_mutex);
- static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
- static struct {
-- spinlock_t lock ____cacheline_aligned_in_smp;
-+ raw_spinlock_t lock ____cacheline_aligned_in_smp;
- } kretprobe_table_locks[KPROBE_TABLE_SIZE];
+--- linux-2.6.orig/kernel/rtmutex-debug.c
++++ linux-2.6/kernel/rtmutex-debug.c
+@@ -29,61 +29,6 @@
--static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
-+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
- {
- return &(kretprobe_table_locks[hash].lock);
- }
-@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kr
- hlist_del(&ri->hlist);
- INIT_HLIST_NODE(&ri->hlist);
- if (likely(rp)) {
-- spin_lock(&rp->lock);
-+ raw_spin_lock(&rp->lock);
- hlist_add_head(&ri->hlist, &rp->free_instances);
-- spin_unlock(&rp->lock);
-+ raw_spin_unlock(&rp->lock);
- } else
- /* Unregistering */
- hlist_add_head(&ri->hlist, head);
-@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struc
- __acquires(hlist_lock)
+ #include "rtmutex_common.h"
+
+-# define TRACE_WARN_ON(x) WARN_ON(x)
+-# define TRACE_BUG_ON(x) BUG_ON(x)
+-
+-# define TRACE_OFF() \
+-do { \
+- if (rt_trace_on) { \
+- rt_trace_on = 0; \
+- console_verbose(); \
+- if (raw_spin_is_locked(&current->pi_lock)) \
+- raw_spin_unlock(&current->pi_lock); \
+- } \
+-} while (0)
+-
+-# define TRACE_OFF_NOLOCK() \
+-do { \
+- if (rt_trace_on) { \
+- rt_trace_on = 0; \
+- console_verbose(); \
+- } \
+-} while (0)
+-
+-# define TRACE_BUG_LOCKED() \
+-do { \
+- TRACE_OFF(); \
+- BUG(); \
+-} while (0)
+-
+-# define TRACE_WARN_ON_LOCKED(c) \
+-do { \
+- if (unlikely(c)) { \
+- TRACE_OFF(); \
+- WARN_ON(1); \
+- } \
+-} while (0)
+-
+-# define TRACE_BUG_ON_LOCKED(c) \
+-do { \
+- if (unlikely(c)) \
+- TRACE_BUG_LOCKED(); \
+-} while (0)
+-
+-#ifdef CONFIG_SMP
+-# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
+-#else
+-# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
+-#endif
+-
+-/*
+- * deadlock detection flag. We turn it off when we detect
+- * the first problem because we dont want to recurse back
+- * into the tracing code when doing error printk or
+- * executing a BUG():
+- */
+-static int rt_trace_on = 1;
+-
+ static void printk_task(struct task_struct *p)
{
- unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-- spinlock_t *hlist_lock;
-+ raw_spinlock_t *hlist_lock;
+ if (p)
+@@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex
- *head = &kretprobe_inst_table[hash];
- hlist_lock = kretprobe_table_lock_ptr(hash);
-- spin_lock_irqsave(hlist_lock, *flags);
-+ raw_spin_lock_irqsave(hlist_lock, *flags);
+ void rt_mutex_debug_task_free(struct task_struct *task)
+ {
+- WARN_ON(!plist_head_empty(&task->pi_waiters));
+- WARN_ON(task->pi_blocked_on);
++ DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
++ DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
- static void __kprobes kretprobe_table_lock(unsigned long hash,
- unsigned long *flags)
- __acquires(hlist_lock)
+ /*
+@@ -125,7 +70,7 @@ void debug_rt_mutex_deadlock(int detect,
{
-- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-- spin_lock_irqsave(hlist_lock, *flags);
-+ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-+ raw_spin_lock_irqsave(hlist_lock, *flags);
- }
+ struct task_struct *task;
- void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
-@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(str
- __releases(hlist_lock)
+- if (!rt_trace_on || detect || !act_waiter)
++ if (!debug_locks || detect || !act_waiter)
+ return;
+
+ task = rt_mutex_owner(act_waiter->lock);
+@@ -139,7 +84,7 @@ void debug_rt_mutex_print_deadlock(struc
{
- unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-- spinlock_t *hlist_lock;
-+ raw_spinlock_t *hlist_lock;
+ struct task_struct *task;
- hlist_lock = kretprobe_table_lock_ptr(hash);
-- spin_unlock_irqrestore(hlist_lock, *flags);
-+ raw_spin_unlock_irqrestore(hlist_lock, *flags);
+- if (!waiter->deadlock_lock || !rt_trace_on)
++ if (!waiter->deadlock_lock || !debug_locks)
+ return;
+
+ rcu_read_lock();
+@@ -149,7 +94,8 @@ void debug_rt_mutex_print_deadlock(struc
+ return;
+ }
+
+- TRACE_OFF_NOLOCK();
++ if (!debug_locks_off())
++ return;
+
+ printk("\n============================================\n");
+ printk( "[ BUG: circular locking deadlock detected! ]\n");
+@@ -180,7 +126,6 @@ void debug_rt_mutex_print_deadlock(struc
+
+ printk("[ turning off deadlock detection."
+ "Please report this trace. ]\n\n");
+- local_irq_disable();
}
- static void __kprobes kretprobe_table_unlock(unsigned long hash,
- unsigned long *flags)
- __releases(hlist_lock)
+ void debug_rt_mutex_lock(struct rt_mutex *lock)
+@@ -189,7 +134,7 @@ void debug_rt_mutex_lock(struct rt_mutex
+
+ void debug_rt_mutex_unlock(struct rt_mutex *lock)
{
-- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-- spin_unlock_irqrestore(hlist_lock, *flags);
-+ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-+ raw_spin_unlock_irqrestore(hlist_lock, *flags);
+- TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
++ DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}
- /*
-@@ -1650,12 +1650,12 @@ static int __kprobes pre_handler_kretpro
+ void
+@@ -199,7 +144,7 @@ debug_rt_mutex_proxy_lock(struct rt_mute
- /*TODO: consider to only swap the RA after the last pre_handler fired */
- hash = hash_ptr(current, KPROBE_HASH_BITS);
-- spin_lock_irqsave(&rp->lock, flags);
-+ raw_spin_lock_irqsave(&rp->lock, flags);
- if (!hlist_empty(&rp->free_instances)) {
- ri = hlist_entry(rp->free_instances.first,
- struct kretprobe_instance, hlist);
- hlist_del(&ri->hlist);
-- spin_unlock_irqrestore(&rp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rp->lock, flags);
+ void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
+ {
+- TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
++ DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
+ }
- ri->rp = rp;
- ri->task = current;
-@@ -1672,7 +1672,7 @@ static int __kprobes pre_handler_kretpro
- kretprobe_table_unlock(hash, &flags);
- } else {
- rp->nmissed++;
-- spin_unlock_irqrestore(&rp->lock, flags);
-+ raw_spin_unlock_irqrestore(&rp->lock, flags);
- }
- return 0;
+ void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+@@ -213,8 +158,8 @@ void debug_rt_mutex_init_waiter(struct r
+ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
+ {
+ put_pid(waiter->deadlock_task_pid);
+- TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
+- TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
++ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
++ DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+ memset(waiter, 0x22, sizeof(*waiter));
}
-@@ -1708,7 +1708,7 @@ int __kprobes register_kretprobe(struct
- rp->maxactive = num_possible_cpus();
- #endif
- }
-- spin_lock_init(&rp->lock);
-+ raw_spin_lock_init(&rp->lock);
- INIT_HLIST_HEAD(&rp->free_instances);
- for (i = 0; i < rp->maxactive; i++) {
- inst = kmalloc(sizeof(struct kretprobe_instance) +
-@@ -1946,7 +1946,7 @@ static int __init init_kprobes(void)
- for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
- INIT_HLIST_HEAD(&kprobe_table[i]);
- INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-- spin_lock_init(&(kretprobe_table_locks[i].lock));
-+ raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
- }
- /*
-Index: linux-2.6/include/linux/percpu_counter.h
+Index: linux-2.6/arch/arm/kernel/perf_event.c
===================================================================
---- linux-2.6.orig/include/linux/percpu_counter.h
-+++ linux-2.6/include/linux/percpu_counter.h
-@@ -16,7 +16,7 @@
- #ifdef CONFIG_SMP
+--- linux-2.6.orig/arch/arm/kernel/perf_event.c
++++ linux-2.6/arch/arm/kernel/perf_event.c
+@@ -420,7 +420,7 @@ armpmu_reserve_hardware(void)
+ continue;
- struct percpu_counter {
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- s64 count;
- #ifdef CONFIG_HOTPLUG_CPU
- struct list_head list; /* All percpu_counters are on a list */
-Index: linux-2.6/lib/percpu_counter.c
+ err = request_irq(irq, handle_irq,
+- IRQF_DISABLED | IRQF_NOBALANCING,
++ IRQF_DISABLED | IRQF_NOBALANCING | IRQF_NO_THREAD,
+ "armpmu", NULL);
+ if (err) {
+ pr_warning("unable to request IRQ%d for ARM perf "
+Index: linux-2.6/arch/arm/Kconfig
===================================================================
---- linux-2.6.orig/lib/percpu_counter.c
-+++ linux-2.6/lib/percpu_counter.c
-@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_co
- {
- int cpu;
-
-- spin_lock(&fbc->lock);
-+ raw_spin_lock(&fbc->lock);
- for_each_possible_cpu(cpu) {
- s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
- *pcount = 0;
- }
- fbc->count = amount;
-- spin_unlock(&fbc->lock);
-+ raw_spin_unlock(&fbc->lock);
- }
- EXPORT_SYMBOL(percpu_counter_set);
-
-@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_
- preempt_disable();
- count = __this_cpu_read(*fbc->counters) + amount;
- if (count >= batch || count <= -batch) {
-- spin_lock(&fbc->lock);
-+ raw_spin_lock(&fbc->lock);
- fbc->count += count;
- __this_cpu_write(*fbc->counters, 0);
-- spin_unlock(&fbc->lock);
-+ raw_spin_unlock(&fbc->lock);
- } else {
- __this_cpu_write(*fbc->counters, count);
- }
-@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_c
- s64 ret;
- int cpu;
+--- linux-2.6.orig/arch/arm/Kconfig
++++ linux-2.6/arch/arm/Kconfig
+@@ -29,6 +29,7 @@ config ARM
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_SPARSE_IRQ
+ select GENERIC_IRQ_SHOW
++ select IRQ_FORCED_THREADING
+ help
+ The ARM series is a line of low-power-consumption RISC chip designs
+ licensed by ARM Ltd and targeted at embedded applications and
+@@ -1510,7 +1511,7 @@ config HAVE_ARCH_PFN_VALID
-- spin_lock(&fbc->lock);
-+ raw_spin_lock(&fbc->lock);
- ret = fbc->count;
- for_each_online_cpu(cpu) {
- s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
- ret += *pcount;
- }
-- spin_unlock(&fbc->lock);
-+ raw_spin_unlock(&fbc->lock);
- return ret;
- }
- EXPORT_SYMBOL(__percpu_counter_sum);
-@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
- int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
- struct lock_class_key *key)
- {
-- spin_lock_init(&fbc->lock);
-+ raw_spin_lock_init(&fbc->lock);
- lockdep_set_class(&fbc->lock, key);
- fbc->count = amount;
- fbc->counters = alloc_percpu(s32);
-@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotc
- s32 *pcount;
- unsigned long flags;
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on MMU
++ depends on MMU && !PREEMPT_RT_FULL
+ help
+ The address space of ARM processors is only 4 Gigabytes large
+ and it has to accommodate user address space, kernel address
+Index: linux-2.6/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/platforms/85xx/mpc85xx_cds.c
++++ linux-2.6/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+@@ -178,7 +178,7 @@ static irqreturn_t mpc85xx_8259_cascade_
-- spin_lock_irqsave(&fbc->lock, flags);
-+ raw_spin_lock_irqsave(&fbc->lock, flags);
- pcount = per_cpu_ptr(fbc->counters, cpu);
- fbc->count += *pcount;
- *pcount = 0;
-- spin_unlock_irqrestore(&fbc->lock, flags);
-+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
- }
- mutex_unlock(&percpu_counters_lock);
- #endif
-Index: linux-2.6/kernel/cgroup.c
+ static struct irqaction mpc85xxcds_8259_irqaction = {
+ .handler = mpc85xx_8259_cascade_action,
+- .flags = IRQF_SHARED,
++ .flags = IRQF_SHARED | IRQF_NO_THREAD,
+ .name = "8259 cascade",
+ };
+ #endif /* PPC_I8259 */
+Index: linux-2.6/arch/powerpc/Kconfig
===================================================================
---- linux-2.6.orig/kernel/cgroup.c
-+++ linux-2.6/kernel/cgroup.c
-@@ -263,7 +263,7 @@ list_for_each_entry(_root, &roots, root_
- /* the list of cgroups eligible for automatic release. Protected by
- * release_list_lock */
- static LIST_HEAD(release_list);
--static DEFINE_SPINLOCK(release_list_lock);
-+static DEFINE_RAW_SPINLOCK(release_list_lock);
- static void cgroup_release_agent(struct work_struct *work);
- static DECLARE_WORK(release_agent_work, cgroup_release_agent);
- static void check_for_release(struct cgroup *cgrp);
-@@ -4010,11 +4010,11 @@ static int cgroup_rmdir(struct inode *un
- finish_wait(&cgroup_rmdir_waitq, &wait);
- clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
+--- linux-2.6.orig/arch/powerpc/Kconfig
++++ linux-2.6/arch/powerpc/Kconfig
+@@ -69,10 +69,11 @@ config LOCKDEP_SUPPORT
-- spin_lock(&release_list_lock);
-+ raw_spin_lock(&release_list_lock);
- set_bit(CGRP_REMOVED, &cgrp->flags);
- if (!list_empty(&cgrp->release_list))
- list_del_init(&cgrp->release_list);
-- spin_unlock(&release_list_lock);
-+ raw_spin_unlock(&release_list_lock);
+ config RWSEM_GENERIC_SPINLOCK
+ bool
++ default y if PREEMPT_RT_FULL
- cgroup_lock_hierarchy(cgrp->root);
- /* delete this cgroup from parent->children */
-@@ -4667,13 +4667,13 @@ static void check_for_release(struct cgr
- * already queued for a userspace notification, queue
- * it now */
- int need_schedule_work = 0;
-- spin_lock(&release_list_lock);
-+ raw_spin_lock(&release_list_lock);
- if (!cgroup_is_removed(cgrp) &&
- list_empty(&cgrp->release_list)) {
- list_add(&cgrp->release_list, &release_list);
- need_schedule_work = 1;
- }
-- spin_unlock(&release_list_lock);
-+ raw_spin_unlock(&release_list_lock);
- if (need_schedule_work)
- schedule_work(&release_agent_work);
- }
-@@ -4725,7 +4725,7 @@ static void cgroup_release_agent(struct
- {
- BUG_ON(work != &release_agent_work);
- mutex_lock(&cgroup_mutex);
-- spin_lock(&release_list_lock);
-+ raw_spin_lock(&release_list_lock);
- while (!list_empty(&release_list)) {
- char *argv[3], *envp[3];
- int i;
-@@ -4734,7 +4734,7 @@ static void cgroup_release_agent(struct
- struct cgroup,
- release_list);
- list_del_init(&cgrp->release_list);
-- spin_unlock(&release_list_lock);
-+ raw_spin_unlock(&release_list_lock);
- pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!pathbuf)
- goto continue_free;
-@@ -4764,9 +4764,9 @@ static void cgroup_release_agent(struct
- continue_free:
- kfree(pathbuf);
- kfree(agentbuf);
-- spin_lock(&release_list_lock);
-+ raw_spin_lock(&release_list_lock);
- }
-- spin_unlock(&release_list_lock);
-+ raw_spin_unlock(&release_list_lock);
- mutex_unlock(&cgroup_mutex);
- }
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
-Index: linux-2.6/include/linux/proportions.h
-===================================================================
---- linux-2.6.orig/include/linux/proportions.h
-+++ linux-2.6/include/linux/proportions.h
-@@ -58,7 +58,7 @@ struct prop_local_percpu {
- */
- int shift;
- unsigned long period;
-- spinlock_t lock; /* protect the snapshot state */
-+ raw_spinlock_t lock; /* protect the snapshot state */
- };
+ config GENERIC_LOCKBREAK
+ bool
+@@ -134,6 +135,7 @@ config PPC
+ select GENERIC_IRQ_SHOW_LEVEL
+ select HAVE_RCU_TABLE_FREE if SMP
+ select HAVE_SYSCALL_TRACEPOINTS
++ select IRQ_FORCED_THREADING
- int prop_local_init_percpu(struct prop_local_percpu *pl);
-@@ -106,11 +106,11 @@ struct prop_local_single {
- */
- unsigned long period;
- int shift;
-- spinlock_t lock; /* protect the snapshot state */
-+ raw_spinlock_t lock; /* protect the snapshot state */
- };
+ config EARLY_PRINTK
+ bool
+@@ -271,7 +273,7 @@ menu "Kernel options"
- #define INIT_PROP_LOCAL_SINGLE(name) \
--{ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
-+{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- }
+ config HIGHMEM
+ bool "High memory support"
+- depends on PPC32
++ depends on PPC32 && !PREEMPT_RT_FULL
- int prop_local_init_single(struct prop_local_single *pl);
-Index: linux-2.6/lib/proportions.c
+ source kernel/time/Kconfig
+ source kernel/Kconfig.hz
+Index: linux-2.6/include/linux/sched.h
===================================================================
---- linux-2.6.orig/lib/proportions.c
-+++ linux-2.6/lib/proportions.c
-@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigne
-
- int prop_local_init_percpu(struct prop_local_percpu *pl)
- {
-- spin_lock_init(&pl->lock);
-+ raw_spin_lock_init(&pl->lock);
- pl->shift = 0;
- pl->period = 0;
- return percpu_counter_init(&pl->events, 0);
-@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global
- if (pl->period == global_period)
- return;
+--- linux-2.6.orig/include/linux/sched.h
++++ linux-2.6/include/linux/sched.h
+@@ -63,6 +63,7 @@ struct sched_param {
+ #include <linux/nodemask.h>
+ #include <linux/mm_types.h>
-- spin_lock_irqsave(&pl->lock, flags);
-+ raw_spin_lock_irqsave(&pl->lock, flags);
- prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
++#include <asm/kmap_types.h>
+ #include <asm/system.h>
+ #include <asm/page.h>
+ #include <asm/ptrace.h>
+@@ -90,6 +91,7 @@ struct sched_param {
+ #include <linux/task_io_accounting.h>
+ #include <linux/latencytop.h>
+ #include <linux/cred.h>
++#include <linux/hardirq.h>
- /*
-@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global
- percpu_counter_set(&pl->events, 0);
+ #include <asm/processor.h>
- pl->period = global_period;
-- spin_unlock_irqrestore(&pl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pl->lock, flags);
- }
+@@ -359,6 +361,7 @@ extern signed long schedule_timeout_inte
+ extern signed long schedule_timeout_killable(signed long timeout);
+ extern signed long schedule_timeout_uninterruptible(signed long timeout);
+ asmlinkage void schedule(void);
++extern void schedule_preempt_disabled(void);
+ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
- /*
-@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_de
+ struct nsproxy;
+@@ -510,7 +513,7 @@ struct task_cputime {
+ struct thread_group_cputimer {
+ struct task_cputime cputime;
+ int running;
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ };
- int prop_local_init_single(struct prop_local_single *pl)
- {
-- spin_lock_init(&pl->lock);
-+ raw_spin_lock_init(&pl->lock);
- pl->shift = 0;
- pl->period = 0;
- pl->events = 0;
-@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global
- if (pl->period == global_period)
- return;
+ #include <linux/rwsem.h>
+@@ -1070,6 +1073,7 @@ struct sched_domain;
+ #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
+ #define WF_FORK 0x02 /* child wakeup after fork */
+ #define WF_MIGRATED 0x04 /* internal use, task got migrated */
++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
-- spin_lock_irqsave(&pl->lock, flags);
-+ raw_spin_lock_irqsave(&pl->lock, flags);
- prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
- /*
- * For each missed period, we half the local counter.
-@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global
- else
- pl->events = 0;
- pl->period = global_period;
-- spin_unlock_irqrestore(&pl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pl->lock, flags);
- }
+ #define ENQUEUE_WAKEUP 1
+ #define ENQUEUE_HEAD 2
+@@ -1219,6 +1223,7 @@ enum perf_event_task_context {
- /*
-Index: linux-2.6/kernel/trace/ring_buffer.c
-===================================================================
---- linux-2.6.orig/kernel/trace/ring_buffer.c
-+++ linux-2.6/kernel/trace/ring_buffer.c
-@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
- int cpu;
- atomic_t record_disabled;
- struct ring_buffer *buffer;
-- spinlock_t reader_lock; /* serialize readers */
-+ raw_spinlock_t reader_lock; /* serialize readers */
- arch_spinlock_t lock;
- struct lock_class_key lock_key;
- struct list_head *pages;
-@@ -1055,7 +1055,7 @@ rb_allocate_cpu_buffer(struct ring_buffe
+ struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
++ volatile long saved_state; /* saved state for "spinlock sleepers" */
+ void *stack;
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+@@ -1255,14 +1260,17 @@ struct task_struct {
+ #endif
- cpu_buffer->cpu = cpu;
- cpu_buffer->buffer = buffer;
-- spin_lock_init(&cpu_buffer->reader_lock);
-+ raw_spin_lock_init(&cpu_buffer->reader_lock);
- lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
- cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+ unsigned int policy;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int migrate_disable;
++#ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable_atomic;
++#endif
++#endif
+ cpumask_t cpus_allowed;
-@@ -1252,7 +1252,7 @@ rb_remove_pages(struct ring_buffer_per_c
- struct list_head *p;
- unsigned i;
+ #ifdef CONFIG_PREEMPT_RCU
+ int rcu_read_lock_nesting;
+ char rcu_read_unlock_special;
+-#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
+- int rcu_boosted;
+-#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
+ struct list_head rcu_node_entry;
+ #endif /* #ifdef CONFIG_PREEMPT_RCU */
+ #ifdef CONFIG_TREE_PREEMPT_RCU
+@@ -1356,6 +1364,9 @@ struct task_struct {
-- spin_lock_irq(&cpu_buffer->reader_lock);
-+ raw_spin_lock_irq(&cpu_buffer->reader_lock);
- rb_head_page_deactivate(cpu_buffer);
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct task_struct *posix_timer_list;
++#endif
- for (i = 0; i < nr_pages; i++) {
-@@ -1270,7 +1270,7 @@ rb_remove_pages(struct ring_buffer_per_c
- rb_check_pages(cpu_buffer);
+ /* process credentials */
+ const struct cred __rcu *real_cred; /* objective and real subjective task
+@@ -1389,6 +1400,7 @@ struct task_struct {
+ /* signal handlers */
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
++ struct sigqueue *sigqueue_cache;
- out:
-- spin_unlock_irq(&cpu_buffer->reader_lock);
-+ raw_spin_unlock_irq(&cpu_buffer->reader_lock);
- }
+ sigset_t blocked, real_blocked;
+ sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
+@@ -1432,6 +1444,9 @@ struct task_struct {
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int pagefault_disabled;
++#endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+@@ -1558,6 +1573,12 @@ struct task_struct {
+ unsigned long trace;
+ /* bitmask and counter of trace recursion */
+ unsigned long trace_recursion;
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ u64 preempt_timestamp_hist;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ unsigned long timer_offset;
++#endif
++#endif
+ #endif /* CONFIG_TRACING */
+ #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+ struct memcg_batch_info {
+@@ -1570,10 +1591,26 @@ struct task_struct {
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+ atomic_t ptrace_bp_refcnt;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head put_rcu;
++ int softirq_nestcnt;
++#endif
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++ int kmap_idx;
++ pte_t kmap_pte[KM_TYPE_NR];
++#endif
+ };
- static void
-@@ -1281,7 +1281,7 @@ rb_insert_pages(struct ring_buffer_per_c
- struct list_head *p;
- unsigned i;
+-/* Future-safe accessor for struct task_struct's cpus_allowed. */
+-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
++#else
++static inline bool cur_pf_disabled(void) { return false; }
++#endif
++
++static inline bool pagefault_disabled(void)
++{
++ return in_atomic() || cur_pf_disabled();
++}
-- spin_lock_irq(&cpu_buffer->reader_lock);
-+ raw_spin_lock_irq(&cpu_buffer->reader_lock);
- rb_head_page_deactivate(cpu_buffer);
+ /*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+@@ -1743,6 +1780,15 @@ extern struct pid *cad_pid;
+ extern void free_task(struct task_struct *tsk);
+ #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
- for (i = 0; i < nr_pages; i++) {
-@@ -1296,7 +1296,7 @@ rb_insert_pages(struct ring_buffer_per_c
- rb_check_pages(cpu_buffer);
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __put_task_struct_cb(struct rcu_head *rhp);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++ if (atomic_dec_and_test(&t->usage))
++ call_rcu(&t->put_rcu, __put_task_struct_cb);
++}
++#else
+ extern void __put_task_struct(struct task_struct *t);
- out:
-- spin_unlock_irq(&cpu_buffer->reader_lock);
-+ raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ static inline void put_task_struct(struct task_struct *t)
+@@ -1750,6 +1796,7 @@ static inline void put_task_struct(struc
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
}
++#endif
- /**
-@@ -2790,9 +2790,9 @@ void ring_buffer_iter_reset(struct ring_
-
- cpu_buffer = iter->cpu_buffer;
-
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- rb_iter_reset(iter);
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- }
- EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
+ extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+@@ -1774,6 +1821,7 @@ extern void thread_group_times(struct ta
+ #define PF_FROZEN 0x00010000 /* frozen for system suspend */
+ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
+ #define PF_KSWAPD 0x00040000 /* I am kswapd */
++#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */
+ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
+@@ -2022,15 +2070,27 @@ static inline void sched_autogroup_exit(
+ #endif
-@@ -3251,12 +3251,12 @@ ring_buffer_peek(struct ring_buffer *buf
- again:
- local_irq_save(flags);
- if (dolock)
-- spin_lock(&cpu_buffer->reader_lock);
-+ raw_spin_lock(&cpu_buffer->reader_lock);
- event = rb_buffer_peek(cpu_buffer, ts, lost_events);
- if (event && event->type_len == RINGBUF_TYPE_PADDING)
- rb_advance_reader(cpu_buffer);
- if (dolock)
-- spin_unlock(&cpu_buffer->reader_lock);
-+ raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ #ifdef CONFIG_RT_MUTEXES
++extern void task_setprio(struct task_struct *p, int prio);
+ extern int rt_mutex_getprio(struct task_struct *p);
+-extern void rt_mutex_setprio(struct task_struct *p, int prio);
++static inline void rt_mutex_setprio(struct task_struct *p, int prio)
++{
++ task_setprio(p, prio);
++}
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++{
++ return tsk->pi_blocked_on != NULL;
++}
+ #else
+ static inline int rt_mutex_getprio(struct task_struct *p)
+ {
+ return p->normal_prio;
+ }
+ # define rt_mutex_adjust_pi(p) do { } while (0)
++static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
++{
++ return false;
++}
+ #endif
- if (event && event->type_len == RINGBUF_TYPE_PADDING)
-@@ -3281,9 +3281,9 @@ ring_buffer_iter_peek(struct ring_buffer
- unsigned long flags;
+ extern bool yield_to(struct task_struct *p, bool preempt);
+@@ -2110,6 +2170,7 @@ extern void xtime_update(unsigned long t
- again:
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- event = rb_iter_peek(iter, ts);
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct * tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+ #ifdef CONFIG_SMP
+ extern void kick_process(struct task_struct *tsk);
+@@ -2199,12 +2260,24 @@ extern struct mm_struct * mm_alloc(void)
- if (event && event->type_len == RINGBUF_TYPE_PADDING)
- goto again;
-@@ -3323,7 +3323,7 @@ ring_buffer_consume(struct ring_buffer *
- cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
-- spin_lock(&cpu_buffer->reader_lock);
-+ raw_spin_lock(&cpu_buffer->reader_lock);
+ /* mmdrop drops the mm and the page tables */
+ extern void __mmdrop(struct mm_struct *);
++
+ static inline void mmdrop(struct mm_struct * mm)
+ {
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
+ }
- event = rb_buffer_peek(cpu_buffer, ts, lost_events);
- if (event) {
-@@ -3332,7 +3332,7 @@ ring_buffer_consume(struct ring_buffer *
- }
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
++{
++ if (atomic_dec_and_test(&mm->mm_count))
++ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
++}
++#else
++# define mmdrop_delayed(mm) mmdrop(mm)
++#endif
++
+ /* mmput gets rid of the mappings and all user-space */
+ extern void mmput(struct mm_struct *);
+ /* Grab a reference to a task's mm, if it is not already going away */
+@@ -2510,7 +2583,7 @@ extern int _cond_resched(void);
- if (dolock)
-- spin_unlock(&cpu_buffer->reader_lock);
-+ raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
+ extern int __cond_resched_lock(spinlock_t *lock);
- out:
-@@ -3424,11 +3424,11 @@ ring_buffer_read_start(struct ring_buffe
+-#ifdef CONFIG_PREEMPT
++#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT_FULL)
+ #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+ #else
+ #define PREEMPT_LOCK_OFFSET 0
+@@ -2521,12 +2594,16 @@ extern int __cond_resched_lock(spinlock_
+ __cond_resched_lock(lock); \
+ })
- cpu_buffer = iter->cpu_buffer;
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern int __cond_resched_softirq(void);
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- arch_spin_lock(&cpu_buffer->lock);
- rb_iter_reset(iter);
- arch_spin_unlock(&cpu_buffer->lock);
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ #define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
+ })
++#else
++# define cond_resched_softirq() cond_resched()
++#endif
+
+ /*
+ * Does a critical section need to be broken due to another
+@@ -2550,7 +2627,7 @@ void thread_group_cputimer(struct task_s
+
+ static inline void thread_group_cputime_init(struct signal_struct *sig)
+ {
+- spin_lock_init(&sig->cputimer.lock);
++ raw_spin_lock_init(&sig->cputimer.lock);
}
- EXPORT_SYMBOL_GPL(ring_buffer_read_start);
-@@ -3463,7 +3463,7 @@ ring_buffer_read(struct ring_buffer_iter
- struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
- unsigned long flags;
+ /*
+@@ -2589,6 +2666,26 @@ static inline void set_task_cpu(struct t
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- again:
- event = rb_iter_peek(iter, ts);
- if (!event)
-@@ -3474,7 +3474,7 @@ ring_buffer_read(struct ring_buffer_iter
+ #endif /* CONFIG_SMP */
- rb_advance_iter(iter);
- out:
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++static inline int __migrate_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ return p->migrate_disable;
++#else
++ return 0;
++#endif
++}
++
++/* Future-safe accessor for struct task_struct's cpus_allowed. */
++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (p->migrate_disable)
++ return cpumask_of(task_cpu(p));
++#endif
++
++ return &p->cpus_allowed;
++}
++
+ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
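tsk_cpus_allowed() becomes a real function above so that a task inside a migrate_disable() section advertises only its current CPU, which keeps the load balancer from moving it while it may be sleeping on an rtmutex. Expected usage, as a hedged sketch (demo function, not from the patch):

/* Sketch: keep per-CPU state stable across a preemptible section on RT. */
static void demo_percpu_section(void)
{
        migrate_disable();      /* still preemptible, but pinned to this CPU */
        /* per-CPU data stays ours even if we block on a sleeping lock */
        migrate_enable();
}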
- return event;
+Index: linux-2.6/arch/arm/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/arm/kernel/process.c
++++ linux-2.6/arch/arm/kernel/process.c
+@@ -209,9 +209,7 @@ void cpu_idle(void)
+ }
+ leds_event(led_idle_end);
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
-@@ -3543,7 +3543,7 @@ void ring_buffer_reset_cpu(struct ring_b
- atomic_inc(&cpu_buffer->record_disabled);
+Index: linux-2.6/arch/avr32/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/avr32/kernel/process.c
++++ linux-2.6/arch/avr32/kernel/process.c
+@@ -38,9 +38,7 @@ void cpu_idle(void)
+ while (!need_resched())
+ cpu_idle_sleep();
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+Index: linux-2.6/arch/blackfin/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/blackfin/kernel/process.c
++++ linux-2.6/arch/blackfin/kernel/process.c
+@@ -92,9 +92,7 @@ void cpu_idle(void)
+ while (!need_resched())
+ idle();
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
- goto out;
-@@ -3555,7 +3555,7 @@ void ring_buffer_reset_cpu(struct ring_b
- arch_spin_unlock(&cpu_buffer->lock);
+Index: linux-2.6/arch/cris/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/cris/kernel/process.c
++++ linux-2.6/arch/cris/kernel/process.c
+@@ -115,9 +115,7 @@ void cpu_idle (void)
+ idle = default_idle;
+ idle();
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- out:
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+Index: linux-2.6/arch/frv/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/frv/kernel/process.c
++++ linux-2.6/arch/frv/kernel/process.c
+@@ -92,9 +92,7 @@ void cpu_idle(void)
+ idle();
+ }
- atomic_dec(&cpu_buffer->record_disabled);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
-@@ -3593,10 +3593,10 @@ int ring_buffer_empty(struct ring_buffer
- cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
-- spin_lock(&cpu_buffer->reader_lock);
-+ raw_spin_lock(&cpu_buffer->reader_lock);
- ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
-- spin_unlock(&cpu_buffer->reader_lock);
-+ raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
- if (!ret)
-@@ -3627,10 +3627,10 @@ int ring_buffer_empty_cpu(struct ring_bu
- cpu_buffer = buffer->buffers[cpu];
- local_irq_save(flags);
- if (dolock)
-- spin_lock(&cpu_buffer->reader_lock);
-+ raw_spin_lock(&cpu_buffer->reader_lock);
- ret = rb_per_cpu_empty(cpu_buffer);
- if (dolock)
-- spin_unlock(&cpu_buffer->reader_lock);
-+ raw_spin_unlock(&cpu_buffer->reader_lock);
- local_irq_restore(flags);
-
- return ret;
-@@ -3826,7 +3826,7 @@ int ring_buffer_read_page(struct ring_bu
- if (!bpage)
- goto out;
-
-- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-
- reader = rb_get_reader_page(cpu_buffer);
- if (!reader)
-@@ -3949,7 +3949,7 @@ int ring_buffer_read_page(struct ring_bu
- memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
-
- out_unlock:
-- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
- out:
- return ret;
-Index: linux-2.6/kernel/trace/trace.c
+Index: linux-2.6/arch/h8300/kernel/process.c
===================================================================
---- linux-2.6.orig/kernel/trace/trace.c
-+++ linux-2.6/kernel/trace/trace.c
-@@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_P
- TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
+--- linux-2.6.orig/arch/h8300/kernel/process.c
++++ linux-2.6/arch/h8300/kernel/process.c
+@@ -81,9 +81,7 @@ void cpu_idle(void)
+ while (1) {
+ while (!need_resched())
+ idle();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- static int trace_stop_count;
--static DEFINE_SPINLOCK(tracing_start_lock);
-+static DEFINE_RAW_SPINLOCK(tracing_start_lock);
+Index: linux-2.6/arch/ia64/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/ia64/kernel/process.c
++++ linux-2.6/arch/ia64/kernel/process.c
+@@ -330,9 +330,7 @@ cpu_idle (void)
+ normal_xtp();
+ #endif
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
+ if (cpu_is_offline(cpu))
+ play_dead();
+Index: linux-2.6/arch/m32r/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/m32r/kernel/process.c
++++ linux-2.6/arch/m32r/kernel/process.c
+@@ -90,9 +90,7 @@ void cpu_idle (void)
- /**
- * trace_wake_up - wake up tasks waiting for trace input
-@@ -958,7 +958,7 @@ void tracing_start(void)
- if (tracing_disabled)
- return;
+ idle();
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
-- spin_lock_irqsave(&tracing_start_lock, flags);
-+ raw_spin_lock_irqsave(&tracing_start_lock, flags);
- if (--trace_stop_count) {
- if (trace_stop_count < 0) {
- /* Someone screwed up their debugging */
-@@ -983,7 +983,7 @@ void tracing_start(void)
+Index: linux-2.6/arch/m68k/kernel/process_mm.c
+===================================================================
+--- linux-2.6.orig/arch/m68k/kernel/process_mm.c
++++ linux-2.6/arch/m68k/kernel/process_mm.c
+@@ -94,9 +94,7 @@ void cpu_idle(void)
+ while (1) {
+ while (!need_resched())
+ idle();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- ftrace_start();
- out:
-- spin_unlock_irqrestore(&tracing_start_lock, flags);
-+ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+Index: linux-2.6/arch/m68k/kernel/process_no.c
+===================================================================
+--- linux-2.6.orig/arch/m68k/kernel/process_no.c
++++ linux-2.6/arch/m68k/kernel/process_no.c
+@@ -73,9 +73,7 @@ void cpu_idle(void)
+ /* endless idle loop with no priority at all */
+ while (1) {
+ idle();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
- /**
-@@ -998,7 +998,7 @@ void tracing_stop(void)
- unsigned long flags;
+Index: linux-2.6/arch/microblaze/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/microblaze/kernel/process.c
++++ linux-2.6/arch/microblaze/kernel/process.c
+@@ -108,9 +108,7 @@ void cpu_idle(void)
+ idle();
+ tick_nohz_restart_sched_tick();
- ftrace_stop();
-- spin_lock_irqsave(&tracing_start_lock, flags);
-+ raw_spin_lock_irqsave(&tracing_start_lock, flags);
- if (trace_stop_count++)
- goto out;
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
+ }
+ }
+Index: linux-2.6/arch/mips/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/mips/kernel/process.c
++++ linux-2.6/arch/mips/kernel/process.c
+@@ -78,9 +78,7 @@ void __noreturn cpu_idle(void)
+ play_dead();
+ #endif
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
-@@ -1016,7 +1016,7 @@ void tracing_stop(void)
- arch_spin_unlock(&ftrace_max_lock);
+Index: linux-2.6/arch/mn10300/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/mn10300/kernel/process.c
++++ linux-2.6/arch/mn10300/kernel/process.c
+@@ -123,9 +123,7 @@ void cpu_idle(void)
+ idle();
+ }
- out:
-- spin_unlock_irqrestore(&tracing_start_lock, flags);
-+ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
- void trace_stop_cmdline_recording(void);
-@@ -1120,6 +1120,8 @@ tracing_generic_entry_update(struct trac
- ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
- (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
-+
-+ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+Index: linux-2.6/arch/parisc/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/parisc/kernel/process.c
++++ linux-2.6/arch/parisc/kernel/process.c
+@@ -71,9 +71,7 @@ void cpu_idle(void)
+ while (1) {
+ while (!need_resched())
+ barrier();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
+ }
}
- EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-
-@@ -1757,9 +1759,10 @@ static void print_lat_help_header(struct
- seq_puts(m, "# | / _----=> need-resched \n");
- seq_puts(m, "# || / _---=> hardirq/softirq \n");
- seq_puts(m, "# ||| / _--=> preempt-depth \n");
-- seq_puts(m, "# |||| / delay \n");
-- seq_puts(m, "# cmd pid ||||| time | caller \n");
-- seq_puts(m, "# \\ / ||||| \\ | / \n");
-+ seq_puts(m, "# |||| / _--=> migrate-disable\n");
-+ seq_puts(m, "# ||||| / delay \n");
-+ seq_puts(m, "# cmd pid |||||| time | caller \n");
-+ seq_puts(m, "# \\ / ||||| \\ | / \n");
+Index: linux-2.6/arch/powerpc/kernel/idle.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/kernel/idle.c
++++ linux-2.6/arch/powerpc/kernel/idle.c
+@@ -94,11 +94,11 @@ void cpu_idle(void)
+ HMT_medium();
+ ppc64_runlatch_on();
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- if (cpu_should_die())
++ if (cpu_should_die()) {
++ __preempt_enable_no_resched();
+ cpu_die();
+- schedule();
+- preempt_disable();
++ }
++ schedule_preempt_disabled();
+ }
}
- static void print_func_help_header(struct seq_file *m)
-Index: linux-2.6/kernel/trace/trace_irqsoff.c
+Index: linux-2.6/arch/powerpc/platforms/iseries/setup.c
===================================================================
---- linux-2.6.orig/kernel/trace/trace_irqsoff.c
-+++ linux-2.6/kernel/trace/trace_irqsoff.c
-@@ -17,13 +17,14 @@
- #include <linux/fs.h>
-
- #include "trace.h"
-+#include <trace/events/hist.h>
-
- static struct trace_array *irqsoff_trace __read_mostly;
- static int tracer_enabled __read_mostly;
+--- linux-2.6.orig/arch/powerpc/platforms/iseries/setup.c
++++ linux-2.6/arch/powerpc/platforms/iseries/setup.c
+@@ -581,9 +581,7 @@ static void iseries_shared_idle(void)
+ if (hvlpevent_is_pending())
+ process_iSeries_events();
- static DEFINE_PER_CPU(int, tracing_cpu);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
--static DEFINE_SPINLOCK(max_trace_lock);
-+static DEFINE_RAW_SPINLOCK(max_trace_lock);
+@@ -610,9 +608,7 @@ static void iseries_dedicated_idle(void)
- enum {
- TRACER_IRQS_OFF = (1 << 1),
-@@ -319,7 +320,7 @@ check_critical_timing(struct trace_array
- if (!report_latency(delta))
- goto out;
+ ppc64_runlatch_on();
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
-- spin_lock_irqsave(&max_trace_lock, flags);
-+ raw_spin_lock_irqsave(&max_trace_lock, flags);
-
- /* check if we are still the max latency */
- if (!report_latency(delta))
-@@ -342,7 +343,7 @@ check_critical_timing(struct trace_array
- max_sequence++;
-
- out_unlock:
-- spin_unlock_irqrestore(&max_trace_lock, flags);
-+ raw_spin_unlock_irqrestore(&max_trace_lock, flags);
-
- out:
- data->critical_sequence = max_sequence;
-@@ -424,11 +425,13 @@ void start_critical_timings(void)
- {
- if (preempt_trace() || irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-+ trace_preemptirqsoff_hist(TRACE_START, 1);
+Index: linux-2.6/arch/s390/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/s390/kernel/process.c
++++ linux-2.6/arch/s390/kernel/process.c
+@@ -94,9 +94,7 @@ void cpu_idle(void)
+ while (!need_resched())
+ default_idle();
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
- EXPORT_SYMBOL_GPL(start_critical_timings);
- void stop_critical_timings(void)
- {
-+ trace_preemptirqsoff_hist(TRACE_STOP, 0);
- if (preempt_trace() || irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
-@@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
- #ifdef CONFIG_PROVE_LOCKING
- void time_hardirqs_on(unsigned long a0, unsigned long a1)
- {
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(a0, a1);
- }
-@@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0,
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(a0, a1);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
+Index: linux-2.6/arch/score/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/score/kernel/process.c
++++ linux-2.6/arch/score/kernel/process.c
+@@ -53,9 +53,7 @@ void __noreturn cpu_idle(void)
+ while (!need_resched())
+ barrier();
- #else /* !CONFIG_PROVE_LOCKING */
-@@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct
- */
- void trace_hardirqs_on(void)
- {
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
- }
-@@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
- EXPORT_SYMBOL(trace_hardirqs_off);
- void trace_hardirqs_on_caller(unsigned long caller_addr)
- {
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
- if (!preempt_trace() && irq_trace())
- stop_critical_timing(CALLER_ADDR0, caller_addr);
- }
-@@ -494,6 +502,7 @@ void trace_hardirqs_off_caller(unsigned
- {
- if (!preempt_trace() && irq_trace())
- start_critical_timing(CALLER_ADDR0, caller_addr);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
- }
- EXPORT_SYMBOL(trace_hardirqs_off_caller);
+Index: linux-2.6/arch/sh/kernel/idle.c
+===================================================================
+--- linux-2.6.orig/arch/sh/kernel/idle.c
++++ linux-2.6/arch/sh/kernel/idle.c
+@@ -110,9 +110,7 @@ void cpu_idle(void)
+ }
-@@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
- #ifdef CONFIG_PREEMPT_TRACER
- void trace_preempt_on(unsigned long a0, unsigned long a1)
- {
-+ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
- if (preempt_trace())
- stop_critical_timing(a0, a1);
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
}
- void trace_preempt_off(unsigned long a0, unsigned long a1)
- {
-+ trace_preemptirqsoff_hist(PREEMPT_OFF, 1);
- if (preempt_trace())
- start_critical_timing(a0, a1);
- }
-Index: linux-2.6/include/linux/ratelimit.h
+Index: linux-2.6/arch/sparc/kernel/process_32.c
===================================================================
---- linux-2.6.orig/include/linux/ratelimit.h
-+++ linux-2.6/include/linux/ratelimit.h
-@@ -8,7 +8,7 @@
- #define DEFAULT_RATELIMIT_BURST 10
-
- struct ratelimit_state {
-- spinlock_t lock; /* protect the state */
-+ raw_spinlock_t lock; /* protect the state */
-
- int interval;
- int burst;
-@@ -20,7 +20,7 @@ struct ratelimit_state {
- #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
- \
- struct ratelimit_state name = { \
-- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .interval = interval_init, \
- .burst = burst_init, \
+--- linux-2.6.orig/arch/sparc/kernel/process_32.c
++++ linux-2.6/arch/sparc/kernel/process_32.c
+@@ -113,9 +113,7 @@ void cpu_idle(void)
+ while (!need_resched())
+ cpu_relax();
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
}
-@@ -28,7 +28,7 @@ struct ratelimit_state {
- static inline void ratelimit_state_init(struct ratelimit_state *rs,
- int interval, int burst)
- {
-- spin_lock_init(&rs->lock);
-+ raw_spin_lock_init(&rs->lock);
- rs->interval = interval;
- rs->burst = burst;
- rs->printed = 0;
-Index: linux-2.6/kernel/printk.c
+ }
+@@ -138,9 +136,7 @@ void cpu_idle(void)
+ while (!need_resched())
+ cpu_relax();
+ }
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ check_pgt_cache();
+ }
+ }
+Index: linux-2.6/arch/sparc/kernel/process_64.c
===================================================================
---- linux-2.6.orig/kernel/printk.c
-+++ linux-2.6/kernel/printk.c
-@@ -44,13 +44,6 @@
+--- linux-2.6.orig/arch/sparc/kernel/process_64.c
++++ linux-2.6/arch/sparc/kernel/process_64.c
+@@ -102,15 +102,13 @@ void cpu_idle(void)
- #include <asm/uaccess.h>
+ tick_nohz_restart_sched_tick();
--/*
-- * Architectures can override it:
-- */
--void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
--{
--}
+- preempt_enable_no_resched();
-
- #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
-
- /* printk's without a loglevel use this.. */
-@@ -100,7 +93,7 @@ static int console_locked, console_suspe
- * It is also used in interesting ways to provide interlocking in
- * console_unlock();.
- */
--static DEFINE_SPINLOCK(logbuf_lock);
-+static DEFINE_RAW_SPINLOCK(logbuf_lock);
+ #ifdef CONFIG_HOTPLUG_CPU
+- if (cpu_is_offline(cpu))
++ if (cpu_is_offline(cpu)) {
++ __preempt_enable_no_resched();
+ cpu_play_dead();
++ }
+ #endif
+-
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ }
+ }
- #define LOG_BUF_MASK (log_buf_len-1)
- #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
-@@ -212,7 +205,7 @@ void __init setup_log_buf(int early)
- return;
+Index: linux-2.6/arch/tile/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/tile/kernel/process.c
++++ linux-2.6/arch/tile/kernel/process.c
+@@ -106,9 +106,7 @@ void cpu_idle(void)
+ current_thread_info()->status |= TS_POLLING;
+ }
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
}
+ }
-- spin_lock_irqsave(&logbuf_lock, flags);
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
- log_buf_len = new_log_buf_len;
- log_buf = new_log_buf;
- new_log_buf_len = 0;
-@@ -230,7 +223,7 @@ void __init setup_log_buf(int early)
- log_start -= offset;
- con_start -= offset;
- log_end -= offset;
-- spin_unlock_irqrestore(&logbuf_lock, flags);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+Index: linux-2.6/arch/x86/kernel/process_32.c
+===================================================================
+--- linux-2.6.orig/arch/x86/kernel/process_32.c
++++ linux-2.6/arch/x86/kernel/process_32.c
+@@ -38,6 +38,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
+ #include <linux/kdebug.h>
++#include <linux/highmem.h>
- pr_info("log_buf_len: %d\n", log_buf_len);
- pr_info("early log buf free: %d(%d%%)\n",
-@@ -363,18 +356,18 @@ int do_syslog(int type, char __user *buf
- if (error)
- goto out;
- i = 0;
-- spin_lock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
- while (!error && (log_start != log_end) && i < len) {
- c = LOG_BUF(log_start);
- log_start++;
-- spin_unlock_irq(&logbuf_lock);
-+ raw_spin_unlock_irq(&logbuf_lock);
- error = __put_user(c,buf);
- buf++;
- i++;
- cond_resched();
-- spin_lock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
- }
-- spin_unlock_irq(&logbuf_lock);
-+ raw_spin_unlock_irq(&logbuf_lock);
- if (!error)
- error = i;
- break;
-@@ -397,7 +390,7 @@ int do_syslog(int type, char __user *buf
- count = len;
- if (count > log_buf_len)
- count = log_buf_len;
-- spin_lock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
- if (count > logged_chars)
- count = logged_chars;
- if (do_clear)
-@@ -414,12 +407,12 @@ int do_syslog(int type, char __user *buf
- if (j + log_buf_len < log_end)
- break;
- c = LOG_BUF(j);
-- spin_unlock_irq(&logbuf_lock);
-+ raw_spin_unlock_irq(&logbuf_lock);
- error = __put_user(c,&buf[count-1-i]);
- cond_resched();
-- spin_lock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+@@ -113,9 +114,7 @@ void cpu_idle(void)
+ start_critical_timings();
}
-- spin_unlock_irq(&logbuf_lock);
-+ raw_spin_unlock_irq(&logbuf_lock);
- if (error)
- break;
- error = i;
-@@ -509,6 +502,7 @@ static void __call_console_drivers(unsig
- {
- struct console *con;
-
-+ migrate_disable();
- for_each_console(con) {
- if (exclusive_console && con != exclusive_console)
- continue;
-@@ -517,7 +511,54 @@ static void __call_console_drivers(unsig
- (con->flags & CON_ANYTIME)))
- con->write(con, &LOG_BUF(start), end - start);
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
}
-+ migrate_enable();
-+}
+ }
+
+@@ -348,6 +347,41 @@ __switch_to(struct task_struct *prev_p,
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++ /*
++ * Save @prev's kmap_atomic stack
++ */
++ prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx);
++ if (unlikely(prev_p->kmap_idx)) {
++ int i;
+
-+#ifdef CONFIG_EARLY_PRINTK
-+struct console *early_console;
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
+
-+static void early_vprintk(const char *fmt, va_list ap)
-+{
-+ char buf[512];
-+ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-+ if (early_console)
-+ early_console->write(early_console, buf, n);
-+}
++ pte_t *ptep = kmap_pte - idx;
++ prev_p->kmap_pte[i] = *ptep;
++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++ }
+
-+asmlinkage void early_printk(const char *fmt, ...)
-+{
-+ va_list ap;
-+ va_start(ap, fmt);
-+ early_vprintk(fmt, ap);
-+ va_end(ap);
-+}
++ __this_cpu_write(__kmap_atomic_idx, 0);
++ }
+
-+/*
-+ * This is independent of any log levels - a global
-+ * kill switch that turns off all of printk.
-+ *
-+ * Used by the NMI watchdog if early-printk is enabled.
-+ */
-+static int __read_mostly printk_killswitch;
++ /*
++ * Restore @next_p's kmap_atomic stack
++ */
++ if (unlikely(next_p->kmap_idx)) {
++ int i;
+
-+void printk_kill(void)
-+{
-+ printk_killswitch = 1;
-+}
++ __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx);
+
-+static int forced_early_printk(const char *fmt, va_list ap)
-+{
-+ if (!printk_killswitch)
-+ return 0;
-+ early_vprintk(fmt, ap);
-+ return 1;
- }
-+#else
-+static inline int forced_early_printk(const char *fmt, va_list ap)
-+{
-+ return 0;
-+}
-+#endif
-
- static int __read_mostly ignore_loglevel;
-
-@@ -687,7 +728,7 @@ static void zap_locks(void)
- oops_timestamp = jiffies;
-
- /* If a crash is occurring, make sure we can't deadlock */
-- spin_lock_init(&logbuf_lock);
-+ raw_spin_lock_init(&logbuf_lock);
- /* And make sure that we print immediately */
- sema_init(&console_sem, 1);
- }
-@@ -779,12 +820,18 @@ static inline int can_use_console(unsign
- * interrupts disabled. It should return with 'lockbuf_lock'
- * released but interrupts still disabled.
- */
--static int console_trylock_for_printk(unsigned int cpu)
-+static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
- __releases(&logbuf_lock)
- {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
-+ !preempt_count();
-+#else
-+ int lock = 1;
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++ }
++ }
+#endif
- int retval = 0;
-
-- if (console_trylock()) {
-+ if (lock && console_trylock()) {
- retval = 1;
-
- /*
-@@ -800,7 +847,7 @@ static int console_trylock_for_printk(un
++
+ /* If we're going to preload the fpu context, make sure clts
+ is run while we're batching the cpu state updates. */
+ if (preload_fpu)
+Index: linux-2.6/arch/x86/kernel/process_64.c
+===================================================================
+--- linux-2.6.orig/arch/x86/kernel/process_64.c
++++ linux-2.6/arch/x86/kernel/process_64.c
+@@ -146,9 +146,7 @@ void cpu_idle(void)
}
+
+ tick_nohz_restart_sched_tick();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
}
- printk_cpu = UINT_MAX;
-- spin_unlock(&logbuf_lock);
-+ raw_spin_unlock(&logbuf_lock);
- return retval;
}
- static const char recursion_bug_msg [] =
-@@ -833,6 +880,13 @@ asmlinkage int vprintk(const char *fmt,
- size_t plen;
- char special;
-
-+ /*
-+ * Fall back to early_printk if a debugging subsystem has
-+ * killed printk output
-+ */
-+ if (unlikely(forced_early_printk(fmt, args)))
-+ return 1;
-+
- boot_delay_msec();
- printk_delay();
-@@ -860,7 +914,7 @@ asmlinkage int vprintk(const char *fmt,
+Index: linux-2.6/arch/xtensa/kernel/process.c
+===================================================================
+--- linux-2.6.orig/arch/xtensa/kernel/process.c
++++ linux-2.6/arch/xtensa/kernel/process.c
+@@ -113,9 +113,7 @@ void cpu_idle(void)
+ while (1) {
+ while (!need_resched())
+ platform_idle();
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
}
+ }
- lockdep_off();
-- spin_lock(&logbuf_lock);
-+ raw_spin_lock(&logbuf_lock);
- printk_cpu = this_cpu;
+Index: linux-2.6/init/main.c
+===================================================================
+--- linux-2.6.orig/init/main.c
++++ linux-2.6/init/main.c
+@@ -68,6 +68,7 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/posix-timers.h>
- if (recursion_bug) {
-@@ -953,8 +1007,15 @@ asmlinkage int vprintk(const char *fmt,
- * will release 'logbuf_lock' regardless of whether it
- * actually gets the semaphore or not.
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+@@ -367,9 +368,7 @@ static noinline void __init_refok rest_i
+ * at least once to get things moving:
*/
-- if (console_trylock_for_printk(this_cpu))
-+ if (console_trylock_for_printk(this_cpu, flags)) {
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+ console_unlock();
-+#else
-+ raw_local_irq_restore(flags);
- console_unlock();
-+ raw_local_irq_save(flags);
-+#endif
-+ }
-
- lockdep_on();
- out_restore_irqs:
-@@ -1252,18 +1313,23 @@ void console_unlock(void)
- console_may_schedule = 0;
+ init_idle_bootup_task(current);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
- for ( ; ; ) {
-- spin_lock_irqsave(&logbuf_lock, flags);
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
- wake_klogd |= log_start - log_end;
- if (con_start == log_end)
- break; /* Nothing to print */
- _con_start = con_start;
- _log_end = log_end;
- con_start = log_end; /* Flush */
-- spin_unlock(&logbuf_lock);
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+ raw_spin_unlock(&logbuf_lock);
- stop_critical_timings(); /* don't trace print latency */
- call_console_drivers(_con_start, _log_end);
- start_critical_timings();
- local_irq_restore(flags);
-+#else
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ call_console_drivers(_con_start, _log_end);
-+#endif
- }
- console_locked = 0;
-
-@@ -1272,7 +1338,7 @@ void console_unlock(void)
- exclusive_console = NULL;
-
- up(&console_sem);
-- spin_unlock_irqrestore(&logbuf_lock, flags);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- if (wake_klogd)
- wake_up_klogd();
- }
-@@ -1502,9 +1568,9 @@ void register_console(struct console *ne
- * console_unlock(); will print out the buffered messages
- * for us.
- */
-- spin_lock_irqsave(&logbuf_lock, flags);
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
- con_start = log_start;
-- spin_unlock_irqrestore(&logbuf_lock, flags);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- /*
- * We're about to replay the log buffer. Only do this to the
- * just-registered console to avoid excessive message spam to
-@@ -1711,10 +1777,10 @@ void kmsg_dump(enum kmsg_dump_reason rea
- /* Theoretically, the log could move on after we do this, but
- there's not a lot we can do about that. The new messages
- will overwrite the start of what we dump. */
-- spin_lock_irqsave(&logbuf_lock, flags);
-+ raw_spin_lock_irqsave(&logbuf_lock, flags);
- end = log_end & LOG_BUF_MASK;
- chars = logged_chars;
-- spin_unlock_irqrestore(&logbuf_lock, flags);
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-
- if (chars > end) {
- s1 = log_buf + log_buf_len - chars + end;
-Index: linux-2.6/lib/ratelimit.c
+ /* Call into cpu_idle with preempt disabled */
+ cpu_idle();
+@@ -501,6 +500,7 @@ asmlinkage void __init start_kernel(void
+ parse_args("Booting kernel", static_command_line, __start___param,
+ __stop___param - __start___param,
+ &unknown_bootoption);
++ softirq_early_init();
+ /*
+ * These use large bootmem allocations and must precede
+ * kmem_cache_init()
+Index: linux-2.6/kernel/mutex.c
===================================================================
---- linux-2.6.orig/lib/ratelimit.c
-+++ linux-2.6/lib/ratelimit.c
-@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state
- * in addition to the one that will be printed by
- * the entity that is holding the lock already:
- */
-- if (!spin_trylock_irqsave(&rs->lock, flags))
-+ if (!raw_spin_trylock_irqsave(&rs->lock, flags))
- return 0;
+--- linux-2.6.orig/kernel/mutex.c
++++ linux-2.6/kernel/mutex.c
+@@ -240,9 +240,7 @@ __mutex_lock_common(struct mutex *lock,
- if (!rs->begin)
-@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state
- rs->missed++;
- ret = 0;
+ /* didn't get the lock, go to sleep: */
+ spin_unlock_mutex(&lock->wait_lock, flags);
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
++ schedule_preempt_disabled();
+ spin_lock_mutex(&lock->wait_lock, flags);
}
-- spin_unlock_irqrestore(&rs->lock, flags);
-+ raw_spin_unlock_irqrestore(&rs->lock, flags);
- return ret;
- }
-Index: linux-2.6/include/linux/init_task.h
+Index: linux-2.6/kernel/softirq.c
===================================================================
---- linux-2.6.orig/include/linux/init_task.h
-+++ linux-2.6/include/linux/init_task.h
-@@ -42,7 +42,7 @@ extern struct fs_struct init_fs;
- .cputimer = { \
- .cputime = INIT_CPUTIME, \
- .running = 0, \
-- .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
- }, \
- .cred_guard_mutex = \
- __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
-@@ -126,6 +126,12 @@ extern struct cred init_cred;
- # define INIT_PERF_EVENTS(tsk)
- #endif
+--- linux-2.6.orig/kernel/softirq.c
++++ linux-2.6/kernel/softirq.c
+@@ -24,6 +24,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/tick.h>
++#include <linux/locallock.h>
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define INIT_TIMER_LIST .posix_timer_list = NULL,
-+#else
-+# define INIT_TIMER_LIST
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -61,6 +62,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+
++#ifdef CONFIG_NO_HZ
++# ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * On preempt-rt a softirq might be blocked on a lock. There might be
++ * no other runnable task on this CPU because the lock owner runs on
++ * some other CPU. So we have to go into idle with the pending bit
++ * set. Therefore we need to check this, otherwise we warn about false
++ * positives, which confuses users and defeats the whole purpose of
++ * this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++ u32 warnpending = 0, pending = local_softirq_pending();
++
++ if (rate_limit >= 10)
++ return;
++
++ if (pending) {
++ struct task_struct *tsk;
++
++ tsk = __get_cpu_var(ksoftirqd);
++ /*
++ * The wakeup code in rtmutex.c wakes up the task
++ * _before_ it sets pi_blocked_on to NULL under
++ * tsk->pi_lock. So we need to check for both: state
++ * and pi_blocked_on.
++ */
++ raw_spin_lock(&tsk->pi_lock);
++
++ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
++ warnpending = 1;
++
++ raw_spin_unlock(&tsk->pi_lock);
++ }
++
++ if (warnpending) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ pending);
++ rate_limit++;
++ }
++}
++# else
++/*
++ * On !PREEMPT_RT we just printk rate limited:
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++
++ if (rate_limit < 10) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ local_softirq_pending());
++ rate_limit++;
++ }
++}
++# endif
+#endif
+
/*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
-@@ -179,6 +185,7 @@ extern struct cred init_cred;
- .fs_excl = ATOMIC_INIT(0), \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
-+ INIT_TIMER_LIST \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-Index: linux-2.6/kernel/posix-cpu-timers.c
-===================================================================
---- linux-2.6.orig/kernel/posix-cpu-timers.c
-+++ linux-2.6/kernel/posix-cpu-timers.c
-@@ -274,7 +274,7 @@ void thread_group_cputimer(struct task_s
- struct task_cputime sum;
- unsigned long flags;
-
-- spin_lock_irqsave(&cputimer->lock, flags);
-+ raw_spin_lock_irqsave(&cputimer->lock, flags);
- if (!cputimer->running) {
- cputimer->running = 1;
- /*
-@@ -287,7 +287,7 @@ void thread_group_cputimer(struct task_s
- update_gt_cputime(&cputimer->cputime, &sum);
- }
- *times = cputimer->cputime;
-- spin_unlock_irqrestore(&cputimer->lock, flags);
-+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+ * we cannot loop indefinitely here to avoid userspace starvation,
+ * but we also don't want to introduce a worst case 1/HZ latency
+@@ -76,6 +138,35 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
}
++static void handle_pending_softirqs(u32 pending, int cpu)
++{
++ struct softirq_action *h = softirq_vec;
++ unsigned int prev_count = preempt_count();
++
++ local_irq_enable();
++ for ( ; pending; h++, pending >>= 1) {
++ unsigned int vec_nr = h - softirq_vec;
++
++ if (!(pending & 1))
++ continue;
++
++ kstat_incr_softirqs_this_cpu(vec_nr);
++ trace_softirq_entry(vec_nr);
++ h->action(h);
++ trace_softirq_exit(vec_nr);
++ if (unlikely(prev_count != preempt_count())) {
++ printk(KERN_ERR
++ "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n",
++ vec_nr, softirq_to_name[vec_nr], h->action,
++ prev_count, (unsigned int) preempt_count());
++ preempt_count() = prev_count;
++ }
++ rcu_bh_qs(cpu);
++ }
++ local_irq_disable();
++}
++
++#ifndef CONFIG_PREEMPT_RT_FULL
/*
-@@ -699,7 +699,7 @@ static int posix_cpu_timer_set(struct k_
- /*
- * Disarm any old timer after extracting its expiry time.
- */
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -206,7 +297,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
- ret = 0;
- old_incr = timer->it.cpu.incr;
-@@ -997,9 +997,9 @@ static void stop_process_timers(struct s
- struct thread_group_cputimer *cputimer = &sig->cputimer;
- unsigned long flags;
+ asmlinkage void __do_softirq(void)
+ {
+- struct softirq_action *h;
+ __u32 pending;
+ int max_restart = MAX_SOFTIRQ_RESTART;
+ int cpu;
+@@ -215,7 +305,7 @@ asmlinkage void __do_softirq(void)
+ account_system_vtime(current);
-- spin_lock_irqsave(&cputimer->lock, flags);
-+ raw_spin_lock_irqsave(&cputimer->lock, flags);
- cputimer->running = 0;
-- spin_unlock_irqrestore(&cputimer->lock, flags);
-+ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
- }
-
- static u32 onecputick;
-@@ -1221,7 +1221,7 @@ void posix_cpu_timer_schedule(struct k_i
- /*
- * Now re-arm for the new expiry time.
- */
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
- arm_timer(timer);
- spin_unlock(&p->sighand->siglock);
-
-@@ -1288,10 +1288,11 @@ static inline int fastpath_timer_check(s
- sig = tsk->signal;
- if (sig->cputimer.running) {
- struct task_cputime group_sample;
-+ unsigned long flags;
-
-- spin_lock(&sig->cputimer.lock);
-+ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
- group_sample = sig->cputimer.cputime;
-- spin_unlock(&sig->cputimer.lock);
-+ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+- SOFTIRQ_OFFSET);
++ SOFTIRQ_OFFSET);
+ lockdep_softirq_enter();
- if (task_cputime_expired(&group_sample, &sig->cputime_expires))
- return 1;
-@@ -1305,13 +1306,13 @@ static inline int fastpath_timer_check(s
- * already updated our counts. We need to check if any timers fire now.
- * Interrupts are disabled.
- */
--void run_posix_cpu_timers(struct task_struct *tsk)
-+static void __run_posix_cpu_timers(struct task_struct *tsk)
- {
- LIST_HEAD(firing);
- struct k_itimer *timer, *next;
- unsigned long flags;
+ cpu = smp_processor_id();
+@@ -223,36 +313,7 @@ restart:
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
+- local_irq_enable();
+-
+- h = softirq_vec;
+-
+- do {
+- if (pending & 1) {
+- unsigned int vec_nr = h - softirq_vec;
+- int prev_count = preempt_count();
+-
+- kstat_incr_softirqs_this_cpu(vec_nr);
+-
+- trace_softirq_entry(vec_nr);
+- h->action(h);
+- trace_softirq_exit(vec_nr);
+- if (unlikely(prev_count != preempt_count())) {
+- printk(KERN_ERR "huh, entered softirq %u %s %p"
+- "with preempt_count %08x,"
+- " exited with %08x?\n", vec_nr,
+- softirq_to_name[vec_nr], h->action,
+- prev_count, preempt_count());
+- preempt_count() = prev_count;
+- }
+-
+- rcu_bh_qs(cpu);
+- }
+- h++;
+- pending >>= 1;
+- } while (pending);
+-
+- local_irq_disable();
++ handle_pending_softirqs(pending, cpu);
- /*
- * The fast path checks that there are no expired thread or thread
-@@ -1369,6 +1370,190 @@ void run_posix_cpu_timers(struct task_st
- }
+ pending = local_softirq_pending();
+ if (pending && --max_restart)
+@@ -267,6 +328,26 @@ restart:
+ __local_bh_enable(SOFTIRQ_OFFSET);
}
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+#include <linux/kthread.h>
-+#include <linux/cpu.h>
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
-+
-+static int posix_cpu_timers_thread(void *data)
++/*
++ * Called with preemption disabled from run_ksoftirqd()
++ */
++static int ksoftirqd_do_softirq(int cpu)
+{
-+ int cpu = (long)data;
++ /*
++ * Preempt disable stops cpu going offline.
++ * If already offline, we'll be on wrong CPU:
++ * don't process.
++ */
++ if (cpu_is_offline(cpu))
++ return -1;
+
-+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
++ local_irq_disable();
++ if (local_softirq_pending())
++ __do_softirq();
++ local_irq_enable();
++ return 0;
++}
+
-+ while (!kthread_should_stop()) {
-+ struct task_struct *tsk = NULL;
-+ struct task_struct *next = NULL;
+ #ifndef __ARCH_HAS_DO_SOFTIRQ
+
+ asmlinkage void do_softirq(void)
+@@ -289,6 +370,178 @@ asmlinkage void do_softirq(void)
+
+ #endif
+
++static inline void local_bh_disable_nort(void) { local_bh_disable(); }
++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++static inline void ksoftirqd_set_sched_params(void) { }
++static inline void ksoftirqd_clr_sched_params(void) { }
+
-+ if (cpu_is_offline(cpu))
-+ goto wait_to_die;
++#else /* !PREEMPT_RT_FULL */
+
-+ /* grab task list */
-+ raw_local_irq_disable();
-+ tsk = per_cpu(posix_timer_tasklist, cpu);
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
-+ raw_local_irq_enable();
++/*
++ * On RT we serialize softirq execution with a cpu local lock
++ */
++static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
++static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
+
-+ /* its possible the list is empty, just return */
-+ if (!tsk) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ schedule();
-+ __set_current_state(TASK_RUNNING);
-+ continue;
-+ }
++static void __do_softirq(void);
+
-+ /* Process task list */
-+ while (1) {
-+ /* save next */
-+ next = tsk->posix_timer_list;
++void __init softirq_early_init(void)
++{
++ local_irq_lock_init(local_softirq_lock);
++}
+
-+ /* run the task timers, clear its ptr and
-+ * unreference it
-+ */
-+ __run_posix_cpu_timers(tsk);
-+ tsk->posix_timer_list = NULL;
-+ put_task_struct(tsk);
++void local_bh_disable(void)
++{
++ migrate_disable();
++ current->softirq_nestcnt++;
++}
++EXPORT_SYMBOL(local_bh_disable);
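
On RT, local_bh_disable() no longer touches the preempt count: it pins the task to its CPU and bumps a per-task nesting counter, and the matching outermost local_bh_enable() defined just below runs any pending softirqs synchronously in task context. The caller-side contract is unchanged; an illustrative usage sketch, with the behaviour as this patch defines it:

	/* Caller-side view, for illustration only: */
	local_bh_disable();	/* RT: migrate_disable() + nestcnt++ */
	/* per-CPU softirq-protected data is safe here, though the
	 * task remains preemptible */
	local_bh_enable();	/* outermost enable runs pending softirqs */
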
+
-+ /* check if this is the last on the list */
-+ if (next == tsk)
-+ break;
-+ tsk = next;
-+ }
-+ }
-+ return 0;
++void local_bh_enable(void)
++{
++ if (WARN_ON(current->softirq_nestcnt == 0))
++ return;
+
-+wait_to_die:
-+ /* Wait for kthread_stop */
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ while (!kthread_should_stop()) {
-+ schedule();
-+ set_current_state(TASK_INTERRUPTIBLE);
++ if ((current->softirq_nestcnt == 1) &&
++ local_softirq_pending() &&
++ local_trylock(local_softirq_lock)) {
++
++ local_irq_disable();
++ if (local_softirq_pending())
++ __do_softirq();
++ local_unlock(local_softirq_lock);
++ local_irq_enable();
++ WARN_ON(current->softirq_nestcnt != 1);
+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
++ current->softirq_nestcnt--;
++ migrate_enable();
+}
++EXPORT_SYMBOL(local_bh_enable);
+
-+static inline int __fastpath_timer_check(struct task_struct *tsk)
++void local_bh_enable_ip(unsigned long ip)
+{
-+ /* tsk == current, ensure it is safe to use ->signal/sighand */
-+ if (unlikely(tsk->exit_state))
-+ return 0;
-+
-+ if (!task_cputime_zero(&tsk->cputime_expires))
-+ return 1;
-+
-+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
-+ return 1;
++ local_bh_enable();
++}
++EXPORT_SYMBOL(local_bh_enable_ip);
+
++/* For tracing */
++int notrace __in_softirq(void)
++{
++ if (__get_cpu_var(local_softirq_lock).owner == current)
++ return __get_cpu_var(local_softirq_lock).nestcnt;
+ return 0;
+}
+
-+void run_posix_cpu_timers(struct task_struct *tsk)
++int in_serving_softirq(void)
+{
-+ unsigned long cpu = smp_processor_id();
-+ struct task_struct *tasklist;
-+
-+ BUG_ON(!irqs_disabled());
-+ if(!per_cpu(posix_timer_task, cpu))
-+ return;
-+ /* get per-cpu references */
-+ tasklist = per_cpu(posix_timer_tasklist, cpu);
-+
-+ /* check to see if we're already queued */
-+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
-+ get_task_struct(tsk);
-+ if (tasklist) {
-+ tsk->posix_timer_list = tasklist;
-+ } else {
-+ /*
-+ * The list is terminated by a self-pointing
-+ * task_struct
-+ */
-+ tsk->posix_timer_list = tsk;
-+ }
-+ per_cpu(posix_timer_tasklist, cpu) = tsk;
++ int res;
+
-+ wake_up_process(per_cpu(posix_timer_task, cpu));
-+ }
++ preempt_disable();
++ res = __get_cpu_var(local_softirq_runner) == current;
++ preempt_enable();
++ return res;
+}
+
+/*
-+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
-+ * Here we can start up the necessary migration thread for the new CPU.
++ * Called with bh and local interrupts disabled. For full RT cpu must
++ * be pinned.
+ */
-+static int posix_cpu_thread_call(struct notifier_block *nfb,
-+ unsigned long action, void *hcpu)
++static void __do_softirq(void)
+{
-+ int cpu = (long)hcpu;
-+ struct task_struct *p;
-+ struct sched_param param;
++ u32 pending = local_softirq_pending();
++ int cpu = smp_processor_id();
+
-+ switch (action) {
-+ case CPU_UP_PREPARE:
-+ p = kthread_create(posix_cpu_timers_thread, hcpu,
-+ "posixcputmr/%d",cpu);
-+ if (IS_ERR(p))
-+ return NOTIFY_BAD;
-+ p->flags |= PF_NOFREEZE;
-+ kthread_bind(p, cpu);
-+ /* Must be high prio to avoid getting starved */
-+ param.sched_priority = MAX_RT_PRIO-1;
-+ sched_setscheduler(p, SCHED_FIFO, ¶m);
-+ per_cpu(posix_timer_task,cpu) = p;
-+ break;
-+ case CPU_ONLINE:
-+ /* Strictly unneccessary, as first user will wake it. */
-+ wake_up_process(per_cpu(posix_timer_task,cpu));
-+ break;
-+#ifdef CONFIG_HOTPLUG_CPU
-+ case CPU_UP_CANCELED:
-+ /* Unbind it from offline cpu so it can run. Fall thru. */
-+ kthread_bind(per_cpu(posix_timer_task,cpu),
-+ any_online_cpu(cpu_online_map));
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+ case CPU_DEAD:
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+#endif
++ current->softirq_nestcnt++;
++
++ /* Reset the pending bitmask before enabling irqs */
++ set_softirq_pending(0);
++
++ __get_cpu_var(local_softirq_runner) = current;
++
++ lockdep_softirq_enter();
++
++ handle_pending_softirqs(pending, cpu);
++
++ pending = local_softirq_pending();
++ if (pending)
++ wakeup_softirqd();
++
++ lockdep_softirq_exit();
++ __get_cpu_var(local_softirq_runner) = NULL;
++
++ current->softirq_nestcnt--;
++}
++
++static int __thread_do_softirq(int cpu)
++{
++ /*
++ * Prevent the current cpu from going offline.
++ * pin_current_cpu() can reenable preemption and block on the
++ * hotplug mutex. When it returns, the current cpu is
++ * pinned. It might be the wrong one, but the offline check
++ * below catches that.
++ */
++ pin_current_cpu();
++ /*
++ * If called from ksoftirqd (cpu >= 0) we need to check
++ * whether we are on the wrong cpu due to cpu offlining. If
++ * called via thread_do_softirq() no action required.
++ */
++ if (cpu >= 0 && cpu_is_offline(cpu)) {
++ unpin_current_cpu();
++ return -1;
+ }
-+ return NOTIFY_OK;
++ preempt_enable();
++ local_lock(local_softirq_lock);
++ local_irq_disable();
++ /*
++ * We cannot switch stacks on RT as we want to be able to
++ * schedule!
++ */
++ if (local_softirq_pending())
++ __do_softirq();
++ local_unlock(local_softirq_lock);
++ unpin_current_cpu();
++ preempt_disable();
++ local_irq_enable();
++ return 0;
+}
+
-+/* Register at highest priority so that task migration (migrate_all_tasks)
-+ * happens before everything else.
++/*
++ * Called from netif_rx_ni(). Preemption enabled.
+ */
-+static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
-+ .notifier_call = posix_cpu_thread_call,
-+ .priority = 10
-+};
++void thread_do_softirq(void)
++{
++ if (!in_serving_softirq()) {
++ preempt_disable();
++ __thread_do_softirq(-1);
++ preempt_enable();
++ }
++}
+
-+static int __init posix_cpu_thread_init(void)
++static int ksoftirqd_do_softirq(int cpu)
+{
-+ void *hcpu = (void *)(long)smp_processor_id();
-+ /* Start one for boot CPU. */
-+ unsigned long cpu;
++ return __thread_do_softirq(cpu);
++}
+
-+ /* init the per-cpu posix_timer_tasklets */
-+ for_each_cpu_mask(cpu, cpu_possible_map)
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
++static inline void local_bh_disable_nort(void) { }
++static inline void _local_bh_enable_nort(void) { }
+
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
-+ register_cpu_notifier(&posix_cpu_thread_notifier);
-+ return 0;
++static inline void ksoftirqd_set_sched_params(void)
++{
++ struct sched_param param = { .sched_priority = 1 };
++
++ sched_setscheduler(current, SCHED_FIFO, ¶m);
+}
-+early_initcall(posix_cpu_thread_init);
-+#else /* CONFIG_PREEMPT_RT_BASE */
-+void run_posix_cpu_timers(struct task_struct *tsk)
++
++static inline void ksoftirqd_clr_sched_params(void)
+{
-+ __run_posix_cpu_timers(tsk);
++ struct sched_param param = { .sched_priority = 0 };
++
++ sched_setscheduler(current, SCHED_NORMAL, ¶m);
+}
-+#endif /* CONFIG_PREEMPT_RT_BASE */
+
++#endif /* PREEMPT_RT_FULL */
/*
- * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
- * The tsk->sighand->siglock must be held by the caller.
-Index: linux-2.6/kernel/sched_stats.h
-===================================================================
---- linux-2.6.orig/kernel/sched_stats.h
-+++ linux-2.6/kernel/sched_stats.h
-@@ -282,10 +282,10 @@ static inline void account_group_user_ti
- if (!cputimer->running)
- return;
+ * Enter an interrupt context.
+ */
+@@ -302,9 +555,9 @@ void irq_enter(void)
+ * Prevent raise_softirq from needlessly waking up ksoftirqd
+ * here, as softirq will be serviced on return from interrupt.
+ */
+- local_bh_disable();
++ local_bh_disable_nort();
+ tick_check_idle(cpu);
+- _local_bh_enable();
++ _local_bh_enable_nort();
+ }
-- spin_lock(&cputimer->lock);
-+ raw_spin_lock(&cputimer->lock);
- cputimer->cputime.utime =
- cputime_add(cputimer->cputime.utime, cputime);
-- spin_unlock(&cputimer->lock);
-+ raw_spin_unlock(&cputimer->lock);
+ __irq_enter();
+@@ -313,6 +566,7 @@ void irq_enter(void)
+ #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!force_irqthreads)
+ __do_softirq();
+ else {
+@@ -321,10 +575,14 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
++#else
++ wakeup_softirqd();
++#endif
}
-
- /**
-@@ -306,10 +306,10 @@ static inline void account_group_system_
- if (!cputimer->running)
- return;
-
-- spin_lock(&cputimer->lock);
-+ raw_spin_lock(&cputimer->lock);
- cputimer->cputime.stime =
- cputime_add(cputimer->cputime.stime, cputime);
-- spin_unlock(&cputimer->lock);
-+ raw_spin_unlock(&cputimer->lock);
+ #else
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!force_irqthreads)
+ do_softirq();
+ else {
+@@ -333,6 +591,9 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
++#else
++ wakeup_softirqd();
++#endif
}
+ #endif
- /**
-@@ -330,7 +330,7 @@ static inline void account_group_exec_ru
- if (!cputimer->running)
- return;
-
-- spin_lock(&cputimer->lock);
-+ raw_spin_lock(&cputimer->lock);
- cputimer->cputime.sum_exec_runtime += ns;
-- spin_unlock(&cputimer->lock);
-+ raw_spin_unlock(&cputimer->lock);
+@@ -353,7 +614,7 @@ void irq_exit(void)
+ if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
+ tick_nohz_stop_sched_tick(0);
+ #endif
+- preempt_enable_no_resched();
++ __preempt_enable_no_resched();
}
-Index: linux-2.6/include/linux/semaphore.h
-===================================================================
---- linux-2.6.orig/include/linux/semaphore.h
-+++ linux-2.6/include/linux/semaphore.h
-@@ -14,14 +14,14 @@
- /* Please don't access any members of this structure directly */
- struct semaphore {
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- unsigned int count;
- struct list_head wait_list;
- };
+ /*
+@@ -739,29 +1000,21 @@ void __init softirq_init(void)
- #define __SEMAPHORE_INITIALIZER(name, n) \
- { \
-- .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
-+ .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
- .count = n, \
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
- }
-Index: linux-2.6/kernel/semaphore.c
-===================================================================
---- linux-2.6.orig/kernel/semaphore.c
-+++ linux-2.6/kernel/semaphore.c
-@@ -54,12 +54,12 @@ void down(struct semaphore *sem)
+ static int run_ksoftirqd(void * __bind_cpu)
{
- unsigned long flags;
++ ksoftirqd_set_sched_params();
++
+ set_current_state(TASK_INTERRUPTIBLE);
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- __down(sem);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
- }
- EXPORT_SYMBOL(down);
+ while (!kthread_should_stop()) {
+ preempt_disable();
+- if (!local_softirq_pending()) {
+- preempt_enable_no_resched();
+- schedule();
+- preempt_disable();
+- }
++ if (!local_softirq_pending())
++ schedule_preempt_disabled();
-@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore
- unsigned long flags;
- int result = 0;
+ __set_current_state(TASK_RUNNING);
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- result = __down_interruptible(sem);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ while (local_softirq_pending()) {
+- /* Preempt disable stops cpu going offline.
+- If already offline, we'll be on wrong CPU:
+- don't process */
+- if (cpu_is_offline((long)__bind_cpu))
++ if (ksoftirqd_do_softirq((long) __bind_cpu))
+ goto wait_to_die;
+- local_irq_disable();
+- if (local_softirq_pending())
+- __do_softirq();
+- local_irq_enable();
+- preempt_enable_no_resched();
++ __preempt_enable_no_resched();
+ cond_resched();
+ preempt_disable();
+ rcu_note_context_switch((long)__bind_cpu);
+@@ -774,6 +1027,7 @@ static int run_ksoftirqd(void * __bind_c
- return result;
- }
-@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem)
- unsigned long flags;
- int result = 0;
+ wait_to_die:
+ preempt_enable();
++ ksoftirqd_clr_sched_params();
+ /* Wait for kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+Index: linux-2.6/include/linux/kprobes.h
+===================================================================
+--- linux-2.6.orig/include/linux/kprobes.h
++++ linux-2.6/include/linux/kprobes.h
+@@ -181,7 +181,7 @@ struct kretprobe {
+ int nmissed;
+ size_t data_size;
+ struct hlist_head free_instances;
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ };
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- result = __down_killable(sem);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ struct kretprobe_instance {
+Index: linux-2.6/kernel/kprobes.c
+===================================================================
+--- linux-2.6.orig/kernel/kprobes.c
++++ linux-2.6/kernel/kprobes.c
+@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
+ static DEFINE_MUTEX(kprobe_mutex);
+ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+ static struct {
+- spinlock_t lock ____cacheline_aligned_in_smp;
++ raw_spinlock_t lock ____cacheline_aligned_in_smp;
+ } kretprobe_table_locks[KPROBE_TABLE_SIZE];
- return result;
+-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
++static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+ {
+ return &(kretprobe_table_locks[hash].lock);
}
-@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem)
- unsigned long flags;
- int count;
+@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kr
+ hlist_del(&ri->hlist);
+ INIT_HLIST_NODE(&ri->hlist);
+ if (likely(rp)) {
+- spin_lock(&rp->lock);
++ raw_spin_lock(&rp->lock);
+ hlist_add_head(&ri->hlist, &rp->free_instances);
+- spin_unlock(&rp->lock);
++ raw_spin_unlock(&rp->lock);
+ } else
+ /* Unregistering */
+ hlist_add_head(&ri->hlist, head);
+@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struc
+ __acquires(hlist_lock)
+ {
+ unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
+- spinlock_t *hlist_lock;
++ raw_spinlock_t *hlist_lock;
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- count = sem->count - 1;
- if (likely(count >= 0))
- sem->count = count;
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ *head = &kretprobe_inst_table[hash];
+ hlist_lock = kretprobe_table_lock_ptr(hash);
+- spin_lock_irqsave(hlist_lock, *flags);
++ raw_spin_lock_irqsave(hlist_lock, *flags);
+ }
- return (count < 0);
+ static void __kprobes kretprobe_table_lock(unsigned long hash,
+ unsigned long *flags)
+ __acquires(hlist_lock)
+ {
+- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+- spin_lock_irqsave(hlist_lock, *flags);
++ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
++ raw_spin_lock_irqsave(hlist_lock, *flags);
}
-@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem,
- unsigned long flags;
- int result = 0;
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- result = __down_timeout(sem, jiffies);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
+@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(str
+ __releases(hlist_lock)
+ {
+ unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
+- spinlock_t *hlist_lock;
++ raw_spinlock_t *hlist_lock;
- return result;
+ hlist_lock = kretprobe_table_lock_ptr(hash);
+- spin_unlock_irqrestore(hlist_lock, *flags);
++ raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
-@@ -179,12 +179,12 @@ void up(struct semaphore *sem)
- {
- unsigned long flags;
-- spin_lock_irqsave(&sem->lock, flags);
-+ raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(list_empty(&sem->wait_list)))
- sem->count++;
- else
- __up(sem);
-- spin_unlock_irqrestore(&sem->lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ static void __kprobes kretprobe_table_unlock(unsigned long hash,
+ unsigned long *flags)
+ __releases(hlist_lock)
+ {
+- spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+- spin_unlock_irqrestore(hlist_lock, *flags);
++ raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
++ raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
- EXPORT_SYMBOL(up);
-@@ -217,9 +217,9 @@ static inline int __sched __down_common(
- if (timeout <= 0)
- goto timed_out;
- __set_task_state(task, state);
-- spin_unlock_irq(&sem->lock);
-+ raw_spin_unlock_irq(&sem->lock);
- timeout = schedule_timeout(timeout);
-- spin_lock_irq(&sem->lock);
-+ raw_spin_lock_irq(&sem->lock);
- if (waiter.up)
- return 0;
+ /*
+@@ -1650,12 +1650,12 @@ static int __kprobes pre_handler_kretpro
+
+ /*TODO: consider to only swap the RA after the last pre_handler fired */
+ hash = hash_ptr(current, KPROBE_HASH_BITS);
+- spin_lock_irqsave(&rp->lock, flags);
++ raw_spin_lock_irqsave(&rp->lock, flags);
+ if (!hlist_empty(&rp->free_instances)) {
+ ri = hlist_entry(rp->free_instances.first,
+ struct kretprobe_instance, hlist);
+ hlist_del(&ri->hlist);
+- spin_unlock_irqrestore(&rp->lock, flags);
++ raw_spin_unlock_irqrestore(&rp->lock, flags);
+
+ ri->rp = rp;
+ ri->task = current;
+@@ -1672,7 +1672,7 @@ static int __kprobes pre_handler_kretpro
+ kretprobe_table_unlock(hash, &flags);
+ } else {
+ rp->nmissed++;
+- spin_unlock_irqrestore(&rp->lock, flags);
++ raw_spin_unlock_irqrestore(&rp->lock, flags);
}
-Index: linux-2.6/include/linux/rwsem-spinlock.h
-===================================================================
---- linux-2.6.orig/include/linux/rwsem-spinlock.h
-+++ linux-2.6/include/linux/rwsem-spinlock.h
-@@ -20,26 +20,42 @@
- * - if activity is -1 then there is one active writer
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-+struct rw_anon_semaphore {
-+ __s32 activity;
-+ raw_spinlock_t wait_lock;
-+ struct list_head wait_list;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ struct lockdep_map dep_map;
-+#endif
-+};
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * Non preempt-rt implementation of rw_semaphore. Same as above, but
-+ * restricted vs. ownership. i.e. ownerless locked state and non owner
-+ * release not allowed.
-+ */
- struct rw_semaphore {
- __s32 activity;
-- spinlock_t wait_lock;
-+ raw_spinlock_t wait_lock;
- struct list_head wait_list;
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
+ return 0;
+ }
+@@ -1708,7 +1708,7 @@ int __kprobes register_kretprobe(struct
+ rp->maxactive = num_possible_cpus();
#endif
- };
-+#endif /* PREEMPT_RT_FULL */
+ }
+- spin_lock_init(&rp->lock);
++ raw_spin_lock_init(&rp->lock);
+ INIT_HLIST_HEAD(&rp->free_instances);
+ for (i = 0; i < rp->maxactive; i++) {
+ inst = kmalloc(sizeof(struct kretprobe_instance) +
+@@ -1946,7 +1946,7 @@ static int __init init_kprobes(void)
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ INIT_HLIST_HEAD(&kprobe_table[i]);
+ INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
+- spin_lock_init(&(kretprobe_table_locks[i].lock));
++ raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
+ }
- #define RWSEM_UNLOCKED_VALUE 0x00000000
-
--extern void __down_read(struct rw_semaphore *sem);
--extern int __down_read_trylock(struct rw_semaphore *sem);
--extern void __down_write(struct rw_semaphore *sem);
--extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
--extern int __down_write_trylock(struct rw_semaphore *sem);
--extern void __up_read(struct rw_semaphore *sem);
--extern void __up_write(struct rw_semaphore *sem);
--extern void __downgrade_write(struct rw_semaphore *sem);
--extern int rwsem_is_locked(struct rw_semaphore *sem);
-+extern void __down_read(struct rw_anon_semaphore *sem);
-+extern int __down_read_trylock(struct rw_anon_semaphore *sem);
-+extern void __down_write(struct rw_anon_semaphore *sem);
-+extern void __down_write_nested(struct rw_anon_semaphore *sem, int subclass);
-+extern int __down_write_trylock(struct rw_anon_semaphore *sem);
-+extern void __up_read(struct rw_anon_semaphore *sem);
-+extern void __up_write(struct rw_anon_semaphore *sem);
-+extern void __downgrade_write(struct rw_anon_semaphore *sem);
-+extern int anon_rwsem_is_locked(struct rw_anon_semaphore *sem);
-
- #endif /* __KERNEL__ */
- #endif /* _LINUX_RWSEM_SPINLOCK_H */
-Index: linux-2.6/include/linux/rwsem.h
+ /*
+Index: linux-2.6/include/linux/percpu_counter.h
===================================================================
---- linux-2.6.orig/include/linux/rwsem.h
-+++ linux-2.6/include/linux/rwsem.h
-@@ -17,37 +17,50 @@
- #include <asm/system.h>
- #include <asm/atomic.h>
-
-+struct rw_anon_semaphore;
- struct rw_semaphore;
+--- linux-2.6.orig/include/linux/percpu_counter.h
++++ linux-2.6/include/linux/percpu_counter.h
+@@ -16,7 +16,7 @@
+ #ifdef CONFIG_SMP
- #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
- #include <linux/rwsem-spinlock.h> /* use a generic implementation */
--#else
-+#else /* RWSEM_GENERIC_SPINLOCK */
-+
- /* All arch specific implementations share the same struct */
--struct rw_semaphore {
-+struct rw_anon_semaphore {
- long count;
-- spinlock_t wait_lock;
-+ raw_spinlock_t wait_lock;
- struct list_head wait_list;
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
- #endif
- };
+ struct percpu_counter {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ s64 count;
+ #ifdef CONFIG_HOTPLUG_CPU
+ struct list_head list; /* All percpu_counters are on a list */
+Index: linux-2.6/lib/percpu_counter.c
+===================================================================
+--- linux-2.6.orig/lib/percpu_counter.c
++++ linux-2.6/lib/percpu_counter.c
+@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_co
+ {
+ int cpu;
--extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
--extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
--extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
--extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-+extern struct rw_anon_semaphore *rwsem_down_read_failed(struct rw_anon_semaphore *sem);
-+extern struct rw_anon_semaphore *rwsem_down_write_failed(struct rw_anon_semaphore *sem);
-+extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *);
-+extern struct rw_anon_semaphore *rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
+- spin_lock(&fbc->lock);
++ raw_spin_lock(&fbc->lock);
+ for_each_possible_cpu(cpu) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ *pcount = 0;
+ }
+ fbc->count = amount;
+- spin_unlock(&fbc->lock);
++ raw_spin_unlock(&fbc->lock);
+ }
+ EXPORT_SYMBOL(percpu_counter_set);
- /* Include the arch specific part */
- #include <asm/rwsem.h>
+@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_
+ preempt_disable();
+ count = __this_cpu_read(*fbc->counters) + amount;
+ if (count >= batch || count <= -batch) {
+- spin_lock(&fbc->lock);
++ raw_spin_lock(&fbc->lock);
+ fbc->count += count;
+ __this_cpu_write(*fbc->counters, 0);
+- spin_unlock(&fbc->lock);
++ raw_spin_unlock(&fbc->lock);
+ } else {
+ __this_cpu_write(*fbc->counters, count);
+ }
+@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_c
+ s64 ret;
+ int cpu;
- /* In all implementations count != 0 means locked */
--static inline int rwsem_is_locked(struct rw_semaphore *sem)
-+static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
- {
- return sem->count != 0;
+- spin_lock(&fbc->lock);
++ raw_spin_lock(&fbc->lock);
+ ret = fbc->count;
+ for_each_online_cpu(cpu) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ ret += *pcount;
+ }
+- spin_unlock(&fbc->lock);
++ raw_spin_unlock(&fbc->lock);
+ return ret;
}
+ EXPORT_SYMBOL(__percpu_counter_sum);
+@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
+ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+ struct lock_class_key *key)
+ {
+- spin_lock_init(&fbc->lock);
++ raw_spin_lock_init(&fbc->lock);
+ lockdep_set_class(&fbc->lock, key);
+ fbc->count = amount;
+ fbc->counters = alloc_percpu(s32);
+@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotc
+ s32 *pcount;
+ unsigned long flags;
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+struct rw_semaphore {
-+ long count;
-+ raw_spinlock_t wait_lock;
-+ struct list_head wait_list;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+ struct lockdep_map dep_map;
-+#endif
-+};
+- spin_lock_irqsave(&fbc->lock, flags);
++ raw_spin_lock_irqsave(&fbc->lock, flags);
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ fbc->count += *pcount;
+ *pcount = 0;
+- spin_unlock_irqrestore(&fbc->lock, flags);
++ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+ }
+ mutex_unlock(&percpu_counters_lock);
#endif
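Note: percpu_counter keeps a per-CPU s32 next to the central s64; __percpu_counter_add() folds the local value into fbc->count under fbc->lock only when it crosses the batch threshold, and it does so inside preempt_disable(), which is why the lock has to become a raw spinlock on RT. The API is unchanged for callers; a usage sketch against the 3.0 interface (hypothetical names):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu_counter.h>

static struct percpu_counter example_events;

static int __init example_counter_init(void)
{
	int err = percpu_counter_init(&example_events, 0);

	if (err)
		return err;
	percpu_counter_add(&example_events, 1);	/* per-CPU fast path */
	pr_info("events=%lld\n",
		(long long)percpu_counter_sum(&example_events));
	return 0;
}

The fast path never takes the lock at all, so the raw conversion costs nothing in the common case.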
+Index: linux-2.6/kernel/cgroup.c
+===================================================================
+--- linux-2.6.orig/kernel/cgroup.c
++++ linux-2.6/kernel/cgroup.c
+@@ -263,7 +263,7 @@ list_for_each_entry(_root, &roots, root_
+ /* the list of cgroups eligible for automatic release. Protected by
+ * release_list_lock */
+ static LIST_HEAD(release_list);
+-static DEFINE_SPINLOCK(release_list_lock);
++static DEFINE_RAW_SPINLOCK(release_list_lock);
+ static void cgroup_release_agent(struct work_struct *work);
+ static DECLARE_WORK(release_agent_work, cgroup_release_agent);
+ static void check_for_release(struct cgroup *cgrp);
+@@ -4010,11 +4010,11 @@ again:
+ finish_wait(&cgroup_rmdir_waitq, &wait);
+ clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
-+#endif /* !RWSEM_GENERIC_SPINLOCK */
-+
- /* Common initializer macros and functions */
+- spin_lock(&release_list_lock);
++ raw_spin_lock(&release_list_lock);
+ set_bit(CGRP_REMOVED, &cgrp->flags);
+ if (!list_empty(&cgrp->release_list))
+ list_del_init(&cgrp->release_list);
+- spin_unlock(&release_list_lock);
++ raw_spin_unlock(&release_list_lock);
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
-@@ -56,57 +69,59 @@ static inline int rwsem_is_locked(struct
- # define __RWSEM_DEP_MAP_INIT(lockname)
- #endif
+ cgroup_lock_hierarchy(cgrp->root);
+ /* delete this cgroup from parent->children */
+@@ -4667,13 +4667,13 @@ static void check_for_release(struct cgr
+ * already queued for a userspace notification, queue
+ * it now */
+ int need_schedule_work = 0;
+- spin_lock(&release_list_lock);
++ raw_spin_lock(&release_list_lock);
+ if (!cgroup_is_removed(cgrp) &&
+ list_empty(&cgrp->release_list)) {
+ list_add(&cgrp->release_list, &release_list);
+ need_schedule_work = 1;
+ }
+- spin_unlock(&release_list_lock);
++ raw_spin_unlock(&release_list_lock);
+ if (need_schedule_work)
+ schedule_work(&release_agent_work);
+ }
+@@ -4725,7 +4725,7 @@ static void cgroup_release_agent(struct
+ {
+ BUG_ON(work != &release_agent_work);
+ mutex_lock(&cgroup_mutex);
+- spin_lock(&release_list_lock);
++ raw_spin_lock(&release_list_lock);
+ while (!list_empty(&release_list)) {
+ char *argv[3], *envp[3];
+ int i;
+@@ -4734,7 +4734,7 @@ static void cgroup_release_agent(struct
+ struct cgroup,
+ release_list);
+ list_del_init(&cgrp->release_list);
+- spin_unlock(&release_list_lock);
++ raw_spin_unlock(&release_list_lock);
+ pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pathbuf)
+ goto continue_free;
+@@ -4764,9 +4764,9 @@ static void cgroup_release_agent(struct
+ continue_free:
+ kfree(pathbuf);
+ kfree(agentbuf);
+- spin_lock(&release_list_lock);
++ raw_spin_lock(&release_list_lock);
+ }
+- spin_unlock(&release_list_lock);
++ raw_spin_unlock(&release_list_lock);
+ mutex_unlock(&cgroup_mutex);
+ }
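Note: the cgroup_release_agent() hunk above shows the discipline a raw spinlock imposes: it may never be held across a sleeping operation, so release_list_lock is dropped before the kmalloc(GFP_KERNEL) work and re-taken at the bottom of the loop. The general drain pattern, sketched with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_item {
	struct list_head link;
};

static LIST_HEAD(example_list);
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_process(struct example_item *it);	/* may sleep */

static void example_drain(void)
{
	raw_spin_lock(&example_lock);
	while (!list_empty(&example_list)) {
		struct example_item *it =
			list_first_entry(&example_list,
					 struct example_item, link);

		list_del_init(&it->link);
		raw_spin_unlock(&example_lock);	/* about to sleep */
		example_process(it);		/* e.g. kmalloc(GFP_KERNEL) */
		raw_spin_lock(&example_lock);
	}
	raw_spin_unlock(&example_lock);
}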
--#define __RWSEM_INITIALIZER(name) \
-- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
-- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
-+#define __RWSEM_ANON_INITIALIZER(name) \
-+ { RWSEM_UNLOCKED_VALUE, \
-+ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
-+ LIST_HEAD_INIT((name).wait_list) \
-+ __RWSEM_DEP_MAP_INIT(name) }
+Index: linux-2.6/include/linux/proportions.h
+===================================================================
+--- linux-2.6.orig/include/linux/proportions.h
++++ linux-2.6/include/linux/proportions.h
+@@ -58,7 +58,7 @@ struct prop_local_percpu {
+ */
+ int shift;
+ unsigned long period;
+- spinlock_t lock; /* protect the snapshot state */
++ raw_spinlock_t lock; /* protect the snapshot state */
+ };
--#define DECLARE_RWSEM(name) \
-- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-+#define DECLARE_ANON_RWSEM(name) \
-+ struct rw_anon_semaphore name = __RWSEM_INITIALIZER(name)
+ int prop_local_init_percpu(struct prop_local_percpu *pl);
+@@ -106,11 +106,11 @@ struct prop_local_single {
+ */
+ unsigned long period;
+ int shift;
+- spinlock_t lock; /* protect the snapshot state */
++ raw_spinlock_t lock; /* protect the snapshot state */
+ };
--extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-- struct lock_class_key *key);
-+extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
-+ struct lock_class_key *key);
+ #define INIT_PROP_LOCAL_SINGLE(name) \
+-{ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
++{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ }
--#define init_rwsem(sem) \
-+#define init_anon_rwsem(sem) \
- do { \
- static struct lock_class_key __key; \
- \
-- __init_rwsem((sem), #sem, &__key); \
-+ __init_anon_rwsem((sem), #sem, &__key); \
- } while (0)
+ int prop_local_init_single(struct prop_local_single *pl);
+Index: linux-2.6/lib/proportions.c
+===================================================================
+--- linux-2.6.orig/lib/proportions.c
++++ linux-2.6/lib/proportions.c
+@@ -190,7 +190,7 @@ prop_adjust_shift(int *pl_shift, unsigne
- /*
- * lock for reading
- */
--extern void down_read(struct rw_semaphore *sem);
-+extern void anon_down_read(struct rw_anon_semaphore *sem);
+ int prop_local_init_percpu(struct prop_local_percpu *pl)
+ {
+- spin_lock_init(&pl->lock);
++ raw_spin_lock_init(&pl->lock);
+ pl->shift = 0;
+ pl->period = 0;
+ return percpu_counter_init(&pl->events, 0);
+@@ -226,7 +226,7 @@ void prop_norm_percpu(struct prop_global
+ if (pl->period == global_period)
+ return;
- /*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
--extern int down_read_trylock(struct rw_semaphore *sem);
-+extern int anon_down_read_trylock(struct rw_anon_semaphore *sem);
+- spin_lock_irqsave(&pl->lock, flags);
++ raw_spin_lock_irqsave(&pl->lock, flags);
+ prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
- /*
- * lock for writing
- */
--extern void down_write(struct rw_semaphore *sem);
-+extern void anon_down_write(struct rw_anon_semaphore *sem);
+ /*
+@@ -247,7 +247,7 @@ void prop_norm_percpu(struct prop_global
+ percpu_counter_set(&pl->events, 0);
- /*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
--extern int down_write_trylock(struct rw_semaphore *sem);
-+extern int anon_down_write_trylock(struct rw_anon_semaphore *sem);
+ pl->period = global_period;
+- spin_unlock_irqrestore(&pl->lock, flags);
++ raw_spin_unlock_irqrestore(&pl->lock, flags);
+ }
/*
- * release a read lock
- */
--extern void up_read(struct rw_semaphore *sem);
-+extern void anon_up_read(struct rw_anon_semaphore *sem);
+@@ -324,7 +324,7 @@ void prop_fraction_percpu(struct prop_de
- /*
- * release a write lock
- */
--extern void up_write(struct rw_semaphore *sem);
-+extern void anon_up_write(struct rw_anon_semaphore *sem);
+ int prop_local_init_single(struct prop_local_single *pl)
+ {
+- spin_lock_init(&pl->lock);
++ raw_spin_lock_init(&pl->lock);
+ pl->shift = 0;
+ pl->period = 0;
+ pl->events = 0;
+@@ -356,7 +356,7 @@ void prop_norm_single(struct prop_global
+ if (pl->period == global_period)
+ return;
- /*
- * downgrade write lock to read lock
- */
--extern void downgrade_write(struct rw_semaphore *sem);
-+extern void anon_downgrade_write(struct rw_anon_semaphore *sem);
+- spin_lock_irqsave(&pl->lock, flags);
++ raw_spin_lock_irqsave(&pl->lock, flags);
+ prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
+ /*
+ * For each missed period, we half the local counter.
+@@ -367,7 +367,7 @@ void prop_norm_single(struct prop_global
+ else
+ pl->events = 0;
+ pl->period = global_period;
+- spin_unlock_irqrestore(&pl->lock, flags);
++ raw_spin_unlock_irqrestore(&pl->lock, flags);
+ }
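Note: in lib/proportions.c the lock only guards the tiny snapshot state (shift and period), and prop_norm_single()/prop_norm_percpu() halve the local event count once per missed global period while holding it, as the "we half the local counter" comment above says. A simplified model of that decay (hypothetical, shift handling omitted):

#include <linux/bitops.h>	/* BITS_PER_LONG */

/* halve the local count once per missed global period */
static void example_decay(unsigned long *events, unsigned long missed)
{
	if (missed < BITS_PER_LONG)
		*events >>= missed;
	else
		*events = 0;
}

A critical section of a few shifts and stores is exactly what raw spinlocks are meant for on RT.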
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
-@@ -122,21 +137,101 @@ extern void downgrade_write(struct rw_se
- * lockdep_set_class() at lock initialization time.
- * See Documentation/lockdep-design.txt for more details.)
- */
--extern void down_read_nested(struct rw_semaphore *sem, int subclass);
--extern void down_write_nested(struct rw_semaphore *sem, int subclass);
-+extern void anon_down_read_nested(struct rw_anon_semaphore *sem, int subclass);
-+extern void anon_down_write_nested(struct rw_anon_semaphore *sem, int subclass);
/*
- * Take/release a lock when not the owner will release it.
- *
- * [ This API should be avoided as much as possible - the
- * proper abstraction for this case is completions. ]
- */
--extern void down_read_non_owner(struct rw_semaphore *sem);
--extern void up_read_non_owner(struct rw_semaphore *sem);
-+extern void anon_down_read_non_owner(struct rw_anon_semaphore *sem);
-+extern void anon_up_read_non_owner(struct rw_anon_semaphore *sem);
- #else
--# define down_read_nested(sem, subclass) down_read(sem)
--# define down_write_nested(sem, subclass) down_write(sem)
--# define down_read_non_owner(sem) down_read(sem)
--# define up_read_non_owner(sem) up_read(sem)
-+# define anon_down_read_nested(sem, subclass) anon_down_read(sem)
-+# define anon_down_write_nested(sem, subclass) anon_down_write(sem)
-+# define anon_down_read_non_owner(sem) anon_down_read(sem)
-+# define anon_up_read_non_owner(sem) anon_up_read(sem)
- #endif
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+#include <linux/rwsem_rt.h>
-+#else /* PREEMPT_RT_FULL */
-+/*
-+ * Non preempt-rt implementations
-+ */
-+#define __RWSEM_INITIALIZER(name) \
-+ { RWSEM_UNLOCKED_VALUE, \
-+ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
-+ LIST_HEAD_INIT((name).wait_list) \
-+ __RWSEM_DEP_MAP_INIT(name) }
-+
-+#define DECLARE_RWSEM(name) \
-+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-+
-+static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
-+ struct lock_class_key *key)
-+{
-+ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
-+}
-+
-+#define init_rwsem(sem) \
-+do { \
-+ static struct lock_class_key __key; \
-+ \
-+ __init_rwsem((sem), #sem, &__key); \
-+} while (0)
-+
-+static inline void down_read(struct rw_semaphore *sem)
-+{
-+ anon_down_read((struct rw_anon_semaphore *)sem);
-+}
-+
-+static inline int down_read_trylock(struct rw_semaphore *sem)
-+{
-+ return anon_down_read_trylock((struct rw_anon_semaphore *)sem);
-+}
-+
-+static inline void down_write(struct rw_semaphore *sem)
-+{
-+ anon_down_write((struct rw_anon_semaphore *)sem);
-+}
-+
-+static inline int down_write_trylock(struct rw_semaphore *sem)
-+{
-+ return anon_down_write_trylock((struct rw_anon_semaphore *)sem);
-+}
-+
-+static inline void up_read(struct rw_semaphore *sem)
-+{
-+ anon_up_read((struct rw_anon_semaphore *)sem);
-+}
-+
-+static inline void up_write(struct rw_semaphore *sem)
-+{
-+ anon_up_write((struct rw_anon_semaphore *)sem);
-+}
-+
-+static inline void downgrade_write(struct rw_semaphore *sem)
-+{
-+ anon_downgrade_write((struct rw_anon_semaphore *)sem);
-+}
-+
-+static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
-+{
-+ return anon_down_read_nested((struct rw_anon_semaphore *)sem, subclass);
-+}
-+
-+static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
-+{
-+ anon_down_write_nested((struct rw_anon_semaphore *)sem, subclass);
-+}
-+
-+static inline int rwsem_is_locked(struct rw_semaphore *sem)
-+{
-+ return anon_rwsem_is_locked((struct rw_anon_semaphore *)sem);
-+}
-+#endif /* !PREEMPT_RT_FULL */
-+
- #endif /* _LINUX_RWSEM_H */
-+
-Index: linux-2.6/lib/rwsem-spinlock.c
+Index: linux-2.6/kernel/trace/ring_buffer.c
===================================================================
---- linux-2.6.orig/lib/rwsem-spinlock.c
-+++ linux-2.6/lib/rwsem-spinlock.c
-@@ -17,24 +17,24 @@ struct rwsem_waiter {
- #define RWSEM_WAITING_FOR_WRITE 0x00000002
- };
+--- linux-2.6.orig/kernel/trace/ring_buffer.c
++++ linux-2.6/kernel/trace/ring_buffer.c
+@@ -478,7 +478,7 @@ struct ring_buffer_per_cpu {
+ int cpu;
+ atomic_t record_disabled;
+ struct ring_buffer *buffer;
+- spinlock_t reader_lock; /* serialize readers */
++ raw_spinlock_t reader_lock; /* serialize readers */
+ arch_spinlock_t lock;
+ struct lock_class_key lock_key;
+ struct list_head *pages;
+@@ -1055,7 +1055,7 @@ rb_allocate_cpu_buffer(struct ring_buffe
--int rwsem_is_locked(struct rw_semaphore *sem)
-+int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
- {
- int ret = 1;
- unsigned long flags;
+ cpu_buffer->cpu = cpu;
+ cpu_buffer->buffer = buffer;
+- spin_lock_init(&cpu_buffer->reader_lock);
++ raw_spin_lock_init(&cpu_buffer->reader_lock);
+ lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
+ cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-- if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
-+ if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
- ret = (sem->activity != 0);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- }
- return ret;
- }
--EXPORT_SYMBOL(rwsem_is_locked);
-+EXPORT_SYMBOL(anon_rwsem_is_locked);
+@@ -1252,7 +1252,7 @@ rb_remove_pages(struct ring_buffer_per_c
+ struct list_head *p;
+ unsigned i;
- /*
- * initialise the semaphore
- */
--void __init_rwsem(struct rw_semaphore *sem, const char *name,
-- struct lock_class_key *key)
-+void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
-+ struct lock_class_key *key)
- {
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
-@@ -44,10 +44,10 @@ void __init_rwsem(struct rw_semaphore *s
- lockdep_init_map(&sem->dep_map, name, key, 0);
- #endif
- sem->activity = 0;
-- spin_lock_init(&sem->wait_lock);
-+ raw_spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
- }
--EXPORT_SYMBOL(__init_rwsem);
-+EXPORT_SYMBOL(__init_anon_rwsem);
+- spin_lock_irq(&cpu_buffer->reader_lock);
++ raw_spin_lock_irq(&cpu_buffer->reader_lock);
+ rb_head_page_deactivate(cpu_buffer);
- /*
- * handle the lock release when processes blocked on it that can now run
-@@ -58,8 +58,8 @@ EXPORT_SYMBOL(__init_rwsem);
- * - woken process blocks are discarded from the list after having task zeroed
- * - writers are only woken if wakewrite is non-zero
- */
--static inline struct rw_semaphore *
--__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
-+static inline struct rw_anon_semaphore *
-+__rwsem_do_wake(struct rw_anon_semaphore *sem, int wakewrite)
- {
- struct rwsem_waiter *waiter;
- struct task_struct *tsk;
-@@ -117,8 +117,8 @@ __rwsem_do_wake(struct rw_semaphore *sem
- /*
- * wake a single writer
- */
--static inline struct rw_semaphore *
--__rwsem_wake_one_writer(struct rw_semaphore *sem)
-+static inline struct rw_anon_semaphore *
-+__rwsem_wake_one_writer(struct rw_anon_semaphore *sem)
- {
- struct rwsem_waiter *waiter;
- struct task_struct *tsk;
-@@ -139,18 +139,18 @@ __rwsem_wake_one_writer(struct rw_semaph
- /*
- * get a read lock on the semaphore
- */
--void __sched __down_read(struct rw_semaphore *sem)
-+void __sched __down_read(struct rw_anon_semaphore *sem)
- {
- struct rwsem_waiter waiter;
- struct task_struct *tsk;
- unsigned long flags;
+ for (i = 0; i < nr_pages; i++) {
+@@ -1270,7 +1270,7 @@ rb_remove_pages(struct ring_buffer_per_c
+ rb_check_pages(cpu_buffer);
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+ out:
+- spin_unlock_irq(&cpu_buffer->reader_lock);
++ raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ }
- if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
- /* granted */
- sem->activity++;
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- goto out;
- }
+ static void
+@@ -1281,7 +1281,7 @@ rb_insert_pages(struct ring_buffer_per_c
+ struct list_head *p;
+ unsigned i;
-@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semap
- list_add_tail(&waiter.list, &sem->wait_list);
+- spin_lock_irq(&cpu_buffer->reader_lock);
++ raw_spin_lock_irq(&cpu_buffer->reader_lock);
+ rb_head_page_deactivate(cpu_buffer);
- /* we don't need to touch the semaphore struct anymore */
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ for (i = 0; i < nr_pages; i++) {
+@@ -1296,7 +1296,7 @@ rb_insert_pages(struct ring_buffer_per_c
+ rb_check_pages(cpu_buffer);
- /* wait to be given the lock */
- for (;;) {
-@@ -183,13 +183,13 @@ void __sched __down_read(struct rw_semap
- /*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
--int __down_read_trylock(struct rw_semaphore *sem)
-+int __down_read_trylock(struct rw_anon_semaphore *sem)
- {
- unsigned long flags;
- int ret = 0;
+ out:
+- spin_unlock_irq(&cpu_buffer->reader_lock);
++ raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+ }
+ /**
+@@ -2790,9 +2790,9 @@ void ring_buffer_iter_reset(struct ring_
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+ cpu_buffer = iter->cpu_buffer;
- if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
- /* granted */
-@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaph
- ret = 1;
- }
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ rb_iter_reset(iter);
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+@@ -3251,12 +3251,12 @@ ring_buffer_peek(struct ring_buffer *buf
+ again:
+ local_irq_save(flags);
+ if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ raw_spin_lock(&cpu_buffer->reader_lock);
+ event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ rb_advance_reader(cpu_buffer);
+ if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
++ raw_spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
- return ret;
- }
-@@ -206,18 +206,18 @@ int __down_read_trylock(struct rw_semaph
- * get a write lock on the semaphore
- * - we increment the waiting count anyway to indicate an exclusive lock
- */
--void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
-+void __sched __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
- {
- struct rwsem_waiter waiter;
- struct task_struct *tsk;
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
+@@ -3281,9 +3281,9 @@ ring_buffer_iter_peek(struct ring_buffer
unsigned long flags;
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+ again:
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ event = rb_iter_peek(iter, ts);
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- if (sem->activity == 0 && list_empty(&sem->wait_list)) {
- /* granted */
- sem->activity = -1;
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- goto out;
+ if (event && event->type_len == RINGBUF_TYPE_PADDING)
+ goto again;
+@@ -3323,7 +3323,7 @@ ring_buffer_consume(struct ring_buffer *
+ cpu_buffer = buffer->buffers[cpu];
+ local_irq_save(flags);
+ if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ raw_spin_lock(&cpu_buffer->reader_lock);
+
+ event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+ if (event) {
+@@ -3332,7 +3332,7 @@ ring_buffer_consume(struct ring_buffer *
}
-@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct
- list_add_tail(&waiter.list, &sem->wait_list);
+ if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
++ raw_spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
- /* we don't need to touch the semaphore struct anymore */
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ out:
+@@ -3424,11 +3424,11 @@ ring_buffer_read_start(struct ring_buffe
- /* wait to be given the lock */
- for (;;) {
-@@ -247,7 +247,7 @@ void __sched __down_write_nested(struct
- ;
- }
+ cpu_buffer = iter->cpu_buffer;
--void __sched __down_write(struct rw_semaphore *sem)
-+void __sched __down_write(struct rw_anon_semaphore *sem)
- {
- __down_write_nested(sem, 0);
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ arch_spin_lock(&cpu_buffer->lock);
+ rb_iter_reset(iter);
+ arch_spin_unlock(&cpu_buffer->lock);
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
-@@ -255,12 +255,12 @@ void __sched __down_write(struct rw_sema
- /*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
--int __down_write_trylock(struct rw_semaphore *sem)
-+int __down_write_trylock(struct rw_anon_semaphore *sem)
- {
- unsigned long flags;
- int ret = 0;
+ EXPORT_SYMBOL_GPL(ring_buffer_read_start);
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+@@ -3463,7 +3463,7 @@ ring_buffer_read(struct ring_buffer_iter
+ struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ unsigned long flags;
- if (sem->activity == 0 && list_empty(&sem->wait_list)) {
- /* granted */
-@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semap
- ret = 1;
- }
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ again:
+ event = rb_iter_peek(iter, ts);
+ if (!event)
+@@ -3474,7 +3474,7 @@ ring_buffer_read(struct ring_buffer_iter
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ rb_advance_iter(iter);
+ out:
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- return ret;
+ return event;
}
-@@ -276,48 +276,48 @@ int __down_write_trylock(struct rw_semap
- /*
- * release a read lock on the semaphore
- */
--void __up_read(struct rw_semaphore *sem)
-+void __up_read(struct rw_anon_semaphore *sem)
- {
- unsigned long flags;
+@@ -3543,7 +3543,7 @@ void ring_buffer_reset_cpu(struct ring_b
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+ atomic_inc(&cpu_buffer->record_disabled);
- if (--sem->activity == 0 && !list_empty(&sem->wait_list))
- sem = __rwsem_wake_one_writer(sem);
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- }
+ if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
+ goto out;
+@@ -3555,7 +3555,7 @@ void ring_buffer_reset_cpu(struct ring_b
+ arch_spin_unlock(&cpu_buffer->lock);
- /*
- * release a write lock on the semaphore
- */
--void __up_write(struct rw_semaphore *sem)
-+void __up_write(struct rw_anon_semaphore *sem)
- {
- unsigned long flags;
-
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
- sem->activity = 0;
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, 1);
+ out:
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ atomic_dec(&cpu_buffer->record_disabled);
}
+@@ -3593,10 +3593,10 @@ int ring_buffer_empty(struct ring_buffer
+ cpu_buffer = buffer->buffers[cpu];
+ local_irq_save(flags);
+ if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ raw_spin_lock(&cpu_buffer->reader_lock);
+ ret = rb_per_cpu_empty(cpu_buffer);
+ if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
++ raw_spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
- /*
- * downgrade a write lock into a read lock
- * - just wake up any readers at the front of the queue
- */
--void __downgrade_write(struct rw_semaphore *sem)
-+void __downgrade_write(struct rw_anon_semaphore *sem)
- {
- unsigned long flags;
+ if (!ret)
+@@ -3627,10 +3627,10 @@ int ring_buffer_empty_cpu(struct ring_bu
+ cpu_buffer = buffer->buffers[cpu];
+ local_irq_save(flags);
+ if (dolock)
+- spin_lock(&cpu_buffer->reader_lock);
++ raw_spin_lock(&cpu_buffer->reader_lock);
+ ret = rb_per_cpu_empty(cpu_buffer);
+ if (dolock)
+- spin_unlock(&cpu_buffer->reader_lock);
++ raw_spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+ return ret;
+@@ -3826,7 +3826,7 @@ int ring_buffer_read_page(struct ring_bu
+ if (!bpage)
+ goto out;
- sem->activity = 1;
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, 0);
+- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
++ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- }
+ reader = rb_get_reader_page(cpu_buffer);
+ if (!reader)
+@@ -3949,7 +3949,7 @@ int ring_buffer_read_page(struct ring_bu
+ memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
-Index: linux-2.6/lib/rwsem.c
+ out_unlock:
+- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
++ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ out:
+ return ret;
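Note: every ring-buffer reader serializes on reader_lock; the peek/consume/empty paths take it under an explicit local_irq_save() guarded by a dolock flag (the flag exists so NMI-context dumps can skip locking), and converting the lock to raw keeps all of these paths usable from any context on RT. The dolock idiom, condensed into a sketch with hypothetical names:

#include <linux/irqflags.h>
#include <linux/spinlock.h>

struct example_buffer {
	raw_spinlock_t reader_lock;
	/* ... */
};

static void *example_do_peek(struct example_buffer *b);

static void *example_peek(struct example_buffer *b, int dolock)
{
	unsigned long flags;
	void *ev;

	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&b->reader_lock);
	ev = example_do_peek(b);
	if (dolock)
		raw_spin_unlock(&b->reader_lock);
	local_irq_restore(flags);
	return ev;
}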
+Index: linux-2.6/kernel/trace/trace.c
===================================================================
---- linux-2.6.orig/lib/rwsem.c
-+++ linux-2.6/lib/rwsem.c
-@@ -11,8 +11,8 @@
- /*
- * Initialize an rwsem:
+--- linux-2.6.orig/kernel/trace/trace.c
++++ linux-2.6/kernel/trace/trace.c
+@@ -341,7 +341,7 @@ unsigned long trace_flags = TRACE_ITER_P
+ TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
+
+ static int trace_stop_count;
+-static DEFINE_SPINLOCK(tracing_start_lock);
++static DEFINE_RAW_SPINLOCK(tracing_start_lock);
+
+ /**
+ * trace_wake_up - wake up tasks waiting for trace input
+@@ -351,6 +351,7 @@ static DEFINE_SPINLOCK(tracing_start_loc
*/
--void __init_rwsem(struct rw_semaphore *sem, const char *name,
-- struct lock_class_key *key)
-+void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
-+ struct lock_class_key *key)
+ void trace_wake_up(void)
{
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
-@@ -22,11 +22,11 @@ void __init_rwsem(struct rw_semaphore *s
- lockdep_init_map(&sem->dep_map, name, key, 0);
- #endif
- sem->count = RWSEM_UNLOCKED_VALUE;
-- spin_lock_init(&sem->wait_lock);
-+ raw_spin_lock_init(&sem->wait_lock);
- INIT_LIST_HEAD(&sem->wait_list);
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int cpu;
+
+ if (trace_flags & TRACE_ITER_BLOCK)
+@@ -363,6 +364,7 @@ void trace_wake_up(void)
+ if (!runqueue_is_locked(cpu))
+ wake_up(&trace_wait);
+ put_cpu();
++#endif
}
--EXPORT_SYMBOL(__init_rwsem);
-+EXPORT_SYMBOL(__init_anon_rwsem);
+ static int __init set_buf_size(char *str)
+@@ -716,6 +718,12 @@ update_max_tr_single(struct trace_array
+ }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
- struct rwsem_waiter {
- struct list_head list;
-@@ -54,8 +54,8 @@ struct rwsem_waiter {
- * - woken process blocks are discarded from the list after having task zeroed
- * - writers are only woken if downgrading is false
- */
--static struct rw_semaphore *
--__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
-+static struct rw_anon_semaphore *
-+__rwsem_do_wake(struct rw_anon_semaphore *sem, int wake_type)
- {
- struct rwsem_waiter *waiter;
- struct task_struct *tsk;
-@@ -169,8 +169,8 @@ __rwsem_do_wake(struct rw_semaphore *sem
- /*
- * wait for a lock to be granted
- */
--static struct rw_semaphore __sched *
--rwsem_down_failed_common(struct rw_semaphore *sem,
-+static struct rw_anon_semaphore __sched *
-+rwsem_down_failed_common(struct rw_anon_semaphore *sem,
- unsigned int flags, signed long adjustment)
- {
- struct rwsem_waiter waiter;
-@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semap
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
++#ifndef CONFIG_PREEMPT_RT_FULL
++static void default_wait_pipe(struct trace_iterator *iter);
++#else
++#define default_wait_pipe poll_wait_pipe
++#endif
++
+ /**
+ * register_tracer - register a tracer with the ftrace system.
+ * @type - the plugin for the tracer
+@@ -958,7 +966,7 @@ void tracing_start(void)
+ if (tracing_disabled)
+ return;
- /* set up my own style of waitqueue */
-- spin_lock_irq(&sem->wait_lock);
-+ raw_spin_lock_irq(&sem->wait_lock);
- waiter.task = tsk;
- waiter.flags = flags;
- get_task_struct(tsk);
-@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semap
- adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
- sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+- spin_lock_irqsave(&tracing_start_lock, flags);
++ raw_spin_lock_irqsave(&tracing_start_lock, flags);
+ if (--trace_stop_count) {
+ if (trace_stop_count < 0) {
+ /* Someone screwed up their debugging */
+@@ -983,7 +991,7 @@ void tracing_start(void)
-- spin_unlock_irq(&sem->wait_lock);
-+ raw_spin_unlock_irq(&sem->wait_lock);
+ ftrace_start();
+ out:
+- spin_unlock_irqrestore(&tracing_start_lock, flags);
++ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+ }
- /* wait to be given the lock */
- for (;;) {
-@@ -222,7 +222,8 @@ rwsem_down_failed_common(struct rw_semap
- /*
- * wait for the read lock to be granted
- */
--struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-+struct rw_anon_semaphore __sched *
-+rwsem_down_read_failed(struct rw_anon_semaphore *sem)
- {
- return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
- -RWSEM_ACTIVE_READ_BIAS);
-@@ -231,7 +232,8 @@ struct rw_semaphore __sched *rwsem_down_
- /*
- * wait for the write lock to be granted
- */
--struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
-+struct rw_anon_semaphore __sched *
-+rwsem_down_write_failed(struct rw_anon_semaphore *sem)
- {
- return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
- -RWSEM_ACTIVE_WRITE_BIAS);
-@@ -241,17 +243,17 @@ struct rw_semaphore __sched *rwsem_down_
- * handle waking up a waiter on the semaphore
- * - up_read/up_write has decremented the active part of count if we come here
- */
--struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
-+struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem)
- {
+ /**
+@@ -998,7 +1006,7 @@ void tracing_stop(void)
unsigned long flags;
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+ ftrace_stop();
+- spin_lock_irqsave(&tracing_start_lock, flags);
++ raw_spin_lock_irqsave(&tracing_start_lock, flags);
+ if (trace_stop_count++)
+ goto out;
- /* do nothing if list empty */
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
+@@ -1016,7 +1024,7 @@ void tracing_stop(void)
+ arch_spin_unlock(&ftrace_max_lock);
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ out:
+- spin_unlock_irqrestore(&tracing_start_lock, flags);
++ raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
+ }
- return sem;
+ void trace_stop_cmdline_recording(void);
+@@ -1120,6 +1128,8 @@ tracing_generic_entry_update(struct trac
+ ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+ ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
++
++ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
}
-@@ -261,17 +263,17 @@ struct rw_semaphore *rwsem_wake(struct r
- * - caller incremented waiting part of count and discovered it still negative
- * - just wake up any readers at the front of the queue
- */
--struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
-+struct rw_anon_semaphore *rwsem_downgrade_wake(struct rw_anon_semaphore *sem)
- {
- unsigned long flags;
+ EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-- spin_lock_irqsave(&sem->wait_lock, flags);
-+ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+@@ -1757,9 +1767,10 @@ static void print_lat_help_header(struct
+ seq_puts(m, "# | / _----=> need-resched \n");
+ seq_puts(m, "# || / _---=> hardirq/softirq \n");
+ seq_puts(m, "# ||| / _--=> preempt-depth \n");
+- seq_puts(m, "# |||| / delay \n");
+- seq_puts(m, "# cmd pid ||||| time | caller \n");
+- seq_puts(m, "# \\ / ||||| \\ | / \n");
++ seq_puts(m, "# |||| / _--=> migrate-disable\n");
++ seq_puts(m, "# ||||| / delay \n");
++ seq_puts(m, "# cmd pid |||||| time | caller \n");
++ seq_puts(m, "# \\ / ||||| \\ | / \n");
+ }
- /* do nothing if list empty */
- if (!list_empty(&sem->wait_list))
- sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+ static void print_func_help_header(struct seq_file *m)
+@@ -3067,6 +3078,7 @@ static int tracing_release_pipe(struct i
+ return 0;
+ }
-- spin_unlock_irqrestore(&sem->wait_lock, flags);
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static unsigned int
+ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+ {
+@@ -3088,8 +3100,7 @@ tracing_poll_pipe(struct file *filp, pol
+ }
+ }
- return sem;
+-
+-void default_wait_pipe(struct trace_iterator *iter)
++static void default_wait_pipe(struct trace_iterator *iter)
+ {
+ DEFINE_WAIT(wait);
+
+@@ -3100,6 +3111,20 @@ void default_wait_pipe(struct trace_iter
+
+ finish_wait(&trace_wait, &wait);
}
-Index: linux-2.6/kernel/time/timer_stats.c
-===================================================================
---- linux-2.6.orig/kernel/time/timer_stats.c
-+++ linux-2.6/kernel/time/timer_stats.c
-@@ -81,7 +81,7 @@ struct entry {
- /*
- * Spinlock protecting the tables - not taken during lookup:
- */
--static DEFINE_SPINLOCK(table_lock);
-+static DEFINE_RAW_SPINLOCK(table_lock);
++#else
++static unsigned int
++tracing_poll_pipe(struct file *filp, poll_table *poll_table)
++{
++ struct trace_iterator *iter = filp->private_data;
++
++ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ poll_wait_pipe(iter);
++ if (!trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++#endif
/*
- * Per-CPU lookup locks for fast hash lookup:
-@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct
- prev = NULL;
- curr = *head;
-
-- spin_lock(&table_lock);
-+ raw_spin_lock(&table_lock);
- /*
- * Make sure we have not raced with another CPU:
- */
-@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct
- *head = curr;
- }
- out_unlock:
-- spin_unlock(&table_lock);
-+ raw_spin_unlock(&table_lock);
-
- return curr;
- }
-Index: linux-2.6/kernel/latencytop.c
+ * This is a make-shift waitqueue.
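Note: on PREEMPT_RT_FULL the trace.c hunks stub out trace_wake_up() entirely (it peeks at the runqueue lock, see the runqueue_is_locked() check above) and map default_wait_pipe to poll_wait_pipe, so trace_pipe readers poll on a timeout instead of sleeping on trace_wait; tracing_poll_pipe() gets a matching RT variant built on the same polling primitive, and the per-entry migrate_disable counter added to tracing_generic_entry_update() is what the extra latency-header column prints. The polling fallback is essentially this (sketch, hypothetical interval):

#include <linux/sched.h>

static void example_wait_pipe(void)
{
	/* no wakeup source on RT: sleep ~100ms and let the caller re-check */
	schedule_timeout_interruptible(HZ / 10);
}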
+Index: linux-2.6/kernel/trace/trace_irqsoff.c
===================================================================
---- linux-2.6.orig/kernel/latencytop.c
-+++ linux-2.6/kernel/latencytop.c
-@@ -58,7 +58,7 @@
- #include <linux/list.h>
- #include <linux/stacktrace.h>
-
--static DEFINE_SPINLOCK(latency_lock);
-+static DEFINE_RAW_SPINLOCK(latency_lock);
-
- #define MAXLR 128
- static struct latency_record latency_record[MAXLR];
-@@ -72,19 +72,19 @@ void clear_all_latency_tracing(struct ta
- if (!latencytop_enabled)
- return;
+--- linux-2.6.orig/kernel/trace/trace_irqsoff.c
++++ linux-2.6/kernel/trace/trace_irqsoff.c
+@@ -17,13 +17,14 @@
+ #include <linux/fs.h>
-- spin_lock_irqsave(&latency_lock, flags);
-+ raw_spin_lock_irqsave(&latency_lock, flags);
- memset(&p->latency_record, 0, sizeof(p->latency_record));
- p->latency_record_count = 0;
-- spin_unlock_irqrestore(&latency_lock, flags);
-+ raw_spin_unlock_irqrestore(&latency_lock, flags);
- }
+ #include "trace.h"
++#include <trace/events/hist.h>
- static void clear_global_latency_tracing(void)
- {
- unsigned long flags;
+ static struct trace_array *irqsoff_trace __read_mostly;
+ static int tracer_enabled __read_mostly;
-- spin_lock_irqsave(&latency_lock, flags);
-+ raw_spin_lock_irqsave(&latency_lock, flags);
- memset(&latency_record, 0, sizeof(latency_record));
-- spin_unlock_irqrestore(&latency_lock, flags);
-+ raw_spin_unlock_irqrestore(&latency_lock, flags);
- }
+ static DEFINE_PER_CPU(int, tracing_cpu);
- static void __sched
-@@ -190,7 +190,7 @@ __account_scheduler_latency(struct task_
- lat.max = usecs;
- store_stacktrace(tsk, &lat);
+-static DEFINE_SPINLOCK(max_trace_lock);
++static DEFINE_RAW_SPINLOCK(max_trace_lock);
-- spin_lock_irqsave(&latency_lock, flags);
-+ raw_spin_lock_irqsave(&latency_lock, flags);
+ enum {
+ TRACER_IRQS_OFF = (1 << 1),
+@@ -319,7 +320,7 @@ check_critical_timing(struct trace_array
+ if (!report_latency(delta))
+ goto out;
- account_global_scheduler_latency(tsk, &lat);
+- spin_lock_irqsave(&max_trace_lock, flags);
++ raw_spin_lock_irqsave(&max_trace_lock, flags);
-@@ -231,7 +231,7 @@ __account_scheduler_latency(struct task_
- memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
+ /* check if we are still the max latency */
+ if (!report_latency(delta))
+@@ -342,7 +343,7 @@ check_critical_timing(struct trace_array
+ max_sequence++;
out_unlock:
-- spin_unlock_irqrestore(&latency_lock, flags);
-+ raw_spin_unlock_irqrestore(&latency_lock, flags);
- }
-
- static int lstats_show(struct seq_file *m, void *v)
-Index: linux-2.6/drivers/video/console/vgacon.c
-===================================================================
---- linux-2.6.orig/drivers/video/console/vgacon.c
-+++ linux-2.6/drivers/video/console/vgacon.c
-@@ -50,7 +50,7 @@
- #include <video/vga.h>
- #include <asm/io.h>
-
--static DEFINE_SPINLOCK(vga_lock);
-+static DEFINE_RAW_SPINLOCK(vga_lock);
- static int cursor_size_lastfrom;
- static int cursor_size_lastto;
- static u32 vgacon_xres;
-@@ -157,7 +157,7 @@ static inline void write_vga(unsigned ch
- * ddprintk might set the console position from interrupt
- * handlers, thus the write has to be IRQ-atomic.
- */
-- spin_lock_irqsave(&vga_lock, flags);
-+ raw_spin_lock_irqsave(&vga_lock, flags);
+- spin_unlock_irqrestore(&max_trace_lock, flags);
++ raw_spin_unlock_irqrestore(&max_trace_lock, flags);
- #ifndef SLOW_VGA
- v1 = reg + (val & 0xff00);
-@@ -170,7 +170,7 @@ static inline void write_vga(unsigned ch
- outb_p(reg + 1, vga_video_port_reg);
- outb_p(val & 0xff, vga_video_port_val);
- #endif
-- spin_unlock_irqrestore(&vga_lock, flags);
-+ raw_spin_unlock_irqrestore(&vga_lock, flags);
+ out:
+ data->critical_sequence = max_sequence;
+@@ -424,11 +425,13 @@ void start_critical_timings(void)
+ {
+ if (preempt_trace() || irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist(TRACE_START, 1);
}
+ EXPORT_SYMBOL_GPL(start_critical_timings);
- static inline void vga_set_mem_top(struct vc_data *c)
-@@ -664,7 +664,7 @@ static void vgacon_set_cursor_size(int x
- cursor_size_lastfrom = from;
- cursor_size_lastto = to;
-
-- spin_lock_irqsave(&vga_lock, flags);
-+ raw_spin_lock_irqsave(&vga_lock, flags);
- if (vga_video_type >= VIDEO_TYPE_VGAC) {
- outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg);
- curs = inb_p(vga_video_port_val);
-@@ -682,7 +682,7 @@ static void vgacon_set_cursor_size(int x
- outb_p(curs, vga_video_port_val);
- outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg);
- outb_p(cure, vga_video_port_val);
-- spin_unlock_irqrestore(&vga_lock, flags);
-+ raw_spin_unlock_irqrestore(&vga_lock, flags);
+ void stop_critical_timings(void)
+ {
++ trace_preemptirqsoff_hist(TRACE_STOP, 0);
+ if (preempt_trace() || irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-
- static void vgacon_cursor(struct vc_data *c, int mode)
-@@ -757,7 +757,7 @@ static int vgacon_doresize(struct vc_dat
- unsigned int scanlines = height * c->vc_font.height;
- u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
-
-- spin_lock_irqsave(&vga_lock, flags);
-+ raw_spin_lock_irqsave(&vga_lock, flags);
-
- vgacon_xres = width * VGA_FONTWIDTH;
- vgacon_yres = height * c->vc_font.height;
-@@ -808,7 +808,7 @@ static int vgacon_doresize(struct vc_dat
- outb_p(vsync_end, vga_video_port_val);
- }
-
-- spin_unlock_irqrestore(&vga_lock, flags);
-+ raw_spin_unlock_irqrestore(&vga_lock, flags);
- return 0;
+@@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+ #ifdef CONFIG_PROVE_LOCKING
+ void time_hardirqs_on(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(a0, a1);
}
-
-@@ -891,11 +891,11 @@ static void vga_vesa_blank(struct vgasta
+@@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0,
{
- /* save original values of VGA controller registers */
- if (!vga_vesa_blanked) {
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I);
- vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg);
- vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(a0, a1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
- outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
- vga_state.HorizontalTotal = inb_p(vga_video_port_val);
-@@ -918,7 +918,7 @@ static void vga_vesa_blank(struct vgasta
+ #else /* !CONFIG_PROVE_LOCKING */
+@@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct
+ */
+ void trace_hardirqs_on(void)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off);
- /* assure that video is enabled */
- /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20);
+ void trace_hardirqs_on_caller(unsigned long caller_addr)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, caller_addr);
+ }
+@@ -494,6 +502,7 @@ void trace_hardirqs_off_caller(unsigned
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, caller_addr);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
- /* test for vertical retrace in process.... */
-@@ -954,13 +954,13 @@ static void vga_vesa_blank(struct vgasta
- /* restore both index registers */
- vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
- outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
+@@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
+ #ifdef CONFIG_PREEMPT_TRACER
+ void trace_preempt_on(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
+ if (preempt_trace())
+ stop_critical_timing(a0, a1);
}
- static void vga_vesa_unblank(struct vgastate *state)
+ void trace_preempt_off(unsigned long a0, unsigned long a1)
{
- /* restore original values of VGA controller registers */
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO);
-
- outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
-@@ -985,7 +985,7 @@ static void vga_vesa_unblank(struct vgas
- /* restore index/control registers */
- vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
- outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
++ trace_preemptirqsoff_hist(PREEMPT_OFF, 1);
+ if (preempt_trace())
+ start_critical_timing(a0, a1);
}
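Note: the trace_irqsoff.c hunks feed every irqs-off/preempt-off transition into trace_preemptirqsoff_hist() from <trace/events/hist.h>, the tracepoint consumed by the -rt latency histogram code; on the "off" edges it fires after start_critical_timing() and on the "on" edges before stop_critical_timing(), so the histogram window nests just inside the tracer's own measurement window. A consumer attaches like any other 3.0 tracepoint (sketch, assuming the -rt hist.h definitions):

#include <linux/init.h>
#include <trace/events/hist.h>

static void example_probe(void *data, int reason, int starthist)
{
	/*
	 * reason: IRQS_ON, IRQS_OFF, TRACE_START, ...; starthist is 1
	 * when a latency window opens and 0 when it closes.
	 */
}

static int __init example_hook(void)
{
	return register_trace_preemptirqsoff_hist(example_probe, NULL);
}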
+Index: linux-2.6/include/linux/ratelimit.h
+===================================================================
+--- linux-2.6.orig/include/linux/ratelimit.h
++++ linux-2.6/include/linux/ratelimit.h
+@@ -8,7 +8,7 @@
+ #define DEFAULT_RATELIMIT_BURST 10
- static void vga_pal_blank(struct vgastate *state)
-@@ -1104,7 +1104,7 @@ static int vgacon_do_font_op(struct vgas
- charmap += 4 * cmapsz;
- #endif
-
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- /* First, the Sequencer */
- vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
- /* CPU writes only to map 2 */
-@@ -1120,7 +1120,7 @@ static int vgacon_do_font_op(struct vgas
- vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00);
- /* map start at A000:0000 */
- vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
-
- if (arg) {
- if (set)
-@@ -1147,7 +1147,7 @@ static int vgacon_do_font_op(struct vgas
- }
- }
+ struct ratelimit_state {
+- spinlock_t lock; /* protect the state */
++ raw_spinlock_t lock; /* protect the state */
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- /* First, the sequencer, Synchronous reset */
- vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01);
- /* CPU writes to maps 0 and 1 */
-@@ -1186,7 +1186,7 @@ static int vgacon_do_font_op(struct vgas
- inb_p(video_port_status);
- vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
+ int interval;
+ int burst;
+@@ -20,7 +20,7 @@ struct ratelimit_state {
+ #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
+ \
+ struct ratelimit_state name = { \
+- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .interval = interval_init, \
+ .burst = burst_init, \
}
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
- return 0;
- }
-
-@@ -1211,26 +1211,26 @@ static int vgacon_adjust_height(struct v
- registers; they are write-only on EGA, but it appears that they
- are all don't care bits on EGA, so I guess it doesn't matter. */
-
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
- ovr = inb_p(vga_video_port_val);
- outb_p(0x09, vga_video_port_reg); /* Font size register */
- fsr = inb_p(vga_video_port_val);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
-
- vde = maxscan & 0xff; /* Vertical display end reg */
- ovr = (ovr & 0xbd) + /* Overflow register */
- ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3);
- fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */
-
-- spin_lock_irq(&vga_lock);
-+ raw_spin_lock_irq(&vga_lock);
- outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
- outb_p(ovr, vga_video_port_val);
- outb_p(0x09, vga_video_port_reg); /* Font size */
- outb_p(fsr, vga_video_port_val);
- outb_p(0x12, vga_video_port_reg); /* Vertical display limit */
- outb_p(vde, vga_video_port_val);
-- spin_unlock_irq(&vga_lock);
-+ raw_spin_unlock_irq(&vga_lock);
- vga_video_font_height = fontheight;
-
- for (i = 0; i < MAX_NR_CONSOLES; i++) {
-Index: linux-2.6/arch/x86/oprofile/nmi_int.c
-===================================================================
---- linux-2.6.orig/arch/x86/oprofile/nmi_int.c
-+++ linux-2.6/arch/x86/oprofile/nmi_int.c
-@@ -355,10 +355,10 @@ static void nmi_cpu_setup(void *dummy)
- int cpu = smp_processor_id();
- struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
- nmi_cpu_save_registers(msrs);
-- spin_lock(&oprofilefs_lock);
-+ raw_spin_lock(&oprofilefs_lock);
- model->setup_ctrs(model, msrs);
- nmi_cpu_setup_mux(cpu, msrs);
-- spin_unlock(&oprofilefs_lock);
-+ raw_spin_unlock(&oprofilefs_lock);
- per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
- apic_write(APIC_LVTPC, APIC_DM_NMI);
- }
-Index: linux-2.6/drivers/oprofile/event_buffer.c
-===================================================================
---- linux-2.6.orig/drivers/oprofile/event_buffer.c
-+++ linux-2.6/drivers/oprofile/event_buffer.c
-@@ -82,10 +82,10 @@ int alloc_event_buffer(void)
+@@ -28,7 +28,7 @@ struct ratelimit_state {
+ static inline void ratelimit_state_init(struct ratelimit_state *rs,
+ int interval, int burst)
{
- unsigned long flags;
-
-- spin_lock_irqsave(&oprofilefs_lock, flags);
-+ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
- buffer_size = oprofile_buffer_size;
- buffer_watershed = oprofile_buffer_watershed;
-- spin_unlock_irqrestore(&oprofilefs_lock, flags);
-+ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
-
- if (buffer_watershed >= buffer_size)
- return -EINVAL;
-Index: linux-2.6/drivers/oprofile/oprofile_perf.c
+- spin_lock_init(&rs->lock);
++ raw_spin_lock_init(&rs->lock);
+ rs->interval = interval;
+ rs->burst = burst;
+ rs->printed = 0;
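Note: ratelimit_state sits behind printk_ratelimit() and friends, which must work from any context including the printk path itself, hence the raw lock and the __RAW_SPIN_LOCK_UNLOCKED initializer here. Caller-side use is unchanged (sketch, hypothetical names):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(example_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void example_report(void)
{
	if (__ratelimit(&example_rs))
		pr_warn("example: something noisy happened\n");
}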
+Index: linux-2.6/kernel/printk.c
===================================================================
---- linux-2.6.orig/drivers/oprofile/oprofile_perf.c
-+++ linux-2.6/drivers/oprofile/oprofile_perf.c
-@@ -160,9 +160,9 @@ static int oprofile_perf_create_files(st
+--- linux-2.6.orig/kernel/printk.c
++++ linux-2.6/kernel/printk.c
+@@ -44,13 +44,6 @@
- static int oprofile_perf_setup(void)
- {
-- spin_lock(&oprofilefs_lock);
-+ raw_spin_lock(&oprofilefs_lock);
- op_perf_setup();
-- spin_unlock(&oprofilefs_lock);
-+ raw_spin_unlock(&oprofilefs_lock);
- return 0;
- }
+ #include <asm/uaccess.h>
-Index: linux-2.6/drivers/oprofile/oprofilefs.c
-===================================================================
---- linux-2.6.orig/drivers/oprofile/oprofilefs.c
-+++ linux-2.6/drivers/oprofile/oprofilefs.c
-@@ -21,7 +21,7 @@
+-/*
+- * Architectures can override it:
+- */
+-void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
+-{
+-}
+-
+ #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
- #define OPROFILEFS_MAGIC 0x6f70726f
+ /* printk's without a loglevel use this.. */
+@@ -100,7 +93,7 @@ static int console_locked, console_suspe
+ * It is also used in interesting ways to provide interlocking in
+ * console_unlock();.
+ */
+-static DEFINE_SPINLOCK(logbuf_lock);
++static DEFINE_RAW_SPINLOCK(logbuf_lock);
--DEFINE_SPINLOCK(oprofilefs_lock);
-+DEFINE_RAW_SPINLOCK(oprofilefs_lock);
+ #define LOG_BUF_MASK (log_buf_len-1)
+ #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
+@@ -212,7 +205,7 @@ void __init setup_log_buf(int early)
+ return;
+ }
- static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
+- spin_lock_irqsave(&logbuf_lock, flags);
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ log_buf_len = new_log_buf_len;
+ log_buf = new_log_buf;
+ new_log_buf_len = 0;
+@@ -230,7 +223,7 @@ void __init setup_log_buf(int early)
+ log_start -= offset;
+ con_start -= offset;
+ log_end -= offset;
+- spin_unlock_irqrestore(&logbuf_lock, flags);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
+ pr_info("log_buf_len: %d\n", log_buf_len);
+ pr_info("early log buf free: %d(%d%%)\n",
+@@ -363,18 +356,18 @@ int do_syslog(int type, char __user *buf
+ if (error)
+ goto out;
+ i = 0;
+- spin_lock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
+ while (!error && (log_start != log_end) && i < len) {
+ c = LOG_BUF(log_start);
+ log_start++;
+- spin_unlock_irq(&logbuf_lock);
++ raw_spin_unlock_irq(&logbuf_lock);
+ error = __put_user(c,buf);
+ buf++;
+ i++;
+ cond_resched();
+- spin_lock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
+ }
+- spin_unlock_irq(&logbuf_lock);
++ raw_spin_unlock_irq(&logbuf_lock);
+ if (!error)
+ error = i;
+ break;
+@@ -397,7 +390,7 @@ int do_syslog(int type, char __user *buf
+ count = len;
+ if (count > log_buf_len)
+ count = log_buf_len;
+- spin_lock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
+ if (count > logged_chars)
+ count = logged_chars;
+ if (do_clear)
+@@ -414,12 +407,12 @@ int do_syslog(int type, char __user *buf
+ if (j + log_buf_len < log_end)
+ break;
+ c = LOG_BUF(j);
+- spin_unlock_irq(&logbuf_lock);
++ raw_spin_unlock_irq(&logbuf_lock);
+ error = __put_user(c,&buf[count-1-i]);
+ cond_resched();
+- spin_lock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
+ }
+- spin_unlock_irq(&logbuf_lock);
++ raw_spin_unlock_irq(&logbuf_lock);
+ if (error)
+ break;
+ error = i;
+@@ -509,6 +502,7 @@ static void __call_console_drivers(unsig
{
-@@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned
- if (copy_from_user(tmpbuf, buf, count))
- return -EFAULT;
+ struct console *con;
-- spin_lock_irqsave(&oprofilefs_lock, flags);
-+ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
- *val = simple_strtoul(tmpbuf, NULL, 0);
-- spin_unlock_irqrestore(&oprofilefs_lock, flags);
-+ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- return 0;
++ migrate_disable();
+ for_each_console(con) {
+ if (exclusive_console && con != exclusive_console)
+ continue;
+@@ -517,8 +511,62 @@ static void __call_console_drivers(unsig
+ (con->flags & CON_ANYTIME)))
+ con->write(con, &LOG_BUF(start), end - start);
+ }
++ migrate_enable();
++}
++
++#ifdef CONFIG_EARLY_PRINTK
++struct console *early_console;
++
++static void early_vprintk(const char *fmt, va_list ap)
++{
++ char buf[512];
++ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
++ if (early_console)
++ early_console->write(early_console, buf, n);
++}
++
++asmlinkage void early_printk(const char *fmt, ...)
++{
++ va_list ap;
++ va_start(ap, fmt);
++ early_vprintk(fmt, ap);
++ va_end(ap);
++}
++
++/*
++ * This is independent of any log levels - a global
++ * kill switch that turns off all of printk.
++ *
++ * Used by the NMI watchdog if early-printk is enabled.
++ */
++static int __read_mostly printk_killswitch;
++
++static int __init force_early_printk_setup(char *str)
++{
++ printk_killswitch = 1;
++ return 0;
++}
++early_param("force_early_printk", force_early_printk_setup);
++
++void printk_kill(void)
++{
++ printk_killswitch = 1;
}
-Index: linux-2.6/include/linux/oprofile.h
-===================================================================
---- linux-2.6.orig/include/linux/oprofile.h
-+++ linux-2.6/include/linux/oprofile.h
-@@ -166,7 +166,7 @@ ssize_t oprofilefs_ulong_to_user(unsigne
- int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
++static int forced_early_printk(const char *fmt, va_list ap)
++{
++ if (!printk_killswitch)
++ return 0;
++ early_vprintk(fmt, ap);
++ return 1;
++}
++#else
++static inline int forced_early_printk(const char *fmt, va_list ap)
++{
++ return 0;
++}
++#endif
++
+ static int __read_mostly ignore_loglevel;
- /** lock for read/write safety */
--extern spinlock_t oprofilefs_lock;
-+extern raw_spinlock_t oprofilefs_lock;
+ static int __init ignore_loglevel_setup(char *str)
+@@ -687,7 +735,7 @@ static void zap_locks(void)
+ oops_timestamp = jiffies;
- /**
- * Add the contents of a circular buffer to the event buffer.
-Index: linux-2.6/drivers/acpi/processor_idle.c
-===================================================================
---- linux-2.6.orig/drivers/acpi/processor_idle.c
-+++ linux-2.6/drivers/acpi/processor_idle.c
-@@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct
+ /* If a crash is occurring, make sure we can't deadlock */
+- spin_lock_init(&logbuf_lock);
++ raw_spin_lock_init(&logbuf_lock);
+ /* And make sure that we print immediately */
+ sema_init(&console_sem, 1);
}
-
- static int c3_cpu_count;
--static DEFINE_SPINLOCK(c3_lock);
-+static DEFINE_RAW_SPINLOCK(c3_lock);
-
- /**
- * acpi_idle_enter_bm - enters C3 with proper BM handling
-@@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpu
- * without doing anything.
- */
- if (pr->flags.bm_check && pr->flags.bm_control) {
-- spin_lock(&c3_lock);
-+ raw_spin_lock(&c3_lock);
- c3_cpu_count++;
- /* Disable bus master arbitration when all CPUs are in C3 */
- if (c3_cpu_count == num_online_cpus())
- acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-- spin_unlock(&c3_lock);
-+ raw_spin_unlock(&c3_lock);
- } else if (!pr->flags.bm_check) {
- ACPI_FLUSH_CPU_CACHE();
- }
-@@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpu
-
- /* Re-enable bus master arbitration */
- if (pr->flags.bm_check && pr->flags.bm_control) {
-- spin_lock(&c3_lock);
-+ raw_spin_lock(&c3_lock);
- acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
- c3_cpu_count--;
-- spin_unlock(&c3_lock);
-+ raw_spin_unlock(&c3_lock);
- }
- kt2 = ktime_get_real();
- idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
-Index: linux-2.6/arch/x86/kernel/cpu/mcheck/mce_intel.c
-===================================================================
---- linux-2.6.orig/arch/x86/kernel/cpu/mcheck/mce_intel.c
-+++ linux-2.6/arch/x86/kernel/cpu/mcheck/mce_intel.c
-@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_b
- * cmci_discover_lock protects against parallel discovery attempts
- * which could race against each other.
+@@ -779,12 +827,18 @@ static inline int can_use_console(unsign
+ * interrupts disabled. It should return with 'lockbuf_lock'
+ * released but interrupts still disabled.
*/
--static DEFINE_SPINLOCK(cmci_discover_lock);
-+static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
-
- #define CMCI_THRESHOLD 1
-
-@@ -85,7 +85,7 @@ static void cmci_discover(int banks, int
- int hdr = 0;
- int i;
+-static int console_trylock_for_printk(unsigned int cpu)
++static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+ __releases(&logbuf_lock)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
++ !preempt_count();
++#else
++ int lock = 1;
++#endif
+ int retval = 0;
-- spin_lock_irqsave(&cmci_discover_lock, flags);
-+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- for (i = 0; i < banks; i++) {
- u64 val;
+- if (console_trylock()) {
++ if (lock && console_trylock()) {
+ retval = 1;
-@@ -116,7 +116,7 @@ static void cmci_discover(int banks, int
- WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+ /*
+@@ -800,7 +854,7 @@ static int console_trylock_for_printk(un
}
}
-- spin_unlock_irqrestore(&cmci_discover_lock, flags);
-+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
- if (hdr)
- printk(KERN_CONT "\n");
- }
-@@ -150,7 +150,7 @@ void cmci_clear(void)
-
- if (!cmci_supported(&banks))
- return;
-- spin_lock_irqsave(&cmci_discover_lock, flags);
-+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- for (i = 0; i < banks; i++) {
- if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
- continue;
-@@ -160,7 +160,7 @@ void cmci_clear(void)
- wrmsrl(MSR_IA32_MCx_CTL2(i), val);
- __clear_bit(i, __get_cpu_var(mce_banks_owned));
- }
-- spin_unlock_irqrestore(&cmci_discover_lock, flags);
-+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ printk_cpu = UINT_MAX;
+- spin_unlock(&logbuf_lock);
++ raw_spin_unlock(&logbuf_lock);
+ return retval;
}
+ static const char recursion_bug_msg [] =
+@@ -833,6 +887,13 @@ asmlinkage int vprintk(const char *fmt,
+ size_t plen;
+ char special;
- /*
-Index: linux-2.6/arch/powerpc/sysdev/uic.c
-===================================================================
---- linux-2.6.orig/arch/powerpc/sysdev/uic.c
-+++ linux-2.6/arch/powerpc/sysdev/uic.c
-@@ -47,7 +47,7 @@ struct uic {
- int index;
- int dcrbase;
-
-- spinlock_t lock;
-+ raw_spinlock_t lock;
++ /*
++ * Fall back to early_printk if a debugging subsystem has
++ * killed printk output
++ */
++ if (unlikely(forced_early_printk(fmt, args)))
++ return 1;
++
+ boot_delay_msec();
+ printk_delay();
- /* The remapper for this UIC */
- struct irq_host *irqhost;
-@@ -61,14 +61,14 @@ static void uic_unmask_irq(struct irq_da
- u32 er, sr;
+@@ -860,7 +921,7 @@ asmlinkage int vprintk(const char *fmt,
+ }
- sr = 1 << (31-src);
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- /* ack level-triggered interrupts here */
- if (irqd_is_level_type(d))
- mtdcr(uic->dcrbase + UIC_SR, sr);
- er = mfdcr(uic->dcrbase + UIC_ER);
- er |= sr;
- mtdcr(uic->dcrbase + UIC_ER, er);
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
- }
+ lockdep_off();
+- spin_lock(&logbuf_lock);
++ raw_spin_lock(&logbuf_lock);
+ printk_cpu = this_cpu;
- static void uic_mask_irq(struct irq_data *d)
-@@ -78,11 +78,11 @@ static void uic_mask_irq(struct irq_data
- unsigned long flags;
- u32 er;
+ if (recursion_bug) {
+@@ -953,8 +1014,15 @@ asmlinkage int vprintk(const char *fmt,
+ * will release 'logbuf_lock' regardless of whether it
+ * actually gets the semaphore or not.
+ */
+- if (console_trylock_for_printk(this_cpu))
++ if (console_trylock_for_printk(this_cpu, flags)) {
++#ifndef CONFIG_PREEMPT_RT_FULL
++ console_unlock();
++#else
++ raw_local_irq_restore(flags);
+ console_unlock();
++ raw_local_irq_save(flags);
++#endif
++ }
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- er = mfdcr(uic->dcrbase + UIC_ER);
- er &= ~(1 << (31 - src));
- mtdcr(uic->dcrbase + UIC_ER, er);
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
- }
+ lockdep_on();
+ out_restore_irqs:
+@@ -1252,18 +1320,23 @@ void console_unlock(void)
+ console_may_schedule = 0;
- static void uic_ack_irq(struct irq_data *d)
-@@ -91,9 +91,9 @@ static void uic_ack_irq(struct irq_data
- unsigned int src = irqd_to_hwirq(d);
- unsigned long flags;
-
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
- }
+ for ( ; ; ) {
+- spin_lock_irqsave(&logbuf_lock, flags);
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ wake_klogd |= log_start - log_end;
+ if (con_start == log_end)
+ break; /* Nothing to print */
+ _con_start = con_start;
+ _log_end = log_end;
+ con_start = log_end; /* Flush */
+- spin_unlock(&logbuf_lock);
++#ifndef CONFIG_PREEMPT_RT_FULL
++ raw_spin_unlock(&logbuf_lock);
+ stop_critical_timings(); /* don't trace print latency */
+ call_console_drivers(_con_start, _log_end);
+ start_critical_timings();
+ local_irq_restore(flags);
++#else
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(_con_start, _log_end);
++#endif
+ }
+ console_locked = 0;
- static void uic_mask_ack_irq(struct irq_data *d)
-@@ -104,7 +104,7 @@ static void uic_mask_ack_irq(struct irq_
- u32 er, sr;
+@@ -1272,7 +1345,7 @@ void console_unlock(void)
+ exclusive_console = NULL;
- sr = 1 << (31-src);
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- er = mfdcr(uic->dcrbase + UIC_ER);
- er &= ~sr;
- mtdcr(uic->dcrbase + UIC_ER, er);
-@@ -118,7 +118,7 @@ static void uic_mask_ack_irq(struct irq_
- */
- if (!irqd_is_level_type(d))
- mtdcr(uic->dcrbase + UIC_SR, sr);
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
+ up(&console_sem);
+- spin_unlock_irqrestore(&logbuf_lock, flags);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ if (wake_klogd)
+ wake_up_klogd();
}
+@@ -1502,9 +1575,9 @@ void register_console(struct console *ne
+ * console_unlock(); will print out the buffered messages
+ * for us.
+ */
+- spin_lock_irqsave(&logbuf_lock, flags);
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ con_start = log_start;
+- spin_unlock_irqrestore(&logbuf_lock, flags);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ /*
+ * We're about to replay the log buffer. Only do this to the
+ * just-registered console to avoid excessive message spam to
+@@ -1711,10 +1784,10 @@ void kmsg_dump(enum kmsg_dump_reason rea
+ /* Theoretically, the log could move on after we do this, but
+ there's not a lot we can do about that. The new messages
+ will overwrite the start of what we dump. */
+- spin_lock_irqsave(&logbuf_lock, flags);
++ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ end = log_end & LOG_BUF_MASK;
+ chars = logged_chars;
+- spin_unlock_irqrestore(&logbuf_lock, flags);
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
-@@ -152,7 +152,7 @@ static int uic_set_irq_type(struct irq_d
-
- mask = ~(1 << (31 - src));
-
-- spin_lock_irqsave(&uic->lock, flags);
-+ raw_spin_lock_irqsave(&uic->lock, flags);
- tr = mfdcr(uic->dcrbase + UIC_TR);
- pr = mfdcr(uic->dcrbase + UIC_PR);
- tr = (tr & mask) | (trigger << (31-src));
-@@ -161,7 +161,7 @@ static int uic_set_irq_type(struct irq_d
- mtdcr(uic->dcrbase + UIC_PR, pr);
- mtdcr(uic->dcrbase + UIC_TR, tr);
+ if (chars > end) {
+ s1 = log_buf + log_buf_len - chars + end;
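+
Nearly every hunk in this patch follows the same two-step recipe: on PREEMPT_RT, spinlock_t becomes a sleeping rtmutex-based lock, so any lock that is taken from hard interrupt context or with interrupts disabled (logbuf_lock above, and the ratelimit, cputimer, semaphore and rwsem locks below) is converted to raw_spinlock_t, which keeps the classic spinning behaviour. A minimal sketch of the resulting pattern - the lock and function names here are illustrative, not taken from the patch:

#include <linux/spinlock.h>

/* Illustrative lock that must stay usable from atomic context on RT. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_atomic_section(void)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... short, non-sleeping critical section ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

The trade-off is latency: a raw lock disables preemption for its whole hold time, so the conversion is only applied where the critical sections are short and bounded.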
+Index: linux-2.6/lib/ratelimit.c
+===================================================================
+--- linux-2.6.orig/lib/ratelimit.c
++++ linux-2.6/lib/ratelimit.c
+@@ -39,7 +39,7 @@ int ___ratelimit(struct ratelimit_state
+ * in addition to the one that will be printed by
+ * the entity that is holding the lock already:
+ */
+- if (!spin_trylock_irqsave(&rs->lock, flags))
++ if (!raw_spin_trylock_irqsave(&rs->lock, flags))
+ return 0;
-- spin_unlock_irqrestore(&uic->lock, flags);
-+ raw_spin_unlock_irqrestore(&uic->lock, flags);
+ if (!rs->begin)
+@@ -60,7 +60,7 @@ int ___ratelimit(struct ratelimit_state
+ rs->missed++;
+ ret = 0;
+ }
+- spin_unlock_irqrestore(&rs->lock, flags);
++ raw_spin_unlock_irqrestore(&rs->lock, flags);
- return 0;
+ return ret;
}
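
___ratelimit() takes its lock with a trylock on purpose: as the comment in the hunk notes, a contended lock means some other context is already printing the rate-limit message, and since this function sits on the printk path it must never block. A rough userspace analogue of that decision, using pthreads purely as an illustration (names are hypothetical):

#include <pthread.h>

static pthread_mutex_t rs_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if the caller may emit its message, 0 if suppressed. */
static int ratelimit_try(void)
{
	if (pthread_mutex_trylock(&rs_lock) != 0)
		return 0;	/* contended: the current holder reports for us */
	/* ... update interval start / printed / missed counters here ... */
	pthread_mutex_unlock(&rs_lock);
	return 1;
}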
-@@ -254,7 +254,7 @@ static struct uic * __init uic_init_one(
- if (! uic)
- return NULL; /* FIXME: panic? */
-
-- spin_lock_init(&uic->lock);
-+ raw_spin_lock_init(&uic->lock);
- indexp = of_get_property(node, "cell-index", &len);
- if (!indexp || (len != sizeof(u32))) {
- printk(KERN_ERR "uic: Device node %s has missing or invalid "
-Index: linux-2.6/drivers/dca/dca-core.c
+Index: linux-2.6/include/linux/init_task.h
===================================================================
---- linux-2.6.orig/drivers/dca/dca-core.c
-+++ linux-2.6/drivers/dca/dca-core.c
-@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
- MODULE_LICENSE("GPL");
- MODULE_AUTHOR("Intel Corporation");
+--- linux-2.6.orig/include/linux/init_task.h
++++ linux-2.6/include/linux/init_task.h
+@@ -42,7 +42,7 @@ extern struct fs_struct init_fs;
+ .cputimer = { \
+ .cputime = INIT_CPUTIME, \
+ .running = 0, \
+- .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ }, \
+ .cred_guard_mutex = \
+ __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
+@@ -126,6 +126,12 @@ extern struct cred init_cred;
+ # define INIT_PERF_EVENTS(tsk)
+ #endif
--static DEFINE_SPINLOCK(dca_lock);
-+static DEFINE_RAW_SPINLOCK(dca_lock);
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define INIT_TIMER_LIST .posix_timer_list = NULL,
++#else
++# define INIT_TIMER_LIST
++#endif
++
+ /*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!. Base=0, limit=0x1fffff (=2MB)
+@@ -179,6 +185,7 @@ extern struct cred init_cred;
+ .fs_excl = ATOMIC_INIT(0), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
++ INIT_TIMER_LIST \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
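+
The INIT_TIMER_LIST macro is the usual trick for making one field of a large designated initializer conditional: it expands to a '.field = value,' fragment when CONFIG_PREEMPT_RT_BASE is set and to nothing otherwise, so INIT_TASK itself needs no #ifdef. A stripped-down sketch of the idiom, with hypothetical names:

#ifdef CONFIG_FEATURE_X
# define INIT_FEATURE_X .feature_x = NULL,
#else
# define INIT_FEATURE_X
#endif

#define INIT_EXAMPLE(name) \
{ \
	.id = 0, \
	INIT_FEATURE_X \
	.state = 0, \
}

The trailing comma lives inside the macro, which is what lets the empty expansion drop out cleanly.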
+Index: linux-2.6/kernel/posix-cpu-timers.c
+===================================================================
+--- linux-2.6.orig/kernel/posix-cpu-timers.c
++++ linux-2.6/kernel/posix-cpu-timers.c
+@@ -274,7 +274,7 @@ void thread_group_cputimer(struct task_s
+ struct task_cputime sum;
+ unsigned long flags;
- static LIST_HEAD(dca_domains);
+- spin_lock_irqsave(&cputimer->lock, flags);
++ raw_spin_lock_irqsave(&cputimer->lock, flags);
+ if (!cputimer->running) {
+ cputimer->running = 1;
+ /*
+@@ -287,7 +287,7 @@ void thread_group_cputimer(struct task_s
+ update_gt_cputime(&cputimer->cputime, &sum);
+ }
+ *times = cputimer->cputime;
+- spin_unlock_irqrestore(&cputimer->lock, flags);
++ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+ }
-@@ -101,10 +101,10 @@ static void unregister_dca_providers(voi
+ /*
+@@ -699,7 +699,7 @@ static int posix_cpu_timer_set(struct k_
+ /*
+ * Disarm any old timer after extracting its expiry time.
+ */
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
- INIT_LIST_HEAD(&unregistered_providers);
+ ret = 0;
+ old_incr = timer->it.cpu.incr;
+@@ -997,9 +997,9 @@ static void stop_process_timers(struct s
+ struct thread_group_cputimer *cputimer = &sig->cputimer;
+ unsigned long flags;
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
+- spin_lock_irqsave(&cputimer->lock, flags);
++ raw_spin_lock_irqsave(&cputimer->lock, flags);
+ cputimer->running = 0;
+- spin_unlock_irqrestore(&cputimer->lock, flags);
++ raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+ }
- if (list_empty(&dca_domains)) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return;
- }
+ static u32 onecputick;
+@@ -1221,7 +1221,7 @@ void posix_cpu_timer_schedule(struct k_i
+ /*
+ * Now re-arm for the new expiry time.
+ */
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ arm_timer(timer);
+ spin_unlock(&p->sighand->siglock);
-@@ -116,7 +116,7 @@ static void unregister_dca_providers(voi
+@@ -1288,10 +1288,11 @@ static inline int fastpath_timer_check(s
+ sig = tsk->signal;
+ if (sig->cputimer.running) {
+ struct task_cputime group_sample;
++ unsigned long flags;
- dca_free_domain(domain);
+- spin_lock(&sig->cputimer.lock);
++ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
+ group_sample = sig->cputimer.cputime;
+- spin_unlock(&sig->cputimer.lock);
++ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+ return 1;
+@@ -1305,13 +1306,13 @@ static inline int fastpath_timer_check(s
+ * already updated our counts. We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+-void run_posix_cpu_timers(struct task_struct *tsk)
++static void __run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ LIST_HEAD(firing);
+ struct k_itimer *timer, *next;
+ unsigned long flags;
- list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
- dca_sysfs_remove_provider(dca);
-@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain
- domain = dca_find_domain(rc);
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
- if (!domain) {
-- if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
-+ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
- dca_providers_blocked = 1;
-- } else {
-- domain = dca_allocate_domain(rc);
-- if (domain)
-- list_add(&domain->node, &dca_domains);
-- }
+ /*
+ * The fast path checks that there are no expired thread or thread
+@@ -1369,6 +1370,190 @@ void run_posix_cpu_timers(struct task_st
}
+ }
- return domain;
-@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev
- if (!dev)
- return -EFAULT;
++#ifdef CONFIG_PREEMPT_RT_BASE
++#include <linux/kthread.h>
++#include <linux/cpu.h>
++DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++
++static int posix_cpu_timers_thread(void *data)
++{
++ int cpu = (long)data;
++
++ BUG_ON(per_cpu(posix_timer_task, cpu) != current);
++
++ while (!kthread_should_stop()) {
++ struct task_struct *tsk = NULL;
++ struct task_struct *next = NULL;
++
++ if (cpu_is_offline(cpu))
++ goto wait_to_die;
++
++ /* grab task list */
++ raw_local_irq_disable();
++ tsk = per_cpu(posix_timer_tasklist, cpu);
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++ raw_local_irq_enable();
++
++ /* it's possible the list is empty - if so, sleep until woken */
++ if (!tsk) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ __set_current_state(TASK_RUNNING);
++ continue;
++ }
++
++ /* Process task list */
++ while (1) {
++ /* save next */
++ next = tsk->posix_timer_list;
++
++ /* run the task timers, clear its ptr and
++ * unreference it
++ */
++ __run_posix_cpu_timers(tsk);
++ tsk->posix_timer_list = NULL;
++ put_task_struct(tsk);
++
++ /* check if this is the last on the list */
++ if (next == tsk)
++ break;
++ tsk = next;
++ }
++ }
++ return 0;
++
++wait_to_die:
++ /* Wait for kthread_stop */
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++static inline int __fastpath_timer_check(struct task_struct *tsk)
++{
++ /* tsk == current, ensure it is safe to use ->signal/sighand */
++ if (unlikely(tsk->exit_state))
++ return 0;
++
++ if (!task_cputime_zero(&tsk->cputime_expires))
++ return 1;
++
++ if (!task_cputime_zero(&tsk->signal->cputime_expires))
++ return 1;
++
++ return 0;
++}
++
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ unsigned long cpu = smp_processor_id();
++ struct task_struct *tasklist;
++
++ BUG_ON(!irqs_disabled());
++ if (!per_cpu(posix_timer_task, cpu))
++ return;
++ /* get per-cpu references */
++ tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++ /* check to see if we're already queued */
++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
++ get_task_struct(tsk);
++ if (tasklist) {
++ tsk->posix_timer_list = tasklist;
++ } else {
++ /*
++ * The list is terminated by a self-pointing
++ * task_struct
++ */
++ tsk->posix_timer_list = tsk;
++ }
++ per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++ wake_up_process(per_cpu(posix_timer_task, cpu));
++ }
++}
++
++/*
++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary timer thread for the new CPU.
++ */
++static int posix_cpu_thread_call(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (long)hcpu;
++ struct task_struct *p;
++ struct sched_param param;
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ p = kthread_create(posix_cpu_timers_thread, hcpu,
++ "posixcputmr/%d",cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
++ kthread_bind(p, cpu);
++ /* Must be high prio to avoid getting starved */
++ param.sched_priority = MAX_RT_PRIO-1;
++ sched_setscheduler(p, SCHED_FIFO, &param);
++ per_cpu(posix_timer_task, cpu) = p;
++ break;
++ case CPU_ONLINE:
++ /* Strictly unnecessary, as first user will wake it. */
++ wake_up_process(per_cpu(posix_timer_task, cpu));
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++ /* Unbind it from offline cpu so it can run. Fall thru. */
++ kthread_bind(per_cpu(posix_timer_task, cpu),
++ any_online_cpu(cpu_online_map));
++ kthread_stop(per_cpu(posix_timer_task, cpu));
++ per_cpu(posix_timer_task, cpu) = NULL;
++ break;
++ case CPU_DEAD:
++ kthread_stop(per_cpu(posix_timer_task, cpu));
++ per_cpu(posix_timer_task, cpu) = NULL;
++ break;
++#endif
++ }
++ return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
++ .notifier_call = posix_cpu_thread_call,
++ .priority = 10
++};
++
++static int __init posix_cpu_thread_init(void)
++{
++ void *hcpu = (void *)(long)smp_processor_id();
++ /* Start one for boot CPU. */
++ unsigned long cpu;
++
++ /* init the per-cpu posix_timer_tasklist pointers */
++ for_each_cpu_mask(cpu, cpu_possible_map)
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
++ register_cpu_notifier(&posix_cpu_thread_notifier);
++ return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ __run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+ * The tsk->sighand->siglock must be held by the caller.
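+
Two things are worth calling out in the PREEMPT_RT_BASE block above. First, the structure: run_posix_cpu_timers() no longer does the expiry work in hard interrupt context; it only queues the task and wakes a per-CPU SCHED_FIFO kthread (posixcputmr/N), which does the real work preemptibly. Second, the queue encoding: tsk->posix_timer_list == NULL must keep meaning "not queued", so the list cannot be NULL-terminated - instead the last element points to itself. A self-contained sketch of just that encoding (struct and function names are hypothetical):

#include <stddef.h>

struct item {
	struct item *next;	/* NULL means "not queued" */
};

static struct item *queue_head;	/* per-CPU in the real code */

static void queue_push(struct item *it)
{
	/* the last element points to itself, so NULL stays free */
	it->next = queue_head ? queue_head : it;
	queue_head = it;
}

static void queue_drain(void (*fn)(struct item *))
{
	struct item *it = queue_head, *next;

	queue_head = NULL;
	while (it) {
		next = it->next;
		it->next = NULL;	/* becomes "not queued" again */
		fn(it);
		if (next == it)		/* self-pointer: end of list */
			break;
		it = next;
	}
}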
+Index: linux-2.6/kernel/sched_stats.h
+===================================================================
+--- linux-2.6.orig/kernel/sched_stats.h
++++ linux-2.6/kernel/sched_stats.h
+@@ -282,10 +282,10 @@ static inline void account_group_user_ti
+ if (!cputimer->running)
+ return;
+
+- spin_lock(&cputimer->lock);
++ raw_spin_lock(&cputimer->lock);
+ cputimer->cputime.utime =
+ cputime_add(cputimer->cputime.utime, cputime);
+- spin_unlock(&cputimer->lock);
++ raw_spin_unlock(&cputimer->lock);
+ }
+
+ /**
+@@ -306,10 +306,10 @@ static inline void account_group_system_
+ if (!cputimer->running)
+ return;
+
+- spin_lock(&cputimer->lock);
++ raw_spin_lock(&cputimer->lock);
+ cputimer->cputime.stime =
+ cputime_add(cputimer->cputime.stime, cputime);
+- spin_unlock(&cputimer->lock);
++ raw_spin_unlock(&cputimer->lock);
+ }
+
+ /**
+@@ -330,7 +330,7 @@ static inline void account_group_exec_ru
+ if (!cputimer->running)
+ return;
+
+- spin_lock(&cputimer->lock);
++ raw_spin_lock(&cputimer->lock);
+ cputimer->cputime.sum_exec_runtime += ns;
+- spin_unlock(&cputimer->lock);
++ raw_spin_unlock(&cputimer->lock);
+ }
+Index: linux-2.6/include/linux/semaphore.h
+===================================================================
+--- linux-2.6.orig/include/linux/semaphore.h
++++ linux-2.6/include/linux/semaphore.h
+@@ -14,14 +14,14 @@
+
+ /* Please don't access any members of this structure directly */
+ struct semaphore {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ unsigned int count;
+ struct list_head wait_list;
+ };
+
+ #define __SEMAPHORE_INITIALIZER(name, n) \
+ { \
+- .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
+ .count = n, \
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ }
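+
The semaphore's internal lock can be made raw safely because it only ever guards the counter and the wait list - every critical section in kernel/semaphore.c below is a handful of loads and stores. A userspace analogue of that counting pattern, with pthreads standing in for the kernel primitives (purely illustrative):

#include <pthread.h>

struct sema {
	pthread_mutex_t lock;	/* guards count and waiters only */
	pthread_cond_t wait;
	unsigned int count;
};

static void sema_down(struct sema *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->count == 0)
		pthread_cond_wait(&s->wait, &s->lock);
	s->count--;
	pthread_mutex_unlock(&s->lock);
}

static void sema_up(struct sema *s)
{
	pthread_mutex_lock(&s->lock);
	s->count++;
	pthread_cond_signal(&s->wait);
	pthread_mutex_unlock(&s->lock);
}

Short, bounded hold times are exactly what makes the raw conversion acceptable on RT.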
+Index: linux-2.6/kernel/semaphore.c
+===================================================================
+--- linux-2.6.orig/kernel/semaphore.c
++++ linux-2.6/kernel/semaphore.c
+@@ -54,12 +54,12 @@ void down(struct semaphore *sem)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count > 0))
+ sem->count--;
+ else
+ __down(sem);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ }
+ EXPORT_SYMBOL(down);
+
+@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore
+ unsigned long flags;
+ int result = 0;
+
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count > 0))
+ sem->count--;
+ else
+ result = __down_interruptible(sem);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
+
+ return result;
+ }
+@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem)
+ unsigned long flags;
+ int result = 0;
+
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count > 0))
+ sem->count--;
+ else
+ result = __down_killable(sem);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
+
+ return result;
+ }
+@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem)
+ unsigned long flags;
+ int count;
+
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ count = sem->count - 1;
+ if (likely(count >= 0))
+ sem->count = count;
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
+
+ return (count < 0);
+ }
+@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem,
+ unsigned long flags;
+ int result = 0;
+
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(sem->count > 0))
+ sem->count--;
+ else
+ result = __down_timeout(sem, jiffies);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
+
+ return result;
+ }
+@@ -179,12 +179,12 @@ void up(struct semaphore *sem)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&sem->lock, flags);
++ raw_spin_lock_irqsave(&sem->lock, flags);
+ if (likely(list_empty(&sem->wait_list)))
+ sem->count++;
+ else
+ __up(sem);
+- spin_unlock_irqrestore(&sem->lock, flags);
++ raw_spin_unlock_irqrestore(&sem->lock, flags);
+ }
+ EXPORT_SYMBOL(up);
+
+@@ -217,9 +217,9 @@ static inline int __sched __down_common(
+ if (timeout <= 0)
+ goto timed_out;
+ __set_task_state(task, state);
+- spin_unlock_irq(&sem->lock);
++ raw_spin_unlock_irq(&sem->lock);
+ timeout = schedule_timeout(timeout);
+- spin_lock_irq(&sem->lock);
++ raw_spin_lock_irq(&sem->lock);
+ if (waiter.up)
+ return 0;
+ }
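+
The loop in __down_common() at the end of this hunk is the canonical shape for sleeping under a spinlock: publish the sleeping task state while still holding the lock, drop the lock, schedule, retake the lock and re-check. Setting the state before the unlock is what closes the race in which up() wakes a task that has not yet gone to sleep. A condensed sketch of that shape using the same kernel APIs (the function and flag are illustrative, not from the patch):

#include <linux/sched.h>
#include <linux/spinlock.h>

static int __sched wait_for_flag(raw_spinlock_t *lock, int *flag)
{
	raw_spin_lock_irq(lock);
	while (!*flag) {
		/* order matters: mark ourselves sleeping, then unlock */
		__set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(lock);
		schedule();
		raw_spin_lock_irq(lock);
	}
	raw_spin_unlock_irq(lock);
	return 0;
}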
+Index: linux-2.6/include/linux/rwsem-spinlock.h
+===================================================================
+--- linux-2.6.orig/include/linux/rwsem-spinlock.h
++++ linux-2.6/include/linux/rwsem-spinlock.h
+@@ -20,26 +20,42 @@
+ * - if activity is -1 then there is one active writer
+ * - if wait_list is not empty, then there are processes waiting for the semaphore
+ */
++struct rw_anon_semaphore {
++ __s32 activity;
++ raw_spinlock_t wait_lock;
++ struct list_head wait_list;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#ifndef CONFIG_PREEMPT_RT_FULL
++/*
++ * Non preempt-rt implementation of rw_semaphore. Same as above, but
++ * restricted w.r.t. ownership, i.e. an ownerless locked state and
++ * non-owner release are not allowed.
++ */
+ struct rw_semaphore {
+ __s32 activity;
+- spinlock_t wait_lock;
++ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ #endif
+ };
++#endif /* PREEMPT_RT_FULL */
+
+ #define RWSEM_UNLOCKED_VALUE 0x00000000
+
+-extern void __down_read(struct rw_semaphore *sem);
+-extern int __down_read_trylock(struct rw_semaphore *sem);
+-extern void __down_write(struct rw_semaphore *sem);
+-extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+-extern int __down_write_trylock(struct rw_semaphore *sem);
+-extern void __up_read(struct rw_semaphore *sem);
+-extern void __up_write(struct rw_semaphore *sem);
+-extern void __downgrade_write(struct rw_semaphore *sem);
+-extern int rwsem_is_locked(struct rw_semaphore *sem);
++extern void __down_read(struct rw_anon_semaphore *sem);
++extern int __down_read_trylock(struct rw_anon_semaphore *sem);
++extern void __down_write(struct rw_anon_semaphore *sem);
++extern void __down_write_nested(struct rw_anon_semaphore *sem, int subclass);
++extern int __down_write_trylock(struct rw_anon_semaphore *sem);
++extern void __up_read(struct rw_anon_semaphore *sem);
++extern void __up_write(struct rw_anon_semaphore *sem);
++extern void __downgrade_write(struct rw_anon_semaphore *sem);
++extern int anon_rwsem_is_locked(struct rw_anon_semaphore *sem);
+
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_RWSEM_SPINLOCK_H */
+Index: linux-2.6/include/linux/rwsem.h
+===================================================================
+--- linux-2.6.orig/include/linux/rwsem.h
++++ linux-2.6/include/linux/rwsem.h
+@@ -17,37 +17,50 @@
+ #include <asm/system.h>
+ #include <asm/atomic.h>
+
++struct rw_anon_semaphore;
+ struct rw_semaphore;
+
+ #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+ #include <linux/rwsem-spinlock.h> /* use a generic implementation */
+-#else
++#else /* RWSEM_GENERIC_SPINLOCK */
++
+ /* All arch specific implementations share the same struct */
+-struct rw_semaphore {
++struct rw_anon_semaphore {
+ long count;
+- spinlock_t wait_lock;
++ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ #endif
+ };
+
+-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
++extern struct rw_anon_semaphore *rwsem_down_read_failed(struct rw_anon_semaphore *sem);
++extern struct rw_anon_semaphore *rwsem_down_write_failed(struct rw_anon_semaphore *sem);
++extern struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *);
++extern struct rw_anon_semaphore *rwsem_downgrade_wake(struct rw_anon_semaphore *sem);
+
+ /* Include the arch specific part */
+ #include <asm/rwsem.h>
+
+ /* In all implementations count != 0 means locked */
+-static inline int rwsem_is_locked(struct rw_semaphore *sem)
++static inline int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+ {
+ return sem->count != 0;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++struct rw_semaphore {
++ long count;
++ raw_spinlock_t wait_lock;
++ struct list_head wait_list;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
+ #endif
+
++#endif /* !RWSEM_GENERIC_SPINLOCK */
++
+ /* Common initializer macros and functions */
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+@@ -56,57 +69,59 @@ static inline int rwsem_is_locked(struct
+ # define __RWSEM_DEP_MAP_INIT(lockname)
+ #endif
+
+-#define __RWSEM_INITIALIZER(name) \
+- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
+- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
++#define __RWSEM_ANON_INITIALIZER(name) \
++ { RWSEM_UNLOCKED_VALUE, \
++ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ LIST_HEAD_INIT((name).wait_list) \
++ __RWSEM_DEP_MAP_INIT(name) }
+
+-#define DECLARE_RWSEM(name) \
+- struct rw_semaphore name = __RWSEM_INITIALIZER(name)
++#define DECLARE_ANON_RWSEM(name) \
++ struct rw_anon_semaphore name = __RWSEM_INITIALIZER(name)
+
+-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+- struct lock_class_key *key);
++extern void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
++ struct lock_class_key *key);
+
+-#define init_rwsem(sem) \
++#define init_anon_rwsem(sem) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+- __init_rwsem((sem), #sem, &__key); \
++ __init_anon_rwsem((sem), #sem, &__key); \
+ } while (0)
+
+ /*
+ * lock for reading
+ */
+-extern void down_read(struct rw_semaphore *sem);
++extern void anon_down_read(struct rw_anon_semaphore *sem);
+
+ /*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+-extern int down_read_trylock(struct rw_semaphore *sem);
++extern int anon_down_read_trylock(struct rw_anon_semaphore *sem);
+
+ /*
+ * lock for writing
+ */
+-extern void down_write(struct rw_semaphore *sem);
++extern void anon_down_write(struct rw_anon_semaphore *sem);
+
+ /*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+-extern int down_write_trylock(struct rw_semaphore *sem);
++extern int anon_down_write_trylock(struct rw_anon_semaphore *sem);
+
+ /*
+ * release a read lock
+ */
+-extern void up_read(struct rw_semaphore *sem);
++extern void anon_up_read(struct rw_anon_semaphore *sem);
+
+ /*
+ * release a write lock
+ */
+-extern void up_write(struct rw_semaphore *sem);
++extern void anon_up_write(struct rw_anon_semaphore *sem);
+
+ /*
+ * downgrade write lock to read lock
+ */
+-extern void downgrade_write(struct rw_semaphore *sem);
++extern void anon_downgrade_write(struct rw_anon_semaphore *sem);
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+@@ -122,21 +137,101 @@ extern void downgrade_write(struct rw_se
+ * lockdep_set_class() at lock initialization time.
+ * See Documentation/lockdep-design.txt for more details.)
+ */
+-extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+-extern void down_write_nested(struct rw_semaphore *sem, int subclass);
++extern void anon_down_read_nested(struct rw_anon_semaphore *sem, int subclass);
++extern void anon_down_write_nested(struct rw_anon_semaphore *sem, int subclass);
+ /*
+ * Take/release a lock when not the owner will release it.
+ *
+ * [ This API should be avoided as much as possible - the
+ * proper abstraction for this case is completions. ]
+ */
+-extern void down_read_non_owner(struct rw_semaphore *sem);
+-extern void up_read_non_owner(struct rw_semaphore *sem);
++extern void anon_down_read_non_owner(struct rw_anon_semaphore *sem);
++extern void anon_up_read_non_owner(struct rw_anon_semaphore *sem);
+ #else
+-# define down_read_nested(sem, subclass) down_read(sem)
+-# define down_write_nested(sem, subclass) down_write(sem)
+-# define down_read_non_owner(sem) down_read(sem)
+-# define up_read_non_owner(sem) up_read(sem)
++# define anon_down_read_nested(sem, subclass) anon_down_read(sem)
++# define anon_down_write_nested(sem, subclass) anon_down_write(sem)
++# define anon_down_read_non_owner(sem) anon_down_read(sem)
++# define anon_up_read_non_owner(sem) anon_up_read(sem)
+ #endif
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
++#ifdef CONFIG_PREEMPT_RT_FULL
++#include <linux/rwsem_rt.h>
++#else /* PREEMPT_RT_FULL */
++/*
++ * Non preempt-rt implementations
++ */
++#define __RWSEM_INITIALIZER(name) \
++ { RWSEM_UNLOCKED_VALUE, \
++ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ LIST_HEAD_INIT((name).wait_list) \
++ __RWSEM_DEP_MAP_INIT(name) }
++
++#define DECLARE_RWSEM(name) \
++ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
++
++static inline void __init_rwsem(struct rw_semaphore *sem, const char *name,
++ struct lock_class_key *key)
++{
++ __init_anon_rwsem((struct rw_anon_semaphore *)sem, name, key);
++}
++
++#define init_rwsem(sem) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ __init_rwsem((sem), #sem, &__key); \
++} while (0)
++
++static inline void down_read(struct rw_semaphore *sem)
++{
++ anon_down_read((struct rw_anon_semaphore *)sem);
++}
++
++static inline int down_read_trylock(struct rw_semaphore *sem)
++{
++ return anon_down_read_trylock((struct rw_anon_semaphore *)sem);
++}
++
++static inline void down_write(struct rw_semaphore *sem)
++{
++ anon_down_write((struct rw_anon_semaphore *)sem);
++}
++
++static inline int down_write_trylock(struct rw_semaphore *sem)
++{
++ return anon_down_write_trylock((struct rw_anon_semaphore *)sem);
++}
++
++static inline void up_read(struct rw_semaphore *sem)
++{
++ anon_up_read((struct rw_anon_semaphore *)sem);
++}
++
++static inline void up_write(struct rw_semaphore *sem)
++{
++ anon_up_write((struct rw_anon_semaphore *)sem);
++}
++
++static inline void downgrade_write(struct rw_semaphore *sem)
++{
++ anon_downgrade_write((struct rw_anon_semaphore *)sem);
++}
++
++static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
++{
++ return anon_down_read_nested((struct rw_anon_semaphore *)sem, subclass);
++}
++
++static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
++{
++ anon_down_write_nested((struct rw_anon_semaphore *)sem, subclass);
++}
++
++static inline int rwsem_is_locked(struct rw_semaphore *sem)
++{
++ return anon_rwsem_is_locked((struct rw_anon_semaphore *)sem);
++}
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* _LINUX_RWSEM_H */
++
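+
The !PREEMPT_RT_FULL half of this header relies on a layout pact: struct rw_semaphore is declared with exactly the same members as struct rw_anon_semaphore, so every inline wrapper above can cast one pointer type to the other and a single anon implementation serves both. On RT, rwsem_rt.h substitutes an rtmutex-based rw_semaphore instead, while the anon variant keeps the classic semantics for the few callers that need non-owner release. A bare sketch of the cast-wrapper idiom with hypothetical types:

struct anon_widget {
	long count;	/* the two layouts must stay in lockstep */
};

struct widget {
	long count;
};

extern void anon_widget_get(struct anon_widget *w);

static inline void widget_get(struct widget *w)
{
	anon_widget_get((struct anon_widget *)w);
}

The cast is only sound as long as the structures are kept field-for-field identical, which is why both definitions sit side by side in the same header.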
+Index: linux-2.6/lib/rwsem-spinlock.c
+===================================================================
+--- linux-2.6.orig/lib/rwsem-spinlock.c
++++ linux-2.6/lib/rwsem-spinlock.c
+@@ -17,24 +17,24 @@ struct rwsem_waiter {
+ #define RWSEM_WAITING_FOR_WRITE 0x00000002
+ };
+
+-int rwsem_is_locked(struct rw_semaphore *sem)
++int anon_rwsem_is_locked(struct rw_anon_semaphore *sem)
+ {
+ int ret = 1;
+ unsigned long flags;
+
+- if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
++ if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ ret = (sem->activity != 0);
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ }
+ return ret;
+ }
+-EXPORT_SYMBOL(rwsem_is_locked);
++EXPORT_SYMBOL(anon_rwsem_is_locked);
+
+ /*
+ * initialise the semaphore
+ */
+-void __init_rwsem(struct rw_semaphore *sem, const char *name,
+- struct lock_class_key *key)
++void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
++ struct lock_class_key *key)
+ {
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+@@ -44,10 +44,10 @@ void __init_rwsem(struct rw_semaphore *s
+ lockdep_init_map(&sem->dep_map, name, key, 0);
+ #endif
+ sem->activity = 0;
+- spin_lock_init(&sem->wait_lock);
++ raw_spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+ }
+-EXPORT_SYMBOL(__init_rwsem);
++EXPORT_SYMBOL(__init_anon_rwsem);
+
+ /*
+ * handle the lock release when processes blocked on it that can now run
+@@ -58,8 +58,8 @@ EXPORT_SYMBOL(__init_rwsem);
+ * - woken process blocks are discarded from the list after having task zeroed
+ * - writers are only woken if wakewrite is non-zero
+ */
+-static inline struct rw_semaphore *
+-__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
++static inline struct rw_anon_semaphore *
++__rwsem_do_wake(struct rw_anon_semaphore *sem, int wakewrite)
+ {
+ struct rwsem_waiter *waiter;
+ struct task_struct *tsk;
+@@ -117,8 +117,8 @@ __rwsem_do_wake(struct rw_semaphore *sem
+ /*
+ * wake a single writer
+ */
+-static inline struct rw_semaphore *
+-__rwsem_wake_one_writer(struct rw_semaphore *sem)
++static inline struct rw_anon_semaphore *
++__rwsem_wake_one_writer(struct rw_anon_semaphore *sem)
+ {
+ struct rwsem_waiter *waiter;
+ struct task_struct *tsk;
+@@ -139,18 +139,18 @@ __rwsem_wake_one_writer(struct rw_semaph
+ /*
+ * get a read lock on the semaphore
+ */
+-void __sched __down_read(struct rw_semaphore *sem)
++void __sched __down_read(struct rw_anon_semaphore *sem)
+ {
+ struct rwsem_waiter waiter;
+ struct task_struct *tsk;
+ unsigned long flags;
+
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+
+ if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+ /* granted */
+ sem->activity++;
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ goto out;
+ }
+
+@@ -165,7 +165,7 @@ void __sched __down_read(struct rw_semap
+ list_add_tail(&waiter.list, &sem->wait_list);
+
+ /* we don't need to touch the semaphore struct anymore */
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+ /* wait to be given the lock */
+ for (;;) {
+@@ -183,13 +183,13 @@ void __sched __down_read(struct rw_semap
+ /*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+-int __down_read_trylock(struct rw_semaphore *sem)
++int __down_read_trylock(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
+ int ret = 0;
+
+
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+
+ if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+ /* granted */
+@@ -197,7 +197,7 @@ int __down_read_trylock(struct rw_semaph
+ ret = 1;
+ }
+
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+ return ret;
+ }
+@@ -206,18 +206,18 @@ int __down_read_trylock(struct rw_semaph
+ * get a write lock on the semaphore
+ * - we increment the waiting count anyway to indicate an exclusive lock
+ */
+-void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
++void __sched __down_write_nested(struct rw_anon_semaphore *sem, int subclass)
+ {
+ struct rwsem_waiter waiter;
+ struct task_struct *tsk;
+ unsigned long flags;
+
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
+
+ if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+ /* granted */
+ sem->activity = -1;
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ goto out;
+ }
+
+@@ -232,7 +232,7 @@ void __sched __down_write_nested(struct
+ list_add_tail(&waiter.list, &sem->wait_list);
+
+ /* we don't need to touch the semaphore struct anymore */
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+ /* wait to be given the lock */
+ for (;;) {
+@@ -247,7 +247,7 @@ void __sched __down_write_nested(struct
+ ;
+ }
- /* check if the requester has not been added already */
- dca = dca_find_provider_by_dev(dev);
- if (dca) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -EEXIST;
- }
+-void __sched __down_write(struct rw_semaphore *sem)
++void __sched __down_write(struct rw_anon_semaphore *sem)
+ {
+ __down_write_nested(sem, 0);
+ }
+@@ -255,12 +255,12 @@ void __sched __down_write(struct rw_sema
+ /*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+-int __down_write_trylock(struct rw_semaphore *sem)
++int __down_write_trylock(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
+ int ret = 0;
- pci_rc = dca_pci_rc_from_dev(dev);
- domain = dca_find_domain(pci_rc);
- if (!domain) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -ENODEV;
- }
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev
- break;
+ if (sem->activity == 0 && list_empty(&sem->wait_list)) {
+ /* granted */
+@@ -268,7 +268,7 @@ int __down_write_trylock(struct rw_semap
+ ret = 1;
}
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
- if (slot < 0)
- return slot;
+ return ret;
+ }
+@@ -276,48 +276,48 @@ int __down_write_trylock(struct rw_semap
+ /*
+ * release a read lock on the semaphore
+ */
+-void __up_read(struct rw_semaphore *sem)
++void __up_read(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
- err = dca_sysfs_add_req(dca, dev, slot);
- if (err) {
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
- if (dca == dca_find_provider_by_dev(dev))
- dca->ops->remove_requester(dca, dev);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return err;
- }
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *
- if (!dev)
- return -EFAULT;
+ if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+ sem = __rwsem_wake_one_writer(sem);
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
- dca = dca_find_provider_by_dev(dev);
- if (!dca) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -ENODEV;
- }
- slot = dca->ops->remove_requester(dca, dev);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ }
- if (slot < 0)
- return slot;
-@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev
- u8 tag;
+ /*
+ * release a write lock on the semaphore
+ */
+-void __up_write(struct rw_semaphore *sem)
++void __up_write(struct rw_anon_semaphore *sem)
+ {
unsigned long flags;
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
- dca = dca_find_provider_by_dev(dev);
- if (!dca) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -ENODEV;
- }
- tag = dca->ops->get_tag(dca, dev, cpu);
+ sem->activity = 0;
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem, 1);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return tag;
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
-@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_pro
+ /*
+ * downgrade a write lock into a read lock
+ * - just wake up any readers at the front of the queue
+ */
+-void __downgrade_write(struct rw_semaphore *sem)
++void __downgrade_write(struct rw_anon_semaphore *sem)
{
- int err;
unsigned long flags;
-- struct dca_domain *domain;
-+ struct dca_domain *domain, *newdomain = NULL;
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
- if (dca_providers_blocked) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- return -ENODEV;
- }
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
- err = dca_sysfs_add_provider(dca, dev);
- if (err)
- return err;
+ sem->activity = 1;
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem, 0);
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
- domain = dca_get_domain(dev);
- if (!domain) {
-+ struct pci_bus *rc;
-+
- if (dca_providers_blocked) {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
- dca_sysfs_remove_provider(dca);
- unregister_dca_providers();
-- } else {
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ return -ENODEV;
-+ }
-+
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
-+ rc = dca_pci_rc_from_dev(dev);
-+ newdomain = dca_allocate_domain(rc);
-+ if (!newdomain)
-+ return -ENODEV;
-+ raw_spin_lock_irqsave(&dca_lock, flags);
-+ /* Recheck, we might have raced after dropping the lock */
-+ domain = dca_get_domain(dev);
-+ if (!domain) {
-+ domain = newdomain;
-+ newdomain = NULL;
-+ list_add(&domain->node, &dca_domains);
- }
-- return -ENODEV;
- }
- list_add(&dca->node, &domain->dca_providers);
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+ }
+
+Index: linux-2.6/lib/rwsem.c
+===================================================================
+--- linux-2.6.orig/lib/rwsem.c
++++ linux-2.6/lib/rwsem.c
+@@ -11,8 +11,8 @@
+ /*
+ * Initialize an rwsem:
+ */
+-void __init_rwsem(struct rw_semaphore *sem, const char *name,
+- struct lock_class_key *key)
++void __init_anon_rwsem(struct rw_anon_semaphore *sem, const char *name,
++ struct lock_class_key *key)
+ {
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+@@ -22,11 +22,11 @@ void __init_rwsem(struct rw_semaphore *s
+ lockdep_init_map(&sem->dep_map, name, key, 0);
+ #endif
+ sem->count = RWSEM_UNLOCKED_VALUE;
+- spin_lock_init(&sem->wait_lock);
++ raw_spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+ }
+
+-EXPORT_SYMBOL(__init_rwsem);
++EXPORT_SYMBOL(__init_anon_rwsem);
+
+ struct rwsem_waiter {
+ struct list_head list;
+@@ -54,8 +54,8 @@ struct rwsem_waiter {
+ * - woken process blocks are discarded from the list after having task zeroed
+ * - writers are only woken if downgrading is false
+ */
+-static struct rw_semaphore *
+-__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
++static struct rw_anon_semaphore *
++__rwsem_do_wake(struct rw_anon_semaphore *sem, int wake_type)
+ {
+ struct rwsem_waiter *waiter;
+ struct task_struct *tsk;
+@@ -169,8 +169,8 @@ __rwsem_do_wake(struct rw_semaphore *sem
+ /*
+ * wait for a lock to be granted
+ */
+-static struct rw_semaphore __sched *
+-rwsem_down_failed_common(struct rw_semaphore *sem,
++static struct rw_anon_semaphore __sched *
++rwsem_down_failed_common(struct rw_anon_semaphore *sem,
+ unsigned int flags, signed long adjustment)
+ {
+ struct rwsem_waiter waiter;
+@@ -180,7 +180,7 @@ rwsem_down_failed_common(struct rw_semap
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+
+ /* set up my own style of waitqueue */
+- spin_lock_irq(&sem->wait_lock);
++ raw_spin_lock_irq(&sem->wait_lock);
+ waiter.task = tsk;
+ waiter.flags = flags;
+ get_task_struct(tsk);
+@@ -204,7 +204,7 @@ rwsem_down_failed_common(struct rw_semap
+ adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+
+- spin_unlock_irq(&sem->wait_lock);
++ raw_spin_unlock_irq(&sem->wait_lock);
+
+ /* wait to be given the lock */
+ for (;;) {
+@@ -222,7 +222,8 @@ rwsem_down_failed_common(struct rw_semap
+ /*
+ * wait for the read lock to be granted
+ */
+-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
++struct rw_anon_semaphore __sched *
++rwsem_down_read_failed(struct rw_anon_semaphore *sem)
+ {
+ return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
+ -RWSEM_ACTIVE_READ_BIAS);
+@@ -231,7 +232,8 @@ struct rw_semaphore __sched *rwsem_down_
+ /*
+ * wait for the write lock to be granted
+ */
+-struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
++struct rw_anon_semaphore __sched *
++rwsem_down_write_failed(struct rw_anon_semaphore *sem)
+ {
+ return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
+ -RWSEM_ACTIVE_WRITE_BIAS);
+@@ -241,17 +243,17 @@ struct rw_semaphore __sched *rwsem_down_
+ * handle waking up a waiter on the semaphore
+ * - up_read/up_write has decremented the active part of count if we come here
+ */
+-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
++struct rw_anon_semaphore *rwsem_wake(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
- blocking_notifier_call_chain(&dca_provider_chain,
- DCA_PROVIDER_ADD, NULL);
-+ kfree(newdomain);
- return 0;
- }
- EXPORT_SYMBOL_GPL(register_dca_provider);
-@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_
- blocking_notifier_call_chain(&dca_provider_chain,
- DCA_PROVIDER_REMOVE, NULL);
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
-- spin_lock_irqsave(&dca_lock, flags);
-+ raw_spin_lock_irqsave(&dca_lock, flags);
+ /* do nothing if list empty */
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
- list_del(&dca->node);
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_
- if (list_empty(&domain->dca_providers))
- dca_free_domain(domain);
+ return sem;
+ }
+@@ -261,17 +263,17 @@ struct rw_semaphore *rwsem_wake(struct r
+ * - caller incremented waiting part of count and discovered it still negative
+ * - just wake up any readers at the front of the queue
+ */
+-struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
++struct rw_anon_semaphore *rwsem_downgrade_wake(struct rw_anon_semaphore *sem)
+ {
+ unsigned long flags;
-- spin_unlock_irqrestore(&dca_lock, flags);
-+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+- spin_lock_irqsave(&sem->wait_lock, flags);
++ raw_spin_lock_irqsave(&sem->wait_lock, flags);
- dca_sysfs_remove_provider(dca);
+ /* do nothing if list empty */
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+
+- spin_unlock_irqrestore(&sem->wait_lock, flags);
++ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+ return sem;
}
-Index: linux-2.6/arch/arm/common/gic.c
+Index: linux-2.6/kernel/time/timer_stats.c
===================================================================
---- linux-2.6.orig/arch/arm/common/gic.c
-+++ linux-2.6/arch/arm/common/gic.c
-@@ -33,7 +33,7 @@
- #include <asm/mach/irq.h>
- #include <asm/hardware/gic.h>
+--- linux-2.6.orig/kernel/time/timer_stats.c
++++ linux-2.6/kernel/time/timer_stats.c
+@@ -81,7 +81,7 @@ struct entry {
+ /*
+ * Spinlock protecting the tables - not taken during lookup:
+ */
+-static DEFINE_SPINLOCK(table_lock);
++static DEFINE_RAW_SPINLOCK(table_lock);
--static DEFINE_SPINLOCK(irq_controller_lock);
-+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+ /*
+ * Per-CPU lookup locks for fast hash lookup:
+@@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct
+ prev = NULL;
+ curr = *head;
- /* Address of GIC 0 CPU interface */
- void __iomem *gic_cpu_base_addr __read_mostly;
-@@ -88,30 +88,30 @@ static void gic_mask_irq(struct irq_data
- {
- u32 mask = 1 << (d->irq % 32);
+- spin_lock(&table_lock);
++ raw_spin_lock(&table_lock);
+ /*
+ * Make sure we have not raced with another CPU:
+ */
+@@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct
+ *head = curr;
+ }
+ out_unlock:
+- spin_unlock(&table_lock);
++ raw_spin_unlock(&table_lock);
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
- if (gic_arch_extn.irq_mask)
- gic_arch_extn.irq_mask(d);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+ return curr;
}
+Index: linux-2.6/kernel/latencytop.c
+===================================================================
+--- linux-2.6.orig/kernel/latencytop.c
++++ linux-2.6/kernel/latencytop.c
+@@ -58,7 +58,7 @@
+ #include <linux/list.h>
+ #include <linux/stacktrace.h>
- static void gic_unmask_irq(struct irq_data *d)
- {
- u32 mask = 1 << (d->irq % 32);
+-static DEFINE_SPINLOCK(latency_lock);
++static DEFINE_RAW_SPINLOCK(latency_lock);
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- if (gic_arch_extn.irq_unmask)
- gic_arch_extn.irq_unmask(d);
- writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+ #define MAXLR 128
+ static struct latency_record latency_record[MAXLR];
+@@ -72,19 +72,19 @@ void clear_all_latency_tracing(struct ta
+ if (!latencytop_enabled)
+ return;
+
+- spin_lock_irqsave(&latency_lock, flags);
++ raw_spin_lock_irqsave(&latency_lock, flags);
+ memset(&p->latency_record, 0, sizeof(p->latency_record));
+ p->latency_record_count = 0;
+- spin_unlock_irqrestore(&latency_lock, flags);
++ raw_spin_unlock_irqrestore(&latency_lock, flags);
}
- static void gic_eoi_irq(struct irq_data *d)
+ static void clear_global_latency_tracing(void)
{
- if (gic_arch_extn.irq_eoi) {
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- gic_arch_extn.irq_eoi(d);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
- }
+ unsigned long flags;
- writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
-@@ -135,7 +135,7 @@ static int gic_set_type(struct irq_data
- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
- return -EINVAL;
+- spin_lock_irqsave(&latency_lock, flags);
++ raw_spin_lock_irqsave(&latency_lock, flags);
+ memset(&latency_record, 0, sizeof(latency_record));
+- spin_unlock_irqrestore(&latency_lock, flags);
++ raw_spin_unlock_irqrestore(&latency_lock, flags);
+ }
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
+ static void __sched
+@@ -190,7 +190,7 @@ __account_scheduler_latency(struct task_
+ lat.max = usecs;
+ store_stacktrace(tsk, &lat);
- if (gic_arch_extn.irq_set_type)
- gic_arch_extn.irq_set_type(d, type);
-@@ -160,7 +160,7 @@ static int gic_set_type(struct irq_data
- if (enabled)
- writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+- spin_lock_irqsave(&latency_lock, flags);
++ raw_spin_lock_irqsave(&latency_lock, flags);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+ account_global_scheduler_latency(tsk, &lat);
- return 0;
+@@ -231,7 +231,7 @@ __account_scheduler_latency(struct task_
+ memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
+
+ out_unlock:
+- spin_unlock_irqrestore(&latency_lock, flags);
++ raw_spin_unlock_irqrestore(&latency_lock, flags);
}
-@@ -188,11 +188,11 @@ static int gic_set_affinity(struct irq_d
- mask = 0xff << shift;
- bit = 1 << (cpu + shift);
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- d->node = cpu;
- val = readl_relaxed(reg) & ~mask;
- writel_relaxed(val | bit, reg);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+ static int lstats_show(struct seq_file *m, void *v)
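
[latency_lock is acquired from __account_scheduler_latency(), i.e. from scheduler context, which is why every site above uses the _irqsave/_irqrestore variants. A sketch of that calling convention, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/string.h>

static DEFINE_RAW_SPINLOCK(stats_lock);	/* hypothetical */
static int stats[16];

static void stats_reset(void)
{
	unsigned long flags;

	/*
	 * Saves the caller's IRQ state and disables interrupts, so
	 * the lock is safe from both task and interrupt context.
	 */
	raw_spin_lock_irqsave(&stats_lock, flags);
	memset(stats, 0, sizeof(stats));
	raw_spin_unlock_irqrestore(&stats_lock, flags);
}
]
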
+Index: linux-2.6/drivers/video/console/vgacon.c
+===================================================================
+--- linux-2.6.orig/drivers/video/console/vgacon.c
++++ linux-2.6/drivers/video/console/vgacon.c
+@@ -50,7 +50,7 @@
+ #include <video/vga.h>
+ #include <asm/io.h>
- return 0;
+-static DEFINE_SPINLOCK(vga_lock);
++static DEFINE_RAW_SPINLOCK(vga_lock);
+ static int cursor_size_lastfrom;
+ static int cursor_size_lastto;
+ static u32 vgacon_xres;
+@@ -157,7 +157,7 @@ static inline void write_vga(unsigned ch
+ * ddprintk might set the console position from interrupt
+ * handlers, thus the write has to be IRQ-atomic.
+ */
+- spin_lock_irqsave(&vga_lock, flags);
++ raw_spin_lock_irqsave(&vga_lock, flags);
+
+ #ifndef SLOW_VGA
+ v1 = reg + (val & 0xff00);
+@@ -170,7 +170,7 @@ static inline void write_vga(unsigned ch
+ outb_p(reg + 1, vga_video_port_reg);
+ outb_p(val & 0xff, vga_video_port_val);
+ #endif
+- spin_unlock_irqrestore(&vga_lock, flags);
++ raw_spin_unlock_irqrestore(&vga_lock, flags);
}
-@@ -222,9 +222,9 @@ static void gic_handle_cascade_irq(unsig
- chained_irq_enter(chip, desc);
+ static inline void vga_set_mem_top(struct vc_data *c)
+@@ -664,7 +664,7 @@ static void vgacon_set_cursor_size(int x
+ cursor_size_lastfrom = from;
+ cursor_size_lastto = to;
-- spin_lock(&irq_controller_lock);
-+ raw_spin_lock(&irq_controller_lock);
- status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
-- spin_unlock(&irq_controller_lock);
-+ raw_spin_unlock(&irq_controller_lock);
+- spin_lock_irqsave(&vga_lock, flags);
++ raw_spin_lock_irqsave(&vga_lock, flags);
+ if (vga_video_type >= VIDEO_TYPE_VGAC) {
+ outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg);
+ curs = inb_p(vga_video_port_val);
+@@ -682,7 +682,7 @@ static void vgacon_set_cursor_size(int x
+ outb_p(curs, vga_video_port_val);
+ outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg);
+ outb_p(cure, vga_video_port_val);
+- spin_unlock_irqrestore(&vga_lock, flags);
++ raw_spin_unlock_irqrestore(&vga_lock, flags);
+ }
- gic_irq = (status & 0x3ff);
- if (gic_irq == 1023)
-Index: linux-2.6/arch/arm/include/asm/dma.h
-===================================================================
---- linux-2.6.orig/arch/arm/include/asm/dma.h
-+++ linux-2.6/arch/arm/include/asm/dma.h
-@@ -33,18 +33,18 @@
- #define DMA_MODE_CASCADE 0xc0
- #define DMA_AUTOINIT 0x10
+ static void vgacon_cursor(struct vc_data *c, int mode)
+@@ -757,7 +757,7 @@ static int vgacon_doresize(struct vc_dat
+ unsigned int scanlines = height * c->vc_font.height;
+ u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
--extern spinlock_t dma_spin_lock;
-+extern raw_spinlock_t dma_spin_lock;
+- spin_lock_irqsave(&vga_lock, flags);
++ raw_spin_lock_irqsave(&vga_lock, flags);
- static inline unsigned long claim_dma_lock(void)
- {
- unsigned long flags;
-- spin_lock_irqsave(&dma_spin_lock, flags);
-+ raw_spin_lock_irqsave(&dma_spin_lock, flags);
- return flags;
+ vgacon_xres = width * VGA_FONTWIDTH;
+ vgacon_yres = height * c->vc_font.height;
+@@ -808,7 +808,7 @@ static int vgacon_doresize(struct vc_dat
+ outb_p(vsync_end, vga_video_port_val);
+ }
+
+- spin_unlock_irqrestore(&vga_lock, flags);
++ raw_spin_unlock_irqrestore(&vga_lock, flags);
+ return 0;
}
- static inline void release_dma_lock(unsigned long flags)
+@@ -891,11 +891,11 @@ static void vga_vesa_blank(struct vgasta
{
-- spin_unlock_irqrestore(&dma_spin_lock, flags);
-+ raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
- }
+ /* save original values of VGA controller registers */
+ if (!vga_vesa_blanked) {
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I);
+ vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg);
+ vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
- /* Clear the 'DMA Pointer Flip Flop'.
-Index: linux-2.6/arch/arm/include/asm/mmu.h
-===================================================================
---- linux-2.6.orig/arch/arm/include/asm/mmu.h
-+++ linux-2.6/arch/arm/include/asm/mmu.h
-@@ -6,7 +6,7 @@
- typedef struct {
- #ifdef CONFIG_CPU_HAS_ASID
- unsigned int id;
-- spinlock_t id_lock;
-+ raw_spinlock_t id_lock;
- #endif
- unsigned int kvm_seq;
- } mm_context_t;
-@@ -16,7 +16,7 @@ typedef struct {
+ outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
+ vga_state.HorizontalTotal = inb_p(vga_video_port_val);
+@@ -918,7 +918,7 @@ static void vga_vesa_blank(struct vgasta
- /* init_mm.context.id_lock should be initialized. */
- #define INIT_MM_CONTEXT(name) \
-- .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
-+ .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
- #else
- #define ASID(mm) (0)
- #endif
-Index: linux-2.6/arch/arm/kernel/dma.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/dma.c
-+++ linux-2.6/arch/arm/kernel/dma.c
-@@ -23,7 +23,7 @@
+ /* assure that video is enabled */
+ /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20);
- #include <asm/mach/dma.h>
+ /* test for vertical retrace in process.... */
+@@ -954,13 +954,13 @@ static void vga_vesa_blank(struct vgasta
+ /* restore both index registers */
+ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
+ outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
+ }
--DEFINE_SPINLOCK(dma_spin_lock);
-+DEFINE_RAW_SPINLOCK(dma_spin_lock);
- EXPORT_SYMBOL(dma_spin_lock);
+ static void vga_vesa_unblank(struct vgastate *state)
+ {
+ /* restore original values of VGA controller registers */
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO);
- static dma_t *dma_chan[MAX_DMA_CHANNELS];
-Index: linux-2.6/arch/arm/kernel/smp.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/smp.c
-+++ linux-2.6/arch/arm/kernel/smp.c
-@@ -531,7 +531,7 @@ static void percpu_timer_stop(void)
+ outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */
+@@ -985,7 +985,7 @@ static void vga_vesa_unblank(struct vgas
+ /* restore index/control registers */
+ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex);
+ outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
}
+
+ static void vga_pal_blank(struct vgastate *state)
+@@ -1104,7 +1104,7 @@ static int vgacon_do_font_op(struct vgas
+ charmap += 4 * cmapsz;
#endif
--static DEFINE_SPINLOCK(stop_lock);
-+static DEFINE_RAW_SPINLOCK(stop_lock);
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ /* First, the Sequencer */
+ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
+ /* CPU writes only to map 2 */
+@@ -1120,7 +1120,7 @@ static int vgacon_do_font_op(struct vgas
+ vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00);
+ /* map start at A000:0000 */
+ vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
- /*
- * ipi_cpu_stop - handle IPI from smp_send_stop()
-@@ -540,10 +540,10 @@ static void ipi_cpu_stop(unsigned int cp
- {
- if (system_state == SYSTEM_BOOTING ||
- system_state == SYSTEM_RUNNING) {
-- spin_lock(&stop_lock);
-+ raw_spin_lock(&stop_lock);
- printk(KERN_CRIT "CPU%u: stopping\n", cpu);
- dump_stack();
-- spin_unlock(&stop_lock);
-+ raw_spin_unlock(&stop_lock);
+ if (arg) {
+ if (set)
+@@ -1147,7 +1147,7 @@ static int vgacon_do_font_op(struct vgas
+ }
}
- set_cpu_online(cpu, false);
-Index: linux-2.6/arch/arm/kernel/traps.c
-===================================================================
---- linux-2.6.orig/arch/arm/kernel/traps.c
-+++ linux-2.6/arch/arm/kernel/traps.c
-@@ -255,7 +255,7 @@ static int __die(const char *str, int er
- return ret;
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ /* First, the sequencer, Synchronous reset */
+ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01);
+ /* CPU writes to maps 0 and 1 */
+@@ -1186,7 +1186,7 @@ static int vgacon_do_font_op(struct vgas
+ inb_p(video_port_status);
+ vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
+ }
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
+ return 0;
}
--static DEFINE_SPINLOCK(die_lock);
-+static DEFINE_RAW_SPINLOCK(die_lock);
-
- /*
- * This function is protected against re-entrancy.
-@@ -267,7 +267,7 @@ void die(const char *str, struct pt_regs
+@@ -1211,26 +1211,26 @@ static int vgacon_adjust_height(struct v
+ registers; they are write-only on EGA, but it appears that they
+ are all don't care bits on EGA, so I guess it doesn't matter. */
- oops_enter();
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
+ ovr = inb_p(vga_video_port_val);
+ outb_p(0x09, vga_video_port_reg); /* Font size register */
+ fsr = inb_p(vga_video_port_val);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
-- spin_lock_irq(&die_lock);
-+ raw_spin_lock_irq(&die_lock);
- console_verbose();
- bust_spinlocks(1);
- ret = __die(str, err, thread, regs);
-@@ -277,7 +277,7 @@ void die(const char *str, struct pt_regs
+ vde = maxscan & 0xff; /* Vertical display end reg */
+ ovr = (ovr & 0xbd) + /* Overflow register */
+ ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3);
+ fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */
- bust_spinlocks(0);
- add_taint(TAINT_DIE);
-- spin_unlock_irq(&die_lock);
-+ raw_spin_unlock_irq(&die_lock);
- oops_exit();
+- spin_lock_irq(&vga_lock);
++ raw_spin_lock_irq(&vga_lock);
+ outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */
+ outb_p(ovr, vga_video_port_val);
+ outb_p(0x09, vga_video_port_reg); /* Font size */
+ outb_p(fsr, vga_video_port_val);
+ outb_p(0x12, vga_video_port_reg); /* Vertical display limit */
+ outb_p(vde, vga_video_port_val);
+- spin_unlock_irq(&vga_lock);
++ raw_spin_unlock_irq(&vga_lock);
+ vga_video_font_height = fontheight;
- if (in_interrupt())
-@@ -302,24 +302,24 @@ void arm_notify_die(const char *str, str
+ for (i = 0; i < MAX_NR_CONSOLES; i++) {
+Index: linux-2.6/arch/x86/oprofile/nmi_int.c
+===================================================================
+--- linux-2.6.orig/arch/x86/oprofile/nmi_int.c
++++ linux-2.6/arch/x86/oprofile/nmi_int.c
+@@ -355,10 +355,10 @@ static void nmi_cpu_setup(void *dummy)
+ int cpu = smp_processor_id();
+ struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+ nmi_cpu_save_registers(msrs);
+- spin_lock(&oprofilefs_lock);
++ raw_spin_lock(&oprofilefs_lock);
+ model->setup_ctrs(model, msrs);
+ nmi_cpu_setup_mux(cpu, msrs);
+- spin_unlock(&oprofilefs_lock);
++ raw_spin_unlock(&oprofilefs_lock);
+ per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
}
-
- static LIST_HEAD(undef_hook);
--static DEFINE_SPINLOCK(undef_lock);
-+static DEFINE_RAW_SPINLOCK(undef_lock);
-
- void register_undef_hook(struct undef_hook *hook)
+Index: linux-2.6/drivers/oprofile/event_buffer.c
+===================================================================
+--- linux-2.6.orig/drivers/oprofile/event_buffer.c
++++ linux-2.6/drivers/oprofile/event_buffer.c
+@@ -82,10 +82,10 @@ int alloc_event_buffer(void)
{
unsigned long flags;
-- spin_lock_irqsave(&undef_lock, flags);
-+ raw_spin_lock_irqsave(&undef_lock, flags);
- list_add(&hook->node, &undef_hook);
-- spin_unlock_irqrestore(&undef_lock, flags);
-+ raw_spin_unlock_irqrestore(&undef_lock, flags);
- }
+- spin_lock_irqsave(&oprofilefs_lock, flags);
++ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
+ buffer_size = oprofile_buffer_size;
+ buffer_watershed = oprofile_buffer_watershed;
+- spin_unlock_irqrestore(&oprofilefs_lock, flags);
++ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
- void unregister_undef_hook(struct undef_hook *hook)
- {
- unsigned long flags;
+ if (buffer_watershed >= buffer_size)
+ return -EINVAL;
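
[alloc_event_buffer() holds oprofilefs_lock only long enough to snapshot two integers, and validates them after unlocking. That brevity matters more once the lock is raw, since a raw spinlock section is non-preemptible even on RT. The snapshot idiom, reduced to hypothetical names:

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(cfg_lock);	/* hypothetical */
static unsigned long cfg_size, cfg_watermark;

static int cfg_snapshot(unsigned long *size, unsigned long *watermark)
{
	unsigned long flags;

	/* Copy out under the lock, validate after dropping it. */
	raw_spin_lock_irqsave(&cfg_lock, flags);
	*size = cfg_size;
	*watermark = cfg_watermark;
	raw_spin_unlock_irqrestore(&cfg_lock, flags);

	return *watermark >= *size ? -EINVAL : 0;
}
]
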
+Index: linux-2.6/drivers/oprofile/oprofile_perf.c
+===================================================================
+--- linux-2.6.orig/drivers/oprofile/oprofile_perf.c
++++ linux-2.6/drivers/oprofile/oprofile_perf.c
+@@ -160,9 +160,9 @@ static int oprofile_perf_create_files(st
-- spin_lock_irqsave(&undef_lock, flags);
-+ raw_spin_lock_irqsave(&undef_lock, flags);
- list_del(&hook->node);
-- spin_unlock_irqrestore(&undef_lock, flags);
-+ raw_spin_unlock_irqrestore(&undef_lock, flags);
+ static int oprofile_perf_setup(void)
+ {
+- spin_lock(&oprofilefs_lock);
++ raw_spin_lock(&oprofilefs_lock);
+ op_perf_setup();
+- spin_unlock(&oprofilefs_lock);
++ raw_spin_unlock(&oprofilefs_lock);
+ return 0;
}
- static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
-@@ -328,12 +328,12 @@ static int call_undef_hook(struct pt_reg
- unsigned long flags;
- int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
+Index: linux-2.6/drivers/oprofile/oprofilefs.c
+===================================================================
+--- linux-2.6.orig/drivers/oprofile/oprofilefs.c
++++ linux-2.6/drivers/oprofile/oprofilefs.c
+@@ -21,7 +21,7 @@
-- spin_lock_irqsave(&undef_lock, flags);
-+ raw_spin_lock_irqsave(&undef_lock, flags);
- list_for_each_entry(hook, &undef_hook, node)
- if ((instr & hook->instr_mask) == hook->instr_val &&
- (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
- fn = hook->fn;
-- spin_unlock_irqrestore(&undef_lock, flags);
-+ raw_spin_unlock_irqrestore(&undef_lock, flags);
+ #define OPROFILEFS_MAGIC 0x6f70726f
- return fn ? fn(regs, instr) : 1;
- }
-Index: linux-2.6/arch/arm/mach-footbridge/include/mach/hardware.h
-===================================================================
---- linux-2.6.orig/arch/arm/mach-footbridge/include/mach/hardware.h
-+++ linux-2.6/arch/arm/mach-footbridge/include/mach/hardware.h
-@@ -93,7 +93,7 @@
- #define CPLD_FLASH_WR_ENABLE 1
+-DEFINE_SPINLOCK(oprofilefs_lock);
++DEFINE_RAW_SPINLOCK(oprofilefs_lock);
- #ifndef __ASSEMBLY__
--extern spinlock_t nw_gpio_lock;
-+extern raw_spinlock_t nw_gpio_lock;
- extern void nw_gpio_modify_op(unsigned int mask, unsigned int set);
- extern void nw_gpio_modify_io(unsigned int mask, unsigned int in);
- extern unsigned int nw_gpio_read(void);
-Index: linux-2.6/arch/arm/mach-footbridge/netwinder-hw.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-footbridge/netwinder-hw.c
-+++ linux-2.6/arch/arm/mach-footbridge/netwinder-hw.c
-@@ -68,7 +68,7 @@ static inline void wb977_ww(int reg, int
- /*
- * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE
- */
--DEFINE_SPINLOCK(nw_gpio_lock);
-+DEFINE_RAW_SPINLOCK(nw_gpio_lock);
- EXPORT_SYMBOL(nw_gpio_lock);
+ static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
+ {
+@@ -76,9 +76,9 @@ int oprofilefs_ulong_from_user(unsigned
+ if (copy_from_user(tmpbuf, buf, count))
+ return -EFAULT;
- static unsigned int current_gpio_op;
-@@ -327,9 +327,9 @@ static inline void wb977_init_gpio(void)
- /*
- * Set Group1/Group2 outputs
- */
-- spin_lock_irqsave(&nw_gpio_lock, flags);
-+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
- nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN);
-- spin_unlock_irqrestore(&nw_gpio_lock, flags);
-+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+- spin_lock_irqsave(&oprofilefs_lock, flags);
++ raw_spin_lock_irqsave(&oprofilefs_lock, flags);
+ *val = simple_strtoul(tmpbuf, NULL, 0);
+- spin_unlock_irqrestore(&oprofilefs_lock, flags);
++ raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
+ return 0;
}
- /*
-@@ -390,9 +390,9 @@ static void __init cpld_init(void)
- {
- unsigned long flags;
+Index: linux-2.6/include/linux/oprofile.h
+===================================================================
+--- linux-2.6.orig/include/linux/oprofile.h
++++ linux-2.6/include/linux/oprofile.h
+@@ -166,7 +166,7 @@ ssize_t oprofilefs_ulong_to_user(unsigne
+ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
-- spin_lock_irqsave(&nw_gpio_lock, flags);
-+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
- nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE);
-- spin_unlock_irqrestore(&nw_gpio_lock, flags);
-+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ /** lock for read/write safety */
+-extern spinlock_t oprofilefs_lock;
++extern raw_spinlock_t oprofilefs_lock;
+
+ /**
+ * Add the contents of a circular buffer to the event buffer.
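
[Since the type of oprofilefs_lock changes, the extern declaration in this header, the definition in oprofilefs.c and every locking site have to be converted in one go; mixing spinlock_t and raw_spinlock_t operations on a single lock would not compile. The declaration/definition split, sketched with a hypothetical foo_lock:

/* foo.h, hypothetical header */
#include <linux/spinlock.h>

extern raw_spinlock_t foo_lock;		/* was: extern spinlock_t foo_lock; */

/* foo.c, the single definition */
#include <linux/module.h>

DEFINE_RAW_SPINLOCK(foo_lock);		/* was: DEFINE_SPINLOCK(foo_lock); */
EXPORT_SYMBOL(foo_lock);
]
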
+Index: linux-2.6/drivers/acpi/processor_idle.c
+===================================================================
+--- linux-2.6.orig/drivers/acpi/processor_idle.c
++++ linux-2.6/drivers/acpi/processor_idle.c
+@@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct
}
- static unsigned char rwa_unlock[] __initdata =
-@@ -616,9 +616,9 @@ static int __init nw_hw_init(void)
- cpld_init();
- rwa010_init();
+ static int c3_cpu_count;
+-static DEFINE_SPINLOCK(c3_lock);
++static DEFINE_RAW_SPINLOCK(c3_lock);
-- spin_lock_irqsave(&nw_gpio_lock, flags);
-+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
- nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS);
-- spin_unlock_irqrestore(&nw_gpio_lock, flags);
-+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ /**
+ * acpi_idle_enter_bm - enters C3 with proper BM handling
+@@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpu
+ * without doing anything.
+ */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+- spin_lock(&c3_lock);
++ raw_spin_lock(&c3_lock);
+ c3_cpu_count++;
+ /* Disable bus master arbitration when all CPUs are in C3 */
+ if (c3_cpu_count == num_online_cpus())
+ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
+- spin_unlock(&c3_lock);
++ raw_spin_unlock(&c3_lock);
+ } else if (!pr->flags.bm_check) {
+ ACPI_FLUSH_CPU_CACHE();
}
- return 0;
- }
-Index: linux-2.6/arch/arm/mach-footbridge/netwinder-leds.c
+@@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpu
+
+ /* Re-enable bus master arbitration */
+ if (pr->flags.bm_check && pr->flags.bm_control) {
+- spin_lock(&c3_lock);
++ raw_spin_lock(&c3_lock);
+ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
+ c3_cpu_count--;
+- spin_unlock(&c3_lock);
++ raw_spin_unlock(&c3_lock);
+ }
+ kt2 = ktime_get_real();
+ idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
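
[c3_lock is taken on the idle path with interrupts already disabled, exactly the situation where a sleeping lock is forbidden on RT. The last-CPU-in/first-CPU-out accounting it protects reduces to the following sketch (hypothetical names, not the driver's API):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(deep_idle_lock);	/* hypothetical */
static int deep_idle_cpus;

/*
 * True only for the CPU that completes the set, mirroring the
 * "disable bus master arbitration once all CPUs are in C3" test.
 */
static bool deep_idle_enter(int online_cpus)
{
	bool all_in;

	raw_spin_lock(&deep_idle_lock);
	all_in = (++deep_idle_cpus == online_cpus);
	raw_spin_unlock(&deep_idle_lock);

	return all_in;
}

static void deep_idle_exit(void)
{
	raw_spin_lock(&deep_idle_lock);
	deep_idle_cpus--;
	raw_spin_unlock(&deep_idle_lock);
}
]
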
+Index: linux-2.6/arch/x86/kernel/cpu/mcheck/mce_intel.c
===================================================================
---- linux-2.6.orig/arch/arm/mach-footbridge/netwinder-leds.c
-+++ linux-2.6/arch/arm/mach-footbridge/netwinder-leds.c
-@@ -31,13 +31,13 @@
- static char led_state;
- static char hw_led_state;
+--- linux-2.6.orig/arch/x86/kernel/cpu/mcheck/mce_intel.c
++++ linux-2.6/arch/x86/kernel/cpu/mcheck/mce_intel.c
+@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_b
+ * cmci_discover_lock protects against parallel discovery attempts
+ * which could race against each other.
+ */
+-static DEFINE_SPINLOCK(cmci_discover_lock);
++static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
--static DEFINE_SPINLOCK(leds_lock);
-+static DEFINE_RAW_SPINLOCK(leds_lock);
+ #define CMCI_THRESHOLD 1
- static void netwinder_leds_event(led_event_t evt)
- {
- unsigned long flags;
+@@ -85,7 +85,7 @@ static void cmci_discover(int banks, int
+ int hdr = 0;
+ int i;
-- spin_lock_irqsave(&leds_lock, flags);
-+ raw_spin_lock_irqsave(&leds_lock, flags);
+- spin_lock_irqsave(&cmci_discover_lock, flags);
++ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ for (i = 0; i < banks; i++) {
+ u64 val;
- switch (evt) {
- case led_start:
-@@ -117,12 +117,12 @@ static void netwinder_leds_event(led_eve
- break;
+@@ -116,7 +116,7 @@ static void cmci_discover(int banks, int
+ WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+ }
}
+- spin_unlock_irqrestore(&cmci_discover_lock, flags);
++ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ if (hdr)
+ printk(KERN_CONT "\n");
+ }
+@@ -150,7 +150,7 @@ void cmci_clear(void)
-- spin_unlock_irqrestore(&leds_lock, flags);
-+ raw_spin_unlock_irqrestore(&leds_lock, flags);
-
- if (led_state & LED_STATE_ENABLED) {
-- spin_lock_irqsave(&nw_gpio_lock, flags);
-+ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
- nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state);
-- spin_unlock_irqrestore(&nw_gpio_lock, flags);
-+ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ if (!cmci_supported(&banks))
+ return;
+- spin_lock_irqsave(&cmci_discover_lock, flags);
++ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ for (i = 0; i < banks; i++) {
+ if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
+ continue;
+@@ -160,7 +160,7 @@ void cmci_clear(void)
+ wrmsrl(MSR_IA32_MCx_CTL2(i), val);
+ __clear_bit(i, __get_cpu_var(mce_banks_owned));
}
+- spin_unlock_irqrestore(&cmci_discover_lock, flags);
++ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
-Index: linux-2.6/arch/arm/mach-integrator/core.c
+ /*
+Index: linux-2.6/arch/powerpc/sysdev/uic.c
===================================================================
---- linux-2.6.orig/arch/arm/mach-integrator/core.c
-+++ linux-2.6/arch/arm/mach-integrator/core.c
-@@ -205,7 +205,7 @@ static struct amba_pl010_data integrator
-
- #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL)
-
--static DEFINE_SPINLOCK(cm_lock);
-+static DEFINE_RAW_SPINLOCK(cm_lock);
+--- linux-2.6.orig/arch/powerpc/sysdev/uic.c
++++ linux-2.6/arch/powerpc/sysdev/uic.c
+@@ -47,7 +47,7 @@ struct uic {
+ int index;
+ int dcrbase;
- /**
- * cm_control - update the CM_CTRL register.
-@@ -217,10 +217,10 @@ void cm_control(u32 mask, u32 set)
- unsigned long flags;
- u32 val;
+- spinlock_t lock;
++ raw_spinlock_t lock;
-- spin_lock_irqsave(&cm_lock, flags);
-+ raw_spin_lock_irqsave(&cm_lock, flags);
- val = readl(CM_CTRL) & ~mask;
- writel(val | set, CM_CTRL);
-- spin_unlock_irqrestore(&cm_lock, flags);
-+ raw_spin_unlock_irqrestore(&cm_lock, flags);
- }
+ /* The remapper for this UIC */
+ struct irq_host *irqhost;
+@@ -61,14 +61,14 @@ static void uic_unmask_irq(struct irq_da
+ u32 er, sr;
- EXPORT_SYMBOL(cm_control);
-Index: linux-2.6/arch/arm/mach-integrator/pci_v3.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-integrator/pci_v3.c
-+++ linux-2.6/arch/arm/mach-integrator/pci_v3.c
-@@ -163,7 +163,7 @@
- * 7:2 register number
- *
- */
--static DEFINE_SPINLOCK(v3_lock);
-+static DEFINE_RAW_SPINLOCK(v3_lock);
+ sr = 1 << (31-src);
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ /* ack level-triggered interrupts here */
+ if (irqd_is_level_type(d))
+ mtdcr(uic->dcrbase + UIC_SR, sr);
+ er = mfdcr(uic->dcrbase + UIC_ER);
+ er |= sr;
+ mtdcr(uic->dcrbase + UIC_ER, er);
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
+ }
- #define PCI_BUS_NONMEM_START 0x00000000
- #define PCI_BUS_NONMEM_SIZE SZ_256M
-@@ -284,7 +284,7 @@ static int v3_read_config(struct pci_bus
+ static void uic_mask_irq(struct irq_data *d)
+@@ -78,11 +78,11 @@ static void uic_mask_irq(struct irq_data
unsigned long flags;
- u32 v;
-
-- spin_lock_irqsave(&v3_lock, flags);
-+ raw_spin_lock_irqsave(&v3_lock, flags);
- addr = v3_open_config_window(bus, devfn, where);
-
- switch (size) {
-@@ -302,7 +302,7 @@ static int v3_read_config(struct pci_bus
- }
+ u32 er;
- v3_close_config_window();
-- spin_unlock_irqrestore(&v3_lock, flags);
-+ raw_spin_unlock_irqrestore(&v3_lock, flags);
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ er = mfdcr(uic->dcrbase + UIC_ER);
+ er &= ~(1 << (31 - src));
+ mtdcr(uic->dcrbase + UIC_ER, er);
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
+ }
- *val = v;
- return PCIBIOS_SUCCESSFUL;
-@@ -314,7 +314,7 @@ static int v3_write_config(struct pci_bu
- unsigned long addr;
+ static void uic_ack_irq(struct irq_data *d)
+@@ -91,9 +91,9 @@ static void uic_ack_irq(struct irq_data
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
-- spin_lock_irqsave(&v3_lock, flags);
-+ raw_spin_lock_irqsave(&v3_lock, flags);
- addr = v3_open_config_window(bus, devfn, where);
-
- switch (size) {
-@@ -335,7 +335,7 @@ static int v3_write_config(struct pci_bu
- }
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
+ }
- v3_close_config_window();
-- spin_unlock_irqrestore(&v3_lock, flags);
-+ raw_spin_unlock_irqrestore(&v3_lock, flags);
+ static void uic_mask_ack_irq(struct irq_data *d)
+@@ -104,7 +104,7 @@ static void uic_mask_ack_irq(struct irq_
+ u32 er, sr;
- return PCIBIOS_SUCCESSFUL;
+ sr = 1 << (31-src);
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ er = mfdcr(uic->dcrbase + UIC_ER);
+ er &= ~sr;
+ mtdcr(uic->dcrbase + UIC_ER, er);
+@@ -118,7 +118,7 @@ static void uic_mask_ack_irq(struct irq_
+ */
+ if (!irqd_is_level_type(d))
+ mtdcr(uic->dcrbase + UIC_SR, sr);
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
}
-@@ -510,7 +510,7 @@ void __init pci_v3_preinit(void)
- hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
- hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
-- spin_lock_irqsave(&v3_lock, flags);
-+ raw_spin_lock_irqsave(&v3_lock, flags);
+ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
+@@ -152,7 +152,7 @@ static int uic_set_irq_type(struct irq_d
- /*
- * Unlock V3 registers, but only if they were previously locked.
-@@ -583,7 +583,7 @@ void __init pci_v3_preinit(void)
- printk(KERN_ERR "PCI: unable to grab PCI error "
- "interrupt: %d\n", ret);
+ mask = ~(1 << (31 - src));
-- spin_unlock_irqrestore(&v3_lock, flags);
-+ raw_spin_unlock_irqrestore(&v3_lock, flags);
+- spin_lock_irqsave(&uic->lock, flags);
++ raw_spin_lock_irqsave(&uic->lock, flags);
+ tr = mfdcr(uic->dcrbase + UIC_TR);
+ pr = mfdcr(uic->dcrbase + UIC_PR);
+ tr = (tr & mask) | (trigger << (31-src));
+@@ -161,7 +161,7 @@ static int uic_set_irq_type(struct irq_d
+ mtdcr(uic->dcrbase + UIC_PR, pr);
+ mtdcr(uic->dcrbase + UIC_TR, tr);
+
+- spin_unlock_irqrestore(&uic->lock, flags);
++ raw_spin_unlock_irqrestore(&uic->lock, flags);
+
+ return 0;
}
+@@ -254,7 +254,7 @@ static struct uic * __init uic_init_one(
+ if (! uic)
+ return NULL; /* FIXME: panic? */
- void __init pci_v3_postinit(void)
-Index: linux-2.6/arch/arm/mach-ixp4xx/common-pci.c
+- spin_lock_init(&uic->lock);
++ raw_spin_lock_init(&uic->lock);
+ indexp = of_get_property(node, "cell-index", &len);
+ if (!indexp || (len != sizeof(u32))) {
+ printk(KERN_ERR "uic: Device node %s has missing or invalid "
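
[Unlike the file-scope locks elsewhere in this patch, the UIC lock is embedded in a per-controller structure, so the conversion changes the member type and the runtime initializer instead of a DEFINE_* macro. A sketch with hypothetical names:

#include <linux/spinlock.h>

struct my_intc {			/* hypothetical controller state */
	int index;
	raw_spinlock_t lock;		/* was: spinlock_t lock; */
};

static void my_intc_setup(struct my_intc *intc, int index)
{
	intc->index = index;
	/* was: spin_lock_init(&intc->lock); */
	raw_spin_lock_init(&intc->lock);
}
]
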
+Index: linux-2.6/drivers/dca/dca-core.c
===================================================================
---- linux-2.6.orig/arch/arm/mach-ixp4xx/common-pci.c
-+++ linux-2.6/arch/arm/mach-ixp4xx/common-pci.c
-@@ -54,7 +54,7 @@ unsigned long ixp4xx_pci_reg_base = 0;
- * these transactions are atomic or we will end up
- * with corrupt data on the bus or in a driver.
- */
--static DEFINE_SPINLOCK(ixp4xx_pci_lock);
-+static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock);
+--- linux-2.6.orig/drivers/dca/dca-core.c
++++ linux-2.6/drivers/dca/dca-core.c
+@@ -35,7 +35,7 @@ MODULE_VERSION(DCA_VERSION);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Intel Corporation");
- /*
- * Read from PCI config space
-@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(ixp4xx_pci_lock);
- static void crp_read(u32 ad_cbe, u32 *data)
- {
- unsigned long flags;
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
- *PCI_CRP_AD_CBE = ad_cbe;
- *data = *PCI_CRP_RDATA;
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
- }
+-static DEFINE_SPINLOCK(dca_lock);
++static DEFINE_RAW_SPINLOCK(dca_lock);
- /*
-@@ -74,10 +74,10 @@ static void crp_read(u32 ad_cbe, u32 *da
- static void crp_write(u32 ad_cbe, u32 data)
- {
- unsigned long flags;
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
- *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe;
- *PCI_CRP_WDATA = data;
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
- }
+ static LIST_HEAD(dca_domains);
- static inline int check_master_abort(void)
-@@ -101,7 +101,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32
- int retval = 0;
- int i;
+@@ -101,10 +101,10 @@ static void unregister_dca_providers(voi
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ INIT_LIST_HEAD(&unregistered_providers);
- *PCI_NP_AD = addr;
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
-@@ -118,7 +118,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32
- if(check_master_abort())
- retval = 1;
+ if (list_empty(&dca_domains)) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return;
+ }
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
- return retval;
- }
+@@ -116,7 +116,7 @@ static void unregister_dca_providers(voi
-@@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr,
- unsigned long flags;
- int retval = 0;
+ dca_free_domain(domain);
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
- *PCI_NP_AD = addr;
+ list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+ dca_sysfs_remove_provider(dca);
+@@ -144,13 +144,8 @@ static struct dca_domain *dca_get_domain
+ domain = dca_find_domain(rc);
-@@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr,
- if(check_master_abort())
- retval = 1;
+ if (!domain) {
+- if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
++ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
+ dca_providers_blocked = 1;
+- } else {
+- domain = dca_allocate_domain(rc);
+- if (domain)
+- list_add(&domain->node, &dca_domains);
+- }
+ }
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
- return retval;
- }
+ return domain;
+@@ -198,19 +193,19 @@ int dca_add_requester(struct device *dev
+ if (!dev)
+ return -EFAULT;
-@@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd,
- unsigned long flags;
- int retval = 0;
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
-- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-+ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ /* check if the requester has not been added already */
+ dca = dca_find_provider_by_dev(dev);
+ if (dca) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -EEXIST;
+ }
- *PCI_NP_AD = addr;
+ pci_rc = dca_pci_rc_from_dev(dev);
+ domain = dca_find_domain(pci_rc);
+ if (!domain) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
-@@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd,
- if(check_master_abort())
- retval = 1;
+@@ -220,17 +215,17 @@ int dca_add_requester(struct device *dev
+ break;
+ }
-- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
-+ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
- return retval;
- }
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
-Index: linux-2.6/arch/arm/mach-shark/leds.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-shark/leds.c
-+++ linux-2.6/arch/arm/mach-shark/leds.c
-@@ -36,7 +36,7 @@ static char led_state;
- static short hw_led_state;
- static short saved_state;
+ if (slot < 0)
+ return slot;
+
+ err = dca_sysfs_add_req(dca, dev, slot);
+ if (err) {
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
+ if (dca == dca_find_provider_by_dev(dev))
+ dca->ops->remove_requester(dca, dev);
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return err;
+ }
+
+@@ -251,14 +246,14 @@ int dca_remove_requester(struct device *
+ if (!dev)
+ return -EFAULT;
--static DEFINE_SPINLOCK(leds_lock);
-+static DEFINE_RAW_SPINLOCK(leds_lock);
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
+ dca = dca_find_provider_by_dev(dev);
+ if (!dca) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ slot = dca->ops->remove_requester(dca, dev);
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
- short sequoia_read(int addr) {
- outw(addr,0x24);
-@@ -52,7 +52,7 @@ static void sequoia_leds_event(led_event
- {
+ if (slot < 0)
+ return slot;
+@@ -280,16 +275,16 @@ u8 dca_common_get_tag(struct device *dev
+ u8 tag;
unsigned long flags;
-- spin_lock_irqsave(&leds_lock, flags);
-+ raw_spin_lock_irqsave(&leds_lock, flags);
-
- hw_led_state = sequoia_read(0x09);
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
-@@ -144,7 +144,7 @@ static void sequoia_leds_event(led_event
- if (led_state & LED_STATE_ENABLED)
- sequoia_write(hw_led_state,0x09);
+ dca = dca_find_provider_by_dev(dev);
+ if (!dca) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+ tag = dca->ops->get_tag(dca, dev, cpu);
-- spin_unlock_irqrestore(&leds_lock, flags);
-+ raw_spin_unlock_irqrestore(&leds_lock, flags);
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return tag;
}
- static int __init leds_init(void)
-Index: linux-2.6/arch/arm/mm/cache-l2x0.c
-===================================================================
---- linux-2.6.orig/arch/arm/mm/cache-l2x0.c
-+++ linux-2.6/arch/arm/mm/cache-l2x0.c
-@@ -26,7 +26,7 @@
- #define CACHE_LINE_SIZE 32
-
- static void __iomem *l2x0_base;
--static DEFINE_SPINLOCK(l2x0_lock);
-+static DEFINE_RAW_SPINLOCK(l2x0_lock);
- static uint32_t l2x0_way_mask; /* Bitmask of active ways */
- static uint32_t l2x0_size;
-
-@@ -115,9 +115,9 @@ static void l2x0_cache_sync(void)
+@@ -360,36 +355,51 @@ int register_dca_provider(struct dca_pro
{
+ int err;
unsigned long flags;
+- struct dca_domain *domain;
++ struct dca_domain *domain, *newdomain = NULL;
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
- }
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
+ if (dca_providers_blocked) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
- static void __l2x0_flush_all(void)
-@@ -134,9 +134,9 @@ static void l2x0_flush_all(void)
- unsigned long flags;
+ err = dca_sysfs_add_provider(dca, dev);
+ if (err)
+ return err;
- /* clean all ways */
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- __l2x0_flush_all();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
+ domain = dca_get_domain(dev);
+ if (!domain) {
++ struct pci_bus *rc;
++
+ if (dca_providers_blocked) {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ dca_sysfs_remove_provider(dca);
+ unregister_dca_providers();
+- } else {
+- spin_unlock_irqrestore(&dca_lock, flags);
++ return -ENODEV;
++ }
++
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
++ rc = dca_pci_rc_from_dev(dev);
++ newdomain = dca_allocate_domain(rc);
++ if (!newdomain)
++ return -ENODEV;
++ raw_spin_lock_irqsave(&dca_lock, flags);
++ /* Recheck, we might have raced after dropping the lock */
++ domain = dca_get_domain(dev);
++ if (!domain) {
++ domain = newdomain;
++ newdomain = NULL;
++ list_add(&domain->node, &dca_domains);
+ }
+- return -ENODEV;
+ }
+ list_add(&dca->node, &domain->dca_providers);
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_ADD, NULL);
++ kfree(newdomain);
+ return 0;
}
+ EXPORT_SYMBOL_GPL(register_dca_provider);
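
[This hunk is the one place in dca-core where the patch goes beyond a type substitution: dca_get_domain() previously allocated a domain while holding dca_lock, which is no longer permitted once the lock is raw, because the allocator may sleep. register_dca_provider() now drops the lock, allocates, retakes the lock and re-checks, freeing the unused allocation if another CPU won the race. The shape of that idiom, stripped down to hypothetical names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj { int data; };		/* hypothetical */

static DEFINE_RAW_SPINLOCK(obj_lock);
static struct my_obj *the_obj;

static struct my_obj *obj_get_or_create(void)
{
	struct my_obj *obj, *fresh = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&obj_lock, flags);
	obj = the_obj;
	if (!obj) {
		/*
		 * A sleeping allocator must not run under a raw lock:
		 * drop it, allocate, retake it and recheck.
		 */
		raw_spin_unlock_irqrestore(&obj_lock, flags);
		fresh = kzalloc(sizeof(*fresh), GFP_KERNEL);
		if (!fresh)
			return NULL;
		raw_spin_lock_irqsave(&obj_lock, flags);
		obj = the_obj;
		if (!obj) {
			the_obj = obj = fresh;
			fresh = NULL;	/* installed, don't free below */
		}
	}
	raw_spin_unlock_irqrestore(&obj_lock, flags);
	kfree(fresh);	/* NULL-safe: frees only a losing allocation */
	return obj;
}
]
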
+@@ -407,7 +417,7 @@ void unregister_dca_provider(struct dca_
+ blocking_notifier_call_chain(&dca_provider_chain,
+ DCA_PROVIDER_REMOVE, NULL);
- static void l2x0_clean_all(void)
-@@ -144,11 +144,11 @@ static void l2x0_clean_all(void)
- unsigned long flags;
+- spin_lock_irqsave(&dca_lock, flags);
++ raw_spin_lock_irqsave(&dca_lock, flags);
- /* clean all ways */
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
- cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
- }
+ list_del(&dca->node);
- static void l2x0_inv_all(void)
-@@ -156,13 +156,13 @@ static void l2x0_inv_all(void)
- unsigned long flags;
+@@ -416,7 +426,7 @@ void unregister_dca_provider(struct dca_
+ if (list_empty(&domain->dca_providers))
+ dca_free_domain(domain);
- /* invalidate all ways */
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- /* Invalidating when L2 is enabled is a nono */
- BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
- writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
- cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_unlock_irqrestore(&dca_lock, flags);
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
+
+ dca_sysfs_remove_provider(dca);
}
+Index: linux-2.6/arch/arm/common/gic.c
+===================================================================
+--- linux-2.6.orig/arch/arm/common/gic.c
++++ linux-2.6/arch/arm/common/gic.c
+@@ -33,7 +33,7 @@
+ #include <asm/mach/irq.h>
+ #include <asm/hardware/gic.h>
- static void l2x0_inv_range(unsigned long start, unsigned long end)
-@@ -170,7 +170,7 @@ static void l2x0_inv_range(unsigned long
- void __iomem *base = l2x0_base;
- unsigned long flags;
+-static DEFINE_SPINLOCK(irq_controller_lock);
++static DEFINE_RAW_SPINLOCK(irq_controller_lock);
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- if (start & (CACHE_LINE_SIZE - 1)) {
- start &= ~(CACHE_LINE_SIZE - 1);
- debug_writel(0x03);
-@@ -195,13 +195,13 @@ static void l2x0_inv_range(unsigned long
- }
+ /* Address of GIC 0 CPU interface */
+ void __iomem *gic_cpu_base_addr __read_mostly;
+@@ -88,30 +88,30 @@ static void gic_mask_irq(struct irq_data
+ {
+ u32 mask = 1 << (d->irq % 32);
- if (blk_end < end) {
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- }
- }
- cache_wait(base + L2X0_INV_LINE_PA, 1);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+ if (gic_arch_extn.irq_mask)
+ gic_arch_extn.irq_mask(d);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
}
- static void l2x0_clean_range(unsigned long start, unsigned long end)
-@@ -214,7 +214,7 @@ static void l2x0_clean_range(unsigned lo
- return;
- }
-
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- start &= ~(CACHE_LINE_SIZE - 1);
- while (start < end) {
- unsigned long blk_end = start + min(end - start, 4096UL);
-@@ -225,13 +225,13 @@ static void l2x0_clean_range(unsigned lo
- }
+ static void gic_unmask_irq(struct irq_data *d)
+ {
+ u32 mask = 1 << (d->irq % 32);
- if (blk_end < end) {
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- }
- }
- cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ if (gic_arch_extn.irq_unmask)
+ gic_arch_extn.irq_unmask(d);
+ writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
}
- static void l2x0_flush_range(unsigned long start, unsigned long end)
-@@ -244,7 +244,7 @@ static void l2x0_flush_range(unsigned lo
- return;
+ static void gic_eoi_irq(struct irq_data *d)
+ {
+ if (gic_arch_extn.irq_eoi) {
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ gic_arch_extn.irq_eoi(d);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
}
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- start &= ~(CACHE_LINE_SIZE - 1);
- while (start < end) {
- unsigned long blk_end = start + min(end - start, 4096UL);
-@@ -257,24 +257,24 @@ static void l2x0_flush_range(unsigned lo
- debug_writel(0x00);
+ writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+@@ -135,7 +135,7 @@ static int gic_set_type(struct irq_data
+ if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
- if (blk_end < end) {
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- }
- }
- cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
- cache_sync();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+
+ if (gic_arch_extn.irq_set_type)
+ gic_arch_extn.irq_set_type(d, type);
+@@ -160,7 +160,7 @@ static int gic_set_type(struct irq_data
+ if (enabled)
+ writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
+
+ return 0;
}
+@@ -188,11 +188,11 @@ static int gic_set_affinity(struct irq_d
+ mask = 0xff << shift;
+ bit = 1 << (cpu + shift);
- static void l2x0_disable(void)
- {
- unsigned long flags;
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ d->node = cpu;
+ val = readl_relaxed(reg) & ~mask;
+ writel_relaxed(val | bit, reg);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
-- spin_lock_irqsave(&l2x0_lock, flags);
-+ raw_spin_lock_irqsave(&l2x0_lock, flags);
- __l2x0_flush_all();
- writel_relaxed(0, l2x0_base + L2X0_CTRL);
- dsb();
-- spin_unlock_irqrestore(&l2x0_lock, flags);
-+ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ return 0;
}
+@@ -222,9 +222,9 @@ static void gic_handle_cascade_irq(unsig
- void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
-Index: linux-2.6/arch/arm/mm/context.c
+ chained_irq_enter(chip, desc);
+
+- spin_lock(&irq_controller_lock);
++ raw_spin_lock(&irq_controller_lock);
+ status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
+- spin_unlock(&irq_controller_lock);
++ raw_spin_unlock(&irq_controller_lock);
+
+ gic_irq = (status & 0x3ff);
+ if (gic_irq == 1023)
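
[irq_controller_lock is nested inside irq_chip callbacks such as gic_mask_irq(), which the generic IRQ core invokes with the descriptor's raw lock held and interrupts off, so any lock taken there must be raw as well. A sketch of that callback shape; my_mask_hw() is a stand-in for the writel_relaxed() register access above:

#include <linux/irq.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(ctrl_lock);	/* hypothetical */

static void my_mask_hw(unsigned int irq)
{
	/* device register write would go here */
}

static void my_chip_mask_irq(struct irq_data *d)
{
	/*
	 * The IRQ core calls this with interrupts disabled, so the
	 * plain raw_spin_lock() variant (no _irqsave) is enough.
	 */
	raw_spin_lock(&ctrl_lock);
	my_mask_hw(d->irq);
	raw_spin_unlock(&ctrl_lock);
}
]
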
+Index: linux-2.6/arch/arm/include/asm/dma.h
===================================================================
---- linux-2.6.orig/arch/arm/mm/context.c
-+++ linux-2.6/arch/arm/mm/context.c
-@@ -16,7 +16,7 @@
- #include <asm/mmu_context.h>
- #include <asm/tlbflush.h>
+--- linux-2.6.orig/arch/arm/include/asm/dma.h
++++ linux-2.6/arch/arm/include/asm/dma.h
+@@ -33,18 +33,18 @@
+ #define DMA_MODE_CASCADE 0xc0
+ #define DMA_AUTOINIT 0x10
--static DEFINE_SPINLOCK(cpu_asid_lock);
-+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
- unsigned int cpu_last_asid = ASID_FIRST_VERSION;
- #ifdef CONFIG_SMP
- DEFINE_PER_CPU(struct mm_struct *, current_mm);
-@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, curre
- void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+-extern spinlock_t dma_spin_lock;
++extern raw_spinlock_t dma_spin_lock;
+
+ static inline unsigned long claim_dma_lock(void)
{
- mm->context.id = 0;
-- spin_lock_init(&mm->context.id_lock);
-+ raw_spin_lock_init(&mm->context.id_lock);
+ unsigned long flags;
+- spin_lock_irqsave(&dma_spin_lock, flags);
++ raw_spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
}
- static void flush_context(void)
-@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_str
- * the broadcast. This function is also called via IPI so the
- * mm->context.id_lock has to be IRQ-safe.
- */
-- spin_lock_irqsave(&mm->context.id_lock, flags);
-+ raw_spin_lock_irqsave(&mm->context.id_lock, flags);
- if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
- /*
- * Old version of ASID found. Set the new one and
-@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_str
- mm->context.id = asid;
- cpumask_clear(mm_cpumask(mm));
- }
-- spin_unlock_irqrestore(&mm->context.id_lock, flags);
-+ raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
-
- /*
- * Set the mm_cpumask(mm) bit for the current CPU.
-@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
+ static inline void release_dma_lock(unsigned long flags)
{
- unsigned int asid;
+- spin_unlock_irqrestore(&dma_spin_lock, flags);
++ raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
+ }
-- spin_lock(&cpu_asid_lock);
-+ raw_spin_lock(&cpu_asid_lock);
- #ifdef CONFIG_SMP
- /*
- * Check the ASID again, in case the change was broadcast from
-@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
- */
- if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-- spin_unlock(&cpu_asid_lock);
-+ raw_spin_unlock(&cpu_asid_lock);
- return;
- }
+ /* Clear the 'DMA Pointer Flip Flop'.
+Index: linux-2.6/arch/arm/include/asm/mmu.h
+===================================================================
+--- linux-2.6.orig/arch/arm/include/asm/mmu.h
++++ linux-2.6/arch/arm/include/asm/mmu.h
+@@ -6,7 +6,7 @@
+ typedef struct {
+ #ifdef CONFIG_CPU_HAS_ASID
+ unsigned int id;
+- spinlock_t id_lock;
++ raw_spinlock_t id_lock;
#endif
-@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
- }
+ unsigned int kvm_seq;
+ } mm_context_t;
+@@ -16,7 +16,7 @@ typedef struct {
- set_mm_context(mm, asid);
-- spin_unlock(&cpu_asid_lock);
-+ raw_spin_unlock(&cpu_asid_lock);
- }
-Index: linux-2.6/arch/arm/mm/copypage-v4mc.c
+ /* init_mm.context.id_lock should be initialized. */
+ #define INIT_MM_CONTEXT(name) \
+- .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
++ .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+ #else
+ #define ASID(mm) (0)
+ #endif
+Index: linux-2.6/arch/arm/kernel/dma.c
===================================================================
---- linux-2.6.orig/arch/arm/mm/copypage-v4mc.c
-+++ linux-2.6/arch/arm/mm/copypage-v4mc.c
-@@ -30,7 +30,7 @@
- #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
- L_PTE_MT_MINICACHE)
+--- linux-2.6.orig/arch/arm/kernel/dma.c
++++ linux-2.6/arch/arm/kernel/dma.c
+@@ -23,7 +23,7 @@
--static DEFINE_SPINLOCK(minicache_lock);
-+static DEFINE_RAW_SPINLOCK(minicache_lock);
+ #include <asm/mach/dma.h>
+
+-DEFINE_SPINLOCK(dma_spin_lock);
++DEFINE_RAW_SPINLOCK(dma_spin_lock);
+ EXPORT_SYMBOL(dma_spin_lock);
+
+ static dma_t *dma_chan[MAX_DMA_CHANNELS];
+Index: linux-2.6/arch/arm/kernel/traps.c
+===================================================================
+--- linux-2.6.orig/arch/arm/kernel/traps.c
++++ linux-2.6/arch/arm/kernel/traps.c
+@@ -255,7 +255,7 @@ static int __die(const char *str, int er
+ return ret;
+ }
+
+-static DEFINE_SPINLOCK(die_lock);
++static DEFINE_RAW_SPINLOCK(die_lock);
/*
- * ARMv4 mini-dcache optimised copy_user_highpage
-@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct pag
- if (!test_and_set_bit(PG_dcache_clean, &from->flags))
- __flush_dcache_page(page_mapping(from), from);
+ * This function is protected against re-entrancy.
+@@ -267,7 +267,7 @@ void die(const char *str, struct pt_regs
-- spin_lock(&minicache_lock);
-+ raw_spin_lock(&minicache_lock);
+ oops_enter();
- set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
- flush_tlb_kernel_page(0xffff8000);
+- spin_lock_irq(&die_lock);
++ raw_spin_lock_irq(&die_lock);
+ console_verbose();
+ bust_spinlocks(1);
+ ret = __die(str, err, thread, regs);
+@@ -277,7 +277,7 @@ void die(const char *str, struct pt_regs
- mc_copy_user_page((void *)0xffff8000, kto);
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE);
+- spin_unlock_irq(&die_lock);
++ raw_spin_unlock_irq(&die_lock);
+ oops_exit();
-- spin_unlock(&minicache_lock);
-+ raw_spin_unlock(&minicache_lock);
+ if (in_interrupt())
+@@ -302,24 +302,24 @@ void arm_notify_die(const char *str, str
+ }
+
+ static LIST_HEAD(undef_hook);
+-static DEFINE_SPINLOCK(undef_lock);
++static DEFINE_RAW_SPINLOCK(undef_lock);
+
+ void register_undef_hook(struct undef_hook *hook)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&undef_lock, flags);
++ raw_spin_lock_irqsave(&undef_lock, flags);
+ list_add(&hook->node, &undef_hook);
+- spin_unlock_irqrestore(&undef_lock, flags);
++ raw_spin_unlock_irqrestore(&undef_lock, flags);
+ }
+
+ void unregister_undef_hook(struct undef_hook *hook)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&undef_lock, flags);
++ raw_spin_lock_irqsave(&undef_lock, flags);
+ list_del(&hook->node);
+- spin_unlock_irqrestore(&undef_lock, flags);
++ raw_spin_unlock_irqrestore(&undef_lock, flags);
+ }
+
+ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+@@ -328,12 +328,12 @@ static int call_undef_hook(struct pt_reg
+ unsigned long flags;
+ int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
+
+- spin_lock_irqsave(&undef_lock, flags);
++ raw_spin_lock_irqsave(&undef_lock, flags);
+ list_for_each_entry(hook, &undef_hook, node)
+ if ((instr & hook->instr_mask) == hook->instr_val &&
+ (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
+ fn = hook->fn;
+- spin_unlock_irqrestore(&undef_lock, flags);
++ raw_spin_unlock_irqrestore(&undef_lock, flags);
- kunmap_atomic(kto, KM_USER1);
+ return fn ? fn(regs, instr) : 1;
}
-Index: linux-2.6/arch/arm/mm/copypage-v6.c
+Index: linux-2.6/arch/arm/mach-footbridge/include/mach/hardware.h
===================================================================
---- linux-2.6.orig/arch/arm/mm/copypage-v6.c
-+++ linux-2.6/arch/arm/mm/copypage-v6.c
-@@ -27,7 +27,7 @@
- #define from_address (0xffff8000)
- #define to_address (0xffffc000)
-
--static DEFINE_SPINLOCK(v6_lock);
-+static DEFINE_RAW_SPINLOCK(v6_lock);
+--- linux-2.6.orig/arch/arm/mach-footbridge/include/mach/hardware.h
++++ linux-2.6/arch/arm/mach-footbridge/include/mach/hardware.h
+@@ -93,7 +93,7 @@
+ #define CPLD_FLASH_WR_ENABLE 1
+ #ifndef __ASSEMBLY__
+-extern spinlock_t nw_gpio_lock;
++extern raw_spinlock_t nw_gpio_lock;
+ extern void nw_gpio_modify_op(unsigned int mask, unsigned int set);
+ extern void nw_gpio_modify_io(unsigned int mask, unsigned int in);
+ extern unsigned int nw_gpio_read(void);
+Index: linux-2.6/arch/arm/mach-footbridge/netwinder-hw.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-footbridge/netwinder-hw.c
++++ linux-2.6/arch/arm/mach-footbridge/netwinder-hw.c
+@@ -68,7 +68,7 @@ static inline void wb977_ww(int reg, int
/*
- * Copy the user page. No aliasing to deal with so we can just
-@@ -89,7 +89,7 @@ static void v6_copy_user_highpage_aliasi
- * Now copy the page using the same cache colour as the
- * pages ultimate destination.
+ * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE
+ */
+-DEFINE_SPINLOCK(nw_gpio_lock);
++DEFINE_RAW_SPINLOCK(nw_gpio_lock);
+ EXPORT_SYMBOL(nw_gpio_lock);
+
+ static unsigned int current_gpio_op;
+@@ -327,9 +327,9 @@ static inline void wb977_init_gpio(void)
+ /*
+ * Set Group1/Group2 outputs
*/
-- spin_lock(&v6_lock);
-+ raw_spin_lock(&v6_lock);
+- spin_lock_irqsave(&nw_gpio_lock, flags);
++ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN);
+- spin_unlock_irqrestore(&nw_gpio_lock, flags);
++ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ }
- set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
-@@ -102,7 +102,7 @@ static void v6_copy_user_highpage_aliasi
+ /*
+@@ -390,9 +390,9 @@ static void __init cpld_init(void)
+ {
+ unsigned long flags;
- copy_page((void *)kto, (void *)kfrom);
+- spin_lock_irqsave(&nw_gpio_lock, flags);
++ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE);
+- spin_unlock_irqrestore(&nw_gpio_lock, flags);
++ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ }
-- spin_unlock(&v6_lock);
-+ raw_spin_unlock(&v6_lock);
+ static unsigned char rwa_unlock[] __initdata =
+@@ -616,9 +616,9 @@ static int __init nw_hw_init(void)
+ cpld_init();
+ rwa010_init();
+
+- spin_lock_irqsave(&nw_gpio_lock, flags);
++ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS);
+- spin_unlock_irqrestore(&nw_gpio_lock, flags);
++ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ }
+ return 0;
}
+Index: linux-2.6/arch/arm/mach-footbridge/netwinder-leds.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-footbridge/netwinder-leds.c
++++ linux-2.6/arch/arm/mach-footbridge/netwinder-leds.c
+@@ -31,13 +31,13 @@
+ static char led_state;
+ static char hw_led_state;
- /*
-@@ -122,13 +122,13 @@ static void v6_clear_user_highpage_alias
- * Now clear the page using the same cache colour as
- * the pages ultimate destination.
- */
-- spin_lock(&v6_lock);
-+ raw_spin_lock(&v6_lock);
+-static DEFINE_SPINLOCK(leds_lock);
++static DEFINE_RAW_SPINLOCK(leds_lock);
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
- flush_tlb_kernel_page(to);
- clear_page((void *)to);
+ static void netwinder_leds_event(led_event_t evt)
+ {
+ unsigned long flags;
-- spin_unlock(&v6_lock);
-+ raw_spin_unlock(&v6_lock);
- }
+- spin_lock_irqsave(&leds_lock, flags);
++ raw_spin_lock_irqsave(&leds_lock, flags);
- struct cpu_user_fns v6_user_fns __initdata = {
-Index: linux-2.6/arch/arm/mm/copypage-xscale.c
-===================================================================
---- linux-2.6.orig/arch/arm/mm/copypage-xscale.c
-+++ linux-2.6/arch/arm/mm/copypage-xscale.c
-@@ -32,7 +32,7 @@
- #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
- L_PTE_MT_MINICACHE)
+ switch (evt) {
+ case led_start:
+@@ -117,12 +117,12 @@ static void netwinder_leds_event(led_eve
+ break;
+ }
--static DEFINE_SPINLOCK(minicache_lock);
-+static DEFINE_RAW_SPINLOCK(minicache_lock);
+- spin_unlock_irqrestore(&leds_lock, flags);
++ raw_spin_unlock_irqrestore(&leds_lock, flags);
- /*
- * XScale mini-dcache optimised copy_user_highpage
-@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct
- if (!test_and_set_bit(PG_dcache_clean, &from->flags))
- __flush_dcache_page(page_mapping(from), from);
+ if (led_state & LED_STATE_ENABLED) {
+- spin_lock_irqsave(&nw_gpio_lock, flags);
++ raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state);
+- spin_unlock_irqrestore(&nw_gpio_lock, flags);
++ raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ }
+ }
-- spin_lock(&minicache_lock);
-+ raw_spin_lock(&minicache_lock);
+Index: linux-2.6/arch/arm/mach-integrator/core.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-integrator/core.c
++++ linux-2.6/arch/arm/mach-integrator/core.c
+@@ -205,7 +205,7 @@ static struct amba_pl010_data integrator
- set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
- flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+ #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL)
- mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+-static DEFINE_SPINLOCK(cm_lock);
++static DEFINE_RAW_SPINLOCK(cm_lock);
-- spin_unlock(&minicache_lock);
-+ raw_spin_unlock(&minicache_lock);
+ /**
+ * cm_control - update the CM_CTRL register.
+@@ -217,10 +217,10 @@ void cm_control(u32 mask, u32 set)
+ unsigned long flags;
+ u32 val;
- kunmap_atomic(kto, KM_USER1);
+- spin_lock_irqsave(&cm_lock, flags);
++ raw_spin_lock_irqsave(&cm_lock, flags);
+ val = readl(CM_CTRL) & ~mask;
+ writel(val | set, CM_CTRL);
+- spin_unlock_irqrestore(&cm_lock, flags);
++ raw_spin_unlock_irqrestore(&cm_lock, flags);
}
-Index: linux-2.6/drivers/dma/ipu/ipu_irq.c
+
+ EXPORT_SYMBOL(cm_control);
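
For reference, the two lock flavours behave quite differently once PREEMPT_RT is enabled, which is why cm_control() and the LED event handlers cannot stay on spinlock_t. Roughly, with hypothetical locks that are not part of the patch:

	static DEFINE_SPINLOCK(can_sleep_lock);		/* rt_mutex under the hood on -rt */
	static DEFINE_RAW_SPINLOCK(atomic_lock);	/* real spinlock everywhere */

	static void demo(void)
	{
		/* on -rt this may block, and it leaves preemption
		 * enabled, so it is illegal in atomic context */
		spin_lock(&can_sleep_lock);
		spin_unlock(&can_sleep_lock);

		/* this busy-waits with preemption disabled, exactly
		 * like a !RT spinlock, and is atomic-context safe */
		raw_spin_lock(&atomic_lock);
		raw_spin_unlock(&atomic_lock);
	}
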
+Index: linux-2.6/arch/arm/mach-integrator/pci_v3.c
===================================================================
---- linux-2.6.orig/drivers/dma/ipu/ipu_irq.c
-+++ linux-2.6/drivers/dma/ipu/ipu_irq.c
-@@ -81,7 +81,7 @@ static struct ipu_irq_map irq_map[CONFIG
- /* Protects allocations from the above array of maps */
- static DEFINE_MUTEX(map_lock);
- /* Protects register accesses and individual mappings */
--static DEFINE_SPINLOCK(bank_lock);
-+static DEFINE_RAW_SPINLOCK(bank_lock);
+--- linux-2.6.orig/arch/arm/mach-integrator/pci_v3.c
++++ linux-2.6/arch/arm/mach-integrator/pci_v3.c
+@@ -163,7 +163,7 @@
+ * 7:2 register number
+ *
+ */
+-static DEFINE_SPINLOCK(v3_lock);
++static DEFINE_RAW_SPINLOCK(v3_lock);
- static struct ipu_irq_map *src2map(unsigned int src)
- {
-@@ -101,11 +101,11 @@ static void ipu_irq_unmask(struct irq_da
- uint32_t reg;
- unsigned long lock_flags;
+ #define PCI_BUS_NONMEM_START 0x00000000
+ #define PCI_BUS_NONMEM_SIZE SZ_256M
+@@ -284,7 +284,7 @@ static int v3_read_config(struct pci_bus
+ unsigned long flags;
+ u32 v;
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+- spin_lock_irqsave(&v3_lock, flags);
++ raw_spin_lock_irqsave(&v3_lock, flags);
+ addr = v3_open_config_window(bus, devfn, where);
- bank = map->bank;
- if (!bank) {
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
+ switch (size) {
+@@ -302,7 +302,7 @@ static int v3_read_config(struct pci_bus
}
-@@ -114,7 +114,7 @@ static void ipu_irq_unmask(struct irq_da
- reg |= (1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- }
+ v3_close_config_window();
+- spin_unlock_irqrestore(&v3_lock, flags);
++ raw_spin_unlock_irqrestore(&v3_lock, flags);
- static void ipu_irq_mask(struct irq_data *d)
-@@ -124,11 +124,11 @@ static void ipu_irq_mask(struct irq_data
- uint32_t reg;
- unsigned long lock_flags;
+ *val = v;
+ return PCIBIOS_SUCCESSFUL;
+@@ -314,7 +314,7 @@ static int v3_write_config(struct pci_bu
+ unsigned long addr;
+ unsigned long flags;
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+- spin_lock_irqsave(&v3_lock, flags);
++ raw_spin_lock_irqsave(&v3_lock, flags);
+ addr = v3_open_config_window(bus, devfn, where);
- bank = map->bank;
- if (!bank) {
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
+ switch (size) {
+@@ -335,7 +335,7 @@ static int v3_write_config(struct pci_bu
}
-@@ -137,7 +137,7 @@ static void ipu_irq_mask(struct irq_data
- reg &= ~(1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ v3_close_config_window();
+- spin_unlock_irqrestore(&v3_lock, flags);
++ raw_spin_unlock_irqrestore(&v3_lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+@@ -510,7 +510,7 @@ void __init pci_v3_preinit(void)
+ hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
+ hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
+
+- spin_lock_irqsave(&v3_lock, flags);
++ raw_spin_lock_irqsave(&v3_lock, flags);
+
+ /*
+ * Unlock V3 registers, but only if they were previously locked.
+@@ -583,7 +583,7 @@ void __init pci_v3_preinit(void)
+ printk(KERN_ERR "PCI: unable to grab PCI error "
+ "interrupt: %d\n", ret);
+
+- spin_unlock_irqrestore(&v3_lock, flags);
++ raw_spin_unlock_irqrestore(&v3_lock, flags);
}
- static void ipu_irq_ack(struct irq_data *d)
-@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data
- struct ipu_irq_bank *bank;
- unsigned long lock_flags;
-
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+ void __init pci_v3_postinit(void)
+Index: linux-2.6/arch/arm/mach-ixp4xx/common-pci.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mach-ixp4xx/common-pci.c
++++ linux-2.6/arch/arm/mach-ixp4xx/common-pci.c
+@@ -54,7 +54,7 @@ unsigned long ixp4xx_pci_reg_base = 0;
+ * these transactions are atomic or we will end up
+ * with corrupt data on the bus or in a driver.
+ */
+-static DEFINE_SPINLOCK(ixp4xx_pci_lock);
++static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock);
- bank = map->bank;
- if (!bank) {
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
+ /*
+ * Read from PCI config space
+@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(ixp4xx_pci_lock);
+ static void crp_read(u32 ad_cbe, u32 *data)
+ {
+ unsigned long flags;
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ *PCI_CRP_AD_CBE = ad_cbe;
+ *data = *PCI_CRP_RDATA;
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ }
- ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ /*
+@@ -74,10 +74,10 @@ static void crp_read(u32 ad_cbe, u32 *da
+ static void crp_write(u32 ad_cbe, u32 data)
+ {
+ unsigned long flags;
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+ *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe;
+ *PCI_CRP_WDATA = data;
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
}
- /**
-@@ -172,11 +172,11 @@ bool ipu_irq_status(unsigned int irq)
- unsigned long lock_flags;
- bool ret;
+ static inline int check_master_abort(void)
+@@ -101,7 +101,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32
+ int retval = 0;
+ int i;
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- bank = map->bank;
- ret = bank && ipu_read_reg(bank->ipu, bank->status) &
- (1UL << (map->source & 31));
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
- return ret;
- }
-@@ -213,10 +213,10 @@ int ipu_irq_map(unsigned int source)
- if (irq_map[i].source < 0) {
- unsigned long lock_flags;
+ *PCI_NP_AD = addr;
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = source;
- irq_map[i].bank = irq_bank + source / 32;
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+@@ -118,7 +118,7 @@ int ixp4xx_pci_read_errata(u32 addr, u32
+ if(check_master_abort())
+ retval = 1;
- ret = irq_map[i].irq;
- pr_debug("IPU: mapped source %u to IRQ %u\n",
-@@ -252,10 +252,10 @@ int ipu_irq_unmap(unsigned int source)
- pr_debug("IPU: unmapped source %u from IRQ %u\n",
- source, irq_map[i].irq);
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ return retval;
+ }
-- spin_lock_irqsave(&bank_lock, lock_flags);
-+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = -EINVAL;
- irq_map[i].bank = NULL;
-- spin_unlock_irqrestore(&bank_lock, lock_flags);
-+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+@@ -127,7 +127,7 @@ int ixp4xx_pci_read_no_errata(u32 addr,
+ unsigned long flags;
+ int retval = 0;
- ret = 0;
- break;
-@@ -276,7 +276,7 @@ static void ipu_irq_err(unsigned int irq
- for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
- struct ipu_irq_bank *bank = irq_bank + i;
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
-- spin_lock(&bank_lock);
-+ raw_spin_lock(&bank_lock);
- status = ipu_read_reg(ipu, bank->status);
- /*
- * Don't think we have to clear all interrupts here, they will
-@@ -284,18 +284,18 @@ static void ipu_irq_err(unsigned int irq
- * might want to clear unhandled interrupts after the loop...
- */
- status &= ipu_read_reg(ipu, bank->control);
-- spin_unlock(&bank_lock);
-+ raw_spin_unlock(&bank_lock);
- while ((line = ffs(status))) {
- struct ipu_irq_map *map;
+ *PCI_NP_AD = addr;
- line--;
- status &= ~(1UL << line);
+@@ -140,7 +140,7 @@ int ixp4xx_pci_read_no_errata(u32 addr,
+ if(check_master_abort())
+ retval = 1;
-- spin_lock(&bank_lock);
-+ raw_spin_lock(&bank_lock);
- map = src2map(32 * i + line);
- if (map)
- irq = map->irq;
-- spin_unlock(&bank_lock);
-+ raw_spin_unlock(&bank_lock);
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ return retval;
+ }
- if (!map) {
- pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
-@@ -317,22 +317,22 @@ static void ipu_irq_fn(unsigned int irq,
- for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
- struct ipu_irq_bank *bank = irq_bank + i;
+@@ -149,7 +149,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd,
+ unsigned long flags;
+ int retval = 0;
-- spin_lock(&bank_lock);
-+ raw_spin_lock(&bank_lock);
- status = ipu_read_reg(ipu, bank->status);
- /* Not clearing all interrupts, see above */
- status &= ipu_read_reg(ipu, bank->control);
-- spin_unlock(&bank_lock);
-+ raw_spin_unlock(&bank_lock);
- while ((line = ffs(status))) {
- struct ipu_irq_map *map;
+- spin_lock_irqsave(&ixp4xx_pci_lock, flags);
++ raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags);
- line--;
- status &= ~(1UL << line);
+ *PCI_NP_AD = addr;
-- spin_lock(&bank_lock);
-+ raw_spin_lock(&bank_lock);
- map = src2map(32 * i + line);
- if (map)
- irq = map->irq;
-- spin_unlock(&bank_lock);
-+ raw_spin_unlock(&bank_lock);
+@@ -162,7 +162,7 @@ int ixp4xx_pci_write(u32 addr, u32 cmd,
+ if(check_master_abort())
+ retval = 1;
- if (!map) {
- pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
-Index: linux-2.6/drivers/pci/dmar.c
+- spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
++ raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+ return retval;
+ }
+
+Index: linux-2.6/arch/arm/mach-shark/leds.c
===================================================================
---- linux-2.6.orig/drivers/pci/dmar.c
-+++ linux-2.6/drivers/pci/dmar.c
-@@ -800,7 +800,7 @@ int alloc_iommu(struct dmar_drhd_unit *d
- (unsigned long long)iommu->cap,
- (unsigned long long)iommu->ecap);
+--- linux-2.6.orig/arch/arm/mach-shark/leds.c
++++ linux-2.6/arch/arm/mach-shark/leds.c
+@@ -36,7 +36,7 @@ static char led_state;
+ static short hw_led_state;
+ static short saved_state;
-- spin_lock_init(&iommu->register_lock);
-+ raw_spin_lock_init(&iommu->register_lock);
+-static DEFINE_SPINLOCK(leds_lock);
++static DEFINE_RAW_SPINLOCK(leds_lock);
- drhd->iommu = iommu;
- return 0;
-@@ -921,11 +921,11 @@ int qi_submit_sync(struct qi_desc *desc,
- restart:
- rc = 0;
+ short sequoia_read(int addr) {
+ outw(addr,0x24);
+@@ -52,7 +52,7 @@ static void sequoia_leds_event(led_event
+ {
+ unsigned long flags;
-- spin_lock_irqsave(&qi->q_lock, flags);
-+ raw_spin_lock_irqsave(&qi->q_lock, flags);
- while (qi->free_cnt < 3) {
-- spin_unlock_irqrestore(&qi->q_lock, flags);
-+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
- cpu_relax();
-- spin_lock_irqsave(&qi->q_lock, flags);
-+ raw_spin_lock_irqsave(&qi->q_lock, flags);
- }
+- spin_lock_irqsave(&leds_lock, flags);
++ raw_spin_lock_irqsave(&leds_lock, flags);
- index = qi->free_head;
-@@ -965,15 +965,15 @@ int qi_submit_sync(struct qi_desc *desc,
- if (rc)
- break;
+ hw_led_state = sequoia_read(0x09);
-- spin_unlock(&qi->q_lock);
-+ raw_spin_unlock(&qi->q_lock);
- cpu_relax();
-- spin_lock(&qi->q_lock);
-+ raw_spin_lock(&qi->q_lock);
- }
+@@ -144,7 +144,7 @@ static void sequoia_leds_event(led_event
+ if (led_state & LED_STATE_ENABLED)
+ sequoia_write(hw_led_state,0x09);
- qi->desc_status[index] = QI_DONE;
+- spin_unlock_irqrestore(&leds_lock, flags);
++ raw_spin_unlock_irqrestore(&leds_lock, flags);
+ }
- reclaim_free_desc(qi);
-- spin_unlock_irqrestore(&qi->q_lock, flags);
-+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
+ static int __init leds_init(void)
+Index: linux-2.6/arch/arm/mm/cache-l2x0.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mm/cache-l2x0.c
++++ linux-2.6/arch/arm/mm/cache-l2x0.c
+@@ -26,7 +26,7 @@
+ #define CACHE_LINE_SIZE 32
- if (rc == -EAGAIN)
- goto restart;
-@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu
- if (!ecap_qis(iommu->ecap))
- return;
+ static void __iomem *l2x0_base;
+-static DEFINE_SPINLOCK(l2x0_lock);
++static DEFINE_RAW_SPINLOCK(l2x0_lock);
+ static uint32_t l2x0_way_mask; /* Bitmask of active ways */
+ static uint32_t l2x0_size;
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+@@ -115,9 +115,9 @@ static void l2x0_cache_sync(void)
+ {
+ unsigned long flags;
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
- if (!(sts & DMA_GSTS_QIES))
-@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
- !(sts & DMA_GSTS_QIES), sts);
- end:
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- /*
-@@ -1097,7 +1097,7 @@ static void __dmar_enable_qi(struct inte
- qi->free_head = qi->free_tail = 0;
- qi->free_cnt = QI_LENGTH;
+ static void __l2x0_flush_all(void)
+@@ -134,9 +134,9 @@ static void l2x0_flush_all(void)
+ unsigned long flags;
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ /* clean all ways */
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ __l2x0_flush_all();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
- /* write zero to the tail reg */
- writel(0, iommu->reg + DMAR_IQT_REG);
-@@ -1110,7 +1110,7 @@ static void __dmar_enable_qi(struct inte
- /* Make sure hardware complete it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
+ static void l2x0_clean_all(void)
+@@ -144,11 +144,11 @@ static void l2x0_clean_all(void)
+ unsigned long flags;
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ /* clean all ways */
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+ cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- /*
-@@ -1159,7 +1159,7 @@ int dmar_enable_qi(struct intel_iommu *i
- qi->free_head = qi->free_tail = 0;
- qi->free_cnt = QI_LENGTH;
+ static void l2x0_inv_all(void)
+@@ -156,13 +156,13 @@ static void l2x0_inv_all(void)
+ unsigned long flags;
-- spin_lock_init(&qi->q_lock);
-+ raw_spin_lock_init(&qi->q_lock);
+ /* invalidate all ways */
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ /* Invalidating when L2 is enabled is a nono */
+ BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
+ writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+ cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+ }
- __dmar_enable_qi(iommu);
+ static void l2x0_inv_range(unsigned long start, unsigned long end)
+@@ -170,7 +170,7 @@ static void l2x0_inv_range(unsigned long
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
-@@ -1225,11 +1225,11 @@ void dmar_msi_unmask(struct irq_data *da
- unsigned long flag;
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ start &= ~(CACHE_LINE_SIZE - 1);
+ debug_writel(0x03);
+@@ -195,13 +195,13 @@ static void l2x0_inv_range(unsigned long
+ }
- /* unmask it */
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(0, iommu->reg + DMAR_FECTL_REG);
- /* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ if (blk_end < end) {
+- spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_INV_LINE_PA, 1);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- void dmar_msi_mask(struct irq_data *data)
-@@ -1238,11 +1238,11 @@ void dmar_msi_mask(struct irq_data *data
- struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
+ static void l2x0_clean_range(unsigned long start, unsigned long end)
+@@ -214,7 +214,7 @@ static void l2x0_clean_range(unsigned lo
+ return;
+ }
- /* mask it */
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
- /* Read a reg to force flush the post write */
- readl(iommu->reg + DMAR_FECTL_REG);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ start &= ~(CACHE_LINE_SIZE - 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+@@ -225,13 +225,13 @@ static void l2x0_clean_range(unsigned lo
+ }
+
+ if (blk_end < end) {
+- spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- void dmar_msi_write(int irq, struct msi_msg *msg)
-@@ -1250,11 +1250,11 @@ void dmar_msi_write(int irq, struct msi_
- struct intel_iommu *iommu = irq_get_handler_data(irq);
- unsigned long flag;
+ static void l2x0_flush_range(unsigned long start, unsigned long end)
+@@ -244,7 +244,7 @@ static void l2x0_flush_range(unsigned lo
+ return;
+ }
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
- writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
- writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ start &= ~(CACHE_LINE_SIZE - 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+@@ -257,24 +257,24 @@ static void l2x0_flush_range(unsigned lo
+ debug_writel(0x00);
+
+ if (blk_end < end) {
+- spin_unlock_irqrestore(&l2x0_lock, flags);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+ cache_sync();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- void dmar_msi_read(int irq, struct msi_msg *msg)
-@@ -1262,11 +1262,11 @@ void dmar_msi_read(int irq, struct msi_m
- struct intel_iommu *iommu = irq_get_handler_data(irq);
- unsigned long flag;
+ static void l2x0_disable(void)
+ {
+ unsigned long flags;
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
- msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
- msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+- spin_lock_irqsave(&l2x0_lock, flags);
++ raw_spin_lock_irqsave(&l2x0_lock, flags);
+ __l2x0_flush_all();
+ writel_relaxed(0, l2x0_base + L2X0_CTRL);
+ dsb();
+- spin_unlock_irqrestore(&l2x0_lock, flags);
++ raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
- static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
-@@ -1303,7 +1303,7 @@ irqreturn_t dmar_fault(int irq, void *de
- u32 fault_status;
- unsigned long flag;
-
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- if (fault_status)
- printk(KERN_ERR "DRHD: handling fault status reg %x\n",
-@@ -1342,7 +1342,7 @@ irqreturn_t dmar_fault(int irq, void *de
- writel(DMA_FRCD_F, iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN + 12);
+ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
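
The cache-l2x0 conversion above is the one to watch for latency: with a raw lock the irqs-off sections become real spinning sections on -rt as well. The range operations already bound them, though, by dropping and retaking the lock every 4096 bytes. Condensed from the hunks above (the real code advances start inside the per-line maintenance loop):

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		/* per-cache-line maintenance up to blk_end, then
		 * start = blk_end */

		if (blk_end < end) {
			/* lock break: bounds irqs-off time per chunk */
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
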
+Index: linux-2.6/arch/arm/mm/context.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mm/context.c
++++ linux-2.6/arch/arm/mm/context.c
+@@ -16,7 +16,7 @@
+ #include <asm/mmu_context.h>
+ #include <asm/tlbflush.h>
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+-static DEFINE_SPINLOCK(cpu_asid_lock);
++static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+ #ifdef CONFIG_SMP
+ DEFINE_PER_CPU(struct mm_struct *, current_mm);
+@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, curre
+ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ {
+ mm->context.id = 0;
+- spin_lock_init(&mm->context.id_lock);
++ raw_spin_lock_init(&mm->context.id_lock);
+ }
- dmar_fault_do_one(iommu, type, fault_reason,
- source_id, guest_addr);
-@@ -1350,14 +1350,14 @@ irqreturn_t dmar_fault(int irq, void *de
- fault_index++;
- if (fault_index >= cap_num_fault_regs(iommu->cap))
- fault_index = 0;
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ static void flush_context(void)
+@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_str
+ * the broadcast. This function is also called via IPI so the
+ * mm->context.id_lock has to be IRQ-safe.
+ */
+- spin_lock_irqsave(&mm->context.id_lock, flags);
++ raw_spin_lock_irqsave(&mm->context.id_lock, flags);
+ if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+ /*
+ * Old version of ASID found. Set the new one and
+@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_str
+ mm->context.id = asid;
+ cpumask_clear(mm_cpumask(mm));
}
- clear_rest:
- /* clear all the other faults */
- fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- writel(fault_status, iommu->reg + DMAR_FSTS_REG);
-
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- return IRQ_HANDLED;
- }
+- spin_unlock_irqrestore(&mm->context.id_lock, flags);
++ raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
-@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iomm
- return ret;
+ /*
+ * Set the mm_cpumask(mm) bit for the current CPU.
+@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
+ {
+ unsigned int asid;
+
+- spin_lock(&cpu_asid_lock);
++ raw_spin_lock(&cpu_asid_lock);
+ #ifdef CONFIG_SMP
+ /*
+ * Check the ASID again, in case the change was broadcast from
+@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
+ */
+ if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+- spin_unlock(&cpu_asid_lock);
++ raw_spin_unlock(&cpu_asid_lock);
+ return;
+ }
+ #endif
+@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
}
-- ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
-+ ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
- if (ret)
- printk(KERN_ERR "IOMMU: can't request irq\n");
- return ret;
-Index: linux-2.6/drivers/pci/intel-iommu.c
+ set_mm_context(mm, asid);
+- spin_unlock(&cpu_asid_lock);
++ raw_spin_unlock(&cpu_asid_lock);
+ }
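
The ASID locks in context.c are the interesting case: as the comment in the hunk says, set_mm_context() is also reached from an IPI broadcast on SMP, and IPI handlers execute in hard interrupt context even on -rt, where only raw locks may be taken. A condensed illustration with hypothetical names (demo_lock, demo_ipi), not code from the patch:

	static DEFINE_RAW_SPINLOCK(demo_lock);

	/* runs in hard irq context on every CPU, even on -rt */
	static void demo_ipi(void *info)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&demo_lock, flags);
		/* update the per-mm state here */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}

	static void demo_broadcast(void)
	{
		/* third argument: wait for all CPUs to finish */
		smp_call_function(demo_ipi, NULL, 1);
	}
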
+Index: linux-2.6/arch/arm/mm/copypage-v4mc.c
===================================================================
---- linux-2.6.orig/drivers/pci/intel-iommu.c
-+++ linux-2.6/drivers/pci/intel-iommu.c
-@@ -933,7 +933,7 @@ static void iommu_set_root_entry(struct
-
- addr = iommu->root_entry;
+--- linux-2.6.orig/arch/arm/mm/copypage-v4mc.c
++++ linux-2.6/arch/arm/mm/copypage-v4mc.c
+@@ -30,7 +30,7 @@
+ #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+ L_PTE_MT_MINICACHE)
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
+-static DEFINE_SPINLOCK(minicache_lock);
++static DEFINE_RAW_SPINLOCK(minicache_lock);
- writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
-@@ -942,7 +942,7 @@ static void iommu_set_root_entry(struct
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_RTPS), sts);
+ /*
+ * ARMv4 mini-dcache optimised copy_user_highpage
+@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct pag
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
+ __flush_dcache_page(page_mapping(from), from);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
+- spin_lock(&minicache_lock);
++ raw_spin_lock(&minicache_lock);
- static void iommu_flush_write_buffer(struct intel_iommu *iommu)
-@@ -953,14 +953,14 @@ static void iommu_flush_write_buffer(str
- if (!rwbf_quirk && !cap_rwbf(iommu->cap))
- return;
+ set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
+ flush_tlb_kernel_page(0xffff8000);
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
+ mc_copy_user_page((void *)0xffff8000, kto);
- /* Make sure hardware complete it */
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(val & DMA_GSTS_WBFS)), val);
+- spin_unlock(&minicache_lock);
++ raw_spin_unlock(&minicache_lock);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ kunmap_atomic(kto, KM_USER1);
}
+Index: linux-2.6/arch/arm/mm/copypage-v6.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mm/copypage-v6.c
++++ linux-2.6/arch/arm/mm/copypage-v6.c
+@@ -27,7 +27,7 @@
+ #define from_address (0xffff8000)
+ #define to_address (0xffffc000)
- /* return value determine if we need a write buffer flush */
-@@ -987,14 +987,14 @@ static void __iommu_flush_context(struct
- }
- val |= DMA_CCMD_ICC;
-
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
-
- /* Make sure hardware complete it */
- IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
- dmar_readq, (!(val & DMA_CCMD_ICC)), val);
+-static DEFINE_SPINLOCK(v6_lock);
++static DEFINE_RAW_SPINLOCK(v6_lock);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- }
+ /*
+ * Copy the user page. No aliasing to deal with so we can just
+@@ -89,7 +89,7 @@ static void v6_copy_user_highpage_aliasi
+ * Now copy the page using the same cache colour as the
+ * pages ultimate destination.
+ */
+- spin_lock(&v6_lock);
++ raw_spin_lock(&v6_lock);
- /* return value determine if we need a write buffer flush */
-@@ -1033,7 +1033,7 @@ static void __iommu_flush_iotlb(struct i
- if (cap_write_drain(iommu->cap))
- val |= DMA_TLB_WRITE_DRAIN;
+ set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+ set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
+@@ -102,7 +102,7 @@ static void v6_copy_user_highpage_aliasi
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- /* Note: Only uses first TLB reg currently */
- if (val_iva)
- dmar_writeq(iommu->reg + tlb_offset, val_iva);
-@@ -1043,7 +1043,7 @@ static void __iommu_flush_iotlb(struct i
- IOMMU_WAIT_OP(iommu, tlb_offset + 8,
- dmar_readq, (!(val & DMA_TLB_IVT)), val);
+ copy_page((void *)kto, (void *)kfrom);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+- spin_unlock(&v6_lock);
++ raw_spin_unlock(&v6_lock);
+ }
- /* check IOTLB invalidation granularity */
- if (DMA_TLB_IAIG(val) == 0)
-@@ -1159,7 +1159,7 @@ static void iommu_disable_protect_mem_re
- u32 pmen;
- unsigned long flags;
+ /*
+@@ -122,13 +122,13 @@ static void v6_clear_user_highpage_alias
+ * Now clear the page using the same cache colour as
+ * the pages ultimate destination.
+ */
+- spin_lock(&v6_lock);
++ raw_spin_lock(&v6_lock);
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- pmen = readl(iommu->reg + DMAR_PMEN_REG);
- pmen &= ~DMA_PMEN_EPM;
- writel(pmen, iommu->reg + DMAR_PMEN_REG);
-@@ -1168,7 +1168,7 @@ static void iommu_disable_protect_mem_re
- IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
- readl, !(pmen & DMA_PMEN_PRS), pmen);
+ set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
+ flush_tlb_kernel_page(to);
+ clear_page((void *)to);
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+- spin_unlock(&v6_lock);
++ raw_spin_unlock(&v6_lock);
}
- static int iommu_enable_translation(struct intel_iommu *iommu)
-@@ -1176,7 +1176,7 @@ static int iommu_enable_translation(stru
- u32 sts;
- unsigned long flags;
+ struct cpu_user_fns v6_user_fns __initdata = {
+Index: linux-2.6/arch/arm/mm/copypage-xscale.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mm/copypage-xscale.c
++++ linux-2.6/arch/arm/mm/copypage-xscale.c
+@@ -32,7 +32,7 @@
+ #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+ L_PTE_MT_MINICACHE)
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- iommu->gcmd |= DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+-static DEFINE_SPINLOCK(minicache_lock);
++static DEFINE_RAW_SPINLOCK(minicache_lock);
-@@ -1184,7 +1184,7 @@ static int iommu_enable_translation(stru
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_TES), sts);
+ /*
+ * XScale mini-dcache optimised copy_user_highpage
+@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct
+ if (!test_and_set_bit(PG_dcache_clean, &from->flags))
+ __flush_dcache_page(page_mapping(from), from);
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
- return 0;
- }
+- spin_lock(&minicache_lock);
++ raw_spin_lock(&minicache_lock);
-@@ -1193,7 +1193,7 @@ static int iommu_disable_translation(str
- u32 sts;
- unsigned long flag;
+ set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
+ flush_tlb_kernel_page(COPYPAGE_MINICACHE);
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
- iommu->gcmd &= ~DMA_GCMD_TE;
- writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+ mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
-@@ -1201,7 +1201,7 @@ static int iommu_disable_translation(str
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(sts & DMA_GSTS_TES)), sts);
+- spin_unlock(&minicache_lock);
++ raw_spin_unlock(&minicache_lock);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- return 0;
+ kunmap_atomic(kto, KM_USER1);
}
+Index: linux-2.6/drivers/dma/ipu/ipu_irq.c
+===================================================================
+--- linux-2.6.orig/drivers/dma/ipu/ipu_irq.c
++++ linux-2.6/drivers/dma/ipu/ipu_irq.c
+@@ -81,7 +81,7 @@ static struct ipu_irq_map irq_map[CONFIG
+ /* Protects allocations from the above array of maps */
+ static DEFINE_MUTEX(map_lock);
+ /* Protects register accesses and individual mappings */
+-static DEFINE_SPINLOCK(bank_lock);
++static DEFINE_RAW_SPINLOCK(bank_lock);
-@@ -3321,7 +3321,7 @@ static int iommu_suspend(void)
- for_each_active_iommu(iommu, drhd) {
- iommu_disable_translation(iommu);
-
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ static struct ipu_irq_map *src2map(unsigned int src)
+ {
+@@ -101,11 +101,11 @@ static void ipu_irq_unmask(struct irq_da
+ uint32_t reg;
+ unsigned long lock_flags;
- iommu->iommu_state[SR_DMAR_FECTL_REG] =
- readl(iommu->reg + DMAR_FECTL_REG);
-@@ -3332,7 +3332,7 @@ static int iommu_suspend(void)
- iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
- readl(iommu->reg + DMAR_FEUADDR_REG);
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ bank = map->bank;
+ if (!bank) {
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
+ return;
}
- return 0;
-
-@@ -3359,7 +3359,7 @@ static void iommu_resume(void)
+@@ -114,7 +114,7 @@ static void ipu_irq_unmask(struct irq_da
+ reg |= (1UL << (map->source & 31));
+ ipu_write_reg(bank->ipu, reg, bank->control);
- for_each_active_iommu(iommu, drhd) {
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ }
-- spin_lock_irqsave(&iommu->register_lock, flag);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ static void ipu_irq_mask(struct irq_data *d)
+@@ -124,11 +124,11 @@ static void ipu_irq_mask(struct irq_data
+ uint32_t reg;
+ unsigned long lock_flags;
- writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
- iommu->reg + DMAR_FECTL_REG);
-@@ -3370,7 +3370,7 @@ static void iommu_resume(void)
- writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
- iommu->reg + DMAR_FEUADDR_REG);
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
-- spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ bank = map->bank;
+ if (!bank) {
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
+ return;
}
+@@ -137,7 +137,7 @@ static void ipu_irq_mask(struct irq_data
+ reg &= ~(1UL << (map->source & 31));
+ ipu_write_reg(bank->ipu, reg, bank->control);
- for_each_active_iommu(iommu, drhd)
-Index: linux-2.6/drivers/pci/intr_remapping.c
-===================================================================
---- linux-2.6.orig/drivers/pci/intr_remapping.c
-+++ linux-2.6/drivers/pci/intr_remapping.c
-@@ -46,7 +46,7 @@ static __init int setup_intremap(char *s
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
- early_param("intremap", setup_intremap);
-
--static DEFINE_SPINLOCK(irq_2_ir_lock);
-+static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
- static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
- {
-@@ -63,12 +63,12 @@ int get_irte(int irq, struct irte *entry
- if (!entry || !irq_iommu)
- return -1;
+ static void ipu_irq_ack(struct irq_data *d)
+@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data
+ struct ipu_irq_bank *bank;
+ unsigned long lock_flags;
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
- index = irq_iommu->irte_index + irq_iommu->sub_handle;
- *entry = *(irq_iommu->iommu->ir_table->base + index);
+ bank = map->bank;
+ if (!bank) {
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
+ pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
+ return;
+ }
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- return 0;
+ ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
-@@ -102,7 +102,7 @@ int alloc_irte(struct intel_iommu *iommu
- return -1;
- }
+ /**
+@@ -172,11 +172,11 @@ bool ipu_irq_status(unsigned int irq)
+ unsigned long lock_flags;
+ bool ret;
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
- do {
- for (i = index; i < index + count; i++)
- if (table->base[i].present)
-@@ -114,7 +114,7 @@ int alloc_irte(struct intel_iommu *iommu
- index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+ bank = map->bank;
+ ret = bank && ipu_read_reg(bank->ipu, bank->status) &
+ (1UL << (map->source & 31));
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- if (index == start_index) {
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- printk(KERN_ERR "can't allocate an IRTE\n");
- return -1;
- }
-@@ -128,7 +128,7 @@ int alloc_irte(struct intel_iommu *iommu
- irq_iommu->sub_handle = 0;
- irq_iommu->irte_mask = mask;
+ return ret;
+ }
+@@ -213,10 +213,10 @@ int ipu_irq_map(unsigned int source)
+ if (irq_map[i].source < 0) {
+ unsigned long lock_flags;
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+ irq_map[i].source = source;
+ irq_map[i].bank = irq_bank + source / 32;
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- return index;
- }
-@@ -153,10 +153,10 @@ int map_irq_to_irte_handle(int irq, u16
- if (!irq_iommu)
- return -1;
+ ret = irq_map[i].irq;
+ pr_debug("IPU: mapped source %u to IRQ %u\n",
+@@ -252,10 +252,10 @@ int ipu_irq_unmap(unsigned int source)
+ pr_debug("IPU: unmapped source %u from IRQ %u\n",
+ source, irq_map[i].irq);
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
- *sub_handle = irq_iommu->sub_handle;
- index = irq_iommu->irte_index;
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
- return index;
- }
+- spin_lock_irqsave(&bank_lock, lock_flags);
++ raw_spin_lock_irqsave(&bank_lock, lock_flags);
+ irq_map[i].source = -EINVAL;
+ irq_map[i].bank = NULL;
+- spin_unlock_irqrestore(&bank_lock, lock_flags);
++ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-@@ -168,14 +168,14 @@ int set_irte_irq(int irq, struct intel_i
- if (!irq_iommu)
- return -1;
+ ret = 0;
+ break;
+@@ -276,7 +276,7 @@ static void ipu_irq_err(unsigned int irq
+ for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
+ struct ipu_irq_bank *bank = irq_bank + i;
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+- spin_lock(&bank_lock);
++ raw_spin_lock(&bank_lock);
+ status = ipu_read_reg(ipu, bank->status);
+ /*
+ * Don't think we have to clear all interrupts here, they will
+@@ -284,18 +284,18 @@ static void ipu_irq_err(unsigned int irq
+ * might want to clear unhandled interrupts after the loop...
+ */
+ status &= ipu_read_reg(ipu, bank->control);
+- spin_unlock(&bank_lock);
++ raw_spin_unlock(&bank_lock);
+ while ((line = ffs(status))) {
+ struct ipu_irq_map *map;
- irq_iommu->iommu = iommu;
- irq_iommu->irte_index = index;
- irq_iommu->sub_handle = subhandle;
- irq_iommu->irte_mask = 0;
+ line--;
+ status &= ~(1UL << line);
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+- spin_lock(&bank_lock);
++ raw_spin_lock(&bank_lock);
+ map = src2map(32 * i + line);
+ if (map)
+ irq = map->irq;
+- spin_unlock(&bank_lock);
++ raw_spin_unlock(&bank_lock);
- return 0;
- }
-@@ -191,7 +191,7 @@ int modify_irte(int irq, struct irte *ir
- if (!irq_iommu)
- return -1;
+ if (!map) {
+ pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
+@@ -317,22 +317,22 @@ static void ipu_irq_fn(unsigned int irq,
+ for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
+ struct ipu_irq_bank *bank = irq_bank + i;
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+- spin_lock(&bank_lock);
++ raw_spin_lock(&bank_lock);
+ status = ipu_read_reg(ipu, bank->status);
+ /* Not clearing all interrupts, see above */
+ status &= ipu_read_reg(ipu, bank->control);
+- spin_unlock(&bank_lock);
++ raw_spin_unlock(&bank_lock);
+ while ((line = ffs(status))) {
+ struct ipu_irq_map *map;
- iommu = irq_iommu->iommu;
+ line--;
+ status &= ~(1UL << line);
-@@ -203,7 +203,7 @@ int modify_irte(int irq, struct irte *ir
- __iommu_flush_cache(iommu, irte, sizeof(*irte));
+- spin_lock(&bank_lock);
++ raw_spin_lock(&bank_lock);
+ map = src2map(32 * i + line);
+ if (map)
+ irq = map->irq;
+- spin_unlock(&bank_lock);
++ raw_spin_unlock(&bank_lock);
- rc = qi_flush_iec(iommu, index, 0);
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ if (!map) {
+ pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
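
bank_lock above is taken from the irq_chip ->irq_mask/->irq_unmask/->irq_ack callbacks; genirq invokes those with the (always raw) irq_desc lock held and interrupts off, so nothing sleeping may nest inside them on -rt. The shape of such a callback, with a hypothetical chip rather than the IPU driver's own:

	static DEFINE_RAW_SPINLOCK(demo_bank_lock);

	static void demo_irq_mask(struct irq_data *d)
	{
		unsigned long flags;

		/* called under the raw irq_desc lock, irqs off:
		 * only a raw lock may be taken here on -rt */
		raw_spin_lock_irqsave(&demo_bank_lock, flags);
		/* clear the source's enable bit in the control reg */
		raw_spin_unlock_irqrestore(&demo_bank_lock, flags);
	}

	static struct irq_chip demo_chip = {
		.name		= "demo",
		.irq_mask	= demo_irq_mask,
	};
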
+Index: linux-2.6/drivers/pci/dmar.c
+===================================================================
+--- linux-2.6.orig/drivers/pci/dmar.c
++++ linux-2.6/drivers/pci/dmar.c
+@@ -800,7 +800,7 @@ int alloc_iommu(struct dmar_drhd_unit *d
+ (unsigned long long)iommu->cap,
+ (unsigned long long)iommu->ecap);
- return rc;
- }
-@@ -271,7 +271,7 @@ int free_irte(int irq)
- if (!irq_iommu)
- return -1;
+- spin_lock_init(&iommu->register_lock);
++ raw_spin_lock_init(&iommu->register_lock);
-- spin_lock_irqsave(&irq_2_ir_lock, flags);
-+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ drhd->iommu = iommu;
+ return 0;
+@@ -921,11 +921,11 @@ int qi_submit_sync(struct qi_desc *desc,
+ restart:
+ rc = 0;
- rc = clear_entries(irq_iommu);
+- spin_lock_irqsave(&qi->q_lock, flags);
++ raw_spin_lock_irqsave(&qi->q_lock, flags);
+ while (qi->free_cnt < 3) {
+- spin_unlock_irqrestore(&qi->q_lock, flags);
++ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
+ cpu_relax();
+- spin_lock_irqsave(&qi->q_lock, flags);
++ raw_spin_lock_irqsave(&qi->q_lock, flags);
+ }
-@@ -280,7 +280,7 @@ int free_irte(int irq)
- irq_iommu->sub_handle = 0;
- irq_iommu->irte_mask = 0;
+ index = qi->free_head;
+@@ -965,15 +965,15 @@ restart:
+ if (rc)
+ break;
-- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+- spin_unlock(&qi->q_lock);
++ raw_spin_unlock(&qi->q_lock);
+ cpu_relax();
+- spin_lock(&qi->q_lock);
++ raw_spin_lock(&qi->q_lock);
+ }
- return rc;
- }
-@@ -410,7 +410,7 @@ static void iommu_set_intr_remapping(str
+ qi->desc_status[index] = QI_DONE;
+
+ reclaim_free_desc(qi);
+- spin_unlock_irqrestore(&qi->q_lock, flags);
++ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
- addr = virt_to_phys((void *)iommu->ir_table->base);
+ if (rc == -EAGAIN)
+ goto restart;
+@@ -1062,7 +1062,7 @@ void dmar_disable_qi(struct intel_iommu
+ if (!ecap_qis(iommu->ecap))
+ return;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- dmar_writeq(iommu->reg + DMAR_IRTA_REG,
- (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
-@@ -421,7 +421,7 @@ static void iommu_set_intr_remapping(str
-
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_IRTPS), sts);
+ sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ if (!(sts & DMA_GSTS_QIES))
+@@ -1082,7 +1082,7 @@ void dmar_disable_qi(struct intel_iommu
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
+ !(sts & DMA_GSTS_QIES), sts);
+ end:
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ }
- /*
- * global invalidation of interrupt entry cache before enabling
-@@ -429,7 +429,7 @@ static void iommu_set_intr_remapping(str
- */
- qi_global_iec(iommu);
+ /*
+@@ -1097,7 +1097,7 @@ static void __dmar_enable_qi(struct inte
+ qi->free_head = qi->free_tail = 0;
+ qi->free_cnt = QI_LENGTH;
- spin_lock_irqsave(&iommu->register_lock, flags);
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- /* Enable interrupt-remapping */
- iommu->gcmd |= DMA_GCMD_IRE;
-@@ -438,7 +438,7 @@ static void iommu_set_intr_remapping(str
- IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_IRES), sts);
+ /* write zero to the tail reg */
+ writel(0, iommu->reg + DMAR_IQT_REG);
+@@ -1110,7 +1110,7 @@ static void __dmar_enable_qi(struct inte
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
+ /*
+@@ -1159,7 +1159,7 @@ int dmar_enable_qi(struct intel_iommu *i
+ qi->free_head = qi->free_tail = 0;
+ qi->free_cnt = QI_LENGTH;
-@@ -486,7 +486,7 @@ static void iommu_disable_intr_remapping
- */
- qi_global_iec(iommu);
+- spin_lock_init(&qi->q_lock);
++ raw_spin_lock_init(&qi->q_lock);
-- spin_lock_irqsave(&iommu->register_lock, flags);
-+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ __dmar_enable_qi(iommu);
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
- if (!(sts & DMA_GSTS_IRES))
-@@ -499,7 +499,7 @@ static void iommu_disable_intr_remapping
- readl, !(sts & DMA_GSTS_IRES), sts);
+@@ -1225,11 +1225,11 @@ void dmar_msi_unmask(struct irq_data *da
+ unsigned long flag;
- end:
-- spin_unlock_irqrestore(&iommu->register_lock, flags);
-+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ /* unmask it */
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(0, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
- int __init intr_remapping_supported(void)
-Index: linux-2.6/include/linux/intel-iommu.h
-===================================================================
---- linux-2.6.orig/include/linux/intel-iommu.h
-+++ linux-2.6/include/linux/intel-iommu.h
-@@ -271,7 +271,7 @@ struct qi_desc {
- };
+ void dmar_msi_mask(struct irq_data *data)
+@@ -1238,11 +1238,11 @@ void dmar_msi_mask(struct irq_data *data
+ struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
- struct q_inval {
-- spinlock_t q_lock;
-+ raw_spinlock_t q_lock;
- struct qi_desc *desc; /* invalidation queue */
- int *desc_status; /* desc status */
- int free_head; /* first free entry */
-@@ -311,7 +311,7 @@ struct intel_iommu {
- u64 cap;
- u64 ecap;
- u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
-- spinlock_t register_lock; /* protect register handling */
-+ raw_spinlock_t register_lock; /* protect register handling */
- int seq_id; /* sequence id of the iommu */
- int agaw; /* agaw of this iommu */
- int msagaw; /* max sagaw of this iommu */
-Index: linux-2.6/kernel/signal.c
-===================================================================
---- linux-2.6.orig/kernel/signal.c
-+++ linux-2.6/kernel/signal.c
-@@ -300,13 +300,45 @@ static bool task_participate_group_stop(
- return false;
+ /* mask it */
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-+#ifdef __HAVE_ARCH_CMPXCHG
-+static inline struct sigqueue *get_task_cache(struct task_struct *t)
-+{
-+ struct sigqueue *q = t->sigqueue_cache;
-+
-+ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
-+ return NULL;
-+ return q;
-+}
-+
-+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-+{
-+ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
-+ return 0;
-+ return 1;
-+}
-+
-+#else
-+
-+static inline struct sigqueue *get_task_cache(struct task_struct *t)
-+{
-+ return NULL;
-+}
-+
-+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-+{
-+ return 1;
-+}
-+
-+#endif
-+
- /*
- * allocate a new signal queue record
- * - this may be called without locks if and only if t == current, otherwise an
- * appropriate lock must be held to stop the target task from exiting
- */
- static struct sigqueue *
--__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
-+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
-+ int override_rlimit, int fromslab)
- {
- struct sigqueue *q = NULL;
- struct user_struct *user;
-@@ -323,7 +355,10 @@ __sigqueue_alloc(int sig, struct task_st
- if (override_rlimit ||
- atomic_read(&user->sigpending) <=
- task_rlimit(t, RLIMIT_SIGPENDING)) {
-- q = kmem_cache_alloc(sigqueue_cachep, flags);
-+ if (!fromslab)
-+ q = get_task_cache(t);
-+ if (!q)
-+ q = kmem_cache_alloc(sigqueue_cachep, flags);
- } else {
- print_dropped_signal(sig);
+ void dmar_msi_write(int irq, struct msi_msg *msg)
+@@ -1250,11 +1250,11 @@ void dmar_msi_write(int irq, struct msi_
+ struct intel_iommu *iommu = irq_get_handler_data(irq);
+ unsigned long flag;
+
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+ writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+ writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+
+ void dmar_msi_read(int irq, struct msi_msg *msg)
+@@ -1262,11 +1262,11 @@ void dmar_msi_read(int irq, struct msi_m
+ struct intel_iommu *iommu = irq_get_handler_data(irq);
+ unsigned long flag;
+
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+ msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+ msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+
+ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
+@@ -1303,7 +1303,7 @@ irqreturn_t dmar_fault(int irq, void *de
+ u32 fault_status;
+ unsigned long flag;
+
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ if (fault_status)
+ printk(KERN_ERR "DRHD: handling fault status reg %x\n",
+@@ -1342,7 +1342,7 @@ irqreturn_t dmar_fault(int irq, void *de
+ writel(DMA_FRCD_F, iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+ dmar_fault_do_one(iommu, type, fault_reason,
+ source_id, guest_addr);
+@@ -1350,14 +1350,14 @@ irqreturn_t dmar_fault(int irq, void *de
+ fault_index++;
+ if (fault_index >= cap_num_fault_regs(iommu->cap))
+ fault_index = 0;
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
}
-@@ -340,6 +375,13 @@ __sigqueue_alloc(int sig, struct task_st
- return q;
+ clear_rest:
+ /* clear all the other faults */
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ writel(fault_status, iommu->reg + DMAR_FSTS_REG);
+
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ return IRQ_HANDLED;
}
-+static struct sigqueue *
-+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
-+ int override_rlimit)
-+{
-+ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
-+}
-+
- static void __sigqueue_free(struct sigqueue *q)
- {
- if (q->flags & SIGQUEUE_PREALLOC)
-@@ -349,6 +391,21 @@ static void __sigqueue_free(struct sigqu
- kmem_cache_free(sigqueue_cachep, q);
+@@ -1388,7 +1388,7 @@ int dmar_set_interrupt(struct intel_iomm
+ return ret;
+ }
+
+- ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
++ ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
+ if (ret)
+ printk(KERN_ERR "IOMMU: can't request irq\n");
+ return ret;
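
The one non-lock change in dmar.c sits in the hunk above: under the forced interrupt threading that -rt enables, handlers registered through request_irq() normally run from a kernel thread. The DMAR fault handler only touches the now-raw register_lock and must run even when handler threads cannot, so it opts out with IRQF_NO_THREAD. A sketch of the call shape, with hypothetical demo_fault/dev_id names:

	static irqreturn_t demo_fault(int irq, void *dev_id)
	{
		/* hard interrupt context on -rt too: raw locks only */
		return IRQ_HANDLED;
	}

	static int demo_setup(int irq, void *dev_id)
	{
		/* IRQF_NO_THREAD keeps the handler out of the
		 * irq thread under forced threading */
		return request_irq(irq, demo_fault, IRQF_NO_THREAD,
				   "demo", dev_id);
	}
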
+Index: linux-2.6/drivers/pci/intel-iommu.c
+===================================================================
+--- linux-2.6.orig/drivers/pci/intel-iommu.c
++++ linux-2.6/drivers/pci/intel-iommu.c
+@@ -933,7 +933,7 @@ static void iommu_set_root_entry(struct
+
+ addr = iommu->root_entry;
+
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
+
+ writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
+@@ -942,7 +942,7 @@ static void iommu_set_root_entry(struct
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_RTPS), sts);
+
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ }
+
+ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
+@@ -953,14 +953,14 @@ static void iommu_flush_write_buffer(str
+ if (!rwbf_quirk && !cap_rwbf(iommu->cap))
+ return;
+
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (!(val & DMA_GSTS_WBFS)), val);
+
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-+static void sigqueue_free_current(struct sigqueue *q)
-+{
-+ struct user_struct *up;
-+
-+ if (q->flags & SIGQUEUE_PREALLOC)
-+ return;
-+
-+ up = q->user;
-+ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
-+ atomic_dec(&up->sigpending);
-+ free_uid(up);
-+ } else
-+ __sigqueue_free(q);
-+}
-+
- void flush_sigqueue(struct sigpending *queue)
- {
- struct sigqueue *q;
-@@ -362,6 +419,21 @@ void flush_sigqueue(struct sigpending *q
+ /* return value determine if we need a write buffer flush */
+@@ -987,14 +987,14 @@ static void __iommu_flush_context(struct
+ }
+ val |= DMA_CCMD_ICC;
+
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
+
+ /* Make sure hardware complete it */
+ IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
+ dmar_readq, (!(val & DMA_CCMD_ICC)), val);
+
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
- /*
-+ * Called from __exit_signal. Flush tsk->pending and
-+ * tsk->sigqueue_cache
-+ */
-+void flush_task_sigqueue(struct task_struct *tsk)
-+{
-+ struct sigqueue *q;
-+
-+ flush_sigqueue(&tsk->pending);
-+
-+ q = get_task_cache(tsk);
-+ if (q)
-+ kmem_cache_free(sigqueue_cachep, q);
-+}
-+
-+/*
- * Flush all pending signals for a task.
- */
- void __flush_signals(struct task_struct *t)
-@@ -509,7 +581,7 @@ static void collect_signal(int sig, stru
- still_pending:
- list_del_init(&first->list);
- copy_siginfo(info, &first->info);
-- __sigqueue_free(first);
-+ sigqueue_free_current(first);
- } else {
- /*
- * Ok, it wasn't in the queue. This must be
-@@ -555,6 +627,8 @@ int dequeue_signal(struct task_struct *t
- {
- int signr;
+ /* return value determines if we need a write buffer flush */
+@@ -1033,7 +1033,7 @@ static void __iommu_flush_iotlb(struct i
+ if (cap_write_drain(iommu->cap))
+ val |= DMA_TLB_WRITE_DRAIN;
-+ WARN_ON_ONCE(tsk != current);
-+
- /* We only dequeue private signals from ourselves, we don't let
- * signalfd steal them
- */
-@@ -637,6 +711,9 @@ void signal_wake_up(struct task_struct *
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ /* Note: Only uses first TLB reg currently */
+ if (val_iva)
+ dmar_writeq(iommu->reg + tlb_offset, val_iva);
+@@ -1043,7 +1043,7 @@ static void __iommu_flush_iotlb(struct i
+ IOMMU_WAIT_OP(iommu, tlb_offset + 8,
+ dmar_readq, (!(val & DMA_TLB_IVT)), val);
- set_tsk_thread_flag(t, TIF_SIGPENDING);
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
-+ if (unlikely(t == current))
-+ return;
-+
- /*
- * For SIGKILL, we want to wake it up in the stopped/traced/killable
- * case. We don't check t->state here because there is a race with it
-@@ -1179,12 +1256,12 @@ struct sighand_struct *__lock_task_sigha
- struct sighand_struct *sighand;
+ /* check IOTLB invalidation granularity */
+ if (DMA_TLB_IAIG(val) == 0)
+@@ -1159,7 +1159,7 @@ static void iommu_disable_protect_mem_re
+ u32 pmen;
+ unsigned long flags;
- for (;;) {
-- local_irq_save(*flags);
-+ local_irq_save_nort(*flags);
- rcu_read_lock();
- sighand = rcu_dereference(tsk->sighand);
- if (unlikely(sighand == NULL)) {
- rcu_read_unlock();
-- local_irq_restore(*flags);
-+ local_irq_restore_nort(*flags);
- break;
- }
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ pmen = readl(iommu->reg + DMAR_PMEN_REG);
+ pmen &= ~DMA_PMEN_EPM;
+ writel(pmen, iommu->reg + DMAR_PMEN_REG);
+@@ -1168,7 +1168,7 @@ static void iommu_disable_protect_mem_re
+ IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
+ readl, !(pmen & DMA_PMEN_PRS), pmen);
-@@ -1195,7 +1272,7 @@ struct sighand_struct *__lock_task_sigha
- }
- spin_unlock(&sighand->siglock);
- rcu_read_unlock();
-- local_irq_restore(*flags);
-+ local_irq_restore_nort(*flags);
- }
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ }
- return sighand;
-@@ -1434,7 +1511,8 @@ EXPORT_SYMBOL(kill_pid);
- */
- struct sigqueue *sigqueue_alloc(void)
- {
-- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
-+ /* Preallocated sigqueue objects always come from the slab cache! */
-+ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
+ static int iommu_enable_translation(struct intel_iommu *iommu)
+@@ -1176,7 +1176,7 @@ static int iommu_enable_translation(stru
+ u32 sts;
+ unsigned long flags;
- if (q)
- q->flags |= SIGQUEUE_PREALLOC;
-@@ -1790,7 +1868,7 @@ static void ptrace_stop(int exit_code, i
- */
- preempt_disable();
- read_unlock(&tasklist_lock);
-- preempt_enable_no_resched();
-+ __preempt_enable_no_resched();
- schedule();
- } else {
- /*
-Index: linux-2.6/kernel/posix-timers.c
-===================================================================
---- linux-2.6.orig/kernel/posix-timers.c
-+++ linux-2.6/kernel/posix-timers.c
-@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_
- static struct pid *good_sigevent(sigevent_t * event)
- {
- struct task_struct *rtn = current->group_leader;
-+ int sig = event->sigev_signo;
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ iommu->gcmd |= DMA_GCMD_TE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
- if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
- (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-@@ -447,7 +448,8 @@ static struct pid *good_sigevent(sigeven
- return NULL;
+@@ -1184,7 +1184,7 @@ static int iommu_enable_translation(stru
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_TES), sts);
- if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-+ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
-+ sig_kernel_coredump(sig)))
- return NULL;
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ return 0;
+ }
- return task_pid(rtn);
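The stricter check above refuses signals that the kernel would never deliver through a queueable path (the SIGKILL/SIGSTOP class and core-dump signals) instead of only range-checking sigev_signo. From user space the contract is unchanged for sane callers; a hedged, runnable example of a timer_create() invocation that still satisfies good_sigevent():

#include <signal.h>
#include <time.h>

/* create a timer that queues a plain real-time signal to the process */
int make_timer(timer_t *out)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGRTMIN,	/* valid and queueable */
	};

	return timer_create(CLOCK_MONOTONIC, &sev, out);
}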
-@@ -764,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
- return overrun;
+@@ -1193,7 +1193,7 @@ static int iommu_disable_translation(str
+ u32 sts;
+ unsigned long flag;
+
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ iommu->gcmd &= ~DMA_GCMD_TE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+
+@@ -1201,7 +1201,7 @@ static int iommu_disable_translation(str
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (!(sts & DMA_GSTS_TES)), sts);
+
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ return 0;
}
-+/*
-+ * Protected by RCU!
-+ */
-+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (kc->timer_set == common_timer_set)
-+ hrtimer_wait_for_timer(&timr->it.real.timer);
-+ else
-+ /* FIXME: Whacky hack for posix-cpu-timers */
-+ schedule_timeout(1);
-+#endif
-+}
-+
- /* Set a POSIX.1b interval timer. */
- /* timr->it_lock is taken. */
- static int
-@@ -841,6 +857,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
- if (!timr)
- return -EINVAL;
+@@ -3321,7 +3321,7 @@ static int iommu_suspend(void)
+ for_each_active_iommu(iommu, drhd) {
+ iommu_disable_translation(iommu);
-+ rcu_read_lock();
- kc = clockid_to_kclock(timr->it_clock);
- if (WARN_ON_ONCE(!kc || !kc->timer_set))
- error = -EINVAL;
-@@ -849,9 +866,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+
+ iommu->iommu_state[SR_DMAR_FECTL_REG] =
+ readl(iommu->reg + DMAR_FECTL_REG);
+@@ -3332,7 +3332,7 @@ static int iommu_suspend(void)
+ iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
+ readl(iommu->reg + DMAR_FEUADDR_REG);
- unlock_timer(timr, flag);
- if (error == TIMER_RETRY) {
-+ timer_wait_for_callback(kc, timr);
- rtn = NULL; // We already got the old time...
-+ rcu_read_unlock();
- goto retry;
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-+ rcu_read_unlock();
+ return 0;
- if (old_setting && !error &&
- copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-@@ -889,10 +909,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
- if (!timer)
- return -EINVAL;
+@@ -3359,7 +3359,7 @@ static void iommu_resume(void)
-+ rcu_read_lock();
- if (timer_delete_hook(timer) == TIMER_RETRY) {
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
- }
-+ rcu_read_unlock();
+ for_each_active_iommu(iommu, drhd) {
- spin_lock(&current->sighand->siglock);
- list_del(&timer->list);
-@@ -918,8 +943,18 @@ static void itimer_delete(struct k_itime
- retry_delete:
- spin_lock_irqsave(&timer->it_lock, flags);
+- spin_lock_irqsave(&iommu->register_lock, flag);
++ raw_spin_lock_irqsave(&iommu->register_lock, flag);
-+ /* On RT we can race with a deletion */
-+ if (!timer->it_signal) {
-+ unlock_timer(timer, flags);
-+ return;
-+ }
-+
- if (timer_delete_hook(timer) == TIMER_RETRY) {
-+ rcu_read_lock();
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
+ writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
+ iommu->reg + DMAR_FECTL_REG);
+@@ -3370,7 +3370,7 @@ static void iommu_resume(void)
+ writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
+ iommu->reg + DMAR_FEUADDR_REG);
+
+- spin_unlock_irqrestore(&iommu->register_lock, flag);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
- list_del(&timer->list);
-Index: linux-2.6/include/linux/signal.h
+
+ for_each_active_iommu(iommu, drhd)
+Index: linux-2.6/drivers/pci/intr_remapping.c
===================================================================
---- linux-2.6.orig/include/linux/signal.h
-+++ linux-2.6/include/linux/signal.h
-@@ -229,6 +229,7 @@ static inline void init_sigpending(struc
+--- linux-2.6.orig/drivers/pci/intr_remapping.c
++++ linux-2.6/drivers/pci/intr_remapping.c
+@@ -46,7 +46,7 @@ static __init int setup_intremap(char *s
}
+ early_param("intremap", setup_intremap);
- extern void flush_sigqueue(struct sigpending *queue);
-+extern void flush_task_sigqueue(struct task_struct *tsk);
+-static DEFINE_SPINLOCK(irq_2_ir_lock);
++static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
- /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
- static inline int valid_signal(unsigned long sig)
-Index: linux-2.6/kernel/exit.c
-===================================================================
---- linux-2.6.orig/kernel/exit.c
-+++ linux-2.6/kernel/exit.c
-@@ -142,7 +142,7 @@ static void __exit_signal(struct task_st
- * Do this under ->siglock, we can race with another thread
- * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
- */
-- flush_sigqueue(&tsk->pending);
-+ flush_task_sigqueue(tsk);
- tsk->sighand = NULL;
- spin_unlock(&sighand->siglock);
+ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+ {
+@@ -63,12 +63,12 @@ int get_irte(int irq, struct irte *entry
+ if (!entry || !irq_iommu)
+ return -1;
-Index: linux-2.6/kernel/fork.c
-===================================================================
---- linux-2.6.orig/kernel/fork.c
-+++ linux-2.6/kernel/fork.c
-@@ -198,7 +198,18 @@ void __put_task_struct(struct task_struc
- if (!profile_handoff_task(tsk))
- free_task(tsk);
- }
-+#ifndef CONFIG_PREEMPT_RT_BASE
- EXPORT_SYMBOL_GPL(__put_task_struct);
-+#else
-+void __put_task_struct_cb(struct rcu_head *rhp)
-+{
-+ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
-+
-+ __put_task_struct(tsk);
-+
-+}
-+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
-+#endif
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
- /*
- * macro override instead of weak attribute alias, to workaround
-@@ -546,6 +557,19 @@ void __mmdrop(struct mm_struct *mm)
+ index = irq_iommu->irte_index + irq_iommu->sub_handle;
+ *entry = *(irq_iommu->iommu->ir_table->base + index);
+
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ return 0;
}
- EXPORT_SYMBOL_GPL(__mmdrop);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+/*
-+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
-+ * want another facility to make this work.
-+ */
-+void __mmdrop_delayed(struct rcu_head *rhp)
-+{
-+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
-+
-+ __mmdrop(mm);
-+}
-+#endif
-+
- /*
- * Decrement the use count and release all resources for an mm.
- */
-@@ -1030,6 +1054,9 @@ void mm_init_owner(struct mm_struct *mm,
- */
- static void posix_cpu_timers_init(struct task_struct *tsk)
- {
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ tsk->posix_timer_list = NULL;
-+#endif
- tsk->cputime_expires.prof_exp = cputime_zero;
- tsk->cputime_expires.virt_exp = cputime_zero;
- tsk->cputime_expires.sched_exp = 0;
-@@ -1137,6 +1164,7 @@ static struct task_struct *copy_process(
- spin_lock_init(&p->alloc_lock);
+@@ -102,7 +102,7 @@ int alloc_irte(struct intel_iommu *iommu
+ return -1;
+ }
- init_sigpending(&p->pending);
-+ p->sigqueue_cache = NULL;
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ do {
+ for (i = index; i < index + count; i++)
+ if (table->base[i].present)
+@@ -114,7 +114,7 @@ int alloc_irte(struct intel_iommu *iommu
+ index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
- p->utime = cputime_zero;
- p->stime = cputime_zero;
-@@ -1194,6 +1222,9 @@ static struct task_struct *copy_process(
- p->hardirq_context = 0;
- p->softirq_context = 0;
- #endif
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ p->pagefault_disabled = 0;
-+#endif
- #ifdef CONFIG_LOCKDEP
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
-Index: linux-2.6/kernel/sched_rt.c
-===================================================================
---- linux-2.6.orig/kernel/sched_rt.c
-+++ linux-2.6/kernel/sched_rt.c
-@@ -631,6 +631,7 @@ static int sched_rt_runtime_exceeded(str
+ if (index == start_index) {
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ printk(KERN_ERR "can't allocate an IRTE\n");
+ return -1;
+ }
+@@ -128,7 +128,7 @@ int alloc_irte(struct intel_iommu *iommu
+ irq_iommu->sub_handle = 0;
+ irq_iommu->irte_mask = mask;
- if (rt_rq->rt_time > runtime) {
- rt_rq->rt_throttled = 1;
-+ printk_once(KERN_WARNING "sched: RT throttling activated\n");
- if (rt_rq_throttled(rt_rq)) {
- sched_rt_rq_dequeue(rt_rq);
- return 1;
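The printk_once() added above emits a single diagnostic the first time the RT runqueue burns through its runtime budget and gets throttled, rather than spamming the log every period. The warn-once idiom it relies on is just a static flag; a user-space analogue for reference (illustrative, not kernel code):

#include <stdio.h>

static void warn_rt_throttle_once(void)
{
	static int warned;

	/* emit the warning only on the first throttling event */
	if (!warned) {
		warned = 1;
		fprintf(stderr, "sched: RT throttling activated\n");
	}
}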
-@@ -1186,7 +1187,7 @@ static void deactivate_task(struct rq *r
- static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
- {
- if (!task_running(rq, p) &&
-- (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
-+ (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
- (p->rt.nr_cpus_allowed > 1))
- return 1;
- return 0;
-@@ -1331,7 +1332,7 @@ static struct rq *find_lock_lowest_rq(st
- */
- if (unlikely(task_rq(task) != rq ||
- !cpumask_test_cpu(lowest_rq->cpu,
-- &task->cpus_allowed) ||
-+ tsk_cpus_allowed(task)) ||
- task_running(rq, task) ||
- !task->on_rq)) {
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-@@ -1614,9 +1615,6 @@ static void set_cpus_allowed_rt(struct t
+ return index;
+ }
+@@ -153,10 +153,10 @@ int map_irq_to_irte_handle(int irq, u16
+ if (!irq_iommu)
+ return -1;
- update_rt_migration(&rq->rt);
- }
--
-- cpumask_copy(&p->cpus_allowed, new_mask);
-- p->rt.nr_cpus_allowed = weight;
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ *sub_handle = irq_iommu->sub_handle;
+ index = irq_iommu->irte_index;
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+ return index;
}
- /* Assumes rq->lock is held */
-Index: linux-2.6/include/asm-generic/cmpxchg-local.h
-===================================================================
---- linux-2.6.orig/include/asm-generic/cmpxchg-local.h
-+++ linux-2.6/include/asm-generic/cmpxchg-local.h
-@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_lo
- if (size == 8 && sizeof(unsigned long) != 8)
- wrong_size_cmpxchg(ptr);
+@@ -168,14 +168,14 @@ int set_irte_irq(int irq, struct intel_i
+ if (!irq_iommu)
+ return -1;
-- local_irq_save(flags);
-+ raw_local_irq_save(flags);
- switch (size) {
- case 1: prev = *(u8 *)ptr;
- if (prev == old)
-@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_lo
- default:
- wrong_size_cmpxchg(ptr);
- }
-- local_irq_restore(flags);
-+ raw_local_irq_restore(flags);
- return prev;
- }
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+
+ irq_iommu->iommu = iommu;
+ irq_iommu->irte_index = index;
+ irq_iommu->sub_handle = subhandle;
+ irq_iommu->irte_mask = 0;
-@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_gene
- u64 prev;
- unsigned long flags;
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-- local_irq_save(flags);
-+ raw_local_irq_save(flags);
- prev = *(u64 *)ptr;
- if (prev == old)
- *(u64 *)ptr = new;
-- local_irq_restore(flags);
-+ raw_local_irq_restore(flags);
- return prev;
+ return 0;
}
+@@ -191,7 +191,7 @@ int modify_irte(int irq, struct irte *ir
+ if (!irq_iommu)
+ return -1;
-Index: linux-2.6/kernel/rtmutex-debug.h
-===================================================================
---- linux-2.6.orig/kernel/rtmutex-debug.h
-+++ linux-2.6/kernel/rtmutex-debug.h
-@@ -17,17 +17,17 @@ extern void debug_rt_mutex_free_waiter(s
- extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
- extern void debug_rt_mutex_lock(struct rt_mutex *lock);
- extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
--extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
-- struct task_struct *powner);
-+extern void
-+debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner);
- extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
- extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
- struct rt_mutex *lock);
- extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
--# define debug_rt_mutex_reset_waiter(w) \
-+# define debug_rt_mutex_reset_waiter(w) \
- do { (w)->deadlock_lock = NULL; } while (0)
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
--static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
-- int detect)
-+static inline int
-+debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, int detect)
- {
-- return (waiter != NULL);
-+ return waiter != NULL;
+ iommu = irq_iommu->iommu;
+
+@@ -203,7 +203,7 @@ int modify_irte(int irq, struct irte *ir
+ __iommu_flush_cache(iommu, irte, sizeof(*irte));
+
+ rc = qi_flush_iec(iommu, index, 0);
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+
+ return rc;
}
-Index: linux-2.6/drivers/char/random.c
-===================================================================
---- linux-2.6.orig/drivers/char/random.c
-+++ linux-2.6/drivers/char/random.c
-@@ -433,7 +433,7 @@ static struct entropy_store input_pool =
- .poolinfo = &poolinfo_table[0],
- .name = "input",
- .limit = 1,
-- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
-+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
- .pool = input_pool_data
- };
+@@ -271,7 +271,7 @@ int free_irte(int irq)
+ if (!irq_iommu)
+ return -1;
-@@ -442,7 +442,7 @@ static struct entropy_store blocking_poo
- .name = "blocking",
- .limit = 1,
- .pull = &input_pool,
-- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
-+ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
- .pool = blocking_pool_data
- };
+- spin_lock_irqsave(&irq_2_ir_lock, flags);
++ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-@@ -450,7 +450,7 @@ static struct entropy_store nonblocking_
- .poolinfo = &poolinfo_table[1],
- .name = "nonblocking",
- .pull = &input_pool,
-- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
-+ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
- .pool = nonblocking_pool_data
- };
+ rc = clear_entries(irq_iommu);
-@@ -633,8 +633,11 @@ static void add_timer_randomness(struct
- preempt_disable();
- /* if over the trickle threshold, use only 1 in 4096 samples */
- if (input_pool.entropy_count > trickle_thresh &&
-- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
-- goto out;
-+ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) {
-+ preempt_enable();
-+ return;
-+ }
-+ preempt_enable();
+@@ -280,7 +280,7 @@ int free_irte(int irq)
+ irq_iommu->sub_handle = 0;
+ irq_iommu->irte_mask = 0;
- sample.jiffies = jiffies;
- sample.cycles = get_cycles();
-@@ -676,8 +679,6 @@ static void add_timer_randomness(struct
- credit_entropy_bits(&input_pool,
- min_t(int, fls(delta>>1), 11));
- }
--out:
-- preempt_enable();
+- spin_unlock_irqrestore(&irq_2_ir_lock, flags);
++ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+
+ return rc;
}
+@@ -410,7 +410,7 @@ static void iommu_set_intr_remapping(str
- void add_input_randomness(unsigned int type, unsigned int code,
-Index: linux-2.6/fs/ioprio.c
-===================================================================
---- linux-2.6.orig/fs/ioprio.c
-+++ linux-2.6/fs/ioprio.c
-@@ -226,6 +226,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which,
- if (!user)
- break;
+ addr = virt_to_phys((void *)iommu->ir_table->base);
-+ rcu_read_lock();
- do_each_thread(g, p) {
- if (__task_cred(p)->uid != user->uid)
- continue;
-@@ -237,6 +238,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which,
- else
- ret = ioprio_best(ret, tmpio);
- } while_each_thread(g, p);
-+ rcu_read_unlock();
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- if (who)
- free_uid(user);
-Index: linux-2.6/arch/arm/mach-at91/at91rm9200_time.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-at91/at91rm9200_time.c
-+++ linux-2.6/arch/arm/mach-at91/at91rm9200_time.c
-@@ -114,6 +114,7 @@ clkevt32k_mode(enum clock_event_mode mod
- last_crtr = read_CRTR();
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
-+ setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
- /* PIT for periodic irqs; fixed rate of 1/HZ */
- irqmask = AT91_ST_PITS;
- at91_sys_write(AT91_ST_PIMR, LATCH);
-@@ -127,6 +128,7 @@ clkevt32k_mode(enum clock_event_mode mod
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
-+ remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
- case CLOCK_EVT_MODE_RESUME:
- irqmask = 0;
- break;
-Index: linux-2.6/arch/arm/mach-at91/at91sam926x_time.c
-===================================================================
---- linux-2.6.orig/arch/arm/mach-at91/at91sam926x_time.c
-+++ linux-2.6/arch/arm/mach-at91/at91sam926x_time.c
-@@ -54,7 +54,7 @@ static struct clocksource pit_clk = {
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- };
+ dmar_writeq(iommu->reg + DMAR_IRTA_REG,
+ (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
+@@ -421,7 +421,7 @@ static void iommu_set_intr_remapping(str
--
-+static struct irqaction at91sam926x_pit_irq;
- /*
- * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
- */
-@@ -63,6 +63,9 @@ pit_clkevt_mode(enum clock_event_mode mo
- {
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
-+ /* Set up irq handler */
-+ setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
-+
- /* update clocksource counter */
- pit_cnt += pit_cycle * PIT_PICNT(at91_sys_read(AT91_PIT_PIVR));
- at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
-@@ -75,6 +78,7 @@ pit_clkevt_mode(enum clock_event_mode mo
- case CLOCK_EVT_MODE_UNUSED:
- /* disable irq, leaving the clocksource active */
- at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
-+ remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
-Index: linux-2.6/drivers/clocksource/tcb_clksrc.c
-===================================================================
---- linux-2.6.orig/drivers/clocksource/tcb_clksrc.c
-+++ linux-2.6/drivers/clocksource/tcb_clksrc.c
-@@ -21,8 +21,7 @@
- * resolution better than 200 nsec).
- *
- * - The third channel may be used to provide a 16-bit clockevent
-- * source, used in either periodic or oneshot mode. This runs
-- * at 32 KiHZ, and can handle delays of up to two seconds.
-+ * source, used in either periodic or oneshot mode.
- *
- * A boot clocksource and clockevent source are also currently needed,
- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
-@@ -68,6 +67,7 @@ static struct clocksource clksrc = {
- struct tc_clkevt_device {
- struct clock_event_device clkevt;
- struct clk *clk;
-+ u32 freq;
- void __iomem *regs;
- };
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_IRTPS), sts);
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
-@@ -76,13 +76,6 @@ static struct tc_clkevt_device *to_tc_cl
- return container_of(clkevt, struct tc_clkevt_device, clkevt);
- }
+ /*
+ * global invalidation of interrupt entry cache before enabling
+@@ -429,7 +429,7 @@ static void iommu_set_intr_remapping(str
+ */
+ qi_global_iec(iommu);
--/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
-- * because using one of the divided clocks would usually mean the
-- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
-- *
-- * A divided clock could be good for high resolution timers, since
-- * 30.5 usec resolution can seem "low".
-- */
- static u32 timer_clock;
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
- static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
-@@ -105,11 +98,12 @@ static void tc_mode(enum clock_event_mod
- case CLOCK_EVT_MODE_PERIODIC:
- clk_enable(tcd->clk);
+ /* Enable interrupt-remapping */
+ iommu->gcmd |= DMA_GCMD_IRE;
+@@ -438,7 +438,7 @@ static void iommu_set_intr_remapping(str
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, (sts & DMA_GSTS_IRES), sts);
-- /* slow clock, count up to RC, then irq and restart */
-+ /* count up to RC, then irq and restart */
- __raw_writel(timer_clock
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
-- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
-+ __raw_writel((tcd->freq + HZ/2)/HZ,
-+ tcaddr + ATMEL_TC_REG(2, RC));
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ }
+
+
+@@ -486,7 +486,7 @@ static void iommu_disable_intr_remapping
+ */
+ qi_global_iec(iommu);
- /* Enable clock and interrupts on RC compare */
- __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -122,7 +116,7 @@ static void tc_mode(enum clock_event_mod
- case CLOCK_EVT_MODE_ONESHOT:
- clk_enable(tcd->clk);
+- spin_lock_irqsave(&iommu->register_lock, flags);
++ raw_spin_lock_irqsave(&iommu->register_lock, flags);
-- /* slow clock, count up to RC, then irq and stop */
-+ /* count up to RC, then irq and stop */
- __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
-@@ -152,8 +146,12 @@ static struct tc_clkevt_device clkevt =
- .features = CLOCK_EVT_FEAT_PERIODIC
- | CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
-+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- /* Should be lower than at91rm9200's system timer */
- .rating = 125,
-+#else
-+ .rating = 200,
-+#endif
- .set_next_event = tc_next_event,
- .set_mode = tc_mode,
- },
-@@ -179,8 +177,9 @@ static struct irqaction tc_irqaction = {
- .handler = ch2_irq,
+ sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ if (!(sts & DMA_GSTS_IRES))
+@@ -499,7 +499,7 @@ static void iommu_disable_intr_remapping
+ readl, !(sts & DMA_GSTS_IRES), sts);
+
+ end:
+- spin_unlock_irqrestore(&iommu->register_lock, flags);
++ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+ }
+
+ int __init intr_remapping_supported(void)
+Index: linux-2.6/include/linux/intel-iommu.h
+===================================================================
+--- linux-2.6.orig/include/linux/intel-iommu.h
++++ linux-2.6/include/linux/intel-iommu.h
+@@ -271,7 +271,7 @@ struct qi_desc {
};
--static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
-+static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
- {
-+ unsigned divisor = atmel_tc_divisors[divisor_idx];
- struct clk *t2_clk = tc->clk[2];
- int irq = tc->irq[2];
+ struct q_inval {
+- spinlock_t q_lock;
++ raw_spinlock_t q_lock;
+ struct qi_desc *desc; /* invalidation queue */
+ int *desc_status; /* desc status */
+ int free_head; /* first free entry */
+@@ -311,7 +311,7 @@ struct intel_iommu {
+ u64 cap;
+ u64 ecap;
+ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
+- spinlock_t register_lock; /* protect register handling */
++ raw_spinlock_t register_lock; /* protect register handling */
+ int seq_id; /* sequence id of the iommu */
+ int agaw; /* agaw of this iommu */
+ int msagaw; /* max sagaw of this iommu */
+Index: linux-2.6/lib/atomic64.c
+===================================================================
+--- linux-2.6.orig/lib/atomic64.c
++++ linux-2.6/lib/atomic64.c
+@@ -29,7 +29,7 @@
+ * Ensure each lock is in a separate cacheline.
+ */
+ static union {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ char pad[L1_CACHE_BYTES];
+ } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
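On architectures without native 64-bit atomics, lib/atomic64.c protects each atomic64_t with one lock from this small, cacheline-padded pool, selected by hashing the variable's address; the hunks below only switch the pool to raw spinlocks so it stays safe in atomic context on RT. A user-space sketch of the hashed lock pool idea (sizes and names are illustrative):

#include <pthread.h>
#include <stdint.h>

#define NR_DEMO_LOCKS	16
#define DEMO_CACHELINE	64

static union {
	pthread_mutex_t lock;
	char pad[DEMO_CACHELINE];	/* keep each lock on its own line */
} demo_lock[NR_DEMO_LOCKS];

static void demo_locks_init(void)
{
	int i;

	for (i = 0; i < NR_DEMO_LOCKS; i++)
		pthread_mutex_init(&demo_lock[i].lock, NULL);
}

/* hash the variable's address down to one pooled lock */
static pthread_mutex_t *demo_lock_addr(const volatile void *v)
{
	uintptr_t addr = (uintptr_t)v;

	return &demo_lock[(addr >> 6) % NR_DEMO_LOCKS].lock;
}

static long long demo_add_return(long long a, long long *v)
{
	pthread_mutex_t *lock = demo_lock_addr(v);
	long long val;

	pthread_mutex_lock(lock);
	val = *v += a;
	pthread_mutex_unlock(lock);
	return val;
}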
-@@ -188,11 +187,17 @@ static void __init setup_clkevents(struc
- clkevt.clk = t2_clk;
- tc_irqaction.dev_id = &clkevt;
+@@ -48,9 +48,9 @@ long long atomic64_read(const atomic64_t
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+ }
+ EXPORT_SYMBOL(atomic64_read);
+@@ -60,9 +60,9 @@ void atomic64_set(atomic64_t *v, long lo
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
-- timer_clock = clk32k_divisor_idx;
-+ timer_clock = divisor_idx;
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ v->counter = i;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ }
+ EXPORT_SYMBOL(atomic64_set);
-- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
-- clkevt.clkevt.max_delta_ns
-- = clockevent_delta2ns(0xffff, &clkevt.clkevt);
-+ if (!divisor)
-+ clkevt.freq = 32768;
-+ else
-+ clkevt.freq = clk_get_rate(t2_clk)/divisor;
-+
-+ clkevt.clkevt.mult = div_sc(clkevt.freq, NSEC_PER_SEC,
-+ clkevt.clkevt.shift);
-+ clkevt.clkevt.max_delta_ns =
-+ clockevent_delta2ns(0xffff, &clkevt.clkevt);
- clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
- clkevt.clkevt.cpumask = cpumask_of(0);
+@@ -71,9 +71,9 @@ void atomic64_add(long long a, atomic64_
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
-@@ -295,8 +300,11 @@ static int __init tcb_clksrc_init(void)
- clocksource_register(&clksrc);
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ v->counter += a;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ }
+ EXPORT_SYMBOL(atomic64_add);
+
+@@ -83,9 +83,9 @@ long long atomic64_add_return(long long
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter += a;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+ }
+ EXPORT_SYMBOL(atomic64_add_return);
+@@ -95,9 +95,9 @@ void atomic64_sub(long long a, atomic64_
+ unsigned long flags;
+ spinlock_t *lock = lock_addr(v);
- /* channel 2: periodic and oneshot timer support */
-+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- setup_clkevents(tc, clk32k_divisor_idx);
--
-+#else
-+ setup_clkevents(tc, best_divisor_idx);
-+#endif
- return 0;
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ v->counter -= a;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ }
+ EXPORT_SYMBOL(atomic64_sub);
+
+@@ -107,9 +107,9 @@ long long atomic64_sub_return(long long
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter -= a;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+ }
+ EXPORT_SYMBOL(atomic64_sub_return);
+@@ -120,11 +120,11 @@ long long atomic64_dec_if_positive(atomi
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter - 1;
+ if (val >= 0)
+ v->counter = val;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+ }
+ EXPORT_SYMBOL(atomic64_dec_if_positive);
+@@ -135,11 +135,11 @@ long long atomic64_cmpxchg(atomic64_t *v
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ if (val == o)
+ v->counter = n;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+ }
+ EXPORT_SYMBOL(atomic64_cmpxchg);
+@@ -150,10 +150,10 @@ long long atomic64_xchg(atomic64_t *v, l
+ spinlock_t *lock = lock_addr(v);
+ long long val;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ v->counter = new;
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+ }
+ EXPORT_SYMBOL(atomic64_xchg);
+@@ -164,12 +164,12 @@ int atomic64_add_unless(atomic64_t *v, l
+ spinlock_t *lock = lock_addr(v);
+ int ret = 0;
+
+- spin_lock_irqsave(lock, flags);
++ raw_spin_lock_irqsave(lock, flags);
+ if (v->counter != u) {
+ v->counter += a;
+ ret = 1;
+ }
+- spin_unlock_irqrestore(lock, flags);
++ raw_spin_unlock_irqrestore(lock, flags);
+ return ret;
}
- arch_initcall(tcb_clksrc_init);
-Index: linux-2.6/drivers/misc/Kconfig
-===================================================================
---- linux-2.6.orig/drivers/misc/Kconfig
-+++ linux-2.6/drivers/misc/Kconfig
-@@ -82,6 +82,7 @@ config AB8500_PWM
- config ATMEL_TCLIB
- bool "Atmel AT32/AT91 Timer/Counter Library"
- depends on (AVR32 || ARCH_AT91)
-+ default y if PREEMPT_RT_FULL
- help
- Select this if you want a library to allocate the Timer/Counter
- blocks found on many Atmel processors. This facilitates using
-@@ -97,8 +98,7 @@ config ATMEL_TCB_CLKSRC
- are combined to make a single 32-bit timer.
+ EXPORT_SYMBOL(atomic64_add_unless);
+@@ -179,7 +179,7 @@ static int init_atomic64_lock(void)
+ int i;
- When GENERIC_CLOCKEVENTS is defined, the third timer channel
-- may be used as a clock event device supporting oneshot mode
-- (delays of up to two seconds) based on the 32 KiHz clock.
-+ may be used as a clock event device supporting oneshot mode.
+ for (i = 0; i < NR_LOCKS; ++i)
+- spin_lock_init(&atomic64_lock[i].lock);
++ raw_spin_lock_init(&atomic64_lock[i].lock);
+ return 0;
+ }
- config ATMEL_TCB_CLKSRC_BLOCK
- int
-@@ -112,6 +112,14 @@ config ATMEL_TCB_CLKSRC_BLOCK
- TC can be used for other purposes, such as PWM generation and
- interval timing.
+Index: linux-2.6/kernel/signal.c
+===================================================================
+--- linux-2.6.orig/kernel/signal.c
++++ linux-2.6/kernel/signal.c
+@@ -300,13 +300,45 @@ static bool task_participate_group_stop(
+ return false;
+ }
-+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
-+ bool "TC Block use 32 KiHz clock"
-+ depends on ATMEL_TCB_CLKSRC
-+ default y if !PREEMPT_RT_FULL
-+ help
-+ Select this to use 32 KiHz base clock rate as TC block clock
-+ source for clock events.
++#ifdef __HAVE_ARCH_CMPXCHG
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++ struct sigqueue *q = t->sigqueue_cache;
+
- config IBM_ASM
- tristate "Device driver for IBM RSA service processor"
- depends on X86 && PCI && INPUT && EXPERIMENTAL
-@@ -133,6 +141,35 @@ config IBM_ASM
- for information on the specific driver level and support statement
- for your IBM server.
-
-+config HWLAT_DETECTOR
-+ tristate "Testing module to detect hardware-induced latencies"
-+ depends on DEBUG_FS
-+ depends on RING_BUFFER
-+ default m
-+ ---help---
-+ A simple hardware latency detector. Use this module to detect
-+ large latencies introduced by the behavior of the underlying
-+ system firmware external to Linux. We do this using periodic
-+ use of stop_machine to grab all available CPUs and measure
-+ for unexplainable gaps in the CPU timestamp counter(s). By
-+ default, the module is not enabled until the "enable" file
-+ within the "hwlat_detector" debugfs directory is toggled.
++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
++ return NULL;
++ return q;
++}
+
-+ This module is often used to detect SMI (System Management
-+ Interrupts) on x86 systems, though is not x86 specific. To
-+ this end, we default to using a sample window of 1 second,
-+ during which we will sample for 0.5 seconds. If an SMI or
-+ similar event occurs during that time, it is recorded
-+ into an 8K samples global ring buffer until retreived.
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
++ return 0;
++ return 1;
++}
+
-+ WARNING: This software should never be enabled (it can be built
-+ but should not be turned on after it is loaded) in a production
-+ environment where high latencies are a concern since the
-+ sampling mechanism actually introduces latencies for
-+ regular tasks while the CPU(s) are being held.
++#else
+
-+ If unsure, say N
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++ return NULL;
++}
+
- config PHANTOM
- tristate "Sensable PHANToM (PCI)"
- depends on PCI
-Index: linux-2.6/drivers/net/tulip/tulip_core.c
-===================================================================
---- linux-2.6.orig/drivers/net/tulip/tulip_core.c
-+++ linux-2.6/drivers/net/tulip/tulip_core.c
-@@ -1951,6 +1951,7 @@ static void __devexit tulip_remove_one (
- pci_iounmap(pdev, tp->base_addr);
- free_netdev (dev);
- pci_release_regions (pdev);
-+ pci_disable_device (pdev);
- pci_set_drvdata (pdev, NULL);
-
- /* pci_power_off (pdev, -1); */
-Index: linux-2.6/drivers/net/8139too.c
-===================================================================
---- linux-2.6.orig/drivers/net/8139too.c
-+++ linux-2.6/drivers/net/8139too.c
-@@ -2173,7 +2173,11 @@ static irqreturn_t rtl8139_interrupt (in
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++ return 1;
++}
++
++#endif
++
+ /*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ * appropriate lock must be held to stop the target task from exiting
*/
- static void rtl8139_poll_controller(struct net_device *dev)
+ static struct sigqueue *
+-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit, int fromslab)
{
-- disable_irq(dev->irq);
-+ /*
-+ * use _nosync() variant - might be used by netconsole
-+ * from atomic contexts:
-+ */
-+ disable_irq_nosync(dev->irq);
- rtl8139_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
+ struct sigqueue *q = NULL;
+ struct user_struct *user;
+@@ -323,7 +355,10 @@ __sigqueue_alloc(int sig, struct task_st
+ if (override_rlimit ||
+ atomic_read(&user->sigpending) <=
+ task_rlimit(t, RLIMIT_SIGPENDING)) {
+- q = kmem_cache_alloc(sigqueue_cachep, flags);
++ if (!fromslab)
++ q = get_task_cache(t);
++ if (!q)
++ q = kmem_cache_alloc(sigqueue_cachep, flags);
+ } else {
+ print_dropped_signal(sig);
+ }
+@@ -340,6 +375,13 @@ __sigqueue_alloc(int sig, struct task_st
+ return q;
}
-Index: linux-2.6/drivers/net/ehea/ehea_main.c
-===================================================================
---- linux-2.6.orig/drivers/net/ehea/ehea_main.c
-+++ linux-2.6/drivers/net/ehea/ehea_main.c
-@@ -1369,7 +1369,7 @@ static int ehea_reg_interrupts(struct ne
- "%s-queue%d", dev->name, i);
- ret = ibmebus_request_irq(pr->eq->attr.ist1,
- ehea_recv_irq_handler,
-- IRQF_DISABLED, pr->int_send_name,
-+ IRQF_NO_THREAD, pr->int_send_name,
- pr);
- if (ret) {
- netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
-Index: linux-2.6/drivers/net/arm/at91_ether.c
-===================================================================
---- linux-2.6.orig/drivers/net/arm/at91_ether.c
-+++ linux-2.6/drivers/net/arm/at91_ether.c
-@@ -199,7 +199,9 @@ static irqreturn_t at91ether_phy_interru
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
- unsigned int phy;
-+ unsigned long flags;
-
-+ spin_lock_irqsave(&lp->lock, flags);
- /*
- * This hander is triggered on both edges, but the PHY chips expect
- * level-triggering. We therefore have to check if the PHY actually has
-@@ -241,6 +243,7 @@ static irqreturn_t at91ether_phy_interru
-
- done:
- disable_mdi();
-+ spin_unlock_irqrestore(&lp->lock, flags);
- return IRQ_HANDLED;
++static struct sigqueue *
++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit)
++{
++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
++}
++
+ static void __sigqueue_free(struct sigqueue *q)
+ {
+ if (q->flags & SIGQUEUE_PREALLOC)
+@@ -349,6 +391,21 @@ static void __sigqueue_free(struct sigqu
+ kmem_cache_free(sigqueue_cachep, q);
}
-@@ -397,9 +400,11 @@ static void at91ether_check_link(unsigne
- struct net_device *dev = (struct net_device *) dev_id;
- struct at91_private *lp = netdev_priv(dev);
-
-+ spin_lock_irq(&lp->lock);
- enable_mdi();
- update_linkspeed(dev, 1);
- disable_mdi();
-+ spin_unlock_irq(&lp->lock);
- mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
++static void sigqueue_free_current(struct sigqueue *q)
++{
++ struct user_struct *up;
++
++ if (q->flags & SIGQUEUE_PREALLOC)
++ return;
++
++ up = q->user;
++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
++ atomic_dec(&up->sigpending);
++ free_uid(up);
++ } else
++ __sigqueue_free(q);
++}
++
+ void flush_sigqueue(struct sigpending *queue)
+ {
+ struct sigqueue *q;
+@@ -362,6 +419,21 @@ void flush_sigqueue(struct sigpending *q
}
-Index: linux-2.6/include/linux/preempt.h
-===================================================================
---- linux-2.6.orig/include/linux/preempt.h
-+++ linux-2.6/include/linux/preempt.h
-@@ -33,12 +33,18 @@ do { \
- barrier(); \
- } while (0)
-
--#define preempt_enable_no_resched() \
-+#define __preempt_enable_no_resched() \
- do { \
- barrier(); \
- dec_preempt_count(); \
- } while (0)
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+# define preempt_enable_no_resched() __preempt_enable_no_resched()
-+#else
-+# define preempt_enable_no_resched() preempt_enable()
-+#endif
+ /*
++ * Called from __exit_signal. Flush tsk->pending and
++ * tsk->sigqueue_cache
++ */
++void flush_task_sigqueue(struct task_struct *tsk)
++{
++ struct sigqueue *q;
+
- #define preempt_check_resched() \
- do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-@@ -47,7 +53,7 @@ do { \
-
- #define preempt_enable() \
- do { \
-- preempt_enable_no_resched(); \
-+ __preempt_enable_no_resched(); \
- barrier(); \
- preempt_check_resched(); \
- } while (0)
-@@ -83,6 +89,7 @@ do { \
- #else
++ flush_sigqueue(&tsk->pending);
++
++ q = get_task_cache(tsk);
++ if (q)
++ kmem_cache_free(sigqueue_cachep, q);
++}
++
++/*
+ * Flush all pending signals for a task.
+ */
+ void __flush_signals(struct task_struct *t)
+@@ -509,7 +581,7 @@ static void collect_signal(int sig, stru
+ still_pending:
+ list_del_init(&first->list);
+ copy_siginfo(info, &first->info);
+- __sigqueue_free(first);
++ sigqueue_free_current(first);
+ } else {
+ /*
+ * Ok, it wasn't in the queue. This must be
+@@ -555,6 +627,8 @@ int dequeue_signal(struct task_struct *t
+ {
+ int signr;
- #define preempt_disable() do { } while (0)
-+#define __preempt_enable_no_resched() do { } while (0)
- #define preempt_enable_no_resched() do { } while (0)
- #define preempt_enable() do { } while (0)
- #define preempt_check_resched() do { } while (0)
-@@ -93,6 +100,27 @@ do { \
++ WARN_ON_ONCE(tsk != current);
++
+ /* We only dequeue private signals from ourselves, we don't let
+ * signalfd steal them
+ */
+@@ -637,6 +711,9 @@ void signal_wake_up(struct task_struct *
- #endif
+ set_tsk_thread_flag(t, TIF_SIGPENDING);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define preempt_disable_rt() preempt_disable()
-+# define preempt_enable_rt() preempt_enable()
-+# define preempt_disable_nort() do { } while (0)
-+# define preempt_enable_nort() do { } while (0)
-+#ifdef CONFIG_SMP
-+extern void migrate_disable(void);
-+extern void migrate_enable(void);
-+#else /* CONFIG_SMP */
-+# define migrate_disable() do { } while (0)
-+# define migrate_enable() do { } while (0)
-+#endif /* CONFIG_SMP */
-+#else
-+# define preempt_disable_rt() do { } while (0)
-+# define preempt_enable_rt() do { } while (0)
-+# define preempt_disable_nort() preempt_disable()
-+# define preempt_enable_nort() preempt_enable()
-+# define migrate_disable() preempt_disable()
-+# define migrate_enable() preempt_enable()
-+#endif
++ if (unlikely(t == current))
++ return;
+
- #ifdef CONFIG_PREEMPT_NOTIFIERS
+ /*
+ * For SIGKILL, we want to wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
+@@ -1179,12 +1256,12 @@ struct sighand_struct *__lock_task_sigha
+ struct sighand_struct *sighand;
- struct preempt_notifier;
-Index: linux-2.6/include/linux/uaccess.h
-===================================================================
---- linux-2.6.orig/include/linux/uaccess.h
-+++ linux-2.6/include/linux/uaccess.h
-@@ -6,38 +6,37 @@
+ for (;;) {
+- local_irq_save(*flags);
++ local_irq_save_nort(*flags);
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
+ break;
+ }
- /*
- * These routines enable/disable the pagefault handler in that
-- * it will not take any locks and go straight to the fixup table.
-- *
-- * They have great resemblance to the preempt_disable/enable calls
-- * and in fact they are identical; this is because currently there is
-- * no other way to make the pagefault handlers do this. So we do
-- * disable preemption but we don't necessarily care about that.
-+ * it will not take any MM locks and go straight to the fixup table.
+@@ -1195,7 +1272,7 @@ struct sighand_struct *__lock_task_sigha
+ }
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
+ }
+
+ return sighand;
+@@ -1434,7 +1511,8 @@ EXPORT_SYMBOL(kill_pid);
*/
--static inline void pagefault_disable(void)
-+static inline void raw_pagefault_disable(void)
+ struct sigqueue *sigqueue_alloc(void)
{
- inc_preempt_count();
-- /*
-- * make sure to have issued the store before a pagefault
-- * can hit.
-- */
- barrier();
- }
+- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
++ /* Preallocated sigqueue objects always from the slabcache ! */
++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
+
+ if (q)
+ q->flags |= SIGQUEUE_PREALLOC;
+@@ -1790,7 +1868,7 @@ static void ptrace_stop(int exit_code, i
+ */
+ preempt_disable();
+ read_unlock(&tasklist_lock);
+- preempt_enable_no_resched();
++ __preempt_enable_no_resched();
+ schedule();
+ } else {
+ /*
+Index: linux-2.6/kernel/posix-timers.c
+===================================================================
+--- linux-2.6.orig/kernel/posix-timers.c
++++ linux-2.6/kernel/posix-timers.c
+@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_
+ static struct pid *good_sigevent(sigevent_t * event)
+ {
+ struct task_struct *rtn = current->group_leader;
++ int sig = event->sigev_signo;
+
+ if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
+ (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
+@@ -447,7 +448,8 @@ static struct pid *good_sigevent(sigeven
+ return NULL;
+
+ if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
+- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
++ sig_kernel_coredump(sig)))
+ return NULL;
--static inline void pagefault_enable(void)
-+static inline void raw_pagefault_enable(void)
- {
-- /*
-- * make sure to issue those last loads/stores before enabling
-- * the pagefault handler again.
-- */
- barrier();
- dec_preempt_count();
-- /*
-- * make sure we do..
-- */
- barrier();
- preempt_check_resched();
+ return task_pid(rtn);
+@@ -764,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
+ return overrun;
}
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+static inline void pagefault_disable(void)
-+{
-+ raw_pagefault_disable();
-+}
-+
-+static inline void pagefault_enable(void)
++/*
++ * Protected by RCU!
++ */
++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
+{
-+ raw_pagefault_enable();
-+}
-+#else
-+extern void pagefault_disable(void);
-+extern void pagefault_enable(void);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (kc->timer_set == common_timer_set)
++ hrtimer_wait_for_timer(&timr->it.real.timer);
++ else
++ /* FIXME: Whacky hack for posix-cpu-timers */
++ schedule_timeout(1);
+#endif
++}
+
- #ifndef ARCH_HAS_NOCACHE_UACCESS
+ /* Set a POSIX.1b interval timer. */
+ /* timr->it_lock is taken. */
+ static int
+@@ -841,6 +857,7 @@ retry:
+ if (!timr)
+ return -EINVAL;
- static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-@@ -77,9 +76,9 @@ static inline unsigned long __copy_from_
- mm_segment_t old_fs = get_fs(); \
- \
- set_fs(KERNEL_DS); \
-- pagefault_disable(); \
-+ raw_pagefault_disable(); \
- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
-- pagefault_enable(); \
-+ raw_pagefault_enable(); \
- set_fs(old_fs); \
- ret; \
- })
-Index: linux-2.6/mm/memory.c
++ rcu_read_lock();
+ kc = clockid_to_kclock(timr->it_clock);
+ if (WARN_ON_ONCE(!kc || !kc->timer_set))
+ error = -EINVAL;
+@@ -849,9 +866,12 @@ retry:
+
+ unlock_timer(timr, flag);
+ if (error == TIMER_RETRY) {
++ timer_wait_for_callback(kc, timr);
+ rtn = NULL; // We already got the old time...
++ rcu_read_unlock();
+ goto retry;
+ }
++ rcu_read_unlock();
+
+ if (old_setting && !error &&
+ copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
+@@ -889,10 +909,15 @@ retry_delete:
+ if (!timer)
+ return -EINVAL;
+
++ rcu_read_lock();
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
+ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
++ rcu_read_unlock();
+
+ spin_lock(&current->sighand->siglock);
+ list_del(&timer->list);
+@@ -918,8 +943,18 @@ static void itimer_delete(struct k_itime
+ retry_delete:
+ spin_lock_irqsave(&timer->it_lock, flags);
+
++ /* On RT we can race with a deletion */
++ if (!timer->it_signal) {
++ unlock_timer(timer, flags);
++ return;
++ }
++
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
++ rcu_read_lock();
+ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
+ list_del(&timer->list);
+Index: linux-2.6/include/linux/signal.h
===================================================================
---- linux-2.6.orig/mm/memory.c
-+++ linux-2.6/mm/memory.c
-@@ -1290,10 +1290,13 @@ static unsigned long unmap_page_range(st
- return addr;
+--- linux-2.6.orig/include/linux/signal.h
++++ linux-2.6/include/linux/signal.h
+@@ -229,6 +229,7 @@ static inline void init_sigpending(struc
}
--#ifdef CONFIG_PREEMPT
-+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_RT_FULL)
- # define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
- #else
--/* No preempt: go for improved straight-line efficiency */
-+/*
-+ * No preempt: go for improved straight-line efficiency
-+ * on PREEMPT_RT this is not a critical latency-path.
-+ */
- # define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
- #endif
+ extern void flush_sigqueue(struct sigpending *queue);
++extern void flush_task_sigqueue(struct task_struct *tsk);
-@@ -3435,6 +3438,32 @@ int handle_pte_fault(struct mm_struct *m
- return 0;
- }
+ /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
+ static inline int valid_signal(unsigned long sig)
+Index: linux-2.6/kernel/exit.c
+===================================================================
+--- linux-2.6.orig/kernel/exit.c
++++ linux-2.6/kernel/exit.c
+@@ -142,7 +142,7 @@ static void __exit_signal(struct task_st
+ * Do this under ->siglock, we can race with another thread
+ * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+ */
+- flush_sigqueue(&tsk->pending);
++ flush_task_sigqueue(tsk);
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+void pagefault_disable(void)
+Index: linux-2.6/kernel/fork.c
+===================================================================
+--- linux-2.6.orig/kernel/fork.c
++++ linux-2.6/kernel/fork.c
+@@ -198,7 +198,18 @@ void __put_task_struct(struct task_struc
+ if (!profile_handoff_task(tsk))
+ free_task(tsk);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ EXPORT_SYMBOL_GPL(__put_task_struct);
++#else
++void __put_task_struct_cb(struct rcu_head *rhp)
+{
-+ migrate_disable();
-+ current->pagefault_disabled++;
-+ /*
-+ * make sure to have issued the store before a pagefault
-+ * can hit.
-+ */
-+ barrier();
-+}
-+EXPORT_SYMBOL_GPL(pagefault_disable);
++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
++
++ __put_task_struct(tsk);
+
-+void pagefault_enable(void)
-+{
-+ /*
-+ * make sure to issue those last loads/stores before enabling
-+ * the pagefault handler again.
-+ */
-+ barrier();
-+ current->pagefault_disabled--;
-+ migrate_enable();
+}
-+EXPORT_SYMBOL_GPL(pagefault_enable);
++EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+#endif
-+
+
/*
- * By the time we get here, we already hold the mm semaphore
- */
-@@ -3983,3 +4012,35 @@ void copy_user_huge_page(struct page *ds
- }
+ * macro override instead of weak attribute alias, to workaround
+@@ -546,6 +557,19 @@ void __mmdrop(struct mm_struct *mm)
}
- #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-+
-+#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0)
+ EXPORT_SYMBOL_GPL(__mmdrop);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
+/*
-+ * Heinous hack, relies on the caller doing something like:
-+ *
-+ * pte = alloc_pages(PGALLOC_GFP, 0);
-+ * if (pte)
-+ * pgtable_page_ctor(pte);
-+ * return pte;
-+ *
-+ * This ensures we release the page and return NULL when the
-+ * lock allocation fails.
++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
++ * want another facility to make this work.
+ */
-+struct page *pte_lock_init(struct page *page)
++void __mmdrop_delayed(struct rcu_head *rhp)
+{
-+ page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
-+ if (page->ptl) {
-+ spin_lock_init(__pte_lockptr(page));
-+ } else {
-+ __free_page(page);
-+ page = NULL;
-+ }
-+ return page;
-+}
++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
-+void pte_lock_deinit(struct page *page)
-+{
-+ kfree(page->ptl);
-+ page->mapping = NULL;
++ __mmdrop(mm);
+}
++#endif
+
+ /*
+ * Decrement the use count and release all resources for an mm.
+ */
+@@ -1030,6 +1054,9 @@ void mm_init_owner(struct mm_struct *mm,
+ */
+ static void posix_cpu_timers_init(struct task_struct *tsk)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ tsk->posix_timer_list = NULL;
+#endif
-Index: linux-2.6/arch/alpha/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/alpha/mm/fault.c
-+++ linux-2.6/arch/alpha/mm/fault.c
-@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns
-
- /* If we're in an interrupt context, or have no user context,
- we must not take the fault. */
-- if (!mm || in_atomic())
-+ if (!mm || pagefault_disabled())
- goto no_context;
-
- #ifdef CONFIG_ALPHA_LARGE_VMALLOC
-Index: linux-2.6/arch/arm/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/arm/mm/fault.c
-+++ linux-2.6/arch/arm/mm/fault.c
-@@ -289,7 +289,7 @@ do_page_fault(unsigned long addr, unsign
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+ tsk->cputime_expires.prof_exp = cputime_zero;
+ tsk->cputime_expires.virt_exp = cputime_zero;
+ tsk->cputime_expires.sched_exp = 0;
+@@ -1137,6 +1164,7 @@ static struct task_struct *copy_process(
+ spin_lock_init(&p->alloc_lock);
- /*
-Index: linux-2.6/arch/avr32/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/avr32/mm/fault.c
-+++ linux-2.6/arch/avr32/mm/fault.c
-@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned l
- * If we're in an interrupt or have no user context, we must
- * not take the fault...
- */
-- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
-+ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
- goto no_context;
+ init_sigpending(&p->pending);
++ p->sigqueue_cache = NULL;
- local_irq_enable();
-Index: linux-2.6/arch/cris/mm/fault.c
+ p->utime = cputime_zero;
+ p->stime = cputime_zero;
+@@ -1194,6 +1222,9 @@ static struct task_struct *copy_process(
+ p->hardirq_context = 0;
+ p->softirq_context = 0;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ p->pagefault_disabled = 0;
++#endif
+ #ifdef CONFIG_LOCKDEP
+ p->lockdep_depth = 0; /* no locks held yet */
+ p->curr_chain_key = 0;
+Index: linux-2.6/kernel/sched_rt.c
===================================================================
---- linux-2.6.orig/arch/cris/mm/fault.c
-+++ linux-2.6/arch/cris/mm/fault.c
-@@ -111,7 +111,7 @@ do_page_fault(unsigned long address, str
- * user context, we must not take the fault.
- */
+--- linux-2.6.orig/kernel/sched_rt.c
++++ linux-2.6/kernel/sched_rt.c
+@@ -631,6 +631,7 @@ static int sched_rt_runtime_exceeded(str
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+ if (rt_rq->rt_time > runtime) {
+ rt_rq->rt_throttled = 1;
++ printk_once(KERN_WARNING "sched: RT throttling activated\n");
+ if (rt_rq_throttled(rt_rq)) {
+ sched_rt_rq_dequeue(rt_rq);
+ return 1;
+@@ -1186,7 +1187,7 @@ static void deactivate_task(struct rq *r
+ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
+ {
+ if (!task_running(rq, p) &&
+- (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
++ (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
+ (p->rt.nr_cpus_allowed > 1))
+ return 1;
+ return 0;
+@@ -1331,7 +1332,7 @@ static struct rq *find_lock_lowest_rq(st
+ */
+ if (unlikely(task_rq(task) != rq ||
+ !cpumask_test_cpu(lowest_rq->cpu,
+- &task->cpus_allowed) ||
++ tsk_cpus_allowed(task)) ||
+ task_running(rq, task) ||
+ !task->on_rq)) {
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/frv/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/frv/mm/fault.c
-+++ linux-2.6/arch/frv/mm/fault.c
-@@ -79,7 +79,7 @@ asmlinkage void do_page_fault(int datamm
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+@@ -1614,9 +1615,6 @@ static void set_cpus_allowed_rt(struct t
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/ia64/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/ia64/mm/fault.c
-+++ linux-2.6/arch/ia64/mm/fault.c
-@@ -89,7 +89,7 @@ ia64_do_page_fault (unsigned long addres
- /*
- * If we're in an interrupt or have no user context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+ update_rt_migration(&rq->rt);
+ }
+-
+- cpumask_copy(&p->cpus_allowed, new_mask);
+- p->rt.nr_cpus_allowed = weight;
+ }
- #ifdef CONFIG_VIRTUAL_MEM_MAP
-Index: linux-2.6/arch/m32r/mm/fault.c
+ /* Assumes rq->lock is held */
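The two hunks above stop open-coding &p->cpus_allowed and funnel every
reader of the affinity mask through tsk_cpus_allowed(). Mainline 3.0
defines that accessor as, roughly:

        #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

With one central definition, the -rt tree can later substitute a
different mask (for instance while a task has migration disabled)
without touching each scheduler call site again.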
+Index: linux-2.6/include/asm-generic/cmpxchg-local.h
===================================================================
---- linux-2.6.orig/arch/m32r/mm/fault.c
-+++ linux-2.6/arch/m32r/mm/fault.c
-@@ -115,7 +115,7 @@ asmlinkage void do_page_fault(struct pt_
- * If we're in an interrupt or have no user context or are running in an
- * atomic region then we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto bad_area_nosemaphore;
+--- linux-2.6.orig/include/asm-generic/cmpxchg-local.h
++++ linux-2.6/include/asm-generic/cmpxchg-local.h
+@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_lo
+ if (size == 8 && sizeof(unsigned long) != 8)
+ wrong_size_cmpxchg(ptr);
- /* When running in the kernel we expect faults to occur only to
-Index: linux-2.6/arch/m68k/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/m68k/mm/fault.c
-+++ linux-2.6/arch/m68k/mm/fault.c
-@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs,
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+- local_irq_save(flags);
++ raw_local_irq_save(flags);
+ switch (size) {
+ case 1: prev = *(u8 *)ptr;
+ if (prev == old)
+@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_lo
+ default:
+ wrong_size_cmpxchg(ptr);
+ }
+- local_irq_restore(flags);
++ raw_local_irq_restore(flags);
+ return prev;
+ }
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/microblaze/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/microblaze/mm/fault.c
-+++ linux-2.6/arch/microblaze/mm/fault.c
-@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs,
- if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
- is_write = 0;
+@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_gene
+ u64 prev;
+ unsigned long flags;
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(!mm || pagefault_disabled())) {
- if (kernel_mode(regs))
- goto bad_area_nosemaphore;
+- local_irq_save(flags);
++ raw_local_irq_save(flags);
+ prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = new;
+- local_irq_restore(flags);
++ raw_local_irq_restore(flags);
+ return prev;
+ }
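Switching to the raw_ irq primitives keeps the emulated cmpxchg atomic
with hard interrupts disabled while avoiding the irq-state tracing
hooks that the plain local_irq_save()/local_irq_restore() wrappers can
pull in. Condensed to a sketch (my_cmpxchg_u32() is an illustrative
name, not part of the header):

static inline u32 my_cmpxchg_u32(u32 *ptr, u32 old, u32 new)
{
        unsigned long flags;
        u32 prev;

        raw_local_irq_save(flags);      /* hard irqs off, no tracing hooks */
        prev = *ptr;
        if (prev == old)
                *ptr = new;
        raw_local_irq_restore(flags);

        return prev;
}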
-Index: linux-2.6/arch/mips/mm/fault.c
+Index: linux-2.6/kernel/rtmutex-debug.h
===================================================================
---- linux-2.6.orig/arch/mips/mm/fault.c
-+++ linux-2.6/arch/mips/mm/fault.c
-@@ -88,7 +88,7 @@ asmlinkage void __kprobes do_page_fault(
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto bad_area_nosemaphore;
+--- linux-2.6.orig/kernel/rtmutex-debug.h
++++ linux-2.6/kernel/rtmutex-debug.h
+@@ -17,17 +17,17 @@ extern void debug_rt_mutex_free_waiter(s
+ extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
+ extern void debug_rt_mutex_lock(struct rt_mutex *lock);
+ extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
+-extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
+- struct task_struct *powner);
++extern void
++debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner);
+ extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
+ extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
+ struct rt_mutex *lock);
+ extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
+-# define debug_rt_mutex_reset_waiter(w) \
++# define debug_rt_mutex_reset_waiter(w) \
+ do { (w)->deadlock_lock = NULL; } while (0)
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/mn10300/mm/fault.c
+-static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+- int detect)
++static inline int
++debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, int detect)
+ {
+- return (waiter != NULL);
++ return waiter != NULL;
+ }
+Index: linux-2.6/drivers/char/random.c
===================================================================
---- linux-2.6.orig/arch/mn10300/mm/fault.c
-+++ linux-2.6/arch/mn10300/mm/fault.c
-@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+--- linux-2.6.orig/drivers/char/random.c
++++ linux-2.6/drivers/char/random.c
+@@ -433,7 +433,7 @@ static struct entropy_store input_pool =
+ .poolinfo = &poolinfo_table[0],
+ .name = "input",
+ .limit = 1,
+- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+ .pool = input_pool_data
+ };
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/parisc/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/parisc/mm/fault.c
-+++ linux-2.6/arch/parisc/mm/fault.c
-@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs,
- unsigned long acc_type;
- int fault;
+@@ -442,7 +442,7 @@ static struct entropy_store blocking_poo
+ .name = "blocking",
+ .limit = 1,
+ .pull = &input_pool,
+- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
+ .pool = blocking_pool_data
+ };
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+@@ -450,7 +450,7 @@ static struct entropy_store nonblocking_
+ .poolinfo = &poolinfo_table[1],
+ .name = "nonblocking",
+ .pull = &input_pool,
+- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
+ .pool = nonblocking_pool_data
+ };
+
+@@ -633,8 +633,11 @@ static void add_timer_randomness(struct
+ preempt_disable();
+ /* if over the trickle threshold, use only 1 in 4096 samples */
+ if (input_pool.entropy_count > trickle_thresh &&
+- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
+- goto out;
++ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) {
++ preempt_enable();
++ return;
++ }
++ preempt_enable();
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/powerpc/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/powerpc/mm/fault.c
-+++ linux-2.6/arch/powerpc/mm/fault.c
-@@ -162,7 +162,7 @@ int __kprobes do_page_fault(struct pt_re
+ sample.jiffies = jiffies;
+ sample.cycles = get_cycles();
+@@ -676,8 +679,6 @@ static void add_timer_randomness(struct
+ credit_entropy_bits(&input_pool,
+ min_t(int, fls(delta>>1), 11));
}
- #endif
+-out:
+- preempt_enable();
+ }
-- if (in_atomic() || mm == NULL) {
-+ if (!mm || pagefault_disabled()) {
- if (!user_mode(regs))
- return SIGSEGV;
- /* in_atomic() in user mode is really bad,
-Index: linux-2.6/arch/s390/mm/fault.c
+ void add_input_randomness(unsigned int type, unsigned int code,
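The rewritten trickle check re-enables preemption as soon as the
per-CPU trickle counter has been sampled, on both the drop and keep
paths, instead of holding preemption off across the whole sampling and
entropy-crediting sequence via the old goto. The resulting control
flow, condensed (helper names are illustrative, not the driver's):

static void add_timer_randomness_sketch(void)
{
        preempt_disable();              /* only guards the per-CPU counter */
        if (over_trickle_threshold()) { /* illustrative check */
                preempt_enable();
                return;                 /* drop this sample */
        }
        preempt_enable();

        /* sampling and credit_entropy_bits() now run preemptibly */
        collect_and_credit_entropy();   /* illustrative */
}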
+Index: linux-2.6/fs/ioprio.c
===================================================================
---- linux-2.6.orig/arch/s390/mm/fault.c
-+++ linux-2.6/arch/s390/mm/fault.c
-@@ -295,7 +295,8 @@ static inline int do_exception(struct pt
- * user context.
- */
- fault = VM_FAULT_BADCONTEXT;
-- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
-+ if (unlikely(!user_space_fault(trans_exc_code) ||
-+ !mm || pagefault_disabled()))
- goto out;
-
- address = trans_exc_code & __FAIL_ADDR_MASK;
-@@ -410,7 +411,8 @@ void __kprobes do_asce_exception(struct
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+--- linux-2.6.orig/fs/ioprio.c
++++ linux-2.6/fs/ioprio.c
+@@ -226,6 +226,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which,
+ if (!user)
+ break;
-- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
-+ if (unlikely(!user_space_fault(trans_exc_code) ||
-+ !mm || pagefault_disabled()))
- goto no_context;
++ rcu_read_lock();
+ do_each_thread(g, p) {
+ if (__task_cred(p)->uid != user->uid)
+ continue;
+@@ -237,6 +238,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which,
+ else
+ ret = ioprio_best(ret, tmpio);
+ } while_each_thread(g, p);
++ rcu_read_unlock();
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/score/mm/fault.c
+ if (who)
+ free_uid(user);
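The locking rule this hunk enforces: do_each_thread()/while_each_thread()
walk the RCU-protected task list, so the walk must sit inside an RCU
read-side critical section, otherwise a concurrently exiting task could
be freed under the iterator. Reduced to its skeleton:

        struct task_struct *g, *p;

        rcu_read_lock();
        do_each_thread(g, p) {
                /* p stays allocated here, though it may be exiting;
                 * take a proper reference before using it after the loop */
        } while_each_thread(g, p);
        rcu_read_unlock();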
+Index: linux-2.6/arch/arm/mach-at91/at91rm9200_time.c
===================================================================
---- linux-2.6.orig/arch/score/mm/fault.c
-+++ linux-2.6/arch/score/mm/fault.c
-@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto bad_area_nosemaphore;
-
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/sh/mm/fault_32.c
+--- linux-2.6.orig/arch/arm/mach-at91/at91rm9200_time.c
++++ linux-2.6/arch/arm/mach-at91/at91rm9200_time.c
+@@ -114,6 +114,7 @@ clkevt32k_mode(enum clock_event_mode mod
+ last_crtr = read_CRTR();
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
++ setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ /* PIT for periodic irqs; fixed rate of 1/HZ */
+ irqmask = AT91_ST_PITS;
+ at91_sys_write(AT91_ST_PIMR, LATCH);
+@@ -127,6 +128,7 @@ clkevt32k_mode(enum clock_event_mode mod
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
++ remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ case CLOCK_EVT_MODE_RESUME:
+ irqmask = 0;
+ break;
+Index: linux-2.6/arch/arm/mach-at91/at91sam926x_time.c
===================================================================
---- linux-2.6.orig/arch/sh/mm/fault_32.c
-+++ linux-2.6/arch/sh/mm/fault_32.c
-@@ -166,7 +166,7 @@ asmlinkage void __kprobes do_page_fault(
- * If we're in an interrupt, have no user context or are running
- * in an atomic region then we must not take the fault:
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto no_context;
+--- linux-2.6.orig/arch/arm/mach-at91/at91sam926x_time.c
++++ linux-2.6/arch/arm/mach-at91/at91sam926x_time.c
+@@ -54,7 +54,7 @@ static struct clocksource pit_clk = {
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/sparc/mm/fault_32.c
+-
++static struct irqaction at91sam926x_pit_irq;
+ /*
+ * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
+ */
+@@ -63,6 +63,9 @@ pit_clkevt_mode(enum clock_event_mode mo
+ {
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
++ /* Set up irq handler */
++ setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
++
+ /* update clocksource counter */
+ pit_cnt += pit_cycle * PIT_PICNT(at91_sys_read(AT91_PIT_PIVR));
+ at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
+@@ -75,6 +78,7 @@ pit_clkevt_mode(enum clock_event_mode mo
+ case CLOCK_EVT_MODE_UNUSED:
+ /* disable irq, leaving the clocksource active */
+ at91_sys_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
++ remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+Index: linux-2.6/drivers/clocksource/tcb_clksrc.c
===================================================================
---- linux-2.6.orig/arch/sparc/mm/fault_32.c
-+++ linux-2.6/arch/sparc/mm/fault_32.c
-@@ -248,8 +248,8 @@ asmlinkage void do_sparc_fault(struct pt
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-- goto no_context;
-+ if (!mm || pagefault_disabled())
-+ goto no_context;
+--- linux-2.6.orig/drivers/clocksource/tcb_clksrc.c
++++ linux-2.6/drivers/clocksource/tcb_clksrc.c
+@@ -21,8 +21,7 @@
+ * resolution better than 200 nsec).
+ *
+ * - The third channel may be used to provide a 16-bit clockevent
+- * source, used in either periodic or oneshot mode. This runs
+- * at 32 KiHZ, and can handle delays of up to two seconds.
++ * source, used in either periodic or oneshot mode.
+ *
+ * A boot clocksource and clockevent source are also currently needed,
+ * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
+@@ -68,6 +67,7 @@ static struct clocksource clksrc = {
+ struct tc_clkevt_device {
+ struct clock_event_device clkevt;
+ struct clk *clk;
++ u32 freq;
+ void __iomem *regs;
+ };
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+@@ -76,13 +76,6 @@ static struct tc_clkevt_device *to_tc_cl
+ return container_of(clkevt, struct tc_clkevt_device, clkevt);
+ }
-Index: linux-2.6/arch/sparc/mm/fault_64.c
-===================================================================
---- linux-2.6.orig/arch/sparc/mm/fault_64.c
-+++ linux-2.6/arch/sparc/mm/fault_64.c
-@@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fau
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (!mm || pagefault_disabled())
- goto intr_or_no_mm;
+-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+- * because using one of the divided clocks would usually mean the
+- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+- *
+- * A divided clock could be good for high resolution timers, since
+- * 30.5 usec resolution can seem "low".
+- */
+ static u32 timer_clock;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
-Index: linux-2.6/arch/tile/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/tile/mm/fault.c
-+++ linux-2.6/arch/tile/mm/fault.c
-@@ -346,7 +346,7 @@ static int handle_page_fault(struct pt_r
- * If we're in an interrupt, have no user context or are running in an
- * atomic region then we must not take the fault.
- */
-- if (in_atomic() || !mm) {
-+ if (!mm || pagefault_disabled()) {
- vma = NULL; /* happy compiler */
- goto bad_area_nosemaphore;
- }
-Index: linux-2.6/arch/um/kernel/trap.c
-===================================================================
---- linux-2.6.orig/arch/um/kernel/trap.c
-+++ linux-2.6/arch/um/kernel/trap.c
-@@ -37,7 +37,7 @@ int handle_page_fault(unsigned long addr
- * If the fault was during atomic operation, don't take the fault, just
- * fail.
- */
-- if (in_atomic())
-+ if (!mm || pagefault_disabled())
- goto out_nosemaphore;
+ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
+@@ -105,11 +98,12 @@ static void tc_mode(enum clock_event_mod
+ case CLOCK_EVT_MODE_PERIODIC:
+ clk_enable(tcd->clk);
- down_read(&mm->mmap_sem);
-Index: linux-2.6/arch/x86/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/x86/mm/fault.c
-+++ linux-2.6/arch/x86/mm/fault.c
-@@ -1065,7 +1065,7 @@ do_page_fault(struct pt_regs *regs, unsi
- * If we're in an interrupt, have no user context or are running
- * in an atomic region then we must not take the fault:
- */
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(!mm || pagefault_disabled())) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
-Index: linux-2.6/arch/xtensa/mm/fault.c
-===================================================================
---- linux-2.6.orig/arch/xtensa/mm/fault.c
-+++ linux-2.6/arch/xtensa/mm/fault.c
-@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
- /* If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm) {
-+ if (!mm || pagefault_disabled()) {
- bad_page_fault(regs, address, SIGSEGV);
- return;
- }
-Index: linux-2.6/mm/filemap.c
-===================================================================
---- linux-2.6.orig/mm/filemap.c
-+++ linux-2.6/mm/filemap.c
-@@ -2040,7 +2040,7 @@ size_t iov_iter_copy_from_user_atomic(st
- char *kaddr;
- size_t copied;
+- /* slow clock, count up to RC, then irq and restart */
++ /* count up to RC, then irq and restart */
+ __raw_writel(timer_clock
+ | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
++ __raw_writel((tcd->freq + HZ/2)/HZ,
++ tcaddr + ATMEL_TC_REG(2, RC));
-- BUG_ON(!in_atomic());
-+ BUG_ON(!pagefault_disabled());
- kaddr = kmap_atomic(page, KM_USER0);
- if (likely(i->nr_segs == 1)) {
- int left;
-Index: linux-2.6/arch/x86/mm/highmem_32.c
-===================================================================
---- linux-2.6.orig/arch/x86/mm/highmem_32.c
-+++ linux-2.6/arch/x86/mm/highmem_32.c
-@@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-- BUG_ON(!pte_none(*(kmap_pte-idx)));
-+ WARN_ON(!pte_none(*(kmap_pte-idx)));
- set_pte(kmap_pte-idx, mk_pte(page, prot));
+ /* Enable clock and interrupts on RC compare */
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+@@ -122,7 +116,7 @@ static void tc_mode(enum clock_event_mod
+ case CLOCK_EVT_MODE_ONESHOT:
+ clk_enable(tcd->clk);
- return (void *)vaddr;
-Index: linux-2.6/include/linux/kernel.h
-===================================================================
---- linux-2.6.orig/include/linux/kernel.h
-+++ linux-2.6/include/linux/kernel.h
-@@ -346,7 +346,7 @@ extern enum system_states {
- SYSTEM_HALT,
- SYSTEM_POWER_OFF,
- SYSTEM_RESTART,
-- SYSTEM_SUSPEND_DISK,
-+ SYSTEM_SUSPEND,
- } system_state;
+- /* slow clock, count up to RC, then irq and stop */
++ /* count up to RC, then irq and stop */
+ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
+ | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+@@ -152,8 +146,12 @@ static struct tc_clkevt_device clkevt =
+ .features = CLOCK_EVT_FEAT_PERIODIC
+ | CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ /* Should be lower than at91rm9200's system timer */
+ .rating = 125,
++#else
++ .rating = 200,
++#endif
+ .set_next_event = tc_next_event,
+ .set_mode = tc_mode,
+ },
+@@ -179,8 +177,9 @@ static struct irqaction tc_irqaction = {
+ .handler = ch2_irq,
+ };
- #define TAINT_PROPRIETARY_MODULE 0
-Index: linux-2.6/kernel/power/hibernate.c
-===================================================================
---- linux-2.6.orig/kernel/power/hibernate.c
-+++ linux-2.6/kernel/power/hibernate.c
-@@ -278,6 +278,8 @@ static int create_image(int platform_mod
+-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
++static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
+ {
++ unsigned divisor = atmel_tc_divisors[divisor_idx];
+ struct clk *t2_clk = tc->clk[2];
+ int irq = tc->irq[2];
- local_irq_disable();
+@@ -188,11 +187,17 @@ static void __init setup_clkevents(struc
+ clkevt.clk = t2_clk;
+ tc_irqaction.dev_id = &clkevt;
-+ system_state = SYSTEM_SUSPEND;
+- timer_clock = clk32k_divisor_idx;
++ timer_clock = divisor_idx;
+
+- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
+- clkevt.clkevt.max_delta_ns
+- = clockevent_delta2ns(0xffff, &clkevt.clkevt);
++ if (!divisor)
++ clkevt.freq = 32768;
++ else
++ clkevt.freq = clk_get_rate(t2_clk)/divisor;
+
- error = syscore_suspend();
- if (error) {
- printk(KERN_ERR "PM: Some system devices failed to power down, "
-@@ -305,6 +307,7 @@ static int create_image(int platform_mod
- syscore_resume();
++ clkevt.clkevt.mult = div_sc(clkevt.freq, NSEC_PER_SEC,
++ clkevt.clkevt.shift);
++ clkevt.clkevt.max_delta_ns =
++ clockevent_delta2ns(0xffff, &clkevt.clkevt);
+ clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
+ clkevt.clkevt.cpumask = cpumask_of(0);
- Enable_irqs:
-+ system_state = SYSTEM_RUNNING;
- local_irq_enable();
+@@ -295,8 +300,11 @@ static int __init tcb_clksrc_init(void)
+ clocksource_register(&clksrc);
- Enable_cpus:
-@@ -412,6 +415,7 @@ static int resume_target_kernel(bool pla
- goto Enable_cpus;
+ /* channel 2: periodic and oneshot timer support */
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ setup_clkevents(tc, clk32k_divisor_idx);
+-
++#else
++ setup_clkevents(tc, best_divisor_idx);
++#endif
+ return 0;
+ }
+ arch_initcall(tcb_clksrc_init);
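Because the timer may now run from a divided master clock instead of
the fixed 32 KiHz slow clock, the clockevent parameters are computed
from the selected divisor. Worked through with an assumed 132 MHz
master clock (an illustrative value) and divisor 128: clkevt.freq =
132000000 / 128 = 1031250 Hz, the HZ=100 periodic reload becomes
(1031250 + 50) / 100 = 10313 counts, which still fits the 16-bit
counter, and the oneshot range shrinks from 0xffff / 32768 Hz (about
2 s) to 0xffff / 1031250 Hz (about 64 ms). That is the trade-off the
new ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK option exposes: roughly 30.5 us
resolution on the slow clock versus about 1 us from a divided master
clock, which is why -rt defaults to the faster rate.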
+Index: linux-2.6/drivers/misc/Kconfig
+===================================================================
+--- linux-2.6.orig/drivers/misc/Kconfig
++++ linux-2.6/drivers/misc/Kconfig
+@@ -82,6 +82,7 @@ config AB8500_PWM
+ config ATMEL_TCLIB
+ bool "Atmel AT32/AT91 Timer/Counter Library"
+ depends on (AVR32 || ARCH_AT91)
++ default y if PREEMPT_RT_FULL
+ help
+ Select this if you want a library to allocate the Timer/Counter
+ blocks found on many Atmel processors. This facilitates using
+@@ -97,8 +98,7 @@ config ATMEL_TCB_CLKSRC
+ are combined to make a single 32-bit timer.
- local_irq_disable();
-+ system_state = SYSTEM_SUSPEND;
+ When GENERIC_CLOCKEVENTS is defined, the third timer channel
+- may be used as a clock event device supporting oneshot mode
+- (delays of up to two seconds) based on the 32 KiHz clock.
++ may be used as a clock event device supporting oneshot mode.
- error = syscore_suspend();
- if (error)
-@@ -445,6 +449,7 @@ static int resume_target_kernel(bool pla
- syscore_resume();
+ config ATMEL_TCB_CLKSRC_BLOCK
+ int
+@@ -112,6 +112,14 @@ config ATMEL_TCB_CLKSRC_BLOCK
+ TC can be used for other purposes, such as PWM generation and
+ interval timing.
- Enable_irqs:
-+ system_state = SYSTEM_RUNNING;
- local_irq_enable();
++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
++ bool "TC Block use 32 KiHz clock"
++ depends on ATMEL_TCB_CLKSRC
++ default y if !PREEMPT_RT_FULL
++ help
++ Select this to use the 32 KiHz base clock rate as the TC
++ block clock source for clock events.
++
+ config IBM_ASM
+ tristate "Device driver for IBM RSA service processor"
+ depends on X86 && PCI && INPUT && EXPERIMENTAL
+@@ -133,6 +141,35 @@ config IBM_ASM
+ for information on the specific driver level and support statement
+ for your IBM server.
- Enable_cpus:
-@@ -524,6 +529,7 @@ int hibernation_platform_enter(void)
- goto Platform_finish;
++config HWLAT_DETECTOR
++ tristate "Testing module to detect hardware-induced latencies"
++ depends on DEBUG_FS
++ depends on RING_BUFFER
++ default m
++ ---help---
++ A simple hardware latency detector. Use this module to detect
++ large latencies introduced by the behavior of the underlying
++ system firmware external to Linux. We do this by periodically
++ calling stop_machine to grab all available CPUs and measure
++ for unexplainable gaps in the CPU timestamp counter(s). By
++ default, the module is not enabled until the "enable" file
++ within the "hwlat_detector" debugfs directory is toggled.
++
++ This module is often used to detect SMI (System Management
++ Interrupts) on x86 systems, though it is not x86-specific. To
++ this end, we default to using a sample window of 1 second,
++ during which we will sample for 0.5 seconds. If an SMI or
++ similar event occurs during that time, it is recorded
++ into an 8K-sample global ring buffer until retrieved.
++
++ WARNING: This software should never be enabled (it can be built
++ but should not be turned on after it is loaded) in a production
++ environment where high latencies are a concern, since the
++ sampling mechanism itself introduces latencies for regular
++ tasks while the CPU(s) are being held.
++
++ If unsure, say N
++
+ config PHANTOM
+ tristate "Sensable PHANToM (PCI)"
+ depends on PCI
+Index: linux-2.6/drivers/net/tulip/tulip_core.c
+===================================================================
+--- linux-2.6.orig/drivers/net/tulip/tulip_core.c
++++ linux-2.6/drivers/net/tulip/tulip_core.c
+@@ -1951,6 +1951,7 @@ static void __devexit tulip_remove_one (
+ pci_iounmap(pdev, tp->base_addr);
+ free_netdev (dev);
+ pci_release_regions (pdev);
++ pci_disable_device (pdev);
+ pci_set_drvdata (pdev, NULL);
- local_irq_disable();
-+ system_state = SYSTEM_SUSPEND;
- syscore_suspend();
- if (pm_wakeup_pending()) {
- error = -EAGAIN;
-@@ -536,6 +542,7 @@ int hibernation_platform_enter(void)
+ /* pci_power_off (pdev, -1); */
+Index: linux-2.6/drivers/net/8139too.c
+===================================================================
+--- linux-2.6.orig/drivers/net/8139too.c
++++ linux-2.6/drivers/net/8139too.c
+@@ -2173,7 +2173,11 @@ static irqreturn_t rtl8139_interrupt (in
+ */
+ static void rtl8139_poll_controller(struct net_device *dev)
+ {
+- disable_irq(dev->irq);
++ /*
++ * use _nosync() variant - might be used by netconsole
++ * from atomic contexts:
++ */
++ disable_irq_nosync(dev->irq);
+ rtl8139_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+ }
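disable_irq() does not return until any running handler for the line
has finished; once handlers are forced into threads (as on -rt), that
wait can sleep, which is fatal in the atomic contexts netconsole may
call the poll controller from. disable_irq_nosync() only masks the
line. The general pattern for a netpoll controller, mirroring the hunk
(my_poll_controller() and my_interrupt() are illustrative names):

static void my_poll_controller(struct net_device *dev)
{
        disable_irq_nosync(dev->irq);   /* mask without waiting */
        my_interrupt(dev->irq, dev);    /* illustrative handler call */
        enable_irq(dev->irq);
}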
+Index: linux-2.6/drivers/net/ehea/ehea_main.c
+===================================================================
+--- linux-2.6.orig/drivers/net/ehea/ehea_main.c
++++ linux-2.6/drivers/net/ehea/ehea_main.c
+@@ -1369,7 +1369,7 @@ static int ehea_reg_interrupts(struct ne
+ "%s-queue%d", dev->name, i);
+ ret = ibmebus_request_irq(pr->eq->attr.ist1,
+ ehea_recv_irq_handler,
+- IRQF_DISABLED, pr->int_send_name,
++ IRQF_NO_THREAD, pr->int_send_name,
+ pr);
+ if (ret) {
+ netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
+Index: linux-2.6/drivers/net/arm/at91_ether.c
+===================================================================
+--- linux-2.6.orig/drivers/net/arm/at91_ether.c
++++ linux-2.6/drivers/net/arm/at91_ether.c
+@@ -199,7 +199,9 @@ static irqreturn_t at91ether_phy_interru
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned int phy;
++ unsigned long flags;
+
++ spin_lock_irqsave(&lp->lock, flags);
+ /*
+ * This handler is triggered on both edges, but the PHY chips expect
+ * level-triggering. We therefore have to check if the PHY actually has
+@@ -241,6 +243,7 @@ static irqreturn_t at91ether_phy_interru
+
+ done:
+ disable_mdi();
++ spin_unlock_irqrestore(&lp->lock, flags);
- Power_up:
- syscore_resume();
-+ system_state = SYSTEM_RUNNING;
- local_irq_enable();
- enable_nonboot_cpus();
+ return IRQ_HANDLED;
+ }
+@@ -397,9 +400,11 @@ static void at91ether_check_link(unsigne
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct at91_private *lp = netdev_priv(dev);
-Index: linux-2.6/kernel/power/suspend.c
++ spin_lock_irq(&lp->lock);
+ enable_mdi();
+ update_linkspeed(dev, 1);
+ disable_mdi();
++ spin_unlock_irq(&lp->lock);
+
+ mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
+ }
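Both the PHY interrupt handler and the link-poll timer callback touch
the MDI interface, and with threaded interrupts the two paths can
genuinely interleave, hence the shared lp->lock. Note the asymmetry
the hunk shows: the handler uses spin_lock_irqsave() because, once
threaded, it can no longer assume anything about the interrupt state
on entry, while the timer callback, which always runs with interrupts
enabled, can use plain spin_lock_irq(). A skeleton of the handler side
(lp stands for the driver private data, as in the hunk):

        unsigned long flags;

        spin_lock_irqsave(&lp->lock, flags);    /* safe in any irq state */
        /* ... program the MDI registers ... */
        spin_unlock_irqrestore(&lp->lock, flags);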
+Index: linux-2.6/include/linux/preempt.h
===================================================================
---- linux-2.6.orig/kernel/power/suspend.c
-+++ linux-2.6/kernel/power/suspend.c
-@@ -163,6 +163,8 @@ static int suspend_enter(suspend_state_t
- arch_suspend_disable_irqs();
- BUG_ON(!irqs_disabled());
+--- linux-2.6.orig/include/linux/preempt.h
++++ linux-2.6/include/linux/preempt.h
+@@ -33,12 +33,18 @@ do { \
+ barrier(); \
+ } while (0)
-+ system_state = SYSTEM_SUSPEND;
+-#define preempt_enable_no_resched() \
++#define __preempt_enable_no_resched() \
+ do { \
+ barrier(); \
+ dec_preempt_count(); \
+ } while (0)
+
++#ifndef CONFIG_PREEMPT_RT_BASE
++# define preempt_enable_no_resched() __preempt_enable_no_resched()
++#else
++# define preempt_enable_no_resched() preempt_enable()
++#endif
+
- error = syscore_suspend();
- if (!error) {
- if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
-@@ -172,6 +174,8 @@ static int suspend_enter(suspend_state_t
- syscore_resume();
- }
+ #define preempt_check_resched() \
+ do { \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+@@ -47,7 +53,7 @@ do { \
-+ system_state = SYSTEM_RUNNING;
+ #define preempt_enable() \
+ do { \
+- preempt_enable_no_resched(); \
++ __preempt_enable_no_resched(); \
+ barrier(); \
+ preempt_check_resched(); \
+ } while (0)
+@@ -83,6 +89,7 @@ do { \
+ #else
+
+ #define preempt_disable() do { } while (0)
++#define __preempt_enable_no_resched() do { } while (0)
+ #define preempt_enable_no_resched() do { } while (0)
+ #define preempt_enable() do { } while (0)
+ #define preempt_check_resched() do { } while (0)
+@@ -93,6 +100,27 @@ do { \
+
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define preempt_disable_rt() preempt_disable()
++# define preempt_enable_rt() preempt_enable()
++# define preempt_disable_nort() do { } while (0)
++# define preempt_enable_nort() do { } while (0)
++# ifdef CONFIG_SMP
++ extern void migrate_disable(void);
++ extern void migrate_enable(void);
++# else /* CONFIG_SMP */
++# define migrate_disable() do { } while (0)
++# define migrate_enable() do { } while (0)
++# endif /* CONFIG_SMP */
++#else
++# define preempt_disable_rt() do { } while (0)
++# define preempt_enable_rt() do { } while (0)
++# define preempt_disable_nort() preempt_disable()
++# define preempt_enable_nort() preempt_enable()
++# define migrate_disable() preempt_disable()
++# define migrate_enable() preempt_enable()
++#endif
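The block above gives -rt two annotation families:
preempt_disable_nort()/preempt_enable_nort() for sections that must
disable preemption on mainline but can stay preemptible on -rt, and
migrate_disable()/migrate_enable(), which on PREEMPT_RT_FULL with SMP
are real functions that pin the task to its CPU while leaving it
preemptible; everywhere else they decay to plain
preempt_disable()/preempt_enable(). A hypothetical user that merely
needs a stable CPU id:

static void report_cpu(void)            /* illustrative only */
{
        migrate_disable();              /* CPU cannot change under us... */
        pr_info("running on cpu %d\n", smp_processor_id());
        migrate_enable();               /* ...yet we stayed preemptible on -rt */
}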
+
- arch_suspend_enable_irqs();
- BUG_ON(irqs_disabled());
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
-Index: linux-2.6/drivers/of/base.c
+ struct preempt_notifier;
+Index: linux-2.6/include/linux/uaccess.h
===================================================================
---- linux-2.6.orig/drivers/of/base.c
-+++ linux-2.6/drivers/of/base.c
-@@ -29,7 +29,7 @@ struct device_node *of_chosen;
- /* use when traversing tree through the allnext, child, sibling,
- * or parent members of struct device_node.
- */
--DEFINE_RWLOCK(devtree_lock);
-+DEFINE_RAW_SPINLOCK(devtree_lock);
+--- linux-2.6.orig/include/linux/uaccess.h
++++ linux-2.6/include/linux/uaccess.h
+@@ -6,38 +6,37 @@
- int of_n_addr_cells(struct device_node *np)
+ /*
+ * These routines enable/disable the pagefault handler in that
+- * it will not take any locks and go straight to the fixup table.
+- *
+- * They have great resemblance to the preempt_disable/enable calls
+- * and in fact they are identical; this is because currently there is
+- * no other way to make the pagefault handlers do this. So we do
+- * disable preemption but we don't necessarily care about that.
++ * it will not take any MM locks and go straight to the fixup table.
+ */
+-static inline void pagefault_disable(void)
++static inline void raw_pagefault_disable(void)
{
-@@ -138,16 +138,14 @@ void of_node_put(struct device_node *nod
- EXPORT_SYMBOL(of_node_put);
- #endif /* !CONFIG_SPARC */
+ inc_preempt_count();
+- /*
+- * make sure to have issued the store before a pagefault
+- * can hit.
+- */
+ barrier();
+ }
--struct property *of_find_property(const struct device_node *np,
-- const char *name,
-- int *lenp)
-+static struct property *__of_find_property(const struct device_node *np,
-+ const char *name, int *lenp)
+-static inline void pagefault_enable(void)
++static inline void raw_pagefault_enable(void)
{
- struct property *pp;
-
- if (!np)
- return NULL;
+- /*
+- * make sure to issue those last loads/stores before enabling
+- * the pagefault handler again.
+- */
+ barrier();
+ dec_preempt_count();
+- /*
+- * make sure we do..
+- */
+ barrier();
+ preempt_check_resched();
+ }
-- read_lock(&devtree_lock);
- for (pp = np->properties; pp != 0; pp = pp->next) {
- if (of_prop_cmp(pp->name, name) == 0) {
- if (lenp != 0)
-@@ -155,7 +153,20 @@ struct property *of_find_property(const
- break;
- }
- }
-- read_unlock(&devtree_lock);
-+
-+ return pp;
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline void pagefault_disable(void)
++{
++ raw_pagefault_disable();
+}
+
-+struct property *of_find_property(const struct device_node *np,
-+ const char *name,
-+ int *lenp)
++static inline void pagefault_enable(void)
+{
-+ struct property *pp;
-+ unsigned long flags;
++ raw_pagefault_enable();
++}
++#else
++extern void pagefault_disable(void);
++extern void pagefault_enable(void);
++#endif
+
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
-+ pp = __of_find_property(np, name, lenp);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ #ifndef ARCH_HAS_NOCACHE_UACCESS
- return pp;
- }
-@@ -173,13 +184,13 @@ struct device_node *of_find_all_nodes(st
- {
- struct device_node *np;
+ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+@@ -77,9 +76,9 @@ static inline unsigned long __copy_from_
+ mm_segment_t old_fs = get_fs(); \
+ \
+ set_fs(KERNEL_DS); \
+- pagefault_disable(); \
++ raw_pagefault_disable(); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
+- pagefault_enable(); \
++ raw_pagefault_enable(); \
+ set_fs(old_fs); \
+ ret; \
+ })
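The split leaves raw_pagefault_disable()/raw_pagefault_enable() for
callers like the KERNEL_DS probe above, while pagefault_disable() and
pagefault_enable() become out-of-line functions on PREEMPT_RT_FULL
(note the extern declarations under CONFIG_PREEMPT_RT_FULL). The
canonical caller is an atomic user access that must fail rather than
sleep; a sketch (peek_user() is an illustrative name):

static int peek_user(u32 __user *uaddr, u32 *val)
{
        int ret;

        pagefault_disable();    /* faults are suppressed, not serviced */
        ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
        pagefault_enable();

        return ret ? -EFAULT : 0;       /* nonzero: bytes left uncopied */
}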
+Index: linux-2.6/arch/alpha/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/alpha/mm/fault.c
++++ linux-2.6/arch/alpha/mm/fault.c
+@@ -107,7 +107,7 @@ do_page_fault(unsigned long address, uns
-- read_lock(&devtree_lock);
-+ raw_spin_lock(&devtree_lock);
- np = prev ? prev->allnext : allnodes;
- for (; np != NULL; np = np->allnext)
- if (of_node_get(np))
- break;
- of_node_put(prev);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock(&devtree_lock);
- return np;
- }
- EXPORT_SYMBOL(of_find_all_nodes);
-@@ -188,8 +199,20 @@ EXPORT_SYMBOL(of_find_all_nodes);
- * Find a property with a given name for a given node
- * and return the value.
- */
-+static const void *__of_get_property(const struct device_node *np,
-+ const char *name, int *lenp)
-+{
-+ struct property *pp = __of_find_property(np, name, lenp);
-+
-+ return pp ? pp->value : NULL;
-+}
-+
-+/*
-+ * Find a property with a given name for a given node
-+ * and return the value.
-+ */
- const void *of_get_property(const struct device_node *np, const char *name,
-- int *lenp)
-+ int *lenp)
- {
- struct property *pp = of_find_property(np, name, lenp);
+ /* If we're in an interrupt context, or have no user context,
+ we must not take the fault. */
+- if (!mm || in_atomic())
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ #ifdef CONFIG_ALPHA_LARGE_VMALLOC
+Index: linux-2.6/arch/arm/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/arm/mm/fault.c
++++ linux-2.6/arch/arm/mm/fault.c
+@@ -289,7 +289,7 @@ do_page_fault(unsigned long addr, unsign
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ /*
+Index: linux-2.6/arch/avr32/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/avr32/mm/fault.c
++++ linux-2.6/arch/avr32/mm/fault.c
+@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned l
+ * If we're in an interrupt or have no user context, we must
+ * not take the fault...
+ */
+- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
++ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
+ goto no_context;
-@@ -200,13 +223,13 @@ EXPORT_SYMBOL(of_get_property);
- /** Checks if the given "compat" string matches one of the strings in
- * the device's "compatible" property
- */
--int of_device_is_compatible(const struct device_node *device,
-- const char *compat)
-+static int __of_device_is_compatible(const struct device_node *device,
-+ const char *compat)
- {
- const char* cp;
-- int cplen, l;
-+ int uninitialized_var(cplen), l;
+ local_irq_enable();
+Index: linux-2.6/arch/cris/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/cris/mm/fault.c
++++ linux-2.6/arch/cris/mm/fault.c
+@@ -111,7 +111,7 @@ do_page_fault(unsigned long address, str
+ * user context, we must not take the fault.
+ */
-- cp = of_get_property(device, "compatible", &cplen);
-+ cp = __of_get_property(device, "compatible", &cplen);
- if (cp == NULL)
- return 0;
- while (cplen > 0) {
-@@ -219,6 +242,21 @@ int of_device_is_compatible(const struct
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
- return 0;
- }
-+
-+/** Checks if the given "compat" string matches one of the strings in
-+ * the device's "compatible" property
-+ */
-+int of_device_is_compatible(const struct device_node *device,
-+ const char *compat)
-+{
-+ unsigned long flags;
-+ int res;
-+
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
-+ res = __of_device_is_compatible(device, compat);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-+ return res;
-+}
- EXPORT_SYMBOL(of_device_is_compatible);
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/frv/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/frv/mm/fault.c
++++ linux-2.6/arch/frv/mm/fault.c
+@@ -79,7 +79,7 @@ asmlinkage void do_page_fault(int datamm
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
- /**
-@@ -278,13 +316,14 @@ EXPORT_SYMBOL(of_device_is_available);
- struct device_node *of_get_parent(const struct device_node *node)
- {
- struct device_node *np;
-+ unsigned long flags;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/ia64/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/ia64/mm/fault.c
++++ linux-2.6/arch/ia64/mm/fault.c
+@@ -89,7 +89,7 @@ ia64_do_page_fault (unsigned long addres
+ /*
+ * If we're in an interrupt or have no user context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
- if (!node)
- return NULL;
+ #ifdef CONFIG_VIRTUAL_MEM_MAP
+Index: linux-2.6/arch/m32r/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/m32r/mm/fault.c
++++ linux-2.6/arch/m32r/mm/fault.c
+@@ -115,7 +115,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user context or are running in an
+ * atomic region then we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = of_node_get(node->parent);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_get_parent);
-@@ -303,14 +342,15 @@ EXPORT_SYMBOL(of_get_parent);
- struct device_node *of_get_next_parent(struct device_node *node)
- {
- struct device_node *parent;
-+ unsigned long flags;
+ /* When running in the kernel we expect faults to occur only to
+Index: linux-2.6/arch/m68k/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/m68k/mm/fault.c
++++ linux-2.6/arch/m68k/mm/fault.c
+@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs,
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
- if (!node)
- return NULL;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/microblaze/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/microblaze/mm/fault.c
++++ linux-2.6/arch/microblaze/mm/fault.c
+@@ -107,7 +107,7 @@ void do_page_fault(struct pt_regs *regs,
+ if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
+ is_write = 0;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- parent = of_node_get(node->parent);
- of_node_put(node);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return parent;
- }
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ if (kernel_mode(regs))
+ goto bad_area_nosemaphore;
-@@ -326,14 +366,15 @@ struct device_node *of_get_next_child(co
- struct device_node *prev)
- {
- struct device_node *next;
-+ unsigned long flags;
+Index: linux-2.6/arch/mips/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/mips/mm/fault.c
++++ linux-2.6/arch/mips/mm/fault.c
+@@ -88,7 +88,7 @@ asmlinkage void __kprobes do_page_fault(
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- next = prev ? prev->sibling : node->child;
- for (; next; next = next->sibling)
- if (of_node_get(next))
- break;
- of_node_put(prev);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return next;
- }
- EXPORT_SYMBOL(of_get_next_child);
-@@ -348,14 +389,15 @@ EXPORT_SYMBOL(of_get_next_child);
- struct device_node *of_find_node_by_path(const char *path)
- {
- struct device_node *np = allnodes;
-+ unsigned long flags;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/mn10300/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/mn10300/mm/fault.c
++++ linux-2.6/arch/mn10300/mm/fault.c
+@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- for (; np; np = np->allnext) {
- if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
- && of_node_get(np))
- break;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/parisc/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/parisc/mm/fault.c
++++ linux-2.6/arch/parisc/mm/fault.c
+@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs,
+ unsigned long acc_type;
+ int fault;
+
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/powerpc/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/powerpc/mm/fault.c
++++ linux-2.6/arch/powerpc/mm/fault.c
+@@ -162,7 +162,7 @@ int __kprobes do_page_fault(struct pt_re
}
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_by_path);
-@@ -375,15 +417,16 @@ struct device_node *of_find_node_by_name
- const char *name)
- {
- struct device_node *np;
-+ unsigned long flags;
+ #endif
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext)
- if (np->name && (of_node_cmp(np->name, name) == 0)
- && of_node_get(np))
- break;
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_by_name);
-@@ -404,15 +447,16 @@ struct device_node *of_find_node_by_type
- const char *type)
- {
- struct device_node *np;
-+ unsigned long flags;
+- if (in_atomic() || mm == NULL) {
++ if (!mm || pagefault_disabled()) {
+ if (!user_mode(regs))
+ return SIGSEGV;
+ /* in_atomic() in user mode is really bad,
+Index: linux-2.6/arch/s390/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/s390/mm/fault.c
++++ linux-2.6/arch/s390/mm/fault.c
+@@ -295,7 +295,8 @@ static inline int do_exception(struct pt
+ * user context.
+ */
+ fault = VM_FAULT_BADCONTEXT;
+- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
++ if (unlikely(!user_space_fault(trans_exc_code) ||
++ !mm || pagefault_disabled()))
+ goto out;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext)
- if (np->type && (of_node_cmp(np->type, type) == 0)
- && of_node_get(np))
- break;
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_by_type);
-@@ -435,18 +479,20 @@ struct device_node *of_find_compatible_n
- const char *type, const char *compatible)
- {
- struct device_node *np;
-+ unsigned long flags;
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+@@ -410,7 +411,8 @@ void __kprobes do_asce_exception(struct
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext) {
- if (type
- && !(np->type && (of_node_cmp(np->type, type) == 0)))
- continue;
-- if (of_device_is_compatible(np, compatible) && of_node_get(np))
-+ if (__of_device_is_compatible(np, compatible) &&
-+ of_node_get(np))
- break;
- }
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_compatible_node);
-@@ -468,8 +514,9 @@ struct device_node *of_find_node_with_pr
- {
- struct device_node *np;
- struct property *pp;
-+ unsigned long flags;
+- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
++ if (unlikely(!user_space_fault(trans_exc_code) ||
++ !mm || pagefault_disabled()))
+ goto no_context;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext) {
- for (pp = np->properties; pp != 0; pp = pp->next) {
-@@ -481,20 +528,14 @@ struct device_node *of_find_node_with_pr
- }
- out:
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_with_property);
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/score/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/score/mm/fault.c
++++ linux-2.6/arch/score/mm/fault.c
+@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
--/**
-- * of_match_node - Tell if a device_node has a matching of_match structure
-- * @matches: array of of device match structures to search in
-- * @node: the of device structure to match against
-- *
-- * Low level utility function used by device matching.
-- */
--const struct of_device_id *of_match_node(const struct of_device_id *matches,
-- const struct device_node *node)
-+static
-+const struct of_device_id *__of_match_node(const struct of_device_id *matches,
-+ const struct device_node *node)
- {
- if (!matches)
- return NULL;
-@@ -508,14 +549,33 @@ const struct of_device_id *of_match_node
- match &= node->type
- && !strcmp(matches->type, node->type);
- if (matches->compatible[0])
-- match &= of_device_is_compatible(node,
-- matches->compatible);
-+ match &= __of_device_is_compatible(node,
-+ matches->compatible);
- if (match)
- return matches;
- matches++;
- }
- return NULL;
- }
-+
-+/**
-+ * of_match_node - Tell if a device_node has a matching of_match structure
-+ * @matches: array of of device match structures to search in
-+ * @node: the of device structure to match against
-+ *
-+ * Low level utility function used by device matching.
-+ */
-+const struct of_device_id *of_match_node(const struct of_device_id *matches,
-+ const struct device_node *node)
-+{
-+ const struct of_device_id *match;
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
-+ match = __of_match_node(matches, node);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-+ return match;
-+}
- EXPORT_SYMBOL(of_match_node);
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/sh/mm/fault_32.c
+===================================================================
+--- linux-2.6.orig/arch/sh/mm/fault_32.c
++++ linux-2.6/arch/sh/mm/fault_32.c
+@@ -166,7 +166,7 @@ asmlinkage void __kprobes do_page_fault(
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
- /**
-@@ -534,15 +594,16 @@ struct device_node *of_find_matching_nod
- const struct of_device_id *matches)
- {
- struct device_node *np;
-+ unsigned long flags;
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/sparc/mm/fault_32.c
+===================================================================
+--- linux-2.6.orig/arch/sparc/mm/fault_32.c
++++ linux-2.6/arch/sparc/mm/fault_32.c
+@@ -248,8 +248,8 @@ asmlinkage void do_sparc_fault(struct pt
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
+- goto no_context;
++ if (!mm || pagefault_disabled())
++ goto no_context;
-- read_lock(&devtree_lock);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np = from ? from->allnext : allnodes;
- for (; np; np = np->allnext) {
-- if (of_match_node(matches, np) && of_node_get(np))
-+ if (__of_match_node(matches, np) && of_node_get(np))
- break;
- }
- of_node_put(from);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return np;
- }
- EXPORT_SYMBOL(of_find_matching_node);
-@@ -585,12 +646,12 @@ struct device_node *of_find_node_by_phan
- {
- struct device_node *np;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
-- read_lock(&devtree_lock);
-+ raw_spin_lock(&devtree_lock);
- for (np = allnodes; np; np = np->allnext)
- if (np->phandle == handle)
- break;
- of_node_get(np);
-- read_unlock(&devtree_lock);
-+ raw_spin_unlock(&devtree_lock);
- return np;
- }
- EXPORT_SYMBOL(of_find_node_by_phandle);
-@@ -745,18 +806,18 @@ int prom_add_property(struct device_node
- unsigned long flags;
+Index: linux-2.6/arch/sparc/mm/fault_64.c
+===================================================================
+--- linux-2.6.orig/arch/sparc/mm/fault_64.c
++++ linux-2.6/arch/sparc/mm/fault_64.c
+@@ -322,7 +322,7 @@ asmlinkage void __kprobes do_sparc64_fau
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto intr_or_no_mm;
- prop->next = NULL;
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (strcmp(prop->name, (*next)->name) == 0) {
- /* duplicate ! don't insert it */
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- return -1;
- }
- next = &(*next)->next;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+Index: linux-2.6/arch/tile/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/tile/mm/fault.c
++++ linux-2.6/arch/tile/mm/fault.c
+@@ -346,7 +346,7 @@ static int handle_page_fault(struct pt_r
+ * If we're in an interrupt, have no user context or are running in an
+ * atomic region then we must not take the fault.
+ */
+- if (in_atomic() || !mm) {
++ if (!mm || pagefault_disabled()) {
+ vma = NULL; /* happy compiler */
+ goto bad_area_nosemaphore;
+ }
+Index: linux-2.6/arch/um/kernel/trap.c
+===================================================================
+--- linux-2.6.orig/arch/um/kernel/trap.c
++++ linux-2.6/arch/um/kernel/trap.c
+@@ -37,7 +37,7 @@ int handle_page_fault(unsigned long addr
+ * If the fault was during atomic operation, don't take the fault, just
+ * fail.
+ */
+- if (in_atomic())
++ if (!mm || pagefault_disabled())
+ goto out_nosemaphore;
+
+ down_read(&mm->mmap_sem);
+Index: linux-2.6/arch/x86/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/x86/mm/fault.c
++++ linux-2.6/arch/x86/mm/fault.c
+@@ -1065,7 +1065,7 @@ do_page_fault(struct pt_regs *regs, unsi
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+Index: linux-2.6/arch/xtensa/mm/fault.c
+===================================================================
+--- linux-2.6.orig/arch/xtensa/mm/fault.c
++++ linux-2.6/arch/xtensa/mm/fault.c
+@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
+ /* If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm) {
++ if (!mm || pagefault_disabled()) {
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
}
- *next = prop;
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+Index: linux-2.6/mm/filemap.c
+===================================================================
+--- linux-2.6.orig/mm/filemap.c
++++ linux-2.6/mm/filemap.c
+@@ -2040,7 +2040,7 @@ size_t iov_iter_copy_from_user_atomic(st
+ char *kaddr;
+ size_t copied;
- #ifdef CONFIG_PROC_DEVICETREE
- /* try to add to proc as well if it was initialized */
-@@ -781,7 +842,7 @@ int prom_remove_property(struct device_n
- unsigned long flags;
- int found = 0;
+- BUG_ON(!in_atomic());
++ BUG_ON(!pagefault_disabled());
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (likely(i->nr_segs == 1)) {
+ int left;
+Index: linux-2.6/arch/x86/mm/highmem_32.c
+===================================================================
+--- linux-2.6.orig/arch/x86/mm/highmem_32.c
++++ linux-2.6/arch/x86/mm/highmem_32.c
+@@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- BUG_ON(!pte_none(*(kmap_pte-idx)));
++ WARN_ON(!pte_none(*(kmap_pte-idx)));
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (*next == prop) {
-@@ -794,7 +855,7 @@ int prom_remove_property(struct device_n
- }
- next = &(*next)->next;
- }
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return (void *)vaddr;
- if (!found)
- return -ENODEV;
-@@ -824,7 +885,7 @@ int prom_update_property(struct device_n
- unsigned long flags;
- int found = 0;
+Index: linux-2.6/include/linux/kernel.h
+===================================================================
+--- linux-2.6.orig/include/linux/kernel.h
++++ linux-2.6/include/linux/kernel.h
+@@ -346,7 +346,7 @@ extern enum system_states {
+ SYSTEM_HALT,
+ SYSTEM_POWER_OFF,
+ SYSTEM_RESTART,
+- SYSTEM_SUSPEND_DISK,
++ SYSTEM_SUSPEND,
+ } system_state;
+ #define TAINT_PROPRIETARY_MODULE 0
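The SYSTEM_SUSPEND_DISK -> SYSTEM_SUSPEND rename above widens the state to cover suspend as well as hibernation; the hibernate.c and suspend.c hunks that follow then bracket syscore_suspend()/syscore_resume() with it. A condensed sketch of that bracketing, assuming the state is only consulted by IRQ-off code (the helper function name is invented for this note):

  /* Sketch only -- not part of the patch. */
  static int syscore_suspend_bracketed(void)
  {
          int error;

          local_irq_disable();
          system_state = SYSTEM_SUSPEND;
          error = syscore_suspend();
          if (!error)
                  syscore_resume();
          system_state = SYSTEM_RUNNING;
          local_irq_enable();
          return error;
  }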
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- next = &np->properties;
- while (*next) {
- if (*next == oldprop) {
-@@ -838,7 +899,7 @@ int prom_update_property(struct device_n
- }
- next = &(*next)->next;
- }
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- if (!found)
- return -ENODEV;
-@@ -868,12 +929,12 @@ void of_attach_node(struct device_node *
- {
- unsigned long flags;
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- np->sibling = np->parent->child;
- np->allnext = allnodes;
- np->parent->child = np;
- allnodes = np;
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- }
- /**
-@@ -887,7 +948,7 @@ void of_detach_node(struct device_node *
- struct device_node *parent;
- unsigned long flags;
-- write_lock_irqsave(&devtree_lock, flags);
-+ raw_spin_lock_irqsave(&devtree_lock, flags);
- parent = np->parent;
- if (!parent)
-@@ -918,7 +979,7 @@ void of_detach_node(struct device_node *
- of_node_set_flag(np, OF_DETACHED);
- out_unlock:
-- write_unlock_irqrestore(&devtree_lock, flags);
-+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- }
- #endif /* defined(CONFIG_OF_DYNAMIC) */
+Index: linux-2.6/kernel/power/hibernate.c
+===================================================================
+--- linux-2.6.orig/kernel/power/hibernate.c
++++ linux-2.6/kernel/power/hibernate.c
+@@ -278,6 +278,8 @@ static int create_image(int platform_mod
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
++
+ error = syscore_suspend();
+ if (error) {
+ printk(KERN_ERR "PM: Some system devices failed to power down, "
+@@ -305,6 +307,7 @@ static int create_image(int platform_mod
+ syscore_resume();
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+ Enable_cpus:
+@@ -412,6 +415,7 @@ static int resume_target_kernel(bool pla
+ goto Enable_cpus;
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
+ error = syscore_suspend();
+ if (error)
+@@ -445,6 +449,7 @@ static int resume_target_kernel(bool pla
+ syscore_resume();
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
-Index: linux-2.6/arch/sparc/kernel/prom_common.c
-===================================================================
---- linux-2.6.orig/arch/sparc/kernel/prom_common.c
-+++ linux-2.6/arch/sparc/kernel/prom_common.c
-@@ -67,7 +67,7 @@ int of_set_property(struct device_node *
- err = -ENODEV;
- mutex_lock(&of_set_property_mutex);
-- write_lock(&devtree_lock);
-+ raw_spin_lock(&devtree_lock);
- prevp = &dp->properties;
- while (*prevp) {
- struct property *prop = *prevp;
-@@ -94,7 +94,7 @@ int of_set_property(struct device_node *
- }
- prevp = &(*prevp)->next;
- }
-- write_unlock(&devtree_lock);
-+ raw_spin_unlock(&devtree_lock);
- mutex_unlock(&of_set_property_mutex);
- /* XXX Upate procfs if necessary... */
-Index: linux-2.6/include/linux/of.h
-===================================================================
---- linux-2.6.orig/include/linux/of.h
-+++ linux-2.6/include/linux/of.h
-@@ -68,7 +68,7 @@ struct device_node {
- /* Pointer for first entry in chain of all nodes. */
- extern struct device_node *allnodes;
- extern struct device_node *of_chosen;
--extern rwlock_t devtree_lock;
-+extern raw_spinlock_t devtree_lock;
+ Enable_cpus:
+@@ -524,6 +529,7 @@ int hibernation_platform_enter(void)
+ goto Platform_finish;
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
+ syscore_suspend();
+ if (pm_wakeup_pending()) {
+ error = -EAGAIN;
+@@ -536,6 +542,7 @@ int hibernation_platform_enter(void)
+ Power_up:
+ syscore_resume();
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+ enable_nonboot_cpus();
- static inline bool of_have_populated_dt(void)
- {
-Index: linux-2.6/include/linux/list.h
---- linux-2.6.orig/include/linux/list.h
-+++ linux-2.6/include/linux/list.h
-@@ -362,6 +362,17 @@ static inline void list_splice_tail_init
- list_entry((ptr)->next, type, member)
- /**
-+ * list_last_entry - get the last element from a list
-+ * @ptr: the list head to take the element from.
-+ * @type: the type of the struct this is embedded in.
-+ * @member: the name of the list_struct within the struct.
-+ *
-+ * Note, that list is expected to be not empty.
-+ */
-+#define list_last_entry(ptr, type, member) \
-+ list_entry((ptr)->prev, type, member)
-+
-+/**
- * list_for_each - iterate over a list
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
-Index: linux-2.6/mm/page_alloc.c
-===================================================================
---- linux-2.6.orig/mm/page_alloc.c
-+++ linux-2.6/mm/page_alloc.c
-@@ -57,6 +57,7 @@
- #include <linux/ftrace_event.h>
- #include <linux/memcontrol.h>
- #include <linux/prefetch.h>
-+#include <linux/locallock.h>
-
- #include <asm/tlbflush.h>
- #include <asm/div64.h>
-@@ -222,6 +223,18 @@ EXPORT_SYMBOL(nr_node_ids);
- EXPORT_SYMBOL(nr_online_nodes);
- #endif
-+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
-+
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+# define cpu_lock_irqsave(cpu, flags) \
-+ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
-+# define cpu_unlock_irqrestore(cpu, flags) \
-+ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
-+#else
-+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
-+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
-+#endif
- int page_group_by_mobility_disabled __read_mostly;
+Index: linux-2.6/kernel/power/suspend.c
===================================================================
+--- linux-2.6.orig/kernel/power/suspend.c
++++ linux-2.6/kernel/power/suspend.c
+@@ -163,6 +163,8 @@ static int suspend_enter(suspend_state_t
+ arch_suspend_disable_irqs();
+ BUG_ON(!irqs_disabled());
++ system_state = SYSTEM_SUSPEND;
+
+ error = syscore_suspend();
+ if (!error) {
+ if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
+@@ -172,6 +174,8 @@ static int suspend_enter(suspend_state_t
+ syscore_resume();
+ }
++ system_state = SYSTEM_RUNNING;
+
+ arch_suspend_enable_irqs();
+ BUG_ON(irqs_disabled());
+
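The drivers/of/base.c changes that follow convert devtree_lock from an rwlock to a raw spinlock: on PREEMPT_RT an rwlock_t becomes a sleeping lock, but the device-tree readers run in contexts that must not sleep, so a non-sleeping raw_spinlock_t is used and every reader/writer pair is converted accordingly. A hedged sketch of the resulting discipline (the function is invented for illustration; allnodes and devtree_lock are the symbols from the hunks):

  /* Sketch only -- not part of the patch. */
  static struct device_node *first_node_get(void)
  {
          struct device_node *np;
          unsigned long flags;

          raw_spin_lock_irqsave(&devtree_lock, flags);
          np = of_node_get(allnodes);     /* take the ref under the lock */
          raw_spin_unlock_irqrestore(&devtree_lock, flags);
          return np;
  }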
+Index: linux-2.6/drivers/of/base.c
+===================================================================
+--- linux-2.6.orig/drivers/of/base.c
++++ linux-2.6/drivers/of/base.c
+@@ -29,7 +29,7 @@ struct device_node *of_chosen;
+ /* use when traversing tree through the allnext, child, sibling,
+ * or parent members of struct device_node.
+ */
+-DEFINE_RWLOCK(devtree_lock);
++DEFINE_RAW_SPINLOCK(devtree_lock);
- static void set_pageblock_migratetype(struct page *page, int migratetype)
-@@ -580,7 +593,7 @@ static inline int free_pages_check(struc
- }
+ int of_n_addr_cells(struct device_node *np)
+ {
+@@ -138,16 +138,14 @@ void of_node_put(struct device_node *nod
+ EXPORT_SYMBOL(of_node_put);
+ #endif /* !CONFIG_SPARC */
- /*
-- * Frees a number of pages from the PCP lists
-+ * Frees a number of pages which have been collected from the pcp lists.
- * Assumes all pages on list are in same zone, and of same order.
- * count is the number of pages to free.
- *
-@@ -591,16 +604,42 @@ static inline int free_pages_check(struc
- * pinned" detection logic.
- */
- static void free_pcppages_bulk(struct zone *zone, int count,
-- struct per_cpu_pages *pcp)
-+ struct list_head *list)
+-struct property *of_find_property(const struct device_node *np,
+- const char *name,
+- int *lenp)
++static struct property *__of_find_property(const struct device_node *np,
++ const char *name, int *lenp)
{
-- int migratetype = 0;
-- int batch_free = 0;
- int to_free = count;
-+ unsigned long flags;
+ struct property *pp;
-- spin_lock(&zone->lock);
-+ spin_lock_irqsave(&zone->lock, flags);
- zone->all_unreclaimable = 0;
- zone->pages_scanned = 0;
+ if (!np)
+ return NULL;
-+ while (!list_empty(list)) {
-+ struct page *page = list_first_entry(list, struct page, lru);
+- read_lock(&devtree_lock);
+ for (pp = np->properties; pp != 0; pp = pp->next) {
+ if (of_prop_cmp(pp->name, name) == 0) {
+ if (lenp != 0)
+@@ -155,7 +153,20 @@ struct property *of_find_property(const
+ break;
+ }
+ }
+- read_unlock(&devtree_lock);
+
-+ /* must delete as __free_one_page list manipulates */
-+ list_del(&page->lru);
-+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
-+ __free_one_page(page, zone, 0, page_private(page));
-+ trace_mm_page_pcpu_drain(page, 0, page_private(page));
-+ to_free--;
-+ }
-+ WARN_ON(to_free != 0);
-+ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-+ spin_unlock_irqrestore(&zone->lock, flags);
++ return pp;
+}
+
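The split introduced here -- an unlocked __of_find_property() helper plus a locking of_find_property() wrapper a few lines below -- is the pattern the rest of the file follows, so code that already holds devtree_lock can reuse the helper without deadlocking on the now non-recursive raw spinlock. A sketch of a caller combining two lookups in one critical section (the function and property names are invented for this example):

  /* Sketch only -- not part of the patch. */
  static bool node_is_mapped_and_compatible(const struct device_node *np)
  {
          unsigned long flags;
          bool ret;

          raw_spin_lock_irqsave(&devtree_lock, flags);
          ret = __of_find_property(np, "reg", NULL) != NULL &&
                __of_find_property(np, "compatible", NULL) != NULL;
          raw_spin_unlock_irqrestore(&devtree_lock, flags);
          return ret;
  }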
-+/*
-+ * Moves a number of pages from the PCP lists to free list which
-+ * is freed outside of the locked region.
-+ *
-+ * Assumes all pages on list are in same zone, and of same order.
-+ * count is the number of pages to free.
-+ */
-+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
-+ struct list_head *dst)
++struct property *of_find_property(const struct device_node *np,
++ const char *name,
++ int *lenp)
+{
-+ int migratetype = 0, batch_free = 0;
-+
- while (to_free) {
- struct page *page;
- struct list_head *list;
-@@ -616,7 +655,7 @@ static void free_pcppages_bulk(struct zo
- batch_free++;
- if (++migratetype == MIGRATE_PCPTYPES)
- migratetype = 0;
-- list = &pcp->lists[migratetype];
-+ list = &src->lists[migratetype];
- } while (list_empty(list));
-
- /* This is the only non-empty list. Free them all. */
-@@ -624,28 +663,25 @@ static void free_pcppages_bulk(struct zo
- batch_free = to_free;
-
- do {
-- page = list_entry(list->prev, struct page, lru);
-- /* must delete as __free_one_page list manipulates */
-+ page = list_last_entry(list, struct page, lru);
- list_del(&page->lru);
-- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
-- __free_one_page(page, zone, 0, page_private(page));
-- trace_mm_page_pcpu_drain(page, 0, page_private(page));
-+ list_add(&page->lru, dst);
- } while (--to_free && --batch_free && !list_empty(list));
- }
-- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-- spin_unlock(&zone->lock);
- }
-
- static void free_one_page(struct zone *zone, struct page *page, int order,
- int migratetype)
- {
-- spin_lock(&zone->lock);
++ struct property *pp;
+ unsigned long flags;
+
-+ spin_lock_irqsave(&zone->lock, flags);
- zone->all_unreclaimable = 0;
- zone->pages_scanned = 0;
-
- __free_one_page(page, zone, order, migratetype);
- __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
-- spin_unlock(&zone->lock);
-+ spin_unlock_irqrestore(&zone->lock, flags);
- }
-
- static bool free_pages_prepare(struct page *page, unsigned int order)
-@@ -682,13 +718,13 @@ static void __free_pages_ok(struct page
- if (!free_pages_prepare(page, order))
- return;
++ raw_spin_lock_irqsave(&devtree_lock, flags);
++ pp = __of_find_property(np, name, lenp);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- if (unlikely(wasMlocked))
- free_page_mlock(page);
- __count_vm_events(PGFREE, 1 << order);
- free_one_page(page_zone(page), page, order,
- get_pageblock_migratetype(page));
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
+ return pp;
}
-
- /*
-@@ -1064,16 +1100,18 @@ static int rmqueue_bulk(struct zone *zon
- void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+@@ -173,13 +184,13 @@ struct device_node *of_find_all_nodes(st
{
- unsigned long flags;
-+ LIST_HEAD(dst);
- int to_drain;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- if (pcp->count >= pcp->batch)
- to_drain = pcp->batch;
- else
- to_drain = pcp->count;
-- free_pcppages_bulk(zone, to_drain, pcp);
-+ isolate_pcp_pages(to_drain, pcp, &dst);
- pcp->count -= to_drain;
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
-+ free_pcppages_bulk(zone, to_drain, &dst);
- }
- #endif
-
-@@ -1092,16 +1130,21 @@ static void drain_pages(unsigned int cpu
- for_each_populated_zone(zone) {
- struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
-+ LIST_HEAD(dst);
-+ int count;
-
-- local_irq_save(flags);
-+ cpu_lock_irqsave(cpu, flags);
- pset = per_cpu_ptr(zone->pageset, cpu);
+ struct device_node *np;
- pcp = &pset->pcp;
-- if (pcp->count) {
-- free_pcppages_bulk(zone, pcp->count, pcp);
-+ count = pcp->count;
-+ if (count) {
-+ isolate_pcp_pages(count, pcp, &dst);
- pcp->count = 0;
- }
-- local_irq_restore(flags);
-+ cpu_unlock_irqrestore(cpu, flags);
-+ if (count)
-+ free_pcppages_bulk(zone, count, &dst);
- }
+- read_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ np = prev ? prev->allnext : allnodes;
+ for (; np != NULL; np = np->allnext)
+ if (of_node_get(np))
+ break;
+ of_node_put(prev);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ return np;
}
-
-@@ -1118,7 +1161,14 @@ void drain_local_pages(void *arg)
+ EXPORT_SYMBOL(of_find_all_nodes);
+@@ -188,8 +199,20 @@ EXPORT_SYMBOL(of_find_all_nodes);
+ * Find a property with a given name for a given node
+ * and return the value.
*/
- void drain_all_pages(void)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- on_each_cpu(drain_local_pages, NULL, 1);
-+#else
-+ int i;
++static const void *__of_get_property(const struct device_node *np,
++ const char *name, int *lenp)
++{
++ struct property *pp = __of_find_property(np, name, lenp);
+
-+ for_each_online_cpu(i)
-+ drain_pages(i);
-+#endif
- }
-
- #ifdef CONFIG_HIBERNATION
-@@ -1174,7 +1224,7 @@ void free_hot_cold_page(struct page *pag
-
- migratetype = get_pageblock_migratetype(page);
- set_page_private(page, migratetype);
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- if (unlikely(wasMlocked))
- free_page_mlock(page);
- __count_vm_event(PGFREE);
-@@ -1201,12 +1251,19 @@ void free_hot_cold_page(struct page *pag
- list_add(&page->lru, &pcp->lists[migratetype]);
- pcp->count++;
- if (pcp->count >= pcp->high) {
-- free_pcppages_bulk(zone, pcp->batch, pcp);
-+ LIST_HEAD(dst);
-+ int count;
++ return pp ? pp->value : NULL;
++}
+
-+ isolate_pcp_pages(pcp->batch, pcp, &dst);
- pcp->count -= pcp->batch;
-+ count = pcp->batch;
-+ local_unlock_irqrestore(pa_lock, flags);
-+ free_pcppages_bulk(zone, count, &dst);
-+ return;
- }
++/*
++ * Find a property with a given name for a given node
++ * and return the value.
++ */
+ const void *of_get_property(const struct device_node *np, const char *name,
+- int *lenp)
++ int *lenp)
+ {
+ struct property *pp = of_find_property(np, name, lenp);
- out:
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
- }
+@@ -200,13 +223,13 @@ EXPORT_SYMBOL(of_get_property);
+ /** Checks if the given "compat" string matches one of the strings in
+ * the device's "compatible" property
+ */
+-int of_device_is_compatible(const struct device_node *device,
+- const char *compat)
++static int __of_device_is_compatible(const struct device_node *device,
++ const char *compat)
+ {
+ const char* cp;
+- int cplen, l;
++ int uninitialized_var(cplen), l;
- /*
-@@ -1301,7 +1358,7 @@ struct page *buffered_rmqueue(struct zon
- struct per_cpu_pages *pcp;
- struct list_head *list;
+- cp = of_get_property(device, "compatible", &cplen);
++ cp = __of_get_property(device, "compatible", &cplen);
+ if (cp == NULL)
+ return 0;
+ while (cplen > 0) {
+@@ -219,6 +242,21 @@ int of_device_is_compatible(const struct
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
-@@ -1333,17 +1390,19 @@ struct page *buffered_rmqueue(struct zon
- */
- WARN_ON_ONCE(order > 1);
- }
-- spin_lock_irqsave(&zone->lock, flags);
-+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
- page = __rmqueue(zone, order, migratetype);
-- spin_unlock(&zone->lock);
-- if (!page)
-+ if (!page) {
-+ spin_unlock(&zone->lock);
- goto failed;
-+ }
- __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
-+ spin_unlock(&zone->lock);
- }
+ return 0;
+ }
++
++/** Checks if the given "compat" string matches one of the strings in
++ * the device's "compatible" property
++ */
++int of_device_is_compatible(const struct device_node *device,
++ const char *compat)
++{
++ unsigned long flags;
++ int res;
++
++ raw_spin_lock_irqsave(&devtree_lock, flags);
++ res = __of_device_is_compatible(device, compat);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
++ return res;
++}
+ EXPORT_SYMBOL(of_device_is_compatible);
- __count_zone_vm_events(PGALLOC, zone, 1 << order);
- zone_statistics(preferred_zone, zone, gfp_flags);
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
+ /**
+@@ -278,13 +316,14 @@ EXPORT_SYMBOL(of_device_is_available);
+ struct device_node *of_get_parent(const struct device_node *node)
+ {
+ struct device_node *np;
++ unsigned long flags;
- VM_BUG_ON(bad_range(zone, page));
- if (prep_new_page(page, order, gfp_flags))
-@@ -1351,7 +1410,7 @@ struct page *buffered_rmqueue(struct zon
- return page;
+ if (!node)
+ return NULL;
- failed:
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(pa_lock, flags);
- return NULL;
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = of_node_get(node->parent);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
}
+ EXPORT_SYMBOL(of_get_parent);
+@@ -303,14 +342,15 @@ EXPORT_SYMBOL(of_get_parent);
+ struct device_node *of_get_next_parent(struct device_node *node)
+ {
+ struct device_node *parent;
++ unsigned long flags;
-@@ -1884,8 +1943,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m
- if (*did_some_progress != COMPACT_SKIPPED) {
+ if (!node)
+ return NULL;
- /* Page migration frees to the PCP lists but we want merging */
-- drain_pages(get_cpu());
-- put_cpu();
-+ drain_pages(get_cpu_light());
-+ put_cpu_light();
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ parent = of_node_get(node->parent);
+ of_node_put(node);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return parent;
+ }
- page = get_page_from_freelist(gfp_mask, nodemask,
- order, zonelist, high_zoneidx,
-@@ -3653,14 +3712,16 @@ static int __zone_pcp_update(void *data)
- for_each_possible_cpu(cpu) {
- struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
-+ LIST_HEAD(dst);
+@@ -326,14 +366,15 @@ struct device_node *of_get_next_child(co
+ struct device_node *prev)
+ {
+ struct device_node *next;
++ unsigned long flags;
- pset = per_cpu_ptr(zone->pageset, cpu);
- pcp = &pset->pcp;
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = prev ? prev->sibling : node->child;
+ for (; next; next = next->sibling)
+ if (of_node_get(next))
+ break;
+ of_node_put(prev);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return next;
+ }
+ EXPORT_SYMBOL(of_get_next_child);
+@@ -348,14 +389,15 @@ EXPORT_SYMBOL(of_get_next_child);
+ struct device_node *of_find_node_by_path(const char *path)
+ {
+ struct device_node *np = allnodes;
++ unsigned long flags;
-- local_irq_save(flags);
-- free_pcppages_bulk(zone, pcp->count, pcp);
-+ cpu_lock_irqsave(cpu, flags);
-+ isolate_pcp_pages(pcp->count, pcp, &dst);
-+ free_pcppages_bulk(zone, pcp->count, &dst);
- setup_pageset(pset, batch);
-- local_irq_restore(flags);
-+ cpu_unlock_irqrestore(cpu, flags);
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ for (; np; np = np->allnext) {
+ if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
+ && of_node_get(np))
+ break;
}
- return 0;
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
}
-@@ -4972,6 +5033,7 @@ static int page_alloc_cpu_notify(struct
- void __init page_alloc_init(void)
+ EXPORT_SYMBOL(of_find_node_by_path);
+@@ -375,15 +417,16 @@ struct device_node *of_find_node_by_name
+ const char *name)
{
- hotcpu_notifier(page_alloc_cpu_notify, 0);
-+ local_irq_lock_init(pa_lock);
- }
-
- /*
-Index: linux-2.6/mm/slab.c
-===================================================================
---- linux-2.6.orig/mm/slab.c
-+++ linux-2.6/mm/slab.c
-@@ -116,6 +116,7 @@
- #include <linux/kmemcheck.h>
- #include <linux/memory.h>
- #include <linux/prefetch.h>
-+#include <linux/locallock.h>
+ struct device_node *np;
++ unsigned long flags;
- #include <asm/cacheflush.h>
- #include <asm/tlbflush.h>
-@@ -620,6 +621,51 @@ int slab_is_available(void)
- static struct lock_class_key on_slab_l3_key;
- static struct lock_class_key on_slab_alc_key;
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext)
+ if (np->name && (of_node_cmp(np->name, name) == 0)
+ && of_node_get(np))
+ break;
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_name);
+@@ -404,15 +447,16 @@ struct device_node *of_find_node_by_type
+ const char *type)
+ {
+ struct device_node *np;
++ unsigned long flags;
-+static struct lock_class_key debugobj_l3_key;
-+static struct lock_class_key debugobj_alc_key;
-+
-+static void slab_set_lock_classes(struct kmem_cache *cachep,
-+ struct lock_class_key *l3_key, struct lock_class_key *alc_key,
-+ int q)
-+{
-+ struct array_cache **alc;
-+ struct kmem_list3 *l3;
-+ int r;
-+
-+ l3 = cachep->nodelists[q];
-+ if (!l3)
-+ return;
-+
-+ lockdep_set_class(&l3->list_lock, l3_key);
-+ alc = l3->alien;
-+ /*
-+ * FIXME: This check for BAD_ALIEN_MAGIC
-+ * should go away when common slab code is taught to
-+ * work even without alien caches.
-+ * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-+ * for alloc_alien_cache,
-+ */
-+ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-+ return;
-+ for_each_node(r) {
-+ if (alc[r])
-+ lockdep_set_class(&alc[r]->lock, alc_key);
-+ }
-+}
-+
-+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
-+{
-+ slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
-+}
-+
-+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
-+{
-+ int node;
-+
-+ for_each_online_node(node)
-+ slab_set_debugobj_lock_classes_node(cachep, node);
-+}
-+
- static void init_node_lock_keys(int q)
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext)
+ if (np->type && (of_node_cmp(np->type, type) == 0)
+ && of_node_get(np))
+ break;
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_type);
+@@ -435,18 +479,20 @@ struct device_node *of_find_compatible_n
+ const char *type, const char *compatible)
{
- struct cache_sizes *s = malloc_sizes;
-@@ -628,29 +674,14 @@ static void init_node_lock_keys(int q)
- return;
-
- for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-- struct array_cache **alc;
- struct kmem_list3 *l3;
-- int r;
+ struct device_node *np;
++ unsigned long flags;
- l3 = s->cs_cachep->nodelists[q];
- if (!l3 || OFF_SLAB(s->cs_cachep))
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+ if (type
+ && !(np->type && (of_node_cmp(np->type, type) == 0)))
continue;
-- lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-- alc = l3->alien;
-- /*
-- * FIXME: This check for BAD_ALIEN_MAGIC
-- * should go away when common slab code is taught to
-- * work even without alien caches.
-- * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-- * for alloc_alien_cache,
-- */
-- if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-- continue;
-- for_each_node(r) {
-- if (alc[r])
-- lockdep_set_class(&alc[r]->lock,
-- &on_slab_alc_key);
-- }
-+
-+ slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
-+ &on_slab_alc_key, q);
+- if (of_device_is_compatible(np, compatible) && of_node_get(np))
++ if (__of_device_is_compatible(np, compatible) &&
++ of_node_get(np))
+ break;
}
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
}
-
-@@ -669,6 +700,14 @@ static void init_node_lock_keys(int q)
- static inline void init_lock_keys(void)
+ EXPORT_SYMBOL(of_find_compatible_node);
+@@ -468,8 +514,9 @@ struct device_node *of_find_node_with_pr
{
- }
-+
-+static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
-+{
-+}
-+
-+static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
-+{
-+}
- #endif
+ struct device_node *np;
+ struct property *pp;
++ unsigned long flags;
- /*
-@@ -678,12 +717,66 @@ static DEFINE_MUTEX(cache_chain_mutex);
- static struct list_head cache_chain;
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+ for (pp = np->properties; pp != 0; pp = pp->next) {
+@@ -481,20 +528,14 @@ struct device_node *of_find_node_with_pr
+ }
+ out:
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_with_property);
- static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
-+static DEFINE_PER_CPU(struct list_head, slab_free_list);
-+static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
+-/**
+- * of_match_node - Tell if an device_node has a matching of_match structure
+- * @matches: array of of device match structures to search in
+- * @node: the of device structure to match against
+- *
+- * Low level utility function used by device matching.
+- */
+-const struct of_device_id *of_match_node(const struct of_device_id *matches,
+- const struct device_node *node)
++static
++const struct of_device_id *__of_match_node(const struct of_device_id *matches,
++ const struct device_node *node)
+ {
+ if (!matches)
+ return NULL;
+@@ -508,14 +549,33 @@ const struct of_device_id *of_match_node
+ match &= node->type
+ && !strcmp(matches->type, node->type);
+ if (matches->compatible[0])
+- match &= of_device_is_compatible(node,
+- matches->compatible);
++ match &= __of_device_is_compatible(node,
++ matches->compatible);
+ if (match)
+ return matches;
+ matches++;
+ }
+ return NULL;
+ }
+
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1)
-+#else
-+/*
-+ * execute func() for all CPUs. On PREEMPT_RT we dont actually have
-+ * to run on the remote CPUs - we only have to take their CPU-locks.
-+ * (This is a rare operation, so cacheline bouncing is not an issue.)
++/**
++ * of_match_node - Tell if a device_node has a matching of_match structure
++ * @matches: array of of device match structures to search in
++ * @node: the of device structure to match against
++ *
++ * Low level utility function used by device matching.
+ */
-+static void
-+slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
-+{
-+ unsigned int i;
-+
-+ for_each_online_cpu(i)
-+ func(arg, i);
-+}
-+#endif
-+
-+static void free_delayed(struct list_head *h)
-+{
-+ while(!list_empty(h)) {
-+ struct page *page = list_first_entry(h, struct page, lru);
-+
-+ list_del(&page->lru);
-+ __free_pages(page, page->index);
-+ }
-+}
-+
-+static void unlock_l3_and_free_delayed(spinlock_t *list_lock)
-+{
-+ LIST_HEAD(tmp);
-+
-+ list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
-+ local_spin_unlock_irq(slab_lock, list_lock);
-+ free_delayed(&tmp);
-+}
-+
-+static void unlock_slab_and_free_delayed(unsigned long flags)
++const struct of_device_id *of_match_node(const struct of_device_id *matches,
++ const struct device_node *node)
+{
-+ LIST_HEAD(tmp);
++ const struct of_device_id *match;
++ unsigned long flags;
+
-+ list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
-+ local_unlock_irqrestore(slab_lock, flags);
-+ free_delayed(&tmp);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
++ match = __of_match_node(matches, node);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
++ return match;
+}
+ EXPORT_SYMBOL(of_match_node);
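of_match_node() keeps its external contract but now takes devtree_lock itself and defers to __of_match_node(), which of_find_matching_node() below can call with the lock already held. For reference, a match table as consumed by it; the entries are invented for the example:

  /* Sketch only -- not part of the patch. */
  static const struct of_device_id example_match_table[] = {
          { .compatible = "example,uart" },
          { .type = "serial" },
          { /* sentinel */ }
  };
  /* of_match_node(example_match_table, np) returns the first entry
   * that matches np, or NULL -- now under the raw devtree_lock. */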
- static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
- {
- return cachep->array[smp_processor_id()];
- }
-
-+static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep,
-+ int cpu)
-+{
-+ return cachep->array[cpu];
-+}
-+
- static inline struct kmem_cache *__find_general_cachep(size_t size,
- gfp_t gfpflags)
+ /**
+@@ -534,15 +594,16 @@ struct device_node *of_find_matching_nod
+ const struct of_device_id *matches)
{
-@@ -1021,9 +1114,10 @@ static void reap_alien(struct kmem_cache
- if (l3->alien) {
- struct array_cache *ac = l3->alien[node];
-
-- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-+ if (ac && ac->avail &&
-+ local_spin_trylock_irq(slab_lock, &ac->lock)) {
- __drain_alien_cache(cachep, ac, node);
-- spin_unlock_irq(&ac->lock);
-+ local_spin_unlock_irq(slab_lock, &ac->lock);
- }
- }
- }
-@@ -1038,9 +1132,9 @@ static void drain_alien_cache(struct kme
- for_each_online_node(i) {
- ac = alien[i];
- if (ac) {
-- spin_lock_irqsave(&ac->lock, flags);
-+ local_spin_lock_irqsave(slab_lock, &ac->lock, flags);
- __drain_alien_cache(cachep, ac, i);
-- spin_unlock_irqrestore(&ac->lock, flags);
-+ local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags);
- }
- }
- }
-@@ -1119,11 +1213,11 @@ static int init_cache_nodelists_node(int
- cachep->nodelists[node] = l3;
- }
+ struct device_node *np;
++ unsigned long flags;
-- spin_lock_irq(&cachep->nodelists[node]->list_lock);
-+ local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
- cachep->nodelists[node]->free_limit =
- (1 + nr_cpus_node(node)) *
- cachep->batchcount + cachep->num;
-- spin_unlock_irq(&cachep->nodelists[node]->list_lock);
-+ local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+- if (of_match_node(matches, np) && of_node_get(np))
++ if (__of_match_node(matches, np) && of_node_get(np))
+ break;
}
- return 0;
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
}
-@@ -1148,7 +1242,7 @@ static void __cpuinit cpuup_canceled(lon
- if (!l3)
- goto free_array_cache;
-
-- spin_lock_irq(&l3->list_lock);
-+ local_spin_lock_irq(slab_lock, &l3->list_lock);
+ EXPORT_SYMBOL(of_find_matching_node);
+@@ -585,12 +646,12 @@ struct device_node *of_find_node_by_phan
+ {
+ struct device_node *np;
- /* Free limit for this kmem_list3 */
- l3->free_limit -= cachep->batchcount;
-@@ -1156,7 +1250,7 @@ static void __cpuinit cpuup_canceled(lon
- free_block(cachep, nc->entry, nc->avail, node);
+- read_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ for (np = allnodes; np; np = np->allnext)
+ if (np->phandle == handle)
+ break;
+ of_node_get(np);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_phandle);
+@@ -745,18 +806,18 @@ int prom_add_property(struct device_node
+ unsigned long flags;
- if (!cpumask_empty(mask)) {
-- spin_unlock_irq(&l3->list_lock);
-+ unlock_l3_and_free_delayed(&l3->list_lock);
- goto free_array_cache;
+ prop->next = NULL;
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = &np->properties;
+ while (*next) {
+ if (strcmp(prop->name, (*next)->name) == 0) {
+ /* duplicate ! don't insert it */
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return -1;
}
+ next = &(*next)->next;
+ }
+ *next = prop;
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-@@ -1170,7 +1264,7 @@ static void __cpuinit cpuup_canceled(lon
- alien = l3->alien;
- l3->alien = NULL;
+ #ifdef CONFIG_PROC_DEVICETREE
+ /* try to add to proc as well if it was initialized */
+@@ -781,7 +842,7 @@ int prom_remove_property(struct device_n
+ unsigned long flags;
+ int found = 0;
-- spin_unlock_irq(&l3->list_lock);
-+ unlock_l3_and_free_delayed(&l3->list_lock);
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = &np->properties;
+ while (*next) {
+ if (*next == prop) {
+@@ -794,7 +855,7 @@ int prom_remove_property(struct device_n
+ }
+ next = &(*next)->next;
+ }
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
- kfree(shared);
- if (alien) {
-@@ -1244,7 +1338,7 @@ static int __cpuinit cpuup_prepare(long
- l3 = cachep->nodelists[node];
- BUG_ON(!l3);
+ if (!found)
+ return -ENODEV;
+@@ -824,7 +885,7 @@ int prom_update_property(struct device_n
+ unsigned long flags;
+ int found = 0;
-- spin_lock_irq(&l3->list_lock);
-+ local_spin_lock_irq(slab_lock, &l3->list_lock);
- if (!l3->shared) {
- /*
- * We are serialised from CPU_DEAD or
-@@ -1259,9 +1353,11 @@ static int __cpuinit cpuup_prepare(long
- alien = NULL;
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = &np->properties;
+ while (*next) {
+ if (*next == oldprop) {
+@@ -838,7 +899,7 @@ int prom_update_property(struct device_n
}
- #endif
-- spin_unlock_irq(&l3->list_lock);
-+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
- kfree(shared);
- free_alien_cache(alien);
-+ if (cachep->flags & SLAB_DEBUG_OBJECTS)
-+ slab_set_debugobj_lock_classes_node(cachep, node);
+ next = &(*next)->next;
}
- init_node_lock_keys(node);
-
-@@ -1448,6 +1544,10 @@ void __init kmem_cache_init(void)
- if (num_possible_nodes() == 1)
- use_alien_caches = 0;
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
-+ local_irq_lock_init(slab_lock);
-+ for_each_possible_cpu(i)
-+ INIT_LIST_HEAD(&per_cpu(slab_free_list, i));
-+
- for (i = 0; i < NUM_INIT_LISTS; i++) {
- kmem_list3_init(&initkmem_list3[i]);
- if (i < MAX_NUMNODES)
-@@ -1625,6 +1725,9 @@ void __init kmem_cache_init_late(void)
+ if (!found)
+ return -ENODEV;
+@@ -868,12 +929,12 @@ void of_attach_node(struct device_node *
{
- struct kmem_cache *cachep;
+ unsigned long flags;
-+ /* Annotate slab for lockdep -- annotate the malloc caches */
-+ init_lock_keys();
-+
- /* 6) resize the head arrays to their final sizes */
- mutex_lock(&cache_chain_mutex);
- list_for_each_entry(cachep, &cache_chain, next)
-@@ -1635,9 +1738,6 @@ void __init kmem_cache_init_late(void)
- /* Done! */
- g_cpucache_up = FULL;
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np->sibling = np->parent->child;
+ np->allnext = allnodes;
+ np->parent->child = np;
+ allnodes = np;
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ }
-- /* Annotate slab for lockdep -- annotate the malloc caches */
-- init_lock_keys();
--
- /*
- * Register a cpu startup notifier callback that initializes
- * cpu_cache_get for all new cpus
-@@ -1725,12 +1825,14 @@ static void *kmem_getpages(struct kmem_c
- /*
- * Interface to system's page release.
- */
--static void kmem_freepages(struct kmem_cache *cachep, void *addr)
-+static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed)
- {
- unsigned long i = (1 << cachep->gfporder);
-- struct page *page = virt_to_page(addr);
-+ struct page *page, *basepage = virt_to_page(addr);
- const unsigned long nr_freed = i;
+ /**
+@@ -887,7 +948,7 @@ void of_detach_node(struct device_node *
+ struct device_node *parent;
+ unsigned long flags;
-+ page = basepage;
-+
- kmemcheck_free_shadow(page, cachep->gfporder);
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-@@ -1746,7 +1848,13 @@ static void kmem_freepages(struct kmem_c
- }
- if (current->reclaim_state)
- current->reclaim_state->reclaimed_slab += nr_freed;
-- free_pages((unsigned long)addr, cachep->gfporder);
-+
-+ if (!delayed) {
-+ free_pages((unsigned long)addr, cachep->gfporder);
-+ } else {
-+ basepage->index = cachep->gfporder;
-+ list_add(&basepage->lru, &__get_cpu_var(slab_free_list));
-+ }
+ parent = np->parent;
+ if (!parent)
+@@ -918,7 +979,7 @@ void of_detach_node(struct device_node *
+ of_node_set_flag(np, OF_DETACHED);
+
+ out_unlock:
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
}
+ #endif /* defined(CONFIG_OF_DYNAMIC) */
- static void kmem_rcu_free(struct rcu_head *head)
-@@ -1754,7 +1862,7 @@ static void kmem_rcu_free(struct rcu_hea
- struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
- struct kmem_cache *cachep = slab_rcu->cachep;
+Index: linux-2.6/arch/sparc/kernel/prom_common.c
+===================================================================
+--- linux-2.6.orig/arch/sparc/kernel/prom_common.c
++++ linux-2.6/arch/sparc/kernel/prom_common.c
+@@ -67,7 +67,7 @@ int of_set_property(struct device_node *
+ err = -ENODEV;
-- kmem_freepages(cachep, slab_rcu->addr);
-+ kmem_freepages(cachep, slab_rcu->addr, false);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slab_rcu);
- }
-@@ -1973,7 +2081,8 @@ static void slab_destroy_debugcheck(stru
- * Before calling the slab must have been unlinked from the cache. The
- * cache-lock is not held/needed.
- */
--static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
-+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp,
-+ bool delayed)
+ mutex_lock(&of_set_property_mutex);
+- write_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ prevp = &dp->properties;
+ while (*prevp) {
+ struct property *prop = *prevp;
+@@ -94,7 +94,7 @@ int of_set_property(struct device_node *
+ }
+ prevp = &(*prevp)->next;
+ }
+- write_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ mutex_unlock(&of_set_property_mutex);
+
+ /* XXX Upate procfs if necessary... */
+Index: linux-2.6/include/linux/of.h
+===================================================================
+--- linux-2.6.orig/include/linux/of.h
++++ linux-2.6/include/linux/of.h
+@@ -68,7 +68,7 @@ struct device_node {
+ /* Pointer for first entry in chain of all nodes. */
+ extern struct device_node *allnodes;
+ extern struct device_node *of_chosen;
+-extern rwlock_t devtree_lock;
++extern raw_spinlock_t devtree_lock;
+
+ static inline bool of_have_populated_dt(void)
{
- void *addr = slabp->s_mem - slabp->colouroff;
+Index: linux-2.6/include/linux/list.h
+===================================================================
+--- linux-2.6.orig/include/linux/list.h
++++ linux-2.6/include/linux/list.h
+@@ -362,6 +362,17 @@ static inline void list_splice_tail_init
+ list_entry((ptr)->next, type, member)
-@@ -1986,7 +2095,7 @@ static void slab_destroy(struct kmem_cac
- slab_rcu->addr = addr;
- call_rcu(&slab_rcu->head, kmem_rcu_free);
- } else {
-- kmem_freepages(cachep, addr);
-+ kmem_freepages(cachep, addr, delayed);
- if (OFF_SLAB(cachep))
- kmem_cache_free(cachep->slabp_cache, slabp);
- }
-@@ -2424,6 +2533,16 @@ kmem_cache_create (const char *name, siz
- goto oops;
- }
+ /**
++ * list_last_entry - get the last element from a list
++ * @ptr: the list head to take the element from.
++ * @type: the type of the struct this is embedded in.
++ * @member: the name of the list_struct within the struct.
++ *
++ * Note, that list is expected to be not empty.
++ */
++#define list_last_entry(ptr, type, member) \
++ list_entry((ptr)->prev, type, member)
++
++/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+Index: linux-2.6/mm/page_alloc.c
+===================================================================
+--- linux-2.6.orig/mm/page_alloc.c
++++ linux-2.6/mm/page_alloc.c
+@@ -57,6 +57,7 @@
+ #include <linux/ftrace_event.h>
+ #include <linux/memcontrol.h>
+ #include <linux/prefetch.h>
++#include <linux/locallock.h>
+
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+@@ -222,6 +223,18 @@ EXPORT_SYMBOL(nr_node_ids);
+ EXPORT_SYMBOL(nr_online_nodes);
+ #endif
-+ if (flags & SLAB_DEBUG_OBJECTS) {
-+ /*
-+ * Would deadlock through slab_destroy()->call_rcu()->
-+ * debug_object_activate()->kmem_cache_alloc().
-+ */
-+ WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
++static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
-+ slab_set_debugobj_lock_classes(cachep);
-+ }
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define cpu_lock_irqsave(cpu, flags) \
++ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
++# define cpu_unlock_irqrestore(cpu, flags) \
++ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
++#else
++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
++#endif
+
- /* cache setup completed, link it into the list */
- list_add(&cachep->next, &cache_chain);
- oops:
-@@ -2441,7 +2560,7 @@ EXPORT_SYMBOL(kmem_cache_create);
- #if DEBUG
- static void check_irq_off(void)
- {
-- BUG_ON(!irqs_disabled());
-+ BUG_ON_NONRT(!irqs_disabled());
- }
+ int page_group_by_mobility_disabled __read_mostly;
- static void check_irq_on(void)
-@@ -2476,13 +2595,12 @@ static void drain_array(struct kmem_cach
- struct array_cache *ac,
- int force, int node);
+ static void set_pageblock_migratetype(struct page *page, int migratetype)
+@@ -580,7 +593,7 @@ static inline int free_pages_check(struc
+ }
--static void do_drain(void *arg)
-+static void __do_drain(void *arg, unsigned int cpu)
+ /*
+- * Frees a number of pages from the PCP lists
++ * Frees a number of pages which have been collected from the pcp lists.
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ *
+@@ -591,16 +604,42 @@ static inline int free_pages_check(struc
+ * pinned" detection logic.
+ */
+ static void free_pcppages_bulk(struct zone *zone, int count,
+- struct per_cpu_pages *pcp)
++ struct list_head *list)
{
- struct kmem_cache *cachep = arg;
- struct array_cache *ac;
-- int node = numa_mem_id();
-+ int node = cpu_to_mem(cpu);
+- int migratetype = 0;
+- int batch_free = 0;
+ int to_free = count;
++ unsigned long flags;
-- check_irq_off();
- ac = cpu_cache_get(cachep);
- spin_lock(&cachep->nodelists[node]->list_lock);
- free_block(cachep, ac->entry, ac->avail, node);
-@@ -2490,12 +2608,30 @@ static void do_drain(void *arg)
- ac->avail = 0;
- }
+- spin_lock(&zone->lock);
++ spin_lock_irqsave(&zone->lock, flags);
+ zone->all_unreclaimable = 0;
+ zone->pages_scanned = 0;
-+#ifndef CONFIG_PREEMPT_RT_BASE
-+static void do_drain(void *arg)
-+{
-+ __do_drain(arg, smp_processor_id());
-+}
-+#else
-+static void do_drain(void *arg, int cpu)
-+{
-+ LIST_HEAD(tmp);
++ while (!list_empty(list)) {
++ struct page *page = list_first_entry(list, struct page, lru);
+
-+ spin_lock_irq(&per_cpu(slab_lock, cpu).lock);
-+ __do_drain(arg, cpu);
-+ list_splice_init(&per_cpu(slab_free_list, cpu), &tmp);
-+ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock);
-+ free_delayed(&tmp);
++ /* must delete as __free_one_page list manipulates */
++ list_del(&page->lru);
++ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
++ __free_one_page(page, zone, 0, page_private(page));
++ trace_mm_page_pcpu_drain(page, 0, page_private(page));
++ to_free--;
++ }
++ WARN_ON(to_free != 0);
++ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
++ spin_unlock_irqrestore(&zone->lock, flags);
+}
-+#endif
+
- static void drain_cpu_caches(struct kmem_cache *cachep)
- {
- struct kmem_list3 *l3;
- int node;
-
-- on_each_cpu(do_drain, cachep, 1);
-+ slab_on_each_cpu(do_drain, cachep);
- check_irq_on();
- for_each_online_node(node) {
- l3 = cachep->nodelists[node];
-@@ -2526,10 +2662,10 @@ static int drain_freelist(struct kmem_ca
- nr_freed = 0;
- while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
++/*
++ * Moves a number of pages from the PCP lists to free list which
++ * is freed outside of the locked region.
++ *
++ * Assumes all pages on list are in same zone, and of same order.
++ * count is the number of pages to free.
++ */
++static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
++ struct list_head *dst)
++{
++ int migratetype = 0, batch_free = 0;
++
+ while (to_free) {
+ struct page *page;
+ struct list_head *list;
+@@ -616,7 +655,7 @@ static void free_pcppages_bulk(struct zo
+ batch_free++;
+ if (++migratetype == MIGRATE_PCPTYPES)
+ migratetype = 0;
+- list = &pcp->lists[migratetype];
++ list = &src->lists[migratetype];
+ } while (list_empty(list));
-- spin_lock_irq(&l3->list_lock);
-+ local_spin_lock_irq(slab_lock, &l3->list_lock);
- p = l3->slabs_free.prev;
- if (p == &l3->slabs_free) {
-- spin_unlock_irq(&l3->list_lock);
-+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
- goto out;
- }
+ /* This is the only non-empty list. Free them all. */
+@@ -624,28 +663,25 @@ static void free_pcppages_bulk(struct zo
+ batch_free = to_free;
-@@ -2543,8 +2679,8 @@ static int drain_freelist(struct kmem_ca
- * to the cache.
- */
- l3->free_objects -= cache->num;
-- spin_unlock_irq(&l3->list_lock);
-- slab_destroy(cache, slabp);
-+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
-+ slab_destroy(cache, slabp, false);
- nr_freed++;
+ do {
+- page = list_entry(list->prev, struct page, lru);
+- /* must delete as __free_one_page list manipulates */
++ page = list_last_entry(list, struct page, lru);
+ list_del(&page->lru);
+- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+- __free_one_page(page, zone, 0, page_private(page));
+- trace_mm_page_pcpu_drain(page, 0, page_private(page));
++ list_add(&page->lru, dst);
+ } while (--to_free && --batch_free && !list_empty(list));
}
- out:
-@@ -2838,7 +2974,7 @@ static int cache_grow(struct kmem_cache
- offset *= cachep->colour_off;
-
- if (local_flags & __GFP_WAIT)
-- local_irq_enable();
-+ local_unlock_irq(slab_lock);
-
- /*
- * The test for missing atomic flag is performed here, rather than
-@@ -2868,7 +3004,7 @@ static int cache_grow(struct kmem_cache
- cache_init_objs(cachep, slabp);
-
- if (local_flags & __GFP_WAIT)
-- local_irq_disable();
-+ local_lock_irq(slab_lock);
- check_irq_off();
- spin_lock(&l3->list_lock);
-
-@@ -2879,10 +3015,10 @@ static int cache_grow(struct kmem_cache
- spin_unlock(&l3->list_lock);
- return 1;
- opps1:
-- kmem_freepages(cachep, objp);
-+ kmem_freepages(cachep, objp, false);
- failed:
- if (local_flags & __GFP_WAIT)
-- local_irq_disable();
-+ local_lock_irq(slab_lock);
- return 0;
+- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+- spin_unlock(&zone->lock);
}
-@@ -3280,11 +3416,11 @@ static void *fallback_alloc(struct kmem_
- * set and go into memory reserves if necessary.
- */
- if (local_flags & __GFP_WAIT)
-- local_irq_enable();
-+ local_unlock_irq(slab_lock);
- kmem_flagcheck(cache, flags);
- obj = kmem_getpages(cache, local_flags, numa_mem_id());
- if (local_flags & __GFP_WAIT)
-- local_irq_disable();
-+ local_lock_irq(slab_lock);
- if (obj) {
- /*
- * Insert into the appropriate per node queues
-@@ -3400,7 +3536,7 @@ __cache_alloc_node(struct kmem_cache *ca
- return NULL;
-
- cache_alloc_debugcheck_before(cachep, flags);
-- local_irq_save(save_flags);
-+ local_lock_irqsave(slab_lock, save_flags);
-
- if (nodeid == -1)
- nodeid = slab_node;
-@@ -3425,7 +3561,7 @@ __cache_alloc_node(struct kmem_cache *ca
- /* ___cache_alloc_node can fall back to other nodes */
- ptr = ____cache_alloc_node(cachep, flags, nodeid);
- out:
-- local_irq_restore(save_flags);
-+ local_unlock_irqrestore(slab_lock, save_flags);
- ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
- kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
- flags);
-@@ -3485,9 +3621,9 @@ __cache_alloc(struct kmem_cache *cachep,
- return NULL;
-
- cache_alloc_debugcheck_before(cachep, flags);
-- local_irq_save(save_flags);
-+ local_lock_irqsave(slab_lock, save_flags);
- objp = __do_cache_alloc(cachep, flags);
-- local_irq_restore(save_flags);
-+ local_unlock_irqrestore(slab_lock, save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
- kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
- flags);
-@@ -3535,7 +3671,7 @@ static void free_block(struct kmem_cache
- * a different cache, refer to comments before
- * alloc_slabmgmt.
- */
-- slab_destroy(cachep, slabp);
-+ slab_destroy(cachep, slabp, true);
- } else {
- list_add(&slabp->list, &l3->slabs_free);
- }
-@@ -3798,12 +3934,12 @@ void kmem_cache_free(struct kmem_cache *
- {
- unsigned long flags;
-
-- local_irq_save(flags);
- debug_check_no_locks_freed(objp, obj_size(cachep));
- if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(objp, obj_size(cachep));
-+ local_lock_irqsave(slab_lock, flags);
- __cache_free(cachep, objp, __builtin_return_address(0));
-- local_irq_restore(flags);
-+ unlock_slab_and_free_delayed(flags);
-
- trace_kmem_cache_free(_RET_IP_, objp);
+ static void free_one_page(struct zone *zone, struct page *page, int order,
+ int migratetype)
+ {
+- spin_lock(&zone->lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&zone->lock, flags);
+ zone->all_unreclaimable = 0;
+ zone->pages_scanned = 0;
+
+ __free_one_page(page, zone, order, migratetype);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+- spin_unlock(&zone->lock);
++ spin_unlock_irqrestore(&zone->lock, flags);
}
-@@ -3827,13 +3963,13 @@ void kfree(const void *objp)
- if (unlikely(ZERO_OR_NULL_PTR(objp)))
+ static bool free_pages_prepare(struct page *page, unsigned int order)
+@@ -682,13 +718,13 @@ static void __free_pages_ok(struct page
+ if (!free_pages_prepare(page, order))
return;
+
- local_irq_save(flags);
- kfree_debugcheck(objp);
- c = virt_to_cache(objp);
- debug_check_no_locks_freed(objp, obj_size(c));
- debug_check_no_obj_freed(objp, obj_size(c));
-+ local_lock_irqsave(slab_lock, flags);
- __cache_free(c, (void *)objp, __builtin_return_address(0));
++ local_lock_irqsave(pa_lock, flags);
+ if (unlikely(wasMlocked))
+ free_page_mlock(page);
+ __count_vm_events(PGFREE, 1 << order);
+ free_one_page(page_zone(page), page, order,
+ get_pageblock_migratetype(page));
- local_irq_restore(flags);
-+ unlock_slab_and_free_delayed(flags);
++ local_unlock_irqrestore(pa_lock, flags);
}
- EXPORT_SYMBOL(kfree);
-@@ -3876,7 +4012,7 @@ static int alloc_kmemlist(struct kmem_ca
- if (l3) {
- struct array_cache *shared = l3->shared;
+ /*
+@@ -1064,16 +1100,18 @@ static int rmqueue_bulk(struct zone *zon
+ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ {
+ unsigned long flags;
++ LIST_HEAD(dst);
+ int to_drain;
-- spin_lock_irq(&l3->list_lock);
-+ local_spin_lock_irq(slab_lock, &l3->list_lock);
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (pcp->count >= pcp->batch)
+ to_drain = pcp->batch;
+ else
+ to_drain = pcp->count;
+- free_pcppages_bulk(zone, to_drain, pcp);
++ isolate_pcp_pages(to_drain, pcp, &dst);
+ pcp->count -= to_drain;
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, to_drain, &dst);
+ }
+ #endif
- if (shared)
- free_block(cachep, shared->entry,
-@@ -3889,7 +4025,8 @@ static int alloc_kmemlist(struct kmem_ca
- }
- l3->free_limit = (1 + nr_cpus_node(node)) *
- cachep->batchcount + cachep->num;
-- spin_unlock_irq(&l3->list_lock);
-+ unlock_l3_and_free_delayed(&l3->list_lock);
-+
- kfree(shared);
- free_alien_cache(new_alien);
- continue;
-@@ -3936,17 +4073,30 @@ struct ccupdate_struct {
- struct array_cache *new[NR_CPUS];
- };
+@@ -1092,16 +1130,21 @@ static void drain_pages(unsigned int cpu
+ for_each_populated_zone(zone) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
++ int count;
--static void do_ccupdate_local(void *info)
-+static void __do_ccupdate_local(void *info, int cpu)
- {
- struct ccupdate_struct *new = info;
- struct array_cache *old;
+- local_irq_save(flags);
++ cpu_lock_irqsave(cpu, flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
-- check_irq_off();
-- old = cpu_cache_get(new->cachep);
-+ old = cpu_cache_get_on_cpu(new->cachep, cpu);
+ pcp = &pset->pcp;
+- if (pcp->count) {
+- free_pcppages_bulk(zone, pcp->count, pcp);
++ count = pcp->count;
++ if (count) {
++ isolate_pcp_pages(count, pcp, &dst);
+ pcp->count = 0;
+ }
+- local_irq_restore(flags);
++ cpu_unlock_irqrestore(cpu, flags);
++ if (count)
++ free_pcppages_bulk(zone, count, &dst);
+ }
+ }
-- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
-- new->new[smp_processor_id()] = old;
-+ new->cachep->array[cpu] = new->new[cpu];
-+ new->new[cpu] = old;
-+}
-+
+@@ -1118,7 +1161,14 @@ void drain_local_pages(void *arg)
+ */
+ void drain_all_pages(void)
+ {
+#ifndef CONFIG_PREEMPT_RT_BASE
-+static void do_ccupdate_local(void *info)
-+{
-+ __do_ccupdate_local(info, smp_processor_id());
-+}
+ on_each_cpu(drain_local_pages, NULL, 1);
+#else
-+static void do_ccupdate_local(void *info, int cpu)
-+{
-+ spin_lock_irq(&per_cpu(slab_lock, cpu).lock);
-+ __do_ccupdate_local(info, cpu);
-+ spin_unlock_irq(&per_cpu(slab_lock, cpu).lock);
- }
++ int i;
++
++ for_each_online_cpu(i)
++ drain_pages(i);
+#endif
+ }
- /* Always called with the cache_chain_mutex held */
- static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
-@@ -3971,7 +4121,7 @@ static int do_tune_cpucache(struct kmem_
- }
- new->cachep = cachep;
-
-- on_each_cpu(do_ccupdate_local, (void *)new, 1);
-+ slab_on_each_cpu(do_ccupdate_local, (void *)new);
+ #ifdef CONFIG_HIBERNATION
+@@ -1174,7 +1224,7 @@ void free_hot_cold_page(struct page *pag
- check_irq_on();
- cachep->batchcount = batchcount;
-@@ -3982,9 +4132,11 @@ static int do_tune_cpucache(struct kmem_
- struct array_cache *ccold = new->new[i];
- if (!ccold)
- continue;
-- spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
-+ local_spin_lock_irq(slab_lock,
-+ &cachep->nodelists[cpu_to_mem(i)]->list_lock);
- free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
-- spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ migratetype = get_pageblock_migratetype(page);
+ set_page_private(page, migratetype);
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (unlikely(wasMlocked))
+ free_page_mlock(page);
+ __count_vm_event(PGFREE);
+@@ -1201,12 +1251,19 @@ void free_hot_cold_page(struct page *pag
+ list_add(&page->lru, &pcp->lists[migratetype]);
+ pcp->count++;
+ if (pcp->count >= pcp->high) {
+- free_pcppages_bulk(zone, pcp->batch, pcp);
++ LIST_HEAD(dst);
++ int count;
+
-+ unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
- kfree(ccold);
++ isolate_pcp_pages(pcp->batch, pcp, &dst);
+ pcp->count -= pcp->batch;
++ count = pcp->batch;
++ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, count, &dst);
++ return;
}
- kfree(new);
-@@ -4060,7 +4212,7 @@ static void drain_array(struct kmem_cach
- if (ac->touched && !force) {
- ac->touched = 0;
- } else {
-- spin_lock_irq(&l3->list_lock);
-+ local_spin_lock_irq(slab_lock, &l3->list_lock);
- if (ac->avail) {
- tofree = force ? ac->avail : (ac->limit + 4) / 5;
- if (tofree > ac->avail)
-@@ -4070,7 +4222,7 @@ static void drain_array(struct kmem_cach
- memmove(ac->entry, &(ac->entry[tofree]),
- sizeof(void *) * ac->avail);
+
+ out:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ /*
+@@ -1301,7 +1358,7 @@ again:
+ struct per_cpu_pages *pcp;
+ struct list_head *list;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
+ if (list_empty(list)) {
+@@ -1333,17 +1390,19 @@ again:
+ */
+ WARN_ON_ONCE(order > 1);
}
-- spin_unlock_irq(&l3->list_lock);
-+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+- spin_lock_irqsave(&zone->lock, flags);
++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+ page = __rmqueue(zone, order, migratetype);
+- spin_unlock(&zone->lock);
+- if (!page)
++ if (!page) {
++ spin_unlock(&zone->lock);
+ goto failed;
++ }
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
++ spin_unlock(&zone->lock);
}
- }
-@@ -4209,7 +4361,7 @@ static int s_show(struct seq_file *m, vo
- continue;
+ __count_zone_vm_events(PGALLOC, zone, 1 << order);
+ zone_statistics(preferred_zone, zone, gfp_flags);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
- check_irq_on();
-- spin_lock_irq(&l3->list_lock);
-+ local_spin_lock_irq(slab_lock, &l3->list_lock);
+ VM_BUG_ON(bad_range(zone, page));
+ if (prep_new_page(page, order, gfp_flags))
+@@ -1351,7 +1410,7 @@ again:
+ return page;
- list_for_each_entry(slabp, &l3->slabs_full, list) {
- if (slabp->inuse != cachep->num && !error)
-@@ -4234,7 +4386,7 @@ static int s_show(struct seq_file *m, vo
- if (l3->shared)
- shared_avail += l3->shared->avail;
+ failed:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ return NULL;
+ }
-- spin_unlock_irq(&l3->list_lock);
-+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
- }
- num_slabs += active_slabs;
- num_objs = num_slabs * cachep->num;
-@@ -4463,13 +4615,13 @@ static int leaks_show(struct seq_file *m
- continue;
+@@ -1884,8 +1943,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m
+ if (*did_some_progress != COMPACT_SKIPPED) {
+
+ /* Page migration frees to the PCP lists but we want merging */
+- drain_pages(get_cpu());
+- put_cpu();
++ drain_pages(get_cpu_light());
++ put_cpu_light();
- check_irq_on();
-- spin_lock_irq(&l3->list_lock);
-+ local_spin_lock_irq(slab_lock, &l3->list_lock);
+ page = get_page_from_freelist(gfp_mask, nodemask,
+ order, zonelist, high_zoneidx,
+@@ -3653,14 +3712,16 @@ static int __zone_pcp_update(void *data)
+ for_each_possible_cpu(cpu) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
- list_for_each_entry(slabp, &l3->slabs_full, list)
- handle_slab(n, cachep, slabp);
- list_for_each_entry(slabp, &l3->slabs_partial, list)
- handle_slab(n, cachep, slabp);
-- spin_unlock_irq(&l3->list_lock);
-+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+ pcp = &pset->pcp;
+
+- local_irq_save(flags);
+- free_pcppages_bulk(zone, pcp->count, pcp);
++ cpu_lock_irqsave(cpu, flags);
++ isolate_pcp_pages(pcp->count, pcp, &dst);
++ free_pcppages_bulk(zone, pcp->count, &dst);
+ setup_pageset(pset, batch);
+- local_irq_restore(flags);
++ cpu_unlock_irqrestore(cpu, flags);
}
- name = cachep->name;
- if (n[0] == n[1]) {
+ return 0;
+ }
+@@ -4972,6 +5033,7 @@ static int page_alloc_cpu_notify(struct
+ void __init page_alloc_init(void)
+ {
+ hotcpu_notifier(page_alloc_cpu_notify, 0);
++ local_irq_lock_init(pa_lock);
+ }
+
+ /*
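
Note on the mm/page_alloc.c hunks above: they all apply one transformation. Every local_irq_save()/local_irq_restore() pair guarding the per-CPU pagesets becomes a pa_lock operation, so that on PREEMPT_RT the critical section is covered by a per-CPU "local lock" that may sleep, while on !RT builds the macros collapse back to plain interrupt disabling. A condensed sketch of the pattern, using the locallock API this patch introduces (the lock and counter below are invented for illustration and are not part of the patch):

	#include <linux/locallock.h>
	#include <linux/percpu.h>

	static DEFINE_LOCAL_IRQ_LOCK(example_lock);	/* plays the role of pa_lock */
	static DEFINE_PER_CPU(unsigned long, example_events);

	static void example_count_event(void)
	{
		unsigned long flags;

		/* was: local_irq_save(flags); */
		local_lock_irqsave(example_lock, flags);
		__this_cpu_inc(example_events);
		/* was: local_irq_restore(flags); */
		local_unlock_irqrestore(example_lock, flags);
	}

The isolate_pcp_pages()/free_pcppages_bulk() split in free_hot_cold_page() serves the same goal: pages are detached from the per-CPU list while pa_lock is held, but the bulk free runs after the lock has been dropped, keeping the locked section short. The removed slab.c hunks (local_spin_lock_irq(slab_lock, ...)) follow the same local-lock idea.
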
Index: linux-2.6/kernel/sched_fair.c
===================================================================
--- linux-2.6.orig/kernel/sched_fair.c
@@ -11249,7 +11725,7 @@
want_affine = 1;
new_cpu = prev_cpu;
}
-@@ -2067,7 +2067,7 @@ int can_migrate_task(struct task_struct
+@@ -2067,7 +2067,7 @@ int can_migrate_task(struct task_struct
* 2) cannot be migrated to this CPU due to cpus_allowed, or
* 3) are cache-hot on their current CPU.
*/
@@ -11290,7 +11766,7 @@
}
rcu_read_unlock();
-@@ -3418,7 +3436,7 @@ static int load_balance(int this_cpu, st
+@@ -3418,7 +3436,7 @@ redo:
* moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu,
@@ -11634,7 +12110,7 @@
===================================================================
--- linux-2.6.orig/include/linux/interrupt.h
+++ linux-2.6/include/linux/interrupt.h
-@@ -202,7 +202,7 @@ extern void devm_free_irq(struct device
+@@ -202,7 +202,7 @@ extern void devm_free_irq(struct device
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
@@ -11826,7 +12302,7 @@
jmp resume_userspace_sig
ALIGN
-@@ -638,7 +642,11 @@ work_notifysig: # deal with pending s
+@@ -638,7 +642,11 @@ work_notifysig_v86:
movl %esp, %eax
#endif
xorl %edx, %edx
@@ -11891,7 +12367,7 @@
/*
* Queue an RCU for invocation after a quicker grace period.
*/
-@@ -1587,6 +1592,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -1587,6 +1592,7 @@ void call_rcu_bh(struct rcu_head *head,
__call_rcu(head, func, &rcu_bh_state);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -12417,6 +12893,194 @@
+}
#endif /* __LINUX_SEQLOCK_H */
+Index: linux-2.6/drivers/cpufreq/cpufreq.c
+===================================================================
+--- linux-2.6.orig/drivers/cpufreq/cpufreq.c
++++ linux-2.6/drivers/cpufreq/cpufreq.c
+@@ -43,7 +43,7 @@ static DEFINE_PER_CPU(struct cpufreq_pol
+ /* This one keeps track of the previously set governor of a removed CPU */
+ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
+ #endif
+-static DEFINE_SPINLOCK(cpufreq_driver_lock);
++static DEFINE_RAW_SPINLOCK(cpufreq_driver_lock);
+
+ /*
+ * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
+@@ -138,7 +138,7 @@ struct cpufreq_policy *cpufreq_cpu_get(u
+ goto err_out;
+
+ /* get the cpufreq driver */
+- spin_lock_irqsave(&cpufreq_driver_lock, flags);
++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ if (!cpufreq_driver)
+ goto err_out_unlock;
+@@ -156,13 +156,13 @@ struct cpufreq_policy *cpufreq_cpu_get(u
+ if (!kobject_get(&data->kobj))
+ goto err_out_put_module;
+
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return data;
+
+ err_out_put_module:
+ module_put(cpufreq_driver->owner);
+ err_out_unlock:
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ err_out:
+ return NULL;
+ }
+@@ -722,10 +722,10 @@ static int cpufreq_add_dev_policy(unsign
+ return -EBUSY;
+ }
+
+- spin_lock_irqsave(&cpufreq_driver_lock, flags);
++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpumask_copy(managed_policy->cpus, policy->cpus);
+ per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ pr_debug("CPU already managed, adding link\n");
+ ret = sysfs_create_link(&sys_dev->kobj,
+@@ -821,14 +821,16 @@ static int cpufreq_add_dev_interface(uns
+ goto err_out_kobj_put;
+ }
+
+- spin_lock_irqsave(&cpufreq_driver_lock, flags);
++ get_online_cpus();
+ for_each_cpu(j, policy->cpus) {
+ if (!cpu_online(j))
+ continue;
++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ }
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ put_online_cpus();
+
+ ret = cpufreq_add_dev_symlink(cpu, policy);
+ if (ret)
+@@ -970,10 +972,13 @@ static int cpufreq_add_dev(struct sys_de
+
+
+ err_out_unregister:
+- spin_lock_irqsave(&cpufreq_driver_lock, flags);
+- for_each_cpu(j, policy->cpus)
++ get_online_cpus();
++ for_each_cpu(j, policy->cpus) {
++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, j) = NULL;
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ }
++ put_online_cpus();
+
+ kobject_put(&policy->kobj);
+ wait_for_completion(&policy->kobj_unregister);
+@@ -1013,11 +1018,11 @@ static int __cpufreq_remove_dev(struct s
+
+ pr_debug("unregistering CPU %u\n", cpu);
+
+- spin_lock_irqsave(&cpufreq_driver_lock, flags);
++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ data = per_cpu(cpufreq_cpu_data, cpu);
+
+ if (!data) {
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ unlock_policy_rwsem_write(cpu);
+ return -EINVAL;
+ }
+@@ -1031,7 +1036,7 @@ static int __cpufreq_remove_dev(struct s
+ if (unlikely(cpu != data->cpu)) {
+ pr_debug("removing link\n");
+ cpumask_clear_cpu(cpu, data->cpus);
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ kobj = &sys_dev->kobj;
+ cpufreq_cpu_put(data);
+ unlock_policy_rwsem_write(cpu);
+@@ -1040,6 +1045,7 @@ static int __cpufreq_remove_dev(struct s
+ }
+ #endif
+
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ #ifdef CONFIG_SMP
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -1052,15 +1058,17 @@ static int __cpufreq_remove_dev(struct s
+ * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
+ * the sysfs links afterwards.
+ */
++ get_online_cpus();
+ if (unlikely(cpumask_weight(data->cpus) > 1)) {
+ for_each_cpu(j, data->cpus) {
+ if (j == cpu)
+ continue;
++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, j) = NULL;
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ }
+ }
+-
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ put_online_cpus();
+
+ if (unlikely(cpumask_weight(data->cpus) > 1)) {
+ for_each_cpu(j, data->cpus) {
+@@ -1079,8 +1087,6 @@ static int __cpufreq_remove_dev(struct s
+ cpufreq_cpu_put(data);
+ }
+ }
+-#else
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ #endif
+
+ if (cpufreq_driver->target)
+@@ -1802,13 +1808,13 @@ int cpufreq_register_driver(struct cpufr
+ if (driver_data->setpolicy)
+ driver_data->flags |= CPUFREQ_CONST_LOOPS;
+
+- spin_lock_irqsave(&cpufreq_driver_lock, flags);
++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ if (cpufreq_driver) {
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return -EBUSY;
+ }
+ cpufreq_driver = driver_data;
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ ret = sysdev_driver_register(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
+@@ -1842,9 +1848,9 @@ err_sysdev_unreg:
+ sysdev_driver_unregister(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
+ err_null_driver:
+- spin_lock_irqsave(&cpufreq_driver_lock, flags);
++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
+@@ -1870,9 +1876,9 @@ int cpufreq_unregister_driver(struct cpu
+ sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+ unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
+
+- spin_lock_irqsave(&cpufreq_driver_lock, flags);
++ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ return 0;
+ }
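
Note on the cpufreq section above: it is a straight spinlock_t to raw_spinlock_t conversion. On PREEMPT_RT a spinlock_t becomes a sleeping lock, so a lock taken in paths that must stay atomic keeps true spinning semantics via the raw_ API, and the hold times are shrunk accordingly (the per-CPU loops now run under get_online_cpus()/put_online_cpus() with the raw lock taken only around each individual per-CPU store). The shape of the conversion, with a made-up lock and pointer standing in for cpufreq_driver_lock and cpufreq_driver:

	#include <linux/errno.h>
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(example_lock);	/* was: DEFINE_SPINLOCK(...) */
	static void *example_driver;

	static int example_register(void *drv)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_lock, flags);
		if (example_driver) {
			raw_spin_unlock_irqrestore(&example_lock, flags);
			return -EBUSY;
		}
		example_driver = drv;
		raw_spin_unlock_irqrestore(&example_lock, flags);
		return 0;
	}
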
Index: linux-2.6/arch/ia64/kernel/time.c
===================================================================
--- linux-2.6.orig/arch/ia64/kernel/time.c
@@ -13201,7 +13865,7 @@
{
atomic_add(delta, (atomic_t *)(&sem->count));
}
-@@ -104,7 +104,7 @@ static inline void rwsem_atomic_add(int
+@@ -104,7 +104,7 @@ static inline void rwsem_atomic_add(int
/*
* downgrade write lock to read lock
*/
@@ -13479,7 +14143,7 @@
{
atomic_add(delta, (atomic_t *)(&sem->count));
}
-@@ -109,7 +109,7 @@ static inline void rwsem_atomic_add(int
+@@ -109,7 +109,7 @@ static inline void rwsem_atomic_add(int
/*
* downgrade write lock to read lock
*/
@@ -13704,7 +14368,7 @@
===================================================================
--- linux-2.6.orig/fs/ext4/inode.c
+++ linux-2.6/fs/ext4/inode.c
-@@ -5853,7 +5853,7 @@ int ext4_page_mkwrite(struct vm_area_str
+@@ -5875,7 +5875,7 @@ int ext4_page_mkwrite(struct vm_area_str
* Get i_alloc_sem to stop truncates messing with the inode. We cannot
* get i_mutex because we are already holding mmap_sem.
*/
@@ -13713,7 +14377,7 @@
size = i_size_read(inode);
if (page->mapping != mapping || size <= page_offset(page)
|| !PageUptodate(page)) {
-@@ -5865,7 +5865,7 @@ int ext4_page_mkwrite(struct vm_area_str
+@@ -5887,7 +5887,7 @@ int ext4_page_mkwrite(struct vm_area_str
lock_page(page);
wait_on_page_writeback(page);
if (PageMappedToDisk(page)) {
@@ -13722,7 +14386,7 @@
return VM_FAULT_LOCKED;
}
-@@ -5883,7 +5883,7 @@ int ext4_page_mkwrite(struct vm_area_str
+@@ -5905,7 +5905,7 @@ int ext4_page_mkwrite(struct vm_area_str
if (page_has_buffers(page)) {
if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
ext4_bh_unmapped)) {
@@ -13731,7 +14395,7 @@
return VM_FAULT_LOCKED;
}
}
-@@ -5912,11 +5912,11 @@ int ext4_page_mkwrite(struct vm_area_str
+@@ -5934,11 +5934,11 @@ int ext4_page_mkwrite(struct vm_area_str
*/
lock_page(page);
wait_on_page_writeback(page);
@@ -13816,7 +14480,7 @@
have_alloc_sem = 1;
/* communicate with ocfs2_dio_end_io */
ocfs2_iocb_set_sem_locked(iocb);
-@@ -2290,7 +2290,7 @@ static ssize_t ocfs2_file_aio_write(stru
+@@ -2290,7 +2290,7 @@ relock:
*/
if (direct_io && !can_do_direct) {
ocfs2_rw_unlock(inode, rw_level);
@@ -13825,7 +14489,7 @@
have_alloc_sem = 0;
rw_level = -1;
-@@ -2379,7 +2379,7 @@ static ssize_t ocfs2_file_aio_write(stru
+@@ -2379,7 +2379,7 @@ out:
out_sems:
if (have_alloc_sem) {
@@ -14610,7 +15274,7 @@
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
-@@ -1278,6 +1448,14 @@ void hrtimer_interrupt(struct clock_even
+@@ -1278,6 +1448,14 @@ retry:
timer = container_of(node, struct hrtimer, node);
@@ -14625,7 +15289,7 @@
/*
* The immediate goal for using the softexpires is
* minimizing wakeups, not running timers at the
-@@ -1301,7 +1479,10 @@ void hrtimer_interrupt(struct clock_even
+@@ -1301,7 +1479,10 @@ retry:
break;
}
@@ -14637,7 +15301,7 @@
}
}
-@@ -1316,6 +1497,10 @@ void hrtimer_interrupt(struct clock_even
+@@ -1316,6 +1497,10 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
@@ -17364,6 +18028,12 @@
+
+module_init(detector_init);
+module_exit(detector_exit);
+Index: linux-2.6/localversion-rt
+===================================================================
+--- /dev/null
++++ linux-2.6/localversion-rt
+@@ -0,0 +1 @@
++-rt13
Index: linux-2.6/arch/arm/kernel/early_printk.c
===================================================================
--- linux-2.6.orig/arch/arm/kernel/early_printk.c
@@ -17566,7 +18236,7 @@
===================================================================
--- linux-2.6.orig/arch/sparc/kernel/setup_64.c
+++ linux-2.6/arch/sparc/kernel/setup_64.c
-@@ -278,6 +278,12 @@ void __init boot_cpu_id_too_large(int cp
+@@ -463,6 +463,12 @@ static void __init init_sparc64_elf_hwca
popc_patch();
}
@@ -17579,7 +18249,7 @@
void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
-@@ -289,7 +295,7 @@ void __init setup_arch(char **cmdline_p)
+@@ -474,7 +480,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_EARLYFB
if (btext_find_display())
#endif
@@ -17688,7 +18358,7 @@
===================================================================
--- linux-2.6.orig/arch/unicore32/kernel/early_printk.c
+++ linux-2.6/arch/unicore32/kernel/early_printk.c
-@@ -33,21 +33,17 @@ static struct console early_ocd_console
+@@ -33,21 +33,17 @@ static struct console early_ocd_console
.index = -1,
};
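
Note on the early_printk hunks (arm, sparc, unicore32 and the other architectures): they appear to replace each arch's private early-output plumbing with a plain boot console handed to the common code, which is what the truncated early_ocd_console fragment above belongs to. A minimal boot console in that style, with an invented name and a stubbed write hook:

	#include <linux/console.h>
	#include <linux/init.h>

	static void example_early_write(struct console *con, const char *s,
					unsigned int n)
	{
		/* push n bytes of s to the platform debug UART (stub) */
	}

	static struct console example_early_console = {
		.name	= "earlyex",
		.write	= example_early_write,
		.flags	= CON_PRINTBUFFER | CON_BOOT,
		.index	= -1,
	};

	/* arch setup code would register this before real consoles exist */
	static void __init example_setup_early_console(void)
	{
		register_console(&example_early_console);
	}
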
@@ -17796,8 +18466,6 @@
extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
-Index: linux-2.6/localversion-rt
-===================================================================
Index: linux-2.6/kernel/Kconfig.preempt
===================================================================
--- linux-2.6.orig/kernel/Kconfig.preempt
@@ -17966,7 +18634,7 @@
} else {
buf = page_address(page);
ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
-@@ -863,7 +863,7 @@ static int __atapi_pio_bytes(struct ata_
+@@ -863,7 +863,7 @@ next_sg:
unsigned long flags;
/* FIXME: use bounce buffer */
@@ -17975,7 +18643,7 @@
buf = kmap_atomic(page, KM_IRQ0);
/* do the actual data transfer */
-@@ -871,7 +871,7 @@ static int __atapi_pio_bytes(struct ata_
+@@ -871,7 +871,7 @@ next_sg:
count, rw);
kunmap_atomic(buf, KM_IRQ0);
@@ -17997,7 +18665,7 @@
if (m5229_revision < 0xC2) {
/*
-@@ -325,7 +325,7 @@ static int init_chipset_ali15x3(struct p
+@@ -325,7 +325,7 @@ out:
}
pci_dev_put(north);
pci_dev_put(isa_dev);
@@ -18032,7 +18700,7 @@
===================================================================
--- linux-2.6.orig/drivers/ide/ide-io-std.c
+++ linux-2.6/drivers/ide/ide-io-std.c
-@@ -174,7 +174,7 @@ void ide_input_data(ide_drive_t *drive,
+@@ -174,7 +174,7 @@ void ide_input_data(ide_drive_t *drive,
unsigned long uninitialized_var(flags);
if ((io_32bit & 2) && !mmio) {
@@ -18041,7 +18709,7 @@
ata_vlb_sync(io_ports->nsect_addr);
}
-@@ -185,7 +185,7 @@ void ide_input_data(ide_drive_t *drive,
+@@ -185,7 +185,7 @@ void ide_input_data(ide_drive_t *drive,
insl(data_addr, buf, words);
if ((io_32bit & 2) && !mmio)
@@ -18085,7 +18753,7 @@
===================================================================
--- linux-2.6.orig/drivers/ide/ide-iops.c
+++ linux-2.6/drivers/ide/ide-iops.c
-@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive,
+@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive,
if ((stat & ATA_BUSY) == 0)
break;
@@ -18249,7 +18917,7 @@
for (c = counter; c != NULL; c = c->parent) {
spin_lock(&c->lock);
ret = res_counter_charge_locked(c, val);
-@@ -62,7 +62,7 @@ int res_counter_charge(struct res_counte
+@@ -62,7 +62,7 @@ undo:
spin_unlock(&u->lock);
}
done:
@@ -18640,7 +19308,7 @@
===================================================================
--- linux-2.6.orig/kernel/irq/handle.c
+++ linux-2.6/kernel/irq/handle.c
-@@ -156,8 +156,11 @@ handle_irq_event_percpu(struct irq_desc
+@@ -156,8 +156,11 @@ handle_irq_event_percpu(struct irq_desc
action = action->next;
} while (action);
@@ -19439,7 +20107,7 @@
===================================================================
--- linux-2.6.orig/include/linux/mm.h
+++ linux-2.6/include/linux/mm.h
-@@ -1222,27 +1222,59 @@ static inline pmd_t *pmd_alloc(struct mm
+@@ -1230,27 +1230,59 @@ static inline pmd_t *pmd_alloc(struct mm
* overflow into the next struct page (as it might with DEBUG_SPINLOCK).
* When freeing, reset page->mapping so free_pages_check won't complain.
*/
@@ -19632,7 +20300,7 @@
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
-@@ -240,6 +242,7 @@ int radix_tree_preload(gfp_t gfp_mask)
+@@ -240,6 +242,7 @@ out:
return ret;
}
EXPORT_SYMBOL(radix_tree_preload);
@@ -19725,7 +20393,7 @@
}
}
-@@ -611,6 +619,12 @@ static inline int pipelined_send(struct
+@@ -611,6 +619,12 @@ static inline int pipelined_send(struct
!security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
msr->r_msgtype, msr->r_mode)) {
@@ -19738,7 +20406,7 @@
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
msr->r_msg = NULL;
-@@ -624,9 +638,11 @@ static inline int pipelined_send(struct
+@@ -624,9 +638,11 @@ static inline int pipelined_send(struct
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = msg;
@@ -19793,7 +20461,7 @@
===================================================================
--- linux-2.6.orig/net/ipv4/route.c
+++ linux-2.6/net/ipv4/route.c
-@@ -241,7 +241,7 @@ struct rt_hash_bucket {
+@@ -242,7 +242,7 @@ struct rt_hash_bucket {
};
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
@@ -19806,7 +20474,7 @@
===================================================================
--- linux-2.6.orig/include/linux/timer.h
+++ linux-2.6/include/linux/timer.h
-@@ -276,7 +276,7 @@ extern void add_timer(struct timer_list
+@@ -276,7 +276,7 @@ extern void add_timer(struct timer_list
extern int try_to_del_timer_sync(struct timer_list *timer);
@@ -19927,7 +20595,7 @@
/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
-@@ -953,7 +1004,7 @@ int try_to_del_timer_sync(struct timer_l
+@@ -953,7 +1004,7 @@ out:
}
EXPORT_SYMBOL(try_to_del_timer_sync);
@@ -20110,7 +20778,7 @@
===================================================================
--- linux-2.6.orig/kernel/itimer.c
+++ linux-2.6/kernel/itimer.c
-@@ -214,6 +214,7 @@ int do_setitimer(int which, struct itime
+@@ -214,6 +214,7 @@ again:
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
@@ -20256,7 +20924,7 @@
}
/* signal completion unless @done is NULL */
-@@ -55,8 +55,10 @@ static void cpu_stop_signal_done(struct
+@@ -55,8 +55,10 @@ static void cpu_stop_signal_done(struct
if (done) {
if (executed)
done->executed = true;
@@ -20312,14 +20980,14 @@
return done.executed ? done.ret : -ENOENT;
}
-@@ -134,6 +152,7 @@ void stop_one_cpu_nowait(unsigned int cp
+@@ -133,6 +151,7 @@ void stop_one_cpu_nowait(unsigned int cp
+ }
- /* static data for stop_cpus */
- static DEFINE_MUTEX(stop_cpus_mutex);
+ DEFINE_MUTEX(stop_cpus_mutex);
+static DEFINE_MUTEX(stopper_lock);
+ /* static data for stop_cpus */
static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
- int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
@@ -152,17 +171,16 @@ int __stop_cpus(const struct cpumask *cp
cpu_stop_init_done(&done, cpumask_weight(cpumask));
@@ -20343,7 +21011,7 @@
return done.executed ? done.ret : -ENOENT;
}
-@@ -250,13 +268,13 @@ static int cpu_stopper_thread(void *data
+@@ -250,13 +268,13 @@ repeat:
}
work = NULL;
@@ -20359,7 +21027,7 @@
if (work) {
cpu_stop_fn_t fn = work->fn;
-@@ -266,6 +284,16 @@ static int cpu_stopper_thread(void *data
+@@ -266,6 +284,16 @@ repeat:
__set_current_state(TASK_RUNNING);
@@ -20376,7 +21044,7 @@
/* cpu stop callbacks are not allowed to sleep */
preempt_disable();
-@@ -280,7 +308,13 @@ static int cpu_stopper_thread(void *data
+@@ -280,7 +308,13 @@ repeat:
kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
ksym_buf), arg);
@@ -20580,7 +21248,7 @@
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -224,7 +323,19 @@ static int __ref _cpu_down(unsigned int
+@@ -224,7 +323,19 @@ static int __ref _cpu_down(unsigned int
if (!cpu_online(cpu))
return -EINVAL;
@@ -20601,7 +21269,7 @@
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
-@@ -232,7 +343,16 @@ static int __ref _cpu_down(unsigned int
+@@ -232,7 +343,16 @@ static int __ref _cpu_down(unsigned int
__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
printk("%s: attempt to take down CPU %u failed\n",
__func__, cpu);
@@ -20619,7 +21287,7 @@
}
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
-@@ -263,6 +383,9 @@ static int __ref _cpu_down(unsigned int
+@@ -263,6 +383,9 @@ static int __ref _cpu_down(unsigned int
check_for_tasks(cpu);
out_release:
@@ -20661,48 +21329,51 @@
+++ linux-2.6/kernel/trace/trace_output.c
@@ -591,6 +591,11 @@ int trace_print_lat_fmt(struct trace_seq
else
- ret = trace_seq_putc(s, '.');
-
-+ if (entry->migrate_disable)
-+ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
-+ else
-+ ret = trace_seq_putc(s, '.');
-+
- return ret;
- }
-
-Index: linux-2.6/kernel/lockdep.c
-===================================================================
---- linux-2.6.orig/kernel/lockdep.c
-+++ linux-2.6/kernel/lockdep.c
-@@ -2859,10 +2859,7 @@ static int mark_lock(struct task_struct
- void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass)
- {
-- int i;
--
-- for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
-- lock->class_cache[i] = NULL;
-+ memset(lock, 0, sizeof(*lock));
+ ret = trace_seq_putc(s, '.');
- #ifdef CONFIG_LOCK_STAT
- lock->cpu = raw_smp_processor_id();
-@@ -3341,6 +3338,7 @@ static void check_flags(unsigned long fl
- }
- }
++ if (entry->migrate_disable)
++ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
++ else
++ ret = trace_seq_putc(s, '.');
++
+ return ret;
+ }
-+#ifndef CONFIG_PREEMPT_RT_FULL
- /*
- * We dont accurately track softirq state in e.g.
- * hardirq contexts (such as on 4KSTACKS), so only
-@@ -3352,6 +3350,7 @@ static void check_flags(unsigned long fl
- else
- DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
- }
+Index: linux-2.6/kernel/sched_debug.c
+===================================================================
+--- linux-2.6.orig/kernel/sched_debug.c
++++ linux-2.6/kernel/sched_debug.c
+@@ -235,6 +235,7 @@ void print_rt_rq(struct seq_file *m, int
+ P(rt_throttled);
+ PN(rt_time);
+ PN(rt_runtime);
++ P(rt_nr_migratory);
+
+ #undef PN
+ #undef P
+@@ -484,6 +485,10 @@ void proc_sched_show_task(struct task_st
+ P(se.load.weight);
+ P(policy);
+ P(prio);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ P(migrate_disable);
+#endif
++ P(rt.nr_cpus_allowed);
+ #undef PN
+ #undef __PN
+ #undef P
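
Note on the sched_debug.c hunk above: it exposes the per-task migrate_disable count (and rt.nr_cpus_allowed) in the scheduler debug output, and the same counter shows up in the trace_output.c latency-format hunk. This is the bookkeeping behind calls such as get_cpu_light()/put_cpu_light() in the page-allocator hunks: rather than disabling preemption, the task pins itself to its current CPU while remaining preemptible and allowed to sleep. Roughly, with a hypothetical per-CPU drain function:

	#include <linux/smp.h>

	static void example_flush(int cpu)
	{
		/* drain per-CPU state of @cpu; may sleep on RT (stub) */
	}

	static void example_flush_this_cpu(void)
	{
		int cpu;

		/*
		 * get_cpu() would disable preemption; get_cpu_light()
		 * only bumps the migrate_disable counter printed by the
		 * hunk above, so the body may block without trouble.
		 */
		cpu = get_cpu_light();
		example_flush(cpu);
		put_cpu_light();
	}
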
+Index: linux-2.6/kernel/trace/trace.h
+===================================================================
+--- linux-2.6.orig/kernel/trace/trace.h
++++ linux-2.6/kernel/trace/trace.h
+@@ -322,7 +322,6 @@ void trace_init_global_iter(struct trace
- if (!debug_locks)
- print_irqtrace_events(current);
+ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+
+-void default_wait_pipe(struct trace_iterator *iter);
+ void poll_wait_pipe(struct trace_iterator *iter);
+
+ void ftrace(struct trace_array *tr,
Index: linux-2.6/kernel/Kconfig.locks
===================================================================
--- linux-2.6.orig/kernel/Kconfig.locks
@@ -21082,7 +21753,7 @@
/*
* Calculate task priority from the waiter list priority
*
-@@ -136,6 +153,14 @@ static void rt_mutex_adjust_prio(struct
+@@ -136,6 +153,14 @@ static void rt_mutex_adjust_prio(struct
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
@@ -21675,7 +22346,7 @@
===================================================================
--- linux-2.6.orig/kernel/futex.c
+++ linux-2.6/kernel/futex.c
-@@ -1380,6 +1380,16 @@ static int futex_requeue(u32 __user *uad
+@@ -1410,6 +1410,16 @@ retry_private:
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -21692,7 +22363,7 @@
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
-@@ -2224,7 +2234,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2254,7 +2264,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
@@ -21701,7 +22372,7 @@
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2246,8 +2256,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2276,8 +2286,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -21711,7 +22382,7 @@
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
-@@ -2268,20 +2277,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2298,20 +2307,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -21778,7 +22449,7 @@
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -2290,9 +2334,10 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2320,9 +2364,10 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -21791,7 +22462,7 @@
}
} else {
/*
-@@ -2305,7 +2350,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2335,7 +2380,8 @@ static int futex_wait_requeue_pi(u32 __u
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
@@ -23517,7 +24188,7 @@
} while (l != end);
spin_unlock(&i->lock);
-@@ -2892,14 +2895,14 @@ serial8250_console_write(struct console
+@@ -2892,14 +2895,14 @@ serial8250_console_write(struct console
touch_nmi_watchdog();
@@ -23539,7 +24210,7 @@
/*
* First save the IER then disable the interrupts
-@@ -2931,8 +2934,7 @@ serial8250_console_write(struct console
+@@ -2931,8 +2934,7 @@ serial8250_console_write(struct console
check_modem_status(up);
if (locked)
@@ -23911,7 +24582,7 @@
===================================================================
--- linux-2.6.orig/arch/x86/kernel/cpu/common.c
+++ linux-2.6/arch/x86/kernel/cpu/common.c
-@@ -1050,7 +1050,9 @@ DEFINE_PER_CPU(unsigned int, irq_count)
+@@ -1050,7 +1050,9 @@ DEFINE_PER_CPU(unsigned int, irq_count)
*/
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
@@ -24026,7 +24697,7 @@
===================================================================
--- linux-2.6.orig/mm/vmalloc.c
+++ linux-2.6/mm/vmalloc.c
-@@ -788,7 +788,7 @@ static struct vmap_block *new_vmap_block
+@@ -789,7 +789,7 @@ static struct vmap_block *new_vmap_block
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -24035,7 +24706,7 @@
node = numa_node_id();
-@@ -827,12 +827,13 @@ static struct vmap_block *new_vmap_block
+@@ -828,12 +828,13 @@ static struct vmap_block *new_vmap_block
BUG_ON(err);
radix_tree_preload_end();
@@ -24051,7 +24722,7 @@
return vb;
}
-@@ -913,7 +914,7 @@ static void *vb_alloc(unsigned long size
+@@ -914,7 +915,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
unsigned long addr = 0;
unsigned int order;
@@ -24060,7 +24731,7 @@
BUG_ON(size & ~PAGE_MASK);
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -921,7 +922,8 @@ static void *vb_alloc(unsigned long size
+@@ -922,7 +923,8 @@ static void *vb_alloc(unsigned long size
again:
rcu_read_lock();
@@ -24070,7 +24741,7 @@
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
int i;
-@@ -958,7 +960,7 @@ static void *vb_alloc(unsigned long size
+@@ -959,7 +961,7 @@ next:
if (purge)
purge_fragmented_blocks_thiscpu();
@@ -24257,7 +24928,7 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
-@@ -799,6 +800,30 @@ static void icmp_redirect(struct sk_buff
+@@ -801,6 +802,30 @@ out_err:
}
/*
@@ -24288,7 +24959,7 @@
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
-@@ -825,6 +850,11 @@ static void icmp_echo(struct sk_buff *sk
+@@ -827,6 +852,11 @@ static void icmp_echo(struct sk_buff *sk
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
@@ -24358,7 +25029,7 @@
/* Serialize kdb_printf if multiple cpus try to write at once.
* But if any cpu goes recursive in kdb, just print the output,
-@@ -807,7 +804,6 @@ int vkdb_printf(const char *fmt, va_list
+@@ -807,7 +804,6 @@ kdb_print_out:
} else {
__release(kdb_printf_lock);
}
@@ -24377,235 +25048,7 @@
+ kdb_trap_printk--;
return r;
- }
-Index: linux-2.6/arch/Kconfig
-===================================================================
---- linux-2.6.orig/arch/Kconfig
-+++ linux-2.6/arch/Kconfig
-@@ -6,6 +6,7 @@ config OPROFILE
- tristate "OProfile system profiling"
- depends on PROFILING
- depends on HAVE_OPROFILE
-+ depends on !PREEMPT_RT_FULL
- select RING_BUFFER
- select RING_BUFFER_ALLOW_SWAP
- help
-Index: linux-2.6/drivers/net/Kconfig
-===================================================================
---- linux-2.6.orig/drivers/net/Kconfig
-+++ linux-2.6/drivers/net/Kconfig
-@@ -3410,6 +3410,7 @@ config NET_FC
-
- config NETCONSOLE
- tristate "Network console logging support"
-+ depends on !PREEMPT_RT_FULL
- ---help---
- If you want to log kernel messages over the network, enable this.
- See <file:Documentation/networking/netconsole.txt> for details.
-Index: linux-2.6/kernel/time/Kconfig
-===================================================================
---- linux-2.6.orig/kernel/time/Kconfig
-+++ linux-2.6/kernel/time/Kconfig
-@@ -7,6 +7,7 @@ config TICK_ONESHOT
- config NO_HZ
- bool "Tickless System (Dynamic Ticks)"
- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
-+ depends on !PREEMPT_RT_FULL
- select TICK_ONESHOT
- help
- This option enables a tickless system: timer interrupts will
-Index: linux-2.6/mm/Kconfig
-===================================================================
---- linux-2.6.orig/mm/Kconfig
-+++ linux-2.6/mm/Kconfig
-@@ -304,7 +304,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
-
- config TRANSPARENT_HUGEPAGE
- bool "Transparent Hugepage Support"
-- depends on X86 && MMU
-+ depends on X86 && MMU && !PREEMPT_RT_FULL
- select COMPACTION
- help
- Transparent Hugepages allows the kernel to use huge pages and
-Index: linux-2.6/init/Makefile
-===================================================================
---- linux-2.6.orig/init/Makefile
-+++ linux-2.6/init/Makefile
-@@ -29,4 +29,4 @@ $(obj)/version.o: include/generated/comp
- include/generated/compile.h: FORCE
- @$($(quiet)chk_compile.h)
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
-- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
-+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
-Index: linux-2.6/scripts/mkcompile_h
-===================================================================
---- linux-2.6.orig/scripts/mkcompile_h
-+++ linux-2.6/scripts/mkcompile_h
-@@ -4,7 +4,8 @@ TARGET=$1
- ARCH=$2
- SMP=$3
- PREEMPT=$4
--CC=$5
-+RT=$5
-+CC=$6
-
- vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
-
-@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
- CONFIG_FLAGS=""
- if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
- if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
-+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
- UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
-
- # Truncate to maximum length
-Index: linux-2.6/kernel/sched_cpupri.c
-===================================================================
---- linux-2.6.orig/kernel/sched_cpupri.c
-+++ linux-2.6/kernel/sched_cpupri.c
-@@ -47,9 +47,6 @@ static int convert_prio(int prio)
- return cpupri;
- }
-
--#define for_each_cpupri_active(array, idx) \
-- for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES)
--
- /**
- * cpupri_find - find the best (lowest-pri) CPU in the system
- * @cp: The cpupri context
-@@ -71,11 +68,38 @@ int cpupri_find(struct cpupri *cp, struc
- int idx = 0;
- int task_pri = convert_prio(p->prio);
-
-- for_each_cpupri_active(cp->pri_active, idx) {
-+ if (task_pri >= MAX_RT_PRIO)
-+ return 0;
-+
-+ for (idx = 0; idx < task_pri; idx++) {
- struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
-+ int skip = 0;
-
-- if (idx >= task_pri)
-- break;
-+ if (!atomic_read(&(vec)->count))
-+ skip = 1;
-+ /*
-+ * When looking at the vector, we need to read the counter,
-+ * do a memory barrier, then read the mask.
-+ *
-+ * Note: This is still all racey, but we can deal with it.
-+ * Ideally, we only want to look at masks that are set.
-+ *
-+ * If a mask is not set, then the only thing wrong is that we
-+ * did a little more work than necessary.
-+ *
-+ * If we read a zero count but the mask is set, because of the
-+ * memory barriers, that can only happen when the highest prio
-+ * task for a run queue has left the run queue, in which case,
-+ * it will be followed by a pull. If the task we are processing
-+ * fails to find a proper place to go, that pull request will
-+ * pull this task if the run queue is running at a lower
-+ * priority.
-+ */
-+ smp_rmb();
-+
-+ /* Need to do the rmb for every iteration */
-+ if (skip)
-+ continue;
-
- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
- continue;
-@@ -115,7 +139,7 @@ void cpupri_set(struct cpupri *cp, int c
- {
- int *currpri = &cp->cpu_to_pri[cpu];
- int oldpri = *currpri;
-- unsigned long flags;
-+ int do_mb = 0;
-
- newpri = convert_prio(newpri);
-
-@@ -134,26 +158,41 @@ void cpupri_set(struct cpupri *cp, int c
- if (likely(newpri != CPUPRI_INVALID)) {
- struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
-
-- raw_spin_lock_irqsave(&vec->lock, flags);
--
- cpumask_set_cpu(cpu, vec->mask);
-- vec->count++;
-- if (vec->count == 1)
-- set_bit(newpri, cp->pri_active);
--
-- raw_spin_unlock_irqrestore(&vec->lock, flags);
-+ /*
-+ * When adding a new vector, we update the mask first,
-+ * do a write memory barrier, and then update the count, to
-+ * make sure the vector is visible when count is set.
-+ */
-+ smp_mb__before_atomic_inc();
-+ atomic_inc(&(vec)->count);
-+ do_mb = 1;
- }
- if (likely(oldpri != CPUPRI_INVALID)) {
- struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
-
-- raw_spin_lock_irqsave(&vec->lock, flags);
--
-- vec->count--;
-- if (!vec->count)
-- clear_bit(oldpri, cp->pri_active);
-+ /*
-+ * Because the order of modification of the vec->count
-+ * is important, we must make sure that the update
-+ * of the new prio is seen before we decrement the
-+ * old prio. This makes sure that the loop sees
-+ * one or the other when we raise the priority of
-+ * the run queue. We don't care about when we lower the
-+ * priority, as that will trigger an rt pull anyway.
-+ *
-+ * We only need to do a memory barrier if we updated
-+ * the new priority vec.
-+ */
-+ if (do_mb)
-+ smp_mb__after_atomic_inc();
-+
-+ /*
-+ * When removing from the vector, we decrement the counter first
-+ * do a memory barrier and then clear the mask.
-+ */
-+ atomic_dec(&(vec)->count);
-+ smp_mb__after_atomic_inc();
- cpumask_clear_cpu(cpu, vec->mask);
--
-- raw_spin_unlock_irqrestore(&vec->lock, flags);
- }
-
- *currpri = newpri;
-@@ -175,8 +214,7 @@ int cpupri_init(struct cpupri *cp)
- for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
- struct cpupri_vec *vec = &cp->pri_to_cpu[i];
-
-- raw_spin_lock_init(&vec->lock);
-- vec->count = 0;
-+ atomic_set(&vec->count, 0);
- if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
- goto cleanup;
- }
-Index: linux-2.6/kernel/sched_cpupri.h
-===================================================================
---- linux-2.6.orig/kernel/sched_cpupri.h
-+++ linux-2.6/kernel/sched_cpupri.h
-@@ -12,9 +12,8 @@
- /* values 2-101 are RT priorities 0-99 */
-
- struct cpupri_vec {
-- raw_spinlock_t lock;
-- int count;
-- cpumask_var_t mask;
-+ atomic_t count;
-+ cpumask_var_t mask;
- };
-
- struct cpupri {
+ }
Index: linux-2.6/kernel/ksysfs.c
===================================================================
--- linux-2.6.orig/kernel/ksysfs.c
@@ -24636,191 +25079,84 @@
NULL
};
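
Note on the ksysfs.c hunk above (its body is elided between the context lines shown): it extends the kernel_attrs[] array that the trailing "NULL };" terminates, which is how attributes under /sys/kernel are declared. A generic read-only attribute in that style, with invented names and a fixed value:

	#include <linux/kernel.h>
	#include <linux/kobject.h>
	#include <linux/sysfs.h>

	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 1);
	}

	static struct kobj_attribute example_attr =
		__ATTR(example, 0444, example_show, NULL);

	/* &example_attr.attr would then be listed in kernel_attrs[]
	 * just before the terminating NULL shown above. */
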
-Index: linux-2.6/drivers/cpufreq/cpufreq.c
+Index: linux-2.6/arch/Kconfig
===================================================================
---- linux-2.6.orig/drivers/cpufreq/cpufreq.c
-+++ linux-2.6/drivers/cpufreq/cpufreq.c
-@@ -43,7 +43,7 @@ static DEFINE_PER_CPU(struct cpufreq_pol
- /* This one keeps track of the previously set governor of a removed CPU */
- static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
- #endif
--static DEFINE_SPINLOCK(cpufreq_driver_lock);
-+static DEFINE_RAW_SPINLOCK(cpufreq_driver_lock);
-
- /*
- * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
-@@ -138,7 +138,7 @@ struct cpufreq_policy *cpufreq_cpu_get(u
- goto err_out;
-
- /* get the cpufreq driver */
-- spin_lock_irqsave(&cpufreq_driver_lock, flags);
-+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
-
- if (!cpufreq_driver)
- goto err_out_unlock;
-@@ -156,13 +156,13 @@ struct cpufreq_policy *cpufreq_cpu_get(u
- if (!kobject_get(&data->kobj))
- goto err_out_put_module;
-
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return data;
-
- err_out_put_module:
- module_put(cpufreq_driver->owner);
- err_out_unlock:
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- err_out:
- return NULL;
- }
-@@ -722,10 +722,10 @@ static int cpufreq_add_dev_policy(unsign
- return -EBUSY;
- }
-
-- spin_lock_irqsave(&cpufreq_driver_lock, flags);
-+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpumask_copy(managed_policy->cpus, policy->cpus);
- per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- pr_debug("CPU already managed, adding link\n");
- ret = sysfs_create_link(&sys_dev->kobj,
-@@ -821,14 +821,16 @@ static int cpufreq_add_dev_interface(uns
- goto err_out_kobj_put;
- }
-
-- spin_lock_irqsave(&cpufreq_driver_lock, flags);
-+ get_online_cpus();
- for_each_cpu(j, policy->cpus) {
- if (!cpu_online(j))
- continue;
-+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
- per_cpu(cpufreq_cpu_data, j) = policy;
- per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- }
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ put_online_cpus();
-
- ret = cpufreq_add_dev_symlink(cpu, policy);
- if (ret)
-@@ -970,10 +972,13 @@ static int cpufreq_add_dev(struct sys_de
-
-
- err_out_unregister:
-- spin_lock_irqsave(&cpufreq_driver_lock, flags);
-- for_each_cpu(j, policy->cpus)
-+ get_online_cpus();
-+ for_each_cpu(j, policy->cpus) {
-+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
- per_cpu(cpufreq_cpu_data, j) = NULL;
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ }
-+ put_online_cpus();
-
- kobject_put(&policy->kobj);
- wait_for_completion(&policy->kobj_unregister);
-@@ -1013,11 +1018,11 @@ static int __cpufreq_remove_dev(struct s
-
- pr_debug("unregistering CPU %u\n", cpu);
-
-- spin_lock_irqsave(&cpufreq_driver_lock, flags);
-+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
- data = per_cpu(cpufreq_cpu_data, cpu);
-
- if (!data) {
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(cpu);
- return -EINVAL;
- }
-@@ -1031,7 +1036,7 @@ static int __cpufreq_remove_dev(struct s
- if (unlikely(cpu != data->cpu)) {
- pr_debug("removing link\n");
- cpumask_clear_cpu(cpu, data->cpus);
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &sys_dev->kobj;
- cpufreq_cpu_put(data);
- unlock_policy_rwsem_write(cpu);
-@@ -1040,6 +1045,7 @@ static int __cpufreq_remove_dev(struct s
- }
- #endif
-
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- #ifdef CONFIG_SMP
-
- #ifdef CONFIG_HOTPLUG_CPU
-@@ -1052,15 +1058,17 @@ static int __cpufreq_remove_dev(struct s
- * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
- * the sysfs links afterwards.
- */
-+ get_online_cpus();
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
-+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
- per_cpu(cpufreq_cpu_data, j) = NULL;
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- }
- }
--
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ put_online_cpus();
-
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
-@@ -1079,8 +1087,6 @@ static int __cpufreq_remove_dev(struct s
- cpufreq_cpu_put(data);
- }
- }
--#else
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- #endif
+--- linux-2.6.orig/arch/Kconfig
++++ linux-2.6/arch/Kconfig
+@@ -6,6 +6,7 @@ config OPROFILE
+ tristate "OProfile system profiling"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
++ depends on !PREEMPT_RT_FULL
+ select RING_BUFFER
+ select RING_BUFFER_ALLOW_SWAP
+ help
+Index: linux-2.6/drivers/net/Kconfig
+===================================================================
+--- linux-2.6.orig/drivers/net/Kconfig
++++ linux-2.6/drivers/net/Kconfig
+@@ -3410,6 +3410,7 @@ config NET_FC
- if (cpufreq_driver->target)
-@@ -1802,13 +1808,13 @@ int cpufreq_register_driver(struct cpufr
- if (driver_data->setpolicy)
- driver_data->flags |= CPUFREQ_CONST_LOOPS;
+ config NETCONSOLE
+ tristate "Network console logging support"
++ depends on !PREEMPT_RT_FULL
+ ---help---
+ If you want to log kernel messages over the network, enable this.
+ See <file:Documentation/networking/netconsole.txt> for details.
+Index: linux-2.6/kernel/time/Kconfig
+===================================================================
+--- linux-2.6.orig/kernel/time/Kconfig
++++ linux-2.6/kernel/time/Kconfig
+@@ -7,6 +7,7 @@ config TICK_ONESHOT
+ config NO_HZ
+ bool "Tickless System (Dynamic Ticks)"
+ depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
++ depends on !PREEMPT_RT_FULL
+ select TICK_ONESHOT
+ help
+ This option enables a tickless system: timer interrupts will
+Index: linux-2.6/mm/Kconfig
+===================================================================
+--- linux-2.6.orig/mm/Kconfig
++++ linux-2.6/mm/Kconfig
+@@ -304,7 +304,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
-- spin_lock_irqsave(&cpufreq_driver_lock, flags);
-+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
- if (cpufreq_driver) {
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EBUSY;
- }
- cpufreq_driver = driver_data;
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ config TRANSPARENT_HUGEPAGE
+ bool "Transparent Hugepage Support"
+- depends on X86 && MMU
++ depends on X86 && MMU && !PREEMPT_RT_FULL
+ select COMPACTION
+ help
+ Transparent Hugepages allows the kernel to use huge pages and
+Index: linux-2.6/init/Makefile
+===================================================================
+--- linux-2.6.orig/init/Makefile
++++ linux-2.6/init/Makefile
+@@ -29,4 +29,4 @@ silent_chk_compile.h = :
+ include/generated/compile.h: FORCE
+ @$($(quiet)chk_compile.h)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
+Index: linux-2.6/scripts/mkcompile_h
+===================================================================
+--- linux-2.6.orig/scripts/mkcompile_h
++++ linux-2.6/scripts/mkcompile_h
+@@ -4,7 +4,8 @@ TARGET=$1
+ ARCH=$2
+ SMP=$3
+ PREEMPT=$4
+-CC=$5
++RT=$5
++CC=$6
- ret = sysdev_driver_register(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
-@@ -1842,9 +1848,9 @@ int cpufreq_register_driver(struct cpufr
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
- err_null_driver:
-- spin_lock_irqsave(&cpufreq_driver_lock, flags);
-+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver = NULL;
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(cpufreq_register_driver);
-@@ -1870,9 +1876,9 @@ int cpufreq_unregister_driver(struct cpu
- sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
- unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
+ vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
-- spin_lock_irqsave(&cpufreq_driver_lock, flags);
-+ raw_spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver = NULL;
-- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-+ raw_spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
+ CONFIG_FLAGS=""
+ if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+ if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
+ UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
- return 0;
- }
+ # Truncate to maximum length
Added: dists/sid/linux-2.6/debian/patches/series/4-extra
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/series/4-extra Sat Sep 10 21:22:48 2011 (r18073)
@@ -0,0 +1 @@
++ features/all/rt/patch-3.0.4-rt13.patch featureset=rt